From 2db7cb4b53525e96475de8f09f5cfc4d1b036d41 Mon Sep 17 00:00:00 2001
From: Liu Yiqun
Date: Wed, 10 Sep 2025 17:02:38 +0800
Subject: [PATCH 1/5] Add cv models with more than 6 subgraphs.

---
 .../subgraph_0/graph_hash.txt | 1 +
 .../subgraph_0/graph_net.json | 6 +
 .../subgraph_0/input_meta.py | 9 +
 .../BlazeFace-FPN-SSH/subgraph_0/model.py | 2726 ++++
 .../subgraph_0/weight_meta.py | 2579 +++++++++
 .../subgraph_1/graph_hash.txt | 1 +
 .../subgraph_1/graph_net.json | 6 +
 .../subgraph_1/input_meta.py | 9 +
 .../BlazeFace-FPN-SSH/subgraph_1/model.py | 2834 ++++
 .../subgraph_1/weight_meta.py | 2499 +++++
 .../BlazeFace/subgraph_0/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_0/graph_net.json | 6 +
 .../BlazeFace/subgraph_0/input_meta.py | 9 +
 .../PaddleX/BlazeFace/subgraph_0/model.py | 1976 +++++++
 .../BlazeFace/subgraph_0/weight_meta.py | 1968 +++++++
 .../BlazeFace/subgraph_1/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_1/graph_net.json | 6 +
 .../BlazeFace/subgraph_1/input_meta.py | 33 +
 .../PaddleX/BlazeFace/subgraph_1/model.py | 77 +
 .../BlazeFace/subgraph_1/weight_meta.py | 1 +
 .../BlazeFace/subgraph_2/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_2/graph_net.json | 6 +
 .../BlazeFace/subgraph_2/input_meta.py | 78 +
 .../PaddleX/BlazeFace/subgraph_2/model.py | 290 +
 .../BlazeFace/subgraph_2/weight_meta.py | 1 +
 .../BlazeFace/subgraph_3/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_3/graph_net.json | 6 +
 .../BlazeFace/subgraph_3/input_meta.py | 29 +
 .../PaddleX/BlazeFace/subgraph_3/model.py | 186 +
 .../input_meta.py | 29 +
 .../weight_meta.py | 78 +
 .../BlazeFace/subgraph_3/weight_meta.py | 78 +
 .../BlazeFace/subgraph_4/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_4/graph_net.json | 6 +
 .../BlazeFace/subgraph_4/input_meta.py | 9 +
 .../PaddleX/BlazeFace/subgraph_4/model.py | 2041 ++++++++
 .../BlazeFace/subgraph_4/weight_meta.py | 1888 +++++++
 .../BlazeFace/subgraph_5/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_5/graph_net.json | 6 +
 .../BlazeFace/subgraph_5/input_meta.py | 49 +
 .../PaddleX/BlazeFace/subgraph_5/model.py | 69 +
 .../BlazeFace/subgraph_5/weight_meta.py | 1 +
 .../BlazeFace/subgraph_6/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_6/graph_net.json | 6 +
 .../BlazeFace/subgraph_6/input_meta.py | 12 +
 .../PaddleX/BlazeFace/subgraph_6/model.py | 70 +
 .../BlazeFace/subgraph_6/weight_meta.py | 1 +
 .../BlazeFace/subgraph_7/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_7/graph_net.json | 6 +
 .../BlazeFace/subgraph_7/input_meta.py | 83 +
 .../PaddleX/BlazeFace/subgraph_7/model.py | 570 ++
 .../BlazeFace/subgraph_7/weight_meta.py | 1 +
 .../BlazeFace/subgraph_8/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_8/graph_net.json | 6 +
 .../BlazeFace/subgraph_8/input_meta.py | 45 +
 .../PaddleX/BlazeFace/subgraph_8/model.py | 41 +
 .../BlazeFace/subgraph_8/weight_meta.py | 1 +
 .../BlazeFace/subgraph_9/graph_hash.txt | 1 +
 .../BlazeFace/subgraph_9/graph_net.json | 6 +
 .../BlazeFace/subgraph_9/input_meta.py | 5 +
 .../PaddleX/BlazeFace/subgraph_9/model.py | 53 +
 .../BlazeFace/subgraph_9/weight_meta.py | 1 +
 .../PaddleX/SOLOv2/subgraph_0/graph_hash.txt | 1 +
 .../PaddleX/SOLOv2/subgraph_0/graph_net.json | 6 +
 .../PaddleX/SOLOv2/subgraph_0/input_meta.py | 9 +
 .../PaddleX/SOLOv2/subgraph_0/model.py | 4594 ++++
 .../PaddleX/SOLOv2/subgraph_0/weight_meta.py | 3603 +++++++
 .../PaddleX/SOLOv2/subgraph_1/graph_hash.txt | 1 +
 .../PaddleX/SOLOv2/subgraph_1/graph_net.json | 6 +
 .../PaddleX/SOLOv2/subgraph_1/input_meta.py | 48 +
 .../PaddleX/SOLOv2/subgraph_1/model.py | 321 ++
.../PaddleX/SOLOv2/subgraph_1/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_2/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_2/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_2/input_meta.py | 9 + .../PaddleX/SOLOv2/subgraph_2/model.py | 4656 +++++++++++++++++ .../PaddleX/SOLOv2/subgraph_2/weight_meta.py | 3603 +++++++++++++ .../PaddleX/SOLOv2/subgraph_3/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_3/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_3/input_meta.py | 36 + .../PaddleX/SOLOv2/subgraph_3/model.py | 26 + .../PaddleX/SOLOv2/subgraph_3/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_4/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_4/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_4/input_meta.py | 92 + .../PaddleX/SOLOv2/subgraph_4/model.py | 222 + .../PaddleX/SOLOv2/subgraph_4/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_5/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_5/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_5/input_meta.py | 15 + .../PaddleX/SOLOv2/subgraph_5/model.py | 46 + .../PaddleX/SOLOv2/subgraph_5/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_6/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_6/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_6/input_meta.py | 50 + .../PaddleX/SOLOv2/subgraph_6/model.py | 106 + .../PaddleX/SOLOv2/subgraph_6/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_7/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_7/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_7/input_meta.py | 36 + .../PaddleX/SOLOv2/subgraph_7/model.py | 102 + .../PaddleX/SOLOv2/subgraph_7/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_8/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_8/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_8/input_meta.py | 26 + .../PaddleX/SOLOv2/subgraph_8/model.py | 174 + .../PaddleX/SOLOv2/subgraph_8/weight_meta.py | 1 + .../PaddleX/SOLOv2/subgraph_9/graph_hash.txt | 1 + .../PaddleX/SOLOv2/subgraph_9/graph_net.json | 6 + .../PaddleX/SOLOv2/subgraph_9/input_meta.py | 40 + .../PaddleX/SOLOv2/subgraph_9/model.py | 187 + .../PaddleX/SOLOv2/subgraph_9/weight_meta.py | 1 + .../TimesNet_cls/subgraph_0/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_0/graph_net.json | 6 + .../TimesNet_cls/subgraph_0/input_meta.py | 23 + .../PaddleX/TimesNet_cls/subgraph_0/model.py | 361 ++ .../TimesNet_cls/subgraph_0/weight_meta.py | 238 + .../TimesNet_cls/subgraph_1/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_1/graph_net.json | 6 + .../TimesNet_cls/subgraph_1/input_meta.py | 20 + .../PaddleX/TimesNet_cls/subgraph_1/model.py | 102 + .../TimesNet_cls/subgraph_1/weight_meta.py | 9 + .../TimesNet_cls/subgraph_2/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_2/graph_net.json | 6 + .../TimesNet_cls/subgraph_2/input_meta.py | 23 + .../PaddleX/TimesNet_cls/subgraph_2/model.py | 331 ++ .../TimesNet_cls/subgraph_2/weight_meta.py | 238 + .../TimesNet_cls/subgraph_3/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_3/graph_net.json | 6 + .../TimesNet_cls/subgraph_3/input_meta.py | 12 + .../PaddleX/TimesNet_cls/subgraph_3/model.py | 37 + .../TimesNet_cls/subgraph_3/weight_meta.py | 1 + .../TimesNet_cls/subgraph_4/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_4/graph_net.json | 6 + .../TimesNet_cls/subgraph_4/input_meta.py | 23 + .../PaddleX/TimesNet_cls/subgraph_4/model.py | 394 ++ .../TimesNet_cls/subgraph_4/weight_meta.py | 238 + .../TimesNet_cls/subgraph_5/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_5/graph_net.json | 6 + .../TimesNet_cls/subgraph_5/input_meta.py | 18 + .../PaddleX/TimesNet_cls/subgraph_5/model.py | 57 + 
.../TimesNet_cls/subgraph_5/weight_meta.py | 18 + .../TimesNet_cls/subgraph_6/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_6/graph_net.json | 6 + .../TimesNet_cls/subgraph_6/input_meta.py | 20 + .../PaddleX/TimesNet_cls/subgraph_6/model.py | 92 + .../TimesNet_cls/subgraph_6/weight_meta.py | 9 + .../TimesNet_cls/subgraph_7/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_7/graph_net.json | 6 + .../TimesNet_cls/subgraph_7/input_meta.py | 19 + .../PaddleX/TimesNet_cls/subgraph_7/model.py | 54 + .../TimesNet_cls/subgraph_7/weight_meta.py | 1 + .../TimesNet_cls/subgraph_8/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_8/graph_net.json | 6 + .../TimesNet_cls/subgraph_8/input_meta.py | 23 + .../PaddleX/TimesNet_cls/subgraph_8/model.py | 297 ++ .../TimesNet_cls/subgraph_8/weight_meta.py | 238 + .../TimesNet_cls/subgraph_9/graph_hash.txt | 1 + .../TimesNet_cls/subgraph_9/graph_net.json | 6 + .../TimesNet_cls/subgraph_9/input_meta.py | 83 + .../PaddleX/TimesNet_cls/subgraph_9/model.py | 65 + .../TimesNet_cls/subgraph_9/weight_meta.py | 1 + 162 files changed, 41707 insertions(+) create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/input_meta.py create mode 100644 
paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/BlazeFace/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_2/model.py create mode 100644 
paddle_samples/PaddleX/SOLOv2/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/SOLOv2/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt create mode 100644 
paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_cls/subgraph_9/weight_meta.py diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..fabb8dbca --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +256c18df369e1fd1bf5c66bdf10442132323fb1bfa22679bdb38b0727c49b013 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_net.json b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_net.json new file mode 100644 index 000000000..eae7167df --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace-FPN-SSH", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/input_meta.py b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/input_meta.py new file mode 100644 index 000000000..f7c78a24b --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 3, 678, 1024] + dtype = "float32" + min_val = float("-0.964689") + max_val = float("1.18429") + mean = float("-0.24172") + std = float("0.515477") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/model.py b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/model.py new file mode 100644 index 000000000..25fb85a68 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/model.py @@ -0,0 +1,2726 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + 
parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + data_0, + ): + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x3x-1x-1xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_262, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_262 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_261, + parameter_260, + parameter_259, + parameter_258, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_258, parameter_259, parameter_260, parameter_261 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_0 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + relu_0, parameter_257, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_257 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + 
batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_256, + parameter_255, + parameter_254, + parameter_253, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_0, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_1 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + relu_1, parameter_252, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_252, relu_1 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_251, + parameter_250, + parameter_249, + parameter_248, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_248, parameter_249, parameter_250, parameter_251 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_0 = paddle._C_ops.add(relu_0, batch_norm__12) + del batch_norm__12, relu_0 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_2 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + relu_2, parameter_247, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_247 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_246, + parameter_245, + parameter_244, + parameter_243, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_1, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_3 = paddle._C_ops.relu(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_3, parameter_242, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_242, relu_3 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_241, + parameter_240, + parameter_239, + parameter_238, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, 
parameter_238, parameter_239, parameter_240, parameter_241 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_1 = paddle._C_ops.add(relu_2, batch_norm__24) + del batch_norm__24, relu_2 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_4 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + relu_4, parameter_237, [2, 2], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_237 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_236, + parameter_235, + parameter_234, + parameter_233, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_2, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_5 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + relu_5, parameter_232, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_232, relu_5 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_231, + parameter_230, + parameter_229, + parameter_228, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_228, parameter_229, parameter_230, parameter_231 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 2] + + # pd_op.pool2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_4, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del relu_4 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_227, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_227, pool2d_0 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_226, + parameter_225, + parameter_224, + parameter_223, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_223, parameter_224, parameter_225, parameter_226 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_6 = paddle._C_ops.relu(batch_norm__42) + del batch_norm__42 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_2 
= paddle._C_ops.add(relu_6, batch_norm__36) + del batch_norm__36, relu_6 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_7 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.depthwise_conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x1x5x5xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + relu_7, parameter_222, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_222 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_221, + parameter_220, + parameter_219, + parameter_218, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_3, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + ) + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_8 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_8, parameter_217, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_217, relu_8 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_216, + parameter_215, + parameter_214, + parameter_213, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_213, parameter_214, parameter_215, parameter_216 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(relu_7, batch_norm__54) + del batch_norm__54, relu_7 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_9 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.depthwise_conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x1x5x5xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + relu_9, parameter_212, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_212 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_211, + parameter_210, + parameter_209, + parameter_208, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_4, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + ) + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_10 = paddle._C_ops.relu(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_10, parameter_207, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_207, 
relu_10 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_206, + parameter_205, + parameter_204, + parameter_203, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_203, parameter_204, parameter_205, parameter_206 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(relu_9, batch_norm__66) + del batch_norm__66, relu_9 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_11 = paddle._C_ops.relu(add_4) + del add_4 + + # pd_op.depthwise_conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x1x5x5xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + relu_11, parameter_202, [2, 2], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_202 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_201, + parameter_200, + parameter_199, + parameter_198, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_5, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + ) + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_12 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_12, parameter_197, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_197, relu_12 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_196, + parameter_195, + parameter_194, + parameter_193, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_193, parameter_194, parameter_195, parameter_196 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(batch_norm__78, full_0, float("3"), True) + + # pd_op.relu6: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu6_0 = paddle._C_ops.relu6(scale_0) + del scale_0 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(batch_norm__78, relu6_0) + del batch_norm__78, relu6_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.166667"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_1 = 
paddle._C_ops.scale(multiply_0, full_1, float("0"), True) + del multiply_0 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + scale_1, parameter_192, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_192, scale_1 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_6, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_13 = paddle._C_ops.relu(batch_norm__84) + del batch_norm__84 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_13, parameter_187, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187, relu_13 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_14 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.pool2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + relu_11, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del relu_11 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x48x-1x-1xf32, 96x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_182, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_182, pool2d_1 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_178, parameter_179, parameter_180, parameter_181 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_15 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_5 = paddle._C_ops.add(relu_15, relu_14) + del relu_14, relu_15 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_16 = paddle._C_ops.relu(add_5) + del add_5 + + 
# pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + relu_16, parameter_177, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_177 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_176, + parameter_175, + parameter_174, + parameter_173, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_7, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + ) + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_17 = paddle._C_ops.relu(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_17, parameter_172, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_172, relu_17 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_168, parameter_169, parameter_170, parameter_171 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(batch_norm__108, full_0, float("3"), True) + + # pd_op.relu6: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu6_1 = paddle._C_ops.relu6(scale_2) + del scale_2 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(batch_norm__108, relu6_1) + del batch_norm__108, relu6_1 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(multiply_1, full_1, float("0"), True) + del multiply_1 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + scale_3, parameter_167, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_167, scale_3 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_8, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_18 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: 
(-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_18, parameter_162, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_162, relu_18 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_19 = paddle._C_ops.relu(batch_norm__120) + del batch_norm__120 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_6 = paddle._C_ops.add(relu_16, relu_19) + del relu_16, relu_19 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_20 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + relu_20, parameter_157, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_157 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_9, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + ) + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_21 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_21, parameter_152, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_152, relu_21 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_151, + parameter_150, + parameter_149, + parameter_148, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(batch_norm__132, full_0, float("3"), True) + + # pd_op.relu6: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu6_2 = paddle._C_ops.relu6(scale_4) + del scale_4 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(batch_norm__132, relu6_2) + del batch_norm__132, relu6_2 + + # pd_op.scale: 
(-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(multiply_2, full_1, float("0"), True) + del multiply_2 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + scale_5, parameter_147, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_147, scale_5 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_146, + parameter_145, + parameter_144, + parameter_143, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_10, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_22 = paddle._C_ops.relu(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_22, parameter_142, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_142, relu_22 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_141, + parameter_140, + parameter_139, + parameter_138, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_138, parameter_139, parameter_140, parameter_141 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_23 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_7 = paddle._C_ops.add(relu_20, relu_23) + del relu_20, relu_23 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_24 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + relu_24, parameter_137, [2, 2], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_137 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_136, + parameter_135, + parameter_134, + parameter_133, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_11, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + ) + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_25 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_14 = 
paddle._C_ops.conv2d( + relu_25, parameter_132, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_132, relu_25 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_131, + parameter_130, + parameter_129, + parameter_128, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_128, parameter_129, parameter_130, parameter_131 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(batch_norm__156, full_0, float("3"), True) + + # pd_op.relu6: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu6_3 = paddle._C_ops.relu6(scale_6) + del scale_6 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(batch_norm__156, relu6_3) + del batch_norm__156, relu6_3 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(multiply_3, full_1, float("0"), True) + del multiply_3 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + scale_7, parameter_127, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_127, scale_7 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_126, + parameter_125, + parameter_124, + parameter_123, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_12, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_26 = paddle._C_ops.relu(batch_norm__162) + del batch_norm__162 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_26, parameter_122, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_122, relu_26 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_121, + parameter_120, + parameter_119, + parameter_118, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_118, parameter_119, parameter_120, parameter_121 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_27 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.pool2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + relu_24, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + 
"NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_0 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + pool2d_2, parameter_117, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_117, pool2d_2 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_116, + parameter_115, + parameter_114, + parameter_113, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_113, parameter_114, parameter_115, parameter_116 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_28 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(relu_28, relu_27) + del relu_27, relu_28 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_29 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_13 = paddle._C_ops.depthwise_conv2d( + relu_29, parameter_112, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_112 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_13, + parameter_111, + parameter_110, + parameter_109, + parameter_108, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_13, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + ) + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_30 = paddle._C_ops.relu(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_30, parameter_107, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_107, relu_30 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_106, + parameter_105, + parameter_104, + parameter_103, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_103, parameter_104, parameter_105, parameter_106 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(batch_norm__186, full_0, float("3"), True) + + # pd_op.relu6: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu6_4 = paddle._C_ops.relu6(scale_8) + del scale_8 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + multiply_4 
= paddle._C_ops.multiply(batch_norm__186, relu6_4) + del batch_norm__186, relu6_4 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(multiply_4, full_1, float("0"), True) + del multiply_4 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_14 = paddle._C_ops.depthwise_conv2d( + scale_9, parameter_102, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_102, scale_9 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_14, + parameter_101, + parameter_100, + parameter_99, + parameter_98, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_14, + parameter_100, + parameter_101, + parameter_98, + parameter_99, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_31 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_31, parameter_97, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_97, relu_31 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_96, + parameter_95, + parameter_94, + parameter_93, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_93, parameter_94, parameter_95, parameter_96 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_32 = paddle._C_ops.relu(batch_norm__198) + del batch_norm__198 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(relu_29, relu_32) + del relu_29, relu_32 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_33 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_15 = paddle._C_ops.depthwise_conv2d( + relu_33, parameter_92, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_92 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_15, + parameter_91, + parameter_90, + parameter_89, + parameter_88, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_15, parameter_88, parameter_89, parameter_90, parameter_91 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_34 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: 
(-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_34, parameter_87, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_87, relu_34 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_86, + parameter_85, + parameter_84, + parameter_83, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_83, parameter_84, parameter_85, parameter_86 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(batch_norm__210, full_0, float("3"), True) + del full_0 + + # pd_op.relu6: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu6_5 = paddle._C_ops.relu6(scale_10) + del scale_10 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(batch_norm__210, relu6_5) + del batch_norm__210, relu6_5 + + # pd_op.scale: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(multiply_5, full_1, float("0"), True) + del full_1, multiply_5 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_16 = paddle._C_ops.depthwise_conv2d( + scale_11, parameter_82, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_82, scale_11 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_16, + parameter_81, + parameter_80, + parameter_79, + parameter_78, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_16, parameter_78, parameter_79, parameter_80, parameter_81 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_35 = paddle._C_ops.relu(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_35, parameter_77, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_77, relu_35 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_76, + parameter_75, + parameter_74, + parameter_73, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_73, parameter_74, parameter_75, parameter_76 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_36 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(relu_33, relu_36) + 
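# The scale -> relu6 -> multiply -> scale chain repeated throughout these
# inverted-residual blocks is a hard-swish activation. Assuming full_0 holds 1.0
# and full_1 holds 1/6 (both are created earlier in this generated function and
# are not shown in this hunk), the four ops compute x * relu6(x + 3) / 6.
# Illustrative sketch only, not part of the captured graph; paddle.nn.functional.hardswish
# is the equivalent one-call form:
#
#     import paddle.nn.functional as F
#
#     def hard_swish_sketch(x):
#         # scale(x, 1.0, bias=3) -> relu6 -> multiply(x, .) -> scale(., 1/6, bias=0)
#         return x * F.relu6(x + 3.0) / 6.0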
del relu_33, relu_36 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_37 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + relu_24, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72, relu_24 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_71, + parameter_70, + parameter_69, + parameter_68, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_68, parameter_69, parameter_70, parameter_71 + + # pd_op.leaky_relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + leaky_relu_0 = paddle._C_ops.leaky_relu(batch_norm__228, float("0.01")) + del batch_norm__228 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + relu_37, parameter_67, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_67, relu_37 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_66, + parameter_65, + parameter_64, + parameter_63, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_63, parameter_64, parameter_65, parameter_66 + + # pd_op.leaky_relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + leaky_relu_1 = paddle._C_ops.leaky_relu(batch_norm__234, float("0.01")) + del batch_norm__234 + + # pd_op.shape64: (4xi64) <- (-1x48x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(leaky_relu_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2, shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x48x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(leaky_relu_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x48x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(leaky_relu_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4, full_int_array_5, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [slice_1, slice_2] + del slice_1, slice_2 + + # pd_op.nearest_interp: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, None, 
[xi64, xi64], None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + leaky_relu_1, + None, + combine_0, + None, + "NCHW", + -1, + -1, + -1, + [], + "nearest", + False, + 0, + ) + del combine_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_11 = paddle._C_ops.add(leaky_relu_0, nearest_interp_0) + del leaky_relu_0, nearest_interp_0 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_11, parameter_62, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_11, parameter_62 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_61, + parameter_60, + parameter_59, + parameter_58, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_58, parameter_59, parameter_60, parameter_61 + + # pd_op.leaky_relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + leaky_relu_2 = paddle._C_ops.leaky_relu(batch_norm__240, float("0.01")) + del batch_norm__240 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + leaky_relu_2, parameter_57, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del leaky_relu_2, parameter_57 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_56, + parameter_55, + parameter_54, + parameter_53, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_53, parameter_54, parameter_55, parameter_56 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x24x-1x-1xf32, 12x24x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + batch_norm__246, parameter_52, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.leaky_relu: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + leaky_relu_3 = paddle._C_ops.leaky_relu(batch_norm__252, float("0.01")) + del batch_norm__252 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 12x12x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + leaky_relu_3, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del leaky_relu_3, parameter_47 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- 
(-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 12x12x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + batch_norm__258, parameter_42, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.leaky_relu: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + leaky_relu_4 = paddle._C_ops.leaky_relu(batch_norm__264, float("0.01")) + del batch_norm__264 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 12x12x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + leaky_relu_4, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del leaky_relu_4, parameter_37 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_36, + parameter_35, + parameter_34, + parameter_33, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_33, parameter_34, parameter_35, parameter_36 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x24x-1x-1xf32, -1x12x-1x-1xf32, -1x12x-1x-1xf32]) <- (-1x24x-1x-1xf32, -1x12x-1x-1xf32, -1x12x-1x-1xf32) + combine_1 = [batch_norm__246, batch_norm__258, batch_norm__270] + del batch_norm__246, batch_norm__258, batch_norm__270 + + # pd_op.concat: (-1x48x-1x-1xf32) <- ([-1x24x-1x-1xf32, -1x12x-1x-1xf32, -1x12x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_2) + del combine_1 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_38 = paddle._C_ops.relu(concat_0) + del concat_0 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + leaky_relu_1, parameter_32, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del leaky_relu_1, parameter_32 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_31, + parameter_30, + parameter_29, + parameter_28, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_28, parameter_29, parameter_30, parameter_31 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x24x-1x-1xf32, 12x24x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + batch_norm__276, parameter_27, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_27 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_26, + parameter_25, + parameter_24, + parameter_23, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_23, parameter_24, parameter_25, parameter_26 + + # pd_op.leaky_relu: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + leaky_relu_5 = paddle._C_ops.leaky_relu(batch_norm__282, float("0.01")) + del batch_norm__282 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 12x12x3x3xf32) + conv2d_31 = paddle._C_ops.conv2d( + leaky_relu_5, parameter_22, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del leaky_relu_5, parameter_22 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_21, + parameter_20, + parameter_19, + parameter_18, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_18, parameter_19, parameter_20, parameter_21 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 12x12x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + batch_norm__288, parameter_17, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_16, + parameter_15, + parameter_14, + parameter_13, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_13, parameter_14, parameter_15, parameter_16 + + # pd_op.leaky_relu: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + leaky_relu_6 = paddle._C_ops.leaky_relu(batch_norm__294, float("0.01")) + del batch_norm__294 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 12x12x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + leaky_relu_6, parameter_12, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del leaky_relu_6, parameter_12 + + # pd_op.batch_norm_: (-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- 
(-1x12x-1x-1xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_11, + parameter_10, + parameter_9, + parameter_8, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_10, parameter_11, parameter_8, parameter_9 + + # builtin.combine: ([-1x24x-1x-1xf32, -1x12x-1x-1xf32, -1x12x-1x-1xf32]) <- (-1x24x-1x-1xf32, -1x12x-1x-1xf32, -1x12x-1x-1xf32) + combine_2 = [batch_norm__276, batch_norm__288, batch_norm__300] + del batch_norm__276, batch_norm__288, batch_norm__300 + + # pd_op.concat: (-1x48x-1x-1xf32) <- ([-1x24x-1x-1xf32, -1x12x-1x-1xf32, -1x12x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_2) + del combine_2, full_2 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_39 = paddle._C_ops.relu(concat_1) + del concat_1 + + # pd_op.conv2d: (-1x8x-1x-1xf32) <- (-1x48x-1x-1xf32, 8x48x3x3xf32) + conv2d_34 = paddle._C_ops.conv2d( + relu_38, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [1, -1, 1, 1] + + # pd_op.reshape: (1x8x1x1xf32) <- (8xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_6, full_int_array_6) + del parameter_6 + + # pd_op.add: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1x8x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_34, reshape_6) + del conv2d_34, reshape_6 + + # pd_op.transpose: (-1x-1x-1x8xf32) <- (-1x8x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(add_12, [0, 2, 3, 1]) + del add_12 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_7 = [0, -1, 4] + + # pd_op.reshape: (-1x-1x4xf32) <- (-1x-1x-1x8xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(transpose_0, full_int_array_7) + del transpose_0 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x48x-1x-1xf32, 4x48x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + relu_38, parameter_5, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_4, full_int_array_6) + del parameter_4 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_35, reshape_7) + del conv2d_35, reshape_7 + + # pd_op.transpose: (-1x-1x-1x4xf32) <- (-1x4x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(add_13, [0, 2, 3, 1]) + del add_13 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, -1, 2] + + # pd_op.reshape: (-1x-1x2xf32) <- (-1x-1x-1x4xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(transpose_1, full_int_array_8) + del transpose_1 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + relu_39, parameter_3, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x24x1x1xf32) <- (24xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_2, full_int_array_6) + del parameter_2 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1x24x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_36, reshape_8) + del conv2d_36, reshape_8 + + # pd_op.transpose: (-1x-1x-1x24xf32) <- (-1x24x-1x-1xf32) + transpose_2 = paddle._C_ops.transpose(add_14, [0, 2, 3, 1]) + del add_14 + + # pd_op.reshape: (-1x-1x4xf32) <- (-1x-1x-1x24xf32, 3xi64) + reshape_1 = 
paddle._C_ops.reshape(transpose_2, full_int_array_7) + del full_int_array_7, transpose_2 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x48x-1x-1xf32, 12x48x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + relu_39, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x12x1x1xf32) <- (12xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_0, full_int_array_6) + del full_int_array_6, parameter_0 + + # pd_op.add: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1x12x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_37, reshape_9) + del conv2d_37, reshape_9 + + # pd_op.transpose: (-1x-1x-1x12xf32) <- (-1x12x-1x-1xf32) + transpose_3 = paddle._C_ops.transpose(add_15, [0, 2, 3, 1]) + del add_15 + + # pd_op.reshape: (-1x-1x2xf32) <- (-1x-1x-1x12xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_3, full_int_array_8) + del full_int_array_8, transpose_3 + + # pd_op.prior_box: (-1x-1x2x4xf32, -1x-1x2x4xf32) <- (-1x48x-1x-1xf32, -1x3x-1x-1xf32) + prior_box_0, prior_box_1 = (lambda x, f: f(x))( + paddle._C_ops.prior_box( + relu_38, + data_0, + [float("16"), float("24")], + [], + [float("1")], + [float("0.1"), float("0.1"), float("0.2"), float("0.2")], + False, + False, + float("8"), + float("8"), + float("0.5"), + False, + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_38 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x2x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(prior_box_0, full_int_array_9) + del prior_box_0 + + # pd_op.prior_box: (-1x-1x6x4xf32, -1x-1x6x4xf32) <- (-1x48x-1x-1xf32, -1x3x-1x-1xf32) + prior_box_2, prior_box_3 = (lambda x, f: f(x))( + paddle._C_ops.prior_box( + relu_39, + data_0, + [ + float("32"), + float("48"), + float("64"), + float("80"), + float("96"), + float("128"), + ], + [], + [float("1")], + [float("0.1"), float("0.1"), float("0.2"), float("0.2")], + False, + False, + float("16"), + float("16"), + float("0.5"), + False, + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0, relu_39 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x6x4xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(prior_box_2, full_int_array_9) + del full_int_array_9, prior_box_2 + + return reshape_0, reshape_1, reshape_2, reshape_3, reshape_4, reshape_5 diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8073149c6 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/weight_meta.py @@ -0,0 +1,2579 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [12, 48, 3, 3] + dtype = "float32" + min_val = float("-0.212809") + max_val = float("0.212811") + mean = float("-2.28567e-07") + std = float("0.050261") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [24, 48, 3, 3] + dtype = "float32" + min_val = float("-0.322965") + max_val = float("0.362289") + mean = float("-0.000107283") + std = float("0.0437837") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" 
+ shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [4, 48, 3, 3] + dtype = "float32" + min_val = float("-0.326828") + max_val = float("0.326795") + mean = float("9.95345e-06") + std = float("0.0457119") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [8] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [8, 48, 3, 3] + dtype = "float32" + min_val = float("-0.440137") + max_val = float("0.287013") + mean = float("-0.000680203") + std = float("0.0547224") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.30769") + max_val = float("0.29729") + mean = float("-0.0103647") + std = float("0.0850306") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.264329") + max_val = float("0.342854") + mean = float("0.00102005") + std = float("0.0857059") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.344645") + max_val = float("0.322634") + mean = float("-0.00621524") + std = float("0.0947199") + data = None + + +class Program_weight_tensor_parameter_23: + 
name = "parameter_23" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [12, 24, 3, 3] + dtype = "float32" + min_val = float("-0.271543") + max_val = float("0.295597") + mean = float("0.00313789") + std = float("0.0637104") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [24, 48, 3, 3] + dtype = "float32" + min_val = float("-0.27286") + max_val = float("0.223781") + mean = float("-0.00204682") + std = float("0.045447") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.27765") + max_val = float("0.218401") + mean = float("-0.00944567") + std = float("0.0571653") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.20321") + max_val = float("0.294491") 
+ mean = float("0.000701918") + std = float("0.0572048") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.260294") + max_val = float("0.302808") + mean = float("-0.00145974") + std = float("0.0637301") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [12, 24, 3, 3] + dtype = "float32" + min_val = float("-0.189033") + max_val = float("0.193287") + mean = float("0.00265422") + std = float("0.0437311") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [24, 48, 3, 3] + dtype = "float32" + min_val = float("-0.15301") + max_val = float("0.202185") + mean = float("-0.00349348") + std = float("0.0312847") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_62: + name 
= "parameter_62" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.118651") + max_val = float("0.15418") + mean = float("-0.000843023") + std = float("0.0278833") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.404591") + max_val = float("0.322663") + mean = float("-0.00121975") + std = float("0.0840977") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.23749") + max_val = float("0.253186") + mean = float("0.000948868") + std = float("0.0529204") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.203468") + max_val = float("0.244084") + mean = float("0.032513") + std = float("0.0886495") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96] + dtype = "float32" + min_val = float("-0.00888077") + max_val = float("0.64511") + mean = float("0.195654") + std = float("0.11134") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [96] + dtype = "float32" + min_val = float("9.3215e-07") + max_val = float("0.0468354") + mean = float("0.00988351") + std = float("0.00900349") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [96] + dtype = "float32" + min_val = float("-0.522551") + max_val = float("0.271078") + mean = float("-0.0587447") + std = float("0.163134") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.547421") + max_val = float("0.581448") + mean = float("-0.00750784") + std = float("0.121617") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data 
= None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.445237") + max_val = float("0.421132") + mean = float("0.0177318") + std = float("0.116988") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.812156") + max_val = float("0.836568") + mean = float("0.00502019") + std = float("0.127055") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [96] + dtype = "float32" + min_val = float("-0.194873") + max_val = float("0.303438") + mean = float("0.0599241") + std = float("0.100933") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [96] + dtype = "float32" + min_val = float("-0.275771") + max_val = float("0.345063") + mean = float("0.165033") + std = float("0.0808102") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [96] + dtype = "float32" + min_val = float("1.62809e-06") + max_val = float("0.0543306") + mean = float("0.0098635") + std = float("0.011592") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [96] + dtype = "float32" + min_val = float("-0.265977") + max_val = float("0.286321") + mean = float("0.0287959") + std = float("0.0974711") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.650416") + max_val = float("0.603147") + mean = float("0.00625221") + std = float("0.100355") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [96] + dtype = "float32" + min_val = float("-0.178999") + max_val = float("0.273999") + mean = float("0.0355551") + std = float("0.0872794") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [96] + dtype = "float32" + min_val = float("-0.199651") + max_val = float("0.317413") + mean = float("0.149518") + std = float("0.0719826") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("8.45527e-07") + max_val = float("0.0654628") + mean = float("0.0140125") + std = float("0.0115922") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("-0.410517") + max_val = 
float("0.746688") + mean = float("0.0224906") + std = float("0.163775") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.621451") + max_val = float("0.730512") + mean = float("0.00288269") + std = float("0.133274") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.576709") + max_val = float("0.472079") + mean = float("0.0318705") + std = float("0.0954388") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.551569") + max_val = float("0.79233") + mean = float("0.00656941") + std = float("0.132789") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.210287") + max_val = float("0.360676") + mean = float("0.061663") + std = float("0.11524") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96] + dtype = "float32" + min_val = float("2.20691e-22") + max_val = float("0.374086") + mean = float("0.187567") + std = float("0.0716244") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("4.88719e-07") + max_val = float("0.0409852") + mean = float("0.00468511") + std = float("0.00701591") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("-0.265455") + max_val = float("0.11002") + mean = float("-0.00687413") + std = float("0.0603266") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.638166") + max_val = float("0.605925") + mean = float("0.000959776") + std = float("0.110415") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.27398") + max_val = float("0.213707") 
+ mean = float("0.00609781") + std = float("0.0883516") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96] + dtype = "float32" + min_val = float("2.56342e-17") + max_val = float("0.338329") + mean = float("0.108218") + std = float("0.0607836") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("9.07426e-05") + max_val = float("0.255421") + mean = float("0.0651215") + std = float("0.0480313") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("-0.534187") + max_val = float("0.769281") + mean = float("0.0598954") + std = float("0.281707") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.521996") + max_val = float("0.677526") + mean = float("0.00162045") + std = float("0.0930917") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0862824") + max_val = float("0.192923") + mean = float("0.0354027") + std = float("0.0589408") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96] + dtype = "float32" + min_val = float("-0.0886051") + max_val = float("0.305765") + mean = float("0.0688854") + std = float("0.0637069") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("5.43817e-07") + max_val = float("0.0624206") + mean = float("0.00860248") + std = float("0.00952633") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("-0.29155") + max_val = float("0.529408") + mean = float("0.032949") + std = float("0.1087") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.542727") + max_val = float("0.731026") + mean = float("0.00441357") + std = float("0.101699") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.526963") + max_val = float("0.718417") + mean = float("0.0271707") + std = float("0.137399") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_130: + 
name = "parameter_130" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.750764") + max_val = float("0.615568") + mean = float("0.00650651") + std = float("0.102173") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.196603") + max_val = float("0.293187") + mean = float("0.0461721") + std = float("0.0994475") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96] + dtype = "float32" + min_val = float("-0.143706") + max_val = float("0.288717") + mean = float("0.1386") + std = float("0.067955") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("2.42391e-07") + max_val = float("0.0543417") + mean = float("0.0120346") + std = float("0.0113319") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("-0.385787") + max_val = float("0.315506") + mean = float("0.0299118") + std = float("0.151299") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.244368") + max_val = float("0.322879") + mean = float("0.00444439") + std = float("0.0615502") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.218769") + max_val = float("0.26645") + mean = float("0.0120794") + std = float("0.101719") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96] + dtype = "float32" + min_val = float("-0.151104") + max_val = float("0.455434") + mean = float("0.204337") + std = float("0.108727") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("4.99443e-07") + max_val = float("0.0520413") + mean = float("0.0143669") + std = float("0.0110796") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("-0.529203") + max_val = float("0.316652") + mean = float("-0.110592") + std = float("0.206376") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.66539") + max_val = float("0.659582") + mean = float("-0.00743331") + std = float("0.14798") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [24] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.311998") + max_val = float("0.42518") + mean = float("0.032433") + std = float("0.103398") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.62501") + max_val = float("0.868868") + mean = float("0.00120134") + std = float("0.151183") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.201096") + max_val = float("0.381824") + mean = float("0.0872388") + std = float("0.129896") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96] + dtype = "float32" + min_val = float("-0.19896") + max_val = float("0.444492") + mean = float("0.164146") + std = float("0.0764125") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("1.46061e-07") + max_val = float("0.063922") + mean = float("0.0108972") + std = float("0.0132575") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("-0.349646") + max_val = float("0.289415") + mean = float("3.35905e-05") + std = float("0.118501") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-2.3427") + max_val = float("1.31794") + mean = float("0.00274227") + std = float("0.157065") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.25461") + max_val = float("0.357711") + mean = float("0.0716788") + std = float("0.102575") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96] + dtype = "float32" + min_val = float("-3.8339e-21") + max_val = float("0.335899") + mean = float("0.140561") + std = float("0.0733341") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("9.84651e-07") + max_val = float("0.0749838") + mean = float("0.0171509") + std = float("0.0142494") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("-0.584624") + max_val = float("0.697001") + mean = float("0.0541336") + std = float("0.215706") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.629576") + max_val = 
float("0.792518") + mean = float("0.00726174") + std = float("0.145475") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.550579") + max_val = float("1.44293") + mean = float("0.04042") + std = float("0.123449") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.760392") + max_val = float("0.620198") + mean = float("-0.000455801") + std = float("0.147689") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.163777") + max_val = float("0.438977") + mean = float("0.060866") + std = float("0.113573") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96] + dtype = "float32" + min_val = float("1.60807e-16") + max_val = float("0.312574") + mean = float("0.190441") + std = float("0.0540036") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0280074") + mean = float("0.00547939") + std = float("0.00656876") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("-0.217489") + max_val = float("0.166804") + mean = float("-0.000321742") + std = float("0.0656498") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-1.4242") + max_val = float("1.34228") + mean = float("0.00195072") + std = float("0.163816") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.0691111") + max_val = float("0.309389") + mean = float("0.0730593") + std = float("0.0709506") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96] + dtype = "float32" + min_val = float("2.25676e-25") + max_val = float("0.259667") + 
mean = float("0.0864477") + std = float("0.0560166") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("4.10029e-06") + max_val = float("0.407229") + mean = float("0.0932327") + std = float("0.0628289") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("-0.679014") + max_val = float("1.09952") + mean = float("0.0606732") + std = float("0.334586") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96, 48, 1, 1] + dtype = "float32" + min_val = float("-0.685316") + max_val = float("0.866351") + mean = float("0.0033609") + std = float("0.128862") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.124345") + max_val = float("0.200402") + mean = float("0.0303271") + std = float("0.0618885") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96] + dtype = "float32" + min_val = float("-0.0645199") + max_val = float("0.241304") + mean = float("0.0531667") + std = float("0.0595377") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("2.12488e-07") + max_val = float("0.0185689") + mean = float("0.00290344") + std = float("0.0035459") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("-0.296585") + max_val = float("0.269225") + mean = float("0.000687888") + std = float("0.117362") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.692254") + max_val = float("0.591045") + mean = float("-0.000107685") + std = float("0.0949256") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.861787") + max_val = float("0.911569") + mean = float("0.00654302") + std = float("0.122113") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [24] + dtype = "float32" + min_val 
= float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.680274") + max_val = float("0.900081") + mean = float("0.00519539") + std = float("0.132547") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.438729") + max_val = float("0.595238") + mean = float("0.00271517") + std = float("0.081926") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-1.03509") + max_val = float("0.884909") + mean = float("-0.0042555") + std = float("0.154426") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-1.74588") + max_val = float("1.25968") + mean = float("0.00544187") + std = float("0.150889") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [48] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.97275") + max_val = float("0.934075") + mean = float("0.00743306") + std = float("0.172809") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-1.70542") + max_val = float("2.82382") + mean = float("0.00566599") + std = float("0.213991") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.768745") + max_val = float("0.908478") + mean = float("-3.51793e-05") + std = float("0.162772") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.828263") + max_val = float("0.83713") + mean = float("-0.00176522") + std = float("0.168156") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.434799") + max_val = float("0.378905") + mean = float("-0.00524577") + std = float("0.12878") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.874988") + max_val = float("0.922446") + mean = float("-0.00532013") + std = float("0.20765") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-1.57788") + max_val = float("1.06265") + mean = float("0.00676683") + std = float("0.213433") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-1.16097") + max_val = float("0.752904") + mean = float("-0.0190263") + std = float("0.170152") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + 
data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-4.60955") + max_val = float("4.42196") + mean = float("0.00121477") + std = float("0.476149") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-3.87888") + max_val = float("4.02333") + mean = float("-0.00669001") + std = float("0.495656") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..e288630fc --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +0831bb8355759c3dc255347e6c154e104f75359e5dc89d72b2d2e294743f94bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_net.json b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_net.json new file mode 100644 index 000000000..eae7167df --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace-FPN-SSH", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/input_meta.py b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/input_meta.py new file mode 100644 index 000000000..c199e488a --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 3, 640, 640] + dtype = "float32" + min_val = float("-0.964689") + max_val = float("1.18429") + mean = float("-0.100344") + std = float("0.417956") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/model.py b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/model.py new file mode 100644 index 000000000..add748ba7 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/model.py @@ -0,0 +1,2834 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + 
parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + 
parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + data_0, + ): + # pd_op.conv2d: (4x24x320x320xf32) <- (4x3x640x640xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_254, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_254 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_2 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.depthwise_conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x1x5x5xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + relu_2, parameter_249, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_3 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x24x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + relu_3, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.add: (4x24x320x320xf32) <- (4x24x320x320xf32, 4x24x320x320xf32) + add_0 = paddle._C_ops.add(relu_2, batch_norm__12) + + # pd_op.relu: 
(4x24x320x320xf32) <- (4x24x320x320xf32) + relu_4 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.depthwise_conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x1x5x5xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + relu_4, parameter_239, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_5 = paddle._C_ops.relu(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x24x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_5, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (4x24x320x320xf32) <- (4x24x320x320xf32, 4x24x320x320xf32) + add_1 = paddle._C_ops.add(relu_4, batch_norm__24) + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_6 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.depthwise_conv2d: (4x24x160x160xf32) <- (4x24x320x320xf32, 24x1x5x5xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + relu_6, parameter_229, [2, 2], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (4x24x160x160xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x160x160xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.relu: (4x24x160x160xf32) <- (4x24x160x160xf32) + relu_7 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x24x160x160xf32, 48x24x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + relu_7, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + 
batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 2] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_0 + + # pd_op.pool2d: (4x24x160x160xf32) <- (4x24x320x320xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_6, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x24x160x160xf32, 48x24x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_8 = paddle._C_ops.relu(batch_norm__42) + del batch_norm__42 + + # pd_op.add: (4x48x160x160xf32) <- (4x48x160x160xf32, 4x48x160x160xf32) + add_2 = paddle._C_ops.add(relu_8, batch_norm__36) + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_9 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.depthwise_conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x1x5x5xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + relu_9, parameter_214, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_10 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_10, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, 
f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (4x48x160x160xf32) <- (4x48x160x160xf32, 4x48x160x160xf32) + add_3 = paddle._C_ops.add(relu_9, batch_norm__54) + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_11 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.depthwise_conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x1x5x5xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + relu_11, parameter_204, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_12 = paddle._C_ops.relu(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x48x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_12, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.add: (4x48x160x160xf32) <- (4x48x160x160xf32, 4x48x160x160xf32) + add_4 = paddle._C_ops.add(relu_11, batch_norm__66) + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_13 = paddle._C_ops.relu(add_4) + del add_4 + + # pd_op.depthwise_conv2d: (4x48x80x80xf32) <- (4x48x160x160xf32, 48x1x5x5xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + relu_13, parameter_194, [2, 2], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # 
pd_op.relu: (4x48x80x80xf32) <- (4x48x80x80xf32) + relu_14 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x48x80x80xf32, 24x48x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_14, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_2 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_0 + + # pd_op.scale: (4x24x80x80xf32) <- (4x24x80x80xf32, 1xf32) + scale_0 = paddle._C_ops.scale(batch_norm__78, full_0, float("3"), True) + + # pd_op.relu6: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu6_0 = paddle._C_ops.relu6(scale_0) + del scale_0 + + # pd_op.multiply: (4x24x80x80xf32) <- (4x24x80x80xf32, 4x24x80x80xf32) + multiply_0 = paddle._C_ops.multiply(batch_norm__78, relu6_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.166667"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_7 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_8 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_9 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_10 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_11 = full_1 + + # pd_op.scale: (4x24x80x80xf32) <- (4x24x80x80xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_0, full_1, float("0"), True) + del multiply_0 + + # pd_op.depthwise_conv2d: (4x24x80x80xf32) <- (4x24x80x80xf32, 24x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + scale_1, parameter_184, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_15 = paddle._C_ops.relu(batch_norm__84) + del batch_norm__84 + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x24x80x80xf32, 96x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_15, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: 
(4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_16 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.pool2d: (4x48x80x80xf32) <- (4x48x160x160xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + relu_13, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x48x80x80xf32, 96x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_17 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.add: (4x96x80x80xf32) <- (4x96x80x80xf32, 4x96x80x80xf32) + add_5 = paddle._C_ops.add(relu_17, relu_16) + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_18 = paddle._C_ops.relu(add_5) + del add_5 + + # pd_op.depthwise_conv2d: (4x96x80x80xf32) <- (4x96x80x80xf32, 96x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + relu_18, parameter_169, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_19 = paddle._C_ops.relu(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x96x80x80xf32, 24x96x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_19, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, 
+ batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.scale: (4x24x80x80xf32) <- (4x24x80x80xf32, 1xf32) + scale_2 = paddle._C_ops.scale(batch_norm__108, full_0, float("3"), True) + + # pd_op.relu6: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu6_1 = paddle._C_ops.relu6(scale_2) + del scale_2 + + # pd_op.multiply: (4x24x80x80xf32) <- (4x24x80x80xf32, 4x24x80x80xf32) + multiply_1 = paddle._C_ops.multiply(batch_norm__108, relu6_1) + + # pd_op.scale: (4x24x80x80xf32) <- (4x24x80x80xf32, 1xf32) + scale_3 = paddle._C_ops.scale(multiply_1, full_1, float("0"), True) + del multiply_1 + + # pd_op.depthwise_conv2d: (4x24x80x80xf32) <- (4x24x80x80xf32, 24x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + scale_3, parameter_159, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_20 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x24x80x80xf32, 96x24x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_20, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_21 = paddle._C_ops.relu(batch_norm__120) + del batch_norm__120 + + # pd_op.add: (4x96x80x80xf32) <- (4x96x80x80xf32, 4x96x80x80xf32) + add_6 = paddle._C_ops.add(relu_18, relu_21) + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_22 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.depthwise_conv2d: (4x96x80x80xf32) <- (4x96x80x80xf32, 96x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + relu_22, parameter_149, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + 
batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_23 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x96x80x80xf32, 24x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_23, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.scale: (4x24x80x80xf32) <- (4x24x80x80xf32, 1xf32) + scale_4 = paddle._C_ops.scale(batch_norm__132, full_0, float("3"), True) + + # pd_op.relu6: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu6_2 = paddle._C_ops.relu6(scale_4) + del scale_4 + + # pd_op.multiply: (4x24x80x80xf32) <- (4x24x80x80xf32, 4x24x80x80xf32) + multiply_2 = paddle._C_ops.multiply(batch_norm__132, relu6_2) + + # pd_op.scale: (4x24x80x80xf32) <- (4x24x80x80xf32, 1xf32) + scale_5 = paddle._C_ops.scale(multiply_2, full_1, float("0"), True) + del multiply_2 + + # pd_op.depthwise_conv2d: (4x24x80x80xf32) <- (4x24x80x80xf32, 24x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + scale_5, parameter_139, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_24 = paddle._C_ops.relu(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x24x80x80xf32, 96x24x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_24, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_25 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.add: (4x96x80x80xf32) <- (4x96x80x80xf32, 4x96x80x80xf32) + add_7 = paddle._C_ops.add(relu_22, relu_25) + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_26 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.depthwise_conv2d: (4x96x40x40xf32) <- (4x96x80x80xf32, 96x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + relu_26, parameter_129, [2, 2], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_27 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + relu_27, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.scale: (4x24x40x40xf32) <- (4x24x40x40xf32, 1xf32) + scale_6 = paddle._C_ops.scale(batch_norm__156, full_0, float("3"), True) + + # pd_op.relu6: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu6_3 = paddle._C_ops.relu6(scale_6) + del scale_6 + + # pd_op.multiply: (4x24x40x40xf32) <- (4x24x40x40xf32, 4x24x40x40xf32) + multiply_3 = paddle._C_ops.multiply(batch_norm__156, relu6_3) + + # pd_op.scale: (4x24x40x40xf32) <- (4x24x40x40xf32, 1xf32) + scale_7 = paddle._C_ops.scale(multiply_3, full_1, float("0"), True) + del multiply_3 + + # pd_op.depthwise_conv2d: (4x24x40x40xf32) <- (4x24x40x40xf32, 24x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + scale_7, parameter_119, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_118, + parameter_117, + 
parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_28 = paddle._C_ops.relu(batch_norm__162) + del batch_norm__162 + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x24x40x40xf32, 96x24x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_28, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_29 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.pool2d: (4x96x40x40xf32) <- (4x96x80x80xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + relu_26, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x96x40x40xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + pool2d_2, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_30 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.add: (4x96x40x40xf32) <- (4x96x40x40xf32, 4x96x40x40xf32) + add_8 = paddle._C_ops.add(relu_30, relu_29) + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_31 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.depthwise_conv2d: (4x96x40x40xf32) <- (4x96x40x40xf32, 96x1x5x5xf32) + depthwise_conv2d_13 = paddle._C_ops.depthwise_conv2d( + relu_31, parameter_104, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_13, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_32 = paddle._C_ops.relu(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_32, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.scale: (4x24x40x40xf32) <- (4x24x40x40xf32, 1xf32) + scale_8 = paddle._C_ops.scale(batch_norm__186, full_0, float("3"), True) + + # pd_op.relu6: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu6_4 = paddle._C_ops.relu6(scale_8) + del scale_8 + + # pd_op.multiply: (4x24x40x40xf32) <- (4x24x40x40xf32, 4x24x40x40xf32) + multiply_4 = paddle._C_ops.multiply(batch_norm__186, relu6_4) + + # pd_op.scale: (4x24x40x40xf32) <- (4x24x40x40xf32, 1xf32) + scale_9 = paddle._C_ops.scale(multiply_4, full_1, float("0"), True) + del multiply_4 + + # pd_op.depthwise_conv2d: (4x24x40x40xf32) <- (4x24x40x40xf32, 24x1x5x5xf32) + depthwise_conv2d_14 = paddle._C_ops.depthwise_conv2d( + scale_9, parameter_94, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_14, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_33 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x24x40x40xf32, 96x24x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_33, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_34 = paddle._C_ops.relu(batch_norm__198) + del batch_norm__198 + + # pd_op.add: (4x96x40x40xf32) <- 
(4x96x40x40xf32, 4x96x40x40xf32) + add_9 = paddle._C_ops.add(relu_31, relu_34) + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_35 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.depthwise_conv2d: (4x96x40x40xf32) <- (4x96x40x40xf32, 96x1x5x5xf32) + depthwise_conv2d_15 = paddle._C_ops.depthwise_conv2d( + relu_35, parameter_84, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_15, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_36 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_36, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.scale: (4x24x40x40xf32) <- (4x24x40x40xf32, 1xf32) + scale_10 = paddle._C_ops.scale(batch_norm__210, full_0, float("3"), True) + + # pd_op.relu6: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu6_5 = paddle._C_ops.relu6(scale_10) + del scale_10 + + # pd_op.multiply: (4x24x40x40xf32) <- (4x24x40x40xf32, 4x24x40x40xf32) + multiply_5 = paddle._C_ops.multiply(batch_norm__210, relu6_5) + + # pd_op.scale: (4x24x40x40xf32) <- (4x24x40x40xf32, 1xf32) + scale_11 = paddle._C_ops.scale(multiply_5, full_1, float("0"), True) + del multiply_5 + + # pd_op.depthwise_conv2d: (4x24x40x40xf32) <- (4x24x40x40xf32, 24x1x5x5xf32) + depthwise_conv2d_16 = paddle._C_ops.depthwise_conv2d( + scale_11, parameter_74, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_16, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_37 = paddle._C_ops.relu(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: 
(4x96x40x40xf32) <- (4x24x40x40xf32, 96x24x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_37, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_38 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.add: (4x96x40x40xf32) <- (4x96x40x40xf32, 4x96x40x40xf32) + add_10 = paddle._C_ops.add(relu_35, relu_38) + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_39 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.conv2d: (4x48x80x80xf32) <- (4x96x80x80xf32, 48x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + relu_26, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.leaky_relu: (4x48x80x80xf32) <- (4x48x80x80xf32) + leaky_relu_0 = paddle._C_ops.leaky_relu(batch_norm__228, float("0.01")) + + # pd_op.conv2d: (4x48x40x40xf32) <- (4x96x40x40xf32, 48x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + relu_39, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (4x48x40x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x40x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.leaky_relu: (4x48x40x40xf32) <- (4x48x40x40xf32) + leaky_relu_1 = paddle._C_ops.leaky_relu(batch_norm__234, float("0.01")) + + # pd_op.nearest_interp: (4x48x80x80xf32) <- (4x48x40x40xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + leaky_relu_1, None, None, None, "NCHW", -1, 80, 80, [], "nearest", False, 0 + ) + + # pd_op.add: (4x48x80x80xf32) <- (4x48x80x80xf32, 4x48x80x80xf32) + add_11 = paddle._C_ops.add(leaky_relu_0, nearest_interp_0) + + # pd_op.conv2d: (4x48x80x80xf32) <- (4x48x80x80xf32, 48x48x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_11, parameter_54, 
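+            # FPN top-down merge (descriptive comment): the 40x40 lateral (leaky_relu_1)
+            # is upsampled to 80x80 by nearest_interp_0 and added to the 80x80 lateral
+            # (leaky_relu_0); this 3x3 conv then smooths the fused map before it feeds
+            # the 80x80 SSH-style head that produces relu_0.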
[1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.leaky_relu: (4x48x80x80xf32) <- (4x48x80x80xf32) + leaky_relu_2 = paddle._C_ops.leaky_relu(batch_norm__240, float("0.01")) + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x48x80x80xf32, 24x48x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + leaky_relu_2, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (4x12x80x80xf32) <- (4x24x80x80xf32, 12x24x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + batch_norm__246, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.leaky_relu: (4x12x80x80xf32) <- (4x12x80x80xf32) + leaky_relu_3 = paddle._C_ops.leaky_relu(batch_norm__252, float("0.01")) + + # pd_op.conv2d: (4x12x80x80xf32) <- (4x12x80x80xf32, 12x12x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + leaky_relu_3, parameter_39, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.conv2d: (4x12x80x80xf32) <- (4x12x80x80xf32, 12x12x3x3xf32) + conv2d_27 = 
paddle._C_ops.conv2d( + batch_norm__258, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.leaky_relu: (4x12x80x80xf32) <- (4x12x80x80xf32) + leaky_relu_4 = paddle._C_ops.leaky_relu(batch_norm__264, float("0.01")) + + # pd_op.conv2d: (4x12x80x80xf32) <- (4x12x80x80xf32, 12x12x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + leaky_relu_4, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x80x80xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_2 + + # builtin.combine: ([4x24x80x80xf32, 4x12x80x80xf32, 4x12x80x80xf32]) <- (4x24x80x80xf32, 4x12x80x80xf32, 4x12x80x80xf32) + combine_0 = [batch_norm__246, batch_norm__258, batch_norm__270] + + # pd_op.concat: (4x48x80x80xf32) <- ([4x24x80x80xf32, 4x12x80x80xf32, 4x12x80x80xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.relu: (4x48x80x80xf32) <- (4x48x80x80xf32) + relu_0 = paddle._C_ops.relu(concat_0) + del concat_0 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x48x40x40xf32, 24x48x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + leaky_relu_1, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.conv2d: (4x12x40x40xf32) <- (4x24x40x40xf32, 12x24x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + batch_norm__276, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + 
batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.leaky_relu: (4x12x40x40xf32) <- (4x12x40x40xf32) + leaky_relu_5 = paddle._C_ops.leaky_relu(batch_norm__282, float("0.01")) + + # pd_op.conv2d: (4x12x40x40xf32) <- (4x12x40x40xf32, 12x12x3x3xf32) + conv2d_31 = paddle._C_ops.conv2d( + leaky_relu_5, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (4x12x40x40xf32) <- (4x12x40x40xf32, 12x12x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + batch_norm__288, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.leaky_relu: (4x12x40x40xf32) <- (4x12x40x40xf32) + leaky_relu_6 = paddle._C_ops.leaky_relu(batch_norm__294, float("0.01")) + + # pd_op.conv2d: (4x12x40x40xf32) <- (4x12x40x40xf32, 12x12x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + leaky_relu_6, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32, -1xui8) <- (4x12x40x40xf32, 12xf32, 12xf32, 12xf32, 12xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # builtin.combine: ([4x24x40x40xf32, 4x12x40x40xf32, 4x12x40x40xf32]) <- (4x24x40x40xf32, 4x12x40x40xf32, 4x12x40x40xf32) + combine_1 = [batch_norm__276, batch_norm__288, batch_norm__300] + + # pd_op.concat: (4x48x40x40xf32) <- ([4x24x40x40xf32, 4x12x40x40xf32, 4x12x40x40xf32], 1xi32) + concat_1 = 
paddle._C_ops.concat(combine_1, full_2) + del combine_1 + + # pd_op.relu: (4x48x40x40xf32) <- (4x48x40x40xf32) + relu_1 = paddle._C_ops.relu(concat_1) + del ( + add_11, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + 
batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_1, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + depthwise_conv2d_0, + depthwise_conv2d_1, + depthwise_conv2d_10, + depthwise_conv2d_11, + depthwise_conv2d_12, + depthwise_conv2d_13, + depthwise_conv2d_14, + depthwise_conv2d_15, + depthwise_conv2d_16, + depthwise_conv2d_2, + depthwise_conv2d_3, + depthwise_conv2d_4, + depthwise_conv2d_5, + depthwise_conv2d_6, + depthwise_conv2d_7, + depthwise_conv2d_8, + depthwise_conv2d_9, + full_0, + full_1, + full_2, + full_int_array_0, + leaky_relu_0, + leaky_relu_1, + leaky_relu_2, + leaky_relu_3, + leaky_relu_4, + leaky_relu_5, + leaky_relu_6, + nearest_interp_0, + pool2d_0, + pool2d_1, + pool2d_2, + relu6_0, + relu6_1, + relu6_2, + relu6_3, + relu6_4, + relu6_5, + relu_10, + relu_11, + relu_12, + relu_13, + relu_14, + relu_15, + relu_16, + relu_17, + relu_18, + relu_19, + relu_2, + relu_20, + relu_21, + relu_22, + relu_23, + relu_24, + relu_25, + relu_26, + relu_27, + relu_28, + relu_29, + relu_3, + relu_30, + relu_31, + relu_32, + relu_33, + relu_34, + relu_35, + relu_36, + relu_37, + relu_38, + relu_39, + relu_4, + relu_5, + relu_6, + relu_7, + relu_8, + relu_9, + scale_1, + scale_11, + scale_3, + 
scale_5, + scale_7, + scale_9, + ) + + return relu_0, relu_1 diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/weight_meta.py new file mode 100644 index 000000000..485e241c9 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/weight_meta.py @@ -0,0 +1,2499 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.307146") + max_val = float("0.297947") + mean = float("-0.0103325") + std = float("0.0850401") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.263994") + max_val = float("0.343277") + mean = float("0.00107494") + std = float("0.0857121") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.345005") + max_val = float("0.322847") + mean = float("-0.00626467") + std = float("0.0947249") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [12] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [12, 24, 3, 3] + dtype = "float32" + min_val = float("-0.27172") + max_val = float("0.295579") + mean = float("0.0031677") + std = float("0.0637117") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [24, 48, 3, 3] + dtype = "float32" + min_val = float("-0.272522") + max_val = float("0.223536") + mean = float("-0.00209246") + std = float("0.0454485") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.277237") + max_val = float("0.218199") + mean = float("-0.00940977") + std = float("0.0571712") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.203267") + max_val = float("0.294432") + mean = float("0.000712481") + std = float("0.057203") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = 
None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [12, 12, 3, 3] + dtype = "float32" + min_val = float("-0.260145") + max_val = float("0.303114") + mean = float("-0.00150672") + std = float("0.0637241") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [12, 24, 3, 3] + dtype = "float32" + min_val = float("-0.189365") + max_val = float("0.193685") + mean = float("0.00266083") + std = float("0.043731") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [24, 48, 3, 3] + dtype = "float32" + min_val = float("-0.152696") + max_val = float("0.202503") + mean = float("-0.00345673") + std = float("0.031285") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.118866") + max_val = float("0.15463") + mean = float("-0.000852771") + std = float("0.0278829") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.404517") + max_val = float("0.322962") + mean = float("-0.00121101") + std = float("0.0840991") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.236753") + max_val = float("0.25341") + mean = float("0.000970275") + std = float("0.0529201") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.195197") + max_val = float("0.251267") + mean = float("0.0332275") + std = float("0.0889119") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("-2.1098e-08") + max_val = float("0.646137") + mean = float("0.197283") + std = float("0.111499") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("9.96979e-07") + max_val = float("0.0511034") + mean = float("0.00977312") + std = float("0.00929004") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.526823") + max_val = float("0.26322") + mean = float("-0.0585887") + std = float("0.162182") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.546703") + max_val = float("0.581634") + mean = float("-0.00751692") + std = float("0.121635") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.445168") + max_val = float("0.421467") + mean = float("0.0176986") + std = float("0.116996") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.812039") + max_val = float("0.837223") + mean = float("0.00502906") + std = float("0.127069") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [96] + dtype = "float32" + min_val = float("-0.201342") + max_val = float("0.299995") + mean = float("0.0599143") + std = float("0.100801") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [96] + dtype = "float32" + min_val = float("-0.273684") + max_val = float("0.33826") + mean = float("0.164867") + std = float("0.0811513") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [96] + dtype = "float32" + min_val = float("1.58566e-06") + max_val = float("0.0597997") + mean = float("0.0100391") + std = float("0.0119776") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [96] + dtype = "float32" + min_val = float("-0.267196") + max_val = float("0.285633") + mean = float("0.0281174") + std = float("0.096931") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.650395") + max_val = float("0.603524") + mean = float("0.00627068") + std = float("0.100368") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [96] + dtype = "float32" + min_val = float("-0.178257") + max_val = float("0.269106") + mean = float("0.0356469") + std = float("0.0869269") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [96] + dtype = "float32" + min_val = float("-0.200877") + max_val = float("0.311302") + mean = float("0.14944") + std = float("0.0716415") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [96] + dtype = "float32" + min_val = float("8.06948e-07") + max_val = float("0.0717811") + mean = float("0.0141268") + std = float("0.0121784") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [96] + dtype = "float32" + min_val = float("-0.409764") + max_val = float("0.747562") + mean = float("0.0228727") + std = float("0.164013") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.621523") + max_val = float("0.730028") + mean = float("0.00288503") + std = float("0.133288") + data = None + + +class 
Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.576286") + max_val = float("0.472535") + mean = float("0.0318795") + std = float("0.0954394") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.551904") + max_val = float("0.792285") + mean = float("0.00655041") + std = float("0.132802") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.209885") + max_val = float("0.361262") + mean = float("0.0625402") + std = float("0.115588") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("2.26504e-22") + max_val = float("0.362981") + mean = float("0.18764") + std = float("0.0713053") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("4.86932e-07") + max_val = float("0.0387658") + mean = float("0.00470377") + std = float("0.00699802") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.25658") + max_val = float("0.112381") + mean = float("-0.00677154") + std = float("0.0600893") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.637877") + max_val = float("0.606071") + mean = float("0.000935495") + std = float("0.110426") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.26894") + max_val = float("0.213087") + mean = float("0.005827") + std = float("0.0876862") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("2.63095e-17") + max_val = float("0.337233") + mean = float("0.107556") + std = float("0.0606133") + data = None + + +class Program_weight_tensor_parameter_107: + 
name = "parameter_107" + shape = [96] + dtype = "float32" + min_val = float("9.50139e-05") + max_val = float("0.277057") + mean = float("0.0675491") + std = float("0.0512089") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.525881") + max_val = float("0.764992") + mean = float("0.0584549") + std = float("0.277114") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.522209") + max_val = float("0.677645") + mean = float("0.00163558") + std = float("0.0931042") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.0826409") + max_val = float("0.192969") + mean = float("0.0349991") + std = float("0.0587703") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("-0.0862456") + max_val = float("0.299698") + mean = float("0.0685149") + std = float("0.0632791") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("5.52547e-07") + max_val = float("0.066365") + mean = float("0.0087356") + std = float("0.00984267") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.290734") + max_val = float("0.527936") + mean = float("0.0331889") + std = float("0.108276") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.543347") + max_val = float("0.730797") + mean = float("0.00439744") + std = float("0.101712") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.527624") + max_val = float("0.71861") + mean = float("0.0271583") + std = float("0.137418") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = 
"parameter_124" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.750532") + max_val = float("0.614889") + mean = float("0.00653156") + std = float("0.102183") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.195649") + max_val = float("0.292902") + mean = float("0.0458993") + std = float("0.0984341") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("-0.142891") + max_val = float("0.288841") + mean = float("0.139238") + std = float("0.0685107") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("2.22093e-07") + max_val = float("0.0524549") + mean = float("0.012291") + std = float("0.0113583") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.395088") + max_val = float("0.315232") + mean = float("0.0296724") + std = float("0.150703") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.244045") + max_val = float("0.3229") + mean = float("0.00447302") + std = float("0.0615556") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.217959") + max_val = float("0.27506") + mean = float("0.01201") + std = float("0.101205") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("-0.149965") + max_val = float("0.450118") + mean = float("0.204849") + std = float("0.10916") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("4.93891e-07") + max_val = float("0.0538803") + mean = float("0.0142048") + std = float("0.0109944") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.532192") + max_val = float("0.319059") + mean = float("-0.110722") + std = float("0.206301") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.666092") + max_val = float("0.660025") + mean = float("-0.00742784") + std = float("0.147992") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.311613") + max_val = float("0.424949") + mean = float("0.0324912") + std = float("0.103376") + data = None + + +class 
Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.624846") + max_val = float("0.869047") + mean = float("0.00120444") + std = float("0.151201") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.200949") + max_val = float("0.389941") + mean = float("0.0874119") + std = float("0.12948") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("-0.207661") + max_val = float("0.444018") + mean = float("0.164478") + std = float("0.0766073") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("1.43624e-07") + max_val = float("0.0623485") + mean = float("0.0103772") + std = float("0.0124403") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.352059") + max_val = float("0.287181") + mean = float("-5.3647e-05") + std = float("0.118162") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-2.34212") + max_val = float("1.31906") + mean = float("0.00277151") + std = float("0.157075") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.256194") + max_val = float("0.365056") + mean = float("0.0711997") + std = float("0.103455") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("-3.9349e-21") + max_val = float("0.335995") + mean = float("0.140415") + std = float("0.0727859") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("9.42449e-07") + max_val = float("0.0752741") + mean = float("0.0169591") + std = float("0.014332") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.590606") + max_val = float("0.699242") + mean = float("0.0543433") + std = float("0.216921") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.628851") + max_val = float("0.793109") + mean = float("0.00726744") + std = float("0.145487") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_156: + name = 
"parameter_156" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.550344") + max_val = float("1.44281") + mean = float("0.0404223") + std = float("0.123457") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.760655") + max_val = float("0.620057") + mean = float("-0.000483954") + std = float("0.147705") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.15872") + max_val = float("0.442358") + mean = float("0.0611677") + std = float("0.112876") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("1.65044e-16") + max_val = float("0.323613") + mean = float("0.190422") + std = float("0.0540682") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.027553") + mean = float("0.00518674") + std = float("0.00628902") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.219055") + max_val = float("0.167722") + mean = float("-0.000634198") + std = float("0.0652388") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-1.4245") + max_val = float("1.34226") + mean = float("0.00197232") + std = float("0.163827") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.0677146") + max_val = float("0.308459") + mean = float("0.0735753") + std = float("0.0705391") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("2.31621e-25") + max_val = float("0.256885") + mean = float("0.0863789") + std = float("0.0553004") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("4.32892e-06") + max_val = float("0.399993") + mean = float("0.0917837") + std = float("0.0609957") + data = None + + 
+class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.66654") + max_val = float("1.07076") + mean = float("0.0578748") + std = float("0.327955") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 48, 1, 1] + dtype = "float32" + min_val = float("-0.685359") + max_val = float("0.866696") + mean = float("0.00337658") + std = float("0.128872") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.118548") + max_val = float("0.192986") + mean = float("0.0301135") + std = float("0.0620807") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("-0.0631303") + max_val = float("0.238717") + mean = float("0.0527771") + std = float("0.0588953") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("1.98616e-07") + max_val = float("0.0161844") + mean = float("0.00288829") + std = float("0.00345225") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.300082") + max_val = float("0.270617") + mean = float("0.00124521") + std = float("0.118049") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.691856") + max_val = float("0.591602") + mean = float("-0.00011515") + std = float("0.0949342") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.862342") + max_val = float("0.91077") + mean = float("0.0065398") + std = float("0.122104") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.681029") + max_val = float("0.900169") + mean = float("0.00520401") + std = float("0.132565") + data = None + + +class 
Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.438811") + max_val = float("0.595003") + mean = float("0.00271417") + std = float("0.0819477") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-1.03414") + max_val = float("0.884684") + mean = float("-0.00423944") + std = float("0.154432") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-1.74615") + max_val = float("1.25974") + mean = float("0.00546213") + std = float("0.150894") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [48, 48, 1, 1] + 
dtype = "float32" + min_val = float("-0.972952") + max_val = float("0.934554") + mean = float("0.0074201") + std = float("0.172821") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-1.70501") + max_val = float("2.82363") + mean = float("0.00564004") + std = float("0.214006") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.768797") + max_val = float("0.908842") + mean = float("-2.10748e-05") + std = float("0.162774") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.828017") + max_val = float("0.836866") + mean = float("-0.00175476") + std = float("0.168164") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [24] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.435317") + max_val = float("0.378746") + mean = float("-0.00527442") + std = float("0.128794") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.875129") + max_val = float("0.92272") + mean = float("-0.00530111") + std = float("0.207659") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-1.57756") + max_val = float("1.06225") + mean = float("0.00671393") + std = float("0.213442") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-1.16107") + max_val = float("0.753424") + mean = float("-0.0189932") + std = float("0.170179") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [24] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-4.61026") + max_val = float("4.42141") + mean = float("0.00108297") + std = float("0.476164") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-3.8795") + max_val = float("4.02281") + mean = float("-0.00675521") + std = float("0.495667") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..f7247ba79 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +d5cf1876de9f3a36be22c33acb7d538a666b27b805b404faa7c878ee1e4e694d \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_0/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_0/input_meta.py new file mode 100644 index 000000000..f7c78a24b --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_0/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 3, 678, 1024] + dtype = "float32" + min_val = float("-0.964689") + max_val = float("1.18429") + mean = float("-0.24172") + std = float("0.515477") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_0/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_0/model.py new file mode 100644 index 000000000..d94cfc8e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_0/model.py @@ -0,0 +1,1976 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + 
parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + data_0, + ): + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x3x-1x-1xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_197, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_197 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_196, + parameter_195, + parameter_194, + parameter_193, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, 
parameter_193, parameter_194, parameter_195, parameter_196 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_0 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + relu_0, parameter_192, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_192 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_0, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_1 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + relu_1, parameter_187, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187, relu_1 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_0 = paddle._C_ops.add(relu_0, batch_norm__12) + del batch_norm__12, relu_0 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_2 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + relu_2, parameter_182, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_182 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_1, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_3 = paddle._C_ops.relu(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_3, parameter_177, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_177, relu_3 + + 
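Note on the recurring idiom in the generated forward(): every paddle._C_ops.batch_norm call is wrapped in (lambda x, f: f(x))(..., lambda out: ...), which only normalizes the op's return value so that the six batch_norm__* names can always be unpacked, padding with None when the op hands back a single tensor. An equivalent plain helper, shown here only as a sketch (the name unpack_batch_norm is not part of the generated file):

    def unpack_batch_norm(result):
        # The generated code expects six outputs from batch_norm; when the op
        # returns a single tensor rather than a list/tuple, pad with None so
        # the six batch_norm__* names can still be unpacked.
        if isinstance(result, (list, tuple)):
            return result
        return (result, None, None, None, None, None)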
# pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_176, + parameter_175, + parameter_174, + parameter_173, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_173, parameter_174, parameter_175, parameter_176 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_1 = paddle._C_ops.add(relu_2, batch_norm__24) + del batch_norm__24, relu_2 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_4 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + relu_4, parameter_172, [2, 2], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_172 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_2, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_5 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + relu_5, parameter_167, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_167, relu_5 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_163, parameter_164, parameter_165, parameter_166 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 2] + + # pd_op.pool2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_4, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del relu_4 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_162, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_162, pool2d_0 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_6 = paddle._C_ops.relu(batch_norm__42) + del batch_norm__42 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_2 = paddle._C_ops.add(relu_6, batch_norm__36) + del batch_norm__36, relu_6 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_7 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.depthwise_conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x1x5x5xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + relu_7, parameter_157, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_157 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_3, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + ) + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_8 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_8, parameter_152, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_152, relu_8 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_151, + parameter_150, + parameter_149, + parameter_148, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(relu_7, batch_norm__54) + del batch_norm__54, relu_7 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_9 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.depthwise_conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x1x5x5xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + relu_9, parameter_147, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_147 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_146, + parameter_145, + parameter_144, + parameter_143, + True, + float("0.9"), + float("1e-05"), 
+ "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_4, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + ) + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_10 = paddle._C_ops.relu(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_10, parameter_142, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_142, relu_10 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_141, + parameter_140, + parameter_139, + parameter_138, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_138, parameter_139, parameter_140, parameter_141 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(relu_9, batch_norm__66) + del batch_norm__66, relu_9 + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_11 = paddle._C_ops.relu(add_4) + del add_4 + + # pd_op.depthwise_conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x1x5x5xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + relu_11, parameter_137, [2, 2], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_137 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_136, + parameter_135, + parameter_134, + parameter_133, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_5, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + ) + + # pd_op.relu: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + relu_12 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_12, parameter_132, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_132, relu_12 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_131, + parameter_130, + parameter_129, + parameter_128, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_128, parameter_129, parameter_130, parameter_131 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_13 = paddle._C_ops.relu(batch_norm__78) + del batch_norm__78 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 
24x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + relu_13, parameter_127, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_127, relu_13 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_126, + parameter_125, + parameter_124, + parameter_123, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_6, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + ) + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_14 = paddle._C_ops.relu(batch_norm__84) + del batch_norm__84 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_14, parameter_122, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_122, relu_14 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_121, + parameter_120, + parameter_119, + parameter_118, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_118, parameter_119, parameter_120, parameter_121 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_15 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.pool2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + relu_11, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del relu_11 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x48x-1x-1xf32, 96x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_117, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_117, pool2d_1 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_116, + parameter_115, + parameter_114, + parameter_113, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_113, parameter_114, parameter_115, parameter_116 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_16 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_5 = paddle._C_ops.add(relu_16, relu_15) + del relu_15, relu_16 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_17 = paddle._C_ops.relu(add_5) + del add_5 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + 
relu_17, parameter_112, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_112 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_111, + parameter_110, + parameter_109, + parameter_108, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_7, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + ) + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_18 = paddle._C_ops.relu(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_18, parameter_107, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_107, relu_18 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_106, + parameter_105, + parameter_104, + parameter_103, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_103, parameter_104, parameter_105, parameter_106 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_19 = paddle._C_ops.relu(batch_norm__108) + del batch_norm__108 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + relu_19, parameter_102, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_102, relu_19 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_101, + parameter_100, + parameter_99, + parameter_98, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_8, parameter_100, parameter_101, parameter_98, parameter_99 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_20 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_20, parameter_97, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_97, relu_20 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_96, + parameter_95, + parameter_94, + parameter_93, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_93, parameter_94, parameter_95, parameter_96 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_21 = paddle._C_ops.relu(batch_norm__120) + del batch_norm__120 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_6 = paddle._C_ops.add(relu_17, relu_21) + del relu_17, relu_21 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_22 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + relu_22, parameter_92, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_92 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_91, + parameter_90, + parameter_89, + parameter_88, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_9, parameter_88, parameter_89, parameter_90, parameter_91 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_23 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_23, parameter_87, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_87, relu_23 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_86, + parameter_85, + parameter_84, + parameter_83, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_83, parameter_84, parameter_85, parameter_86 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_24 = paddle._C_ops.relu(batch_norm__132) + del batch_norm__132 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + relu_24, parameter_82, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_82, relu_24 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_81, + parameter_80, + parameter_79, + parameter_78, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_10, parameter_78, parameter_79, parameter_80, parameter_81 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_25 = 
paddle._C_ops.relu(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_25, parameter_77, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_77, relu_25 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_76, + parameter_75, + parameter_74, + parameter_73, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_73, parameter_74, parameter_75, parameter_76 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_26 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_7 = paddle._C_ops.add(relu_22, relu_26) + del relu_22, relu_26 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_27 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + relu_27, parameter_72, [2, 2], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_72 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_71, + parameter_70, + parameter_69, + parameter_68, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_11, parameter_68, parameter_69, parameter_70, parameter_71 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_28 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + relu_28, parameter_67, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_67, relu_28 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_66, + parameter_65, + parameter_64, + parameter_63, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_63, parameter_64, parameter_65, parameter_66 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_29 = paddle._C_ops.relu(batch_norm__156) + del batch_norm__156 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + relu_29, parameter_62, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_62, relu_29 + + # pd_op.batch_norm_: 
(-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_61, + parameter_60, + parameter_59, + parameter_58, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_12, parameter_58, parameter_59, parameter_60, parameter_61 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_30 = paddle._C_ops.relu(batch_norm__162) + del batch_norm__162 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_30, parameter_57, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_57, relu_30 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_56, + parameter_55, + parameter_54, + parameter_53, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_53, parameter_54, parameter_55, parameter_56 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_31 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.pool2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + relu_27, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_0 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + pool2d_2, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52, pool2d_2 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_32 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(relu_32, relu_31) + del relu_31, relu_32 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_33 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_13 = paddle._C_ops.depthwise_conv2d( + relu_33, parameter_47, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_47 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_13, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_13, parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_34 = paddle._C_ops.relu(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_34, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42, relu_34 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_35 = paddle._C_ops.relu(batch_norm__186) + del batch_norm__186 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_14 = paddle._C_ops.depthwise_conv2d( + relu_35, parameter_37, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_37, relu_35 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_14, + parameter_36, + parameter_35, + parameter_34, + parameter_33, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_14, parameter_33, parameter_34, parameter_35, parameter_36 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_36 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_36, parameter_32, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_32, relu_36 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_31, + parameter_30, + parameter_29, + parameter_28, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_28, parameter_29, parameter_30, parameter_31 + + # pd_op.relu: (-1x96x-1x-1xf32) <- 
(-1x96x-1x-1xf32) + relu_37 = paddle._C_ops.relu(batch_norm__198) + del batch_norm__198 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(relu_33, relu_37) + del relu_33, relu_37 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_38 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.depthwise_conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x1x5x5xf32) + depthwise_conv2d_15 = paddle._C_ops.depthwise_conv2d( + relu_38, parameter_27, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_27 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_15, + parameter_26, + parameter_25, + parameter_24, + parameter_23, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_15, parameter_23, parameter_24, parameter_25, parameter_26 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_39 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_39, parameter_22, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_22, relu_39 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_21, + parameter_20, + parameter_19, + parameter_18, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_18, parameter_19, parameter_20, parameter_21 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_40 = paddle._C_ops.relu(batch_norm__210) + del batch_norm__210 + + # pd_op.depthwise_conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x1x5x5xf32) + depthwise_conv2d_16 = paddle._C_ops.depthwise_conv2d( + relu_40, parameter_17, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_17, relu_40 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_16, + parameter_16, + parameter_15, + parameter_14, + parameter_13, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_16, parameter_13, parameter_14, parameter_15, parameter_16 + + # pd_op.relu: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + relu_41 = paddle._C_ops.relu(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x24x-1x-1xf32, 96x24x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_41, parameter_12, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_12, 
relu_41 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_11, + parameter_10, + parameter_9, + parameter_8, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_10, parameter_11, parameter_8, parameter_9 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_42 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(relu_38, relu_42) + del relu_38, relu_42 + + # pd_op.relu: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + relu_43 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.conv2d: (-1x8x-1x-1xf32) <- (-1x96x-1x-1xf32, 8x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + relu_27, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x8x1x1xf32) <- (8xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_6, full_int_array_1) + del parameter_6 + + # pd_op.add: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1x8x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_21, reshape_6) + del conv2d_21, reshape_6 + + # pd_op.transpose: (-1x-1x-1x8xf32) <- (-1x8x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(add_11, [0, 2, 3, 1]) + del add_11 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [0, -1, 4] + + # pd_op.reshape: (-1x-1x4xf32) <- (-1x-1x-1x8xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(transpose_0, full_int_array_2) + del transpose_0 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x96x-1x-1xf32, 4x96x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + relu_27, parameter_5, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_4, full_int_array_1) + del parameter_4 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_22, reshape_7) + del conv2d_22, reshape_7 + + # pd_op.transpose: (-1x-1x-1x4xf32) <- (-1x4x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(add_12, [0, 2, 3, 1]) + del add_12 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [0, -1, 2] + + # pd_op.reshape: (-1x-1x2xf32) <- (-1x-1x-1x4xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(transpose_1, full_int_array_3) + del transpose_1 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x96x-1x-1xf32, 24x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + relu_43, parameter_3, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x24x1x1xf32) <- (24xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_2, full_int_array_1) + del parameter_2 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 1x24x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_23, reshape_8) + del conv2d_23, reshape_8 + + # pd_op.transpose: (-1x-1x-1x24xf32) <- (-1x24x-1x-1xf32) + transpose_2 = paddle._C_ops.transpose(add_13, [0, 2, 3, 1]) + del add_13 + + # pd_op.reshape: (-1x-1x4xf32) <- (-1x-1x-1x24xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_2, full_int_array_2) + del full_int_array_2, 
transpose_2 + + # pd_op.conv2d: (-1x12x-1x-1xf32) <- (-1x96x-1x-1xf32, 12x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + relu_43, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x12x1x1xf32) <- (12xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1x12x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_24, reshape_9) + del conv2d_24, reshape_9 + + # pd_op.transpose: (-1x-1x-1x12xf32) <- (-1x12x-1x-1xf32) + transpose_3 = paddle._C_ops.transpose(add_14, [0, 2, 3, 1]) + del add_14 + + # pd_op.reshape: (-1x-1x2xf32) <- (-1x-1x-1x12xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_3, full_int_array_3) + del full_int_array_3, transpose_3 + + # pd_op.prior_box: (-1x-1x2x4xf32, -1x-1x2x4xf32) <- (-1x96x-1x-1xf32, -1x3x-1x-1xf32) + prior_box_0, prior_box_1 = (lambda x, f: f(x))( + paddle._C_ops.prior_box( + relu_27, + data_0, + [float("16"), float("24")], + [], + [float("1")], + [float("0.1"), float("0.1"), float("0.2"), float("0.2")], + False, + False, + float("8"), + float("8"), + float("0.5"), + False, + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del relu_27 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x2x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(prior_box_0, full_int_array_4) + del prior_box_0 + + # pd_op.prior_box: (-1x-1x6x4xf32, -1x-1x6x4xf32) <- (-1x96x-1x-1xf32, -1x3x-1x-1xf32) + prior_box_2, prior_box_3 = (lambda x, f: f(x))( + paddle._C_ops.prior_box( + relu_43, + data_0, + [ + float("32"), + float("48"), + float("64"), + float("80"), + float("96"), + float("128"), + ], + [], + [float("1")], + [float("0.1"), float("0.1"), float("0.2"), float("0.2")], + False, + False, + float("16"), + float("16"), + float("0.5"), + False, + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0, relu_43 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x6x4xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(prior_box_2, full_int_array_4) + del full_int_array_4, prior_box_2 + + return reshape_0, reshape_1, reshape_2, reshape_3, reshape_4, reshape_5 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_0/weight_meta.py new file mode 100644 index 000000000..d26029be9 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_0/weight_meta.py @@ -0,0 +1,1968 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [12, 96, 3, 3] + dtype = "float32" + min_val = float("-0.382441") + max_val = float("0.382444") + mean = float("5.42699e-07") + std = float("0.0398048") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [24, 96, 3, 3] + dtype = "float32" + min_val = float("-0.432492") + max_val = float("0.514938") + mean = float("0.000469344") + std = float("0.0373668") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
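The weight_meta.py descriptors in this file carry only tensor statistics (name, shape, dtype, min_val/max_val and, where recorded, mean/std) with data = None. A minimal sketch of turning one such descriptor into a placeholder Paddle tensor is shown below, assuming a sample-then-clip strategy; the helper name materialize is hypothetical and is not part of this patch or of PaddleX.

    import numpy as np
    import paddle

    def materialize(desc):
        # Draw placeholder values from the recorded statistics; descriptors
        # that store only min_val/max_val (no mean/std) fall back to a
        # uniform draw over that range. This is an assumption for
        # illustration, not the sample generator's actual behaviour.
        if getattr(desc, "mean", None) is not None and getattr(desc, "std", None) is not None:
            values = np.random.normal(desc.mean, desc.std, size=desc.shape)
        else:
            values = np.random.uniform(desc.min_val, desc.max_val, size=desc.shape)
        values = np.clip(values, desc.min_val, desc.max_val).astype(desc.dtype)
        return paddle.to_tensor(values)

    # e.g. materialize(Program_weight_tensor_parameter_1())
    # would yield a 12x96x3x3 float32 tensor matching that descriptor's range.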
+class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [4, 96, 3, 3] + dtype = "float32" + min_val = float("-0.406238") + max_val = float("0.406176") + mean = float("-1.41812e-05") + std = float("0.0390198") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [8] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [8, 96, 3, 3] + dtype = "float32" + min_val = float("-0.489891") + max_val = float("0.638774") + mean = float("0.00114816") + std = float("0.057221") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96] + dtype = "float32" + min_val = float("-0.530965") + max_val = float("0.535467") + mean = float("-0.0388929") + std = float("0.226117") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [96] + dtype = "float32" + min_val = float("-0.546922") + max_val = float("0.799445") + mean = float("0.277148") + std = float("0.285238") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [96] + dtype = "float32" + min_val = float("7.68001e-09") + max_val = float("0.0844836") + mean = float("0.00410107") + std = float("0.00967812") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.370255") + max_val = float("0.784697") + mean = float("0.00220434") + std = float("0.142908") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.545076") + max_val = float("0.50241") + mean = float("0.00108669") + std = float("0.0796126") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.639974") + max_val = float("0.622565") + mean = float("-0.0140181") + std = float("0.117357") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val 
= float("-0.341607") + max_val = float("0.290605") + mean = float("-0.0028622") + std = float("0.0608513") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [96] + dtype = "float32" + min_val = float("-0.197204") + max_val = float("0.393155") + mean = float("0.0562337") + std = float("0.108549") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [96] + dtype = "float32" + min_val = float("-8.62902e-10") + max_val = float("0.380619") + mean = float("0.128543") + std = float("0.0657393") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.100952") + mean = float("0.0073439") + std = float("0.013398") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [96] + dtype = "float32" + min_val = float("-0.171936") + max_val = float("0.239221") + mean = float("0.00161268") + std = float("0.0562356") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.475068") + max_val = float("0.411726") + mean = float("-0.000369142") + std = float("0.0859856") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [96] + dtype = "float32" + min_val = float("-0.463041") + max_val = float("0.196923") + mean = float("-0.036747") + std = float("0.0882828") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [96] + dtype = "float32" + min_val = float("-3.31763e-13") + max_val = float("0.521317") + mean = float("0.114533") + std = float("0.136903") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [96] + dtype = "float32" + min_val = float("8.7614e-09") + max_val = float("0.0299676") + mean = float("0.00623299") + std = float("0.00756914") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [96] + dtype = "float32" + min_val = float("-0.22972") + max_val = float("0.185811") + mean = float("-0.004658") + std = float("0.0688207") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.511841") + max_val = float("0.464881") + mean = float("0.00018354") + std = float("0.0789605") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.383961") + max_val = float("0.44425") + mean = float("0.0203368") + std = float("0.115922") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [24] + dtype = "float32" + min_val = float("0") 
+ max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.347535") + max_val = float("0.318632") + mean = float("0.00184119") + std = float("0.0572548") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.0909796") + max_val = float("0.328702") + mean = float("0.0651876") + std = float("0.095695") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96] + dtype = "float32" + min_val = float("-0.205075") + max_val = float("0.316856") + mean = float("0.114872") + std = float("0.109664") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0461909") + mean = float("0.00403738") + std = float("0.00869253") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("-0.15257") + max_val = float("0.177267") + mean = float("0.0147156") + std = float("0.0450549") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.455388") + max_val = float("0.468574") + mean = float("0.00519543") + std = float("0.0905435") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.426719") + max_val = float("0.688608") + mean = float("-0.00119067") + std = float("0.125492") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96] + dtype = "float32" + min_val = float("-2.23795e-13") + max_val = float("0.715096") + mean = float("0.119807") + std = float("0.171189") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("4.10051e-08") + max_val = float("0.285653") + mean = float("0.0296094") + std = float("0.0504642") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("-0.634342") + max_val = float("0.28884") + mean = float("-0.0535244") + std = float("0.155893") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.335344") + max_val = float("0.339272") + mean = float("-0.00157035") + std = float("0.051136") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.255257") + max_val = float("0.17827") + mean = float("-0.00311647") + std = float("0.0572616") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96] + dtype = "float32" + min_val = float("-4.95376e-13") + 
max_val = float("0.293316") + mean = float("0.0579104") + std = float("0.0786886") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("7.39245e-09") + max_val = float("0.0178894") + mean = float("0.00219131") + std = float("0.00374228") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = "float32" + min_val = float("-0.168069") + max_val = float("0.218718") + mean = float("0.019995") + std = float("0.0468614") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.39618") + max_val = float("0.460166") + mean = float("0.00453488") + std = float("0.0607674") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.386188") + max_val = float("0.46749") + mean = float("0.00250396") + std = float("0.101209") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.350718") + max_val = float("0.340677") + mean = float("0.000647809") + std = float("0.0529755") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.142927") + max_val = float("0.339345") + mean = float("0.0524882") + std = float("0.0902266") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96] + dtype = "float32" + min_val = float("-0.0681067") + max_val = float("0.256962") + mean = float("0.120333") + std = float("0.0655545") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("2.20691e-08") + max_val = float("0.119258") + mean = float("0.013011") + std = float("0.0182635") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("-1.06311") + max_val = float("0.188749") + mean = 
float("-0.0242736") + std = float("0.160053") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.471014") + max_val = float("0.345648") + mean = float("-0.000325033") + std = float("0.0715848") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-1.22023") + max_val = float("0.532788") + mean = float("-0.0589886") + std = float("0.23631") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96] + dtype = "float32" + min_val = float("-9.58246e-17") + max_val = float("0.658497") + mean = float("0.24476") + std = float("0.170981") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [96] + dtype = "float32" + min_val = float("5.3862e-09") + max_val = float("0.0383129") + mean = float("0.00583024") + std = float("0.00687651") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [96] + dtype = "float32" + min_val = float("-0.287318") + max_val = float("0.532702") + mean = float("0.00227577") + std = float("0.124167") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.370282") + max_val = float("0.580933") + mean = float("0.00621631") + std = float("0.0931289") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.372781") + max_val = float("0.473772") + mean = float("-0.00583105") + std = float("0.132274") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.295058") + max_val = float("0.315388") + mean = float("-0.00290463") + std = float("0.0767514") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [96] + dtype = "float32" + min_val = float("-0.191284") + max_val = float("0.423162") + mean = float("0.0874791") + std = 
float("0.121622") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [96] + dtype = "float32" + min_val = float("1.81069e-35") + max_val = float("0.371142") + mean = float("0.184375") + std = float("0.0685866") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [96] + dtype = "float32" + min_val = float("2.55319e-12") + max_val = float("0.119933") + mean = float("0.0124737") + std = float("0.0177892") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [96] + dtype = "float32" + min_val = float("-0.436534") + max_val = float("0.257682") + mean = float("0.0237803") + std = float("0.102218") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.440349") + max_val = float("0.630526") + mean = float("0.00566761") + std = float("0.114988") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [96] + dtype = "float32" + min_val = float("-0.267612") + max_val = float("0.250954") + mean = float("-0.00500821") + std = float("0.08326") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [96] + dtype = "float32" + min_val = float("-9.97754e-12") + max_val = float("0.387866") + mean = float("0.143993") + std = float("0.106538") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("1.01979e-08") + max_val = float("0.0409273") + mean = float("0.0129612") + std = float("0.0105548") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("-0.377232") + max_val = float("0.46158") + mean = float("0.028949") + std = float("0.149697") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.445627") + max_val = float("0.515465") + mean = float("0.00808711") + std = float("0.107539") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.581288") + max_val = float("0.55028") + mean = float("0.00386") + std = float("0.155374") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [24] + dtype = "float32" + min_val 
= float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.359155") + max_val = float("0.414832") + mean = float("-0.000912677") + std = float("0.0827877") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.124575") + max_val = float("0.423472") + mean = float("0.0850134") + std = float("0.12137") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96] + dtype = "float32" + min_val = float("1.07372e-33") + max_val = float("0.415349") + mean = float("0.181789") + std = float("0.0735111") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0959627") + mean = float("0.00517028") + std = float("0.0108865") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("-0.28275") + max_val = float("0.207843") + mean = float("0.0166553") + std = float("0.0792488") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.535511") + max_val = float("0.501058") + mean = float("0.00765561") + std = float("0.111856") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.51497") + max_val = float("0.673828") + mean = float("0.0118958") + std = float("0.138666") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96] + dtype = "float32" + min_val = float("-3.99485e-33") + max_val = float("0.368012") + mean = float("0.114992") + std = float("0.0799636") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("5.61326e-08") + max_val = float("0.286435") + mean = float("0.104653") + std = float("0.0696559") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("-0.888045") + max_val = float("0.868095") + mean = float("-0.167544") + std = float("0.295687") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96, 48, 1, 1] + dtype = "float32" + min_val = float("-0.420277") + max_val = float("0.439216") + mean = float("-0.00674336") + std = float("0.100833") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.304824") + max_val = float("0.211332") + mean = float("-0.00048278") + std = float("0.0687736") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96] + dtype = "float32" + min_val = float("-2.85126e-15") + max_val = float("0.276496") + mean = float("0.081308") + std = float("0.0711722") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("2.5374e-08") + max_val = float("0.0632919") + mean = float("0.0116602") 
+ std = float("0.0114943") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("-0.295723") + max_val = float("0.246494") + mean = float("0.0285897") + std = float("0.0983471") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.498643") + max_val = float("0.511003") + mean = float("0.00342643") + std = float("0.097562") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.607644") + max_val = float("0.404109") + mean = float("0.00617096") + std = float("0.121979") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.473035") + max_val = float("0.41846") + mean = float("0.00557985") + std = float("0.0954456") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.431641") + max_val = float("0.346767") + mean = float("0.00928836") + std = float("0.0875686") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + 
shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.471687") + max_val = float("0.42132") + mean = float("-0.000746861") + std = float("0.113081") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.437036") + max_val = float("0.483728") + mean = float("0.00653367") + std = float("0.137463") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.463506") + max_val = float("0.402659") + mean = float("-0.000876578") + std = float("0.114242") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.56525") + max_val = float("0.601458") + mean = float("0.00602393") + std = float("0.155033") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [48] 
+ dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.579933") + max_val = float("0.522799") + mean = float("0.00883244") + std = float("0.149145") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.510092") + max_val = float("0.673948") + mean = float("0.00679488") + std = float("0.142246") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.568287") + max_val = float("0.479119") + mean = float("-0.000253342") + std = float("0.168872") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.445222") + max_val = 
float("0.539663") + mean = float("0.00184063") + std = float("0.16764") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.611166") + max_val = float("0.614136") + mean = float("-0.00540836") + std = float("0.178024") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.688665") + max_val = float("0.492885") + mean = float("-0.019021") + std = float("0.182666") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.758426") + max_val = float("0.782294") + mean = float("-0.00751704") + std = float("0.213272") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-0.653518") + max_val = float("0.686266") + mean = float("-0.000838119") + std = float("0.200618") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..5662df894 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +eb0f6c1aa007c19037b62cbd0178fc7740e173fc1d66b77e318024dac8bbc2de \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_1/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_1/input_meta.py new file mode 100644 index 000000000..5a7ddbd7b --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_1/input_meta.py @@ -0,0 +1,33 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 22400] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 22400, 1] + dtype = "float32" + max_val = float("2.99171") + mean = float("0.0364044") + std = float("0.0325241") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 22400, 1] + dtype = "int64" + min_val = 0 + max_val = 1 + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "float32" + data = [393.858] diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_1/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_1/model.py new file mode 100644 index 000000000..6ede00c15 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_1/model.py @@ -0,0 +1,77 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x22400x1xb) <- (4x22400xb, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.masked_select: (-1xf32) <- (4x22400x1xf32, 4x22400x1xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, unsqueeze_0) + del data_1 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_1 = [] + + # pd_op.sum: (xf32) <- (-1xf32, 0xi64) + sum_0 = paddle._C_ops.sum(masked_select_0, full_int_array_1, None, False) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(sum_0, full_0, float("0"), True) + del sum_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (4x22400x1xb) <- (4x22400x1xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(data_2, full_1) + del data_2, full_1 + + # pd_op.cast: (4x22400x1xf32) <- (4x22400x1xb) + cast_0 = paddle._C_ops.cast(not_equal_0, paddle.float32) + del not_equal_0 + + # pd_op.sum: (xf32) <- 
(4x22400x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(cast_0, full_int_array_1, None, False) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_0, full_2) + del full_2, sum_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(scale_0, data_3) + del data_3 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(add_0, clip_0) + del ( + add_0, + clip_0, + full_0, + full_int_array_1, + masked_select_0, + scale_0, + unsqueeze_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..46d8833f6 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +d207034db8fcbf509b86ec1be4306b07b3fcbb02483af005c8502051b51779cc \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_2/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_2/input_meta.py new file mode 100644 index 000000000..60c5f6ad2 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_2/input_meta.py @@ -0,0 +1,78 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 23808, 4] + dtype = "float32" + min_val = float("-4.38122") + max_val = float("5.77879") + mean = float("0.283729") + std = float("0.891874") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 18048, 4] + dtype = "float32" + min_val = float("-5.55512") + max_val = float("5.59403") + mean = float("0.0708887") + std = float("1.01141") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 23808, 2] + dtype = "float32" + min_val = float("-3.13319") + max_val = float("3.13469") + mean = float("-0.00733385") + std = float("1.71921") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 18048, 2] + dtype = "float32" + min_val = float("-4.29068") + max_val = float("4.29078") + mean = float("0.000183691") + std = float("1.524") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [23808, 4] + dtype = "float32" + min_val = float("-0.0107817") + max_val = float("1.01348") + mean = float("0.500674") + std = float("0.289302") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [18048, 4] + dtype = "float32" + min_val = float("-0.0754717") + max_val = float("1.08895") + mean = float("0.503369") + std = float("0.294484") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 2] + dtype = "float32" + data = [742.0, 1024.0] + + +class Program_weight_tensor_data_7: + name = 
"data_7" + shape = [1, 2] + dtype = "float32" + data = [1.0, 1.0] diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_2/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_2/model.py new file mode 100644 index 000000000..69f2df4ca --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_2/model.py @@ -0,0 +1,290 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x-1x4xf32, 1x-1x4xf32]) <- (1x-1x4xf32, 1x-1x4xf32) + combine_0 = [data_0, data_1] + del data_0, data_1 + + # pd_op.concat: (1x-1x4xf32) <- ([1x-1x4xf32, 1x-1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32) + combine_1 = [data_4, data_5] + del data_4, data_5 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_1) + del combine_1, full_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [3] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + concat_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + concat_1, [1], full_int_array_2, full_int_array_3, [1], [1] + ) + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_0 = paddle._C_ops.subtract(slice_0, slice_1) + del slice_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_0, full_2, float("0"), True) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [4] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + concat_1, [1], full_int_array_1, full_int_array_4, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + concat_1, [1], full_int_array_3, full_int_array_0, [1], [1] + ) + del concat_1 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_1 = paddle._C_ops.subtract(slice_2, slice_3) + del slice_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_1, full_2, float("0"), True) + del full_2, subtract_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_0 = paddle._C_ops.add(slice_1, scale_2) + del scale_2, slice_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_1, full_3, float("0"), True) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_1 = paddle._C_ops.add(slice_3, scale_3) + del scale_3, slice_3 + + # pd_op.slice: (1x-1xf32) <- (1x-1x4xf32, 1xi64, 1xi64) + 
slice_4 = paddle._C_ops.slice( + concat_0, [2], full_int_array_2, full_int_array_3, [1], [2] + ) + + # pd_op.multiply: (1x-1xf32) <- (1x-1xf32, -1xf32) + multiply_1 = paddle._C_ops.multiply(slice_4, scale_0) + del slice_4 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1xf32) <- (1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(multiply_1, full_4, float("0"), True) + del multiply_1 + + # pd_op.add: (1x-1xf32) <- (-1xf32, 1x-1xf32) + add_2 = paddle._C_ops.add(add_0, scale_4) + del add_0, scale_4 + + # pd_op.slice: (1x-1xf32) <- (1x-1x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + concat_0, [2], full_int_array_3, full_int_array_0, [1], [2] + ) + + # pd_op.multiply: (1x-1xf32) <- (1x-1xf32, -1xf32) + multiply_2 = paddle._C_ops.multiply(slice_5, scale_1) + del slice_5 + + # pd_op.scale: (1x-1xf32) <- (1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(multiply_2, full_4, float("0"), True) + del full_4, multiply_2 + + # pd_op.add: (1x-1xf32) <- (-1xf32, 1x-1xf32) + add_3 = paddle._C_ops.add(add_1, scale_5) + del add_1, scale_5 + + # pd_op.slice: (1x-1xf32) <- (1x-1x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + concat_0, [2], full_int_array_0, full_int_array_1, [1], [2] + ) + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1xf32) <- (1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(slice_6, full_5, float("0"), True) + del slice_6 + + # pd_op.exp: (1x-1xf32) <- (1x-1xf32) + exp_0 = paddle._C_ops.exp(scale_6) + del scale_6 + + # pd_op.multiply: (1x-1xf32) <- (1x-1xf32, -1xf32) + multiply_3 = paddle._C_ops.multiply(exp_0, scale_0) + del exp_0, scale_0 + + # pd_op.slice: (1x-1xf32) <- (1x-1x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + concat_0, [2], full_int_array_1, full_int_array_4, [1], [2] + ) + del concat_0, full_int_array_1, full_int_array_4 + + # pd_op.scale: (1x-1xf32) <- (1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(slice_7, full_5, float("0"), True) + del full_5, slice_7 + + # pd_op.exp: (1x-1xf32) <- (1x-1xf32) + exp_1 = paddle._C_ops.exp(scale_7) + del scale_7 + + # pd_op.multiply: (1x-1xf32) <- (1x-1xf32, -1xf32) + multiply_4 = paddle._C_ops.multiply(exp_1, scale_1) + del exp_1, scale_1 + + # pd_op.scale: (1x-1xf32) <- (1x-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(multiply_3, full_3, float("0"), True) + del multiply_3 + + # pd_op.subtract: (1x-1xf32) <- (1x-1xf32, 1x-1xf32) + subtract_2 = paddle._C_ops.subtract(add_2, scale_8) + + # pd_op.scale: (1x-1xf32) <- (1x-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(multiply_4, full_3, float("0"), True) + del full_3, multiply_4 + + # pd_op.subtract: (1x-1xf32) <- (1x-1xf32, 1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_3, scale_9) + + # pd_op.add: (1x-1xf32) <- (1x-1xf32, 1x-1xf32) + add_4 = paddle._C_ops.add(add_2, scale_8) + del add_2, scale_8 + + # pd_op.add: (1x-1xf32) <- (1x-1xf32, 1x-1xf32) + add_5 = paddle._C_ops.add(add_3, scale_9) + del add_3, scale_9 + + # builtin.combine: ([1x-1xf32, 1x-1xf32, 1x-1xf32, 1x-1xf32]) <- (1x-1xf32, 1x-1xf32, 1x-1xf32, 1x-1xf32) + combine_2 = [subtract_2, subtract_3, add_4, add_5] + del add_4, add_5, subtract_2, subtract_3 + + # pd_op.stack: (1x-1x4xf32) <- ([1x-1xf32, 1x-1xf32, 1x-1xf32, 1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.slice: (1xf32) <- (1x2xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_6, [1], 
full_int_array_2, full_int_array_3, [1], [1] + ) + + # pd_op.slice: (1xf32) <- (1x2xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_7, [1], full_int_array_2, full_int_array_3, [1], [1] + ) + del full_int_array_2 + + # pd_op.divide: (1xf32) <- (1xf32, 1xf32) + divide_0 = paddle._C_ops.divide(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.unsqueeze: (1x1xf32) <- (1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(divide_0, full_int_array_5) + del divide_0 + + # pd_op.slice: (1xf32) <- (1x2xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_6, [1], full_int_array_3, full_int_array_0, [1], [1] + ) + del data_6 + + # pd_op.slice: (1xf32) <- (1x2xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_7, [1], full_int_array_3, full_int_array_0, [1], [1] + ) + del data_7, full_int_array_0, full_int_array_3 + + # pd_op.divide: (1xf32) <- (1xf32, 1xf32) + divide_1 = paddle._C_ops.divide(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.unsqueeze: (1x1xf32) <- (1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_1, full_int_array_5) + del divide_1, full_int_array_5 + + # builtin.combine: ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32]) <- (1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32) + combine_3 = [unsqueeze_1, unsqueeze_0, unsqueeze_1, unsqueeze_0] + del unsqueeze_0, unsqueeze_1 + + # pd_op.stack: (1x1x4xf32) <- ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32]) + stack_1 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.multiply: (1x-1x4xf32) <- (1x-1x4xf32, 1x1x4xf32) + multiply_0 = paddle._C_ops.multiply(stack_0, stack_1) + del stack_0, stack_1 + + # builtin.combine: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x2xf32, 1x-1x2xf32) + combine_4 = [data_2, data_3] + del data_2, data_3 + + # pd_op.concat: (1x-1x2xf32) <- ([1x-1x2xf32, 1x-1x2xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_4, full_0) + del combine_4, full_0 + + # pd_op.softmax: (1x-1x2xf32) <- (1x-1x2xf32) + softmax_0 = paddle._C_ops.softmax(concat_2, -1) + del concat_2 + + # pd_op.transpose: (1x2x-1xf32) <- (1x-1x2xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 2, 1]) + del softmax_0 + + return multiply_0, transpose_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..7729c6b56 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +bb6d6c93042dcd4cea89dabceecd42cfcda5d9c8914efd69f24d58bc0e868d8b \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_3/input_meta.py new file mode 100644 index 000000000..02c2ab287 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/input_meta.py @@ -0,0 
+1,29 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 96, 80, 80] + dtype = "float32" + max_val = float("8.15454") + mean = float("0.251416") + std = float("0.317099") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 96, 40, 40] + dtype = "float32" + max_val = float("6.10383") + mean = float("0.219364") + std = float("0.388425") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 3, 640, 640] + dtype = "float32" + min_val = float("-0.964689") + max_val = float("1.18429") + mean = float("-0.211199") + std = float("0.540454") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_3/model.py new file mode 100644 index 000000000..378ff130b --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/model.py @@ -0,0 +1,186 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + data_0, + data_1, + data_2, + ): + # pd_op.conv2d: (4x8x80x80xf32) <- (4x96x80x80xf32, 8x96x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x8x1x1xf32) <- (8xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (4x8x80x80xf32) <- (4x8x80x80xf32, 1x8x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.transpose: (4x80x80x8xf32) <- (4x8x80x80xf32) + transpose_0 = paddle._C_ops.transpose(add_0, [0, 2, 3, 1]) + del add_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [0, -1, 4] + + # pd_op.reshape: (4x12800x4xf32) <- (4x80x80x8xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(transpose_0, full_int_array_1) + + # pd_op.conv2d: (4x4x80x80xf32) <- (4x96x80x80xf32, 4x96x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + data_0, parameter_5, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (4x4x80x80xf32) <- (4x4x80x80xf32, 1x4x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_7) + + # pd_op.transpose: (4x80x80x4xf32) <- (4x4x80x80xf32) + transpose_1 = paddle._C_ops.transpose(add_1, [0, 2, 3, 1]) + del add_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [0, -1, 2] + + # pd_op.reshape: (4x12800x2xf32) <- (4x80x80x4xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + data_1, parameter_3, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x24x1x1xf32) <- (24xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (4x24x40x40xf32) <- (4x24x40x40xf32, 1x24x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_8) + + # pd_op.transpose: (4x40x40x24xf32) <- (4x24x40x40xf32) + transpose_2 = paddle._C_ops.transpose(add_2, [0, 2, 3, 1]) + del add_2 + + # pd_op.reshape: (4x9600x4xf32) <- (4x40x40x24xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_2, full_int_array_1) + del 
full_int_array_1 + + # pd_op.conv2d: (4x12x40x40xf32) <- (4x96x40x40xf32, 12x96x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + data_1, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x12x1x1xf32) <- (12xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (4x12x40x40xf32) <- (4x12x40x40xf32, 1x12x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_9) + + # pd_op.transpose: (4x40x40x12xf32) <- (4x12x40x40xf32) + transpose_3 = paddle._C_ops.transpose(add_3, [0, 2, 3, 1]) + del add_3 + + # pd_op.reshape: (4x9600x2xf32) <- (4x40x40x12xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_3, full_int_array_2) + del full_int_array_2 + + # pd_op.prior_box: (80x80x2x4xf32, 80x80x2x4xf32) <- (4x96x80x80xf32, 4x3x640x640xf32) + prior_box_0, prior_box_1 = (lambda x, f: f(x))( + paddle._C_ops.prior_box( + data_0, + data_2, + [float("16"), float("24")], + [], + [float("1")], + [float("0.1"), float("0.1"), float("0.2"), float("0.2")], + False, + False, + float("8"), + float("8"), + float("0.5"), + False, + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [-1, 4] + + # pd_op.reshape: (12800x4xf32) <- (80x80x2x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(prior_box_0, full_int_array_3) + del prior_box_0 + + # pd_op.prior_box: (40x40x6x4xf32, 40x40x6x4xf32) <- (4x96x40x40xf32, 4x3x640x640xf32) + prior_box_2, prior_box_3 = (lambda x, f: f(x))( + paddle._C_ops.prior_box( + data_1, + data_2, + [ + float("32"), + float("48"), + float("64"), + float("80"), + float("96"), + float("128"), + ], + [], + [float("1")], + [float("0.1"), float("0.1"), float("0.2"), float("0.2")], + False, + False, + float("16"), + float("16"), + float("0.5"), + False, + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1, data_2 + + # pd_op.reshape: (9600x4xf32) <- (40x40x6x4xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(prior_box_2, full_int_array_3) + del ( + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + full_int_array_3, + prior_box_2, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + ) + + return reshape_0, reshape_1, reshape_2, reshape_3, reshape_4, reshape_5 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/input_meta.py new file mode 100644 index 000000000..2e9103c74 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/input_meta.py @@ -0,0 +1,29 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 48, 80, 80] + dtype = "float32" + max_val = float("27.9606") + mean = float("0.204761") + std = float("0.302141") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 48, 40, 40] + dtype = "float32" + max_val = float("6.54075") + mean = float("0.262741") + std = float("0.404166") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 3, 640, 640] + dtype = "float32" + min_val = float("-0.964689") + max_val = float("1.18429") + mean = float("-0.100344") + std = float("0.417956") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/weight_meta.py 
b/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/weight_meta.py new file mode 100644 index 000000000..cda22f35a --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/weight_meta.py @@ -0,0 +1,78 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [12, 48, 3, 3] + dtype = "float32" + min_val = float("-0.213016") + max_val = float("0.213018") + mean = float("-2.24769e-07") + std = float("0.050618") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [24, 48, 3, 3] + dtype = "float32" + min_val = float("-0.316825") + max_val = float("0.358267") + mean = float("-5.38973e-05") + std = float("0.043486") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [4, 48, 3, 3] + dtype = "float32" + min_val = float("-0.327187") + max_val = float("0.327153") + mean = float("9.96015e-06") + std = float("0.0461265") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [8] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [8, 48, 3, 3] + dtype = "float32" + min_val = float("-0.429988") + max_val = float("0.281067") + mean = float("-0.000527659") + std = float("0.0541435") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_3/weight_meta.py new file mode 100644 index 000000000..243f2c5dc --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/weight_meta.py @@ -0,0 +1,78 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [12] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [12, 96, 3, 3] + dtype = "float32" + min_val = float("-0.378841") + max_val = float("0.378844") + mean = float("5.46803e-07") + std = float("0.0398333") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [24, 96, 3, 3] + dtype = "float32" + min_val = float("-0.433104") + max_val = float("0.513117") + mean = float("0.00031893") + std = float("0.0372768") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [4, 96, 3, 3] + dtype = "float32" + min_val = float("-0.410144") + max_val = float("0.410081") + mean = float("-1.41836e-05") + std = float("0.0390869") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [8] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [8, 96, 3, 3] + dtype = "float32" + min_val = float("-0.488245") + max_val = float("0.636433") + mean = float("0.00123409") + std = float("0.0571081") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..22be6dce9 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +ddc98aa4517e17f5898f37e27bd110db7146c2e789bf569a980ba4201bf6d8cc \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_4/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_4/input_meta.py new file mode 100644 index 000000000..3e8762962 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_4/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 3, 640, 640] + dtype = "float32" + min_val = float("-0.964689") + max_val = float("1.18429") + mean = float("-0.211199") + std = float("0.540454") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_4/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_4/model.py new file mode 100644 index 000000000..38fe01293 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_4/model.py @@ -0,0 +1,2041 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + 
parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + data_0, + ): + # pd_op.conv2d: (4x24x320x320xf32) <- (4x3x640x640xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_189, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_189 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_1 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.depthwise_conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x1x5x5xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + relu_1, parameter_184, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_2 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x24x1x1xf32) + 
conv2d_1 = paddle._C_ops.conv2d( + relu_2, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.add: (4x24x320x320xf32) <- (4x24x320x320xf32, 4x24x320x320xf32) + add_0 = paddle._C_ops.add(relu_1, batch_norm__12) + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_3 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.depthwise_conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x1x5x5xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + relu_3, parameter_174, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_4 = paddle._C_ops.relu(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (4x24x320x320xf32) <- (4x24x320x320xf32, 24x24x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_4, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x320x320xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (4x24x320x320xf32) <- (4x24x320x320xf32, 4x24x320x320xf32) + add_1 = paddle._C_ops.add(relu_3, batch_norm__24) + + # pd_op.relu: (4x24x320x320xf32) <- (4x24x320x320xf32) + relu_5 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.depthwise_conv2d: (4x24x160x160xf32) <- (4x24x320x320xf32, 24x1x5x5xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + relu_5, parameter_164, [2, 2], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (4x24x160x160xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x160x160xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.relu: (4x24x160x160xf32) <- (4x24x160x160xf32) + relu_6 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x24x160x160xf32, 48x24x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + relu_6, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 2] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_0 + + # pd_op.pool2d: (4x24x160x160xf32) <- (4x24x320x320xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_5, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x24x160x160xf32, 48x24x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_7 = paddle._C_ops.relu(batch_norm__42) + del batch_norm__42 + + # pd_op.add: (4x48x160x160xf32) <- (4x48x160x160xf32, 4x48x160x160xf32) + add_2 = paddle._C_ops.add(relu_7, batch_norm__36) + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_8 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.depthwise_conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x1x5x5xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + relu_8, parameter_149, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_148, + 
parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_9 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_9, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (4x48x160x160xf32) <- (4x48x160x160xf32, 4x48x160x160xf32) + add_3 = paddle._C_ops.add(relu_8, batch_norm__54) + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_10 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.depthwise_conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x1x5x5xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + relu_10, parameter_139, [1, 1], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_11 = paddle._C_ops.relu(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (4x48x160x160xf32) <- (4x48x160x160xf32, 48x48x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_11, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x160x160xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.add: (4x48x160x160xf32) <- (4x48x160x160xf32, 4x48x160x160xf32) + add_4 = paddle._C_ops.add(relu_10, batch_norm__66) + + # pd_op.relu: (4x48x160x160xf32) <- (4x48x160x160xf32) + relu_12 = paddle._C_ops.relu(add_4) + 
del add_4 + + # pd_op.depthwise_conv2d: (4x48x80x80xf32) <- (4x48x160x160xf32, 48x1x5x5xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + relu_12, parameter_129, [2, 2], [2, 2], "EXPLICIT", 48, [1, 1], "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x80x80xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.relu: (4x48x80x80xf32) <- (4x48x80x80xf32) + relu_13 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x48x80x80xf32, 24x48x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_14 = paddle._C_ops.relu(batch_norm__78) + del batch_norm__78 + + # pd_op.depthwise_conv2d: (4x24x80x80xf32) <- (4x24x80x80xf32, 24x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + relu_14, parameter_119, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_15 = paddle._C_ops.relu(batch_norm__84) + del batch_norm__84 + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x24x80x80xf32, 96x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_15, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_16 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.pool2d: (4x48x80x80xf32) <- (4x48x160x160xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + relu_12, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x48x80x80xf32, 96x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_17 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.add: (4x96x80x80xf32) <- (4x96x80x80xf32, 4x96x80x80xf32) + add_5 = paddle._C_ops.add(relu_17, relu_16) + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_18 = paddle._C_ops.relu(add_5) + del add_5 + + # pd_op.depthwise_conv2d: (4x96x80x80xf32) <- (4x96x80x80xf32, 96x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + relu_18, parameter_104, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_19 = paddle._C_ops.relu(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x96x80x80xf32, 24x96x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_19, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, 
parameter_98 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_20 = paddle._C_ops.relu(batch_norm__108) + del batch_norm__108 + + # pd_op.depthwise_conv2d: (4x24x80x80xf32) <- (4x24x80x80xf32, 24x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + relu_20, parameter_94, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_21 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x24x80x80xf32, 96x24x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_21, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_22 = paddle._C_ops.relu(batch_norm__120) + del batch_norm__120 + + # pd_op.add: (4x96x80x80xf32) <- (4x96x80x80xf32, 4x96x80x80xf32) + add_6 = paddle._C_ops.add(relu_18, relu_22) + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_23 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.depthwise_conv2d: (4x96x80x80xf32) <- (4x96x80x80xf32, 96x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + relu_23, parameter_84, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_24 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (4x24x80x80xf32) <- (4x96x80x80xf32, 24x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_24, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 
24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_25 = paddle._C_ops.relu(batch_norm__132) + del batch_norm__132 + + # pd_op.depthwise_conv2d: (4x24x80x80xf32) <- (4x24x80x80xf32, 24x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + relu_25, parameter_74, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x80x80xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.relu: (4x24x80x80xf32) <- (4x24x80x80xf32) + relu_26 = paddle._C_ops.relu(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (4x96x80x80xf32) <- (4x24x80x80xf32, 96x24x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_26, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x80x80xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_27 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.add: (4x96x80x80xf32) <- (4x96x80x80xf32, 4x96x80x80xf32) + add_7 = paddle._C_ops.add(relu_23, relu_27) + + # pd_op.relu: (4x96x80x80xf32) <- (4x96x80x80xf32) + relu_28 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.depthwise_conv2d: (4x96x40x40xf32) <- (4x96x80x80xf32, 96x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + relu_28, parameter_64, [2, 2], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_29 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + relu_29, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_30 = paddle._C_ops.relu(batch_norm__156) + del batch_norm__156 + + # pd_op.depthwise_conv2d: (4x24x40x40xf32) <- (4x24x40x40xf32, 24x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + relu_30, parameter_54, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_31 = paddle._C_ops.relu(batch_norm__162) + del batch_norm__162 + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x24x40x40xf32, 96x24x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_31, parameter_49, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_32 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.pool2d: (4x96x40x40xf32) <- (4x96x80x80xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + relu_28, + full_int_array_0, + [2, 2], + [0, 0], + True, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x96x40x40xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + pool2d_2, parameter_44, [1, 1], [0, 0], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_33 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.add: (4x96x40x40xf32) <- (4x96x40x40xf32, 4x96x40x40xf32) + add_8 = paddle._C_ops.add(relu_33, relu_32) + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_34 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.depthwise_conv2d: (4x96x40x40xf32) <- (4x96x40x40xf32, 96x1x5x5xf32) + depthwise_conv2d_13 = paddle._C_ops.depthwise_conv2d( + relu_34, parameter_39, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_13, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_35 = paddle._C_ops.relu(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_35, parameter_34, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_36 = paddle._C_ops.relu(batch_norm__186) + del batch_norm__186 + + # pd_op.depthwise_conv2d: (4x24x40x40xf32) <- (4x24x40x40xf32, 24x1x5x5xf32) + depthwise_conv2d_14 = paddle._C_ops.depthwise_conv2d( + relu_36, parameter_29, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_14, + parameter_28, + parameter_27, + 
parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_37 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x24x40x40xf32, 96x24x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_37, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_38 = paddle._C_ops.relu(batch_norm__198) + del batch_norm__198 + + # pd_op.add: (4x96x40x40xf32) <- (4x96x40x40xf32, 4x96x40x40xf32) + add_9 = paddle._C_ops.add(relu_34, relu_38) + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_39 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.depthwise_conv2d: (4x96x40x40xf32) <- (4x96x40x40xf32, 96x1x5x5xf32) + depthwise_conv2d_15 = paddle._C_ops.depthwise_conv2d( + relu_39, parameter_19, [1, 1], [2, 2], "EXPLICIT", 96, [1, 1], "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_15, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_40 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (4x24x40x40xf32) <- (4x96x40x40xf32, 24x96x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_40, parameter_14, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_41 = paddle._C_ops.relu(batch_norm__210) + del batch_norm__210 + + # pd_op.depthwise_conv2d: (4x24x40x40xf32) <- 
(4x24x40x40xf32, 24x1x5x5xf32) + depthwise_conv2d_16 = paddle._C_ops.depthwise_conv2d( + relu_41, parameter_9, [1, 1], [2, 2], "EXPLICIT", 24, [1, 1], "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (4x24x40x40xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_16, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.relu: (4x24x40x40xf32) <- (4x24x40x40xf32) + relu_42 = paddle._C_ops.relu(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (4x96x40x40xf32) <- (4x24x40x40xf32, 96x24x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_42, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (4x96x40x40xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_43 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.add: (4x96x40x40xf32) <- (4x96x40x40xf32, 4x96x40x40xf32) + add_10 = paddle._C_ops.add(relu_39, relu_43) + + # pd_op.relu: (4x96x40x40xf32) <- (4x96x40x40xf32) + relu_0 = paddle._C_ops.relu(add_10) + del ( + add_10, + assign_0, + assign_1, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + 
batch_norm__179, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__97, + batch_norm__98, + batch_norm__99, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + depthwise_conv2d_0, + depthwise_conv2d_1, + depthwise_conv2d_10, + depthwise_conv2d_11, + depthwise_conv2d_12, + depthwise_conv2d_13, + depthwise_conv2d_14, + depthwise_conv2d_15, + depthwise_conv2d_16, + depthwise_conv2d_2, + depthwise_conv2d_3, + depthwise_conv2d_4, + depthwise_conv2d_5, + depthwise_conv2d_6, + depthwise_conv2d_7, + depthwise_conv2d_8, + depthwise_conv2d_9, + full_int_array_0, + pool2d_0, + pool2d_1, + pool2d_2, + relu_1, + relu_10, + relu_11, + relu_12, + relu_13, + relu_14, + relu_15, + relu_16, + relu_17, + relu_18, + relu_19, + relu_2, + relu_20, + relu_21, + relu_22, + relu_23, + relu_24, + relu_25, + relu_26, + relu_27, + relu_28, + relu_29, + relu_3, + relu_30, + relu_31, + relu_32, + relu_33, + relu_34, + relu_35, + relu_36, + relu_37, + relu_38, + relu_39, + relu_4, + relu_40, + relu_41, + relu_42, + relu_43, + relu_5, + relu_6, + relu_7, + relu_8, + relu_9, + ) + + return relu_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_4/weight_meta.py new file mode 100644 index 
000000000..360206470 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_4/weight_meta.py @@ -0,0 +1,1888 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [96] + dtype = "float32" + min_val = float("-0.529807") + max_val = float("0.535502") + mean = float("-0.0377045") + std = float("0.225552") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [96] + dtype = "float32" + min_val = float("-0.551012") + max_val = float("0.798407") + mean = float("0.278371") + std = float("0.286229") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("8.15129e-09") + max_val = float("0.0788801") + mean = float("0.00405359") + std = float("0.00920447") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("-0.370452") + max_val = float("0.780718") + mean = float("0.00212746") + std = float("0.142489") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.545153") + max_val = float("0.502341") + mean = float("0.00107313") + std = float("0.0796188") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.64011") + max_val = float("0.622501") + mean = float("-0.0140157") + std = float("0.11738") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.341631") + max_val = float("0.290683") + mean = float("-0.00286471") + std = float("0.0608644") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96] + dtype = "float32" + min_val = float("-0.202404") + max_val = float("0.392666") + mean = float("0.0562522") + std = float("0.109007") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-8.85634e-10") + max_val = float("0.377806") + mean = 
float("0.128982") + std = float("0.0655102") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.101432") + mean = float("0.00729591") + std = float("0.0132781") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [96] + dtype = "float32" + min_val = float("-0.172632") + max_val = float("0.239256") + mean = float("0.00137129") + std = float("0.0566031") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.475652") + max_val = float("0.411896") + mean = float("-0.000381762") + std = float("0.0860005") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [96] + dtype = "float32" + min_val = float("-0.465842") + max_val = float("0.198826") + mean = float("-0.0358144") + std = float("0.0882215") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [96] + dtype = "float32" + min_val = float("-3.40503e-13") + max_val = float("0.518627") + mean = float("0.115253") + std = float("0.1373") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [96] + dtype = "float32" + min_val = float("8.9822e-09") + max_val = float("0.0313503") + mean = float("0.00633044") + std = float("0.00772982") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [96] + dtype = "float32" + min_val = float("-0.233942") + max_val = float("0.184187") + mean = float("-0.00519005") + std = float("0.0690654") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.512118") + max_val = float("0.465328") + mean = float("0.00016734") + std = float("0.0789779") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.384301") + max_val = float("0.444187") + mean = float("0.0203379") + std = float("0.115945") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") 
+ data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.347811") + max_val = float("0.318564") + mean = float("0.00185653") + std = float("0.0572469") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [96] + dtype = "float32" + min_val = float("-0.0932884") + max_val = float("0.330553") + mean = float("0.0652257") + std = float("0.096441") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [96] + dtype = "float32" + min_val = float("-0.207055") + max_val = float("0.311883") + mean = float("0.114658") + std = float("0.109459") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0483353") + mean = float("0.00408484") + std = float("0.00883911") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [96] + dtype = "float32" + min_val = float("-0.152281") + max_val = float("0.175683") + mean = float("0.0148372") + std = float("0.0455184") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.45586") + max_val = float("0.468981") + mean = float("0.0052202") + std = float("0.0905533") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.427837") + max_val = float("0.688668") + mean = float("-0.0012812") + std = float("0.125576") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = "float32" + min_val = float("-2.29691e-13") + max_val = float("0.711377") + mean = float("0.119751") + std = float("0.171285") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("3.98621e-08") + max_val = float("0.290012") + mean = float("0.0296902") + std = float("0.0502514") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.637207") + max_val = float("0.292276") + mean = float("-0.0526347") + std = float("0.156049") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.335112") + max_val = float("0.339325") + mean = float("-0.00155268") + std = float("0.0511387") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.25511") + max_val = float("0.178232") + mean = float("-0.00302172") + std = float("0.0578235") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("-5.08427e-13") + max_val = float("0.292616") + mean = float("0.0572912") + std = float("0.0782013") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("7.67263e-09") + max_val = float("0.0174608") + mean = float("0.00217477") + std = float("0.00368529") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.168333") + max_val = float("0.218261") + mean = 
float("0.0198797") + std = float("0.0466794") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.396373") + max_val = float("0.459772") + mean = float("0.00452505") + std = float("0.0607818") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.386131") + max_val = float("0.466922") + mean = float("0.00250372") + std = float("0.101205") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.350087") + max_val = float("0.340497") + mean = float("0.000661329") + std = float("0.0529721") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.147295") + max_val = float("0.342112") + mean = float("0.0527102") + std = float("0.0901145") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("-0.0676544") + max_val = float("0.260389") + mean = float("0.12047") + std = float("0.0659297") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("2.24623e-08") + max_val = float("0.116312") + mean = float("0.0128798") + std = float("0.0178453") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-1.05319") + max_val = float("0.191935") + mean = float("-0.0240638") + std = float("0.159753") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.470956") + max_val = float("0.345094") + mean = float("-0.000294711") + std = float("0.0715992") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-1.21486") + max_val = float("0.532211") + mean = float("-0.0582742") + std = 
float("0.236056") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("-9.8349e-17") + max_val = float("0.655524") + mean = float("0.244989") + std = float("0.171081") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("5.30205e-09") + max_val = float("0.0395659") + mean = float("0.0058397") + std = float("0.00693151") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.291176") + max_val = float("0.536533") + mean = float("0.00241241") + std = float("0.125459") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.370448") + max_val = float("0.580956") + mean = float("0.00621474") + std = float("0.0931395") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.37256") + max_val = float("0.473569") + mean = float("-0.00583875") + std = float("0.132282") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.294907") + max_val = float("0.315206") + mean = float("-0.00290561") + std = float("0.076759") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [96] + dtype = "float32" + min_val = float("-0.189029") + max_val = float("0.425148") + mean = float("0.0870868") + std = float("0.121496") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [96] + dtype = "float32" + min_val = float("1.85839e-35") + max_val = float("0.371") + mean = float("0.184782") + std = float("0.0682357") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [96] + dtype = "float32" + min_val = float("9.93521e-12") + max_val = float("0.126547") + mean = float("0.0124598") + std = float("0.0182872") + data = None + + +class 
Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [96] + dtype = "float32" + min_val = float("-0.428026") + max_val = float("0.267546") + mean = float("0.0242352") + std = float("0.102687") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.440289") + max_val = float("0.630188") + mean = float("0.005698") + std = float("0.114993") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [96] + dtype = "float32" + min_val = float("-0.263735") + max_val = float("0.254792") + mean = float("-0.00480034") + std = float("0.0833061") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [96] + dtype = "float32" + min_val = float("-1.02404e-11") + max_val = float("0.389111") + mean = float("0.144142") + std = float("0.106879") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [96] + dtype = "float32" + min_val = float("1.12343e-08") + max_val = float("0.0416685") + mean = float("0.0128878") + std = float("0.0104704") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [96] + dtype = "float32" + min_val = float("-0.379638") + max_val = float("0.462345") + mean = float("0.0286401") + std = float("0.14993") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.445249") + max_val = float("0.516047") + mean = float("0.00808885") + std = float("0.107548") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.582137") + max_val = float("0.550435") + mean = float("0.00382985") + std = float("0.155391") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [24, 96, 1, 1] + dtype = "float32" + min_val = float("-0.359294") + max_val = float("0.414197") + mean = float("-0.000937421") + std = float("0.0827917") + data = None + + +class Program_weight_tensor_parameter_100: + name 
= "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.123006") + max_val = float("0.427898") + mean = float("0.0848361") + std = float("0.122103") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("1.10201e-33") + max_val = float("0.410979") + mean = float("0.181922") + std = float("0.0728994") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0926534") + mean = float("0.00507757") + std = float("0.010533") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.285646") + max_val = float("0.204501") + mean = float("0.0167405") + std = float("0.0798369") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 1, 5, 5] + dtype = "float32" + min_val = float("-0.5353") + max_val = float("0.501915") + mean = float("0.00768155") + std = float("0.111863") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.514387") + max_val = float("0.676997") + mean = float("0.0125891") + std = float("0.139171") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("-4.10009e-33") + max_val = float("0.361954") + mean = float("0.114493") + std = float("0.0796294") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [96] + dtype = "float32" + min_val = float("5.3083e-08") + max_val = float("0.30467") + mean = float("0.103876") + std = float("0.0705489") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.86835") + max_val = float("0.8484") + mean = float("-0.161871") + std = float("0.289439") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 48, 1, 1] + dtype = "float32" + min_val = float("-0.419689") + max_val = float("0.43841") + mean = float("-0.00672462") + std = float("0.100841") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.299824") + max_val = float("0.215576") + mean = float("0.000306278") + std = float("0.068349") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("-2.92638e-15") + max_val = float("0.277966") + mean = float("0.0817295") + std = float("0.0718212") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("2.58654e-08") + max_val = float("0.059335") + mean = float("0.0112805") + std = float("0.0109694") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.295701") + max_val = float("0.24228") + mean = float("0.0285976") + std = float("0.0985937") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 24, 1, 1] + dtype = "float32" + min_val = float("-0.498495") + max_val = float("0.51089") + mean = float("0.00341417") + std = float("0.0975677") + data = None + + 
+class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.608019") + max_val = float("0.404112") + mean = float("0.00618299") + std = float("0.12199") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.473876") + max_val = float("0.418075") + mean = float("0.00557681") + std = float("0.0954504") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.431916") + max_val = float("0.346585") + mean = float("0.00927386") + std = float("0.0875719") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48, 48, 1, 
1] + dtype = "float32" + min_val = float("-0.471579") + max_val = float("0.42115") + mean = float("-0.000745672") + std = float("0.113089") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.436746") + max_val = float("0.48435") + mean = float("0.00652353") + std = float("0.137478") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.463259") + max_val = float("0.40208") + mean = float("-0.00089771") + std = float("0.114243") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48, 1, 5, 5] + dtype = "float32" + min_val = float("-0.565344") + max_val = float("0.600962") + mean = float("0.0059708") + std = float("0.155045") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.579735") + max_val = float("0.523383") + mean = float("0.00882696") + std = float("0.149158") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [48, 24, 1, 1] + dtype = "float32" + min_val = float("-0.509437") + max_val = float("0.673686") + mean = float("0.00680382") + std = float("0.142248") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.568438") + max_val = float("0.478393") + mean = float("-0.000164992") + std = float("0.168889") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.44595") + max_val = float("0.540002") + mean = float("0.00183311") + std = float("0.16766") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [24] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.611047") + max_val = float("0.614482") + mean = float("-0.00539658") + std = float("0.178044") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.690421") + max_val = float("0.493279") + mean = float("-0.0190106") + std = float("0.182687") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [24, 1, 5, 5] + dtype = "float32" + min_val = float("-0.75812") + max_val = float("0.782331") + mean = float("-0.00739648") + std = float("0.213283") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-0.653631") + max_val = float("0.685809") + mean = float("-0.000923303") + std = float("0.200627") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..82adea4a9 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ 
+721225ab8bed148704d6e4977784e7240bb3daddff88da89c3a29c69ebd43c1f \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_5/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_5/input_meta.py new file mode 100644 index 000000000..68e2e302e --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_5/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 22400, 4] + dtype = "float32" + min_val = float("-5.16022") + max_val = float("9.99565") + mean = float("0.0949014") + std = float("1.08368") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 22400, 4] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 22400, 4] + dtype = "float32" + min_val = float("-inf") + max_val = float("343.787") + mean = float("-inf") + std = float("nan") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 22400, 2] + dtype = "float32" + min_val = float("-9.31402") + max_val = float("9.31491") + mean = float("-0.00452724") + std = float("2.1579") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 22400, 1] + dtype = "int64" + min_val = 0 + max_val = 1 + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_5/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_5/model.py new file mode 100644 index 000000000..1608d7efd --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_5/model.py @@ -0,0 +1,69 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.masked_select: (-1xf32) <- (4x22400x4xf32, 4x22400x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_0, data_1) + del data_0 + + # pd_op.masked_select: (-1xf32) <- (4x22400x4xf32, 4x22400x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, data_1) + del data_1, data_2 + + # pd_op.huber_loss: (-1xf32, -1xf32) <- (-1xf32, -1xf32) + huber_loss_1, huber_loss_0 = (lambda x, f: f(x))( + paddle._C_ops.huber_loss(masked_select_0, masked_select_1, float("1")), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del masked_select_0, masked_select_1 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xf32) <- (-1xf32, 0xi64) + sum_0 = paddle._C_ops.sum(huber_loss_1, full_int_array_0, None, False) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(sum_0, full_0, float("0"), True) + del sum_0 + + # pd_op.cross_entropy_with_softmax: (4x22400x2xf32, 4x22400x1xf32) <- (4x22400x2xf32, 4x22400x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_1 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + data_3, data_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + 
del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.squeeze: (4x22400xf32) <- (4x22400x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_1, full_int_array_1 + ) + + # pd_op.squeeze: (4x22400xi64) <- (4x22400x1xi64, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(data_4, full_int_array_1) + del ( + cross_entropy_with_softmax_1, + data_4, + full_0, + full_int_array_0, + full_int_array_1, + huber_loss_1, + ) + + return huber_loss_0, cross_entropy_with_softmax_0, squeeze_0, squeeze_1, scale_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..0f83386a5 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +c4fdad07c14b6fab0ca8ccf5f6cf815117975b9707c7eec9e822c3eaf9a188c0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_6/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_6/input_meta.py new file mode 100644 index 000000000..b120eaeed --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_6/input_meta.py @@ -0,0 +1,12 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 1] + dtype = "float32" + data = [116.0, 132.0, 36.0, 14.0] diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_6/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_6/model.py new file mode 100644 index 000000000..ceea0369d --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_6/model.py @@ -0,0 +1,70 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(data_0, full_0, float("1"), True) + del full_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_0 = [data_0] + del data_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_1 = [scale_0] + del scale_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (1xf32) <- (4x1xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice(data_1, [0], stack_0, stack_1, [-1], [0]) + del data_1, stack_0, stack_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1xf32) <- (1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_1, float("0"), True) + del full_1 + + # pd_op.full: 
(1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("22400"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1xf32) <- (1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(scale_1, full_2, full_3) + del full_2, full_3, scale_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1xb) <- (1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(clip_0, full_4) + del clip_0, full_4, slice_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..1932b0c08 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +47d56d040d883898c99b1d0fb79e6bf402dcb888bc16b18fa453a750a20e0539 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py new file mode 100644 index 000000000..b353fa323 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/input_meta.py @@ -0,0 +1,83 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 12800, 4] + dtype = "float32" + min_val = float("-5.16022") + max_val = float("9.99565") + mean = float("0.199758") + std = float("1.08608") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 9600, 4] + dtype = "float32" + min_val = float("-4.81517") + max_val = float("5.22088") + mean = float("-0.0449079") + std = float("1.06453") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 12800, 2] + dtype = "float32" + min_val = float("-7.10677") + max_val = float("7.10966") + mean = float("-0.00799267") + std = float("2.00454") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4, 9600, 2] + dtype = "float32" + min_val = float("-9.31402") + max_val = float("9.31491") + mean = float("9.33382e-05") + std = float("2.34683") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 90, 4] + dtype = "float32" + max_val = float("0.994016") + mean = float("0.151741") + std = float("0.272922") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 90] + dtype = "int32" + min_val = 0 + max_val = 0 + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [12800, 4] + dtype = "float32" + min_val = float("-0.0125") + max_val = float("1.0125") + mean = float("0.5") + std = float("0.289092") + data = None + + +class Program_weight_tensor_data_7: + name = 
"data_7" + shape = [9600, 4] + dtype = "float32" + min_val = float("-0.0875") + max_val = float("1.0875") + mean = float("0.5") + std = float("0.295452") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py new file mode 100644 index 000000000..4e3c795d2 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/model.py @@ -0,0 +1,570 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # builtin.combine: ([4x12800x4xf32, 4x9600x4xf32]) <- (4x12800x4xf32, 4x9600x4xf32) + combine_0 = [data_0, data_1] + del data_0, data_1 + + # pd_op.concat: (4x22400x4xf32) <- ([4x12800x4xf32, 4x9600x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # builtin.combine: ([4x12800x2xf32, 4x9600x2xf32]) <- (4x12800x2xf32, 4x9600x2xf32) + combine_1 = [data_2, data_3] + del data_2, data_3 + + # pd_op.concat: (4x22400x2xf32) <- ([4x12800x2xf32, 4x9600x2xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (4x90x1xi32) <- (4x90xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.cast: (4x90x1xi64) <- (4x90x1xi32) + cast_0 = paddle._C_ops.cast(unsqueeze_0, paddle.int64) + del unsqueeze_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([12800x4xf32, 9600x4xf32]) <- (12800x4xf32, 9600x4xf32) + combine_2 = [data_6, data_7] + del data_6, data_7 + + # pd_op.concat: (22400x4xf32) <- ([12800x4xf32, 9600x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_1) + del combine_2, full_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (360x4xf32) <- (4x90x4xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(data_4, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (360x1x4xf32) <- (360x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(reshape_1, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.unsqueeze: (1x22400x4xf32) <- (22400x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(concat_2, full_int_array_3) + del concat_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.slice: (360x1x2xf32) <- (360x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (360x1x2xf32) <- (360x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_4, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x22400x2xf32) <- (1x22400x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [2], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1x22400x2xf32) <- (1x22400x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [2], full_int_array_4, full_int_array_5, [1], [] + ) + del full_int_array_5 + + # 
pd_op.maximum: (360x22400x2xf32) <- (360x1x2xf32, 1x22400x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (360x22400x2xf32) <- (360x1x2xf32, 1x22400x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (360x22400x2xf32) <- (360x22400x2xf32, 360x22400x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (360x22400x2xf32) <- (360x22400x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_2, full_3) + del subtract_0 + + # pd_op.prod: (360x22400xf32) <- (360x22400x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (360x1x2xf32) <- (360x1x2xf32, 360x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (360x1x2xf32) <- (360x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_2, full_3) + del subtract_1 + + # pd_op.prod: (360x1xf32) <- (360x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x22400x2xf32) <- (1x22400x2xf32, 1x22400x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x22400x2xf32) <- (1x22400x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_2, full_3) + del full_2, full_3, subtract_2 + + # pd_op.prod: (1x22400xf32) <- (1x22400x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (360x22400xf32) <- (360x1xf32, 1x22400xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (360x22400xf32) <- (360x22400xf32, 360x22400xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (360x22400xf32) <- (360x22400xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_4, float("1e-10"), True) + del full_4, subtract_3 + + # pd_op.divide: (360x22400xf32) <- (360x22400xf32, 360x22400xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [4, -1, 22400] + + # pd_op.reshape: (4x90x22400xf32) <- (360x22400xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0, full_int_array_6 + + # pd_op.max: (4x22400xf32) <- (4x90x22400xf32, 1xi64) + max_0 = paddle._C_ops.max(reshape_2, full_int_array_2, False) + + # pd_op.full: (1xi64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x22400xi64) <- (4x90x22400xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(reshape_2, full_5, False, False, paddle.int64) + del full_5 + + # pd_op.max: (4x90xf32) <- (4x90x22400xf32, 1xi64) + max_1 = paddle._C_ops.max(reshape_2, full_int_array_4, False) + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (4x90xi64) <- (4x90x22400xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(reshape_2, full_6, False, False, paddle.int64) + del full_6, 
reshape_2 + + # pd_op.full: (1xf64) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_8 = paddle._C_ops.full( + [1], float("4"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (4xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_7, full_8, full_9, dtype="int64") + del full_7, full_8, full_9 + + # pd_op.unsqueeze: (4x1xi64) <- (4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_7 = [1, 22400] + + # pd_op.tile: (4x22400xi64) <- (4x1xi64, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_3, full_int_array_7) + del full_int_array_7 + + # builtin.combine: ([4x22400xi64, 4x22400xi64]) <- (4x22400xi64, 4x22400xi64) + combine_3 = [tile_0, argmax_0] + del argmax_0, tile_0 + + # pd_op.stack: (4x22400x2xi64) <- ([4x22400xi64, 4x22400xi64]) + stack_0 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.gather_nd: (4x22400x4xf32) <- (4x90x4xf32, 4x22400x2xi64) + gather_nd_0 = paddle._C_ops.gather_nd(data_4, stack_0) + del data_4 + + # pd_op.gather_nd: (4x22400x1xi64) <- (4x90x1xi64, 4x22400x2xi64) + gather_nd_1 = paddle._C_ops.gather_nd(cast_0, stack_0) + del stack_0 + + # pd_op.full: (4x22400x1xi64) <- () + full_10 = paddle._C_ops.full( + [4, 22400, 1], + float("1"), + paddle.int64, + paddle.framework._current_expected_place(), + ) + + # pd_op.unsqueeze: (4x22400x1xf32) <- (4x22400xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], + float("0.35"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.less_than: (4x22400x1xb) <- (4x22400x1xf32, xf32) + less_than_0 = paddle._C_ops.less_than(unsqueeze_4, full_11) + del full_11, unsqueeze_4 + + # pd_op.where: (4x22400x1xi64) <- (4x22400x1xb, 4x22400x1xi64, 4x22400x1xi64) + where_0 = paddle._C_ops.where(less_than_0, full_10, gather_nd_1) + del full_10, gather_nd_1, less_than_0 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("22400"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x1xi64) <- (4x1xi64, 1xf32) + scale_1 = paddle._C_ops.scale(unsqueeze_3, full_12, float("0"), True) + del full_12, unsqueeze_3 + + # pd_op.add: (4x90xi64) <- (4x1xi64, 4x90xi64) + add_1 = paddle._C_ops.add(scale_1, argmax_1) + del argmax_1, scale_1 + + # pd_op.flatten: (360xi64) <- (4x90xi64) + flatten_0 = paddle._C_ops.flatten(add_1, 0, 1) + del add_1 + + # pd_op.reshape: (89600x4xf32) <- (4x22400x4xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(gather_nd_0, full_int_array_1) + del gather_nd_0 + + # pd_op.scatter: (89600x4xf32) <- (89600x4xf32, 360xi64, 360x4xf32) + scatter_0 = paddle._C_ops.scatter(reshape_3, flatten_0, reshape_1, True) + del reshape_1, reshape_3 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [4, -1, 4] + + # pd_op.reshape: (4x22400x4xf32) <- (89600x4xf32, 3xi64) + reshape_4 = paddle._C_ops.reshape(scatter_0, full_int_array_8) + del scatter_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [-1, 1] + + # pd_op.reshape: (89600x1xi64) <- (4x22400x1xi64, 2xi64) + reshape_5 = paddle._C_ops.reshape(where_0, full_int_array_9) + del where_0 + + # pd_op.reshape: (360x1xi64) <- (4x90x1xi64, 2xi64) 
+ reshape_6 = paddle._C_ops.reshape(cast_0, full_int_array_9) + del cast_0, full_int_array_9 + + # pd_op.scatter: (89600x1xi64) <- (89600x1xi64, 360xi64, 360x1xi64) + scatter_1 = paddle._C_ops.scatter(reshape_5, flatten_0, reshape_6, True) + del flatten_0, reshape_5, reshape_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_10 = [4, -1, 1] + + # pd_op.reshape: (4x22400x1xi64) <- (89600x1xi64, 3xi64) + reshape_7 = paddle._C_ops.reshape(scatter_1, full_int_array_10) + del full_int_array_10, scatter_1 + + # pd_op.set_value_: (4x22400x1xi64) <- (4x22400x1xi64, 1xi64, 1xi64, 1xi64) + set_value__0 = paddle._C_ops.set_value_( + reshape_7, + full_int_array_3, + full_int_array_2, + full_int_array_2, + [1], + [], + [], + [1], + [float("1")], + ) + del reshape_7 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [4, 1, 1] + + # pd_op.tile: (4x22400x4xf32) <- (1x22400x4xf32, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_2, full_int_array_11) + del full_int_array_11, unsqueeze_2 + + # pd_op.reshape: (89600x4xf32) <- (4x22400x4xf32, 2xi64) + reshape_8 = paddle._C_ops.reshape(tile_1, full_int_array_1) + del tile_1 + + # pd_op.reshape: (89600x4xf32) <- (4x22400x4xf32, 2xi64) + reshape_9 = paddle._C_ops.reshape(reshape_4, full_int_array_1) + del full_int_array_1, reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_12 = [3] + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_4, full_int_array_12, [1], [1] + ) + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_3, full_int_array_2, [1], [1] + ) + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_4 = paddle._C_ops.subtract(slice_4, slice_5) + del slice_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_13 = [4] + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_12, full_int_array_13, [1], [1] + ) + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_8, [1], full_int_array_2, full_int_array_4, [1], [1] + ) + del reshape_8 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_5 = paddle._C_ops.subtract(slice_6, slice_7) + del slice_6 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_4, full_13, float("0"), True) + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_2 = paddle._C_ops.add(slice_5, scale_2) + del scale_2, slice_5 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_3 = paddle._C_ops.scale(subtract_5, full_13, float("0"), True) + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_3 = paddle._C_ops.add(slice_7, scale_3) + del scale_3, slice_7 + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_4, full_int_array_12, [1], [1] + ) + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_3, full_int_array_2, [1], [1] + ) + del full_int_array_3 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_6 = paddle._C_ops.subtract(slice_8, slice_9) + del slice_8 + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_10 = 
paddle._C_ops.slice( + reshape_9, [1], full_int_array_12, full_int_array_13, [1], [1] + ) + del full_int_array_12, full_int_array_13 + + # pd_op.slice: (89600xf32) <- (89600x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + reshape_9, [1], full_int_array_2, full_int_array_4, [1], [1] + ) + del full_int_array_2, full_int_array_4, reshape_9 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_7 = paddle._C_ops.subtract(slice_10, slice_11) + del slice_10 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_4 = paddle._C_ops.scale(subtract_6, full_13, float("0"), True) + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_4 = paddle._C_ops.add(slice_9, scale_4) + del scale_4, slice_9 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_7, full_13, float("0"), True) + del full_13 + + # pd_op.add: (89600xf32) <- (89600xf32, 89600xf32) + add_5 = paddle._C_ops.add(slice_11, scale_5) + del scale_5, slice_11 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_8 = paddle._C_ops.subtract(add_4, add_2) + del add_2, add_4 + + # pd_op.full: (1xf32) <- () + full_14 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_6 = paddle._C_ops.scale(subtract_8, full_14, float("0"), True) + del subtract_8 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_1 = paddle._C_ops.divide(scale_6, subtract_4) + del scale_6 + + # pd_op.subtract: (89600xf32) <- (89600xf32, 89600xf32) + subtract_9 = paddle._C_ops.subtract(add_5, add_3) + del add_3, add_5 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_7 = paddle._C_ops.scale(subtract_9, full_14, float("0"), True) + del full_14, subtract_9 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_2 = paddle._C_ops.divide(scale_7, subtract_5) + del scale_7 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_3 = paddle._C_ops.divide(subtract_6, subtract_4) + del subtract_4, subtract_6 + + # pd_op.log: (89600xf32) <- (89600xf32) + log_0 = paddle._C_ops.log(divide_3) + del divide_3 + + # pd_op.full: (1xf32) <- () + full_15 = paddle._C_ops.full( + [1], float("5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_8 = paddle._C_ops.scale(log_0, full_15, float("0"), True) + del log_0 + + # pd_op.divide: (89600xf32) <- (89600xf32, 89600xf32) + divide_4 = paddle._C_ops.divide(subtract_7, subtract_5) + del subtract_5, subtract_7 + + # pd_op.log: (89600xf32) <- (89600xf32) + log_1 = paddle._C_ops.log(divide_4) + del divide_4 + + # pd_op.scale: (89600xf32) <- (89600xf32, 1xf32) + scale_9 = paddle._C_ops.scale(log_1, full_15, float("0"), True) + del full_15, log_1 + + # builtin.combine: ([89600xf32, 89600xf32, 89600xf32, 89600xf32]) <- (89600xf32, 89600xf32, 89600xf32, 89600xf32) + combine_4 = [divide_1, divide_2, scale_8, scale_9] + del divide_1, divide_2, scale_8, scale_9 + + # pd_op.stack: (89600x4xf32) <- ([89600xf32, 89600xf32, 89600xf32, 89600xf32]) + stack_1 = paddle._C_ops.stack(combine_4, 1) + del combine_4 + + # pd_op.reshape: (4x22400x4xf32) <- (89600x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(stack_1, full_int_array_8) + del full_int_array_8, stack_1 + + # pd_op.full: (xi64) <- () + full_16 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (4x22400x1xb) <- (4x22400x1xi64, xi64) + not_equal_0 = 
paddle._C_ops.not_equal(set_value__0, full_16) + del full_16 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_14 = [1, 1, 4] + + # pd_op.tile: (4x22400x4xb) <- (4x22400x1xb, 3xi64) + tile_2 = paddle._C_ops.tile(not_equal_0, full_int_array_14) + del full_int_array_14, not_equal_0 + + # pd_op.cast: (4x22400x4xf32) <- (4x22400x4xb) + cast_1 = paddle._C_ops.cast(tile_2, paddle.float32) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_15 = [] + + # pd_op.sum: (xf32) <- (4x22400x4xf32, 0xi64) + sum_0 = paddle._C_ops.sum(cast_1, full_int_array_15, None, False) + del cast_1, full_int_array_15 + + # pd_op.full: (xf32) <- () + full_17 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_17) + del assign_0, full_0, full_17, set_value__0, sum_0, tile_2 + + return greater_than_0, concat_0, reshape_0, concat_1 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..f5ad73703 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +dba6be48cd5bc0a5da59f495e449d3aa5012d07bcd14065493a8f1cb5da2626d \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_8/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_8/input_meta.py new file mode 100644 index 000000000..430b51ea5 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_8/input_meta.py @@ -0,0 +1,45 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [348.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [396.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [108.0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [42.0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4, 22400] + dtype = "int64" + min_val = 0 + max_val = 22399 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [4, 22400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00332589") + std = float("0.0575746") + data = None diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_8/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_8/model.py new file mode 100644 index 000000000..4b071663d --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_8/model.py @@ -0,0 +1,41 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # builtin.combine: ([1xf32, 
1xf32, 1xf32, 1xf32]) <- (1xf32, 1xf32, 1xf32, 1xf32) + combine_0 = [data_0, data_1, data_2, data_3] + del data_0, data_1, data_2, data_3 + + # pd_op.stack: (4x1xf32) <- ([1xf32, 1xf32, 1xf32, 1xf32]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.expand_as: (4x22400xf32) <- (4x1xf32, 4x22400xi64) + expand_as_0 = paddle._C_ops.expand_as(stack_0, data_4, [4, 22400]) + del stack_0 + + # pd_op.cast: (4x22400xf32) <- (4x22400xi64) + cast_1 = paddle._C_ops.cast(data_4, paddle.float32) + del data_4 + + # pd_op.less_than: (4x22400xb) <- (4x22400xf32, 4x22400xf32) + less_than_0 = paddle._C_ops.less_than(cast_1, expand_as_0) + del cast_1, expand_as_0 + + # pd_op.cast: (4x22400xf32) <- (4x22400xb) + cast_2 = paddle._C_ops.cast(less_than_0, paddle.float32) + del less_than_0 + + # pd_op.add: (4x22400xf32) <- (4x22400xf32, 4x22400xf32) + add_0 = paddle._C_ops.add(cast_2, data_5) + del cast_2, data_5 + + # pd_op.cast: (4x22400xb) <- (4x22400xf32) + cast_0 = paddle._C_ops.cast(add_0, paddle.bool) + del add_0 + + return cast_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..83eb1f3c3 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +d41355b25bbebfcf073261daf285d4c76047c4e7b04e191d4a36ef1756676b31 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_net.json b/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_net.json new file mode 100644 index 000000000..d9c8238e8 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "BlazeFace", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_9/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_9/input_meta.py new file mode 100644 index 000000000..11d302e3b --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_9/input_meta.py @@ -0,0 +1,5 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 1] + dtype = "float32" + data = [116.0, 132.0, 36.0, 14.0] diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_9/model.py b/paddle_samples/PaddleX/BlazeFace/subgraph_9/model.py new file mode 100644 index 000000000..000d63a5b --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_9/model.py @@ -0,0 +1,53 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (1xf32) <- (4x1xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del data_0, full_int_array_0, full_int_array_1 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1xf32) <- (1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_0, full_0, float("0"), True) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + 
[1], float("-3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("22400"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1xf32) <- (1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(scale_0, full_1, full_2) + del full_1, full_2, scale_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1xb) <- (1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(clip_0, full_3) + del clip_0, full_3, slice_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..b9ddde7cd --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +6dde9b3999847b971fd8655181659e46538a8e2213a97717d67b09d9fdacdcd0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_0/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_0/input_meta.py new file mode 100644 index 000000000..8950b9023 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_0/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 3, 800, 928] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.123881") + std = float("1.133") + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_0/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_0/model.py new file mode 100644 index 000000000..607505fe5 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_0/model.py @@ -0,0 +1,4594 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + 
parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + 
parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + data_0, + ): + # pd_op.conv2d: (1x64x400x-1xf32) <- (1x3x800x-1xf32, 64x3x7x7xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_332, [2, 2], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_332 + + # pd_op.batch_norm_: (1x64x400x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x400x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.relu: (1x64x400x-1xf32) <- (1x64x400x-1xf32) + relu_1 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 3] + + # pd_op.pool2d: (1x64x200x-1xf32) <- (1x64x400x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_1, + full_int_array_0, + [2, 2], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_0, relu_1 + + # pd_op.conv2d: (1x64x200x-1xf32) <- (1x64x200x-1xf32, 64x64x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + pool2d_0, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.relu: (1x64x200x-1xf32) <- (1x64x200x-1xf32) + relu_2 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (1x64x200x-1xf32) <- (1x64x200x-1xf32, 64x64x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_2, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322, relu_2 + + # pd_op.batch_norm_: (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- 
(1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_321, + parameter_320, + parameter_319, + parameter_318, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_318, parameter_319, parameter_320, parameter_321 + + # pd_op.relu: (1x64x200x-1xf32) <- (1x64x200x-1xf32) + relu_3 = paddle._C_ops.relu(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x64x200x-1xf32, 256x64x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + relu_3, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_317, relu_3 + + # pd_op.batch_norm_: (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_316, + parameter_315, + parameter_314, + parameter_313, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_313, parameter_314, parameter_315, parameter_316 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x64x200x-1xf32, 256x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_312, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_312, pool2d_0 + + # pd_op.batch_norm_: (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_311, + parameter_310, + parameter_309, + parameter_308, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_308, parameter_309, parameter_310, parameter_311 + + # pd_op.add: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 1x256x200x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + del batch_norm__18, batch_norm__24 + + # pd_op.relu: (1x256x200x-1xf32) <- (1x256x200x-1xf32) + relu_4 = paddle._C_ops.relu(add_5) + del add_5 + + # pd_op.conv2d: (1x64x200x-1xf32) <- (1x256x200x-1xf32, 64x256x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_4, parameter_307, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_307 + + # pd_op.batch_norm_: (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_306, + parameter_305, + parameter_304, + parameter_303, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_303, parameter_304, parameter_305, parameter_306 + + # pd_op.relu: (1x64x200x-1xf32) <- 
(1x64x200x-1xf32) + relu_5 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (1x64x200x-1xf32) <- (1x64x200x-1xf32, 64x64x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_5, parameter_302, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_302, relu_5 + + # pd_op.batch_norm_: (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_301, + parameter_300, + parameter_299, + parameter_298, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_298, parameter_299, parameter_300, parameter_301 + + # pd_op.relu: (1x64x200x-1xf32) <- (1x64x200x-1xf32) + relu_6 = paddle._C_ops.relu(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x64x200x-1xf32, 256x64x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_6, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_297, relu_6 + + # pd_op.batch_norm_: (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_296, + parameter_295, + parameter_294, + parameter_293, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_293, parameter_294, parameter_295, parameter_296 + + # pd_op.add: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 1x256x200x-1xf32) + add_6 = paddle._C_ops.add(batch_norm__42, relu_4) + del batch_norm__42, relu_4 + + # pd_op.relu: (1x256x200x-1xf32) <- (1x256x200x-1xf32) + relu_7 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.conv2d: (1x64x200x-1xf32) <- (1x256x200x-1xf32, 64x256x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_7, parameter_292, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_292 + + # pd_op.batch_norm_: (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_291, + parameter_290, + parameter_289, + parameter_288, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_288, parameter_289, parameter_290, parameter_291 + + # pd_op.relu: (1x64x200x-1xf32) <- (1x64x200x-1xf32) + relu_8 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (1x64x200x-1xf32) <- (1x64x200x-1xf32, 64x64x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + relu_8, parameter_287, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_287, relu_8 + + # pd_op.batch_norm_: (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x200x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + 
batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_286, + parameter_285, + parameter_284, + parameter_283, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_283, parameter_284, parameter_285, parameter_286 + + # pd_op.relu: (1x64x200x-1xf32) <- (1x64x200x-1xf32) + relu_9 = paddle._C_ops.relu(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x64x200x-1xf32, 256x64x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_9, parameter_282, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_282, relu_9 + + # pd_op.batch_norm_: (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x200x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_281, + parameter_280, + parameter_279, + parameter_278, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_278, parameter_279, parameter_280, parameter_281 + + # pd_op.add: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 1x256x200x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__60, relu_7) + del batch_norm__60, relu_7 + + # pd_op.relu: (1x256x200x-1xf32) <- (1x256x200x-1xf32) + relu_10 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.conv2d: (1x128x200x-1xf32) <- (1x256x200x-1xf32, 128x256x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_10, parameter_277, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_277 + + # pd_op.batch_norm_: (1x128x200x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x200x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_276, + parameter_275, + parameter_274, + parameter_273, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_273, parameter_274, parameter_275, parameter_276 + + # pd_op.relu: (1x128x200x-1xf32) <- (1x128x200x-1xf32) + relu_11 = paddle._C_ops.relu(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x128x200x-1xf32, 128x128x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_11, parameter_272, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_272, relu_11 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_271, + parameter_270, + parameter_269, + parameter_268, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_268, parameter_269, parameter_270, parameter_271 + + # pd_op.relu: 
(1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_12 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (1x512x100x-1xf32) <- (1x128x100x-1xf32, 512x128x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_12, parameter_267, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_267, relu_12 + + # pd_op.batch_norm_: (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_266, + parameter_265, + parameter_264, + parameter_263, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_263, parameter_264, parameter_265, parameter_266 + + # pd_op.conv2d: (1x512x100x-1xf32) <- (1x256x200x-1xf32, 512x256x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + relu_10, parameter_262, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_262 + + # pd_op.batch_norm_: (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_261, + parameter_260, + parameter_259, + parameter_258, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_258, parameter_259, parameter_260, parameter_261 + + # pd_op.add: (1x512x100x-1xf32) <- (1x512x100x-1xf32, 1x512x100x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + del batch_norm__78, batch_norm__84 + + # pd_op.relu: (1x512x100x-1xf32) <- (1x512x100x-1xf32) + relu_13 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x512x100x-1xf32, 128x512x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_13, parameter_257, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_257 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_256, + parameter_255, + parameter_254, + parameter_253, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_253, parameter_254, parameter_255, parameter_256 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_14 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x128x100x-1xf32, 128x128x3x3xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_14, parameter_252, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_252, relu_14 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, 
+ batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_251, + parameter_250, + parameter_249, + parameter_248, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_248, parameter_249, parameter_250, parameter_251 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_15 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (1x512x100x-1xf32) <- (1x128x100x-1xf32, 512x128x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_15, parameter_247, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_247, relu_15 + + # pd_op.batch_norm_: (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_246, + parameter_245, + parameter_244, + parameter_243, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_243, parameter_244, parameter_245, parameter_246 + + # pd_op.add: (1x512x100x-1xf32) <- (1x512x100x-1xf32, 1x512x100x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__102, relu_13) + del batch_norm__102, relu_13 + + # pd_op.relu: (1x512x100x-1xf32) <- (1x512x100x-1xf32) + relu_16 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x512x100x-1xf32, 128x512x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_16, parameter_242, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_242 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_241, + parameter_240, + parameter_239, + parameter_238, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_238, parameter_239, parameter_240, parameter_241 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_17 = paddle._C_ops.relu(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x128x100x-1xf32, 128x128x3x3xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_17, parameter_237, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_237, relu_17 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_236, + parameter_235, + parameter_234, + parameter_233, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_233, parameter_234, parameter_235, parameter_236 + + # pd_op.relu: 
(1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_18 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (1x512x100x-1xf32) <- (1x128x100x-1xf32, 512x128x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_18, parameter_232, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_232, relu_18 + + # pd_op.batch_norm_: (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_231, + parameter_230, + parameter_229, + parameter_228, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_228, parameter_229, parameter_230, parameter_231 + + # pd_op.add: (1x512x100x-1xf32) <- (1x512x100x-1xf32, 1x512x100x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__120, relu_16) + del batch_norm__120, relu_16 + + # pd_op.relu: (1x512x100x-1xf32) <- (1x512x100x-1xf32) + relu_19 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x512x100x-1xf32, 128x512x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + relu_19, parameter_227, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_227 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_226, + parameter_225, + parameter_224, + parameter_223, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_223, parameter_224, parameter_225, parameter_226 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_20 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x128x100x-1xf32, 128x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + relu_20, parameter_222, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_222, relu_20 + + # pd_op.batch_norm_: (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x100x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_221, + parameter_220, + parameter_219, + parameter_218, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_218, parameter_219, parameter_220, parameter_221 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_21 = paddle._C_ops.relu(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (1x512x100x-1xf32) <- (1x128x100x-1xf32, 512x128x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + relu_21, parameter_217, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_217, relu_21 + + # pd_op.batch_norm_: (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) 
<- (1x512x100x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_216, + parameter_215, + parameter_214, + parameter_213, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_213, parameter_214, parameter_215, parameter_216 + + # pd_op.add: (1x512x100x-1xf32) <- (1x512x100x-1xf32, 1x512x100x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__138, relu_19) + del batch_norm__138, relu_19 + + # pd_op.relu: (1x512x100x-1xf32) <- (1x512x100x-1xf32) + relu_22 = paddle._C_ops.relu(add_11) + del add_11 + + # pd_op.conv2d: (1x256x100x-1xf32) <- (1x512x100x-1xf32, 256x512x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + relu_22, parameter_212, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_212 + + # pd_op.batch_norm_: (1x256x100x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x100x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_211, + parameter_210, + parameter_209, + parameter_208, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_208, parameter_209, parameter_210, parameter_211 + + # pd_op.relu: (1x256x100x-1xf32) <- (1x256x100x-1xf32) + relu_23 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x100x-1xf32, 256x256x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + relu_23, parameter_207, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_207, relu_23 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_206, + parameter_205, + parameter_204, + parameter_203, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_203, parameter_204, parameter_205, parameter_206 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_24 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x256x50x-1xf32, 1024x256x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + relu_24, parameter_202, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_202, relu_24 + + # pd_op.batch_norm_: (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_201, + parameter_200, + parameter_199, + parameter_198, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_198, parameter_199, parameter_200, parameter_201 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x512x100x-1xf32, 1024x512x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + relu_22, parameter_197, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_197 + + # pd_op.batch_norm_: (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_196, + parameter_195, + parameter_194, + parameter_193, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_193, parameter_194, parameter_195, parameter_196 + + # pd_op.add: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32, 1x1024x50x-1xf32) + add_12 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + del batch_norm__156, batch_norm__162 + + # pd_op.relu: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32) + relu_25 = paddle._C_ops.relu(add_12) + del add_12 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x1024x50x-1xf32, 256x1024x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + relu_25, parameter_192, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_192 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_188, parameter_189, parameter_190, parameter_191 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_26 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 256x256x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + relu_26, parameter_187, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187, relu_26 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_27 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x256x50x-1xf32, 1024x256x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + relu_27, parameter_182, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_182, relu_27 + + # pd_op.batch_norm_: 
(1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_178, parameter_179, parameter_180, parameter_181 + + # pd_op.add: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32, 1x1024x50x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__180, relu_25) + del batch_norm__180, relu_25 + + # pd_op.relu: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32) + relu_28 = paddle._C_ops.relu(add_13) + del add_13 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x1024x50x-1xf32, 256x1024x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + relu_28, parameter_177, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_177 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_176, + parameter_175, + parameter_174, + parameter_173, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_173, parameter_174, parameter_175, parameter_176 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_29 = paddle._C_ops.relu(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 256x256x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + relu_29, parameter_172, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_172, relu_29 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_168, parameter_169, parameter_170, parameter_171 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_30 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x256x50x-1xf32, 1024x256x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + relu_30, parameter_167, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_167, relu_30 + + # pd_op.batch_norm_: (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + True, + float("0.9"), + 
float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_163, parameter_164, parameter_165, parameter_166 + + # pd_op.add: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32, 1x1024x50x-1xf32) + add_14 = paddle._C_ops.add(batch_norm__198, relu_28) + del batch_norm__198, relu_28 + + # pd_op.relu: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32) + relu_31 = paddle._C_ops.relu(add_14) + del add_14 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x1024x50x-1xf32, 256x1024x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + relu_31, parameter_162, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_162 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_32 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 256x256x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + relu_32, parameter_157, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_157, relu_32 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_153, parameter_154, parameter_155, parameter_156 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_33 = paddle._C_ops.relu(batch_norm__210) + del batch_norm__210 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x256x50x-1xf32, 1024x256x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + relu_33, parameter_152, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_152, relu_33 + + # pd_op.batch_norm_: (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_151, + parameter_150, + parameter_149, + parameter_148, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.add: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32, 1x1024x50x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__216, relu_31) + del batch_norm__216, relu_31 + + # pd_op.relu: 
(1x1024x50x-1xf32) <- (1x1024x50x-1xf32) + relu_34 = paddle._C_ops.relu(add_15) + del add_15 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x1024x50x-1xf32, 256x1024x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + relu_34, parameter_147, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_147 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_146, + parameter_145, + parameter_144, + parameter_143, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_143, parameter_144, parameter_145, parameter_146 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_35 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 256x256x3x3xf32) + conv2d_38 = paddle._C_ops.conv2d( + relu_35, parameter_142, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_142, relu_35 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_141, + parameter_140, + parameter_139, + parameter_138, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_138, parameter_139, parameter_140, parameter_141 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_36 = paddle._C_ops.relu(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x256x50x-1xf32, 1024x256x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + relu_36, parameter_137, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_137, relu_36 + + # pd_op.batch_norm_: (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_136, + parameter_135, + parameter_134, + parameter_133, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_133, parameter_134, parameter_135, parameter_136 + + # pd_op.add: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32, 1x1024x50x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__234, relu_34) + del batch_norm__234, relu_34 + + # pd_op.relu: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32) + relu_37 = paddle._C_ops.relu(add_16) + del add_16 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x1024x50x-1xf32, 256x1024x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + relu_37, parameter_132, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_132 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 
256xf32, 256xf32, 256xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_131, + parameter_130, + parameter_129, + parameter_128, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_128, parameter_129, parameter_130, parameter_131 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_38 = paddle._C_ops.relu(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 256x256x3x3xf32) + conv2d_41 = paddle._C_ops.conv2d( + relu_38, parameter_127, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_127, relu_38 + + # pd_op.batch_norm_: (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x50x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_126, + parameter_125, + parameter_124, + parameter_123, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_123, parameter_124, parameter_125, parameter_126 + + # pd_op.relu: (1x256x50x-1xf32) <- (1x256x50x-1xf32) + relu_39 = paddle._C_ops.relu(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (1x1024x50x-1xf32) <- (1x256x50x-1xf32, 1024x256x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + relu_39, parameter_122, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_122, relu_39 + + # pd_op.batch_norm_: (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x50x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_121, + parameter_120, + parameter_119, + parameter_118, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_118, parameter_119, parameter_120, parameter_121 + + # pd_op.add: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32, 1x1024x50x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__252, relu_37) + del batch_norm__252, relu_37 + + # pd_op.relu: (1x1024x50x-1xf32) <- (1x1024x50x-1xf32) + relu_40 = paddle._C_ops.relu(add_17) + del add_17 + + # pd_op.conv2d: (1x512x50x-1xf32) <- (1x1024x50x-1xf32, 512x1024x1x1xf32) + conv2d_43 = paddle._C_ops.conv2d( + relu_40, parameter_117, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_117 + + # pd_op.batch_norm_: (1x512x50x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x50x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_116, + parameter_115, + parameter_114, + parameter_113, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, 
None, None, None, None, None), + ) + del conv2d_43, parameter_113, parameter_114, parameter_115, parameter_116 + + # pd_op.relu: (1x512x50x-1xf32) <- (1x512x50x-1xf32) + relu_41 = paddle._C_ops.relu(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (1x512x25x-1xf32) <- (1x512x50x-1xf32, 512x512x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + relu_41, parameter_112, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_112, relu_41 + + # pd_op.batch_norm_: (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_111, + parameter_110, + parameter_109, + parameter_108, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_108, parameter_109, parameter_110, parameter_111 + + # pd_op.relu: (1x512x25x-1xf32) <- (1x512x25x-1xf32) + relu_42 = paddle._C_ops.relu(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (1x2048x25x-1xf32) <- (1x512x25x-1xf32, 2048x512x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + relu_42, parameter_107, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_107, relu_42 + + # pd_op.batch_norm_: (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_106, + parameter_105, + parameter_104, + parameter_103, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_103, parameter_104, parameter_105, parameter_106 + + # pd_op.conv2d: (1x2048x25x-1xf32) <- (1x1024x50x-1xf32, 2048x1024x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + relu_40, parameter_102, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_102 + + # pd_op.batch_norm_: (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_101, + parameter_100, + parameter_99, + parameter_98, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_100, parameter_101, parameter_98, parameter_99 + + # pd_op.add: (1x2048x25x-1xf32) <- (1x2048x25x-1xf32, 1x2048x25x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + del batch_norm__270, batch_norm__276 + + # pd_op.relu: (1x2048x25x-1xf32) <- (1x2048x25x-1xf32) + relu_43 = paddle._C_ops.relu(add_18) + del add_18 + + # pd_op.conv2d: (1x512x25x-1xf32) <- (1x2048x25x-1xf32, 512x2048x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + relu_43, parameter_97, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_97 + + # pd_op.batch_norm_: (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, 
-1xui8) <- (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_96, + parameter_95, + parameter_94, + parameter_93, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_93, parameter_94, parameter_95, parameter_96 + + # pd_op.relu: (1x512x25x-1xf32) <- (1x512x25x-1xf32) + relu_44 = paddle._C_ops.relu(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (1x512x25x-1xf32) <- (1x512x25x-1xf32, 512x512x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + relu_44, parameter_92, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_92, relu_44 + + # pd_op.batch_norm_: (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_91, + parameter_90, + parameter_89, + parameter_88, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_88, parameter_89, parameter_90, parameter_91 + + # pd_op.relu: (1x512x25x-1xf32) <- (1x512x25x-1xf32) + relu_45 = paddle._C_ops.relu(batch_norm__288) + del batch_norm__288 + + # pd_op.conv2d: (1x2048x25x-1xf32) <- (1x512x25x-1xf32, 2048x512x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + relu_45, parameter_87, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_87, relu_45 + + # pd_op.batch_norm_: (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_86, + parameter_85, + parameter_84, + parameter_83, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_83, parameter_84, parameter_85, parameter_86 + + # pd_op.add: (1x2048x25x-1xf32) <- (1x2048x25x-1xf32, 1x2048x25x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__294, relu_43) + del batch_norm__294, relu_43 + + # pd_op.relu: (1x2048x25x-1xf32) <- (1x2048x25x-1xf32) + relu_46 = paddle._C_ops.relu(add_19) + del add_19 + + # pd_op.conv2d: (1x512x25x-1xf32) <- (1x2048x25x-1xf32, 512x2048x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + relu_46, parameter_82, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_82 + + # pd_op.batch_norm_: (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_81, + parameter_80, + parameter_79, + parameter_78, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del conv2d_50, parameter_78, parameter_79, parameter_80, parameter_81 + + # pd_op.relu: (1x512x25x-1xf32) <- (1x512x25x-1xf32) + relu_47 = paddle._C_ops.relu(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (1x512x25x-1xf32) <- (1x512x25x-1xf32, 512x512x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + relu_47, parameter_77, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_77, relu_47 + + # pd_op.batch_norm_: (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x25x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_76, + parameter_75, + parameter_74, + parameter_73, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_73, parameter_74, parameter_75, parameter_76 + + # pd_op.relu: (1x512x25x-1xf32) <- (1x512x25x-1xf32) + relu_48 = paddle._C_ops.relu(batch_norm__306) + del batch_norm__306 + + # pd_op.conv2d: (1x2048x25x-1xf32) <- (1x512x25x-1xf32, 2048x512x1x1xf32) + conv2d_52 = paddle._C_ops.conv2d( + relu_48, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72, relu_48 + + # pd_op.batch_norm_: (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x25x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_71, + parameter_70, + parameter_69, + parameter_68, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_68, parameter_69, parameter_70, parameter_71 + + # pd_op.add: (1x2048x25x-1xf32) <- (1x2048x25x-1xf32, 1x2048x25x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__312, relu_46) + del batch_norm__312, relu_46 + + # pd_op.relu: (1x2048x25x-1xf32) <- (1x2048x25x-1xf32) + relu_49 = paddle._C_ops.relu(add_20) + del add_20 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 256x256x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + relu_10, parameter_67, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_67, relu_10 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_66, full_int_array_1) + del parameter_66 + + # pd_op.add: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 1x256x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_53, reshape_0) + del conv2d_53, reshape_0 + + # pd_op.conv2d: (1x256x100x-1xf32) <- (1x512x100x-1xf32, 256x512x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + relu_22, parameter_65, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_65, relu_22 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_64, full_int_array_1) + del parameter_64 + + # pd_op.add: (1x256x100x-1xf32) <- (1x256x100x-1xf32, 1x256x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_54, reshape_1) + del conv2d_54, reshape_1 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x1024x50x-1xf32, 256x1024x1x1xf32) + 
conv2d_55 = paddle._C_ops.conv2d( + relu_40, parameter_63, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_63, relu_40 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_62, full_int_array_1) + del parameter_62 + + # pd_op.add: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 1x256x1x1xf32) + add_23 = paddle._C_ops.add(conv2d_55, reshape_2) + del conv2d_55, reshape_2 + + # pd_op.conv2d: (1x256x25x-1xf32) <- (1x2048x25x-1xf32, 256x2048x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + relu_49, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61, relu_49 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_60, full_int_array_1) + del parameter_60 + + # pd_op.add: (1x256x25x-1xf32) <- (1x256x25x-1xf32, 1x256x1x1xf32) + add_24 = paddle._C_ops.add(conv2d_56, reshape_3) + del conv2d_56, reshape_3 + + # pd_op.nearest_interp: (1x256x50x-1xf32) <- (1x256x25x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + add_24, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.add: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 1x256x50x-1xf32) + add_25 = paddle._C_ops.add(add_23, nearest_interp_0) + del add_23, nearest_interp_0 + + # pd_op.nearest_interp: (1x256x100x-1xf32) <- (1x256x50x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + add_25, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.add: (1x256x100x-1xf32) <- (1x256x100x-1xf32, 1x256x100x-1xf32) + add_26 = paddle._C_ops.add(add_22, nearest_interp_1) + del add_22, nearest_interp_1 + + # pd_op.nearest_interp: (1x256x200x-1xf32) <- (1x256x100x-1xf32, None, None, None) + nearest_interp_2 = paddle._C_ops.nearest_interp( + add_26, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.add: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 1x256x200x-1xf32) + add_27 = paddle._C_ops.add(add_21, nearest_interp_2) + del add_21, nearest_interp_2 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 256x256x3x3xf32) + conv2d_57 = paddle._C_ops.conv2d( + add_27, parameter_59, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_27, parameter_59 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_58, full_int_array_1) + del parameter_58 + + # pd_op.add: (1x256x200x-1xf32) <- (1x256x200x-1xf32, 1x256x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_57, reshape_4) + del conv2d_57, reshape_4 + + # pd_op.conv2d: (1x256x100x-1xf32) <- (1x256x100x-1xf32, 256x256x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_26, parameter_57, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_26, parameter_57 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_56, full_int_array_1) + del parameter_56 + + # pd_op.add: (1x256x100x-1xf32) <- (1x256x100x-1xf32, 1x256x1x1xf32) + add_29 = paddle._C_ops.add(conv2d_58, reshape_5) + del conv2d_58, reshape_5 + + # pd_op.conv2d: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 256x256x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + add_25, parameter_55, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_25, parameter_55 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_54, full_int_array_1) 
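+        # NOTE: the 1x1 lateral convs (conv2d_53-conv2d_56), the 2x nearest_interp
+        # upsampling with element-wise adds, and the 3x3 convs (conv2d_57-conv2d_60)
+        # appear to implement an FPN-style top-down feature pyramid over the backbone
+        # outputs (relu_10, relu_22, relu_40, relu_49).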
+ del parameter_54 + + # pd_op.add: (1x256x50x-1xf32) <- (1x256x50x-1xf32, 1x256x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_59, reshape_6) + del conv2d_59, reshape_6 + + # pd_op.conv2d: (1x256x25x-1xf32) <- (1x256x25x-1xf32, 256x256x3x3xf32) + conv2d_60 = paddle._C_ops.conv2d( + add_24, parameter_53, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_24, parameter_53 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_52, full_int_array_1) + del parameter_52 + + # pd_op.add: (1x256x25x-1xf32) <- (1x256x25x-1xf32, 1x256x1x1xf32) + add_31 = paddle._C_ops.add(conv2d_60, reshape_7) + del conv2d_60, reshape_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.pool2d: (1x256x13x-1xf32) <- (1x256x25x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + add_31, + full_int_array_2, + [2, 2], + [0, 0], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.conv2d: (1x128x200x-1xf32) <- (1x256x200x-1xf32, 128x256x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + add_28, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.group_norm: (1x128x200x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x200x-1xf32, 128xf32, 128xf32) + group_norm_0, group_norm_1, group_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_61, parameter_50, parameter_49, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_61, parameter_49, parameter_50 + + # pd_op.relu: (1x128x200x-1xf32) <- (1x128x200x-1xf32) + relu_50 = paddle._C_ops.relu(group_norm_0) + del group_norm_0 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x256x100x-1xf32, 128x256x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + add_29, parameter_48, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_48 + + # pd_op.group_norm: (1x128x100x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x100x-1xf32, 128xf32, 128xf32) + group_norm_3, group_norm_4, group_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_62, parameter_47, parameter_46, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_62, parameter_46, parameter_47 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_51 = paddle._C_ops.relu(group_norm_3) + del group_norm_3 + + # pd_op.bilinear_interp: (1x128x200x-1xf32) <- (1x128x100x-1xf32, None, None, None) + bilinear_interp_0 = paddle._C_ops.bilinear_interp( + relu_51, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_51 + + # pd_op.add: (1x128x200x-1xf32) <- (1x128x200x-1xf32, 1x128x200x-1xf32) + add_32 = paddle._C_ops.add(relu_50, bilinear_interp_0) + del bilinear_interp_0, relu_50 + + # pd_op.conv2d: (1x128x50x-1xf32) <- (1x256x50x-1xf32, 128x256x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + add_30, parameter_45, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45 + + # pd_op.group_norm: (1x128x50x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x50x-1xf32, 128xf32, 128xf32) + group_norm_6, group_norm_7, group_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_63, parameter_44, parameter_43, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_63, parameter_43, parameter_44 + + # pd_op.relu: (1x128x50x-1xf32) <- (1x128x50x-1xf32) + relu_52 = 
paddle._C_ops.relu(group_norm_6) + del group_norm_6 + + # pd_op.bilinear_interp: (1x128x100x-1xf32) <- (1x128x50x-1xf32, None, None, None) + bilinear_interp_1 = paddle._C_ops.bilinear_interp( + relu_52, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_52 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x128x100x-1xf32, 128x128x3x3xf32) + conv2d_64 = paddle._C_ops.conv2d( + bilinear_interp_1, + parameter_42, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_1, parameter_42 + + # pd_op.group_norm: (1x128x100x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x100x-1xf32, 128xf32, 128xf32) + group_norm_9, group_norm_10, group_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_64, parameter_41, parameter_40, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_64, parameter_40, parameter_41 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_53 = paddle._C_ops.relu(group_norm_9) + del group_norm_9 + + # pd_op.bilinear_interp: (1x128x200x-1xf32) <- (1x128x100x-1xf32, None, None, None) + bilinear_interp_2 = paddle._C_ops.bilinear_interp( + relu_53, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_53 + + # pd_op.add: (1x128x200x-1xf32) <- (1x128x200x-1xf32, 1x128x200x-1xf32) + add_33 = paddle._C_ops.add(add_32, bilinear_interp_2) + del add_32, bilinear_interp_2 + + # pd_op.shape64: (4xi64) <- (1x256x25x-1xf32) + shape64_0 = paddle._C_ops.shape64(add_31) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_0 = paddle._C_ops.linspace( + full_0, + full_1, + slice_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("25"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, 1xi32) + linspace_1 = paddle._C_ops.linspace( + full_0, + full_1, + full_2, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [linspace_1, linspace_0] + del linspace_0 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(split_1, full_int_array_5) + del split_1 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(split_0, full_int_array_5) + del split_0 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [1, 1, -1, -1] + + # pd_op.expand: (1x1x-1x-1xf32) <- 
(1x1x-1x-1xf32, 4xi64) + expand_0 = paddle._C_ops.expand(unsqueeze_1, full_int_array_6) + del unsqueeze_1 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_1 = paddle._C_ops.expand(unsqueeze_0, full_int_array_6) + del unsqueeze_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_1 = [expand_1, expand_0] + del expand_0, expand_1 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 + + # builtin.combine: ([1x256x25x-1xf32, 1x2x-1x-1xf32]) <- (1x256x25x-1xf32, 1x2x-1x-1xf32) + combine_2 = [add_31, concat_0] + del concat_0 + + # pd_op.concat: (1x258x25x-1xf32) <- ([1x256x25x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_3) + del combine_2 + + # pd_op.conv2d: (1x128x25x-1xf32) <- (1x258x25x-1xf32, 128x258x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + concat_1, parameter_39, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_1, parameter_39 + + # pd_op.group_norm: (1x128x25x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x25x-1xf32, 128xf32, 128xf32) + group_norm_12, group_norm_13, group_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_65, parameter_38, parameter_37, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_65, parameter_37, parameter_38 + + # pd_op.relu: (1x128x25x-1xf32) <- (1x128x25x-1xf32) + relu_54 = paddle._C_ops.relu(group_norm_12) + del group_norm_12 + + # pd_op.bilinear_interp: (1x128x50x-1xf32) <- (1x128x25x-1xf32, None, None, None) + bilinear_interp_3 = paddle._C_ops.bilinear_interp( + relu_54, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_54 + + # pd_op.conv2d: (1x128x50x-1xf32) <- (1x128x50x-1xf32, 128x128x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + bilinear_interp_3, + parameter_36, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_3, parameter_36 + + # pd_op.group_norm: (1x128x50x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x50x-1xf32, 128xf32, 128xf32) + group_norm_15, group_norm_16, group_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_66, parameter_35, parameter_34, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_66, parameter_34, parameter_35 + + # pd_op.relu: (1x128x50x-1xf32) <- (1x128x50x-1xf32) + relu_55 = paddle._C_ops.relu(group_norm_15) + del group_norm_15 + + # pd_op.bilinear_interp: (1x128x100x-1xf32) <- (1x128x50x-1xf32, None, None, None) + bilinear_interp_4 = paddle._C_ops.bilinear_interp( + relu_55, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_55 + + # pd_op.conv2d: (1x128x100x-1xf32) <- (1x128x100x-1xf32, 128x128x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + bilinear_interp_4, + parameter_33, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_4, parameter_33 + + # pd_op.group_norm: (1x128x100x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x100x-1xf32, 128xf32, 128xf32) + group_norm_18, group_norm_19, group_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_67, parameter_32, parameter_31, float("1e-05"), 32, "NCHW" + ), + 
lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_67, parameter_31, parameter_32 + + # pd_op.relu: (1x128x100x-1xf32) <- (1x128x100x-1xf32) + relu_56 = paddle._C_ops.relu(group_norm_18) + del group_norm_18 + + # pd_op.bilinear_interp: (1x128x200x-1xf32) <- (1x128x100x-1xf32, None, None, None) + bilinear_interp_5 = paddle._C_ops.bilinear_interp( + relu_56, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_56 + + # pd_op.add: (1x128x200x-1xf32) <- (1x128x200x-1xf32, 1x128x200x-1xf32) + add_34 = paddle._C_ops.add(add_33, bilinear_interp_5) + del add_33, bilinear_interp_5 + + # pd_op.conv2d: (1x256x200x-1xf32) <- (1x128x200x-1xf32, 256x128x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + add_34, parameter_30, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_34, parameter_30 + + # pd_op.group_norm: (1x256x200x-1xf32, 1x32xf32, 1x32xf32) <- (1x256x200x-1xf32, 256xf32, 256xf32) + group_norm_21, group_norm_22, group_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_68, parameter_29, parameter_28, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_68, parameter_28, parameter_29 + + # pd_op.relu: (1x256x200x-1xf32) <- (1x256x200x-1xf32) + relu_0 = paddle._C_ops.relu(group_norm_21) + del group_norm_21 + + # pd_op.bilinear_interp: (1x256x100x-1xf32) <- (1x256x200x-1xf32, None, None, None) + bilinear_interp_6 = paddle._C_ops.bilinear_interp( + add_28, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("0.5"), float("0.5")], + "bilinear", + False, + 0, + ) + del add_28 + + # builtin.combine: ([1xi32, xi64]) <- (1xi32, xi64) + combine_3 = [full_2, slice_0] + del full_2 + + # pd_op.bilinear_interp: (1x256x25x-1xf32) <- (1x256x13x-1xf32, None, [1xi32, xi64], None) + bilinear_interp_7 = paddle._C_ops.bilinear_interp( + pool2d_1, + None, + combine_3, + None, + "NCHW", + -1, + 25, + -1, + [], + "bilinear", + False, + 0, + ) + del combine_3, pool2d_1 + + # pd_op.shape64: (4xi64) <- (1x256x100x-1xf32) + shape64_1 = paddle._C_ops.shape64(bilinear_interp_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_1 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_2 = paddle._C_ops.linspace( + full_0, + full_1, + slice_1, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("100"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, 1xi32) + linspace_3 = paddle._C_ops.linspace( + full_0, + full_1, + full_4, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del full_4 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [linspace_3, linspace_2] + del linspace_2 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(split_3, full_int_array_5) + del split_3 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(split_2, full_int_array_5) + 
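+        # NOTE: as at the previous pyramid level, the linspace/meshgrid/unsqueeze/
+        # expand/concat ops here build normalized x/y coordinate channels in [-1, 1]
+        # that are concatenated to the feature map (CoordConv-style) before the
+        # prediction-head convs.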
del split_2 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_2 = paddle._C_ops.expand(unsqueeze_3, full_int_array_6) + del unsqueeze_3 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_3 = paddle._C_ops.expand(unsqueeze_2, full_int_array_6) + del unsqueeze_2 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_5 = [expand_3, expand_2] + del expand_2, expand_3 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_5, full_3) + del combine_5 + + # builtin.combine: ([1x256x100x-1xf32, 1x2x-1x-1xf32]) <- (1x256x100x-1xf32, 1x2x-1x-1xf32) + combine_6 = [bilinear_interp_6, concat_2] + del bilinear_interp_6, concat_2 + + # pd_op.concat: (1x258x100x-1xf32) <- ([1x256x100x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_6, full_3) + del combine_6 + + # pd_op.bilinear_interp: (1x258x40x40xf32) <- (1x258x100x-1xf32, None, None, None) + bilinear_interp_8 = paddle._C_ops.bilinear_interp( + concat_3, None, None, None, "NCHW", -1, 40, 40, [], "bilinear", False, 0 + ) + del concat_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.slice: (1x256x40x40xf32) <- (1x258x40x40xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + bilinear_interp_8, [1], full_int_array_7, full_int_array_8, [1], [] + ) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x258x40x40xf32, 512x258x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + bilinear_interp_8, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_8 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_24, group_norm_25, group_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_69, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_69 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_57 = paddle._C_ops.relu(group_norm_24) + del group_norm_24 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + relu_57, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_57 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_27, group_norm_28, group_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_70, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_70 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_58 = paddle._C_ops.relu(group_norm_27) + del group_norm_27 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_71 = paddle._C_ops.conv2d( + relu_58, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_58 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_30, group_norm_31, group_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_71, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_71 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_59 = 
paddle._C_ops.relu(group_norm_30) + del group_norm_30 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + relu_59, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_59 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_33, group_norm_34, group_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_72, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_72 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_60 = paddle._C_ops.relu(group_norm_33) + del group_norm_33 + + # pd_op.conv2d: (1x256x40x40xf32) <- (1x512x40x40xf32, 256x512x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + relu_60, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_60 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_14, full_int_array_1) + del parameter_14 + + # pd_op.add: (1x256x40x40xf32) <- (1x256x40x40xf32, 1x256x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_73, reshape_8) + del conv2d_73 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x256x40x40xf32, 512x256x3x3xf32) + conv2d_74 = paddle._C_ops.conv2d( + slice_2, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_2 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_36, group_norm_37, group_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_74, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_74 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_61 = paddle._C_ops.relu(group_norm_36) + del group_norm_36 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_75 = paddle._C_ops.conv2d( + relu_61, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_61 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_39, group_norm_40, group_norm_41 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_75, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_75 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_62 = paddle._C_ops.relu(group_norm_39) + del group_norm_39 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_76 = paddle._C_ops.conv2d( + relu_62, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_62 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_42, group_norm_43, group_norm_44 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_76, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_76 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_63 = paddle._C_ops.relu(group_norm_42) + del group_norm_42 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_77 = paddle._C_ops.conv2d( + relu_63, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_63 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 
1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_45, group_norm_46, group_norm_47 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_77, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_77 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_64 = paddle._C_ops.relu(group_norm_45) + del group_norm_45 + + # pd_op.conv2d: (1x2x40x40xf32) <- (1x512x40x40xf32, 2x512x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + relu_64, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_64 + + # pd_op.reshape: (1x2x1x1xf32) <- (2xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (1x2x40x40xf32) <- (1x2x40x40xf32, 1x2x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_78, reshape_9) + del conv2d_78 + + # pd_op.sigmoid: (1x2x40x40xf32) <- (1x2x40x40xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_35) + del add_35 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [2, 2] + + # pd_op.pool2d: (1x2x41x41xf32) <- (1x2x40x40xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + sigmoid_0, + full_int_array_9, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [0, 0] + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [-1, -1] + + # pd_op.slice: (1x2x40x40xf32) <- (1x2x41x41xf32, 2xi64, 2xi64) + slice_3 = paddle._C_ops.slice( + pool2d_2, [2, 3], full_int_array_10, full_int_array_11, [1, 1], [] + ) + del pool2d_2 + + # pd_op.equal: (1x2x40x40xb) <- (1x2x40x40xf32, 1x2x40x40xf32) + equal_0 = paddle._C_ops.equal(slice_3, sigmoid_0) + del slice_3 + + # pd_op.cast: (1x2x40x40xf32) <- (1x2x40x40xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.multiply: (1x2x40x40xf32) <- (1x2x40x40xf32, 1x2x40x40xf32) + multiply_0 = paddle._C_ops.multiply(sigmoid_0, cast_0) + del cast_0, sigmoid_0 + + # pd_op.transpose: (1x40x40x2xf32) <- (1x2x40x40xf32) + transpose_0 = paddle._C_ops.transpose(multiply_0, [0, 2, 3, 1]) + del multiply_0 + + # pd_op.shape64: (4xi64) <- (1x256x100x-1xf32) + shape64_2 = paddle._C_ops.shape64(add_29) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_2 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_4 = paddle._C_ops.linspace( + full_0, + full_1, + slice_4, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_4 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_7 = [linspace_3, linspace_4] + del linspace_3, linspace_4 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_7) + del combine_7 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(split_5, full_int_array_5) + del split_5 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(split_4, full_int_array_5) + del split_4 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_4 = paddle._C_ops.expand(unsqueeze_5, full_int_array_6) + del unsqueeze_5 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 
4xi64) + expand_5 = paddle._C_ops.expand(unsqueeze_4, full_int_array_6) + del unsqueeze_4 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_8 = [expand_5, expand_4] + del expand_4, expand_5 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_8, full_3) + del combine_8 + + # builtin.combine: ([1x256x100x-1xf32, 1x2x-1x-1xf32]) <- (1x256x100x-1xf32, 1x2x-1x-1xf32) + combine_9 = [add_29, concat_4] + del add_29, concat_4 + + # pd_op.concat: (1x258x100x-1xf32) <- ([1x256x100x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_9, full_3) + del combine_9 + + # pd_op.bilinear_interp: (1x258x36x36xf32) <- (1x258x100x-1xf32, None, None, None) + bilinear_interp_9 = paddle._C_ops.bilinear_interp( + concat_5, None, None, None, "NCHW", -1, 36, 36, [], "bilinear", False, 0 + ) + del concat_5 + + # pd_op.slice: (1x256x36x36xf32) <- (1x258x36x36xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + bilinear_interp_9, [1], full_int_array_7, full_int_array_8, [1], [] + ) + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x258x36x36xf32, 512x258x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + bilinear_interp_9, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_9 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_48, group_norm_49, group_norm_50 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_79, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_79 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_65 = paddle._C_ops.relu(group_norm_48) + del group_norm_48 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + relu_65, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_65 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_51, group_norm_52, group_norm_53 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_80, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_80 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_66 = paddle._C_ops.relu(group_norm_51) + del group_norm_51 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_81 = paddle._C_ops.conv2d( + relu_66, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_66 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_54, group_norm_55, group_norm_56 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_81, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_81 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_67 = paddle._C_ops.relu(group_norm_54) + del group_norm_54 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + relu_67, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_67 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_57, group_norm_58, 
group_norm_59 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_82, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_82 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_68 = paddle._C_ops.relu(group_norm_57) + del group_norm_57 + + # pd_op.conv2d: (1x256x36x36xf32) <- (1x512x36x36xf32, 256x512x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + relu_68, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_68 + + # pd_op.add: (1x256x36x36xf32) <- (1x256x36x36xf32, 1x256x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_83, reshape_8) + del conv2d_83 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x256x36x36xf32, 512x256x3x3xf32) + conv2d_84 = paddle._C_ops.conv2d( + slice_5, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_5 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_60, group_norm_61, group_norm_62 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_84, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_84 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_69 = paddle._C_ops.relu(group_norm_60) + del group_norm_60 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + relu_69, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_69 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_63, group_norm_64, group_norm_65 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_85, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_85 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_70 = paddle._C_ops.relu(group_norm_63) + del group_norm_63 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + relu_70, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_70 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_66, group_norm_67, group_norm_68 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_86, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_86 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_71 = paddle._C_ops.relu(group_norm_66) + del group_norm_66 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + relu_71, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_71 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_69, group_norm_70, group_norm_71 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_87, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_87 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_72 = paddle._C_ops.relu(group_norm_69) + del group_norm_69 + + # pd_op.conv2d: (1x2x36x36xf32) <- (1x512x36x36xf32, 2x512x3x3xf32) + conv2d_88 = 
paddle._C_ops.conv2d( + relu_72, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_72 + + # pd_op.add: (1x2x36x36xf32) <- (1x2x36x36xf32, 1x2x1x1xf32) + add_36 = paddle._C_ops.add(conv2d_88, reshape_9) + del conv2d_88 + + # pd_op.sigmoid: (1x2x36x36xf32) <- (1x2x36x36xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_36) + del add_36 + + # pd_op.pool2d: (1x2x37x37xf32) <- (1x2x36x36xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + sigmoid_1, + full_int_array_9, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.slice: (1x2x36x36xf32) <- (1x2x37x37xf32, 2xi64, 2xi64) + slice_6 = paddle._C_ops.slice( + pool2d_3, [2, 3], full_int_array_10, full_int_array_11, [1, 1], [] + ) + del pool2d_3 + + # pd_op.equal: (1x2x36x36xb) <- (1x2x36x36xf32, 1x2x36x36xf32) + equal_1 = paddle._C_ops.equal(slice_6, sigmoid_1) + del slice_6 + + # pd_op.cast: (1x2x36x36xf32) <- (1x2x36x36xb) + cast_1 = paddle._C_ops.cast(equal_1, paddle.float32) + del equal_1 + + # pd_op.multiply: (1x2x36x36xf32) <- (1x2x36x36xf32, 1x2x36x36xf32) + multiply_1 = paddle._C_ops.multiply(sigmoid_1, cast_1) + del cast_1, sigmoid_1 + + # pd_op.transpose: (1x36x36x2xf32) <- (1x2x36x36xf32) + transpose_1 = paddle._C_ops.transpose(multiply_1, [0, 2, 3, 1]) + del multiply_1 + + # pd_op.shape64: (4xi64) <- (1x256x50x-1xf32) + shape64_3 = paddle._C_ops.shape64(add_30) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_3 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_5 = paddle._C_ops.linspace( + full_0, + full_1, + slice_7, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_7 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("50"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, 1xi32) + linspace_6 = paddle._C_ops.linspace( + full_0, + full_1, + full_5, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del full_5 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_10 = [linspace_6, linspace_5] + del linspace_5, linspace_6 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_3 = paddle._C_ops.meshgrid(combine_10) + del combine_10 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_6, + split_7, + ) = meshgrid_3 + del meshgrid_3 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(split_7, full_int_array_5) + del split_7 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(split_6, full_int_array_5) + del split_6 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_6 = paddle._C_ops.expand(unsqueeze_7, full_int_array_6) + del unsqueeze_7 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_7 = paddle._C_ops.expand(unsqueeze_6, full_int_array_6) + del unsqueeze_6 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_11 = [expand_7, expand_6] + del expand_6, expand_7 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_11, full_3) + del combine_11 + + # builtin.combine: ([1x256x50x-1xf32, 1x2x-1x-1xf32]) <- (1x256x50x-1xf32, 1x2x-1x-1xf32) + combine_12 = [add_30, concat_6] + del add_30, concat_6 + + # 
pd_op.concat: (1x258x50x-1xf32) <- ([1x256x50x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_12, full_3) + del combine_12 + + # pd_op.bilinear_interp: (1x258x24x24xf32) <- (1x258x50x-1xf32, None, None, None) + bilinear_interp_10 = paddle._C_ops.bilinear_interp( + concat_7, None, None, None, "NCHW", -1, 24, 24, [], "bilinear", False, 0 + ) + del concat_7 + + # pd_op.slice: (1x256x24x24xf32) <- (1x258x24x24xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + bilinear_interp_10, [1], full_int_array_7, full_int_array_8, [1], [] + ) + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x258x24x24xf32, 512x258x3x3xf32) + conv2d_89 = paddle._C_ops.conv2d( + bilinear_interp_10, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_10 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_72, group_norm_73, group_norm_74 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_89, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_89 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_73 = paddle._C_ops.relu(group_norm_72) + del group_norm_72 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_90 = paddle._C_ops.conv2d( + relu_73, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_73 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_75, group_norm_76, group_norm_77 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_90, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_90 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_74 = paddle._C_ops.relu(group_norm_75) + del group_norm_75 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_91 = paddle._C_ops.conv2d( + relu_74, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_74 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_78, group_norm_79, group_norm_80 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_91, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_91 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_75 = paddle._C_ops.relu(group_norm_78) + del group_norm_78 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + relu_75, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_75 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_81, group_norm_82, group_norm_83 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_92, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_92 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_76 = paddle._C_ops.relu(group_norm_81) + del group_norm_81 + + # pd_op.conv2d: (1x256x24x24xf32) <- (1x512x24x24xf32, 256x512x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + relu_76, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
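Every repeated block in the towers above is a bias-free 3x3 convolution followed by 32-group GroupNorm (epsilon 1e-05) and ReLU; the (lambda x, f: f(x)) wrapper only guards against paddle._C_ops.group_norm returning either a single tensor or an (out, mean, variance) tuple, so the first element is always the normalised feature. A rough equivalent of one block with the public layer API, assuming 512 input and output channels (the class name is illustrative):

import paddle
import paddle.nn as nn


class ConvGNReLU(nn.Layer):
    # 3x3 conv (stride 1, padding 1, no bias) -> GroupNorm(32 groups) -> ReLU,
    # mirroring the conv2d/group_norm/relu triplets captured above.
    def __init__(self, in_channels=512, out_channels=512, groups=32):
        super().__init__()
        self.conv = nn.Conv2D(in_channels, out_channels, 3, padding=1, bias_attr=False)
        self.norm = nn.GroupNorm(num_groups=groups, num_channels=out_channels, epsilon=1e-05)
        self.act = nn.ReLU()

    def forward(self, x):
        return self.act(self.norm(self.conv(x)))


y = ConvGNReLU()(paddle.randn([1, 512, 36, 36]))   # -> [1, 512, 36, 36]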
del relu_76 + + # pd_op.add: (1x256x24x24xf32) <- (1x256x24x24xf32, 1x256x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_93, reshape_8) + del conv2d_93 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x256x24x24xf32, 512x256x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + slice_8, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_8 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_84, group_norm_85, group_norm_86 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_94, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_94 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_77 = paddle._C_ops.relu(group_norm_84) + del group_norm_84 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + relu_77, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_77 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_87, group_norm_88, group_norm_89 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_95, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_95 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_78 = paddle._C_ops.relu(group_norm_87) + del group_norm_87 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + relu_78, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_78 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_90, group_norm_91, group_norm_92 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_96, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_96 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_79 = paddle._C_ops.relu(group_norm_90) + del group_norm_90 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + relu_79, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_79 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_93, group_norm_94, group_norm_95 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_97, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_97 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_80 = paddle._C_ops.relu(group_norm_93) + del group_norm_93 + + # pd_op.conv2d: (1x2x24x24xf32) <- (1x512x24x24xf32, 2x512x3x3xf32) + conv2d_98 = paddle._C_ops.conv2d( + relu_80, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_80 + + # pd_op.add: (1x2x24x24xf32) <- (1x2x24x24xf32, 1x2x1x1xf32) + add_37 = paddle._C_ops.add(conv2d_98, reshape_9) + del conv2d_98 + + # pd_op.sigmoid: (1x2x24x24xf32) <- (1x2x24x24xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_37) + del add_37 + + # pd_op.pool2d: (1x2x25x25xf32) <- (1x2x24x24xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + sigmoid_2, + full_int_array_9, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + 
"EXPLICIT", + ) + + # pd_op.slice: (1x2x24x24xf32) <- (1x2x25x25xf32, 2xi64, 2xi64) + slice_9 = paddle._C_ops.slice( + pool2d_4, [2, 3], full_int_array_10, full_int_array_11, [1, 1], [] + ) + del pool2d_4 + + # pd_op.equal: (1x2x24x24xb) <- (1x2x24x24xf32, 1x2x24x24xf32) + equal_2 = paddle._C_ops.equal(slice_9, sigmoid_2) + del slice_9 + + # pd_op.cast: (1x2x24x24xf32) <- (1x2x24x24xb) + cast_2 = paddle._C_ops.cast(equal_2, paddle.float32) + del equal_2 + + # pd_op.multiply: (1x2x24x24xf32) <- (1x2x24x24xf32, 1x2x24x24xf32) + multiply_2 = paddle._C_ops.multiply(sigmoid_2, cast_2) + del cast_2, sigmoid_2 + + # pd_op.transpose: (1x24x24x2xf32) <- (1x2x24x24xf32) + transpose_2 = paddle._C_ops.transpose(multiply_2, [0, 2, 3, 1]) + del multiply_2 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_7 = paddle._C_ops.linspace( + full_0, + full_1, + slice_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_0 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_13 = [linspace_1, linspace_7] + del linspace_7 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_4 = paddle._C_ops.meshgrid(combine_13) + del combine_13 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_8, + split_9, + ) = meshgrid_4 + del meshgrid_4 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_8 = paddle._C_ops.unsqueeze(split_9, full_int_array_5) + del split_9 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_9 = paddle._C_ops.unsqueeze(split_8, full_int_array_5) + del split_8 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_8 = paddle._C_ops.expand(unsqueeze_9, full_int_array_6) + del unsqueeze_9 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_9 = paddle._C_ops.expand(unsqueeze_8, full_int_array_6) + del unsqueeze_8 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_14 = [expand_9, expand_8] + del expand_8, expand_9 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_14, full_3) + del combine_14 + + # builtin.combine: ([1x256x25x-1xf32, 1x2x-1x-1xf32]) <- (1x256x25x-1xf32, 1x2x-1x-1xf32) + combine_15 = [add_31, concat_8] + del add_31, concat_8 + + # pd_op.concat: (1x258x25x-1xf32) <- ([1x256x25x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_15, full_3) + del combine_15 + + # pd_op.bilinear_interp: (1x258x16x16xf32) <- (1x258x25x-1xf32, None, None, None) + bilinear_interp_11 = paddle._C_ops.bilinear_interp( + concat_9, None, None, None, "NCHW", -1, 16, 16, [], "bilinear", False, 0 + ) + del concat_9 + + # pd_op.slice: (1x256x16x16xf32) <- (1x258x16x16xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + bilinear_interp_11, [1], full_int_array_7, full_int_array_8, [1], [] + ) + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x258x16x16xf32, 512x258x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + bilinear_interp_11, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_11 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_96, group_norm_97, group_norm_98 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_99, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_99 + + # 
pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_81 = paddle._C_ops.relu(group_norm_96) + del group_norm_96 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + relu_81, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_81 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_99, group_norm_100, group_norm_101 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_100, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_100 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_82 = paddle._C_ops.relu(group_norm_99) + del group_norm_99 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_101 = paddle._C_ops.conv2d( + relu_82, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_82 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_102, group_norm_103, group_norm_104 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_101, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_101 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_83 = paddle._C_ops.relu(group_norm_102) + del group_norm_102 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_102 = paddle._C_ops.conv2d( + relu_83, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_83 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_105, group_norm_106, group_norm_107 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_102, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_102 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_84 = paddle._C_ops.relu(group_norm_105) + del group_norm_105 + + # pd_op.conv2d: (1x256x16x16xf32) <- (1x512x16x16xf32, 256x512x3x3xf32) + conv2d_103 = paddle._C_ops.conv2d( + relu_84, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_84 + + # pd_op.add: (1x256x16x16xf32) <- (1x256x16x16xf32, 1x256x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_103, reshape_8) + del conv2d_103 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x256x16x16xf32, 512x256x3x3xf32) + conv2d_104 = paddle._C_ops.conv2d( + slice_10, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_10 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_108, group_norm_109, group_norm_110 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_104, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_104 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_85 = paddle._C_ops.relu(group_norm_108) + del group_norm_108 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_105 = paddle._C_ops.conv2d( + relu_85, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_85 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 
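The bilinear_interp ops above resample the coordinate-augmented 258-channel map to each level's fixed grid (40, 36, 24, 16 and 12 in this subgraph), and the slice that follows keeps only the 256 feature channels where the two appended coordinate channels are not needed. Roughly, with the public API (feat_258 stands in for any 258-channel input):

import paddle
import paddle.nn.functional as F

feat_258 = paddle.randn([1, 258, 25, 34])      # 256 feature channels + 2 coordinate channels
resized = F.interpolate(feat_258, size=[16, 16], mode="bilinear", align_corners=False)
feat_256 = resized[:, :256, :, :]              # coordinate channels dropped again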
512xf32) + group_norm_111, group_norm_112, group_norm_113 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_105, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_105 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_86 = paddle._C_ops.relu(group_norm_111) + del group_norm_111 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + relu_86, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_86 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_114, group_norm_115, group_norm_116 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_106, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_106 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_87 = paddle._C_ops.relu(group_norm_114) + del group_norm_114 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + relu_87, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_87 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_117, group_norm_118, group_norm_119 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_107, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_107 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_88 = paddle._C_ops.relu(group_norm_117) + del group_norm_117 + + # pd_op.conv2d: (1x2x16x16xf32) <- (1x512x16x16xf32, 2x512x3x3xf32) + conv2d_108 = paddle._C_ops.conv2d( + relu_88, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_88 + + # pd_op.add: (1x2x16x16xf32) <- (1x2x16x16xf32, 1x2x1x1xf32) + add_38 = paddle._C_ops.add(conv2d_108, reshape_9) + del conv2d_108 + + # pd_op.sigmoid: (1x2x16x16xf32) <- (1x2x16x16xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_38) + del add_38 + + # pd_op.pool2d: (1x2x17x17xf32) <- (1x2x16x16xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + sigmoid_3, + full_int_array_9, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.slice: (1x2x16x16xf32) <- (1x2x17x17xf32, 2xi64, 2xi64) + slice_11 = paddle._C_ops.slice( + pool2d_5, [2, 3], full_int_array_10, full_int_array_11, [1, 1], [] + ) + del pool2d_5 + + # pd_op.equal: (1x2x16x16xb) <- (1x2x16x16xf32, 1x2x16x16xf32) + equal_3 = paddle._C_ops.equal(slice_11, sigmoid_3) + del slice_11 + + # pd_op.cast: (1x2x16x16xf32) <- (1x2x16x16xb) + cast_3 = paddle._C_ops.cast(equal_3, paddle.float32) + del equal_3 + + # pd_op.multiply: (1x2x16x16xf32) <- (1x2x16x16xf32, 1x2x16x16xf32) + multiply_3 = paddle._C_ops.multiply(sigmoid_3, cast_3) + del cast_3, sigmoid_3 + + # pd_op.transpose: (1x16x16x2xf32) <- (1x2x16x16xf32) + transpose_3 = paddle._C_ops.transpose(multiply_3, [0, 2, 3, 1]) + del multiply_3 + + # pd_op.shape64: (4xi64) <- (1x256x25x-1xf32) + shape64_4 = paddle._C_ops.shape64(bilinear_interp_7) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4, shape64_4 + + # pd_op.linspace: (-1xf32) <- (1xf32, 
1xf32, xi64) + linspace_8 = paddle._C_ops.linspace( + full_0, + full_1, + slice_12, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del full_0, full_1, slice_12 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_16 = [linspace_1, linspace_8] + del linspace_1, linspace_8 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_5 = paddle._C_ops.meshgrid(combine_16) + del combine_16 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_10, + split_11, + ) = meshgrid_5 + del meshgrid_5 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_10 = paddle._C_ops.unsqueeze(split_11, full_int_array_5) + del split_11 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_11 = paddle._C_ops.unsqueeze(split_10, full_int_array_5) + del full_int_array_5, split_10 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_10 = paddle._C_ops.expand(unsqueeze_11, full_int_array_6) + del unsqueeze_11 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_11 = paddle._C_ops.expand(unsqueeze_10, full_int_array_6) + del full_int_array_6, unsqueeze_10 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_17 = [expand_11, expand_10] + del expand_10, expand_11 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_17, full_3) + del combine_17 + + # builtin.combine: ([1x256x25x-1xf32, 1x2x-1x-1xf32]) <- (1x256x25x-1xf32, 1x2x-1x-1xf32) + combine_18 = [bilinear_interp_7, concat_10] + del bilinear_interp_7, concat_10 + + # pd_op.concat: (1x258x25x-1xf32) <- ([1x256x25x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_18, full_3) + del combine_18, full_3 + + # pd_op.bilinear_interp: (1x258x12x12xf32) <- (1x258x25x-1xf32, None, None, None) + bilinear_interp_12 = paddle._C_ops.bilinear_interp( + concat_11, None, None, None, "NCHW", -1, 12, 12, [], "bilinear", False, 0 + ) + del concat_11 + + # pd_op.slice: (1x256x12x12xf32) <- (1x258x12x12xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + bilinear_interp_12, [1], full_int_array_7, full_int_array_8, [1], [] + ) + del full_int_array_7, full_int_array_8 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x258x12x12xf32, 512x258x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + bilinear_interp_12, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_12, parameter_27 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_120, group_norm_121, group_norm_122 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_109, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_109, parameter_25, parameter_26 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_89 = paddle._C_ops.relu(group_norm_120) + del group_norm_120 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + relu_89, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24, relu_89 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_123, group_norm_124, group_norm_125 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_110, parameter_23, parameter_22, 
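The recurring linspace / meshgrid / unsqueeze / expand / concat sequence builds the two normalised coordinate channels (the CoordConv trick used by SOLOv2): one channel varies linearly across the width, the other across the height, and both are concatenated to the 256-channel feature before the head convolutions that expect 258 input channels. The dynamic width is read from the tensor at run time (shape64 + slice), which is why -1 dimensions appear in the shapes. A compact sketch, assuming the endpoints held in full_0/full_1 are -1 and 1 as in the reference SOLOv2 implementation (add_coord_channels is an illustrative helper):

import paddle


def add_coord_channels(feat):
    # feat: [N, C, H, W] -> [N, C + 2, H, W] with normalised x / y coordinate maps.
    n, _, h, w = feat.shape
    x_range = paddle.linspace(-1.0, 1.0, w, dtype="float32")
    y_range = paddle.linspace(-1.0, 1.0, h, dtype="float32")
    y_map, x_map = paddle.meshgrid(y_range, x_range)        # each [H, W]
    x_map = x_map.unsqueeze([0, 1]).expand([n, 1, h, w])
    y_map = y_map.unsqueeze([0, 1]).expand([n, 1, h, w])
    return paddle.concat([feat, x_map, y_map], axis=1)


coord_feat = add_coord_channels(paddle.randn([1, 256, 25, 34]))   # -> [1, 258, 25, 34]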
float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_110, parameter_22, parameter_23 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_90 = paddle._C_ops.relu(group_norm_123) + del group_norm_123 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_111 = paddle._C_ops.conv2d( + relu_90, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21, relu_90 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_126, group_norm_127, group_norm_128 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_111, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_111, parameter_19, parameter_20 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_91 = paddle._C_ops.relu(group_norm_126) + del group_norm_126 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + relu_91, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_18, relu_91 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_129, group_norm_130, group_norm_131 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_112, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_112, parameter_16, parameter_17 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_92 = paddle._C_ops.relu(group_norm_129) + del group_norm_129 + + # pd_op.conv2d: (1x256x12x12xf32) <- (1x512x12x12xf32, 256x512x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + relu_92, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15, relu_92 + + # pd_op.add: (1x256x12x12xf32) <- (1x256x12x12xf32, 1x256x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_113, reshape_8) + del conv2d_113, reshape_8 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x256x12x12xf32, 512x256x3x3xf32) + conv2d_114 = paddle._C_ops.conv2d( + slice_13, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, slice_13 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_132, group_norm_133, group_norm_134 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_114, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_114, parameter_11, parameter_12 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_93 = paddle._C_ops.relu(group_norm_132) + del group_norm_132 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_115 = paddle._C_ops.conv2d( + relu_93, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10, relu_93 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_135, group_norm_136, group_norm_137 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_115, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_115, parameter_8, parameter_9 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_94 = 
paddle._C_ops.relu(group_norm_135) + del group_norm_135 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + relu_94, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7, relu_94 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_138, group_norm_139, group_norm_140 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_116, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_116, parameter_5, parameter_6 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_95 = paddle._C_ops.relu(group_norm_138) + del group_norm_138 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_117 = paddle._C_ops.conv2d( + relu_95, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4, relu_95 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_141, group_norm_142, group_norm_143 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_117, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_117, parameter_2, parameter_3 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_96 = paddle._C_ops.relu(group_norm_141) + del group_norm_141 + + # pd_op.conv2d: (1x2x12x12xf32) <- (1x512x12x12xf32, 2x512x3x3xf32) + conv2d_118 = paddle._C_ops.conv2d( + relu_96, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, relu_96 + + # pd_op.add: (1x2x12x12xf32) <- (1x2x12x12xf32, 1x2x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_118, reshape_9) + del conv2d_118, reshape_9 + + # pd_op.sigmoid: (1x2x12x12xf32) <- (1x2x12x12xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_39) + del add_39 + + # pd_op.pool2d: (1x2x13x13xf32) <- (1x2x12x12xf32, 2xi64) + pool2d_6 = paddle._C_ops.pool2d( + sigmoid_4, + full_int_array_9, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_9 + + # pd_op.slice: (1x2x12x12xf32) <- (1x2x13x13xf32, 2xi64, 2xi64) + slice_14 = paddle._C_ops.slice( + pool2d_6, [2, 3], full_int_array_10, full_int_array_11, [1, 1], [] + ) + del full_int_array_10, full_int_array_11, pool2d_6 + + # pd_op.equal: (1x2x12x12xb) <- (1x2x12x12xf32, 1x2x12x12xf32) + equal_4 = paddle._C_ops.equal(slice_14, sigmoid_4) + del slice_14 + + # pd_op.cast: (1x2x12x12xf32) <- (1x2x12x12xb) + cast_4 = paddle._C_ops.cast(equal_4, paddle.float32) + del equal_4 + + # pd_op.multiply: (1x2x12x12xf32) <- (1x2x12x12xf32, 1x2x12x12xf32) + multiply_4 = paddle._C_ops.multiply(sigmoid_4, cast_4) + del cast_4, sigmoid_4 + + # pd_op.transpose: (1x12x12x2xf32) <- (1x2x12x12xf32) + transpose_4 = paddle._C_ops.transpose(multiply_4, [0, 2, 3, 1]) + del multiply_4 + + return ( + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + add_0, + add_1, + add_2, + add_3, + add_4, + relu_0, + ) diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_0/weight_meta.py new file mode 100644 index 000000000..f4353b95a --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_0/weight_meta.py @@ -0,0 +1,3603 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [2] + dtype = "float32" + min_val = float("0") 
+ max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [2, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0349261") + max_val = float("0.0333573") + mean = float("0.000902387") + std = float("0.0102302") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.164361") + max_val = float("7.56304e-05") + mean = float("-0.0726798") + std = float("0.0268363") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("0.819882") + max_val = float("1.07511") + mean = float("0.968146") + std = float("0.0247266") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0714306") + max_val = float("0.0742621") + mean = float("-0.000840419") + std = float("0.00699752") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512] + dtype = "float32" + min_val = float("-0.140229") + max_val = float("0.0111035") + mean = float("-0.0178543") + std = float("0.0140589") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + min_val = float("0.85017") + max_val = float("1.11075") + mean = float("0.999578") + std = float("0.0235032") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0881653") + max_val = float("0.0991429") + mean = float("-0.000339617") + std = float("0.00690231") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-0.133168") + max_val = float("0.0111188") + mean = float("-0.0266653") + std = float("0.0165038") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [512] + dtype = "float32" + min_val = float("0.896954") + max_val = float("1.12795") + mean = float("0.999335") + std = float("0.0224704") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0769052") + max_val = float("0.0728452") + mean = float("-0.000339189") + std = float("0.00696801") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512] + dtype = "float32" + min_val = float("-0.0838783") + max_val = float("-0.0114142") + mean = float("-0.0344879") + std = float("0.0115531") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("0.930605") + max_val = float("1.07865") + mean = float("0.999267") + std = float("0.0179719") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0528755") + max_val = float("0.0542216") + mean = float("4.81767e-06") + std = float("0.00754803") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [256] + dtype = "float32" + min_val = float("-0.108389") + max_val = float("0.0287567") + mean = float("-0.00584688") + std = float("0.012664") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [256, 512, 3, 3] + dtype = "float32" + min_val = 
float("-0.114531") + max_val = float("0.130451") + mean = float("-0.00108967") + std = float("0.0089358") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.167083") + max_val = float("-1.2406e-05") + mean = float("-0.0479069") + std = float("0.0268731") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512] + dtype = "float32" + min_val = float("0.527187") + max_val = float("1.07287") + mean = float("0.977065") + std = float("0.0410534") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.077059") + max_val = float("0.0720572") + mean = float("-0.000560833") + std = float("0.00724195") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [512] + dtype = "float32" + min_val = float("-0.109315") + max_val = float("0.0155806") + mean = float("-0.0154887") + std = float("0.0192258") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512] + dtype = "float32" + min_val = float("0.56948") + max_val = float("1.14345") + mean = float("0.998843") + std = float("0.0449128") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0796294") + max_val = float("0.0873741") + mean = float("-0.000299688") + std = float("0.00679602") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [512] + dtype = "float32" + min_val = float("-0.140072") + max_val = float("0.00863622") + mean = float("-0.0211956") + std = float("0.0193657") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [512] + dtype = "float32" + min_val = float("0.447613") + max_val = float("1.13217") + mean = float("0.998482") + std = float("0.0517255") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0713139") + max_val = float("0.0662423") + mean = float("-0.000199088") + std = float("0.00685973") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [512] + dtype = "float32" + min_val = float("-0.1152") + max_val = float("-0.00172203") + mean = float("-0.0323252") + std = float("0.0146714") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [512] + dtype = "float32" + min_val = float("0.890313") + max_val = float("1.1007") + mean = float("0.999208") + std = float("0.0232561") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512, 258, 3, 3] + dtype = "float32" + min_val = float("-0.177044") + max_val = float("0.134197") + mean = float("1.25615e-05") + std = float("0.00795031") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-0.260234") + max_val = float("0.121812") + mean = float("-0.0681893") + std = float("0.0685236") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.363452") + max_val = float("1.10496") + mean = float("0.973592") + std = float("0.0690278") + data = None + + +class Program_weight_tensor_parameter_30: + name = 
"parameter_30" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.235533") + max_val = float("0.241761") + mean = float("-0.000842845") + std = float("0.0365451") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [128] + dtype = "float32" + min_val = float("-0.118527") + max_val = float("0.0569759") + mean = float("-0.0131514") + std = float("0.0280279") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [128] + dtype = "float32" + min_val = float("0.836141") + max_val = float("1.10658") + mean = float("0.995076") + std = float("0.0398035") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0902413") + max_val = float("0.109485") + mean = float("0.000110607") + std = float("0.0100809") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [128] + dtype = "float32" + min_val = float("-0.118766") + max_val = float("0.0647619") + mean = float("-0.0126035") + std = float("0.0311929") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [128] + dtype = "float32" + min_val = float("0.807154") + max_val = float("1.07147") + mean = float("0.999209") + std = float("0.0474714") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.092151") + max_val = float("0.0931281") + mean = float("5.64221e-05") + std = float("0.0106764") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [128] + dtype = "float32" + min_val = float("-0.183982") + max_val = float("0.158289") + mean = float("-0.0193848") + std = float("0.0464484") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [128] + dtype = "float32" + min_val = float("0.819542") + max_val = float("1.06033") + mean = float("0.998563") + std = float("0.039962") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [128, 258, 3, 3] + dtype = "float32" + min_val = float("-0.335542") + max_val = float("0.260022") + mean = float("5.1022e-05") + std = float("0.0102759") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [128] + dtype = "float32" + min_val = float("-0.130034") + max_val = float("0.0416465") + mean = float("-0.0184565") + std = float("0.0194914") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [128] + dtype = "float32" + min_val = float("0.532729") + max_val = float("1.06613") + mean = float("1.00366") + std = float("0.0508603") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0706972") + max_val = float("0.0714813") + mean = float("-0.000323506") + std = float("0.0085216") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [128] + dtype = "float32" + min_val = float("-0.0981304") + max_val = float("0.00684915") + mean = float("-0.0175423") + std = float("0.016073") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [128] + dtype = "float32" + min_val = float("0.796152") + max_val = float("1.04458") + mean = float("0.999206") + std = float("0.034254") + data 
= None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [128, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0437951") + max_val = float("0.047386") + mean = float("-5.80159e-05") + std = float("0.00748989") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [128] + dtype = "float32" + min_val = float("-0.128166") + max_val = float("0.0556366") + mean = float("-0.0386808") + std = float("0.022891") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [128] + dtype = "float32" + min_val = float("0.534046") + max_val = float("1.10631") + mean = float("0.990947") + std = float("0.0580826") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [128, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0504267") + max_val = float("0.0497977") + mean = float("4.68237e-06") + std = float("0.00778869") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [128] + dtype = "float32" + min_val = float("-0.135621") + max_val = float("-0.00540058") + mean = float("-0.0603129") + std = float("0.0256372") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [128] + dtype = "float32" + min_val = float("0.885653") + max_val = float("1.18737") + mean = float("1.0037") + std = float("0.0469978") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [128, 256, 3, 3] + dtype = "float32" + min_val = float("-0.051786") + max_val = float("0.0476963") + mean = float("-4.17787e-05") + std = float("0.00742553") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256] + dtype = "float32" + min_val = float("-0.140687") + max_val = float("0.0875958") + mean = float("0.00247056") + std = float("0.035212") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0449519") + max_val = float("0.0465913") + mean = float("8.7281e-06") + std = float("0.012841") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [256] + dtype = "float32" + min_val = float("-0.0684487") + max_val = float("0.0643653") + mean = float("0.00163392") + std = float("0.0261378") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0372709") + max_val = float("0.0378098") + mean = float("-7.22823e-06") + std = float("0.0122174") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [256] + dtype = "float32" + min_val = float("-0.0925719") + max_val = float("0.0978216") + mean = float("-0.00319254") + std = float("0.0287798") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0363277") + max_val = float("0.0374825") + mean = float("1.40615e-05") + std = float("0.0121226") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-0.0806452") + max_val = float("0.0952202") + mean = float("-0.00327234") + std = float("0.0295771") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val 
= float("-0.0385173") + max_val = float("0.0416478") + mean = float("-1.25885e-05") + std = float("0.0122309") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("-0.119148") + max_val = float("0.148317") + mean = float("0.00657404") + std = float("0.0392118") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [256, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.0718746") + max_val = float("0.0765018") + mean = float("-6.92067e-05") + std = float("0.0142115") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256] + dtype = "float32" + min_val = float("-0.0679942") + max_val = float("0.07887") + mean = float("0.00222854") + std = float("0.0266624") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.110228") + max_val = float("0.101324") + mean = float("-2.15317e-05") + std = float("0.0188882") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("-0.0655844") + max_val = float("0.0742491") + mean = float("0.000482213") + std = float("0.0263958") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [256, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0781448") + max_val = float("0.0743354") + mean = float("-2.91099e-05") + std = float("0.0244321") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [256] + dtype = "float32" + min_val = float("-0.072923") + max_val = float("0.0701023") + mean = float("-0.000528406") + std = float("0.0263263") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.0820079") + max_val = float("0.0776173") + mean = float("1.1467e-05") + std = float("0.0344443") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [2048] + dtype = "float32" + min_val = float("-0.0151986") + max_val = float("0.548302") + mean = float("0.184979") + std = float("0.0543057") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [2048] + dtype = "float32" + min_val = float("0.0697239") + max_val = float("2.1928") + mean = float("0.84625") + std = float("0.221893") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [2048] + dtype = "float32" + min_val = float("1.81634e-05") + max_val = float("0.00461601") + mean = float("0.000837051") + std = float("0.00036967") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [2048] + dtype = "float32" + min_val = float("-0.0611881") + max_val = float("0.0309583") + mean = float("-0.0166939") + std = float("0.00834277") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.175753") + max_val = float("0.321267") + mean = float("-0.00236302") + std = float("0.0103289") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [512] + dtype = "float32" + min_val = float("-0.256637") + max_val = float("0.509206") + mean = float("-0.0807075") + std = float("0.0607222") + data = None + + +class 
Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [512] + dtype = "float32" + min_val = float("0.142703") + max_val = float("0.29885") + mean = float("0.202958") + std = float("0.0283985") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [512] + dtype = "float32" + min_val = float("0.00782461") + max_val = float("0.0362781") + mean = float("0.0117981") + std = float("0.00204555") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [512] + dtype = "float32" + min_val = float("-0.213289") + max_val = float("0.00238845") + mean = float("-0.0920613") + std = float("0.0166986") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.154763") + max_val = float("0.151896") + mean = float("-0.00108184") + std = float("0.00814588") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [512] + dtype = "float32" + min_val = float("-0.37403") + max_val = float("0.119353") + mean = float("-0.139679") + std = float("0.0581028") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [512] + dtype = "float32" + min_val = float("0.112228") + max_val = float("0.465189") + mean = float("0.214344") + std = float("0.0285329") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [512] + dtype = "float32" + min_val = float("0.00368759") + max_val = float("0.066979") + mean = float("0.00530424") + std = float("0.00301387") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [512] + dtype = "float32" + min_val = float("-0.165443") + max_val = float("0.174839") + mean = float("-0.0359508") + std = float("0.0213069") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [512, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.311529") + max_val = float("0.30136") + mean = float("-0.00111762") + std = float("0.0140264") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [2048] + dtype = "float32" + min_val = float("-0.344631") + max_val = float("0.401359") + mean = float("-0.158227") + std = float("0.032536") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [2048] + dtype = "float32" + min_val = float("-0.0556075") + max_val = float("0.217378") + mean = float("0.13018") + std = float("0.0219564") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [2048] + dtype = "float32" + min_val = float("0.000374117") + max_val = float("0.00489936") + mean = float("0.000888948") + std = float("0.000281992") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [2048] + dtype = "float32" + min_val = float("-0.143769") + max_val = float("0.0820542") + mean = float("-0.0229567") + std = float("0.0142941") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.262652") + max_val = float("0.272915") + mean = float("-0.00157969") + std = float("0.0120553") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [512] + dtype = "float32" + min_val = float("-0.281487") + max_val = float("0.181162") + mean = 
float("-0.130536") + std = float("0.037587") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [512] + dtype = "float32" + min_val = float("0.103009") + max_val = float("0.261748") + mean = float("0.192296") + std = float("0.0182279") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [512] + dtype = "float32" + min_val = float("0.00667127") + max_val = float("0.0496363") + mean = float("0.0113008") + std = float("0.00321697") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [512] + dtype = "float32" + min_val = float("-0.333268") + max_val = float("0.452042") + mean = float("-0.0658262") + std = float("0.0511294") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.16425") + max_val = float("0.144161") + mean = float("-0.000523042") + std = float("0.00887156") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [512] + dtype = "float32" + min_val = float("-0.204657") + max_val = float("0.214884") + mean = float("-0.112746") + std = float("0.0407913") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [512] + dtype = "float32" + min_val = float("0.110153") + max_val = float("0.23698") + mean = float("0.182554") + std = float("0.0168829") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [512] + dtype = "float32" + min_val = float("0.00384403") + max_val = float("0.0267019") + mean = float("0.00550499") + std = float("0.00140394") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [512] + dtype = "float32" + min_val = float("-0.156824") + max_val = float("0.213265") + mean = float("-0.0508984") + std = float("0.0216917") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [512, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.199091") + max_val = float("0.197033") + mean = float("-0.00103909") + std = float("0.0111277") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [2048] + dtype = "float32" + min_val = float("-0.175843") + max_val = float("0.0839479") + mean = float("-0.0886424") + std = float("0.0238497") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [2048] + dtype = "float32" + min_val = float("0.0445195") + max_val = float("0.224302") + mean = float("0.11517") + std = float("0.0171066") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [2048] + dtype = "float32" + min_val = float("0.00110241") + max_val = float("0.0211157") + mean = float("0.00255611") + std = float("0.00108592") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [2048] + dtype = "float32" + min_val = float("-0.149371") + max_val = float("0.261173") + mean = float("-0.0169219") + std = float("0.0272638") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [2048, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.230573") + max_val = float("0.336078") + mean = float("-0.000426473") + std = float("0.00875135") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [2048] + dtype = "float32" + 
min_val = float("-0.175843") + max_val = float("0.0839479") + mean = float("-0.0886424") + std = float("0.0238497") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [2048] + dtype = "float32" + min_val = float("-0.0299624") + max_val = float("0.247374") + mean = float("0.152015") + std = float("0.0269642") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [2048] + dtype = "float32" + min_val = float("0.000316897") + max_val = float("0.0119787") + mean = float("0.00184257") + std = float("0.000489207") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [2048] + dtype = "float32" + min_val = float("-0.119339") + max_val = float("0.171807") + mean = float("-0.0363601") + std = float("0.0247316") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.282002") + max_val = float("0.350314") + mean = float("-0.00149403") + std = float("0.0124786") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [512] + dtype = "float32" + min_val = float("-0.166324") + max_val = float("0.333823") + mean = float("-0.0623815") + std = float("0.0473493") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [512] + dtype = "float32" + min_val = float("0.122111") + max_val = float("0.321502") + mean = float("0.177554") + std = float("0.0214185") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [512] + dtype = "float32" + min_val = float("0.00556363") + max_val = float("0.0696198") + mean = float("0.0123156") + std = float("0.00464772") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [512] + dtype = "float32" + min_val = float("-0.114129") + max_val = float("0.215283") + mean = float("-0.0404652") + std = float("0.0316874") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.323367") + max_val = float("0.296343") + mean = float("-0.000394217") + std = float("0.00855277") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [512] + dtype = "float32" + min_val = float("-0.323683") + max_val = float("0.189289") + mean = float("-0.16759") + std = float("0.0548881") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [512] + dtype = "float32" + min_val = float("0.103589") + max_val = float("0.266322") + mean = float("0.202582") + std = float("0.0208949") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [512] + dtype = "float32" + min_val = float("0.00626156") + max_val = float("0.027742") + mean = float("0.0110146") + std = float("0.00269234") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [512] + dtype = "float32" + min_val = float("-0.154333") + max_val = float("0.262058") + mean = float("-0.0482834") + std = float("0.0385199") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [512, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.276323") + max_val = float("0.534223") + mean = float("-0.00104276") + std = float("0.0165542") + data = None + + +class 
Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [1024] + dtype = "float32" + min_val = float("-0.350923") + max_val = float("0.376408") + mean = float("-0.0871135") + std = float("0.0697404") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [1024] + dtype = "float32" + min_val = float("-0.0704466") + max_val = float("0.260302") + mean = float("0.0752669") + std = float("0.0561504") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [1024] + dtype = "float32" + min_val = float("7.2606e-05") + max_val = float("0.0237334") + mean = float("0.000963188") + std = float("0.00100983") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [1024] + dtype = "float32" + min_val = float("-0.14425") + max_val = float("0.160504") + mean = float("-0.0189685") + std = float("0.0237042") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.53185") + max_val = float("0.41753") + mean = float("-0.00206602") + std = float("0.0147182") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [256] + dtype = "float32" + min_val = float("-0.21119") + max_val = float("0.2076") + mean = float("-0.0769485") + std = float("0.0604492") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [256] + dtype = "float32" + min_val = float("0.11773") + max_val = float("0.375442") + mean = float("0.186314") + std = float("0.0296775") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [256] + dtype = "float32" + min_val = float("0.0034165") + max_val = float("0.0334953") + mean = float("0.00764904") + std = float("0.00343798") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [256] + dtype = "float32" + min_val = float("-0.17006") + max_val = float("0.139317") + mean = float("-0.0394256") + std = float("0.0482774") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.237766") + max_val = float("0.184697") + mean = float("-0.000605493") + std = float("0.0118749") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [256] + dtype = "float32" + min_val = float("-0.311719") + max_val = float("0.1238") + mean = float("-0.11228") + std = float("0.0715737") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [256] + dtype = "float32" + min_val = float("0.0999653") + max_val = float("0.257498") + mean = float("0.168535") + std = float("0.027973") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [256] + dtype = "float32" + min_val = float("0.00541324") + max_val = float("0.0401863") + mean = float("0.00971417") + std = float("0.00366722") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [256] + dtype = "float32" + min_val = float("-0.217744") + max_val = float("0.149159") + mean = float("-0.0379885") + std = float("0.0499749") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.232746") + max_val = float("0.338805") + 
mean = float("-0.000892504") + std = float("0.0154701") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [1024] + dtype = "float32" + min_val = float("-0.257451") + max_val = float("0.105004") + mean = float("-0.0666821") + std = float("0.0487657") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [1024] + dtype = "float32" + min_val = float("-0.0934567") + max_val = float("0.23334") + mean = float("0.0748934") + std = float("0.0447769") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [1024] + dtype = "float32" + min_val = float("3.37401e-05") + max_val = float("0.00494006") + mean = float("0.000767935") + std = float("0.000555318") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [1024] + dtype = "float32" + min_val = float("-0.0803722") + max_val = float("0.095245") + mean = float("-0.0123875") + std = float("0.0179512") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.227086") + max_val = float("0.323502") + mean = float("-0.00118155") + std = float("0.0144309") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [256] + dtype = "float32" + min_val = float("-0.230738") + max_val = float("0.139429") + mean = float("-0.0745415") + std = float("0.0720042") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [256] + dtype = "float32" + min_val = float("0.0986424") + max_val = float("0.352413") + mean = float("0.179668") + std = float("0.0297447") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [256] + dtype = "float32" + min_val = float("0.00330071") + max_val = float("0.0279471") + mean = float("0.00664013") + std = float("0.00278299") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [256] + dtype = "float32" + min_val = float("-0.162212") + max_val = float("0.194153") + mean = float("-0.0413669") + std = float("0.0513468") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.151922") + max_val = float("0.211359") + mean = float("-0.000677904") + std = float("0.0117839") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [256] + dtype = "float32" + min_val = float("-0.30878") + max_val = float("0.117998") + mean = float("-0.0978964") + std = float("0.0730731") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [256] + dtype = "float32" + min_val = float("0.091752") + max_val = float("0.259828") + mean = float("0.161612") + std = float("0.0292353") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [256] + dtype = "float32" + min_val = float("0.00507588") + max_val = float("0.0262703") + mean = float("0.0109557") + std = float("0.00324557") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [256] + dtype = "float32" + min_val = float("-0.253355") + max_val = float("0.121652") + mean = float("-0.0542848") + std = float("0.0542532") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [256, 
1024, 1, 1] + dtype = "float32" + min_val = float("-0.181943") + max_val = float("0.356588") + mean = float("-0.00108068") + std = float("0.0154561") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [1024] + dtype = "float32" + min_val = float("-0.277434") + max_val = float("0.146613") + mean = float("-0.0570223") + std = float("0.0518947") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [1024] + dtype = "float32" + min_val = float("-0.0669331") + max_val = float("0.263999") + mean = float("0.0731652") + std = float("0.054519") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [1024] + dtype = "float32" + min_val = float("3.75198e-05") + max_val = float("0.00687895") + mean = float("0.000898002") + std = float("0.000696388") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [1024] + dtype = "float32" + min_val = float("-0.0982832") + max_val = float("0.0788582") + mean = float("-0.00935298") + std = float("0.0214394") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.216755") + max_val = float("0.412141") + mean = float("-0.00100756") + std = float("0.0154782") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [256] + dtype = "float32" + min_val = float("-0.296816") + max_val = float("0.203979") + mean = float("-0.0590104") + std = float("0.0837132") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [256] + dtype = "float32" + min_val = float("0.107435") + max_val = float("0.39755") + mean = float("0.178744") + std = float("0.0314985") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [256] + dtype = "float32" + min_val = float("0.00385646") + max_val = float("0.0233426") + mean = float("0.00855859") + std = float("0.00309764") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [256] + dtype = "float32" + min_val = float("-0.169503") + max_val = float("0.258877") + mean = float("-0.0370232") + std = float("0.0546523") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.156506") + max_val = float("0.221218") + mean = float("-0.000658629") + std = float("0.0126227") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [256] + dtype = "float32" + min_val = float("-0.213348") + max_val = float("0.207989") + mean = float("-0.0730882") + std = float("0.0598612") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [256] + dtype = "float32" + min_val = float("0.0886164") + max_val = float("0.234968") + mean = float("0.155301") + std = float("0.022324") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [256] + dtype = "float32" + min_val = float("0.00512839") + max_val = float("0.0486237") + mean = float("0.0113862") + std = float("0.0037092") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [256] + dtype = "float32" + min_val = float("-0.186114") + max_val = float("0.104412") + mean = float("-0.0347102") + std = float("0.0492274") + data = 
None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.203824") + max_val = float("0.330751") + mean = float("-0.000615627") + std = float("0.0148675") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [1024] + dtype = "float32" + min_val = float("-0.250531") + max_val = float("0.212052") + mean = float("-0.0534412") + std = float("0.0507326") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [1024] + dtype = "float32" + min_val = float("-0.0906585") + max_val = float("0.259754") + mean = float("0.0702987") + std = float("0.0527645") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00907432") + mean = float("0.000964403") + std = float("0.000856945") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [1024] + dtype = "float32" + min_val = float("-0.167247") + max_val = float("0.0922496") + mean = float("-0.0113321") + std = float("0.0239985") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.322723") + max_val = float("0.358359") + mean = float("-0.00129655") + std = float("0.0149065") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [256] + dtype = "float32" + min_val = float("-0.35206") + max_val = float("0.241553") + mean = float("-0.0517875") + std = float("0.0689942") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [256] + dtype = "float32" + min_val = float("0.109761") + max_val = float("0.390957") + mean = float("0.176878") + std = float("0.0288588") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [256] + dtype = "float32" + min_val = float("0.00447337") + max_val = float("0.0340642") + mean = float("0.0105546") + std = float("0.00364562") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [256] + dtype = "float32" + min_val = float("-0.27309") + max_val = float("0.374102") + mean = float("-0.0352638") + std = float("0.0589054") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.143202") + max_val = float("0.207459") + mean = float("-0.000476604") + std = float("0.0117463") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [256] + dtype = "float32" + min_val = float("-0.244607") + max_val = float("0.0912815") + mean = float("-0.0498462") + std = float("0.0625104") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [256] + dtype = "float32" + min_val = float("0.0940088") + max_val = float("0.275561") + mean = float("0.150497") + std = float("0.0251361") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [256] + dtype = "float32" + min_val = float("0.00569095") + max_val = float("0.0330212") + mean = float("0.0130381") + std = float("0.00433576") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [256] + dtype = "float32" + min_val = float("-0.216939") 
+ max_val = float("0.154978") + mean = float("-0.0231354") + std = float("0.0541784") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.194415") + max_val = float("0.324382") + mean = float("-0.000344136") + std = float("0.0136937") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [1024] + dtype = "float32" + min_val = float("-0.28682") + max_val = float("0.140438") + mean = float("-0.0416996") + std = float("0.0479403") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [1024] + dtype = "float32" + min_val = float("-0.095438") + max_val = float("0.4389") + mean = float("0.0744024") + std = float("0.0544762") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00716343") + mean = float("0.00103639") + std = float("0.000898989") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [1024] + dtype = "float32" + min_val = float("-0.115913") + max_val = float("0.0782403") + mean = float("-0.00319781") + std = float("0.0211123") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.237181") + max_val = float("0.419075") + mean = float("-0.000910016") + std = float("0.0156151") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [256] + dtype = "float32" + min_val = float("-0.375338") + max_val = float("0.272544") + mean = float("-0.0647334") + std = float("0.0809183") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [256] + dtype = "float32" + min_val = float("0.112331") + max_val = float("0.387223") + mean = float("0.184195") + std = float("0.0296639") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [256] + dtype = "float32" + min_val = float("0.00590442") + max_val = float("0.0544441") + mean = float("0.0133796") + std = float("0.00533314") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [256] + dtype = "float32" + min_val = float("-0.703403") + max_val = float("0.631713") + mean = float("-0.0169264") + std = float("0.108926") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.278981") + max_val = float("0.229707") + mean = float("-0.000408512") + std = float("0.0122667") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [256] + dtype = "float32" + min_val = float("-0.206504") + max_val = float("0.339113") + mean = float("-0.0332304") + std = float("0.072116") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [256] + dtype = "float32" + min_val = float("0.0505531") + max_val = float("0.226288") + mean = float("0.138144") + std = float("0.0261678") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [256] + dtype = "float32" + min_val = float("0.00534171") + max_val = float("0.0374661") + mean = float("0.0144267") + std = float("0.00632604") + data = None + + +class Program_weight_tensor_parameter_191: 
+ name = "parameter_191" + shape = [256] + dtype = "float32" + min_val = float("-0.874778") + max_val = float("0.271624") + mean = float("-0.0408335") + std = float("0.0907325") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.147456") + max_val = float("0.234876") + mean = float("-0.000673983") + std = float("0.0122662") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [1024] + dtype = "float32" + min_val = float("-0.129305") + max_val = float("0.139271") + mean = float("-0.0098929") + std = float("0.0358316") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [1024] + dtype = "float32" + min_val = float("-0.0782405") + max_val = float("0.297465") + mean = float("0.0887483") + std = float("0.058831") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0293973") + mean = float("0.00517825") + std = float("0.00449503") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [1024] + dtype = "float32" + min_val = float("-0.212177") + max_val = float("0.268765") + mean = float("0.00619636") + std = float("0.049949") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [1024, 512, 1, 1] + dtype = "float32" + min_val = float("-0.294114") + max_val = float("0.29535") + mean = float("0.00020782") + std = float("0.0120761") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [1024] + dtype = "float32" + min_val = float("-0.129305") + max_val = float("0.139271") + mean = float("-0.0098929") + std = float("0.0358316") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [1024] + dtype = "float32" + min_val = float("-0.112497") + max_val = float("0.306851") + mean = float("0.115037") + std = float("0.0726397") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0103119") + mean = float("0.00298727") + std = float("0.00200745") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [1024] + dtype = "float32" + min_val = float("-0.274559") + max_val = float("0.262423") + mean = float("-0.00688684") + std = float("0.0526932") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.270319") + max_val = float("0.320896") + mean = float("-0.000243514") + std = float("0.0189919") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [256] + dtype = "float32" + min_val = float("-0.169522") + max_val = float("0.24108") + mean = float("0.0337387") + std = float("0.0893673") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [256] + dtype = "float32" + min_val = float("0.134911") + max_val = float("0.280871") + mean = float("0.182639") + std = float("0.0244767") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [256] + dtype = "float32" + min_val = float("0.00830091") + max_val = float("0.0599813") + mean = 
float("0.0218677") + std = float("0.00980468") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [256] + dtype = "float32" + min_val = float("-0.334949") + max_val = float("0.127422") + mean = float("-0.0167348") + std = float("0.0636368") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.161917") + max_val = float("0.184461") + mean = float("-0.000447478") + std = float("0.0133152") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [256] + dtype = "float32" + min_val = float("-0.37354") + max_val = float("0.130682") + mean = float("-0.122896") + std = float("0.0737988") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [256] + dtype = "float32" + min_val = float("0.131707") + max_val = float("0.306188") + mean = float("0.218482") + std = float("0.029329") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [256] + dtype = "float32" + min_val = float("0.00841896") + max_val = float("0.0413624") + mean = float("0.0191254") + std = float("0.00583118") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [256] + dtype = "float32" + min_val = float("-0.320503") + max_val = float("0.329766") + mean = float("-0.0425911") + std = float("0.0888525") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256, 512, 1, 1] + dtype = "float32" + min_val = float("-0.255429") + max_val = float("0.340427") + mean = float("-0.00169888") + std = float("0.026346") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [512] + dtype = "float32" + min_val = float("-0.265757") + max_val = float("0.18098") + mean = float("-0.0551155") + std = float("0.0710863") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [512] + dtype = "float32" + min_val = float("-0.118525") + max_val = float("0.265331") + mean = float("0.0605188") + std = float("0.0747383") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [512] + dtype = "float32" + min_val = float("6.90463e-05") + max_val = float("0.00491621") + mean = float("0.000851993") + std = float("0.000814133") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [512] + dtype = "float32" + min_val = float("-0.118861") + max_val = float("0.0793485") + mean = float("-0.00135119") + std = float("0.0222945") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.176259") + max_val = float("0.319649") + mean = float("-0.00118932") + std = float("0.0219026") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [128] + dtype = "float32" + min_val = float("-0.232824") + max_val = float("0.252755") + mean = float("-0.0608347") + std = float("0.0935444") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [128] + dtype = "float32" + min_val = float("0.118575") + max_val = float("0.350103") + mean = float("0.195164") + std = float("0.0318741") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [128] + dtype 
= "float32" + min_val = float("0.00369018") + max_val = float("0.0308134") + mean = float("0.011865") + std = float("0.00405604") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [128] + dtype = "float32" + min_val = float("-0.258314") + max_val = float("0.31528") + mean = float("-0.0349642") + std = float("0.0634947") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.14727") + max_val = float("0.174497") + mean = float("-0.000975128") + std = float("0.019049") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [128] + dtype = "float32" + min_val = float("-0.217196") + max_val = float("0.15299") + mean = float("-0.0481659") + std = float("0.0730298") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [128] + dtype = "float32" + min_val = float("0.0919349") + max_val = float("0.226493") + mean = float("0.166581") + std = float("0.0236465") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [128] + dtype = "float32" + min_val = float("0.00814274") + max_val = float("0.031877") + mean = float("0.0148642") + std = float("0.00446453") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [128] + dtype = "float32" + min_val = float("-0.257526") + max_val = float("0.20432") + mean = float("-0.0229181") + std = float("0.0807673") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.170298") + max_val = float("0.232714") + mean = float("-0.000991681") + std = float("0.0222174") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [512] + dtype = "float32" + min_val = float("-0.237422") + max_val = float("0.21759") + mean = float("-0.0445364") + std = float("0.0633277") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [512] + dtype = "float32" + min_val = float("-0.139337") + max_val = float("0.250169") + mean = float("0.0669814") + std = float("0.0755184") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00559176") + mean = float("0.00104555") + std = float("0.00109474") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("-0.117879") + max_val = float("0.141154") + mean = float("0.00137843") + std = float("0.0242038") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.231102") + max_val = float("0.281805") + mean = float("-0.000255305") + std = float("0.0217553") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [128] + dtype = "float32" + min_val = float("-0.242849") + max_val = float("0.273584") + mean = float("-0.0109895") + std = float("0.0940812") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [128] + dtype = "float32" + min_val = float("0.105254") + max_val = float("0.344767") + mean = float("0.174253") + std = float("0.0324788") + data = None + + +class 
Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [128] + dtype = "float32" + min_val = float("0.00364882") + max_val = float("0.0310589") + mean = float("0.013433") + std = float("0.00481079") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [128] + dtype = "float32" + min_val = float("-0.254192") + max_val = float("0.221824") + mean = float("-0.0185789") + std = float("0.0744171") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.171269") + max_val = float("0.17266") + mean = float("-0.00050048") + std = float("0.0174446") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [128] + dtype = "float32" + min_val = float("-0.193408") + max_val = float("0.168849") + mean = float("-0.0177396") + std = float("0.074171") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [128] + dtype = "float32" + min_val = float("0.105105") + max_val = float("0.242827") + mean = float("0.157451") + std = float("0.0271128") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [128] + dtype = "float32" + min_val = float("0.00566029") + max_val = float("0.05857") + mean = float("0.0163651") + std = float("0.0072262") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [128] + dtype = "float32" + min_val = float("-0.218906") + max_val = float("0.380567") + mean = float("-0.0294434") + std = float("0.0838679") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.159413") + max_val = float("0.218365") + mean = float("-0.000763743") + std = float("0.0201366") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [512] + dtype = "float32" + min_val = float("-0.24628") + max_val = float("0.217708") + mean = float("-0.0311961") + std = float("0.0633679") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [512] + dtype = "float32" + min_val = float("-0.192908") + max_val = float("0.291095") + mean = float("0.055776") + std = float("0.0953223") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00790918") + mean = float("0.00114117") + std = float("0.00170112") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [512] + dtype = "float32" + min_val = float("-0.127209") + max_val = float("0.131354") + mean = float("-0.00697471") + std = float("0.0238302") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.322905") + max_val = float("0.314345") + mean = float("-0.000768445") + std = float("0.0215683") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [128] + dtype = "float32" + min_val = float("-0.361119") + max_val = float("0.229816") + mean = float("-0.000110899") + std = float("0.0886123") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [128] + dtype = "float32" + min_val = float("0.108785") + max_val = 
float("0.344712") + mean = float("0.163661") + std = float("0.0268447") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [128] + dtype = "float32" + min_val = float("0.0020564") + max_val = float("0.0629993") + mean = float("0.0150465") + std = float("0.0100431") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [128] + dtype = "float32" + min_val = float("-0.881857") + max_val = float("0.354848") + mean = float("0.0254079") + std = float("0.153673") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.243456") + max_val = float("0.330213") + mean = float("0.000152746") + std = float("0.0166181") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [128] + dtype = "float32" + min_val = float("-0.338885") + max_val = float("0.360894") + mean = float("0.00728028") + std = float("0.101298") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [128] + dtype = "float32" + min_val = float("0.0665754") + max_val = float("0.233636") + mean = float("0.134798") + std = float("0.0315315") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [128] + dtype = "float32" + min_val = float("0.00275181") + max_val = float("0.177524") + mean = float("0.0213047") + std = float("0.0241815") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [128] + dtype = "float32" + min_val = float("-0.271882") + max_val = float("0.364487") + mean = float("-0.00787709") + std = float("0.0941561") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.195535") + max_val = float("0.254447") + mean = float("-0.000541883") + std = float("0.017971") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [512] + dtype = "float32" + min_val = float("-0.173999") + max_val = float("0.178778") + mean = float("0.0104332") + std = float("0.0450756") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [512] + dtype = "float32" + min_val = float("-0.0847974") + max_val = float("0.347141") + mean = float("0.104401") + std = float("0.0950331") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0600736") + mean = float("0.00907374") + std = float("0.0107161") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [512] + dtype = "float32" + min_val = float("-0.532972") + max_val = float("0.349244") + mean = float("-0.0147123") + std = float("0.07433") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.411257") + max_val = float("0.34301") + mean = float("0.000118072") + std = float("0.0220944") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [512] + dtype = "float32" + min_val = float("-0.173999") + max_val = float("0.178778") + mean = float("0.0104332") + std = float("0.0450756") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + 
shape = [512] + dtype = "float32" + min_val = float("-0.110676") + max_val = float("0.360044") + mean = float("0.0747975") + std = float("0.0909131") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0224734") + mean = float("0.00195282") + std = float("0.00230705") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [512] + dtype = "float32" + min_val = float("-0.415464") + max_val = float("0.301824") + mean = float("-0.00186618") + std = float("0.0519181") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.306972") + max_val = float("0.36222") + mean = float("-0.000479687") + std = float("0.0255366") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [128] + dtype = "float32" + min_val = float("-0.0749191") + max_val = float("0.383717") + mean = float("0.00992787") + std = float("0.0893128") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [128] + dtype = "float32" + min_val = float("0.151982") + max_val = float("0.258211") + mean = float("0.208953") + std = float("0.0230586") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [128] + dtype = "float32" + min_val = float("0.011571") + max_val = float("0.0270516") + mean = float("0.0174302") + std = float("0.00377445") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [128] + dtype = "float32" + min_val = float("-0.199453") + max_val = float("0.225939") + mean = float("-0.0293134") + std = float("0.052256") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.152539") + max_val = float("0.176493") + mean = float("-0.000482472") + std = float("0.0196659") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [128] + dtype = "float32" + min_val = float("-0.254357") + max_val = float("0.105134") + mean = float("-0.059691") + std = float("0.0926679") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [128] + dtype = "float32" + min_val = float("7.24973e-19") + max_val = float("0.248465") + mean = float("0.173546") + std = float("0.0393691") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [128] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0475623") + mean = float("0.0181743") + std = float("0.00711628") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [128] + dtype = "float32" + min_val = float("-0.268525") + max_val = float("0.132098") + mean = float("-0.0574633") + std = float("0.089508") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [128, 256, 1, 1] + dtype = "float32" + min_val = float("-0.224242") + max_val = float("0.289975") + mean = float("-0.0010173") + std = float("0.0369007") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [256] + dtype = "float32" + min_val = float("-0.164026") + max_val = float("0.206317") + mean = float("-0.00531759") + std = float("0.0546818") + 
data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [256] + dtype = "float32" + min_val = float("-0.255219") + max_val = float("0.302547") + mean = float("0.0429001") + std = float("0.108481") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [256] + dtype = "float32" + min_val = float("7.12898e-05") + max_val = float("0.0083103") + mean = float("0.00137519") + std = float("0.00174094") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [256] + dtype = "float32" + min_val = float("-0.133956") + max_val = float("0.182093") + mean = float("1.89526e-05") + std = float("0.0382032") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.325233") + max_val = float("0.371292") + mean = float("-0.000368685") + std = float("0.0323619") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.259976") + max_val = float("0.260293") + mean = float("-0.000605252") + std = float("0.0317955") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-0.188924") + max_val = float("0.2369") + mean = float("-0.000203353") + std = float("0.0317779") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [256] + dtype = "float32" + min_val = float("-0.177785") + max_val = float("0.192687") + mean = float("-0.00904678") + std = float("0.0582621") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [256] + dtype = "float32" + min_val = float("-0.203582") + max_val = float("0.321081") + mean = float("0.00967383") + std = float("0.0943443") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [256] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00624877") + mean = float("0.000464025") + std = 
float("0.0010426") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [256] + dtype = "float32" + min_val = float("-0.0838559") + max_val = float("0.114342") + mean = float("-0.00431057") + std = float("0.0194668") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.25193") + max_val = float("0.296788") + mean = float("-0.000771138") + std = float("0.0229857") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.277854") + max_val = float("0.463245") + mean = float("-0.000341722") + std = float("0.0229625") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-0.340224") + max_val = float("0.346524") + mean = float("0.00042888") + std = float("0.0242323") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [256] + dtype = "float32" + min_val = float("-0.214949") + max_val = float("0.189687") + mean = float("0.0248474") + std = float("0.0675151") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [256] + dtype = "float32" + min_val = float("-0.140797") + max_val = float("0.44859") + mean = float("0.185639") + std = float("0.128272") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [256] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.353666") + mean = float("0.0397985") + std = float("0.0391881") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [256] + dtype = "float32" + min_val = float("-0.788633") + max_val = float("1.35179") + mean = float("-0.0432466") + std = float("0.244709") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.636505") + max_val = float("0.512807") + mean = float("-0.00145991") + 
std = float("0.0539196") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [256] + dtype = "float32" + min_val = float("-0.214949") + max_val = float("0.189687") + mean = float("0.0248474") + std = float("0.0675151") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [256] + dtype = "float32" + min_val = float("-0.351382") + max_val = float("0.349111") + mean = float("0.100573") + std = float("0.13099") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [256] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0267983") + mean = float("0.00646355") + std = float("0.00705122") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [256] + dtype = "float32" + min_val = float("-0.221928") + max_val = float("0.207279") + mean = float("0.00942316") + std = float("0.0652644") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.416398") + max_val = float("0.445069") + mean = float("0.000454476") + std = float("0.0384569") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.367549") + max_val = float("0.551431") + mean = float("0.000884031") + std = float("0.0322617") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-0.628423") + max_val = float("0.355848") + mean = float("-0.00641673") + std = float("0.0746228") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [64] + dtype = 
"float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [64, 3, 7, 7] + dtype = "float32" + min_val = float("-0.593645") + max_val = float("0.652022") + mean = float("-0.000404981") + std = float("0.108572") + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..ab134e425 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +e5b28d9563e0c8c0b6c97c531d1648c40841384f325ca7d47b87502f67952c8e \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_1/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_1/input_meta.py new file mode 100644 index 000000000..e4575c93b --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_1/input_meta.py @@ -0,0 +1,48 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [127, 200, 232] + dtype = "float32" + max_val = float("1.0") + mean = float("0.245742") + std = float("0.400336") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [127, 200, 232] + dtype = "float32" + max_val = float("1.0") + mean = float("0.239167") + std = float("0.426575") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [127] + dtype = "int64" + min_val = 0 + max_val = 1 + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [127] + dtype = "float32" + max_val = float("0.741376") + mean = float("0.159095") + std = float("0.136759") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [127] + dtype = "float32" + min_val = float("12.0") + max_val = float("46400.0") + mean = float("11097.4") + std = float("10564.6") + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_1/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_1/model.py new file mode 100644 index 000000000..f133c6ffa --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_1/model.py @@ -0,0 +1,321 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.argsort: (127xf32, 127xi64) <- (127xf32) + argsort_0, argsort_1 = (lambda x, f: f(x))( + paddle._C_ops.argsort(data_3, -1, True, False), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (127x200x232xf32) <- (127x200x232xf32, 127xi64, 1xi32) + gather_3 = paddle._C_ops.gather(data_1, argsort_1, full_0) + del data_1 + + # pd_op.gather: (127x200x232xf32) <- (127x200x232xf32, 127xi64, 1xi32) + gather_4 = paddle._C_ops.gather(data_0, argsort_1, full_0) + del data_0 + + # pd_op.gather: (127xf32) <- (127xf32, 
127xi64, 1xi32) + gather_5 = paddle._C_ops.gather(data_4, argsort_1, full_0) + del data_4 + + # pd_op.gather: (127xf32) <- (127xf32, 127xi64, 1xi32) + gather_6 = paddle._C_ops.gather(data_3, argsort_1, full_0) + del data_3 + + # pd_op.gather: (127xi64) <- (127xi64, 127xi64, 1xi32) + gather_7 = paddle._C_ops.gather(data_2, argsort_1, full_0) + del argsort_1, data_2 + + # pd_op.flatten: (127x46400xf32) <- (127x200x232xf32) + flatten_0 = paddle._C_ops.flatten(gather_3, 1, 2) + del gather_3 + + # pd_op.transpose: (46400x127xf32) <- (127x46400xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [1, 0]) + + # pd_op.matmul: (127x127xf32) <- (127x46400xf32, 46400x127xf32) + matmul_0 = paddle._C_ops.matmul(flatten_0, transpose_0, False, False) + del flatten_0, transpose_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_1, + [1], + paddle.int64, + [float("127")], + paddle.framework._current_expected_place(), + ) + del full_1 + + # pd_op.cast: (1xi32) <- (1xi64) + cast_0 = paddle._C_ops.cast(assign_value__0, paddle.int32) + del assign_value__0 + + # pd_op.cast: (1xi64) <- (1xi32) + cast_1 = paddle._C_ops.cast(cast_0, paddle.int64) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_0 = paddle._C_ops.reshape(cast_1, full_int_array_0) + del cast_1 + + # pd_op.cast: (1xi64) <- (1xi32) + cast_2 = paddle._C_ops.cast(cast_0, paddle.int64) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_1 = paddle._C_ops.reshape(cast_2, full_int_array_0) + del cast_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [reshape_0, reshape_1] + del reshape_0, reshape_1 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.expand: (-1x-1xf32) <- (127xf32, 2xi64) + expand_0 = paddle._C_ops.expand(gather_5, stack_0) + del gather_5, stack_0 + + # pd_op.transpose: (-1x-1xf32) <- (-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(expand_0, [1, 0]) + + # pd_op.add: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) + add_0 = paddle._C_ops.add(expand_0, transpose_1) + del expand_0, transpose_1 + + # pd_op.subtract: (127x127xf32) <- (-1x-1xf32, 127x127xf32) + subtract_0 = paddle._C_ops.subtract(add_0, matmul_0) + del add_0 + + # pd_op.divide: (127x127xf32) <- (127x127xf32, 127x127xf32) + divide_0 = paddle._C_ops.divide(matmul_0, subtract_0) + del matmul_0, subtract_0 + + # pd_op.triu: (127x127xf32) <- (127x127xf32) + triu_0 = paddle._C_ops.triu(divide_0, 1) + del divide_0 + + # pd_op.cast: (1xi64) <- (1xi32) + cast_3 = paddle._C_ops.cast(cast_0, paddle.int64) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_2 = paddle._C_ops.reshape(cast_3, full_int_array_0) + del cast_3 + + # pd_op.cast: (1xi64) <- (1xi32) + cast_4 = paddle._C_ops.cast(cast_0, paddle.int64) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_3 = paddle._C_ops.reshape(cast_4, full_int_array_0) + del cast_4 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [reshape_2, reshape_3] + del reshape_2, reshape_3 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.expand: (-1x-1xi64) <- (127xi64, 2xi64) + expand_1 = paddle._C_ops.expand(gather_7, stack_1) + del stack_1 + + # pd_op.transpose: (-1x-1xi64) <- (-1x-1xi64) + transpose_2 = 
paddle._C_ops.transpose(expand_1, [1, 0]) + + # pd_op.equal: (-1x-1xb) <- (-1x-1xi64, -1x-1xi64) + equal_0 = paddle._C_ops.equal(expand_1, transpose_2) + del expand_1, transpose_2 + + # pd_op.cast: (-1x-1xf32) <- (-1x-1xb) + cast_5 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.triu: (-1x-1xf32) <- (-1x-1xf32) + triu_1 = paddle._C_ops.triu(cast_5, 1) + del cast_5 + + # pd_op.multiply: (127x127xf32) <- (127x127xf32, -1x-1xf32) + multiply_0 = paddle._C_ops.multiply(triu_0, triu_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.max: (127xf32) <- (127x127xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_0, full_int_array_1, False) + del multiply_0 + + # pd_op.cast: (1xi64) <- (1xi32) + cast_6 = paddle._C_ops.cast(cast_0, paddle.int64) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_4 = paddle._C_ops.reshape(cast_6, full_int_array_0) + del cast_6 + + # pd_op.cast: (1xi64) <- (1xi32) + cast_7 = paddle._C_ops.cast(cast_0, paddle.int64) + del cast_0 + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_5 = paddle._C_ops.reshape(cast_7, full_int_array_0) + del cast_7, full_int_array_0 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_2 = [reshape_4, reshape_5] + del reshape_4, reshape_5 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.expand: (-1x-1xf32) <- (127xf32, 2xi64) + expand_2 = paddle._C_ops.expand(max_0, stack_2) + del max_0, stack_2 + + # pd_op.transpose: (-1x-1xf32) <- (-1x-1xf32) + transpose_3 = paddle._C_ops.transpose(expand_2, [1, 0]) + del expand_2 + + # pd_op.multiply: (127x127xf32) <- (127x127xf32, -1x-1xf32) + multiply_1 = paddle._C_ops.multiply(triu_0, triu_1) + del triu_0, triu_1 + + # pd_op.full: (xf32) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.elementwise_pow: (127x127xf32) <- (127x127xf32, xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(multiply_1, full_2) + del multiply_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("-2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (127x127xf32) <- (127x127xf32, 1xf32) + scale_0 = paddle._C_ops.scale(elementwise_pow_0, full_3, float("0"), True) + del elementwise_pow_0 + + # pd_op.exp: (127x127xf32) <- (127x127xf32) + exp_0 = paddle._C_ops.exp(scale_0) + del scale_0 + + # pd_op.elementwise_pow: (-1x-1xf32) <- (-1x-1xf32, xf32) + elementwise_pow_1 = paddle._C_ops.elementwise_pow(transpose_3, full_2) + del full_2, transpose_3 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(elementwise_pow_1, full_3, float("0"), True) + del elementwise_pow_1, full_3 + + # pd_op.exp: (-1x-1xf32) <- (-1x-1xf32) + exp_1 = paddle._C_ops.exp(scale_1) + del scale_1 + + # pd_op.divide: (127x127xf32) <- (127x127xf32, -1x-1xf32) + divide_1 = paddle._C_ops.divide(exp_0, exp_1) + del exp_0, exp_1 + + # pd_op.min: (127xf32) <- (127x127xf32, 1xi64) + min_0 = paddle._C_ops.min(divide_1, full_int_array_1, False) + del divide_1, full_int_array_1 + + # pd_op.multiply: (127xf32) <- (127xf32, 127xf32) + multiply_2 = paddle._C_ops.multiply(gather_6, min_0) + del gather_6, min_0 + + # pd_op.full: (127xf32) <- () + full_4 = paddle._C_ops.full( + [127], + float("0"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], + float("0.05"), + paddle.float32, + 
paddle.framework._current_expected_place(), + ) + + # pd_op.greater_equal: (127xb) <- (127xf32, xf32) + greater_equal_0 = paddle._C_ops.greater_equal(multiply_2, full_5) + del full_5 + + # pd_op.where: (127xf32) <- (127xb, 127xf32, 127xf32) + where_0 = paddle._C_ops.where(greater_equal_0, multiply_2, full_4) + del full_4, greater_equal_0 + + # pd_op.nonzero: (-1x1xi64) <- (127xf32) + nonzero_0 = paddle._C_ops.nonzero(where_0) + del where_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.squeeze: (-1xi64) <- (-1x1xi64, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(nonzero_0, full_int_array_2) + del full_int_array_2, nonzero_0 + + # pd_op.shape64: (1xi64) <- (127xf32) + shape64_0 = paddle._C_ops.shape64(multiply_2) + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1xi64) <- (1xi64, 1xf32) + scale_2 = paddle._C_ops.scale(shape64_0, full_6, float("-1"), True) + del full_6, shape64_0 + + # pd_op.cast: (1xi64) <- (1xi64) + cast_8 = paddle._C_ops.cast(scale_2, paddle.int64) + del scale_2 + + # builtin.combine: ([-1xi64, 1xi64]) <- (-1xi64, 1xi64) + combine_3 = [squeeze_0, cast_8] + del cast_8, squeeze_0 + + # pd_op.concat: (-1xi64) <- ([-1xi64, 1xi64], 1xi32) + concat_0 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.gather: (-1x200x232xf32) <- (127x200x232xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(gather_4, concat_0, full_0) + del gather_4 + + # pd_op.gather: (-1xf32) <- (127xf32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(multiply_2, concat_0, full_0) + del multiply_2 + + # pd_op.gather: (-1xi64) <- (127xi64, -1xi64, 1xi32) + gather_2 = paddle._C_ops.gather(gather_7, concat_0, full_0) + del concat_0, full_0, gather_7 + + return gather_0, gather_1, gather_2 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..d65f184ba --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +2e6d0097251e1dd497d96c82ea9bff6c91bfae2fb5d733a187b0334611f12364 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_2/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_2/input_meta.py new file mode 100644 index 000000000..8950b9023 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_2/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 3, 800, 928] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.123881") + std = float("1.133") + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_2/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_2/model.py new file mode 100644 index 000000000..6bc05f2be --- /dev/null +++ 
b/paddle_samples/PaddleX/SOLOv2/subgraph_2/model.py @@ -0,0 +1,4656 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + 
parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + data_0, + ): + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x3x-1x-1xf32, 64x3x7x7xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_332, [2, 2], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_332 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_1 = paddle._C_ops.relu(batch_norm__0) + del batch_norm__0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 3] + + # pd_op.pool2d: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + relu_1, + full_int_array_0, + [2, 2], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_0, relu_1 + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32, 64x64x1x1xf32) + 
conv2d_1 = paddle._C_ops.conv2d( + pool2d_0, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_2 = paddle._C_ops.relu(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32, 64x64x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + relu_2, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322, relu_2 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_321, + parameter_320, + parameter_319, + parameter_318, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_318, parameter_319, parameter_320, parameter_321 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_3 = paddle._C_ops.relu(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + relu_3, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_317, relu_3 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_316, + parameter_315, + parameter_314, + parameter_313, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_313, parameter_314, parameter_315, parameter_316 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + pool2d_0, parameter_312, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_312, pool2d_0 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_311, + parameter_310, + parameter_309, + parameter_308, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_308, parameter_309, parameter_310, parameter_311 
+ + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + del batch_norm__18, batch_norm__24 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_4 = paddle._C_ops.relu(add_5) + del add_5 + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x256x-1x-1xf32, 64x256x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + relu_4, parameter_307, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_307 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_306, + parameter_305, + parameter_304, + parameter_303, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_303, parameter_304, parameter_305, parameter_306 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_5 = paddle._C_ops.relu(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32, 64x64x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + relu_5, parameter_302, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_302, relu_5 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_301, + parameter_300, + parameter_299, + parameter_298, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_298, parameter_299, parameter_300, parameter_301 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_6 = paddle._C_ops.relu(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + relu_6, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_297, relu_6 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_296, + parameter_295, + parameter_294, + parameter_293, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_293, parameter_294, parameter_295, parameter_296 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x-1x-1xf32) + add_6 = paddle._C_ops.add(batch_norm__42, relu_4) + del batch_norm__42, relu_4 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_7 = paddle._C_ops.relu(add_6) + del add_6 + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x256x-1x-1xf32, 64x256x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + relu_7, parameter_292, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_292 + + # pd_op.batch_norm_: 
(1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_291, + parameter_290, + parameter_289, + parameter_288, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_288, parameter_289, parameter_290, parameter_291 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_8 = paddle._C_ops.relu(batch_norm__48) + del batch_norm__48 + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32, 64x64x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + relu_8, parameter_287, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_287, relu_8 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_286, + parameter_285, + parameter_284, + parameter_283, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_283, parameter_284, parameter_285, parameter_286 + + # pd_op.relu: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + relu_9 = paddle._C_ops.relu(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x64x-1x-1xf32, 256x64x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + relu_9, parameter_282, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_282, relu_9 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_281, + parameter_280, + parameter_279, + parameter_278, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_278, parameter_279, parameter_280, parameter_281 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__60, relu_7) + del batch_norm__60, relu_7 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_10 = paddle._C_ops.relu(add_7) + del add_7 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x256x-1x-1xf32, 128x256x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + relu_10, parameter_277, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_277 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_276, + parameter_275, + parameter_274, + parameter_273, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, 
None, None, None, None, None), + ) + del conv2d_11, parameter_273, parameter_274, parameter_275, parameter_276 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_11 = paddle._C_ops.relu(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + relu_11, parameter_272, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_272, relu_11 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_271, + parameter_270, + parameter_269, + parameter_268, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_268, parameter_269, parameter_270, parameter_271 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_12 = paddle._C_ops.relu(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_12, parameter_267, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_267, relu_12 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_266, + parameter_265, + parameter_264, + parameter_263, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_263, parameter_264, parameter_265, parameter_266 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x256x-1x-1xf32, 512x256x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + relu_10, parameter_262, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_262 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_261, + parameter_260, + parameter_259, + parameter_258, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_258, parameter_259, parameter_260, parameter_261 + + # pd_op.add: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 1x512x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + del batch_norm__78, batch_norm__84 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_13 = paddle._C_ops.relu(add_8) + del add_8 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x512x-1x-1xf32, 128x512x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + relu_13, parameter_257, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_257 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 
128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_256, + parameter_255, + parameter_254, + parameter_253, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_253, parameter_254, parameter_255, parameter_256 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_14 = paddle._C_ops.relu(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_14, parameter_252, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_252, relu_14 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_251, + parameter_250, + parameter_249, + parameter_248, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_248, parameter_249, parameter_250, parameter_251 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_15 = paddle._C_ops.relu(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + relu_15, parameter_247, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_247, relu_15 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_246, + parameter_245, + parameter_244, + parameter_243, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_243, parameter_244, parameter_245, parameter_246 + + # pd_op.add: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 1x512x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__102, relu_13) + del batch_norm__102, relu_13 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_16 = paddle._C_ops.relu(add_9) + del add_9 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x512x-1x-1xf32, 128x512x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + relu_16, parameter_242, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_242 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_241, + parameter_240, + parameter_239, + parameter_238, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, 
parameter_238, parameter_239, parameter_240, parameter_241 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_17 = paddle._C_ops.relu(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_19 = paddle._C_ops.conv2d( + relu_17, parameter_237, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_237, relu_17 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_236, + parameter_235, + parameter_234, + parameter_233, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_233, parameter_234, parameter_235, parameter_236 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_18 = paddle._C_ops.relu(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + relu_18, parameter_232, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_232, relu_18 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_231, + parameter_230, + parameter_229, + parameter_228, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_228, parameter_229, parameter_230, parameter_231 + + # pd_op.add: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 1x512x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__120, relu_16) + del batch_norm__120, relu_16 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_19 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x512x-1x-1xf32, 128x512x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + relu_19, parameter_227, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_227 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_226, + parameter_225, + parameter_224, + parameter_223, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_223, parameter_224, parameter_225, parameter_226 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_20 = paddle._C_ops.relu(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + relu_20, parameter_222, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_222, relu_20 + + # pd_op.batch_norm_: 
(1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_221, + parameter_220, + parameter_219, + parameter_218, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_218, parameter_219, parameter_220, parameter_221 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_21 = paddle._C_ops.relu(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x128x-1x-1xf32, 512x128x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + relu_21, parameter_217, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_217, relu_21 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_216, + parameter_215, + parameter_214, + parameter_213, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_213, parameter_214, parameter_215, parameter_216 + + # pd_op.add: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 1x512x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__138, relu_19) + del batch_norm__138, relu_19 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_22 = paddle._C_ops.relu(add_11) + del add_11 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x512x-1x-1xf32, 256x512x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + relu_22, parameter_212, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_212 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_211, + parameter_210, + parameter_209, + parameter_208, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_208, parameter_209, parameter_210, parameter_211 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_23 = paddle._C_ops.relu(batch_norm__144) + del batch_norm__144 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + relu_23, parameter_207, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_207, relu_23 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_206, + parameter_205, + parameter_204, + parameter_203, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + 
False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_203, parameter_204, parameter_205, parameter_206 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_24 = paddle._C_ops.relu(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + relu_24, parameter_202, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_202, relu_24 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_201, + parameter_200, + parameter_199, + parameter_198, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_198, parameter_199, parameter_200, parameter_201 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x512x-1x-1xf32, 1024x512x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + relu_22, parameter_197, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_197 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_196, + parameter_195, + parameter_194, + parameter_193, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_193, parameter_194, parameter_195, parameter_196 + + # pd_op.add: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x-1x-1xf32) + add_12 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + del batch_norm__156, batch_norm__162 + + # pd_op.relu: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + relu_25 = paddle._C_ops.relu(add_12) + del add_12 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + relu_25, parameter_192, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_192 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_188, parameter_189, parameter_190, parameter_191 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_26 = paddle._C_ops.relu(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + relu_26, parameter_187, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187, 
relu_26 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_27 = paddle._C_ops.relu(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + relu_27, parameter_182, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_182, relu_27 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_178, parameter_179, parameter_180, parameter_181 + + # pd_op.add: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__180, relu_25) + del batch_norm__180, relu_25 + + # pd_op.relu: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + relu_28 = paddle._C_ops.relu(add_13) + del add_13 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + relu_28, parameter_177, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_177 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_176, + parameter_175, + parameter_174, + parameter_173, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_173, parameter_174, parameter_175, parameter_176 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_29 = paddle._C_ops.relu(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + relu_29, parameter_172, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_172, relu_29 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_168, parameter_169, parameter_170, parameter_171 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_30 = paddle._C_ops.relu(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + relu_30, parameter_167, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_167, relu_30 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_163, parameter_164, parameter_165, parameter_166 + + # pd_op.add: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x-1x-1xf32) + add_14 = paddle._C_ops.add(batch_norm__198, relu_28) + del batch_norm__198, relu_28 + + # pd_op.relu: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + relu_31 = paddle._C_ops.relu(add_14) + del add_14 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + relu_31, parameter_162, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_162 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_32 = paddle._C_ops.relu(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + relu_32, parameter_157, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_157, relu_32 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_153, parameter_154, parameter_155, parameter_156 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_33 = paddle._C_ops.relu(batch_norm__210) + del batch_norm__210 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- 
(1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + relu_33, parameter_152, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_152, relu_33 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_151, + parameter_150, + parameter_149, + parameter_148, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.add: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__216, relu_31) + del batch_norm__216, relu_31 + + # pd_op.relu: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + relu_34 = paddle._C_ops.relu(add_15) + del add_15 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + relu_34, parameter_147, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_147 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_146, + parameter_145, + parameter_144, + parameter_143, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_143, parameter_144, parameter_145, parameter_146 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_35 = paddle._C_ops.relu(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_38 = paddle._C_ops.conv2d( + relu_35, parameter_142, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_142, relu_35 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_141, + parameter_140, + parameter_139, + parameter_138, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_138, parameter_139, parameter_140, parameter_141 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_36 = paddle._C_ops.relu(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + relu_36, parameter_137, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_137, relu_36 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + 
batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_136, + parameter_135, + parameter_134, + parameter_133, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_133, parameter_134, parameter_135, parameter_136 + + # pd_op.add: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__234, relu_34) + del batch_norm__234, relu_34 + + # pd_op.relu: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + relu_37 = paddle._C_ops.relu(add_16) + del add_16 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + relu_37, parameter_132, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_132 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_131, + parameter_130, + parameter_129, + parameter_128, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_128, parameter_129, parameter_130, parameter_131 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_38 = paddle._C_ops.relu(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_41 = paddle._C_ops.conv2d( + relu_38, parameter_127, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_127, relu_38 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_126, + parameter_125, + parameter_124, + parameter_123, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_123, parameter_124, parameter_125, parameter_126 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_39 = paddle._C_ops.relu(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x256x-1x-1xf32, 1024x256x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + relu_39, parameter_122, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_122, relu_39 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_121, + parameter_120, + parameter_119, + parameter_118, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_118, parameter_119, parameter_120, 
parameter_121 + + # pd_op.add: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__252, relu_37) + del batch_norm__252, relu_37 + + # pd_op.relu: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + relu_40 = paddle._C_ops.relu(add_17) + del add_17 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x1024x-1x-1xf32, 512x1024x1x1xf32) + conv2d_43 = paddle._C_ops.conv2d( + relu_40, parameter_117, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_117 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_116, + parameter_115, + parameter_114, + parameter_113, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_113, parameter_114, parameter_115, parameter_116 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_41 = paddle._C_ops.relu(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + relu_41, parameter_112, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_112, relu_41 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_111, + parameter_110, + parameter_109, + parameter_108, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_108, parameter_109, parameter_110, parameter_111 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_42 = paddle._C_ops.relu(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (1x2048x-1x-1xf32) <- (1x512x-1x-1xf32, 2048x512x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + relu_42, parameter_107, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_107, relu_42 + + # pd_op.batch_norm_: (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_106, + parameter_105, + parameter_104, + parameter_103, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_103, parameter_104, parameter_105, parameter_106 + + # pd_op.conv2d: (1x2048x-1x-1xf32) <- (1x1024x-1x-1xf32, 2048x1024x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + relu_40, parameter_102, [2, 2], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_102 + + # pd_op.batch_norm_: (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__276, + batch_norm__277, 
+ batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_101, + parameter_100, + parameter_99, + parameter_98, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_100, parameter_101, parameter_98, parameter_99 + + # pd_op.add: (1x2048x-1x-1xf32) <- (1x2048x-1x-1xf32, 1x2048x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + del batch_norm__270, batch_norm__276 + + # pd_op.relu: (1x2048x-1x-1xf32) <- (1x2048x-1x-1xf32) + relu_43 = paddle._C_ops.relu(add_18) + del add_18 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x2048x-1x-1xf32, 512x2048x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + relu_43, parameter_97, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_97 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_96, + parameter_95, + parameter_94, + parameter_93, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_93, parameter_94, parameter_95, parameter_96 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_44 = paddle._C_ops.relu(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + relu_44, parameter_92, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_92, relu_44 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_91, + parameter_90, + parameter_89, + parameter_88, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_88, parameter_89, parameter_90, parameter_91 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_45 = paddle._C_ops.relu(batch_norm__288) + del batch_norm__288 + + # pd_op.conv2d: (1x2048x-1x-1xf32) <- (1x512x-1x-1xf32, 2048x512x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + relu_45, parameter_87, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_87, relu_45 + + # pd_op.batch_norm_: (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_86, + parameter_85, + parameter_84, + parameter_83, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_83, parameter_84, 
parameter_85, parameter_86 + + # pd_op.add: (1x2048x-1x-1xf32) <- (1x2048x-1x-1xf32, 1x2048x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__294, relu_43) + del batch_norm__294, relu_43 + + # pd_op.relu: (1x2048x-1x-1xf32) <- (1x2048x-1x-1xf32) + relu_46 = paddle._C_ops.relu(add_19) + del add_19 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x2048x-1x-1xf32, 512x2048x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + relu_46, parameter_82, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_82 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_81, + parameter_80, + parameter_79, + parameter_78, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_78, parameter_79, parameter_80, parameter_81 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_47 = paddle._C_ops.relu(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + relu_47, parameter_77, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_77, relu_47 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_76, + parameter_75, + parameter_74, + parameter_73, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_73, parameter_74, parameter_75, parameter_76 + + # pd_op.relu: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + relu_48 = paddle._C_ops.relu(batch_norm__306) + del batch_norm__306 + + # pd_op.conv2d: (1x2048x-1x-1xf32) <- (1x512x-1x-1xf32, 2048x512x1x1xf32) + conv2d_52 = paddle._C_ops.conv2d( + relu_48, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72, relu_48 + + # pd_op.batch_norm_: (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32, -1xui8) <- (1x2048x-1x-1xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_71, + parameter_70, + parameter_69, + parameter_68, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_68, parameter_69, parameter_70, parameter_71 + + # pd_op.add: (1x2048x-1x-1xf32) <- (1x2048x-1x-1xf32, 1x2048x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__312, relu_46) + del batch_norm__312, relu_46 + + # pd_op.relu: (1x2048x-1x-1xf32) <- (1x2048x-1x-1xf32) + relu_49 = paddle._C_ops.relu(add_20) + del add_20 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + relu_10, parameter_67, [1, 1], [0, 
0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_67, relu_10 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_66, full_int_array_1) + del parameter_66 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_53, reshape_0) + del conv2d_53, reshape_0 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x512x-1x-1xf32, 256x512x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + relu_22, parameter_65, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_65, relu_22 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_64, full_int_array_1) + del parameter_64 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_54, reshape_1) + del conv2d_54, reshape_1 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x1024x-1x-1xf32, 256x1024x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + relu_40, parameter_63, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_63, relu_40 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_62, full_int_array_1) + del parameter_62 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_23 = paddle._C_ops.add(conv2d_55, reshape_2) + del conv2d_55, reshape_2 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x2048x-1x-1xf32, 256x2048x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + relu_49, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61, relu_49 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_60, full_int_array_1) + del parameter_60 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_24 = paddle._C_ops.add(conv2d_56, reshape_3) + del conv2d_56, reshape_3 + + # pd_op.nearest_interp: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + add_24, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, nearest_interp_0) + del add_23, nearest_interp_0 + + # pd_op.nearest_interp: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + add_25, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x-1x-1xf32) + add_26 = paddle._C_ops.add(add_22, nearest_interp_1) + del add_22, nearest_interp_1 + + # pd_op.nearest_interp: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, None, None, None) + nearest_interp_2 = paddle._C_ops.nearest_interp( + add_26, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x-1x-1xf32) + add_27 = paddle._C_ops.add(add_21, nearest_interp_2) + del add_21, nearest_interp_2 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_57 = paddle._C_ops.conv2d( + add_27, parameter_59, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_27, parameter_59 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_58, full_int_array_1) + 
del parameter_58 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_57, reshape_4) + del conv2d_57, reshape_4 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_26, parameter_57, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_26, parameter_57 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_56, full_int_array_1) + del parameter_56 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_29 = paddle._C_ops.add(conv2d_58, reshape_5) + del conv2d_58, reshape_5 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + add_25, parameter_55, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_25, parameter_55 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_54, full_int_array_1) + del parameter_54 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_59, reshape_6) + del conv2d_59, reshape_6 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x3x3xf32) + conv2d_60 = paddle._C_ops.conv2d( + add_24, parameter_53, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_24, parameter_53 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_52, full_int_array_1) + del parameter_52 + + # pd_op.add: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 1x256x1x1xf32) + add_31 = paddle._C_ops.add(conv2d_60, reshape_7) + del conv2d_60, reshape_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.pool2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + add_31, + full_int_array_2, + [2, 2], + [0, 0], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x256x-1x-1xf32, 128x256x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + add_28, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_0, group_norm_1, group_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_61, parameter_50, parameter_49, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_61, parameter_49, parameter_50 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_50 = paddle._C_ops.relu(group_norm_0) + del group_norm_0 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x256x-1x-1xf32, 128x256x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + add_29, parameter_48, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_48 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_3, group_norm_4, group_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_62, parameter_47, parameter_46, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_62, parameter_46, parameter_47 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_51 = paddle._C_ops.relu(group_norm_3) + del group_norm_3 + + # pd_op.bilinear_interp: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, None, None, None) + bilinear_interp_0 = 
paddle._C_ops.bilinear_interp( + relu_51, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_51 + + # pd_op.add: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 1x128x-1x-1xf32) + add_32 = paddle._C_ops.add(relu_50, bilinear_interp_0) + del bilinear_interp_0, relu_50 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x256x-1x-1xf32, 128x256x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + add_30, parameter_45, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_6, group_norm_7, group_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_63, parameter_44, parameter_43, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_63, parameter_43, parameter_44 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_52 = paddle._C_ops.relu(group_norm_6) + del group_norm_6 + + # pd_op.bilinear_interp: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, None, None, None) + bilinear_interp_1 = paddle._C_ops.bilinear_interp( + relu_52, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_52 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_64 = paddle._C_ops.conv2d( + bilinear_interp_1, + parameter_42, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_1, parameter_42 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_9, group_norm_10, group_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_64, parameter_41, parameter_40, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_64, parameter_40, parameter_41 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_53 = paddle._C_ops.relu(group_norm_9) + del group_norm_9 + + # pd_op.bilinear_interp: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, None, None, None) + bilinear_interp_2 = paddle._C_ops.bilinear_interp( + relu_53, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_53 + + # pd_op.add: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 1x128x-1x-1xf32) + add_33 = paddle._C_ops.add(add_32, bilinear_interp_2) + del add_32, bilinear_interp_2 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(add_31) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(add_31) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, 
xi64) + linspace_0 = paddle._C_ops.linspace( + full_0, + full_1, + slice_1, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_1 = paddle._C_ops.linspace( + full_0, + full_1, + slice_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [linspace_1, linspace_0] + del linspace_0, linspace_1 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(split_1, full_int_array_6) + del split_1 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(split_0, full_int_array_6) + del split_0 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [1, 1, -1, -1] + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_0 = paddle._C_ops.expand(unsqueeze_1, full_int_array_7) + del unsqueeze_1 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_1 = paddle._C_ops.expand(unsqueeze_0, full_int_array_7) + del unsqueeze_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_1 = [expand_1, expand_0] + del expand_0, expand_1 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_2) + del combine_1 + + # builtin.combine: ([1x256x-1x-1xf32, 1x2x-1x-1xf32]) <- (1x256x-1x-1xf32, 1x2x-1x-1xf32) + combine_2 = [add_31, concat_0] + del concat_0 + + # pd_op.concat: (1x258x-1x-1xf32) <- ([1x256x-1x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_2) + del combine_2 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x258x-1x-1xf32, 128x258x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + concat_1, parameter_39, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_1, parameter_39 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_12, group_norm_13, group_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_65, parameter_38, parameter_37, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_65, parameter_37, parameter_38 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_54 = paddle._C_ops.relu(group_norm_12) + del group_norm_12 + + # pd_op.bilinear_interp: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, None, None, None) + bilinear_interp_3 = paddle._C_ops.bilinear_interp( + relu_54, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_54 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + bilinear_interp_3, + parameter_36, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_3, parameter_36 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_15, 
group_norm_16, group_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_66, parameter_35, parameter_34, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_66, parameter_34, parameter_35 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_55 = paddle._C_ops.relu(group_norm_15) + del group_norm_15 + + # pd_op.bilinear_interp: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, None, None, None) + bilinear_interp_4 = paddle._C_ops.bilinear_interp( + relu_55, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_55 + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + bilinear_interp_4, + parameter_33, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_4, parameter_33 + + # pd_op.group_norm: (1x128x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x128x-1x-1xf32, 128xf32, 128xf32) + group_norm_18, group_norm_19, group_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_67, parameter_32, parameter_31, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_67, parameter_31, parameter_32 + + # pd_op.relu: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + relu_56 = paddle._C_ops.relu(group_norm_18) + del group_norm_18 + + # pd_op.bilinear_interp: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, None, None, None) + bilinear_interp_5 = paddle._C_ops.bilinear_interp( + relu_56, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "bilinear", + False, + 0, + ) + del relu_56 + + # pd_op.add: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 1x128x-1x-1xf32) + add_34 = paddle._C_ops.add(add_33, bilinear_interp_5) + del add_33, bilinear_interp_5 + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x128x-1x-1xf32, 256x128x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + add_34, parameter_30, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_34, parameter_30 + + # pd_op.group_norm: (1x256x-1x-1xf32, 1x32xf32, 1x32xf32) <- (1x256x-1x-1xf32, 256xf32, 256xf32) + group_norm_21, group_norm_22, group_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_68, parameter_29, parameter_28, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_68, parameter_28, parameter_29 + + # pd_op.relu: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + relu_0 = paddle._C_ops.relu(group_norm_21) + del group_norm_21 + + # pd_op.bilinear_interp: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, None, None, None) + bilinear_interp_6 = paddle._C_ops.bilinear_interp( + add_28, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("0.5"), float("0.5")], + "bilinear", + False, + 0, + ) + del add_28 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, slice_1] + + # pd_op.bilinear_interp: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, None, [xi64, xi64], None) + bilinear_interp_7 = paddle._C_ops.bilinear_interp( + pool2d_1, + None, + combine_3, + None, + "NCHW", + -1, + -1, + -1, + [], + "bilinear", + False, + 0, + ) + del combine_3, pool2d_1 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(bilinear_interp_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: 
(4xi64) <- (1x256x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(bilinear_interp_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_3 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_2 = paddle._C_ops.linspace( + full_0, + full_1, + slice_3, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_3 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_3 = paddle._C_ops.linspace( + full_0, + full_1, + slice_2, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [linspace_3, linspace_2] + del linspace_2, linspace_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(split_3, full_int_array_6) + del split_3 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(split_2, full_int_array_6) + del split_2 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_2 = paddle._C_ops.expand(unsqueeze_3, full_int_array_7) + del unsqueeze_3 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_3 = paddle._C_ops.expand(unsqueeze_2, full_int_array_7) + del unsqueeze_2 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_5 = [expand_3, expand_2] + del expand_2, expand_3 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_5, full_2) + del combine_5 + + # builtin.combine: ([1x256x-1x-1xf32, 1x2x-1x-1xf32]) <- (1x256x-1x-1xf32, 1x2x-1x-1xf32) + combine_6 = [bilinear_interp_6, concat_2] + del bilinear_interp_6, concat_2 + + # pd_op.concat: (1x258x-1x-1xf32) <- ([1x256x-1x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_6, full_2) + del combine_6 + + # pd_op.bilinear_interp: (1x258x40x40xf32) <- (1x258x-1x-1xf32, None, None, None) + bilinear_interp_8 = paddle._C_ops.bilinear_interp( + concat_3, None, None, None, "NCHW", -1, 40, 40, [], "bilinear", False, 0 + ) + del concat_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [-2] + + # pd_op.slice: (1x256x40x40xf32) <- (1x258x40x40xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + bilinear_interp_8, [1], full_int_array_8, full_int_array_9, [1], [] + ) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x258x40x40xf32, 512x258x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + bilinear_interp_8, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_8 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_24, group_norm_25, group_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_69, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_69 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_57 = paddle._C_ops.relu(group_norm_24) + del group_norm_24 + + # pd_op.conv2d: 
(1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + relu_57, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_57 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_27, group_norm_28, group_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_70, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_70 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_58 = paddle._C_ops.relu(group_norm_27) + del group_norm_27 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_71 = paddle._C_ops.conv2d( + relu_58, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_58 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_30, group_norm_31, group_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_71, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_71 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_59 = paddle._C_ops.relu(group_norm_30) + del group_norm_30 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + relu_59, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_59 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_33, group_norm_34, group_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_72, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_72 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_60 = paddle._C_ops.relu(group_norm_33) + del group_norm_33 + + # pd_op.conv2d: (1x256x40x40xf32) <- (1x512x40x40xf32, 256x512x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + relu_60, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_60 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_14, full_int_array_1) + del parameter_14 + + # pd_op.add: (1x256x40x40xf32) <- (1x256x40x40xf32, 1x256x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_73, reshape_8) + del conv2d_73 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x256x40x40xf32, 512x256x3x3xf32) + conv2d_74 = paddle._C_ops.conv2d( + slice_4, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_4 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_36, group_norm_37, group_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_74, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_74 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_61 = paddle._C_ops.relu(group_norm_36) + del group_norm_36 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_75 = paddle._C_ops.conv2d( + relu_61, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_61 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_39, 
group_norm_40, group_norm_41 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_75, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_75 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_62 = paddle._C_ops.relu(group_norm_39) + del group_norm_39 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_76 = paddle._C_ops.conv2d( + relu_62, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_62 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_42, group_norm_43, group_norm_44 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_76, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_76 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_63 = paddle._C_ops.relu(group_norm_42) + del group_norm_42 + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x3x3xf32) + conv2d_77 = paddle._C_ops.conv2d( + relu_63, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_63 + + # pd_op.group_norm: (1x512x40x40xf32, 1x32xf32, 1x32xf32) <- (1x512x40x40xf32, 512xf32, 512xf32) + group_norm_45, group_norm_46, group_norm_47 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_77, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_77 + + # pd_op.relu: (1x512x40x40xf32) <- (1x512x40x40xf32) + relu_64 = paddle._C_ops.relu(group_norm_45) + del group_norm_45 + + # pd_op.conv2d: (1x2x40x40xf32) <- (1x512x40x40xf32, 2x512x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + relu_64, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_64 + + # pd_op.reshape: (1x2x1x1xf32) <- (2xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (1x2x40x40xf32) <- (1x2x40x40xf32, 1x2x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_78, reshape_9) + del conv2d_78 + + # pd_op.sigmoid: (1x2x40x40xf32) <- (1x2x40x40xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_35) + del add_35 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [2, 2] + + # pd_op.pool2d: (1x2x41x41xf32) <- (1x2x40x40xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + sigmoid_0, + full_int_array_10, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [0, 0] + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [-1, -1] + + # pd_op.slice: (1x2x40x40xf32) <- (1x2x41x41xf32, 2xi64, 2xi64) + slice_5 = paddle._C_ops.slice( + pool2d_2, [2, 3], full_int_array_11, full_int_array_12, [1, 1], [] + ) + del pool2d_2 + + # pd_op.equal: (1x2x40x40xb) <- (1x2x40x40xf32, 1x2x40x40xf32) + equal_0 = paddle._C_ops.equal(slice_5, sigmoid_0) + del slice_5 + + # pd_op.cast: (1x2x40x40xf32) <- (1x2x40x40xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.multiply: (1x2x40x40xf32) <- (1x2x40x40xf32, 1x2x40x40xf32) + multiply_0 = paddle._C_ops.multiply(sigmoid_0, cast_0) + del cast_0, sigmoid_0 + + # pd_op.transpose: (1x40x40x2xf32) <- (1x2x40x40xf32) + transpose_0 = paddle._C_ops.transpose(multiply_0, [0, 2, 3, 1]) + del multiply_0 + + # pd_op.shape64: (4xi64) <- 
(1x256x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(add_29) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(add_29) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_5 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_4 = paddle._C_ops.linspace( + full_0, + full_1, + slice_7, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_7 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_5 = paddle._C_ops.linspace( + full_0, + full_1, + slice_6, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_6 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_7 = [linspace_5, linspace_4] + del linspace_4, linspace_5 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_7) + del combine_7 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(split_5, full_int_array_6) + del split_5 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(split_4, full_int_array_6) + del split_4 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_4 = paddle._C_ops.expand(unsqueeze_5, full_int_array_7) + del unsqueeze_5 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_5 = paddle._C_ops.expand(unsqueeze_4, full_int_array_7) + del unsqueeze_4 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_8 = [expand_5, expand_4] + del expand_4, expand_5 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_8, full_2) + del combine_8 + + # builtin.combine: ([1x256x-1x-1xf32, 1x2x-1x-1xf32]) <- (1x256x-1x-1xf32, 1x2x-1x-1xf32) + combine_9 = [add_29, concat_4] + del add_29, concat_4 + + # pd_op.concat: (1x258x-1x-1xf32) <- ([1x256x-1x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_9, full_2) + del combine_9 + + # pd_op.bilinear_interp: (1x258x36x36xf32) <- (1x258x-1x-1xf32, None, None, None) + bilinear_interp_9 = paddle._C_ops.bilinear_interp( + concat_5, None, None, None, "NCHW", -1, 36, 36, [], "bilinear", False, 0 + ) + del concat_5 + + # pd_op.slice: (1x256x36x36xf32) <- (1x258x36x36xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + bilinear_interp_9, [1], full_int_array_8, full_int_array_9, [1], [] + ) + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x258x36x36xf32, 512x258x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + bilinear_interp_9, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_9 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_48, group_norm_49, group_norm_50 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_79, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_79 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + 
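+ # --- Illustrative note (editor's sketch, not part of the captured graph) ---
+ # From here the same category-branch weights (parameter_1 .. parameter_27) are
+ # re-applied to the 36x36 grid; identical blocks follow below for the 24x24 and
+ # 16x16 grids, i.e. the head is shared across FPN levels. Each level first
+ # appends a normalized x/y coordinate grid built with linspace/meshgrid/concat
+ # (a CoordConv-style input of 256+2=258 channels), and each per-level heat map
+ # ends with a "points NMS": a cell is kept only if it equals its 2x2, stride-1
+ # max-pooled neighbourhood (the pool2d/equal/multiply pattern above). A rough
+ # sketch of that last step with public Paddle APIs (names made up here):
+ #
+ #   import paddle
+ #   import paddle.nn.functional as F
+ #
+ #   def points_nms(heat, ksize=2):
+ #       # stride-1 max pool, then crop so the pooled map aligns with the input
+ #       hmax = F.max_pool2d(heat, kernel_size=ksize, stride=1, padding=1)
+ #       keep = (hmax[:, :, :-1, :-1] == heat).astype(heat.dtype)
+ #       return heat * keep
+ # ----------------------------------------------------------------------------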
relu_65 = paddle._C_ops.relu(group_norm_48) + del group_norm_48 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + relu_65, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_65 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_51, group_norm_52, group_norm_53 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_80, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_80 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_66 = paddle._C_ops.relu(group_norm_51) + del group_norm_51 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_81 = paddle._C_ops.conv2d( + relu_66, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_66 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_54, group_norm_55, group_norm_56 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_81, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_81 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_67 = paddle._C_ops.relu(group_norm_54) + del group_norm_54 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + relu_67, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_67 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_57, group_norm_58, group_norm_59 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_82, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_82 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_68 = paddle._C_ops.relu(group_norm_57) + del group_norm_57 + + # pd_op.conv2d: (1x256x36x36xf32) <- (1x512x36x36xf32, 256x512x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + relu_68, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_68 + + # pd_op.add: (1x256x36x36xf32) <- (1x256x36x36xf32, 1x256x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_83, reshape_8) + del conv2d_83 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x256x36x36xf32, 512x256x3x3xf32) + conv2d_84 = paddle._C_ops.conv2d( + slice_8, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_8 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_60, group_norm_61, group_norm_62 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_84, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_84 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_69 = paddle._C_ops.relu(group_norm_60) + del group_norm_60 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + relu_69, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_69 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_63, group_norm_64, group_norm_65 = (lambda x, f: f(x))( + 
paddle._C_ops.group_norm( + conv2d_85, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_85 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_70 = paddle._C_ops.relu(group_norm_63) + del group_norm_63 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + relu_70, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_70 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_66, group_norm_67, group_norm_68 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_86, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_86 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_71 = paddle._C_ops.relu(group_norm_66) + del group_norm_66 + + # pd_op.conv2d: (1x512x36x36xf32) <- (1x512x36x36xf32, 512x512x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + relu_71, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_71 + + # pd_op.group_norm: (1x512x36x36xf32, 1x32xf32, 1x32xf32) <- (1x512x36x36xf32, 512xf32, 512xf32) + group_norm_69, group_norm_70, group_norm_71 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_87, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_87 + + # pd_op.relu: (1x512x36x36xf32) <- (1x512x36x36xf32) + relu_72 = paddle._C_ops.relu(group_norm_69) + del group_norm_69 + + # pd_op.conv2d: (1x2x36x36xf32) <- (1x512x36x36xf32, 2x512x3x3xf32) + conv2d_88 = paddle._C_ops.conv2d( + relu_72, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_72 + + # pd_op.add: (1x2x36x36xf32) <- (1x2x36x36xf32, 1x2x1x1xf32) + add_36 = paddle._C_ops.add(conv2d_88, reshape_9) + del conv2d_88 + + # pd_op.sigmoid: (1x2x36x36xf32) <- (1x2x36x36xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_36) + del add_36 + + # pd_op.pool2d: (1x2x37x37xf32) <- (1x2x36x36xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + sigmoid_1, + full_int_array_10, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.slice: (1x2x36x36xf32) <- (1x2x37x37xf32, 2xi64, 2xi64) + slice_9 = paddle._C_ops.slice( + pool2d_3, [2, 3], full_int_array_11, full_int_array_12, [1, 1], [] + ) + del pool2d_3 + + # pd_op.equal: (1x2x36x36xb) <- (1x2x36x36xf32, 1x2x36x36xf32) + equal_1 = paddle._C_ops.equal(slice_9, sigmoid_1) + del slice_9 + + # pd_op.cast: (1x2x36x36xf32) <- (1x2x36x36xb) + cast_1 = paddle._C_ops.cast(equal_1, paddle.float32) + del equal_1 + + # pd_op.multiply: (1x2x36x36xf32) <- (1x2x36x36xf32, 1x2x36x36xf32) + multiply_1 = paddle._C_ops.multiply(sigmoid_1, cast_1) + del cast_1, sigmoid_1 + + # pd_op.transpose: (1x36x36x2xf32) <- (1x2x36x36xf32) + transpose_1 = paddle._C_ops.transpose(multiply_1, [0, 2, 3, 1]) + del multiply_1 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(add_30) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_6 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(add_30) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_7, [0], 
full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_7 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_6 = paddle._C_ops.linspace( + full_0, + full_1, + slice_11, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_11 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_7 = paddle._C_ops.linspace( + full_0, + full_1, + slice_10, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_10 = [linspace_7, linspace_6] + del linspace_6, linspace_7 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_3 = paddle._C_ops.meshgrid(combine_10) + del combine_10 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_6, + split_7, + ) = meshgrid_3 + del meshgrid_3 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(split_7, full_int_array_6) + del split_7 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(split_6, full_int_array_6) + del split_6 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_6 = paddle._C_ops.expand(unsqueeze_7, full_int_array_7) + del unsqueeze_7 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_7 = paddle._C_ops.expand(unsqueeze_6, full_int_array_7) + del unsqueeze_6 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_11 = [expand_7, expand_6] + del expand_6, expand_7 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_11, full_2) + del combine_11 + + # builtin.combine: ([1x256x-1x-1xf32, 1x2x-1x-1xf32]) <- (1x256x-1x-1xf32, 1x2x-1x-1xf32) + combine_12 = [add_30, concat_6] + del add_30, concat_6 + + # pd_op.concat: (1x258x-1x-1xf32) <- ([1x256x-1x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_12, full_2) + del combine_12 + + # pd_op.bilinear_interp: (1x258x24x24xf32) <- (1x258x-1x-1xf32, None, None, None) + bilinear_interp_10 = paddle._C_ops.bilinear_interp( + concat_7, None, None, None, "NCHW", -1, 24, 24, [], "bilinear", False, 0 + ) + del concat_7 + + # pd_op.slice: (1x256x24x24xf32) <- (1x258x24x24xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + bilinear_interp_10, [1], full_int_array_8, full_int_array_9, [1], [] + ) + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x258x24x24xf32, 512x258x3x3xf32) + conv2d_89 = paddle._C_ops.conv2d( + bilinear_interp_10, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_10 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_72, group_norm_73, group_norm_74 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_89, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_89 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_73 = paddle._C_ops.relu(group_norm_72) + del group_norm_72 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_90 = paddle._C_ops.conv2d( + relu_73, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_73 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_75, group_norm_76, group_norm_77 = 
(lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_90, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_90 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_74 = paddle._C_ops.relu(group_norm_75) + del group_norm_75 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_91 = paddle._C_ops.conv2d( + relu_74, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_74 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_78, group_norm_79, group_norm_80 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_91, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_91 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_75 = paddle._C_ops.relu(group_norm_78) + del group_norm_78 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + relu_75, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_75 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_81, group_norm_82, group_norm_83 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_92, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_92 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_76 = paddle._C_ops.relu(group_norm_81) + del group_norm_81 + + # pd_op.conv2d: (1x256x24x24xf32) <- (1x512x24x24xf32, 256x512x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + relu_76, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_76 + + # pd_op.add: (1x256x24x24xf32) <- (1x256x24x24xf32, 1x256x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_93, reshape_8) + del conv2d_93 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x256x24x24xf32, 512x256x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + slice_12, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_12 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_84, group_norm_85, group_norm_86 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_94, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_94 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_77 = paddle._C_ops.relu(group_norm_84) + del group_norm_84 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + relu_77, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_77 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_87, group_norm_88, group_norm_89 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_95, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_95 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_78 = paddle._C_ops.relu(group_norm_87) + del group_norm_87 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( 
+ relu_78, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_78 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_90, group_norm_91, group_norm_92 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_96, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_96 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_79 = paddle._C_ops.relu(group_norm_90) + del group_norm_90 + + # pd_op.conv2d: (1x512x24x24xf32) <- (1x512x24x24xf32, 512x512x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + relu_79, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_79 + + # pd_op.group_norm: (1x512x24x24xf32, 1x32xf32, 1x32xf32) <- (1x512x24x24xf32, 512xf32, 512xf32) + group_norm_93, group_norm_94, group_norm_95 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_97, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_97 + + # pd_op.relu: (1x512x24x24xf32) <- (1x512x24x24xf32) + relu_80 = paddle._C_ops.relu(group_norm_93) + del group_norm_93 + + # pd_op.conv2d: (1x2x24x24xf32) <- (1x512x24x24xf32, 2x512x3x3xf32) + conv2d_98 = paddle._C_ops.conv2d( + relu_80, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_80 + + # pd_op.add: (1x2x24x24xf32) <- (1x2x24x24xf32, 1x2x1x1xf32) + add_37 = paddle._C_ops.add(conv2d_98, reshape_9) + del conv2d_98 + + # pd_op.sigmoid: (1x2x24x24xf32) <- (1x2x24x24xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_37) + del add_37 + + # pd_op.pool2d: (1x2x25x25xf32) <- (1x2x24x24xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + sigmoid_2, + full_int_array_10, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.slice: (1x2x24x24xf32) <- (1x2x25x25xf32, 2xi64, 2xi64) + slice_13 = paddle._C_ops.slice( + pool2d_4, [2, 3], full_int_array_11, full_int_array_12, [1, 1], [] + ) + del pool2d_4 + + # pd_op.equal: (1x2x24x24xb) <- (1x2x24x24xf32, 1x2x24x24xf32) + equal_2 = paddle._C_ops.equal(slice_13, sigmoid_2) + del slice_13 + + # pd_op.cast: (1x2x24x24xf32) <- (1x2x24x24xb) + cast_2 = paddle._C_ops.cast(equal_2, paddle.float32) + del equal_2 + + # pd_op.multiply: (1x2x24x24xf32) <- (1x2x24x24xf32, 1x2x24x24xf32) + multiply_2 = paddle._C_ops.multiply(sigmoid_2, cast_2) + del cast_2, sigmoid_2 + + # pd_op.transpose: (1x24x24x2xf32) <- (1x2x24x24xf32) + transpose_2 = paddle._C_ops.transpose(multiply_2, [0, 2, 3, 1]) + del multiply_2 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_8 = paddle._C_ops.linspace( + full_0, + full_1, + slice_1, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_1 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_9 = paddle._C_ops.linspace( + full_0, + full_1, + slice_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_0 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_13 = [linspace_9, linspace_8] + del linspace_8, linspace_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_4 = paddle._C_ops.meshgrid(combine_13) + del combine_13 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_8, + split_9, + ) = meshgrid_4 + del meshgrid_4 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_8 
= paddle._C_ops.unsqueeze(split_9, full_int_array_6) + del split_9 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_9 = paddle._C_ops.unsqueeze(split_8, full_int_array_6) + del split_8 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_8 = paddle._C_ops.expand(unsqueeze_9, full_int_array_7) + del unsqueeze_9 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_9 = paddle._C_ops.expand(unsqueeze_8, full_int_array_7) + del unsqueeze_8 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_14 = [expand_9, expand_8] + del expand_8, expand_9 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_14, full_2) + del combine_14 + + # builtin.combine: ([1x256x-1x-1xf32, 1x2x-1x-1xf32]) <- (1x256x-1x-1xf32, 1x2x-1x-1xf32) + combine_15 = [add_31, concat_8] + del add_31, concat_8 + + # pd_op.concat: (1x258x-1x-1xf32) <- ([1x256x-1x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_15, full_2) + del combine_15 + + # pd_op.bilinear_interp: (1x258x16x16xf32) <- (1x258x-1x-1xf32, None, None, None) + bilinear_interp_11 = paddle._C_ops.bilinear_interp( + concat_9, None, None, None, "NCHW", -1, 16, 16, [], "bilinear", False, 0 + ) + del concat_9 + + # pd_op.slice: (1x256x16x16xf32) <- (1x258x16x16xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + bilinear_interp_11, [1], full_int_array_8, full_int_array_9, [1], [] + ) + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x258x16x16xf32, 512x258x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + bilinear_interp_11, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_11 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_96, group_norm_97, group_norm_98 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_99, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_99 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_81 = paddle._C_ops.relu(group_norm_96) + del group_norm_96 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + relu_81, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_81 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_99, group_norm_100, group_norm_101 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_100, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_100 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_82 = paddle._C_ops.relu(group_norm_99) + del group_norm_99 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_101 = paddle._C_ops.conv2d( + relu_82, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_82 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_102, group_norm_103, group_norm_104 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_101, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_101 + + # pd_op.relu: (1x512x16x16xf32) 
<- (1x512x16x16xf32) + relu_83 = paddle._C_ops.relu(group_norm_102) + del group_norm_102 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_102 = paddle._C_ops.conv2d( + relu_83, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_83 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_105, group_norm_106, group_norm_107 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_102, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_102 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_84 = paddle._C_ops.relu(group_norm_105) + del group_norm_105 + + # pd_op.conv2d: (1x256x16x16xf32) <- (1x512x16x16xf32, 256x512x3x3xf32) + conv2d_103 = paddle._C_ops.conv2d( + relu_84, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_84 + + # pd_op.add: (1x256x16x16xf32) <- (1x256x16x16xf32, 1x256x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_103, reshape_8) + del conv2d_103 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x256x16x16xf32, 512x256x3x3xf32) + conv2d_104 = paddle._C_ops.conv2d( + slice_14, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del slice_14 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_108, group_norm_109, group_norm_110 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_104, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_104 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_85 = paddle._C_ops.relu(group_norm_108) + del group_norm_108 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_105 = paddle._C_ops.conv2d( + relu_85, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_85 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_111, group_norm_112, group_norm_113 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_105, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_105 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_86 = paddle._C_ops.relu(group_norm_111) + del group_norm_111 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + relu_86, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_86 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_114, group_norm_115, group_norm_116 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_106, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_106 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_87 = paddle._C_ops.relu(group_norm_114) + del group_norm_114 + + # pd_op.conv2d: (1x512x16x16xf32) <- (1x512x16x16xf32, 512x512x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + relu_87, parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_87 + + # pd_op.group_norm: (1x512x16x16xf32, 1x32xf32, 1x32xf32) <- (1x512x16x16xf32, 512xf32, 512xf32) + group_norm_117, 
group_norm_118, group_norm_119 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_107, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_107 + + # pd_op.relu: (1x512x16x16xf32) <- (1x512x16x16xf32) + relu_88 = paddle._C_ops.relu(group_norm_117) + del group_norm_117 + + # pd_op.conv2d: (1x2x16x16xf32) <- (1x512x16x16xf32, 2x512x3x3xf32) + conv2d_108 = paddle._C_ops.conv2d( + relu_88, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del relu_88 + + # pd_op.add: (1x2x16x16xf32) <- (1x2x16x16xf32, 1x2x1x1xf32) + add_38 = paddle._C_ops.add(conv2d_108, reshape_9) + del conv2d_108 + + # pd_op.sigmoid: (1x2x16x16xf32) <- (1x2x16x16xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_38) + del add_38 + + # pd_op.pool2d: (1x2x17x17xf32) <- (1x2x16x16xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + sigmoid_3, + full_int_array_10, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.slice: (1x2x16x16xf32) <- (1x2x17x17xf32, 2xi64, 2xi64) + slice_15 = paddle._C_ops.slice( + pool2d_5, [2, 3], full_int_array_11, full_int_array_12, [1, 1], [] + ) + del pool2d_5 + + # pd_op.equal: (1x2x16x16xb) <- (1x2x16x16xf32, 1x2x16x16xf32) + equal_3 = paddle._C_ops.equal(slice_15, sigmoid_3) + del slice_15 + + # pd_op.cast: (1x2x16x16xf32) <- (1x2x16x16xb) + cast_3 = paddle._C_ops.cast(equal_3, paddle.float32) + del equal_3 + + # pd_op.multiply: (1x2x16x16xf32) <- (1x2x16x16xf32, 1x2x16x16xf32) + multiply_3 = paddle._C_ops.multiply(sigmoid_3, cast_3) + del cast_3, sigmoid_3 + + # pd_op.transpose: (1x16x16x2xf32) <- (1x2x16x16xf32) + transpose_3 = paddle._C_ops.transpose(multiply_3, [0, 2, 3, 1]) + del multiply_3 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(bilinear_interp_7) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, shape64_8 + + # pd_op.shape64: (4xi64) <- (1x256x-1x-1xf32) + shape64_9 = paddle._C_ops.shape64(bilinear_interp_7) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4, full_int_array_5, shape64_9 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_10 = paddle._C_ops.linspace( + full_0, + full_1, + slice_17, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del slice_17 + + # pd_op.linspace: (-1xf32) <- (1xf32, 1xf32, xi64) + linspace_11 = paddle._C_ops.linspace( + full_0, + full_1, + slice_16, + paddle.float32, + paddle.framework._current_expected_place(), + ) + del full_0, full_1, slice_16 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_16 = [linspace_11, linspace_10] + del linspace_10, linspace_11 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_5 = paddle._C_ops.meshgrid(combine_16) + del combine_16 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_10, + split_11, + ) = meshgrid_5 + del meshgrid_5 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_10 = paddle._C_ops.unsqueeze(split_11, full_int_array_6) + del split_11 + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_11 = paddle._C_ops.unsqueeze(split_10, full_int_array_6) + del full_int_array_6, split_10 + + # pd_op.expand: 
(1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_10 = paddle._C_ops.expand(unsqueeze_11, full_int_array_7) + del unsqueeze_11 + + # pd_op.expand: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 4xi64) + expand_11 = paddle._C_ops.expand(unsqueeze_10, full_int_array_7) + del full_int_array_7, unsqueeze_10 + + # builtin.combine: ([1x1x-1x-1xf32, 1x1x-1x-1xf32]) <- (1x1x-1x-1xf32, 1x1x-1x-1xf32) + combine_17 = [expand_11, expand_10] + del expand_10, expand_11 + + # pd_op.concat: (1x2x-1x-1xf32) <- ([1x1x-1x-1xf32, 1x1x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_17, full_2) + del combine_17 + + # builtin.combine: ([1x256x-1x-1xf32, 1x2x-1x-1xf32]) <- (1x256x-1x-1xf32, 1x2x-1x-1xf32) + combine_18 = [bilinear_interp_7, concat_10] + del bilinear_interp_7, concat_10 + + # pd_op.concat: (1x258x-1x-1xf32) <- ([1x256x-1x-1xf32, 1x2x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_18, full_2) + del combine_18, full_2 + + # pd_op.bilinear_interp: (1x258x12x12xf32) <- (1x258x-1x-1xf32, None, None, None) + bilinear_interp_12 = paddle._C_ops.bilinear_interp( + concat_11, None, None, None, "NCHW", -1, 12, 12, [], "bilinear", False, 0 + ) + del concat_11 + + # pd_op.slice: (1x256x12x12xf32) <- (1x258x12x12xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + bilinear_interp_12, [1], full_int_array_8, full_int_array_9, [1], [] + ) + del full_int_array_8, full_int_array_9 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x258x12x12xf32, 512x258x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + bilinear_interp_12, + parameter_27, + [1, 1], + [1, 1], + "EXPLICIT", + [1, 1], + 1, + "NCHW", + ) + del bilinear_interp_12, parameter_27 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_120, group_norm_121, group_norm_122 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_109, parameter_26, parameter_25, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_109, parameter_25, parameter_26 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_89 = paddle._C_ops.relu(group_norm_120) + del group_norm_120 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + relu_89, parameter_24, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24, relu_89 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_123, group_norm_124, group_norm_125 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_110, parameter_23, parameter_22, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_110, parameter_22, parameter_23 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_90 = paddle._C_ops.relu(group_norm_123) + del group_norm_123 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_111 = paddle._C_ops.conv2d( + relu_90, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21, relu_90 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_126, group_norm_127, group_norm_128 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_111, parameter_20, parameter_19, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_111, parameter_19, parameter_20 + + # pd_op.relu: 
(1x512x12x12xf32) <- (1x512x12x12xf32) + relu_91 = paddle._C_ops.relu(group_norm_126) + del group_norm_126 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + relu_91, parameter_18, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_18, relu_91 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_129, group_norm_130, group_norm_131 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_112, parameter_17, parameter_16, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_112, parameter_16, parameter_17 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_92 = paddle._C_ops.relu(group_norm_129) + del group_norm_129 + + # pd_op.conv2d: (1x256x12x12xf32) <- (1x512x12x12xf32, 256x512x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + relu_92, parameter_15, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15, relu_92 + + # pd_op.add: (1x256x12x12xf32) <- (1x256x12x12xf32, 1x256x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_113, reshape_8) + del conv2d_113, reshape_8 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x256x12x12xf32, 512x256x3x3xf32) + conv2d_114 = paddle._C_ops.conv2d( + slice_18, parameter_13, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, slice_18 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_132, group_norm_133, group_norm_134 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_114, parameter_12, parameter_11, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_114, parameter_11, parameter_12 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_93 = paddle._C_ops.relu(group_norm_132) + del group_norm_132 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_115 = paddle._C_ops.conv2d( + relu_93, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10, relu_93 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_135, group_norm_136, group_norm_137 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_115, parameter_9, parameter_8, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_115, parameter_8, parameter_9 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_94 = paddle._C_ops.relu(group_norm_135) + del group_norm_135 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + relu_94, parameter_7, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7, relu_94 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_138, group_norm_139, group_norm_140 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_116, parameter_6, parameter_5, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_116, parameter_5, parameter_6 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_95 = paddle._C_ops.relu(group_norm_138) + del group_norm_138 + + # pd_op.conv2d: (1x512x12x12xf32) <- (1x512x12x12xf32, 512x512x3x3xf32) + conv2d_117 = paddle._C_ops.conv2d( + relu_95, 
parameter_4, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4, relu_95 + + # pd_op.group_norm: (1x512x12x12xf32, 1x32xf32, 1x32xf32) <- (1x512x12x12xf32, 512xf32, 512xf32) + group_norm_141, group_norm_142, group_norm_143 = (lambda x, f: f(x))( + paddle._C_ops.group_norm( + conv2d_117, parameter_3, parameter_2, float("1e-05"), 32, "NCHW" + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del conv2d_117, parameter_2, parameter_3 + + # pd_op.relu: (1x512x12x12xf32) <- (1x512x12x12xf32) + relu_96 = paddle._C_ops.relu(group_norm_141) + del group_norm_141 + + # pd_op.conv2d: (1x2x12x12xf32) <- (1x512x12x12xf32, 2x512x3x3xf32) + conv2d_118 = paddle._C_ops.conv2d( + relu_96, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, relu_96 + + # pd_op.add: (1x2x12x12xf32) <- (1x2x12x12xf32, 1x2x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_118, reshape_9) + del conv2d_118, reshape_9 + + # pd_op.sigmoid: (1x2x12x12xf32) <- (1x2x12x12xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_39) + del add_39 + + # pd_op.pool2d: (1x2x13x13xf32) <- (1x2x12x12xf32, 2xi64) + pool2d_6 = paddle._C_ops.pool2d( + sigmoid_4, + full_int_array_10, + [1, 1], + [1, 1], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_10 + + # pd_op.slice: (1x2x12x12xf32) <- (1x2x13x13xf32, 2xi64, 2xi64) + slice_19 = paddle._C_ops.slice( + pool2d_6, [2, 3], full_int_array_11, full_int_array_12, [1, 1], [] + ) + del full_int_array_11, full_int_array_12, pool2d_6 + + # pd_op.equal: (1x2x12x12xb) <- (1x2x12x12xf32, 1x2x12x12xf32) + equal_4 = paddle._C_ops.equal(slice_19, sigmoid_4) + del slice_19 + + # pd_op.cast: (1x2x12x12xf32) <- (1x2x12x12xb) + cast_4 = paddle._C_ops.cast(equal_4, paddle.float32) + del equal_4 + + # pd_op.multiply: (1x2x12x12xf32) <- (1x2x12x12xf32, 1x2x12x12xf32) + multiply_4 = paddle._C_ops.multiply(sigmoid_4, cast_4) + del cast_4, sigmoid_4 + + # pd_op.transpose: (1x12x12x2xf32) <- (1x2x12x12xf32) + transpose_4 = paddle._C_ops.transpose(multiply_4, [0, 2, 3, 1]) + del multiply_4 + + return ( + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + add_0, + add_1, + add_2, + add_3, + add_4, + relu_0, + ) diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8e12d2616 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_2/weight_meta.py @@ -0,0 +1,3603 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [2] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [2, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0408139") + max_val = float("0.0597572") + mean = float("0.000241518") + std = float("0.0108463") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.164576") + max_val = float("7.56304e-05") + mean = float("-0.0728378") + std = float("0.026892") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("0.820262") + max_val = float("1.07435") + mean = float("0.967989") + std = float("0.0247432") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0716389") + 
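
The category branch that closes the forward above ends with a local-maximum trick: the sigmoid heatmap is max-pooled with a 2x2 kernel, stride 1 and padding 1, the pooled map is sliced back to the original height/width, and only positions equal to their pooled value survive. A minimal sketch of that pattern using public Paddle ops, for reference; the helper name keep_local_maxima and the 1x2x16x16 example shape are illustrative and not part of the generated code.

import paddle
import paddle.nn.functional as F

def keep_local_maxima(heatmap):
    # Mirrors the pool2d -> slice -> equal -> cast -> multiply sequence in
    # the generated forward: suppress every score that is not equal to the
    # maximum of its local 2x2 neighbourhood.
    scores = F.sigmoid(heatmap)                                    # (N, C, H, W)
    pooled = F.max_pool2d(scores, kernel_size=2, stride=1, padding=1)
    pooled = pooled[:, :, : scores.shape[2], : scores.shape[3]]    # drop the extra row/column
    keep = paddle.cast(pooled == scores, "float32")
    return scores * keep

# e.g. peaks = keep_local_maxima(paddle.rand([1, 2, 16, 16]))
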
max_val = float("0.0739773") + mean = float("-0.000841378") + std = float("0.00700024") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512] + dtype = "float32" + min_val = float("-0.140273") + max_val = float("0.0111794") + mean = float("-0.0178403") + std = float("0.0140496") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + min_val = float("0.849924") + max_val = float("1.11095") + mean = float("0.999578") + std = float("0.0235233") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0881508") + max_val = float("0.0991058") + mean = float("-0.000337377") + std = float("0.00690436") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-0.133432") + max_val = float("0.0110725") + mean = float("-0.0266697") + std = float("0.0165112") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [512] + dtype = "float32" + min_val = float("0.896877") + max_val = float("1.12814") + mean = float("0.999335") + std = float("0.0224697") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0774497") + max_val = float("0.072937") + mean = float("-0.000341836") + std = float("0.00696914") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512] + dtype = "float32" + min_val = float("-0.0835772") + max_val = float("-0.0114493") + mean = float("-0.0344999") + std = float("0.0115603") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("0.930588") + max_val = float("1.07872") + mean = float("0.999266") + std = float("0.0180065") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0527954") + max_val = float("0.0542711") + mean = float("5.3121e-06") + std = float("0.0075493") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [256] + dtype = "float32" + min_val = float("-0.108704") + max_val = float("0.0289304") + mean = float("-0.00575223") + std = float("0.0126979") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [256, 512, 3, 3] + dtype = "float32" + min_val = float("-0.114528") + max_val = float("0.130447") + mean = float("-0.00108201") + std = float("0.00893668") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.167228") + max_val = float("-1.2406e-05") + mean = float("-0.0479157") + std = float("0.0268721") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512] + dtype = "float32" + min_val = float("0.527656") + max_val = float("1.07286") + mean = float("0.977064") + std = float("0.0410395") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0770595") + max_val = float("0.0720516") + mean = float("-0.000560165") + std = float("0.00724206") + data = None + + +class Program_weight_tensor_parameter_19: + name = 
"parameter_19" + shape = [512] + dtype = "float32" + min_val = float("-0.109454") + max_val = float("0.0155805") + mean = float("-0.0154898") + std = float("0.019233") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512] + dtype = "float32" + min_val = float("0.568624") + max_val = float("1.14345") + mean = float("0.998841") + std = float("0.0449438") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0796277") + max_val = float("0.0873588") + mean = float("-0.000300177") + std = float("0.00679615") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [512] + dtype = "float32" + min_val = float("-0.140032") + max_val = float("0.0086507") + mean = float("-0.0211997") + std = float("0.0193673") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [512] + dtype = "float32" + min_val = float("0.447633") + max_val = float("1.13208") + mean = float("0.998482") + std = float("0.0517143") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0713307") + max_val = float("0.0662597") + mean = float("-0.000198336") + std = float("0.00686002") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [512] + dtype = "float32" + min_val = float("-0.115167") + max_val = float("-0.0016886") + mean = float("-0.0323269") + std = float("0.0146759") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [512] + dtype = "float32" + min_val = float("0.889679") + max_val = float("1.10071") + mean = float("0.999208") + std = float("0.0232613") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512, 258, 3, 3] + dtype = "float32" + min_val = float("-0.176995") + max_val = float("0.134287") + mean = float("1.24662e-05") + std = float("0.00795077") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-0.260207") + max_val = float("0.121872") + mean = float("-0.0681992") + std = float("0.0685324") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.362334") + max_val = float("1.10499") + mean = float("0.973586") + std = float("0.0690627") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.235548") + max_val = float("0.241674") + mean = float("-0.000841938") + std = float("0.0365446") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [128] + dtype = "float32" + min_val = float("-0.118527") + max_val = float("0.0569334") + mean = float("-0.0131621") + std = float("0.0280401") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [128] + dtype = "float32" + min_val = float("0.835934") + max_val = float("1.10599") + mean = float("0.995058") + std = float("0.0398102") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0902691") + max_val = float("0.109419") + mean = float("0.000111336") + std = float("0.0100807") 
+ data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [128] + dtype = "float32" + min_val = float("-0.118716") + max_val = float("0.0649332") + mean = float("-0.0126097") + std = float("0.0311738") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [128] + dtype = "float32" + min_val = float("0.807064") + max_val = float("1.07146") + mean = float("0.999208") + std = float("0.0474893") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0921701") + max_val = float("0.0931507") + mean = float("5.73767e-05") + std = float("0.0106762") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [128] + dtype = "float32" + min_val = float("-0.18393") + max_val = float("0.158299") + mean = float("-0.0193893") + std = float("0.046439") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [128] + dtype = "float32" + min_val = float("0.819504") + max_val = float("1.06041") + mean = float("0.998564") + std = float("0.0399652") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [128, 258, 3, 3] + dtype = "float32" + min_val = float("-0.335587") + max_val = float("0.259937") + mean = float("5.06249e-05") + std = float("0.0102757") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [128] + dtype = "float32" + min_val = float("-0.129893") + max_val = float("0.0417231") + mean = float("-0.0184517") + std = float("0.019486") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [128] + dtype = "float32" + min_val = float("0.532926") + max_val = float("1.06616") + mean = float("1.00367") + std = float("0.0508428") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0707103") + max_val = float("0.0714848") + mean = float("-0.00032312") + std = float("0.00852153") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [128] + dtype = "float32" + min_val = float("-0.09812") + max_val = float("0.00681841") + mean = float("-0.0175448") + std = float("0.0160672") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [128] + dtype = "float32" + min_val = float("0.796179") + max_val = float("1.04458") + mean = float("0.999206") + std = float("0.0342519") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [128, 256, 3, 3] + dtype = "float32" + min_val = float("-0.043773") + max_val = float("0.0473348") + mean = float("-5.79449e-05") + std = float("0.00748983") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [128] + dtype = "float32" + min_val = float("-0.128115") + max_val = float("0.0557194") + mean = float("-0.0386796") + std = float("0.022895") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [128] + dtype = "float32" + min_val = float("0.533942") + max_val = float("1.10645") + mean = float("0.990954") + std = float("0.0580884") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [128, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0504074") + max_val = 
float("0.0497478") + mean = float("4.59365e-06") + std = float("0.00778853") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [128] + dtype = "float32" + min_val = float("-0.135607") + max_val = float("-0.00537059") + mean = float("-0.0603145") + std = float("0.0256366") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [128] + dtype = "float32" + min_val = float("0.885637") + max_val = float("1.18744") + mean = float("1.0037") + std = float("0.0470053") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [128, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0518155") + max_val = float("0.0477341") + mean = float("-4.18363e-05") + std = float("0.00742536") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256] + dtype = "float32" + min_val = float("-0.141143") + max_val = float("0.0854563") + mean = float("0.00238833") + std = float("0.0351064") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0449099") + max_val = float("0.0467238") + mean = float("9.25399e-06") + std = float("0.0128415") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [256] + dtype = "float32" + min_val = float("-0.0684464") + max_val = float("0.0642124") + mean = float("0.00163747") + std = float("0.0261189") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0372319") + max_val = float("0.037816") + mean = float("-7.2032e-06") + std = float("0.012217") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [256] + dtype = "float32" + min_val = float("-0.0926705") + max_val = float("0.09789") + mean = float("-0.00321178") + std = float("0.0288454") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0363118") + max_val = float("0.0375119") + mean = float("1.42394e-05") + std = float("0.0121222") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-0.0809814") + max_val = float("0.0952501") + mean = float("-0.00329499") + std = float("0.0296118") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0385208") + max_val = float("0.041591") + mean = float("-1.2453e-05") + std = float("0.0122306") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("-0.120511") + max_val = float("0.148") + mean = float("0.00666033") + std = float("0.0392653") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [256, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.0718854") + max_val = float("0.0765562") + mean = float("-6.78027e-05") + std = float("0.0142117") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256] + dtype = "float32" + min_val = float("-0.0679209") + max_val = float("0.0791148") + mean = float("0.00220837") + std = float("0.0266435") + data = None + + +class 
Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.11025") + max_val = float("0.101318") + mean = float("-2.22789e-05") + std = float("0.018888") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("-0.0655969") + max_val = float("0.0730176") + mean = float("0.000449903") + std = float("0.0264177") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [256, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0782085") + max_val = float("0.0742485") + mean = float("-3.18699e-05") + std = float("0.0244314") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [256] + dtype = "float32" + min_val = float("-0.0731329") + max_val = float("0.0701132") + mean = float("-0.000549911") + std = float("0.0263313") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.0819963") + max_val = float("0.0776369") + mean = float("8.2871e-06") + std = float("0.0344435") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [2048] + dtype = "float32" + min_val = float("-0.0151986") + max_val = float("0.548302") + mean = float("0.184979") + std = float("0.0543057") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [2048] + dtype = "float32" + min_val = float("0.0697239") + max_val = float("2.1928") + mean = float("0.84625") + std = float("0.221893") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [2048] + dtype = "float32" + min_val = float("1.81634e-05") + max_val = float("0.00461601") + mean = float("0.000837051") + std = float("0.00036967") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [2048] + dtype = "float32" + min_val = float("-0.0611881") + max_val = float("0.0309583") + mean = float("-0.0166939") + std = float("0.00834277") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.175748") + max_val = float("0.323546") + mean = float("-0.00236465") + std = float("0.0103294") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [512] + dtype = "float32" + min_val = float("-0.256637") + max_val = float("0.509206") + mean = float("-0.0807075") + std = float("0.0607222") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [512] + dtype = "float32" + min_val = float("0.142703") + max_val = float("0.29885") + mean = float("0.202958") + std = float("0.0283985") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [512] + dtype = "float32" + min_val = float("0.00782461") + max_val = float("0.0362781") + mean = float("0.0117981") + std = float("0.00204555") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [512] + dtype = "float32" + min_val = float("-0.213289") + max_val = float("0.00238845") + mean = float("-0.0920613") + std = float("0.0166986") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.15478") + max_val = 
float("0.151967") + mean = float("-0.00107967") + std = float("0.00814632") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [512] + dtype = "float32" + min_val = float("-0.37403") + max_val = float("0.119353") + mean = float("-0.139679") + std = float("0.0581028") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [512] + dtype = "float32" + min_val = float("0.112228") + max_val = float("0.465189") + mean = float("0.214344") + std = float("0.0285329") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [512] + dtype = "float32" + min_val = float("0.00368759") + max_val = float("0.066979") + mean = float("0.00530424") + std = float("0.00301387") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [512] + dtype = "float32" + min_val = float("-0.165443") + max_val = float("0.174839") + mean = float("-0.0359508") + std = float("0.0213069") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [512, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.308366") + max_val = float("0.301668") + mean = float("-0.00112007") + std = float("0.0140263") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [2048] + dtype = "float32" + min_val = float("-0.344631") + max_val = float("0.401359") + mean = float("-0.158227") + std = float("0.032536") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [2048] + dtype = "float32" + min_val = float("-0.0556075") + max_val = float("0.217378") + mean = float("0.13018") + std = float("0.0219564") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [2048] + dtype = "float32" + min_val = float("0.000374117") + max_val = float("0.00489936") + mean = float("0.000888948") + std = float("0.000281992") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [2048] + dtype = "float32" + min_val = float("-0.143769") + max_val = float("0.0820542") + mean = float("-0.0229567") + std = float("0.0142941") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.262648") + max_val = float("0.273459") + mean = float("-0.00157955") + std = float("0.0120553") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [512] + dtype = "float32" + min_val = float("-0.281487") + max_val = float("0.181162") + mean = float("-0.130536") + std = float("0.037587") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [512] + dtype = "float32" + min_val = float("0.103009") + max_val = float("0.261748") + mean = float("0.192296") + std = float("0.0182279") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [512] + dtype = "float32" + min_val = float("0.00667127") + max_val = float("0.0496363") + mean = float("0.0113008") + std = float("0.00321697") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [512] + dtype = "float32" + min_val = float("-0.333268") + max_val = float("0.452042") + mean = float("-0.0658262") + std = float("0.0511294") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [512, 512, 3, 3] + 
dtype = "float32" + min_val = float("-0.164359") + max_val = float("0.143986") + mean = float("-0.00052186") + std = float("0.00887161") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [512] + dtype = "float32" + min_val = float("-0.204657") + max_val = float("0.214884") + mean = float("-0.112746") + std = float("0.0407913") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [512] + dtype = "float32" + min_val = float("0.110153") + max_val = float("0.23698") + mean = float("0.182554") + std = float("0.0168829") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [512] + dtype = "float32" + min_val = float("0.00384403") + max_val = float("0.0267019") + mean = float("0.00550499") + std = float("0.00140394") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [512] + dtype = "float32" + min_val = float("-0.156824") + max_val = float("0.213265") + mean = float("-0.0508984") + std = float("0.0216917") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [512, 2048, 1, 1] + dtype = "float32" + min_val = float("-0.199223") + max_val = float("0.196998") + mean = float("-0.00103884") + std = float("0.0111276") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [2048] + dtype = "float32" + min_val = float("-0.175843") + max_val = float("0.0839479") + mean = float("-0.0886424") + std = float("0.0238497") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [2048] + dtype = "float32" + min_val = float("0.0445195") + max_val = float("0.224302") + mean = float("0.11517") + std = float("0.0171066") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [2048] + dtype = "float32" + min_val = float("0.00110241") + max_val = float("0.0211157") + mean = float("0.00255611") + std = float("0.00108592") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [2048] + dtype = "float32" + min_val = float("-0.149371") + max_val = float("0.261173") + mean = float("-0.0169219") + std = float("0.0272638") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [2048, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.230213") + max_val = float("0.336085") + mean = float("-0.000426346") + std = float("0.00875138") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [2048] + dtype = "float32" + min_val = float("-0.175843") + max_val = float("0.0839479") + mean = float("-0.0886424") + std = float("0.0238497") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [2048] + dtype = "float32" + min_val = float("-0.0299624") + max_val = float("0.247374") + mean = float("0.152015") + std = float("0.0269642") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [2048] + dtype = "float32" + min_val = float("0.000316897") + max_val = float("0.0119787") + mean = float("0.00184257") + std = float("0.000489207") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [2048] + dtype = "float32" + min_val = float("-0.119339") + max_val = float("0.171807") + mean = float("-0.0363601") + std = float("0.0247316") + data = None + + +class 
Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [2048, 512, 1, 1] + dtype = "float32" + min_val = float("-0.281965") + max_val = float("0.350346") + mean = float("-0.00149391") + std = float("0.0124785") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [512] + dtype = "float32" + min_val = float("-0.166324") + max_val = float("0.333823") + mean = float("-0.0623815") + std = float("0.0473493") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [512] + dtype = "float32" + min_val = float("0.122111") + max_val = float("0.321502") + mean = float("0.177554") + std = float("0.0214185") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [512] + dtype = "float32" + min_val = float("0.00556363") + max_val = float("0.0696198") + mean = float("0.0123156") + std = float("0.00464772") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [512] + dtype = "float32" + min_val = float("-0.114129") + max_val = float("0.215283") + mean = float("-0.0404652") + std = float("0.0316874") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.32344") + max_val = float("0.296284") + mean = float("-0.000394135") + std = float("0.00855279") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [512] + dtype = "float32" + min_val = float("-0.323683") + max_val = float("0.189289") + mean = float("-0.16759") + std = float("0.0548881") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [512] + dtype = "float32" + min_val = float("0.103589") + max_val = float("0.266322") + mean = float("0.202582") + std = float("0.0208949") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [512] + dtype = "float32" + min_val = float("0.00626156") + max_val = float("0.027742") + mean = float("0.0110146") + std = float("0.00269234") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [512] + dtype = "float32" + min_val = float("-0.154333") + max_val = float("0.262058") + mean = float("-0.0482834") + std = float("0.0385199") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [512, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.276262") + max_val = float("0.534078") + mean = float("-0.00104149") + std = float("0.0165544") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [1024] + dtype = "float32" + min_val = float("-0.350923") + max_val = float("0.376408") + mean = float("-0.0871135") + std = float("0.0697404") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [1024] + dtype = "float32" + min_val = float("-0.0704466") + max_val = float("0.260302") + mean = float("0.0752669") + std = float("0.0561504") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [1024] + dtype = "float32" + min_val = float("7.2606e-05") + max_val = float("0.0237334") + mean = float("0.000963188") + std = float("0.00100983") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [1024] + dtype = "float32" + min_val = float("-0.14425") + max_val = 
float("0.160504") + mean = float("-0.0189685") + std = float("0.0237042") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.531898") + max_val = float("0.417136") + mean = float("-0.00206365") + std = float("0.0147186") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [256] + dtype = "float32" + min_val = float("-0.21119") + max_val = float("0.2076") + mean = float("-0.0769485") + std = float("0.0604492") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [256] + dtype = "float32" + min_val = float("0.11773") + max_val = float("0.375442") + mean = float("0.186314") + std = float("0.0296775") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [256] + dtype = "float32" + min_val = float("0.0034165") + max_val = float("0.0334953") + mean = float("0.00764904") + std = float("0.00343798") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [256] + dtype = "float32" + min_val = float("-0.17006") + max_val = float("0.139317") + mean = float("-0.0394256") + std = float("0.0482774") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.237546") + max_val = float("0.185067") + mean = float("-0.000605371") + std = float("0.0118746") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [256] + dtype = "float32" + min_val = float("-0.311719") + max_val = float("0.1238") + mean = float("-0.11228") + std = float("0.0715737") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [256] + dtype = "float32" + min_val = float("0.0999653") + max_val = float("0.257498") + mean = float("0.168535") + std = float("0.027973") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [256] + dtype = "float32" + min_val = float("0.00541324") + max_val = float("0.0401863") + mean = float("0.00971417") + std = float("0.00366722") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [256] + dtype = "float32" + min_val = float("-0.217744") + max_val = float("0.149159") + mean = float("-0.0379885") + std = float("0.0499749") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.232663") + max_val = float("0.338982") + mean = float("-0.000898047") + std = float("0.0154688") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [1024] + dtype = "float32" + min_val = float("-0.257451") + max_val = float("0.105004") + mean = float("-0.0666821") + std = float("0.0487657") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [1024] + dtype = "float32" + min_val = float("-0.0934567") + max_val = float("0.23334") + mean = float("0.0748934") + std = float("0.0447769") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [1024] + dtype = "float32" + min_val = float("3.37401e-05") + max_val = float("0.00494006") + mean = float("0.000767935") + std = float("0.000555318") + data = None + + +class Program_weight_tensor_parameter_136: + name = 
"parameter_136" + shape = [1024] + dtype = "float32" + min_val = float("-0.0803722") + max_val = float("0.095245") + mean = float("-0.0123875") + std = float("0.0179512") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.227153") + max_val = float("0.323561") + mean = float("-0.00118101") + std = float("0.0144307") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [256] + dtype = "float32" + min_val = float("-0.230738") + max_val = float("0.139429") + mean = float("-0.0745415") + std = float("0.0720042") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [256] + dtype = "float32" + min_val = float("0.0986424") + max_val = float("0.352413") + mean = float("0.179668") + std = float("0.0297447") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [256] + dtype = "float32" + min_val = float("0.00330071") + max_val = float("0.0279471") + mean = float("0.00664013") + std = float("0.00278299") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [256] + dtype = "float32" + min_val = float("-0.162212") + max_val = float("0.194153") + mean = float("-0.0413669") + std = float("0.0513468") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.151859") + max_val = float("0.21159") + mean = float("-0.000677608") + std = float("0.0117836") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [256] + dtype = "float32" + min_val = float("-0.30878") + max_val = float("0.117998") + mean = float("-0.0978964") + std = float("0.0730731") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [256] + dtype = "float32" + min_val = float("0.091752") + max_val = float("0.259828") + mean = float("0.161612") + std = float("0.0292353") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [256] + dtype = "float32" + min_val = float("0.00507588") + max_val = float("0.0262703") + mean = float("0.0109557") + std = float("0.00324557") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [256] + dtype = "float32" + min_val = float("-0.253355") + max_val = float("0.121652") + mean = float("-0.0542848") + std = float("0.0542532") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.181888") + max_val = float("0.356815") + mean = float("-0.00108238") + std = float("0.0154556") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [1024] + dtype = "float32" + min_val = float("-0.277434") + max_val = float("0.146613") + mean = float("-0.0570223") + std = float("0.0518947") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [1024] + dtype = "float32" + min_val = float("-0.0669331") + max_val = float("0.263999") + mean = float("0.0731652") + std = float("0.054519") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [1024] + dtype = "float32" + min_val = float("3.75198e-05") + max_val = float("0.00687895") + mean = float("0.000898002") + std 
= float("0.000696388") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [1024] + dtype = "float32" + min_val = float("-0.0982832") + max_val = float("0.0788582") + mean = float("-0.00935298") + std = float("0.0214394") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.216702") + max_val = float("0.412622") + mean = float("-0.00100657") + std = float("0.0154782") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [256] + dtype = "float32" + min_val = float("-0.296816") + max_val = float("0.203979") + mean = float("-0.0590104") + std = float("0.0837132") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [256] + dtype = "float32" + min_val = float("0.107435") + max_val = float("0.39755") + mean = float("0.178744") + std = float("0.0314985") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [256] + dtype = "float32" + min_val = float("0.00385646") + max_val = float("0.0233426") + mean = float("0.00855859") + std = float("0.00309764") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [256] + dtype = "float32" + min_val = float("-0.169503") + max_val = float("0.258877") + mean = float("-0.0370232") + std = float("0.0546523") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.156512") + max_val = float("0.222824") + mean = float("-0.000659499") + std = float("0.0126225") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [256] + dtype = "float32" + min_val = float("-0.213348") + max_val = float("0.207989") + mean = float("-0.0730882") + std = float("0.0598612") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [256] + dtype = "float32" + min_val = float("0.0886164") + max_val = float("0.234968") + mean = float("0.155301") + std = float("0.022324") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [256] + dtype = "float32" + min_val = float("0.00512839") + max_val = float("0.0486237") + mean = float("0.0113862") + std = float("0.0037092") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [256] + dtype = "float32" + min_val = float("-0.186114") + max_val = float("0.104412") + mean = float("-0.0347102") + std = float("0.0492274") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.203743") + max_val = float("0.33071") + mean = float("-0.000615238") + std = float("0.0148674") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [1024] + dtype = "float32" + min_val = float("-0.250531") + max_val = float("0.212052") + mean = float("-0.0534412") + std = float("0.0507326") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [1024] + dtype = "float32" + min_val = float("-0.0906585") + max_val = float("0.259754") + mean = float("0.0702987") + std = float("0.0527645") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [1024] + dtype = "float32" + 
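
The Program_weight_tensor_parameter_* classes in this file only describe each weight (name, shape, dtype, min/max/mean/std) and leave data as None. One plausible consumer could rebuild placeholder tensors from those statistics, for example by sampling a normal distribution and clipping to the recorded range; the helper below is an assumed usage sketch, not something defined by this patch.

import paddle

def synthesize_weight(meta):
    # Build a placeholder tensor for one weight_meta entry. Sampling
    # N(mean, std) and clipping to [min_val, max_val] is an assumption
    # for the data=None case recorded throughout this file.
    if getattr(meta, "data", None) is not None:
        return paddle.to_tensor(meta.data, dtype=meta.dtype).reshape(meta.shape)
    mean = float(getattr(meta, "mean", 0.0) or 0.0)
    std = float(getattr(meta, "std", 0.0) or 0.0)
    if std > 0.0:
        w = paddle.normal(mean=mean, std=std, shape=meta.shape)
    else:
        w = paddle.full(meta.shape, mean, dtype="float32")
    w = paddle.clip(w, min=float(meta.min_val), max=float(meta.max_val))
    return paddle.cast(w, meta.dtype)

# e.g. synthesize_weight(Program_weight_tensor_parameter_151()) yields a
# 1024-element float32 vector with roughly the recorded statistics.
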
min_val = float("5.60519e-45") + max_val = float("0.00907432") + mean = float("0.000964403") + std = float("0.000856945") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [1024] + dtype = "float32" + min_val = float("-0.167247") + max_val = float("0.0922496") + mean = float("-0.0113321") + std = float("0.0239985") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.322799") + max_val = float("0.358177") + mean = float("-0.0012953") + std = float("0.0149063") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [256] + dtype = "float32" + min_val = float("-0.35206") + max_val = float("0.241553") + mean = float("-0.0517875") + std = float("0.0689942") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [256] + dtype = "float32" + min_val = float("0.109761") + max_val = float("0.390957") + mean = float("0.176878") + std = float("0.0288588") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [256] + dtype = "float32" + min_val = float("0.00447337") + max_val = float("0.0340642") + mean = float("0.0105546") + std = float("0.00364562") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [256] + dtype = "float32" + min_val = float("-0.27309") + max_val = float("0.374102") + mean = float("-0.0352638") + std = float("0.0589054") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.1432") + max_val = float("0.207235") + mean = float("-0.00047799") + std = float("0.0117461") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [256] + dtype = "float32" + min_val = float("-0.244607") + max_val = float("0.0912815") + mean = float("-0.0498462") + std = float("0.0625104") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [256] + dtype = "float32" + min_val = float("0.0940088") + max_val = float("0.275561") + mean = float("0.150497") + std = float("0.0251361") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [256] + dtype = "float32" + min_val = float("0.00569095") + max_val = float("0.0330212") + mean = float("0.0130381") + std = float("0.00433576") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [256] + dtype = "float32" + min_val = float("-0.216939") + max_val = float("0.154978") + mean = float("-0.0231354") + std = float("0.0541784") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.194384") + max_val = float("0.323798") + mean = float("-0.000344646") + std = float("0.0136932") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [1024] + dtype = "float32" + min_val = float("-0.28682") + max_val = float("0.140438") + mean = float("-0.0416996") + std = float("0.0479403") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [1024] + dtype = "float32" + min_val = float("-0.095438") + max_val = float("0.4389") + mean = float("0.0744024") + std = float("0.0544762") + data = None + + +class 
Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00716343") + mean = float("0.00103639") + std = float("0.000898989") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [1024] + dtype = "float32" + min_val = float("-0.115913") + max_val = float("0.0782403") + mean = float("-0.00319781") + std = float("0.0211123") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.237178") + max_val = float("0.419401") + mean = float("-0.000909865") + std = float("0.0156152") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [256] + dtype = "float32" + min_val = float("-0.375338") + max_val = float("0.272544") + mean = float("-0.0647334") + std = float("0.0809183") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [256] + dtype = "float32" + min_val = float("0.112331") + max_val = float("0.387223") + mean = float("0.184195") + std = float("0.0296639") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [256] + dtype = "float32" + min_val = float("0.00590442") + max_val = float("0.0544441") + mean = float("0.0133796") + std = float("0.00533314") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [256] + dtype = "float32" + min_val = float("-0.703403") + max_val = float("0.631713") + mean = float("-0.0169264") + std = float("0.108926") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.278966") + max_val = float("0.22964") + mean = float("-0.000409405") + std = float("0.0122664") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [256] + dtype = "float32" + min_val = float("-0.206504") + max_val = float("0.339113") + mean = float("-0.0332304") + std = float("0.072116") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [256] + dtype = "float32" + min_val = float("0.0505531") + max_val = float("0.226288") + mean = float("0.138144") + std = float("0.0261678") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [256] + dtype = "float32" + min_val = float("0.00534171") + max_val = float("0.0374661") + mean = float("0.0144267") + std = float("0.00632604") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [256] + dtype = "float32" + min_val = float("-0.874778") + max_val = float("0.271624") + mean = float("-0.0408335") + std = float("0.0907325") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.147392") + max_val = float("0.234831") + mean = float("-0.000675859") + std = float("0.0122654") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [1024] + dtype = "float32" + min_val = float("-0.129305") + max_val = float("0.139271") + mean = float("-0.0098929") + std = float("0.0358316") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [1024] + dtype = "float32" + min_val = float("-0.0782405") + max_val = 
float("0.297465") + mean = float("0.0887483") + std = float("0.058831") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0293973") + mean = float("0.00517825") + std = float("0.00449503") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [1024] + dtype = "float32" + min_val = float("-0.212177") + max_val = float("0.268765") + mean = float("0.00619636") + std = float("0.049949") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [1024, 512, 1, 1] + dtype = "float32" + min_val = float("-0.294154") + max_val = float("0.294806") + mean = float("0.000207696") + std = float("0.0120758") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [1024] + dtype = "float32" + min_val = float("-0.129305") + max_val = float("0.139271") + mean = float("-0.0098929") + std = float("0.0358316") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [1024] + dtype = "float32" + min_val = float("-0.112497") + max_val = float("0.306851") + mean = float("0.115037") + std = float("0.0726397") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [1024] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0103119") + mean = float("0.00298727") + std = float("0.00200745") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [1024] + dtype = "float32" + min_val = float("-0.274559") + max_val = float("0.262423") + mean = float("-0.00688684") + std = float("0.0526932") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.270378") + max_val = float("0.320951") + mean = float("-0.000243848") + std = float("0.0189913") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [256] + dtype = "float32" + min_val = float("-0.169522") + max_val = float("0.24108") + mean = float("0.0337387") + std = float("0.0893673") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [256] + dtype = "float32" + min_val = float("0.134911") + max_val = float("0.280871") + mean = float("0.182639") + std = float("0.0244767") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [256] + dtype = "float32" + min_val = float("0.00830091") + max_val = float("0.0599813") + mean = float("0.0218677") + std = float("0.00980468") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [256] + dtype = "float32" + min_val = float("-0.334949") + max_val = float("0.127422") + mean = float("-0.0167348") + std = float("0.0636368") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [256, 256, 3, 3] + dtype = "float32" + min_val = float("-0.161937") + max_val = float("0.184429") + mean = float("-0.000448081") + std = float("0.0133147") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [256] + dtype = "float32" + min_val = float("-0.37354") + max_val = float("0.130682") + mean = float("-0.122896") + std = float("0.0737988") + data = None + + +class Program_weight_tensor_parameter_209: + name = 
"parameter_209" + shape = [256] + dtype = "float32" + min_val = float("0.131707") + max_val = float("0.306188") + mean = float("0.218482") + std = float("0.029329") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [256] + dtype = "float32" + min_val = float("0.00841896") + max_val = float("0.0413624") + mean = float("0.0191254") + std = float("0.00583118") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [256] + dtype = "float32" + min_val = float("-0.320503") + max_val = float("0.329766") + mean = float("-0.0425911") + std = float("0.0888525") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256, 512, 1, 1] + dtype = "float32" + min_val = float("-0.255573") + max_val = float("0.340504") + mean = float("-0.00169811") + std = float("0.0263455") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [512] + dtype = "float32" + min_val = float("-0.265757") + max_val = float("0.18098") + mean = float("-0.0551155") + std = float("0.0710863") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [512] + dtype = "float32" + min_val = float("-0.118525") + max_val = float("0.265331") + mean = float("0.0605188") + std = float("0.0747383") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [512] + dtype = "float32" + min_val = float("6.90463e-05") + max_val = float("0.00491621") + mean = float("0.000851993") + std = float("0.000814133") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [512] + dtype = "float32" + min_val = float("-0.118861") + max_val = float("0.0793485") + mean = float("-0.00135119") + std = float("0.0222945") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.176336") + max_val = float("0.31954") + mean = float("-0.00118975") + std = float("0.0219006") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [128] + dtype = "float32" + min_val = float("-0.232824") + max_val = float("0.252755") + mean = float("-0.0608347") + std = float("0.0935444") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [128] + dtype = "float32" + min_val = float("0.118575") + max_val = float("0.350103") + mean = float("0.195164") + std = float("0.0318741") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [128] + dtype = "float32" + min_val = float("0.00369018") + max_val = float("0.0308134") + mean = float("0.011865") + std = float("0.00405604") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [128] + dtype = "float32" + min_val = float("-0.258314") + max_val = float("0.31528") + mean = float("-0.0349642") + std = float("0.0634947") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.147398") + max_val = float("0.174491") + mean = float("-0.000978171") + std = float("0.019047") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [128] + dtype = "float32" + min_val = float("-0.217196") + max_val = float("0.15299") + mean = float("-0.0481659") + std = 
float("0.0730298") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [128] + dtype = "float32" + min_val = float("0.0919349") + max_val = float("0.226493") + mean = float("0.166581") + std = float("0.0236465") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [128] + dtype = "float32" + min_val = float("0.00814274") + max_val = float("0.031877") + mean = float("0.0148642") + std = float("0.00446453") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [128] + dtype = "float32" + min_val = float("-0.257526") + max_val = float("0.20432") + mean = float("-0.0229181") + std = float("0.0807673") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.170314") + max_val = float("0.232509") + mean = float("-0.00100615") + std = float("0.0222141") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [512] + dtype = "float32" + min_val = float("-0.237422") + max_val = float("0.21759") + mean = float("-0.0445364") + std = float("0.0633277") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [512] + dtype = "float32" + min_val = float("-0.139337") + max_val = float("0.250169") + mean = float("0.0669814") + std = float("0.0755184") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00559176") + mean = float("0.00104555") + std = float("0.00109474") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("-0.117879") + max_val = float("0.141154") + mean = float("0.00137843") + std = float("0.0242038") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.230988") + max_val = float("0.282028") + mean = float("-0.000251065") + std = float("0.0217553") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [128] + dtype = "float32" + min_val = float("-0.242849") + max_val = float("0.273584") + mean = float("-0.0109895") + std = float("0.0940812") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [128] + dtype = "float32" + min_val = float("0.105254") + max_val = float("0.344767") + mean = float("0.174253") + std = float("0.0324788") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [128] + dtype = "float32" + min_val = float("0.00364882") + max_val = float("0.0310589") + mean = float("0.013433") + std = float("0.00481079") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [128] + dtype = "float32" + min_val = float("-0.254192") + max_val = float("0.221824") + mean = float("-0.0185789") + std = float("0.0744171") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.171305") + max_val = float("0.172678") + mean = float("-0.000500686") + std = float("0.0174442") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [128] + dtype = "float32" + min_val = 
float("-0.193408") + max_val = float("0.168849") + mean = float("-0.0177396") + std = float("0.074171") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [128] + dtype = "float32" + min_val = float("0.105105") + max_val = float("0.242827") + mean = float("0.157451") + std = float("0.0271128") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [128] + dtype = "float32" + min_val = float("0.00566029") + max_val = float("0.05857") + mean = float("0.0163651") + std = float("0.0072262") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [128] + dtype = "float32" + min_val = float("-0.218906") + max_val = float("0.380567") + mean = float("-0.0294434") + std = float("0.0838679") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.159404") + max_val = float("0.218293") + mean = float("-0.00076861") + std = float("0.0201355") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [512] + dtype = "float32" + min_val = float("-0.24628") + max_val = float("0.217708") + mean = float("-0.0311961") + std = float("0.0633679") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [512] + dtype = "float32" + min_val = float("-0.192908") + max_val = float("0.291095") + mean = float("0.055776") + std = float("0.0953223") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00790918") + mean = float("0.00114117") + std = float("0.00170112") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [512] + dtype = "float32" + min_val = float("-0.127209") + max_val = float("0.131354") + mean = float("-0.00697471") + std = float("0.0238302") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.322991") + max_val = float("0.314657") + mean = float("-0.000762637") + std = float("0.0215696") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [128] + dtype = "float32" + min_val = float("-0.361119") + max_val = float("0.229816") + mean = float("-0.000110899") + std = float("0.0886123") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [128] + dtype = "float32" + min_val = float("0.108785") + max_val = float("0.344712") + mean = float("0.163661") + std = float("0.0268447") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [128] + dtype = "float32" + min_val = float("0.0020564") + max_val = float("0.0629993") + mean = float("0.0150465") + std = float("0.0100431") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [128] + dtype = "float32" + min_val = float("-0.881857") + max_val = float("0.354848") + mean = float("0.0254079") + std = float("0.153673") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.243431") + max_val = float("0.33055") + mean = float("0.000154301") + std = float("0.0166187") + data = None + + +class 
Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [128] + dtype = "float32" + min_val = float("-0.338885") + max_val = float("0.360894") + mean = float("0.00728028") + std = float("0.101298") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [128] + dtype = "float32" + min_val = float("0.0665754") + max_val = float("0.233636") + mean = float("0.134798") + std = float("0.0315315") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [128] + dtype = "float32" + min_val = float("0.00275181") + max_val = float("0.177524") + mean = float("0.0213047") + std = float("0.0241815") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [128] + dtype = "float32" + min_val = float("-0.271882") + max_val = float("0.364487") + mean = float("-0.00787709") + std = float("0.0941561") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.195609") + max_val = float("0.254539") + mean = float("-0.000536262") + std = float("0.0179727") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [512] + dtype = "float32" + min_val = float("-0.173999") + max_val = float("0.178778") + mean = float("0.0104332") + std = float("0.0450756") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [512] + dtype = "float32" + min_val = float("-0.0847974") + max_val = float("0.347141") + mean = float("0.104401") + std = float("0.0950331") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0600736") + mean = float("0.00907374") + std = float("0.0107161") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [512] + dtype = "float32" + min_val = float("-0.532972") + max_val = float("0.349244") + mean = float("-0.0147123") + std = float("0.07433") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.41124") + max_val = float("0.342961") + mean = float("0.000119026") + std = float("0.0220948") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [512] + dtype = "float32" + min_val = float("-0.173999") + max_val = float("0.178778") + mean = float("0.0104332") + std = float("0.0450756") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [512] + dtype = "float32" + min_val = float("-0.110676") + max_val = float("0.360044") + mean = float("0.0747975") + std = float("0.0909131") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0224734") + mean = float("0.00195282") + std = float("0.00230705") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [512] + dtype = "float32" + min_val = float("-0.415464") + max_val = float("0.301824") + mean = float("-0.00186618") + std = float("0.0519181") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.307118") + max_val = float("0.362652") 
+ mean = float("-0.000480775") + std = float("0.0255374") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [128] + dtype = "float32" + min_val = float("-0.0749191") + max_val = float("0.383717") + mean = float("0.00992787") + std = float("0.0893128") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [128] + dtype = "float32" + min_val = float("0.151982") + max_val = float("0.258211") + mean = float("0.208953") + std = float("0.0230586") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [128] + dtype = "float32" + min_val = float("0.011571") + max_val = float("0.0270516") + mean = float("0.0174302") + std = float("0.00377445") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [128] + dtype = "float32" + min_val = float("-0.199453") + max_val = float("0.225939") + mean = float("-0.0293134") + std = float("0.052256") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [128, 128, 3, 3] + dtype = "float32" + min_val = float("-0.15259") + max_val = float("0.176445") + mean = float("-0.000476337") + std = float("0.0196661") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [128] + dtype = "float32" + min_val = float("-0.254357") + max_val = float("0.105134") + mean = float("-0.059691") + std = float("0.0926679") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [128] + dtype = "float32" + min_val = float("7.24973e-19") + max_val = float("0.248465") + mean = float("0.173546") + std = float("0.0393691") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [128] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0475623") + mean = float("0.0181743") + std = float("0.00711628") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [128] + dtype = "float32" + min_val = float("-0.268525") + max_val = float("0.132098") + mean = float("-0.0574633") + std = float("0.089508") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [128, 256, 1, 1] + dtype = "float32" + min_val = float("-0.224228") + max_val = float("0.290006") + mean = float("-0.00100814") + std = float("0.0369021") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [256] + dtype = "float32" + min_val = float("-0.164026") + max_val = float("0.206317") + mean = float("-0.00531759") + std = float("0.0546818") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [256] + dtype = "float32" + min_val = float("-0.255219") + max_val = float("0.302547") + mean = float("0.0429001") + std = float("0.108481") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [256] + dtype = "float32" + min_val = float("7.12898e-05") + max_val = float("0.0083103") + mean = float("0.00137519") + std = float("0.00174094") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [256] + dtype = "float32" + min_val = float("-0.133956") + max_val = float("0.182093") + mean = float("1.89526e-05") + std = float("0.0382032") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [256, 64, 1, 
1] + dtype = "float32" + min_val = float("-0.325233") + max_val = float("0.371292") + mean = float("-0.000368685") + std = float("0.0323619") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.259976") + max_val = float("0.260293") + mean = float("-0.000605252") + std = float("0.0317955") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-0.188924") + max_val = float("0.2369") + mean = float("-0.000203353") + std = float("0.0317779") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [256] + dtype = "float32" + min_val = float("-0.177785") + max_val = float("0.192687") + mean = float("-0.00904678") + std = float("0.0582621") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [256] + dtype = "float32" + min_val = float("-0.203582") + max_val = float("0.321081") + mean = float("0.00967383") + std = float("0.0943443") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [256] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.00624877") + mean = float("0.000464025") + std = float("0.0010426") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [256] + dtype = "float32" + min_val = float("-0.0838559") + max_val = float("0.114342") + mean = float("-0.00431057") + std = float("0.0194668") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.25193") + max_val = float("0.296788") + mean = float("-0.000771138") + std = float("0.0229857") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.277854") + max_val = float("0.463245") + mean = float("-0.000341722") + std = float("0.0229625") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-0.340224") + max_val = float("0.346524") + mean = float("0.00042888") + std = float("0.0242323") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [256] + dtype = "float32" + min_val = float("-0.214949") + max_val = float("0.189687") + mean = float("0.0248474") + std = float("0.0675151") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [256] + dtype = "float32" + min_val = float("-0.140797") + max_val = float("0.44859") + mean = float("0.185639") + std = float("0.128272") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [256] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.353666") + mean = float("0.0397985") + std = float("0.0391881") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [256] + dtype = "float32" + min_val = float("-0.788633") + max_val = float("1.35179") + mean = float("-0.0432466") + std = float("0.244709") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.636505") + max_val = float("0.512807") + mean = float("-0.00145991") + std = float("0.0539196") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [256] + dtype = "float32" + min_val = float("-0.214949") + max_val = float("0.189687") + mean = float("0.0248474") + std = float("0.0675151") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [256] + dtype = "float32" + min_val = float("-0.351382") + max_val = float("0.349111") + mean = float("0.100573") + std = float("0.13099") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [256] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.0267983") + mean = float("0.00646355") + std = float("0.00705122") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = 
[256] + dtype = "float32" + min_val = float("-0.221928") + max_val = float("0.207279") + mean = float("0.00942316") + std = float("0.0652644") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.416398") + max_val = float("0.445069") + mean = float("0.000454476") + std = float("0.0384569") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [64, 64, 3, 3] + dtype = "float32" + min_val = float("-0.367549") + max_val = float("0.551431") + mean = float("0.000884031") + std = float("0.0322617") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-0.628423") + max_val = float("0.355848") + mean = float("-0.00641673") + std = float("0.0746228") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [64, 3, 7, 7] + dtype = "float32" + min_val = float("-0.593645") + max_val = float("0.652022") + mean = float("-0.000404981") + std = float("0.108572") + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..13132b8ae --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +1ea9f83c910201b9c22d32433c75a7d47d1a11f15bc12245cf2788eeee5d17d5 \ No newline at end of 
file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_3/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_3/input_meta.py new file mode 100644 index 000000000..85a614883 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_3/input_meta.py @@ -0,0 +1,36 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [68] + dtype = "int64" + min_val = 0 + max_val = 67 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [68, 304, 200] + dtype = "float32" + max_val = float("1.0") + mean = float("0.173561") + std = float("0.329417") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [68] + dtype = "float32" + max_val = float("0.748209") + mean = float("0.0880978") + std = float("0.0946481") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [68] + dtype = "int64" + min_val = 0 + max_val = 1 + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_3/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_3/model.py new file mode 100644 index 000000000..0803fe537 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_3/model.py @@ -0,0 +1,26 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1x-1x-1xf32) <- (-1x-1x-1xf32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(data_1, data_0, full_0) + del data_1 + + # pd_op.gather: (-1xf32) <- (-1xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(data_2, data_0, full_0) + del data_2 + + # pd_op.gather: (-1xi64) <- (-1xi64, -1xi64, 1xi32) + gather_2 = paddle._C_ops.gather(data_3, data_0, full_0) + del data_0, data_3, full_0 + + return gather_0, gather_1, gather_2 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..3366fc57d --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +5f4b39704b823bc0571121283ed212cdeb399e5816fcf0142365b58bbdf77174 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_4/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_4/input_meta.py new file mode 100644 index 000000000..376f6b72d --- /dev/null +++ 
b/paddle_samples/PaddleX/SOLOv2/subgraph_4/input_meta.py @@ -0,0 +1,92 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1600] + dtype = "int32" + min_val = 8 + max_val = 8 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1296] + dtype = "int32" + min_val = 8 + max_val = 8 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [576] + dtype = "int32" + min_val = 16 + max_val = 16 + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [256] + dtype = "int32" + min_val = 32 + max_val = 32 + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [144] + dtype = "int32" + min_val = 32 + max_val = 32 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [126, 2] + dtype = "int64" + min_val = 0 + max_val = 3872 + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [126, 256] + dtype = "float32" + min_val = float("-13.8939") + max_val = float("4.44222") + mean = float("-0.618971") + std = float("1.74014") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 256, 304, 200] + dtype = "float32" + max_val = float("82.2773") + mean = float("0.0840985") + std = float("0.634015") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [126] + dtype = "float32" + max_val = float("0.758779") + mean = float("0.172851") + std = float("0.116889") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [126] + dtype = "int64" + min_val = 0 + max_val = 1 + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_4/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_4/model.py new file mode 100644 index 000000000..1f7a26a1e --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_4/model.py @@ -0,0 +1,222 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1600xi32, 1296xi32, 576xi32, 256xi32, 144xi32]) <- (1600xi32, 1296xi32, 576xi32, 256xi32, 144xi32) + combine_0 = [data_0, data_1, data_2, data_3, data_4] + del data_0, data_1, data_2, data_3, data_4 + + # pd_op.concat: (3872xi32) <- ([1600xi32, 1296xi32, 576xi32, 256xi32, 144xi32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.framework._current_expected_place() + ) + + # builtin.combine: ([3872xi32, 1xi32]) <- (3872xi32, 1xi32) + combine_1 = [concat_0, full_1] + del concat_0, full_1 + + # pd_op.concat: (3873xi32) <- ([3872xi32, 1xi32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (-1xi64) <- (-1x2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_5, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del data_5 + + # pd_op.gather: (-1xi32) <- (3873xi32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(concat_1, slice_0, full_0) + del concat_1, slice_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [2, 3] + + # 
pd_op.unsqueeze: (-1x256x1x1xf32) <- (-1x256xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_6, full_int_array_2) + del data_6, full_int_array_2 + + # pd_op.conv2d: (1x-1x-1x-1xf32) <- (1x256x-1x-1xf32, -1x256x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_7, unsqueeze_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_7, unsqueeze_0 + + # pd_op.squeeze: (-1x-1x-1xf32) <- (1x-1x-1x-1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_0) + del conv2d_0, full_int_array_0 + + # pd_op.sigmoid: (-1x-1x-1xf32) <- (-1x-1x-1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(squeeze_0) + del squeeze_0 + + # pd_op.full: (xf32) <- () + full_2 = paddle._C_ops.full( + [], float("0.5"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (-1x-1x-1xb) <- (-1x-1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(sigmoid_0, full_2) + del full_2 + + # pd_op.cast: (-1x-1x-1xf32) <- (-1x-1x-1xb) + cast_0 = paddle._C_ops.cast(greater_than_0, paddle.float32) + del greater_than_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [1, 2] + + # pd_op.sum: (-1xf32) <- (-1x-1x-1xf32, 2xi64) + sum_0 = paddle._C_ops.sum(cast_0, full_int_array_3, None, False) + + # pd_op.shape64: (1xi64) <- (-1xf32) + shape64_0 = paddle._C_ops.shape64(sum_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1xf32) <- (1xf32, 1xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, shape64_0, paddle.float32 + ) + del full_3 + + # pd_op.cast: (-1xf32) <- (-1xi32) + cast_1 = paddle._C_ops.cast(gather_1, paddle.float32) + del gather_1 + + # pd_op.greater_than: (-1xb) <- (-1xf32, -1xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, cast_1) + del cast_1 + + # pd_op.where: (-1xf32) <- (-1xb, -1xf32, -1xf32) + where_0 = paddle._C_ops.where(greater_than_1, sum_0, full_with_tensor_0) + del full_with_tensor_0, greater_than_1 + + # pd_op.nonzero: (-1x1xi64) <- (-1xf32) + nonzero_0 = paddle._C_ops.nonzero(where_0) + del where_0 + + # pd_op.squeeze: (-1xi64) <- (-1x1xi64, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(nonzero_0, full_int_array_1) + del full_int_array_1, nonzero_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1xi64) <- (1xi64, 1xf32) + scale_0 = paddle._C_ops.scale(shape64_0, full_4, float("-1"), True) + del full_4 + + # pd_op.cast: (1xi64) <- (1xi64) + cast_2 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # builtin.combine: ([-1xi64, 1xi64]) <- (-1xi64, 1xi64) + combine_2 = [squeeze_1, cast_2] + del cast_2 + + # pd_op.concat: (-1xi64) <- ([-1xi64, 1xi64], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.cast: (1xi64) <- (1xi64) + cast_3 = paddle._C_ops.cast(shape64_0, paddle.int64) + del shape64_0 + + # builtin.combine: ([-1xi64, 1xi64]) <- (-1xi64, 1xi64) + combine_3 = [squeeze_1, cast_3] + del cast_3, squeeze_1 + + # pd_op.concat: (-1xi64) <- ([-1xi64, 1xi64], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # builtin.combine: ([-1xf32, 1xf32]) <- (-1xf32, 1xf32) + combine_4 = [data_8, full_5] + del data_8, full_5 + + # pd_op.concat: (-1xf32) <- ([-1xf32, 1xf32], 1xi32) + concat_4 = 
paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.gather: (-1x-1x-1xf32) <- (-1x-1x-1xf32, -1xi64, 1xi32) + gather_2 = paddle._C_ops.gather(cast_0, concat_2, full_0) + del cast_0 + + # pd_op.gather: (-1x-1x-1xf32) <- (-1x-1x-1xf32, -1xi64, 1xi32) + gather_3 = paddle._C_ops.gather(sigmoid_0, concat_2, full_0) + del sigmoid_0 + + # pd_op.gather: (-1xf32) <- (-1xf32, -1xi64, 1xi32) + gather_4 = paddle._C_ops.gather(sum_0, concat_2, full_0) + del sum_0 + + # pd_op.gather: (-1xi64) <- (-1xi64, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(data_9, concat_2, full_0) + del concat_2, data_9 + + # pd_op.gather: (-1xf32) <- (-1xf32, -1xi64, 1xi32) + gather_5 = paddle._C_ops.gather(concat_4, concat_3, full_0) + del concat_3, concat_4, full_0 + + # pd_op.multiply: (-1x-1x-1xf32) <- (-1x-1x-1xf32, -1x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(gather_3, gather_2) + + # pd_op.cast: (-1x-1x-1xf32) <- (-1x-1x-1xf32) + cast_4 = paddle._C_ops.cast(multiply_1, paddle.float32) + del multiply_1 + + # pd_op.sum: (-1xf32) <- (-1x-1x-1xf32, 2xi64) + sum_1 = paddle._C_ops.sum(cast_4, full_int_array_3, None, False) + del cast_4, full_int_array_3 + + # pd_op.divide: (-1xf32) <- (-1xf32, -1xf32) + divide_0 = paddle._C_ops.divide(sum_1, gather_4) + del sum_1 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_0 = paddle._C_ops.multiply(gather_5, divide_0) + del divide_0, gather_2, gather_3, gather_4, gather_5 + + return gather_0, multiply_0 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..3d610d869 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +892a21687443236e06b6ff04746d6c0d8d5387b64dd201e8784c838088358399 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_5/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_5/input_meta.py new file mode 100644 index 000000000..53bdc5e1a --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_5/input_meta.py @@ -0,0 +1,15 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [68] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [68] + dtype = "float32" + max_val = float("0.748209") + mean = float("0.0880978") + std = float("0.0946481") + data = None diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_5/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_5/model.py new file mode 100644 index 000000000..96a38a406 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_5/model.py @@ -0,0 +1,46 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("100"), paddle.int64, 
paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(data_0, full_0) + del data_0, full_0 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(greater_than_0, paddle.int64) + del greater_than_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_1) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_1) + del cast_1, full_1 + + # pd_op.argsort: (-1xf32, -1xi64) <- (-1xf32) + argsort_1, argsort_0 = (lambda x, f: f(x))( + paddle._C_ops.argsort(data_1, -1, True, False), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + return argsort_0 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..f0746bdb3 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +ceb8e72515b1bcba0a48a8ff9e1105f8f4babee8bb743def723b163543be41f5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_6/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_6/input_meta.py new file mode 100644 index 000000000..349f107fa --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_6/input_meta.py @@ -0,0 +1,50 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1216] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [800] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [68, 304, 200] + dtype = "float32" + max_val = float("1.0") + mean = float("0.173561") + std = float("0.329417") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2] + dtype = "float32" + data = [1205.0, 800.0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [1.50625] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int32" + data = [1205] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [] + dtype = "int32" + data = [800] diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_6/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_6/model.py new file mode 100644 index 000000000..5af886eae --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_6/model.py @@ -0,0 +1,106 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, 
data_3, data_4, data_5, data_6): + # pd_op.divide: (2xf32) <- (2xf32, xf32) + divide_0 = paddle._C_ops.divide(data_3, data_4) + del data_3, data_4 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2xf32) <- (2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_0, float("0.5"), True) + del divide_0, full_0 + + # pd_op.cast: (2xi32) <- (2xf32) + cast_1 = paddle._C_ops.cast(scale_0, paddle.int32) + del scale_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.unsqueeze: (1x-1x-1x-1xf32) <- (-1x-1x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [data_0, data_1] + del data_0, data_1 + + # pd_op.bilinear_interp: (1x-1x-1x-1xf32) <- (1x-1x-1x-1xf32, None, [xi64, xi64], None) + bilinear_interp_0 = paddle._C_ops.bilinear_interp( + unsqueeze_0, + None, + combine_0, + None, + "NCHW", + -1, + -1, + -1, + [], + "bilinear", + False, + 0, + ) + del combine_0, unsqueeze_0 + + # pd_op.cast: (xi64) <- (xi32) + cast_2 = paddle._C_ops.cast(data_5, paddle.int64) + del data_5 + + # pd_op.cast: (xi64) <- (xi32) + cast_3 = paddle._C_ops.cast(data_6, paddle.int64) + del data_6 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [0, 0] + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [cast_2, cast_3] + del cast_2, cast_3 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (1x-1x-1x-1xf32) <- (1x-1x-1x-1xf32, 2xi64, 2xi64) + slice_0 = paddle._C_ops.slice( + bilinear_interp_0, [2, 3], full_int_array_1, stack_0, [-1, -1], [] + ) + del bilinear_interp_0, full_int_array_1, stack_0 + + # pd_op.cast: (2xi32) <- (2xi32) + cast_4 = paddle._C_ops.cast(cast_1, paddle.int32) + del cast_1 + + # pd_op.bilinear_interp: (1x-1x-1x-1xf32) <- (1x-1x-1x-1xf32, 2xi32, None, None) + bilinear_interp_1 = paddle._C_ops.bilinear_interp( + slice_0, cast_4, None, None, "NCHW", -1, -1, -1, [], "bilinear", False, 0 + ) + del cast_4, slice_0 + + # pd_op.squeeze: (-1x-1x-1xf32) <- (1x-1x-1x-1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(bilinear_interp_1, full_int_array_0) + del bilinear_interp_1, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_1 = paddle._C_ops.full( + [], float("0.5"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (-1x-1x-1xb) <- (-1x-1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(squeeze_0, full_1) + del full_1, squeeze_0 + + # pd_op.cast: (-1x-1x-1xui8) <- (-1x-1x-1xb) + cast_0 = paddle._C_ops.cast(greater_than_0, paddle.uint8) + del greater_than_0 + + return cast_0 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..08841a320 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +44b0e5a0abd31e56b8f6b2ef348d91bd50489c26e6a903a4aaf04592b7055754 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_net.json new file 
mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_7/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_7/input_meta.py new file mode 100644 index 000000000..4ffaaa55b --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_7/input_meta.py @@ -0,0 +1,36 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [65, 200, 232] + dtype = "float32" + max_val = float("1.0") + mean = float("0.184764") + std = float("0.357111") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2] + dtype = "float32" + data = [800.0, 901.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int32" + data = [800] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int32" + data = [901] diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_7/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_7/model.py new file mode 100644 index 000000000..32aa00f2b --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_7/model.py @@ -0,0 +1,102 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.divide: (2xf32) <- (2xf32, xf32) + divide_0 = paddle._C_ops.divide(data_1, data_2) + del data_1, data_2 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2xf32) <- (2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_0, float("0.5"), True) + del divide_0, full_0 + + # pd_op.cast: (2xi32) <- (2xf32) + cast_1 = paddle._C_ops.cast(scale_0, paddle.int32) + del scale_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.unsqueeze: (1x65x200x232xf32) <- (65x200x232xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.bilinear_interp: (1x65x800x928xf32) <- (1x65x200x232xf32, None, None, None) + bilinear_interp_0 = paddle._C_ops.bilinear_interp( + unsqueeze_0, + None, + None, + None, + "NCHW", + -1, + 800, + 928, + [], + "bilinear", + False, + 0, + ) + del unsqueeze_0 + + # pd_op.cast: (xi64) <- (xi32) + cast_2 = paddle._C_ops.cast(data_3, paddle.int64) + del data_3 + + # pd_op.cast: (xi64) <- (xi32) + cast_3 = paddle._C_ops.cast(data_4, paddle.int64) + del data_4 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [0, 0] + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [cast_2, cast_3] + del cast_2, cast_3 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.slice: (1x65x-1x-1xf32) <- (1x65x800x928xf32, 2xi64, 2xi64) + slice_0 = paddle._C_ops.slice( + bilinear_interp_0, [2, 3], full_int_array_1, stack_0, [-1, -1], [] + ) + del bilinear_interp_0, full_int_array_1, stack_0 + + # pd_op.cast: (2xi32) <- (2xi32) + cast_4 = paddle._C_ops.cast(cast_1, paddle.int32) + del cast_1 + + # pd_op.bilinear_interp: (1x65x-1x-1xf32) <- (1x65x-1x-1xf32, 2xi32, None, None) + bilinear_interp_1 = paddle._C_ops.bilinear_interp( + slice_0, cast_4, None, None, "NCHW", -1, -1, -1, [], 
"bilinear", False, 0 + ) + del cast_4, slice_0 + + # pd_op.squeeze: (65x-1x-1xf32) <- (1x65x-1x-1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(bilinear_interp_1, full_int_array_0) + del bilinear_interp_1, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_1 = paddle._C_ops.full( + [], float("0.5"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (65x-1x-1xb) <- (65x-1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(squeeze_0, full_1) + del full_1, squeeze_0 + + # pd_op.cast: (65x-1x-1xui8) <- (65x-1x-1xb) + cast_0 = paddle._C_ops.cast(greater_than_0, paddle.uint8) + del greater_than_0 + + return cast_0 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..028cfcf1d --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +7beb3812b29fbb67389359ab98b6d74fea785189036c6a05cc32bf8079176365 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_8/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_8/input_meta.py new file mode 100644 index 000000000..9ee0ac845 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_8/input_meta.py @@ -0,0 +1,26 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [3872, 2] + dtype = "float32" + max_val = float("0.752022") + mean = float("0.0131341") + std = float("0.0349963") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3872, 256] + dtype = "float32" + min_val = float("-17.5282") + max_val = float("5.95754") + mean = float("-1.36755") + std = float("2.31431") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2] + dtype = "float32" + data = [800.0, 901.0] diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_8/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_8/model.py new file mode 100644 index 000000000..8f95d7d29 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_8/model.py @@ -0,0 +1,174 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xf32) <- (2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_2, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + + # pd_op.cast: (xi32) <- (xf32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int32) + del slice_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xf32) <- (2xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_2, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del data_2 + + # pd_op.cast: (xi32) <- (xf32) + 
cast_1 = paddle._C_ops.cast(slice_1, paddle.int32) + del slice_1 + + # pd_op.full: (3872x2xf32) <- () + full_0 = paddle._C_ops.full( + [3872, 2], + float("0"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (xf32) <- () + full_1 = paddle._C_ops.full( + [], float("0.1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (3872x2xb) <- (3872x2xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_0, full_1) + del full_1 + + # pd_op.where: (3872x2xf32) <- (3872x2xb, 3872x2xf32, 3872x2xf32) + where_0 = paddle._C_ops.where(greater_than_0, data_0, full_0) + del full_0, greater_than_0 + + # pd_op.nonzero: (-1x2xi64) <- (3872x2xf32) + nonzero_0 = paddle._C_ops.nonzero(where_0) + del where_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.reshape: (7744xf32) <- (3872x2xf32, 1xi64) + reshape_0 = paddle._C_ops.reshape(data_0, full_int_array_3) + del data_0, full_int_array_3 + + # pd_op.shape64: (2xi64) <- (3872x256xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.slice: (1xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [] + ) + del shape64_0 + + # pd_op.cast: (1xi64) <- (1xi64) + cast_2 = paddle._C_ops.cast(slice_2, paddle.int64) + del slice_2 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1xi64, 1xi64]) <- (1xi64, 1xi64) + combine_0 = [cast_2, full_2] + del cast_2, full_2 + + # pd_op.concat: (2xi64) <- ([1xi64, 1xi64], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_3) + del combine_0 + + # pd_op.unsqueeze: (1x2xi64) <- (2xi64, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(concat_0, full_int_array_0) + del concat_0 + + # builtin.combine: ([-1x2xi64, 1x2xi64]) <- (-1x2xi64, 1x2xi64) + combine_1 = [nonzero_0, unsqueeze_0] + del nonzero_0, unsqueeze_0 + + # pd_op.concat: (-1x2xi64) <- ([-1x2xi64, 1x2xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 + + # pd_op.full: (1x256xf32) <- () + full_4 = paddle._C_ops.full( + [1, 256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # builtin.combine: ([3872x256xf32, 1x256xf32]) <- (3872x256xf32, 1x256xf32) + combine_2 = [data_1, full_4] + del data_1, full_4 + + # pd_op.concat: (3873x256xf32) <- ([3872x256xf32, 1x256xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_3) + del combine_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # builtin.combine: ([7744xf32, 1xf32]) <- (7744xf32, 1xf32) + combine_3 = [reshape_0, full_5] + del full_5, reshape_0 + + # pd_op.concat: (7745xf32) <- ([7744xf32, 1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_3) + del combine_3 + + # pd_op.slice: (-1xi64) <- (-1x2xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + concat_1, [1], full_int_array_1, full_int_array_2, [1], [1] + ) + del full_int_array_2 + + # pd_op.slice: (-1xi64) <- (-1x2xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del full_int_array_0, full_int_array_1 + + # pd_op.gather: (-1x256xf32) <- (3873x256xf32, -1xi64, 1xi32) + gather_0 = 
paddle._C_ops.gather(concat_2, slice_4, full_3) + del concat_2 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xi64) <- (-1xi64, 1xf32) + scale_0 = paddle._C_ops.scale(slice_4, full_6, float("0"), True) + del full_6, slice_4 + + # pd_op.add: (-1xi64) <- (-1xi64, -1xi64) + add_0 = paddle._C_ops.add(scale_0, slice_3) + del scale_0 + + # pd_op.gather: (-1xf32) <- (7745xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(concat_3, add_0, full_3) + del add_0, concat_1, concat_3, full_3, slice_3 + + return gather_0, gather_1, cast_0, cast_1 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..5734db492 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +874de3a15489cde34e66f862555ec31a7e4d9af002f657d235dcbb4266ad0395 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_net.json b/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_net.json new file mode 100644 index 000000000..cdd8699a9 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "SOLOv2", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_9/input_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_9/input_meta.py new file mode 100644 index 000000000..be2db4322 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_9/input_meta.py @@ -0,0 +1,40 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [304] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [200] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [3872, 2] + dtype = "float32" + max_val = float("0.758779") + mean = float("0.0131502") + std = float("0.0333869") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3872, 256] + dtype = "float32" + min_val = float("-16.7561") + max_val = float("5.4528") + mean = float("-1.35773") + std = float("2.32133") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2] + dtype = "float32" + data = [1205.0, 800.0] diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_9/model.py b/paddle_samples/PaddleX/SOLOv2/subgraph_9/model.py new file mode 100644 index 000000000..1b5a362c5 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_9/model.py @@ -0,0 +1,187 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xf32) <- (2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_4, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + + # pd_op.cast: (xi32) <- (xf32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int32) + del slice_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # 
pd_op.slice: (xf32) <- (2xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del data_4 + + # pd_op.cast: (xi32) <- (xf32) + cast_1 = paddle._C_ops.cast(slice_1, paddle.int32) + del slice_1 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(data_0, full_0, float("0"), True) + del data_0 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_1 = paddle._C_ops.scale(data_1, full_0, float("0"), True) + del data_1, full_0 + + # pd_op.full: (3872x2xf32) <- () + full_1 = paddle._C_ops.full( + [3872, 2], + float("0"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (xf32) <- () + full_2 = paddle._C_ops.full( + [], float("0.1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (3872x2xb) <- (3872x2xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_2) + del full_2 + + # pd_op.where: (3872x2xf32) <- (3872x2xb, 3872x2xf32, 3872x2xf32) + where_0 = paddle._C_ops.where(greater_than_0, data_2, full_1) + del full_1, greater_than_0 + + # pd_op.nonzero: (-1x2xi64) <- (3872x2xf32) + nonzero_0 = paddle._C_ops.nonzero(where_0) + del where_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.reshape: (7744xf32) <- (3872x2xf32, 1xi64) + reshape_0 = paddle._C_ops.reshape(data_2, full_int_array_3) + del data_2, full_int_array_3 + + # pd_op.shape64: (2xi64) <- (3872x256xf32) + shape64_0 = paddle._C_ops.shape64(data_3) + + # pd_op.slice: (1xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [] + ) + del shape64_0 + + # pd_op.cast: (1xi64) <- (1xi64) + cast_2 = paddle._C_ops.cast(slice_2, paddle.int64) + del slice_2 + + # pd_op.full: (1xi64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1xi64, 1xi64]) <- (1xi64, 1xi64) + combine_0 = [cast_2, full_3] + del cast_2, full_3 + + # pd_op.concat: (2xi64) <- ([1xi64, 1xi64], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_4) + del combine_0 + + # pd_op.unsqueeze: (1x2xi64) <- (2xi64, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(concat_0, full_int_array_0) + del concat_0 + + # builtin.combine: ([-1x2xi64, 1x2xi64]) <- (-1x2xi64, 1x2xi64) + combine_1 = [nonzero_0, unsqueeze_0] + del nonzero_0, unsqueeze_0 + + # pd_op.concat: (-1x2xi64) <- ([-1x2xi64, 1x2xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_4) + del combine_1 + + # pd_op.full: (1x256xf32) <- () + full_5 = paddle._C_ops.full( + [1, 256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # builtin.combine: ([3872x256xf32, 1x256xf32]) <- (3872x256xf32, 1x256xf32) + combine_2 = [data_3, full_5] + del data_3, full_5 + + # pd_op.concat: (3873x256xf32) <- ([3872x256xf32, 1x256xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_4) + del combine_2 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # builtin.combine: ([7744xf32, 1xf32]) <- (7744xf32, 1xf32) + combine_3 = [reshape_0, full_6] + del full_6, reshape_0 + + # pd_op.concat: 
(7745xf32) <- ([7744xf32, 1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_4) + del combine_3 + + # pd_op.slice: (-1xi64) <- (-1x2xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + concat_1, [1], full_int_array_1, full_int_array_2, [1], [1] + ) + del full_int_array_2 + + # pd_op.slice: (-1xi64) <- (-1x2xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del full_int_array_0, full_int_array_1 + + # pd_op.gather: (-1x256xf32) <- (3873x256xf32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(concat_2, slice_4, full_4) + del concat_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xi64) <- (-1xi64, 1xf32) + scale_2 = paddle._C_ops.scale(slice_4, full_7, float("0"), True) + del full_7, slice_4 + + # pd_op.add: (-1xi64) <- (-1xi64, -1xi64) + add_0 = paddle._C_ops.add(scale_2, slice_3) + del scale_2 + + # pd_op.gather: (-1xf32) <- (7745xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(concat_3, add_0, full_4) + del add_0, concat_1, concat_3, full_4, slice_3 + + return gather_0, gather_1, scale_0, scale_1, cast_0, cast_1 diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/SOLOv2/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..d50a2c1fc --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +2c54d953e620a4fb6482bb71e87cbc34eaa47c195f93ffa0634b877b4b7579d3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/input_meta.py new file mode 100644 index 000000000..e441b35bb --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int32" + data = [202] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-3.55259") + max_val = float("3.57388") + mean = float("-2.17619e-05") + std = float("0.999977") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [405] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/model.py new file mode 100644 index 000000000..27c98449e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/model.py @@ -0,0 +1,361 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + 
parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_0, paddle.int64) + del data_0 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_2) + del data_2 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("11"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, subtract_0, full_2] + del subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (11x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_0, paddle.float32 + ) + del full_3, stack_0 + + # pd_op.cast: (11x405x32xf32) <- (11x405x32xf32) + cast_1 = paddle._C_ops.cast(data_1, paddle.float32) + del data_1 + + # pd_op.cast: (11x-1x32xf32) <- (11x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([11x405x32xf32, 11x-1x32xf32]) <- (11x405x32xf32, 11x-1x32xf32) + combine_1 = [cast_1, cast_2] + del cast_1, cast_2 + + # pd_op.concat: (11x-1x32xf32) <- ([11x405x32xf32, 11x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_4) + del combine_1, full_4 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_1, floor_divide_1, cast_0, full_2] + del cast_0, floor_divide_1, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (11x-1x-1x32xf32) <- (11x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del concat_0, stack_1 + + # pd_op.transpose: (11x32x-1x-1xf32) <- (11x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # 
pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + del conv2d_2, reshape_3 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + del conv2d_3, reshape_4 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + del conv2d_4, reshape_5 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, transpose_0 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + del conv2d_5, reshape_6 + + # builtin.combine: ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) <- (11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (11x64x-1x-1x6xf32) <- ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (11x64x-1x-1xf32) <- (11x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + del stack_2 + + # pd_op.gelu: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + del mean_0 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- 
(11x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + del conv2d_6, reshape_7 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + del conv2d_7, reshape_8 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + del conv2d_8, reshape_9 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + del conv2d_9, reshape_10 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + del conv2d_10, reshape_11 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + del conv2d_11, reshape_12 + + # builtin.combine: ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) <- (11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (11x32x-1x-1x6xf32) <- ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (11x32x-1x-1xf32) <- (11x32x-1x-1x6xf32, 1xi64) + mean_1 = 
paddle._C_ops.mean(stack_3, full_int_array_1, False) + del full_int_array_1, stack_3 + + # pd_op.transpose: (11x-1x-1x32xf32) <- (11x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [11, -1, 32] + + # pd_op.reshape: (11x-1x32xf32) <- (11x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2, transpose_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [405] + + # pd_op.slice: (11x-1x32xf32) <- (11x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del ( + full_int_array_3, + full_int_array_4, + full_with_tensor_0, + multiply_0, + reshape_13, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8dee6962e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.0865714") + max_val = float("0.0766605") + mean = float("1.3109e-05") + std = float("0.0160787") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.0963141") + max_val = float("0.0864051") + mean = float("-9.67914e-05") + std = float("0.0196263") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.113538") + max_val = float("0.107465") + mean = float("-5.53685e-05") + std = float("0.0252383") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.143487") + max_val = float("0.144777") + mean = float("0.000162487") + std = float("0.0356301") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.257152") + max_val = float("0.254454") + mean = float("-0.00053034") + std = float("0.0586078") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.590583") 
+ max_val = float("0.573722") + mean = float("0.00757032") + std = float("0.175691") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.107155") + max_val = float("0.109478") + mean = float("-9.5172e-06") + std = float("0.022672") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.126869") + max_val = float("0.116599") + mean = float("-9.2705e-05") + std = float("0.0277662") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.159823") + max_val = float("0.143485") + mean = float("-1.04451e-05") + std = float("0.0357584") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.218304") + max_val = float("0.200231") + mean = float("0.000425884") + std = float("0.0499644") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.343896") + max_val = float("0.408045") + mean = float("0.000283063") + std = float("0.0837488") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.998181") + max_val = float("0.896186") + mean = float("-0.00344661") + std = float("0.251472") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..c2f1152de --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +e92156205e42b2441b82d71a0940d5ef74197298882d280cad587da9cad0bbb8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/input_meta.py new file mode 
100644 index 000000000..868a06037 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/input_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [11, 405, 3] + dtype = "float32" + min_val = float("-0.797899") + max_val = float("12.5232") + mean = float("7.45058e-09") + std = float("1.0") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/model.py new file mode 100644 index 000000000..1d440c3ac --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/model.py @@ -0,0 +1,102 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1): + # pd_op.transpose: (11x3x405xf32) <- (11x405x3xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 4] + + # pd_op.unsqueeze: (11x3x405x1x1xf32) <- (11x3x405xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_0) + del transpose_0 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_1 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (11x3x407x1x1xf32) <- (11x3x405x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_1, "circular", float("0"), "NCDHW" + ) + del full_int_array_1, unsqueeze_0 + + # pd_op.squeeze: (11x3x407xf32) <- (11x3x407x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_0) + del full_int_array_0, pad3d_0 + + # pd_op.assign: (32x3x3xf32) <- (32x3x3xf32) + assign_0 = parameter_0 + del parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_2 + + # pd_op.unsqueeze: (32x3x1x3xf32) <- (32x3x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_2) + + # pd_op.unsqueeze: (11x3x1x407xf32) <- (11x3x407xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_2) + del squeeze_0 + + # pd_op.conv2d: (11x32x1x405xf32) <- (11x3x1x407xf32, 32x3x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (11x32x405xf32) <- (11x32x1x405xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_2) + + # pd_op.transpose: (11x405x32xf32) <- (11x32x405xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [405] + + # pd_op.slice: (1x405x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_1, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del data_1, full_int_array_3, full_int_array_4 + + # pd_op.add: (11x405x32xf32) <- (11x405x32xf32, 1x405x32xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (11x405x32xf32, 11x405x32xui8) <- (11x405x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_0, False, "upscale_in_train", 0, 
False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del ( + add_0, + assign_0, + assign_1, + conv2d_0, + full_0, + full_int_array_2, + slice_0, + transpose_1, + unsqueeze_1, + unsqueeze_2, + ) + + return dropout_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/weight_meta.py new file mode 100644 index 000000000..1697e0f93 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/weight_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32, 3, 3] + dtype = "float32" + min_val = float("-1.19228") + max_val = float("1.47394") + mean = float("0.0485314") + std = float("0.465735") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..dbf5d1680 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +0d01b53e37ea4d00e7ca170493dcc1a034955a35fe5920d8c49798302d01457b \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/input_meta.py new file mode 100644 index 000000000..fbdfea804 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-3.61332") + max_val = float("3.75615") + mean = float("-2.11438e-05") + std = float("0.999978") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int32" + data = [405] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [405] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/model.py new file mode 100644 index 000000000..75a67072b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/model.py @@ -0,0 +1,331 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_1, paddle.int64) + del data_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + del data_2 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("11"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, 
paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, floor_divide_0, cast_0, full_1] + del cast_0, floor_divide_0, full_0, full_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (11x-1x-1x32xf32) <- (11x405x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.transpose: (11x32x-1x-1xf32) <- (11x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 
11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) <- (11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32) + combine_1 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (11x64x-1x-1x6xf32) <- ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (11x64x-1x-1xf32) <- (11x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + + # pd_op.gelu: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + 
add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) <- (11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32) + combine_2 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (11x32x-1x-1x6xf32) <- ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.mean: (11x32x-1x-1xf32) <- (11x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.transpose: (11x-1x-1x32xf32) <- (11x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [11, -1, 32] + + # pd_op.reshape: (11x-1x32xf32) <- (11x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [405] + + # pd_op.slice: (11x-1x32xf32) <- (11x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_int_array_1, + full_int_array_3, + full_int_array_4, + gelu_0, + mean_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_1, + stack_2, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8dee6962e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.0865714") + max_val = float("0.0766605") + mean = float("1.3109e-05") + std = float("0.0160787") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.0963141") + max_val = float("0.0864051") + mean = float("-9.67914e-05") + std = float("0.0196263") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.113538") + max_val = float("0.107465") + mean = float("-5.53685e-05") + std = float("0.0252383") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + 
shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.143487") + max_val = float("0.144777") + mean = float("0.000162487") + std = float("0.0356301") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.257152") + max_val = float("0.254454") + mean = float("-0.00053034") + std = float("0.0586078") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.590583") + max_val = float("0.573722") + mean = float("0.00757032") + std = float("0.175691") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.107155") + max_val = float("0.109478") + mean = float("-9.5172e-06") + std = float("0.022672") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.126869") + max_val = float("0.116599") + mean = float("-9.2705e-05") + std = float("0.0277662") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.159823") + max_val = float("0.143485") + mean = float("-1.04451e-05") + std = float("0.0357584") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.218304") + max_val = float("0.200231") + mean = float("0.000425884") + std = float("0.0499644") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.343896") + max_val = float("0.408045") + mean = float("0.000283063") + std = float("0.0837488") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = 
float("-0.998181") + max_val = float("0.896186") + mean = float("-0.00344661") + std = float("0.251472") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..8b0cdf304 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +0bdb4aa40b9c866241dde20d16f12a08dfdef9fcb8d56597fdae08744f673964 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/input_meta.py new file mode 100644 index 000000000..83155ffd2 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/input_meta.py @@ -0,0 +1,12 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [405] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3] + dtype = "int32" + data = [405, 202, 101] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/model.py new file mode 100644 index 000000000..fe028392a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/model.py @@ -0,0 +1,37 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xi32) <- (3xi32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del data_1, full_int_array_0, full_int_array_1 + + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) + + # pd_op.remainder: (xi64) <- (xi64, xi64) + remainder_0 = paddle._C_ops.remainder(data_0, cast_0) + del cast_0, data_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_0) + del full_0, remainder_0, slice_0 + + return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..055ebcca9 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +244321bc84fa4ab6f485de7a0c7146beabf59c446ac2dca06dfc26ec31964ba7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ 
b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/input_meta.py new file mode 100644 index 000000000..34265adc4 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int32" + data = [101] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-3.61332") + max_val = float("3.75615") + mean = float("-2.11438e-05") + std = float("0.999978") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [405] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/model.py new file mode 100644 index 000000000..e3284c566 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/model.py @@ -0,0 +1,394 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_0, paddle.int64) + del data_0 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_2) + del data_2 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("11"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, subtract_0, full_2] + del subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (11x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_0, paddle.float32 + ) + del full_3, stack_0 + + # pd_op.cast: (11x405x32xf32) <- (11x405x32xf32) + cast_1 = paddle._C_ops.cast(data_1, paddle.float32) + del data_1 + + # pd_op.cast: (11x-1x32xf32) <- (11x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], 
float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([11x405x32xf32, 11x-1x32xf32]) <- (11x405x32xf32, 11x-1x32xf32) + combine_1 = [cast_1, cast_2] + + # pd_op.concat: (11x-1x32xf32) <- ([11x405x32xf32, 11x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_4) + del combine_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_1, floor_divide_1, cast_0, full_2] + del cast_0, floor_divide_1, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (11x-1x-1x32xf32) <- (11x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del stack_1 + + # pd_op.transpose: (11x32x-1x-1xf32) <- (11x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, 
parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) <- (11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (11x64x-1x-1x6xf32) <- ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (11x64x-1x-1xf32) <- (11x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.gelu: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: 
(11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) <- (11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (11x32x-1x-1x6xf32) <- ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (11x32x-1x-1xf32) <- (11x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + + # pd_op.transpose: (11x-1x-1x32xf32) <- (11x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [11, -1, 32] + + # pd_op.reshape: (11x-1x32xf32) <- (11x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [405] + + # pd_op.slice: (11x-1x32xf32) <- (11x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + cast_1, + cast_2, + concat_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_4, + full_int_array_1, + full_int_array_3, + full_int_array_4, + full_with_tensor_0, + gelu_0, + mean_0, + multiply_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_2, + stack_3, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8dee6962e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.0865714") + max_val = float("0.0766605") + mean = float("1.3109e-05") + std = float("0.0160787") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.0963141") + max_val = float("0.0864051") + mean = float("-9.67914e-05") + std = float("0.0196263") + data 
= None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.113538") + max_val = float("0.107465") + mean = float("-5.53685e-05") + std = float("0.0252383") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.143487") + max_val = float("0.144777") + mean = float("0.000162487") + std = float("0.0356301") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.257152") + max_val = float("0.254454") + mean = float("-0.00053034") + std = float("0.0586078") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.590583") + max_val = float("0.573722") + mean = float("0.00757032") + std = float("0.175691") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.107155") + max_val = float("0.109478") + mean = float("-9.5172e-06") + std = float("0.022672") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.126869") + max_val = float("0.116599") + mean = float("-9.2705e-05") + std = float("0.0277662") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.159823") + max_val = float("0.143485") + mean = float("-1.04451e-05") + std = float("0.0357584") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.218304") + max_val = float("0.200231") + mean = float("0.000425884") + std = float("0.0499644") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = 
"parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.343896") + max_val = float("0.408045") + mean = float("0.000283063") + std = float("0.0837488") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.998181") + max_val = float("0.896186") + mean = float("-0.00344661") + std = float("0.251472") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..a23694b72 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +7c2b02b97365bcd245d9aeb7d1470bc140da19e49b4d21fdc1b40a8aaaf7f68f \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/input_meta.py new file mode 100644 index 000000000..ba8c678ef --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/input_meta.py @@ -0,0 +1,18 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-3.49711") + max_val = float("3.56614") + mean = float("-2.10448e-05") + std = float("0.999979") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [11, 405] + dtype = "int32" + min_val = 1 + max_val = 1 + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/model.py new file mode 100644 index 000000000..3a6816f8c --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/model.py @@ -0,0 +1,57 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, parameter_1, data_0, data_1): + # pd_op.gelu: (11x405x32xf32) <- (11x405x32xf32) + gelu_0 = paddle._C_ops.gelu(data_0, False) + del data_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (11x405x32xf32, 11x405x32xui8) <- (11x405x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_0, gelu_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (11x405x1xi32) <- (11x405xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.cast: (11x405x1xf32) <- (11x405x1xi32) + cast_0 = paddle._C_ops.cast(unsqueeze_0, paddle.float32) + del unsqueeze_0 + + # pd_op.multiply: (11x405x32xf32) <- (11x405x32xf32, 11x405x1xf32) + multiply_0 = paddle._C_ops.multiply(dropout_0, cast_0) + del cast_0, dropout_0 
+ + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [11, -1] + + # pd_op.reshape: (11x12960xf32) <- (11x405x32xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(multiply_0, full_int_array_1) + del full_int_array_1, multiply_0 + + # pd_op.matmul: (11x2xf32) <- (11x12960xf32, 12960x2xf32) + matmul_0 = paddle._C_ops.matmul(reshape_0, parameter_1, False, False) + del parameter_1, reshape_0 + + # pd_op.add: (11x2xf32) <- (11x2xf32, 2xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_0) + del matmul_0, parameter_0 + + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/weight_meta.py new file mode 100644 index 000000000..7f04ee443 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/weight_meta.py @@ -0,0 +1,18 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [2] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [12960, 2] + dtype = "float32" + min_val = float("-0.00888354") + max_val = float("0.00888127") + mean = float("-3.3137e-05") + std = float("0.00504627") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..5a2bec85d --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +e5db7af1e6377319ccf8fc48e81b13a7c3e2bbc34b6c3c7da121030977da3abc \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/input_meta.py new file mode 100644 index 000000000..dce32d0ca --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/input_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [11, 405, 3] + dtype = "float32" + min_val = float("-0.797899") + max_val = float("12.5232") + mean = float("0.0319939") + std = float("0.988225") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/model.py new file mode 100644 index 000000000..497e6cee4 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/model.py @@ -0,0 +1,92 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1): + # pd_op.transpose: (11x3x405xf32) <- (11x405x3xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 4] + + # pd_op.unsqueeze: (11x3x405x1x1xf32) <- (11x3x405xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_0) + del transpose_0 + + # 
pd_op.full_int_array: (6xi64) <- () + full_int_array_1 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (11x3x407x1x1xf32) <- (11x3x405x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_1, "circular", float("0"), "NCDHW" + ) + del full_int_array_1, unsqueeze_0 + + # pd_op.squeeze: (11x3x407xf32) <- (11x3x407x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_0) + del full_int_array_0, pad3d_0 + + # pd_op.assign: (32x3x3xf32) <- (32x3x3xf32) + assign_0 = parameter_0 + del parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.unsqueeze: (32x3x1x3xf32) <- (32x3x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_2) + del assign_0 + + # pd_op.unsqueeze: (11x3x1x407xf32) <- (11x3x407xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_2) + del squeeze_0 + + # pd_op.conv2d: (11x32x1x405xf32) <- (11x3x1x407xf32, 32x3x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_1, unsqueeze_2 + + # pd_op.squeeze: (11x32x405xf32) <- (11x32x1x405xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_2) + del conv2d_0, full_int_array_2 + + # pd_op.transpose: (11x405x32xf32) <- (11x32x405xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [405] + + # pd_op.slice: (1x405x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_1, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del data_1, full_int_array_3, full_int_array_4 + + # pd_op.add: (11x405x32xf32) <- (11x405x32xf32, 1x405x32xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_0) + del slice_0, transpose_1 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (11x405x32xf32, 11x405x32xui8) <- (11x405x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_0, full_0 + + return dropout_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/weight_meta.py new file mode 100644 index 000000000..1e22233da --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/weight_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32, 3, 3] + dtype = "float32" + min_val = float("-1.19218") + max_val = float("1.47384") + mean = float("0.0485418") + std = float("0.465731") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..5b9740b6d --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +c788ee53ec7446be5d13146cc01d890972d8ea81ab50143e201ddeb574745fb3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + 
"framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py new file mode 100644 index 000000000..e5ae7474f --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3] + dtype = "int32" + data = [405, 202, 101] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [405] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/model.py new file mode 100644 index 000000000..cd8f1c0dd --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/model.py @@ -0,0 +1,54 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(data_0, full_0, float("1"), True) + del full_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_0 = [data_0] + del data_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_1 = [scale_0] + del scale_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (xi32) <- (3xi32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice(data_1, [0], stack_0, stack_1, [-1], [0]) + del data_1, stack_0, stack_1 + + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) + + # pd_op.remainder: (xi64) <- (xi64, xi64) + remainder_0 = paddle._C_ops.remainder(data_2, cast_0) + del cast_0, data_2 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_1) + del full_1, remainder_0, slice_0 + + return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..920d7b232 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +e1ecbf15f7c366dbf3fdc8b7ebf2dcad1dacd500344631ae7716add712a41849 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/input_meta.py new file mode 100644 index 000000000..7faa1c1ee --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-27.8407") + max_val = float("26.4174") + mean = float("0.28191") + std = float("1.66905") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int32" + data = [405] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [405] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/model.py new file mode 100644 index 000000000..096d6f596 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/model.py @@ -0,0 +1,297 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_1, paddle.int64) + del data_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + del data_2 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("11"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, floor_divide_0, cast_0, full_1] + del cast_0, floor_divide_0, full_0, full_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (11x-1x-1x32xf32) <- (11x405x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.transpose: (11x32x-1x-1xf32) <- (11x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = 
paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + del conv2d_2, reshape_3 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + del conv2d_3, reshape_4 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + del conv2d_4, reshape_5 + + # pd_op.conv2d: (11x64x-1x-1xf32) <- (11x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, transpose_0 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + del conv2d_5, reshape_6 + + # builtin.combine: ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) <- (11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32) + combine_1 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (11x64x-1x-1x6xf32) <- ([11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32, 11x64x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (11x64x-1x-1xf32) <- (11x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + del stack_1 + + # pd_op.gelu: (11x64x-1x-1xf32) <- (11x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + del mean_0 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + del conv2d_6, reshape_7 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, 
[1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + del conv2d_7, reshape_8 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + del conv2d_8, reshape_9 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + del conv2d_9, reshape_10 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + del conv2d_10, reshape_11 + + # pd_op.conv2d: (11x32x-1x-1xf32) <- (11x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (11x32x-1x-1xf32) <- (11x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + del conv2d_11, reshape_12 + + # builtin.combine: ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) <- (11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32) + combine_2 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (11x32x-1x-1x6xf32) <- ([11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32, 11x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.mean: (11x32x-1x-1xf32) <- (11x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + del full_int_array_1, stack_2 + + # pd_op.transpose: (11x-1x-1x32xf32) <- (11x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [11, -1, 32] + + # pd_op.reshape: (11x-1x32xf32) <- (11x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2, transpose_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_4 = [405] + + # pd_op.slice: (11x-1x32xf32) <- (11x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_3, full_int_array_4, reshape_13 + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/weight_meta.py new file mode 100644 index 000000000..49ec9e4ef --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.0722999") + max_val = float("0.0691823") + mean = float("7.56387e-05") + std = float("0.0161078") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.0864089") + max_val = float("0.0842563") + mean = float("-1.45321e-05") + std = float("0.0196753") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.103665") + max_val = float("0.124736") + mean = float("4.20751e-05") + std = float("0.0252303") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.146055") + max_val = float("0.13667") + mean = float("0.000257735") + std = float("0.0352745") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.247586") + max_val = float("0.248785") + mean = float("0.000274214") + std = float("0.0595385") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.555084") + max_val = float("0.565298") + mean = float("0.00127117") + std = float("0.179279") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.102371") + max_val = float("0.105998") + mean = float("-3.13668e-05") + std = float("0.0227337") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.112842") + max_val = float("0.119601") + mean = float("-1.84285e-05") + std = float("0.0278071") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.159775") + max_val = float("0.158136") + mean = float("-5.40554e-06") + std = float("0.0357246") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.210519") + max_val = float("0.219765") + mean = float("0.000220505") + std = float("0.0499099") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.330815") + max_val = float("0.338968") + mean = float("-0.00136894") + std = float("0.0835814") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.865746") + max_val = float("0.720918") + mean = float("-0.00612444") + std = float("0.249593") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..783ecfcf7 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +fe721ccf1cd65da315d7193de6608142ca1ddfd9abb684448865f22f76f861b0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_net.json b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_net.json new file mode 100644 index 000000000..ffc4d714a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_cls", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/input_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/input_meta.py new file mode 100644 index 000000000..1fa765c4d --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/input_meta.py @@ -0,0 +1,83 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-2.00812") + max_val = float("1.3565") + mean = float("0.00458939") + std = float("0.106809") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-1.77632") + max_val = float("1.73278") + mean = float("0.0131883") + std = float("0.168296") + data = None + + +class 
Program_weight_tensor_data_2: + name = "data_2" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-2.02697") + max_val = float("2.16153") + mean = float("0.021289") + std = float("0.236246") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [11, 3] + dtype = "float32" + data = [ + 84.321, + 62.2965, + 72.8462, + 40.6986, + 31.2876, + 22.2545, + 42.2488, + 31.0237, + 20.7795, + 100.057, + 90.3915, + 101.321, + 45.0697, + 34.1595, + 46.903, + 130.544, + 87.1848, + 49.0174, + 56.664, + 32.6662, + 28.9379, + 77.497, + 77.0318, + 49.8858, + 73.808, + 46.5805, + 52.4966, + 62.9589, + 43.0775, + 80.6435, + 51.517, + 36.4697, + 34.6829, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [11, 405, 32] + dtype = "float32" + min_val = float("-30.9414") + max_val = float("29.3567") + mean = float("0.268828") + std = float("1.79071") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/model.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/model.py new file mode 100644 index 000000000..c0c32defc --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/model.py @@ -0,0 +1,65 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # builtin.combine: ([11x405x32xf32, 11x405x32xf32, 11x405x32xf32]) <- (11x405x32xf32, 11x405x32xf32, 11x405x32xf32) + combine_0 = [data_0, data_1, data_2] + del data_0, data_1, data_2 + + # pd_op.stack: (11x405x32x3xf32) <- ([11x405x32xf32, 11x405x32xf32, 11x405x32xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.softmax: (11x3xf32) <- (11x3xf32) + softmax_0 = paddle._C_ops.softmax(data_3, 1) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.unsqueeze: (11x1x3xf32) <- (11x3xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(softmax_0, full_int_array_0) + + # pd_op.unsqueeze: (11x1x1x3xf32) <- (11x1x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_0) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, 405, 32, 1] + + # pd_op.tile: (11x405x32x3xf32) <- (11x1x1x3xf32, 4xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_1, full_int_array_1) + + # pd_op.multiply: (11x405x32x3xf32) <- (11x405x32x3xf32, 11x405x32x3xf32) + multiply_0 = paddle._C_ops.multiply(stack_0, tile_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-1] + + # pd_op.sum: (11x405x32xf32) <- (11x405x32x3xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_2, None, False) + + # pd_op.add: (11x405x32xf32) <- (11x405x32xf32, 11x405x32xf32) + add_0 = paddle._C_ops.add(sum_0, data_4) + del ( + assign_0, + data_4, + full_int_array_0, + full_int_array_1, + full_int_array_2, + multiply_0, + softmax_0, + stack_0, + sum_0, + tile_0, + unsqueeze_0, + unsqueeze_1, + ) + + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + From f860a940bd055e57322a27d2efa6fa7bc0488435 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 10 Sep 2025 17:15:23 +0800 Subject: [PATCH 2/5] Add cv models with more than 10 subgraphs. 
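
Each subgraph directory added here can be exercised on its own: model.py defines a GraphModule whose forward takes the tensors described in weight_meta.py followed by those in input_meta.py. The driver below is only an illustrative sketch, not part of the generated samples: the helper names (run_subgraph, _load, _make_tensor), the use of random stand-in tensors that loosely follow the recorded mean/std, and the assumed argument order (parameters in index order, then data tensors in index order, matching the generated forward signatures) are all assumptions.

    import importlib.util
    import paddle

    def _load(path, name):
        # Load a generated module (model.py, input_meta.py, weight_meta.py) by file path,
        # so no package __init__.py is required.
        spec = importlib.util.spec_from_file_location(name, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    def _make_tensor(meta):
        # Build a tensor from a Program_weight_tensor_* meta class.
        if getattr(meta, "data", None) is not None:
            # Exact values are recorded; reshape covers flat lists and 0-D tensors.
            return paddle.to_tensor(meta.data, dtype=meta.dtype).reshape(meta.shape)
        if meta.dtype.startswith("float"):
            # Random stand-in roughly following the recorded statistics (assumption).
            t = paddle.randn(meta.shape, dtype=meta.dtype)
            return t * getattr(meta, "std", 1.0) + getattr(meta, "mean", 0.0)
        # Integer tensors without explicit data: fall back to the recorded minimum.
        return paddle.full(meta.shape, getattr(meta, "min_val", 0), dtype=meta.dtype)

    def run_subgraph(subgraph_dir):
        model = _load(f"{subgraph_dir}/model.py", "model")
        inputs = _load(f"{subgraph_dir}/input_meta.py", "input_meta")
        weights = _load(f"{subgraph_dir}/weight_meta.py", "weight_meta")

        def metas(mod, prefix):
            names = sorted(
                (n for n in vars(mod) if n.startswith(prefix)),
                key=lambda n: int(n.rsplit("_", 1)[1]),
            )
            return [_make_tensor(getattr(mod, n)) for n in names]

        # Assumed calling convention: parameter_0..parameter_N, then data_0..data_M.
        args = metas(weights, "Program_weight_tensor_parameter_") + metas(
            inputs, "Program_weight_tensor_data_"
        )
        return model.GraphModule()(*args)

    # e.g. run_subgraph("paddle_samples/PaddleX/TimesNet_cls/subgraph_9")

The random stand-ins are only meant to check that a subgraph executes end to end with the recorded shapes and dtypes; numerical comparisons against a reference run would need the original weights and inputs.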
--- .../Nonstationary/subgraph_0/graph_hash.txt | 1 + .../Nonstationary/subgraph_0/graph_net.json | 6 + .../Nonstationary/subgraph_0/input_meta.py | 29 + .../PaddleX/Nonstationary/subgraph_0/model.py | 134 + .../subgraph_0}/weight_meta.py | 0 .../Nonstationary/subgraph_1/graph_hash.txt | 1 + .../Nonstationary/subgraph_1/graph_net.json | 6 + .../Nonstationary/subgraph_1/input_meta.py | 18 + .../PaddleX/Nonstationary/subgraph_1/model.py | 73 + .../Nonstationary/subgraph_1/weight_meta.py | 1 + .../Nonstationary/subgraph_10/graph_hash.txt | 1 + .../Nonstationary/subgraph_10/graph_net.json | 6 + .../Nonstationary/subgraph_10/input_meta.py | 49 + .../Nonstationary/subgraph_10/model.py | 1586 +++ .../Nonstationary/subgraph_10/weight_meta.py | 548 + .../Nonstationary/subgraph_11/graph_hash.txt | 1 + .../Nonstationary/subgraph_11/graph_net.json | 6 + .../Nonstationary/subgraph_11/input_meta.py | 49 + .../Nonstationary/subgraph_11/model.py | 1322 +++ .../Nonstationary/subgraph_11/weight_meta.py | 548 + .../Nonstationary/subgraph_2/graph_hash.txt | 1 + .../Nonstationary/subgraph_2/graph_net.json | 6 + .../Nonstationary/subgraph_2/input_meta.py | 49 + .../PaddleX/Nonstationary/subgraph_2/model.py | 544 + .../Nonstationary/subgraph_2/weight_meta.py | 196 + .../Nonstationary/subgraph_3/graph_hash.txt | 1 + .../Nonstationary/subgraph_3/graph_net.json | 6 + .../Nonstationary/subgraph_3/input_meta.py | 66 + .../PaddleX/Nonstationary/subgraph_3/model.py | 444 + .../Nonstationary/subgraph_3/weight_meta.py | 173 + .../Nonstationary/subgraph_4/graph_hash.txt | 1 + .../Nonstationary/subgraph_4/graph_net.json | 6 + .../Nonstationary/subgraph_4/input_meta.py | 38 + .../PaddleX/Nonstationary/subgraph_4/model.py | 80 + .../Nonstationary/subgraph_4/weight_meta.py | 64 + .../Nonstationary/subgraph_5/graph_hash.txt | 1 + .../Nonstationary/subgraph_5/graph_net.json | 6 + .../Nonstationary/subgraph_5/input_meta.py | 16 + .../PaddleX/Nonstationary/subgraph_5/model.py | 39 + .../Nonstationary/subgraph_5/weight_meta.py | 20 + .../Nonstationary/subgraph_6/graph_hash.txt | 1 + .../Nonstationary/subgraph_6/graph_net.json | 6 + .../Nonstationary/subgraph_6/input_meta.py | 66 + .../PaddleX/Nonstationary/subgraph_6/model.py | 445 + .../Nonstationary/subgraph_6/weight_meta.py | 196 + .../Nonstationary/subgraph_7/graph_hash.txt | 1 + .../Nonstationary/subgraph_7/graph_net.json | 6 + .../Nonstationary/subgraph_7/input_meta.py | 31 + .../PaddleX/Nonstationary/subgraph_7/model.py | 57 + .../Nonstationary/subgraph_7/weight_meta.py | 64 + .../Nonstationary/subgraph_8/graph_hash.txt | 1 + .../Nonstationary/subgraph_8/graph_net.json | 6 + .../Nonstationary/subgraph_8/input_meta.py | 42 + .../PaddleX/Nonstationary/subgraph_8/model.py | 1365 +++ .../Nonstationary/subgraph_8/weight_meta.py | 507 + .../Nonstationary/subgraph_9/graph_hash.txt | 1 + .../Nonstationary/subgraph_9/graph_net.json | 6 + .../Nonstationary/subgraph_9/input_meta.py | 27 + .../PaddleX/Nonstationary/subgraph_9/model.py | 67 + .../Nonstationary/subgraph_9/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_0/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_0/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_0/input_meta.py | 43 + .../PP-DocLayout-M/subgraph_0/model.py | 299 + .../PP-DocLayout-M/subgraph_0/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_1/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_1/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_1/input_meta.py | 118 + .../PP-DocLayout-M/subgraph_1/model.py | 219 + .../PP-DocLayout-M/subgraph_1/weight_meta.py | 1 + 
.../PP-DocLayout-M/subgraph_10/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_10/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_10/input_meta.py | 16 + .../PP-DocLayout-M/subgraph_10/model.py | 6569 ++++++++++++ .../PP-DocLayout-M/subgraph_10/weight_meta.py | 5620 +++++++++++ .../PP-DocLayout-M/subgraph_11/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_11/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_11/input_meta.py | 9 + .../PP-DocLayout-M/subgraph_11/model.py | 482 + .../PP-DocLayout-M/subgraph_11/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_12/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_12/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_12/input_meta.py | 94 + .../PP-DocLayout-M/subgraph_12/model.py | 262 + .../PP-DocLayout-M/subgraph_12/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_13/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_13/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_13/input_meta.py | 26 + .../PP-DocLayout-M/subgraph_13/model.py | 111 + .../PP-DocLayout-M/subgraph_13/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_14/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_14/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_14/input_meta.py | 70 + .../PP-DocLayout-M/subgraph_14/model.py | 317 + .../PP-DocLayout-M/subgraph_14/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_15/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_15/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_15/input_meta.py | 50 + .../PP-DocLayout-M/subgraph_15/model.py | 298 + .../PP-DocLayout-M/subgraph_15/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_16/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_16/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_16/input_meta.py | 37 + .../PP-DocLayout-M/subgraph_16/model.py | 35 + .../PP-DocLayout-M/subgraph_16/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_17/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_17/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_17/input_meta.py | 52 + .../PP-DocLayout-M/subgraph_17/model.py | 247 + .../PP-DocLayout-M/subgraph_17/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_2/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_2/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_2/input_meta.py | 98 + .../PP-DocLayout-M/subgraph_2/model.py | 157 + .../PP-DocLayout-M/subgraph_2/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_3/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_3/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_3/input_meta.py | 40 + .../PP-DocLayout-M/subgraph_3/model.py | 115 + .../PP-DocLayout-M/subgraph_3/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_4/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_4/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_4/input_meta.py | 92 + .../PP-DocLayout-M/subgraph_4/model.py | 109 + .../PP-DocLayout-M/subgraph_4/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_5/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_5/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_5/input_meta.py | 49 + .../PP-DocLayout-M/subgraph_5/model.py | 160 + .../PP-DocLayout-M/subgraph_5/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_6/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_6/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_6/input_meta.py | 16 + .../PP-DocLayout-M/subgraph_6/model.py | 263 + .../PP-DocLayout-M/subgraph_6/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_7/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_7/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_7/input_meta.py | 65 + .../PP-DocLayout-M/subgraph_7/model.py | 593 ++ 
.../PP-DocLayout-M/subgraph_7/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_8/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_8/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_8/input_meta.py | 23 + .../PP-DocLayout-M/subgraph_8/model.py | 288 + .../PP-DocLayout-M/subgraph_8/weight_meta.py | 1 + .../PP-DocLayout-M/subgraph_9/graph_hash.txt | 1 + .../PP-DocLayout-M/subgraph_9/graph_net.json | 6 + .../PP-DocLayout-M/subgraph_9/input_meta.py | 16 + .../PP-DocLayout-M/subgraph_9/model.py | 6406 ++++++++++++ .../PP-DocLayout-M/subgraph_9/weight_meta.py | 5620 +++++++++++ .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../subgraph_0/input_meta.py | 9 + .../PP-OCRv3_mobile_rec/subgraph_0/model.py | 1526 +++ .../subgraph_0/weight_meta.py | 1505 +++ .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../subgraph_1/input_meta.py | 16 + .../PP-OCRv3_mobile_rec/subgraph_1/model.py | 711 ++ .../subgraph_1/weight_meta.py | 117 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 19 + .../PP-OCRv3_mobile_rec/subgraph_10/model.py | 61 + .../subgraph_10/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../subgraph_2/input_meta.py | 18 + .../PP-OCRv3_mobile_rec/subgraph_2/model.py | 32 + .../subgraph_2/weight_meta.py | 9 + .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../subgraph_3/input_meta.py | 23 + .../PP-OCRv3_mobile_rec/subgraph_3/model.py | 61 + .../subgraph_3/weight_meta.py | 1 + .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../subgraph_4/input_meta.py | 20 + .../PP-OCRv3_mobile_rec/subgraph_4/model.py | 232 + .../subgraph_4/weight_meta.py | 159 + .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../subgraph_5/input_meta.py | 9 + .../PP-OCRv3_mobile_rec/subgraph_5/model.py | 2013 ++++ .../subgraph_5/weight_meta.py | 2064 ++++ .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../subgraph_6/input_meta.py | 9 + .../PP-OCRv3_mobile_rec/subgraph_6/model.py | 2029 ++++ .../subgraph_6/weight_meta.py | 2064 ++++ .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../subgraph_7/input_meta.py | 77 + .../PP-OCRv3_mobile_rec/subgraph_7/model.py | 147 + .../subgraph_7/weight_meta.py | 20 + .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../subgraph_8/input_meta.py | 9 + .../PP-OCRv3_mobile_rec/subgraph_8/model.py | 804 ++ .../subgraph_8/weight_meta.py | 557 ++ .../subgraph_9/graph_hash.txt | 1 + .../subgraph_9/graph_net.json | 6 + .../subgraph_9/input_meta.py | 16 + .../PP-OCRv3_mobile_rec/subgraph_9/model.py | 47 + .../subgraph_9/weight_meta.py | 1 + .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_0/input_meta.py | 7 + .../PP-YOLOE-L_human/subgraph_0/model.py | 34 + .../subgraph_0/weight_meta.py | 1 + .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_1/input_meta.py | 19 + .../PP-YOLOE-L_human/subgraph_1/model.py | 43 + .../subgraph_1/weight_meta.py | 1 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 9 + .../PP-YOLOE-L_human/subgraph_10/model.py | 7396 ++++++++++++++ .../subgraph_10/weight_meta.py | 7564 ++++++++++++++ .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 156 + 
.../PP-YOLOE-L_human/subgraph_11/model.py | 385 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../subgraph_12/input_meta.py | 121 + .../PP-YOLOE-L_human/subgraph_12/model.py | 175 + .../subgraph_12/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../subgraph_13/input_meta.py | 42 + .../PP-YOLOE-L_human/subgraph_13/model.py | 162 + .../subgraph_13/weight_meta.py | 7 + .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + .../subgraph_14/input_meta.py | 141 + .../PP-YOLOE-L_human/subgraph_14/model.py | 338 + .../subgraph_14/weight_meta.py | 1 + .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../subgraph_15/input_meta.py | 105 + .../PP-YOLOE-L_human/subgraph_15/model.py | 223 + .../subgraph_15/weight_meta.py | 1 + .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../subgraph_16/input_meta.py | 38 + .../PP-YOLOE-L_human/subgraph_16/model.py | 94 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_2/input_meta.py | 28 + .../PP-YOLOE-L_human/subgraph_2/model.py | 110 + .../subgraph_2/weight_meta.py | 1 + .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_3/input_meta.py | 73 + .../PP-YOLOE-L_human/subgraph_3/model.py | 1144 +++ .../subgraph_3/weight_meta.py | 586 ++ .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_4/input_meta.py | 31 + .../PP-YOLOE-L_human/subgraph_4/model.py | 1050 ++ .../subgraph_4/weight_meta.py | 586 ++ .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_5/input_meta.py | 9 + .../PP-YOLOE-L_human/subgraph_5/model.py | 7164 +++++++++++++ .../subgraph_5/weight_meta.py | 8161 +++++++++++++++ .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_6/input_meta.py | 49 + .../PP-YOLOE-L_human/subgraph_6/model.py | 192 + .../subgraph_6/weight_meta.py | 7 + .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_7/input_meta.py | 130 + .../PP-YOLOE-L_human/subgraph_7/model.py | 258 + .../subgraph_7/weight_meta.py | 1 + .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_8/input_meta.py | 103 + .../PP-YOLOE-L_human/subgraph_8/model.py | 195 + .../subgraph_8/weight_meta.py | 1 + .../subgraph_9/graph_hash.txt | 1 + .../subgraph_9/graph_net.json | 6 + .../PP-YOLOE-L_human/subgraph_9/input_meta.py | 68 + .../PP-YOLOE-L_human/subgraph_9/model.py | 509 + .../subgraph_9/weight_meta.py | 1 + .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../subgraph_0/input_meta.py | 73 + .../PP-YOLOE-L_vehicle/subgraph_0/model.py | 1144 +++ .../subgraph_0/weight_meta.py | 586 ++ .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 69 + .../PP-YOLOE-L_vehicle/subgraph_11/model.py | 248 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../subgraph_13/input_meta.py | 31 + .../PP-YOLOE-L_vehicle/subgraph_13/model.py | 1050 ++ .../subgraph_13/weight_meta.py | 574 ++ .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + .../subgraph_14/input_meta.py | 67 + 
.../PP-YOLOE-L_vehicle/subgraph_14/model.py | 509 + .../subgraph_14/weight_meta.py | 1 + .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../subgraph_15/input_meta.py | 48 + .../PP-YOLOE-L_vehicle/subgraph_15/model.py | 192 + .../subgraph_15/weight_meta.py | 7 + .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../subgraph_16/input_meta.py | 38 + .../PP-YOLOE-L_vehicle/subgraph_16/model.py | 94 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_17/graph_hash.txt | 1 + .../subgraph_17/graph_net.json | 6 + .../subgraph_17/input_meta.py | 63 + .../PP-YOLOE-L_vehicle/subgraph_17/model.py | 181 + .../subgraph_17/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../subgraph_2/input_meta.py | 9 + .../PP-YOLOE-L_vehicle/subgraph_2/model.py | 7396 ++++++++++++++ .../subgraph_2/weight_meta.py | 7564 ++++++++++++++ .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../subgraph_3/input_meta.py | 42 + .../PP-YOLOE-L_vehicle/subgraph_3/model.py | 162 + .../subgraph_3/weight_meta.py | 7 + .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../subgraph_5/input_meta.py | 9 + .../PP-YOLOE-L_vehicle/subgraph_5/model.py | 7159 +++++++++++++ .../subgraph_5/weight_meta.py | 8161 +++++++++++++++ .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../subgraph_7/input_meta.py | 79 + .../PP-YOLOE-L_vehicle/subgraph_7/model.py | 338 + .../subgraph_7/weight_meta.py | 1 + .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../subgraph_8/input_meta.py | 62 + .../PP-YOLOE-L_vehicle/subgraph_8/model.py | 229 + .../subgraph_8/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_0/graph_hash.txt | 1 + .../subgraph_0}/graph_net.json | 2 +- .../PP-YOLOE-R-L/subgraph_0/input_meta.py | 240 + .../PaddleX/PP-YOLOE-R-L/subgraph_0/model.py | 7879 +++++++++++++++ .../PP-YOLOE-R-L/subgraph_0/weight_meta.py | 8453 ++++++++++++++++ .../PP-YOLOE-R-L/subgraph_1/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_1/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_1/input_meta.py | 74 + .../PaddleX/PP-YOLOE-R-L/subgraph_1/model.py | 484 + .../PP-YOLOE-R-L/subgraph_1/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_10/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_10/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_10/input_meta.py | 16 + .../PaddleX/PP-YOLOE-R-L/subgraph_10/model.py | 180 + .../PP-YOLOE-R-L/subgraph_10/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_11/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_11/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_11/input_meta.py | 63 + .../PaddleX/PP-YOLOE-R-L/subgraph_11/model.py | 96 + .../PP-YOLOE-R-L/subgraph_11/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_12/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_12/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_12/input_meta.py | 31 + .../PaddleX/PP-YOLOE-R-L/subgraph_12/model.py | 1292 +++ .../PP-YOLOE-R-L/subgraph_12/weight_meta.py | 745 ++ .../PP-YOLOE-R-L/subgraph_13/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_13/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_13/input_meta.py | 63 + .../PaddleX/PP-YOLOE-R-L/subgraph_13/model.py | 197 + .../PP-YOLOE-R-L/subgraph_13/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_14/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_14/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_14/input_meta.py | 104 + .../PaddleX/PP-YOLOE-R-L/subgraph_14/model.py | 251 + .../PP-YOLOE-R-L/subgraph_14/weight_meta.py | 1 + 
.../PP-YOLOE-R-L/subgraph_15/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_15/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_15/input_meta.py | 82 + .../PaddleX/PP-YOLOE-R-L/subgraph_15/model.py | 196 + .../PP-YOLOE-R-L/subgraph_15/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_2/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_2/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_2/input_meta.py | 5 + .../PaddleX/PP-YOLOE-R-L/subgraph_2/model.py | 34 + .../PP-YOLOE-R-L/subgraph_2/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_3/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_3/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_3/input_meta.py | 240 + .../PaddleX/PP-YOLOE-R-L/subgraph_3/model.py | 7594 ++++++++++++++ .../PP-YOLOE-R-L/subgraph_3/weight_meta.py | 6968 +++++++++++++ .../PP-YOLOE-R-L/subgraph_4/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_4/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_4/input_meta.py | 19 + .../PaddleX/PP-YOLOE-R-L/subgraph_4/model.py | 43 + .../PP-YOLOE-R-L/subgraph_4/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_5/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_5/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_5/input_meta.py | 81 + .../PaddleX/PP-YOLOE-R-L/subgraph_5/model.py | 465 + .../PP-YOLOE-R-L/subgraph_5/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_6/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_6/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_6/input_meta.py | 28 + .../PaddleX/PP-YOLOE-R-L/subgraph_6/model.py | 110 + .../PP-YOLOE-R-L/subgraph_6/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_7/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_7/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_7/input_meta.py | 65 + .../PaddleX/PP-YOLOE-R-L/subgraph_7/model.py | 245 + .../PP-YOLOE-R-L/subgraph_7/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_8/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_8/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_8/input_meta.py | 64 + .../PaddleX/PP-YOLOE-R-L/subgraph_8/model.py | 753 ++ .../PP-YOLOE-R-L/subgraph_8/weight_meta.py | 1 + .../PP-YOLOE-R-L/subgraph_9/graph_hash.txt | 1 + .../PP-YOLOE-R-L/subgraph_9/graph_net.json | 6 + .../PP-YOLOE-R-L/subgraph_9/input_meta.py | 69 + .../PaddleX/PP-YOLOE-R-L/subgraph_9/model.py | 126 + .../PP-YOLOE-R-L/subgraph_9/weight_meta.py | 1 + .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_0/input_meta.py | 93 + .../PP-YOLOE-S_human/subgraph_0/model.py | 223 + .../subgraph_0/weight_meta.py | 1 + .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_1/input_meta.py | 42 + .../PP-YOLOE-S_human/subgraph_1/model.py | 162 + .../subgraph_1/weight_meta.py | 7 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 108 + .../PP-YOLOE-S_human/subgraph_10/model.py | 175 + .../subgraph_10/weight_meta.py | 1 + .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 111 + .../PP-YOLOE-S_human/subgraph_11/model.py | 195 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../subgraph_12/input_meta.py | 28 + .../PP-YOLOE-S_human/subgraph_12/model.py | 110 + .../subgraph_12/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../subgraph_13/input_meta.py | 9 + .../PP-YOLOE-S_human/subgraph_13/model.py | 4048 ++++++++ .../subgraph_13/weight_meta.py | 3860 +++++++ .../subgraph_14/graph_hash.txt | 1 + 
.../subgraph_14/graph_net.json | 6 + .../subgraph_14/input_meta.py | 31 + .../PP-YOLOE-S_human/subgraph_14/model.py | 1050 ++ .../subgraph_14/weight_meta.py | 586 ++ .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../subgraph_15/input_meta.py | 73 + .../PP-YOLOE-S_human/subgraph_15/model.py | 1144 +++ .../subgraph_15/weight_meta.py | 586 ++ .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../subgraph_16/input_meta.py | 124 + .../PP-YOLOE-S_human/subgraph_16/model.py | 258 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_2/input_meta.py | 117 + .../PP-YOLOE-S_human/subgraph_2/model.py | 338 + .../subgraph_2/weight_meta.py | 1 + .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_3/input_meta.py | 67 + .../PP-YOLOE-S_human/subgraph_3/model.py | 509 + .../subgraph_3/weight_meta.py | 1 + .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_4/input_meta.py | 49 + .../PP-YOLOE-S_human/subgraph_4/model.py | 192 + .../subgraph_4/weight_meta.py | 7 + .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_5/input_meta.py | 144 + .../PP-YOLOE-S_human/subgraph_5/model.py | 385 + .../subgraph_5/weight_meta.py | 1 + .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_6/input_meta.py | 38 + .../PP-YOLOE-S_human/subgraph_6/model.py | 94 + .../subgraph_6/weight_meta.py | 1 + .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_7/input_meta.py | 9 + .../PP-YOLOE-S_human/subgraph_7/model.py | 4278 ++++++++ .../subgraph_7/weight_meta.py | 4457 +++++++++ .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_8/input_meta.py | 7 + .../PP-YOLOE-S_human/subgraph_8/model.py | 34 + .../subgraph_8/weight_meta.py | 1 + .../subgraph_9/graph_hash.txt | 1 + .../subgraph_9/graph_net.json | 6 + .../PP-YOLOE-S_human/subgraph_9/input_meta.py | 19 + .../PP-YOLOE-S_human/subgraph_9/model.py | 43 + .../subgraph_9/weight_meta.py | 1 + .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../subgraph_0/input_meta.py | 28 + .../PP-YOLOE-S_vehicle/subgraph_0/model.py | 110 + .../subgraph_0/weight_meta.py | 1 + .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../subgraph_1/input_meta.py | 76 + .../PP-YOLOE-S_vehicle/subgraph_1/model.py | 201 + .../subgraph_1/weight_meta.py | 1 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 49 + .../PP-YOLOE-S_vehicle/subgraph_10/model.py | 192 + .../subgraph_10/weight_meta.py | 7 + .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 91 + .../PP-YOLOE-S_vehicle/subgraph_11/model.py | 229 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../subgraph_12/input_meta.py | 83 + .../PP-YOLOE-S_vehicle/subgraph_12/model.py | 264 + .../subgraph_12/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../subgraph_13/input_meta.py | 73 + .../PP-YOLOE-S_vehicle/subgraph_13/model.py | 1144 +++ .../subgraph_13/weight_meta.py | 586 ++ .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + 
.../subgraph_14/input_meta.py | 19 + .../PP-YOLOE-S_vehicle/subgraph_14/model.py | 43 + .../subgraph_14/weight_meta.py | 1 + .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../subgraph_15/input_meta.py | 31 + .../PP-YOLOE-S_vehicle/subgraph_15/model.py | 1050 ++ .../subgraph_15/weight_meta.py | 574 ++ .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../subgraph_16/input_meta.py | 52 + .../PP-YOLOE-S_vehicle/subgraph_16/model.py | 338 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../subgraph_2/input_meta.py | 9 + .../PP-YOLOE-S_vehicle/subgraph_2/model.py | 4273 ++++++++ .../subgraph_2/weight_meta.py | 4457 +++++++++ .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../subgraph_3/input_meta.py | 38 + .../PP-YOLOE-S_vehicle/subgraph_3/model.py | 94 + .../subgraph_3/weight_meta.py | 1 + .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../subgraph_4/input_meta.py | 59 + .../PP-YOLOE-S_vehicle/subgraph_4/model.py | 176 + .../subgraph_4/weight_meta.py | 1 + .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../subgraph_5/input_meta.py | 9 + .../PP-YOLOE-S_vehicle/subgraph_5/model.py | 4048 ++++++++ .../subgraph_5/weight_meta.py | 3860 +++++++ .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../subgraph_6/input_meta.py | 87 + .../PP-YOLOE-S_vehicle/subgraph_6/model.py | 385 + .../subgraph_6/weight_meta.py | 1 + .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../subgraph_7/input_meta.py | 42 + .../PP-YOLOE-S_vehicle/subgraph_7/model.py | 162 + .../subgraph_7/weight_meta.py | 7 + .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../subgraph_8/input_meta.py | 7 + .../PP-YOLOE-S_vehicle/subgraph_8/model.py | 34 + .../subgraph_8/weight_meta.py | 1 + .../subgraph_9/graph_hash.txt | 1 + .../subgraph_9/graph_net.json | 6 + .../subgraph_9/input_meta.py | 68 + .../PP-YOLOE-S_vehicle/subgraph_9/model.py | 509 + .../subgraph_9/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_0/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_0/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_0/input_meta.py | 27 + .../PP-YOLOE_plus-L/subgraph_0/model.py | 110 + .../PP-YOLOE_plus-L/subgraph_0/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_1/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_1/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_1/input_meta.py | 38 + .../PP-YOLOE_plus-L/subgraph_1/model.py | 131 + .../PP-YOLOE_plus-L/subgraph_1/weight_meta.py | 1 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_10/input_meta.py | 76 + .../PP-YOLOE_plus-L/subgraph_10/model.py | 287 + .../subgraph_10/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_12/input_meta.py | 31 + .../PP-YOLOE_plus-L/subgraph_12/model.py | 158 + .../subgraph_12/weight_meta.py | 7 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_13/input_meta.py | 49 + .../PP-YOLOE_plus-L/subgraph_13/model.py | 263 + .../subgraph_13/weight_meta.py | 1 + .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_15/input_meta.py | 73 + .../PP-YOLOE_plus-L/subgraph_15/model.py | 1144 +++ .../subgraph_15/weight_meta.py | 586 ++ .../subgraph_16/graph_hash.txt | 1 + 
.../subgraph_16/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_16/input_meta.py | 134 + .../PP-YOLOE_plus-L/subgraph_16/model.py | 7504 ++++++++++++++ .../subgraph_16/weight_meta.py | 7564 ++++++++++++++ .../PP-YOLOE_plus-L/subgraph_2/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_2/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_2/input_meta.py | 19 + .../PP-YOLOE_plus-L/subgraph_2/model.py | 43 + .../PP-YOLOE_plus-L/subgraph_2/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_3/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_3/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_3/input_meta.py | 67 + .../PP-YOLOE_plus-L/subgraph_3/model.py | 509 + .../PP-YOLOE_plus-L/subgraph_3/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_5/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_5/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_5/input_meta.py | 134 + .../PP-YOLOE_plus-L/subgraph_5/model.py | 7279 ++++++++++++++ .../PP-YOLOE_plus-L/subgraph_5/weight_meta.py | 8161 +++++++++++++++ .../PP-YOLOE_plus-L/subgraph_6/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_6/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_6/input_meta.py | 85 + .../PP-YOLOE_plus-L/subgraph_6/model.py | 247 + .../PP-YOLOE_plus-L/subgraph_6/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_7/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_7/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_7/input_meta.py | 38 + .../PP-YOLOE_plus-L/subgraph_7/model.py | 94 + .../PP-YOLOE_plus-L/subgraph_7/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_8/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_8/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_8/input_meta.py | 7 + .../PP-YOLOE_plus-L/subgraph_8/model.py | 34 + .../PP-YOLOE_plus-L/subgraph_8/weight_meta.py | 1 + .../PP-YOLOE_plus-L/subgraph_9/graph_hash.txt | 1 + .../PP-YOLOE_plus-L/subgraph_9/graph_net.json | 6 + .../PP-YOLOE_plus-L/subgraph_9/input_meta.py | 31 + .../PP-YOLOE_plus-L/subgraph_9/model.py | 1050 ++ .../PP-YOLOE_plus-L/subgraph_9/weight_meta.py | 574 ++ .../PP-YOLOE_plus-M/subgraph_0/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_0/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_0/input_meta.py | 31 + .../PP-YOLOE_plus-M/subgraph_0/model.py | 1050 ++ .../PP-YOLOE_plus-M/subgraph_0/weight_meta.py | 574 ++ .../PP-YOLOE_plus-M/subgraph_1/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_1/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_1/input_meta.py | 92 + .../PP-YOLOE_plus-M/subgraph_1/model.py | 5794 +++++++++++ .../PP-YOLOE_plus-M/subgraph_1/weight_meta.py | 5773 +++++++++++ .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_10/input_meta.py | 38 + .../PP-YOLOE_plus-M/subgraph_10/model.py | 94 + .../subgraph_10/weight_meta.py | 1 + .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_11/input_meta.py | 76 + .../PP-YOLOE_plus-M/subgraph_11/model.py | 287 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_13/input_meta.py | 19 + .../PP-YOLOE_plus-M/subgraph_13/model.py | 43 + .../subgraph_13/weight_meta.py | 1 + .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_14/input_meta.py | 85 + .../PP-YOLOE_plus-M/subgraph_14/model.py | 247 + .../subgraph_14/weight_meta.py | 1 + .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_15/input_meta.py | 92 + 
.../PP-YOLOE_plus-M/subgraph_15/model.py | 5806 +++++++++++ .../subgraph_15/weight_meta.py | 6370 ++++++++++++ .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_16/input_meta.py | 38 + .../PP-YOLOE_plus-M/subgraph_16/model.py | 131 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_17/graph_hash.txt | 1 + .../subgraph_17/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_17/input_meta.py | 31 + .../PP-YOLOE_plus-M/subgraph_17/model.py | 158 + .../subgraph_17/weight_meta.py | 7 + .../PP-YOLOE_plus-M/subgraph_2/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_2/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_2/input_meta.py | 68 + .../PP-YOLOE_plus-M/subgraph_2/model.py | 509 + .../PP-YOLOE_plus-M/subgraph_2/weight_meta.py | 1 + .../PP-YOLOE_plus-M/subgraph_3/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_3/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_3/input_meta.py | 49 + .../PP-YOLOE_plus-M/subgraph_3/model.py | 263 + .../PP-YOLOE_plus-M/subgraph_3/weight_meta.py | 1 + .../PP-YOLOE_plus-M/subgraph_4/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_4/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_4/input_meta.py | 73 + .../PP-YOLOE_plus-M/subgraph_4/model.py | 1144 +++ .../PP-YOLOE_plus-M/subgraph_4/weight_meta.py | 586 ++ .../PP-YOLOE_plus-M/subgraph_6/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_6/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_6/input_meta.py | 28 + .../PP-YOLOE_plus-M/subgraph_6/model.py | 110 + .../PP-YOLOE_plus-M/subgraph_6/weight_meta.py | 1 + .../PP-YOLOE_plus-M/subgraph_7/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_7/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_7/input_meta.py | 38 + .../PP-YOLOE_plus-M/subgraph_7/model.py | 188 + .../PP-YOLOE_plus-M/subgraph_7/weight_meta.py | 7 + .../PP-YOLOE_plus-M/subgraph_9/graph_hash.txt | 1 + .../PP-YOLOE_plus-M/subgraph_9/graph_net.json | 6 + .../PP-YOLOE_plus-M/subgraph_9/input_meta.py | 7 + .../PP-YOLOE_plus-M/subgraph_9/model.py | 34 + .../PP-YOLOE_plus-M/subgraph_9/weight_meta.py | 1 + .../PP-YOLOE_plus-S/subgraph_0/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_0/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_0/input_meta.py | 33 + .../PP-YOLOE_plus-S/subgraph_0/model.py | 303 + .../PP-YOLOE_plus-S/subgraph_0/weight_meta.py | 1 + .../PP-YOLOE_plus-S/subgraph_1/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_1/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_1/input_meta.py | 7 + .../PP-YOLOE_plus-S/subgraph_1/model.py | 34 + .../PP-YOLOE_plus-S/subgraph_1/weight_meta.py | 1 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_10/input_meta.py | 98 + .../PP-YOLOE_plus-S/subgraph_10/model.py | 120 + .../subgraph_10/weight_meta.py | 1 + .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_11/input_meta.py | 19 + .../PP-YOLOE_plus-S/subgraph_11/model.py | 43 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_12/input_meta.py | 76 + .../PP-YOLOE_plus-S/subgraph_12/model.py | 287 + .../subgraph_12/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_13/input_meta.py | 38 + .../PP-YOLOE_plus-S/subgraph_13/model.py | 188 + .../subgraph_13/weight_meta.py | 7 + .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + 
.../PP-YOLOE_plus-S/subgraph_14/input_meta.py | 38 + .../PP-YOLOE_plus-S/subgraph_14/model.py | 131 + .../subgraph_14/weight_meta.py | 1 + .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_15/input_meta.py | 49 + .../PP-YOLOE_plus-S/subgraph_15/model.py | 263 + .../subgraph_15/weight_meta.py | 1 + .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_16/input_meta.py | 85 + .../PP-YOLOE_plus-S/subgraph_16/model.py | 247 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_17/graph_hash.txt | 1 + .../subgraph_17/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_17/input_meta.py | 38 + .../PP-YOLOE_plus-S/subgraph_17/model.py | 94 + .../subgraph_17/weight_meta.py | 1 + .../subgraph_18/graph_hash.txt | 1 + .../subgraph_18/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_18/input_meta.py | 87 + .../PP-YOLOE_plus-S/subgraph_18/model.py | 262 + .../subgraph_18/weight_meta.py | 1 + .../PP-YOLOE_plus-S/subgraph_2/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_2/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_2/input_meta.py | 51 + .../PP-YOLOE_plus-S/subgraph_2/model.py | 4084 ++++++++ .../PP-YOLOE_plus-S/subgraph_2/weight_meta.py | 3860 +++++++ .../PP-YOLOE_plus-S/subgraph_3/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_3/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_3/input_meta.py | 73 + .../PP-YOLOE_plus-S/subgraph_3/model.py | 1144 +++ .../PP-YOLOE_plus-S/subgraph_3/weight_meta.py | 574 ++ .../PP-YOLOE_plus-S/subgraph_4/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_4/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_4/input_meta.py | 28 + .../PP-YOLOE_plus-S/subgraph_4/model.py | 110 + .../PP-YOLOE_plus-S/subgraph_4/weight_meta.py | 1 + .../PP-YOLOE_plus-S/subgraph_5/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_5/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_5/input_meta.py | 31 + .../PP-YOLOE_plus-S/subgraph_5/model.py | 1050 ++ .../PP-YOLOE_plus-S/subgraph_5/weight_meta.py | 574 ++ .../PP-YOLOE_plus-S/subgraph_6/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_6/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_6/input_meta.py | 68 + .../PP-YOLOE_plus-S/subgraph_6/model.py | 509 + .../PP-YOLOE_plus-S/subgraph_6/weight_meta.py | 1 + .../PP-YOLOE_plus-S/subgraph_7/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_7/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_7/input_meta.py | 50 + .../PP-YOLOE_plus-S/subgraph_7/model.py | 4333 ++++++++ .../PP-YOLOE_plus-S/subgraph_7/weight_meta.py | 4457 +++++++++ .../PP-YOLOE_plus-S/subgraph_8/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_8/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_8/input_meta.py | 60 + .../PP-YOLOE_plus-S/subgraph_8/model.py | 310 + .../PP-YOLOE_plus-S/subgraph_8/weight_meta.py | 1 + .../PP-YOLOE_plus-S/subgraph_9/graph_hash.txt | 1 + .../PP-YOLOE_plus-S/subgraph_9/graph_net.json | 6 + .../PP-YOLOE_plus-S/subgraph_9/input_meta.py | 31 + .../PP-YOLOE_plus-S/subgraph_9/model.py | 158 + .../PP-YOLOE_plus-S/subgraph_9/weight_meta.py | 7 + .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../subgraph_0/input_meta.py | 119 + .../PP-YOLOE_plus_SOD-L/subgraph_0/model.py | 4857 +++++++++ .../subgraph_0/weight_meta.py | 4013 ++++++++ .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../subgraph_1/input_meta.py | 73 + .../PP-YOLOE_plus_SOD-L/subgraph_1/model.py | 1144 +++ .../subgraph_1/weight_meta.py | 580 ++ .../subgraph_10/graph_hash.txt | 1 + 
.../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 38 + .../PP-YOLOE_plus_SOD-L/subgraph_10/model.py | 94 + .../subgraph_10/weight_meta.py | 1 + .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 64 + .../PP-YOLOE_plus_SOD-L/subgraph_11/model.py | 244 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../subgraph_12/input_meta.py | 222 + .../PP-YOLOE_plus_SOD-L/subgraph_12/model.py | 8874 +++++++++++++++++ .../subgraph_12/weight_meta.py | 8004 +++++++++++++++ .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../subgraph_13/input_meta.py | 28 + .../PP-YOLOE_plus_SOD-L/subgraph_13/model.py | 110 + .../subgraph_13/weight_meta.py | 1 + .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + .../subgraph_14/input_meta.py | 31 + .../PP-YOLOE_plus_SOD-L/subgraph_14/model.py | 1050 ++ .../subgraph_14/weight_meta.py | 580 ++ .../subgraph_18/graph_hash.txt | 1 + .../subgraph_18/graph_net.json | 6 + .../subgraph_18/input_meta.py | 19 + .../PP-YOLOE_plus_SOD-L/subgraph_18/model.py | 43 + .../subgraph_18/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../subgraph_2/input_meta.py | 84 + .../PP-YOLOE_plus_SOD-L/subgraph_2/model.py | 192 + .../subgraph_2/weight_meta.py | 1 + .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../subgraph_3/input_meta.py | 42 + .../PP-YOLOE_plus_SOD-L/subgraph_3/model.py | 162 + .../subgraph_3/weight_meta.py | 7 + .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../subgraph_4/input_meta.py | 134 + .../PP-YOLOE_plus_SOD-L/subgraph_4/model.py | 4040 ++++++++ .../subgraph_4/weight_meta.py | 3989 ++++++++ .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../subgraph_5/input_meta.py | 71 + .../PP-YOLOE_plus_SOD-L/subgraph_5/model.py | 504 + .../subgraph_5/weight_meta.py | 1 + .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../subgraph_6/input_meta.py | 67 + .../PP-YOLOE_plus_SOD-L/subgraph_6/model.py | 514 + .../subgraph_6/weight_meta.py | 1 + .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../subgraph_8/input_meta.py | 233 + .../PP-YOLOE_plus_SOD-L/subgraph_8/model.py | 8235 +++++++++++++++ .../subgraph_8/weight_meta.py | 8595 ++++++++++++++++ .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../subgraph_0/input_meta.py | 119 + .../PP-YOLOE_plus_SOD-S/subgraph_0/model.py | 3357 +++++++ .../subgraph_0/weight_meta.py | 2323 +++++ .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../subgraph_1/input_meta.py | 89 + .../PP-YOLOE_plus_SOD-S/subgraph_1/model.py | 192 + .../subgraph_1/weight_meta.py | 1 + .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 50 + .../PP-YOLOE_plus_SOD-S/subgraph_10/model.py | 2120 ++++ .../subgraph_10/weight_meta.py | 1975 ++++ .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 28 + .../PP-YOLOE_plus_SOD-S/subgraph_11/model.py | 110 + .../subgraph_11/weight_meta.py | 1 + .../subgraph_12/graph_hash.txt | 1 + .../subgraph_12/graph_net.json | 6 + .../subgraph_12/input_meta.py | 67 + .../PP-YOLOE_plus_SOD-S/subgraph_12/model.py | 514 + .../subgraph_12/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + 
.../subgraph_13/input_meta.py | 19 + .../PP-YOLOE_plus_SOD-S/subgraph_13/model.py | 43 + .../subgraph_13/weight_meta.py | 1 + .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + .../subgraph_14/input_meta.py | 31 + .../PP-YOLOE_plus_SOD-S/subgraph_14/model.py | 1050 ++ .../subgraph_14/weight_meta.py | 580 ++ .../subgraph_15/graph_hash.txt | 1 + .../subgraph_15/graph_net.json | 6 + .../subgraph_15/input_meta.py | 149 + .../PP-YOLOE_plus_SOD-S/subgraph_15/model.py | 5289 ++++++++++ .../subgraph_15/weight_meta.py | 4891 +++++++++ .../subgraph_16/graph_hash.txt | 1 + .../subgraph_16/graph_net.json | 6 + .../subgraph_16/input_meta.py | 64 + .../PP-YOLOE_plus_SOD-S/subgraph_16/model.py | 244 + .../subgraph_16/weight_meta.py | 1 + .../subgraph_17/graph_hash.txt | 1 + .../subgraph_17/graph_net.json | 6 + .../subgraph_17/input_meta.py | 38 + .../PP-YOLOE_plus_SOD-S/subgraph_17/model.py | 94 + .../subgraph_17/weight_meta.py | 1 + .../subgraph_18/graph_hash.txt | 1 + .../subgraph_18/graph_net.json | 6 + .../subgraph_18/input_meta.py | 138 + .../PP-YOLOE_plus_SOD-S/subgraph_18/model.py | 212 + .../subgraph_18/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../subgraph_2/input_meta.py | 42 + .../PP-YOLOE_plus_SOD-S/subgraph_2/model.py | 162 + .../subgraph_2/weight_meta.py | 7 + .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../subgraph_3/input_meta.py | 138 + .../PP-YOLOE_plus_SOD-S/subgraph_3/model.py | 5454 ++++++++++ .../subgraph_3/weight_meta.py | 4300 ++++++++ .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../subgraph_4/input_meta.py | 48 + .../PP-YOLOE_plus_SOD-S/subgraph_4/model.py | 192 + .../subgraph_4/weight_meta.py | 7 + .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../subgraph_5/input_meta.py | 73 + .../PP-YOLOE_plus_SOD-S/subgraph_5/model.py | 1144 +++ .../subgraph_5/weight_meta.py | 580 ++ .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../subgraph_6/input_meta.py | 78 + .../PP-YOLOE_plus_SOD-S/subgraph_6/model.py | 551 + .../subgraph_6/weight_meta.py | 1 + .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../subgraph_7/input_meta.py | 78 + .../PP-YOLOE_plus_SOD-S/subgraph_7/model.py | 290 + .../subgraph_7/weight_meta.py | 1 + .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../subgraph_8/input_meta.py | 71 + .../PP-YOLOE_plus_SOD-S/subgraph_8/model.py | 504 + .../subgraph_8/weight_meta.py | 1 + .../subgraph_9/graph_hash.txt | 1 + .../subgraph_9/graph_net.json | 6 + .../subgraph_9/input_meta.py | 7 + .../PP-YOLOE_plus_SOD-S/subgraph_9/model.py | 34 + .../subgraph_9/weight_meta.py | 1 + .../subgraph_0/graph_hash.txt | 1 + .../subgraph_0/graph_net.json | 6 + .../subgraph_0/input_meta.py | 19 + .../subgraph_0/model.py | 43 + .../subgraph_0/weight_meta.py | 1 + .../subgraph_1/graph_hash.txt | 1 + .../subgraph_1/graph_net.json | 6 + .../subgraph_1/input_meta.py | 73 + .../subgraph_1/model.py | 1144 +++ .../subgraph_1/weight_meta.py | 586 ++ .../subgraph_10/graph_hash.txt | 1 + .../subgraph_10/graph_net.json | 6 + .../subgraph_10/input_meta.py | 42 + .../subgraph_10/model.py | 162 + .../subgraph_10/weight_meta.py | 7 + .../subgraph_11/graph_hash.txt | 1 + .../subgraph_11/graph_net.json | 6 + .../subgraph_11/input_meta.py | 222 + .../subgraph_11/model.py | 8874 +++++++++++++++++ .../subgraph_11/weight_meta.py | 8004 +++++++++++++++ .../subgraph_12/graph_hash.txt | 1 + 
.../subgraph_12/graph_net.json | 6 + .../subgraph_12/input_meta.py | 102 + .../subgraph_12/model.py | 244 + .../subgraph_12/weight_meta.py | 1 + .../subgraph_13/graph_hash.txt | 1 + .../subgraph_13/graph_net.json | 6 + .../subgraph_13/input_meta.py | 68 + .../subgraph_13/model.py | 514 + .../subgraph_13/weight_meta.py | 1 + .../subgraph_14/graph_hash.txt | 1 + .../subgraph_14/graph_net.json | 6 + .../subgraph_14/input_meta.py | 145 + .../subgraph_14/model.py | 499 + .../subgraph_14/weight_meta.py | 1 + .../subgraph_2/graph_hash.txt | 1 + .../subgraph_2/graph_net.json | 6 + .../subgraph_2/input_meta.py | 119 + .../subgraph_2/model.py | 4857 +++++++++ .../subgraph_2/weight_meta.py | 4013 ++++++++ .../subgraph_3/graph_hash.txt | 1 + .../subgraph_3/graph_net.json | 6 + .../subgraph_3/input_meta.py | 31 + .../subgraph_3/model.py | 1050 ++ .../subgraph_3/weight_meta.py | 586 ++ .../subgraph_4/graph_hash.txt | 1 + .../subgraph_4/graph_net.json | 6 + .../subgraph_4/input_meta.py | 7 + .../subgraph_4/model.py | 34 + .../subgraph_4/weight_meta.py | 1 + .../subgraph_5/graph_hash.txt | 1 + .../subgraph_5/graph_net.json | 6 + .../subgraph_5/input_meta.py | 174 + .../subgraph_5/model.py | 546 + .../subgraph_5/weight_meta.py | 1 + .../subgraph_6/graph_hash.txt | 1 + .../subgraph_6/graph_net.json | 6 + .../subgraph_6/input_meta.py | 127 + .../subgraph_6/model.py | 284 + .../subgraph_6/weight_meta.py | 1 + .../subgraph_7/graph_hash.txt | 1 + .../subgraph_7/graph_net.json | 6 + .../subgraph_7/input_meta.py | 135 + .../subgraph_7/model.py | 4040 ++++++++ .../subgraph_7/weight_meta.py | 3989 ++++++++ .../subgraph_8/graph_hash.txt | 1 + .../subgraph_8/graph_net.json | 6 + .../subgraph_8/input_meta.py | 108 + .../subgraph_8/model.py | 192 + .../subgraph_8/weight_meta.py | 1 + .../subgraph_9/graph_hash.txt | 1 + .../subgraph_9/graph_net.json | 6 + .../subgraph_9/input_meta.py | 28 + .../subgraph_9/model.py | 110 + .../subgraph_9/weight_meta.py | 1 + .../TimesNet/subgraph_0/graph_hash.txt | 1 + .../TimesNet/subgraph_0/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_0/input_meta.py | 23 + .../PaddleX/TimesNet/subgraph_0/model.py | 331 + .../TimesNet/subgraph_0/weight_meta.py | 238 + .../TimesNet/subgraph_1/graph_hash.txt | 1 + .../TimesNet/subgraph_1/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_1/input_meta.py | 23 + .../PaddleX/TimesNet/subgraph_1/model.py | 394 + .../TimesNet/subgraph_1/weight_meta.py | 238 + .../TimesNet/subgraph_10/graph_hash.txt | 1 + .../TimesNet/subgraph_10/graph_net.json | 6 + .../TimesNet/subgraph_10/input_meta.py | 9 + .../PaddleX/TimesNet/subgraph_10/model.py | 238 + .../TimesNet/subgraph_10/weight_meta.py | 238 + .../subgraph_11}/graph_hash.txt | 0 .../TimesNet/subgraph_11/graph_net.json | 6 + .../TimesNet/subgraph_11/input_meta.py | 19 + .../PaddleX/TimesNet/subgraph_11/model.py | 54 + .../TimesNet/subgraph_11/weight_meta.py | 1 + .../TimesNet/subgraph_12/graph_hash.txt | 1 + .../TimesNet/subgraph_12/graph_net.json | 6 + .../TimesNet/subgraph_12/input_meta.py | 75 + .../PaddleX/TimesNet/subgraph_12/model.py | 65 + .../TimesNet/subgraph_12/weight_meta.py | 1 + .../TimesNet/subgraph_13/graph_hash.txt | 1 + .../TimesNet/subgraph_13/graph_net.json | 6 + .../TimesNet/subgraph_13/input_meta.py | 31 + .../PaddleX/TimesNet/subgraph_13/model.py | 203 + .../TimesNet/subgraph_13/weight_meta.py | 42 + .../TimesNet/subgraph_2/graph_hash.txt | 1 + .../TimesNet/subgraph_2/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_2/input_meta.py | 37 + .../PaddleX/TimesNet/subgraph_2/model.py | 401 
+ .../TimesNet/subgraph_2/weight_meta.py | 238 + .../TimesNet/subgraph_3/graph_hash.txt | 1 + .../TimesNet/subgraph_3/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_3/input_meta.py | 9 + .../PaddleX/TimesNet/subgraph_3/model.py | 109 + .../TimesNet/subgraph_3/weight_meta.py | 1 + .../TimesNet/subgraph_4/graph_hash.txt | 1 + .../TimesNet/subgraph_4/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_4/input_meta.py | 9 + .../PaddleX/TimesNet/subgraph_4/model.py | 268 + .../TimesNet/subgraph_4/weight_meta.py | 238 + .../TimesNet/subgraph_5/graph_hash.txt | 1 + .../TimesNet/subgraph_5/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_5/input_meta.py | 12 + .../PaddleX/TimesNet/subgraph_5/model.py | 37 + .../TimesNet/subgraph_5/weight_meta.py | 1 + .../TimesNet/subgraph_6/graph_hash.txt | 1 + .../TimesNet/subgraph_6/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_6/input_meta.py | 37 + .../PaddleX/TimesNet/subgraph_6/model.py | 368 + .../TimesNet/subgraph_6/weight_meta.py | 238 + .../TimesNet/subgraph_7/graph_hash.txt | 1 + .../TimesNet/subgraph_7/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_7/input_meta.py | 57 + .../PaddleX/TimesNet/subgraph_7/model.py | 62 + .../TimesNet/subgraph_7/weight_meta.py | 16 + .../TimesNet/subgraph_8/graph_hash.txt | 1 + .../TimesNet/subgraph_8/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_8/input_meta.py | 31 + .../PaddleX/TimesNet/subgraph_8/model.py | 210 + .../TimesNet/subgraph_8/weight_meta.py | 42 + .../TimesNet/subgraph_9/graph_hash.txt | 1 + .../TimesNet/subgraph_9/graph_net.json | 6 + .../PaddleX/TimesNet/subgraph_9/input_meta.py | 31 + .../PaddleX/TimesNet/subgraph_9/model.py | 219 + .../TimesNet/subgraph_9/weight_meta.py | 42 + .../TimesNet_ad/subgraph_0/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_0/graph_net.json | 6 + .../TimesNet_ad/subgraph_0/input_meta.py | 30 + .../PaddleX/TimesNet_ad/subgraph_0/model.py | 303 + .../TimesNet_ad/subgraph_0/weight_meta.py | 238 + .../TimesNet_ad/subgraph_1/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_1/graph_net.json | 6 + .../TimesNet_ad/subgraph_1/input_meta.py | 23 + .../PaddleX/TimesNet_ad/subgraph_1/model.py | 394 + .../TimesNet_ad/subgraph_1/weight_meta.py | 238 + .../TimesNet_ad/subgraph_10/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_10/graph_net.json | 6 + .../TimesNet_ad/subgraph_10/input_meta.py | 12 + .../PaddleX/TimesNet_ad/subgraph_10/model.py | 37 + .../TimesNet_ad/subgraph_10/weight_meta.py | 1 + .../TimesNet_ad/subgraph_11/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_11/graph_net.json | 6 + .../TimesNet_ad/subgraph_11/input_meta.py | 37 + .../PaddleX/TimesNet_ad/subgraph_11/model.py | 401 + .../TimesNet_ad/subgraph_11/weight_meta.py | 238 + .../TimesNet_ad/subgraph_12/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_12/graph_net.json | 6 + .../subgraph_12}/input_meta.py | 4 +- .../subgraph_12}/model.py | 0 .../TimesNet_ad/subgraph_12/weight_meta.py | 1 + .../TimesNet_ad/subgraph_2/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_2/graph_net.json | 6 + .../TimesNet_ad/subgraph_2/input_meta.py | 30 + .../PaddleX/TimesNet_ad/subgraph_2/model.py | 337 + .../TimesNet_ad/subgraph_2/weight_meta.py | 238 + .../TimesNet_ad/subgraph_3/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_3/graph_net.json | 6 + .../TimesNet_ad/subgraph_3/input_meta.py | 9 + .../PaddleX/TimesNet_ad/subgraph_3/model.py | 238 + .../TimesNet_ad/subgraph_3/weight_meta.py | 238 + .../TimesNet_ad/subgraph_4/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_4/graph_net.json | 6 + .../TimesNet_ad/subgraph_4/input_meta.py | 37 + 
.../PaddleX/TimesNet_ad/subgraph_4/model.py | 368 + .../TimesNet_ad/subgraph_4/weight_meta.py | 238 + .../TimesNet_ad/subgraph_5/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_5/graph_net.json | 6 + .../TimesNet_ad/subgraph_5/input_meta.py | 49 + .../PaddleX/TimesNet_ad/subgraph_5/model.py | 65 + .../TimesNet_ad/subgraph_5/weight_meta.py | 1 + .../TimesNet_ad/subgraph_6/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_6/graph_net.json | 6 + .../TimesNet_ad/subgraph_6/input_meta.py | 89 + .../PaddleX/TimesNet_ad/subgraph_6/model.py | 62 + .../TimesNet_ad/subgraph_6/weight_meta.py | 16 + .../TimesNet_ad/subgraph_7/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_7/graph_net.json | 6 + .../TimesNet_ad/subgraph_7/input_meta.py | 9 + .../PaddleX/TimesNet_ad/subgraph_7/model.py | 109 + .../TimesNet_ad/subgraph_7/weight_meta.py | 1 + .../TimesNet_ad/subgraph_8/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_8/graph_net.json | 6 + .../TimesNet_ad/subgraph_8/input_meta.py | 20 + .../PaddleX/TimesNet_ad/subgraph_8/model.py | 177 + .../TimesNet_ad/subgraph_8/weight_meta.py | 9 + .../TimesNet_ad/subgraph_9/graph_hash.txt | 1 + .../TimesNet_ad/subgraph_9/graph_net.json | 6 + .../TimesNet_ad/subgraph_9/input_meta.py | 20 + .../PaddleX/TimesNet_ad/subgraph_9/model.py | 180 + .../TimesNet_ad/subgraph_9/weight_meta.py | 9 + .../ch_SVTRv2_rec/subgraph_0/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_0/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_0/input_meta.py | 51 + .../PaddleX/ch_SVTRv2_rec/subgraph_0/model.py | 867 ++ .../ch_SVTRv2_rec/subgraph_0/weight_meta.py | 790 ++ .../ch_SVTRv2_rec/subgraph_1/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_1/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_1/input_meta.py | 65 + .../PaddleX/ch_SVTRv2_rec/subgraph_1/model.py | 889 ++ .../ch_SVTRv2_rec/subgraph_1/weight_meta.py | 790 ++ .../ch_SVTRv2_rec/subgraph_10/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_10/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_10/input_meta.py | 9 + .../ch_SVTRv2_rec/subgraph_10/model.py | 828 ++ .../ch_SVTRv2_rec/subgraph_10/weight_meta.py | 565 ++ .../ch_SVTRv2_rec/subgraph_11/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_11/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_11/input_meta.py | 30 + .../ch_SVTRv2_rec/subgraph_11/model.py | 1846 ++++ .../ch_SVTRv2_rec/subgraph_11/weight_meta.py | 790 ++ .../ch_SVTRv2_rec/subgraph_12/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_12/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_12/input_meta.py | 9 + .../ch_SVTRv2_rec/subgraph_12/model.py | 1332 +++ .../ch_SVTRv2_rec/subgraph_12/weight_meta.py | 702 ++ .../ch_SVTRv2_rec/subgraph_13/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_13/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_13/input_meta.py | 9 + .../ch_SVTRv2_rec/subgraph_13/model.py | 804 ++ .../ch_SVTRv2_rec/subgraph_13/weight_meta.py | 565 ++ .../ch_SVTRv2_rec/subgraph_14/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_14/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_14/input_meta.py | 36 + .../ch_SVTRv2_rec/subgraph_14/model.py | 1089 ++ .../ch_SVTRv2_rec/subgraph_14/weight_meta.py | 471 + .../ch_SVTRv2_rec/subgraph_15/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_15/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_15/input_meta.py | 9 + .../ch_SVTRv2_rec/subgraph_15/model.py | 666 ++ .../ch_SVTRv2_rec/subgraph_15/weight_meta.py | 565 ++ .../ch_SVTRv2_rec/subgraph_2/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_2/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_2/input_meta.py | 16 + 
.../PaddleX/ch_SVTRv2_rec/subgraph_2/model.py | 129 + .../ch_SVTRv2_rec/subgraph_2/weight_meta.py | 120 + .../ch_SVTRv2_rec/subgraph_3/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_3/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_3/input_meta.py | 9 + .../PaddleX/ch_SVTRv2_rec/subgraph_3/model.py | 5013 ++++++++++ .../ch_SVTRv2_rec/subgraph_3/weight_meta.py | 2408 +++++ .../ch_SVTRv2_rec/subgraph_4/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_4/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_4/input_meta.py | 23 + .../PaddleX/ch_SVTRv2_rec/subgraph_4/model.py | 736 ++ .../ch_SVTRv2_rec/subgraph_4/weight_meta.py | 702 ++ .../ch_SVTRv2_rec/subgraph_5/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_5/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_5/input_meta.py | 16 + .../PaddleX/ch_SVTRv2_rec/subgraph_5/model.py | 59 + .../ch_SVTRv2_rec/subgraph_5/weight_meta.py | 1 + .../ch_SVTRv2_rec/subgraph_6/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_6/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_6/input_meta.py | 23 + .../PaddleX/ch_SVTRv2_rec/subgraph_6/model.py | 769 ++ .../ch_SVTRv2_rec/subgraph_6/weight_meta.py | 702 ++ .../ch_SVTRv2_rec/subgraph_7/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_7/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_7/input_meta.py | 51 + .../PaddleX/ch_SVTRv2_rec/subgraph_7/model.py | 1918 ++++ .../ch_SVTRv2_rec/subgraph_7/weight_meta.py | 790 ++ .../ch_SVTRv2_rec/subgraph_8/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_8/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_8/input_meta.py | 36 + .../PaddleX/ch_SVTRv2_rec/subgraph_8/model.py | 1139 +++ .../ch_SVTRv2_rec/subgraph_8/weight_meta.py | 471 + .../ch_SVTRv2_rec/subgraph_9/graph_hash.txt | 1 + .../ch_SVTRv2_rec/subgraph_9/graph_net.json | 6 + .../ch_SVTRv2_rec/subgraph_9/input_meta.py | 65 + .../PaddleX/ch_SVTRv2_rec/subgraph_9/model.py | 907 ++ .../ch_SVTRv2_rec/subgraph_9/weight_meta.py | 790 ++ 1295 files changed, 479282 insertions(+), 3 deletions(-) create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_0/model.py rename paddle_samples/PaddleX/{TimesNet_cls/subgraph_7 => Nonstationary/subgraph_0}/weight_meta.py (100%) create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_11/model.py create mode 
100644 paddle_samples/PaddleX/Nonstationary/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/Nonstationary/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/input_meta.py create mode 
100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt create mode 100644 
paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/model.py create mode 100644 
paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/weight_meta.py create mode 100644 
paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt rename paddle_samples/PaddleX/{TimesNet_cls/subgraph_7 => PP-YOLOE-R-L/subgraph_0}/graph_net.json (72%) create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/weight_meta.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/model.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py create mode 
100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/weight_meta.py create mode 
100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py create 
mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/weight_meta.py 
create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_net.json create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py create 
mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/model.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py 
create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py create mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_10/weight_meta.py rename paddle_samples/PaddleX/{TimesNet_cls/subgraph_7 => TimesNet/subgraph_11}/graph_hash.txt (100%) create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_11/model.py create mode 100644 
paddle_samples/PaddleX/TimesNet/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt create 
mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json rename paddle_samples/PaddleX/{TimesNet_cls/subgraph_7 => TimesNet_ad/subgraph_12}/input_meta.py (86%) rename paddle_samples/PaddleX/{TimesNet_cls/subgraph_7 => TimesNet_ad/subgraph_12}/model.py (100%) create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py create mode 100644 
paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_net.json create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py create mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/model.py create mode 100644 
paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/input_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/model.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/weight_meta.py create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_net.json create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/input_meta.py 
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/model.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_net.json
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/input_meta.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/model.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_net.json
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/input_meta.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/model.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_net.json
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/input_meta.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/model.py
 create mode 100644 paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/weight_meta.py

diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt
new file mode 100644
index 000000000..0f86f5190
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt
@@ -0,0 +1 @@
+87e111bb444c08b20429854b567149d52c99bf4d25b122eb5b425f75c7ed8942
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_net.json
new file mode 100644
index 000000000..1977ac2fa
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_net.json
@@ -0,0 +1,6 @@
+{
+  "framework": "paddle",
+  "model_name": "Nonstationary",
+  "num_devices_required": 1,
+  "num_nodes_required": 1
+}
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_0/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_0/input_meta.py
new file mode 100644
index 000000000..5602cd964
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_0/input_meta.py
@@ -0,0 +1,29 @@
+class Program_weight_tensor_data_0:
+    name = "data_0"
+    shape = [16, 8, 144, 144]
+    dtype = "float32"
+    min_val = float("-72.3269")
+    max_val = float("42.6953")
+    mean = float("-0.024826")
+    std = float("2.65997")
+    data = None
+
+
+class Program_weight_tensor_data_1:
+    name = "data_1"
+    shape = [16, 144, 8, 64]
+    dtype = "float32"
+    min_val = float("-6.2104")
+    max_val = float("7.44246")
+    mean = float("0.000532517")
+    std = float("0.625442")
+    data = None
+
+
+class Program_weight_tensor_data_2:
+    name = "data_2"
+    shape = [16, 1, 144, 144]
+    dtype = "bool"
+    min_val = 0
+    max_val = 2
+    data = None
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_0/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_0/model.py
new file mode 100644
index 000000000..da446a84d
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_0/model.py
@@ -0,0 +1,134 @@
+import paddle
+
+
+class GraphModule(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, data_0, data_1, data_2):
+        # pd_op.full: (16x8x144x144xf32) <- ()
+        full_0 = paddle._C_ops.full(
+            [16, 8, 144, 144],
+            float("-inf"),
+            paddle.float32,
+            paddle.framework._current_expected_place(),
+        )
+
+        # pd_op.full: (1xf32) <- ()
+        full_1 = paddle._C_ops.full(
+            [1], float("0"), paddle.float32, paddle.core.CPUPlace()
+        )
+
+        # pd_op.full_like: (16x8x144x144xf32) <- (16x8x144x144xf32, 1xf32)
+        full_like_0 = paddle._C_ops.full_like(
+            full_0, full_1, paddle.float32, paddle.framework._current_expected_place()
+        )
+
+        # pd_op.full_like: (16x8x144x144xf32) <- (16x8x144x144xf32, 1xf32)
+        full_like_1 = paddle._C_ops.full_like(
+            data_0, full_1, paddle.float32, paddle.framework._current_expected_place()
+        )
+
+        # pd_op.full_like: (16x1x144x144xb) <- (16x1x144x144xb, 1xf32)
+        full_like_2 = paddle._C_ops.full_like(
+            data_2, full_1, paddle.bool, paddle.framework._current_expected_place()
+        )
+        del full_1
+
+        # pd_op.cast: (16x1x144x144xf32) <- (16x1x144x144xb)
+        cast_0 = paddle._C_ops.cast(full_like_2, paddle.float32)
+        del full_like_2
+
+        # pd_op.cast: (16x1x144x144xf32) <- (16x1x144x144xb)
+        cast_1 = paddle._C_ops.cast(data_2, paddle.float32)
+        del data_2
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x8x144x144xf32)
+        add_0 = paddle._C_ops.add(full_like_0, full_like_1)
+        del full_like_0, full_like_1
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x1x144x144xf32)
+        add_1 = paddle._C_ops.add(add_0, cast_0)
+        del add_0, cast_0
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x8x144x144xf32)
+        add_2 = paddle._C_ops.add(full_0, add_1)
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x8x144x144xf32)
+        add_3 = paddle._C_ops.add(data_0, add_1)
+        del data_0
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x1x144x144xf32, 16x8x144x144xf32)
+        add_4 = paddle._C_ops.add(cast_1, add_1)
+        del cast_1
+
+        # pd_op.cast: (16x8x144x144xb) <- (16x8x144x144xf32)
+        cast_2 = paddle._C_ops.cast(add_4, paddle.bool)
+        del add_4
+
+        # pd_op.where: (16x8x144x144xf32) <- (16x8x144x144xb, 16x8x144x144xf32, 16x8x144x144xf32)
+        where_0 = paddle._C_ops.where(cast_2, add_2, add_3)
+
+        # pd_op.full: (1xf32) <- ()
+        full_2 = paddle._C_ops.full(
+            [1], float("0.125"), paddle.float32, paddle.core.CPUPlace()
+        )
+
+        # pd_op.scale: (16x8x144x144xf32) <- (16x8x144x144xf32, 1xf32)
+        scale_0 = paddle._C_ops.scale(where_0, full_2, float("0"), True)
+        del where_0
+
+        # pd_op.softmax: (16x8x144x144xf32) <- (16x8x144x144xf32)
+        softmax_0 = paddle._C_ops.softmax(scale_0, -1)
+        del scale_0
+
+        # pd_op.full: (1xf32) <- ()
+        full_3 = paddle._C_ops.full(
+            [1], float("0.05"), paddle.float32, paddle.core.CPUPlace()
+        )
+
+        # pd_op.dropout: (16x8x144x144xf32, 16x8x144x144xui8) <- (16x8x144x144xf32, None, 1xf32)
+        dropout_0, dropout_1 = (lambda x, f: f(x))(
+            paddle._C_ops.dropout(
+                softmax_0, None, full_3, False, "upscale_in_train", 0, False
+            ),
+            lambda out: out if isinstance(out, (list, tuple)) else (out, None),
+        )
+
+        # builtin.combine: ([16x8x144x144xf32, 16x144x8x64xf32]) <- (16x8x144x144xf32, 16x144x8x64xf32)
+        combine_0 = [dropout_0, data_1]
+        del data_1, dropout_0
+
+        # pd_op.einsum: (16x144x8x64xf32, [0xf32, 0xf32], [16x8x144x144xf32, 16x144x8x64xf32]) <- ([16x8x144x144xf32, 16x144x8x64xf32])
+        einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))(
+            paddle._C_ops.einsum(combine_0, "bhls,bshd->blhd"),
+            lambda out: out if isinstance(out, (list, tuple)) else (out, None, None),
+        )
+        del combine_0
+
+        # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32])
+        (
+            split_0,
+            split_1,
+        ) = einsum_1
+        del einsum_1
+
+        # builtin.split: (16x8x144x144xf32, 16x144x8x64xf32) <- ([16x8x144x144xf32, 16x144x8x64xf32])
+        (
+            split_2,
+            split_3,
+        ) = einsum_2
+        del (
+            add_1,
+            add_2,
+            add_3,
+            cast_2,
+            dropout_1,
+            einsum_2,
+            full_0,
+            full_2,
+            full_3,
+            softmax_0,
+        )
+
+        return split_0, split_1, split_2, split_3, einsum_0
diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_0/weight_meta.py
similarity index 100%
rename from paddle_samples/PaddleX/TimesNet_cls/subgraph_7/weight_meta.py
rename to paddle_samples/PaddleX/Nonstationary/subgraph_0/weight_meta.py
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt
new file mode 100644
index 000000000..bbd12eddf
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt
@@ -0,0 +1 @@
+f25e2cabc82fb9c0da64a6a26e3df0d65a589fe220cb918d2cedefb5f3b5d9b6
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_net.json
new file mode 100644
index 000000000..1977ac2fa
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_net.json
@@ -0,0 +1,6 @@
+{
+  "framework": "paddle",
+  "model_name": "Nonstationary",
+  "num_devices_required": 1,
+  "num_nodes_required": 1
+}
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_1/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_1/input_meta.py
new file mode 100644
index 000000000..958a268ea
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_1/input_meta.py
@@ -0,0 +1,18 @@
+class Program_weight_tensor_data_0:
+    name = "data_0"
+    shape = [16, 8, 144, 144]
+    dtype = "float32"
+    min_val = float("-97.4341")
+    max_val = float("73.124")
+    mean = float("0.0342255")
+    std = float("2.72095")
+    data = None
+
+
+class Program_weight_tensor_data_1:
+    name = "data_1"
+    shape = [16, 1, 144, 144]
+    dtype = "bool"
+    min_val = 0
+    max_val = 2
+    data = None
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_1/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_1/model.py
new file mode 100644
index 000000000..a8baf8a4e
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_1/model.py
@@ -0,0 +1,73 @@
+import paddle
+
+
+class GraphModule(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, data_0, data_1):
+        # pd_op.full: (16x8x144x144xf32) <- ()
+        full_0 = paddle._C_ops.full(
+            [16, 8, 144, 144],
+            float("-inf"),
+            paddle.float32,
+            paddle.framework._current_expected_place(),
+        )
+
+        # pd_op.full: (1xf32) <- ()
+        full_1 = paddle._C_ops.full(
+            [1], float("0"), paddle.float32, paddle.core.CPUPlace()
+        )
+
+        # pd_op.full_like: (16x8x144x144xf32) <- (16x8x144x144xf32, 1xf32)
+        full_like_0 = paddle._C_ops.full_like(
+            full_0, full_1, paddle.float32, paddle.framework._current_expected_place()
+        )
+
+        # pd_op.full_like: (16x8x144x144xf32) <- (16x8x144x144xf32, 1xf32)
+        full_like_1 = paddle._C_ops.full_like(
+            data_0, full_1, paddle.float32, paddle.framework._current_expected_place()
+        )
+
+        # pd_op.full_like: (16x1x144x144xb) <- (16x1x144x144xb, 1xf32)
+        full_like_2 = paddle._C_ops.full_like(
+            data_1, full_1, paddle.bool, paddle.framework._current_expected_place()
+        )
+        del full_1
+
+        # pd_op.cast: (16x1x144x144xf32) <- (16x1x144x144xb)
+        cast_0 = paddle._C_ops.cast(full_like_2, paddle.float32)
+        del full_like_2
+
+        # pd_op.cast: (16x1x144x144xf32) <- (16x1x144x144xb)
+        cast_1 = paddle._C_ops.cast(data_1, paddle.float32)
+        del data_1
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x8x144x144xf32)
+        add_0 = paddle._C_ops.add(full_like_0, full_like_1)
+        del full_like_0, full_like_1
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x1x144x144xf32)
+        add_1 = paddle._C_ops.add(add_0, cast_0)
+        del add_0, cast_0
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x8x144x144xf32)
+        add_2 = paddle._C_ops.add(full_0, add_1)
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x8x144x144xf32, 16x8x144x144xf32)
+        add_3 = paddle._C_ops.add(data_0, add_1)
+        del data_0
+
+        # pd_op.add: (16x8x144x144xf32) <- (16x1x144x144xf32, 16x8x144x144xf32)
+        add_4 = paddle._C_ops.add(cast_1, add_1)
+        del cast_1
+
+        # pd_op.cast: (16x8x144x144xb) <- (16x8x144x144xf32)
+        cast_2 = paddle._C_ops.cast(add_4, paddle.bool)
+        del add_4
+
+        # pd_op.where: (16x8x144x144xf32) <- (16x8x144x144xb, 16x8x144x144xf32, 16x8x144x144xf32)
+        where_0 = paddle._C_ops.where(cast_2, add_2, add_3)
+        del add_1, add_2, add_3, cast_2, full_0
+
+        return where_0
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_1/weight_meta.py
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_1/weight_meta.py
@@ -0,0 +1 @@
+
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt
new file mode 100644
index 000000000..4753e3668
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt
@@ -0,0 +1 @@
+d7843cd7c0386fde7018814545e8875d82f04f1c2196f7d003d9620e344f8cf9
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_net.json
new file mode 100644
index 000000000..1977ac2fa
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_net.json
@@ -0,0 +1,6 @@
+{
+  "framework": "paddle",
+  "model_name": "Nonstationary",
+  "num_devices_required": 1,
+  "num_nodes_required": 1
+}
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_10/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_10/input_meta.py
new file mode 100644
index 000000000..ef20c10d9
--- /dev/null
+++ b/paddle_samples/PaddleX/Nonstationary/subgraph_10/input_meta.py
@@ -0,0 +1,49 @@
+class Program_weight_tensor_data_0:
+    name = "data_0"
+    shape = []
+    dtype = "int64"
+    data = [3]
+
+
+class Program_weight_tensor_data_1:
+    name = "data_1"
+    shape = [3, 96, 1]
+    dtype = "float32"
+    min_val = float("-1.36201")
+    max_val = float("2.86798")
+    mean = float("0.42894")
+    std = float("1.34003")
+    data = None
+
+
+class Program_weight_tensor_data_2:
+    name = "data_2"
+    shape = [3, 192, 4]
+    dtype = "float32"
+    min_val = float("-0.5")
+    max_val = float("0.5")
+    mean = float("-0.0637064")
+    std = float("0.281218")
+    data = None
+
+
+class Program_weight_tensor_data_3:
+    name = "data_3"
+    shape = [1, 5000, 512]
+    dtype = "float32"
+    min_val = float("-1.0")
+    max_val = float("1.0")
+    mean = float("0.13237")
+    std = float("0.694606")
+    data = None
+
+
+class Program_weight_tensor_data_4:
+    name = "data_4"
+    shape = [1, 5000, 512]
+    dtype = "float32"
+    min_val = float("-1.0")
+    max_val = float("1.0")
+    mean = float("0.13237")
+    std = float("0.694606")
+    data = None
diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_10/model.py
b/paddle_samples/PaddleX/Nonstationary/subgraph_10/model.py new file mode 100644 index 000000000..f5eddce58 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_10/model.py @@ -0,0 +1,1586 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [96] + + # pd_op.slice: (-1x96x4xf32) <- (-1x192x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_2, [1], full_int_array_0, full_int_array_1, [1], [] + ) + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("96"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_0, full_0, full_1] + del data_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x96x1xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del stack_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-48] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (-1x48x1xf32) <- (-1x96x1xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [1], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_3 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_3 + + # builtin.combine: ([-1x48x1xf32, -1x96x1xf32]) <- (-1x48x1xf32, -1x96x1xf32) + combine_1 = [slice_1, full_with_tensor_0] + del full_with_tensor_0, slice_1 + + # pd_op.concat: (-1x144x1xf32) <- ([-1x48x1xf32, -1x96x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-144] + + # pd_op.slice: (-1x144x4xf32) <- (-1x192x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_3, [1], [] + ) + del data_2, full_int_array_4 + + # pd_op.assign: (-1x96x1xf32) <- (-1x96x1xf32) + assign_2 = data_1 + + # pd_op.share_data_: (-1x96x1xf32) <- (-1x96x1xf32) + share_data__0 = assign_2.detach() + del assign_2 + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_5 + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_1, full_int_array_5, True) + + # pd_op.share_data_: (-1x1x1xf32) <- (-1x1x1xf32) + share_data__1 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_0 = paddle._C_ops.subtract(data_1, share_data__1) + del data_1 + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_5, True) + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (-1x96x1xf32) <- (-1x96x1xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_5, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (-1x96x1xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (-1x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (-1x1x1xf32) <- (-1x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1x1xf32) <- (-1x1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_4, float("1e-05"), True) + del divide_1, full_4 + + # pd_op.sqrt: (-1x1x1xf32) <- (-1x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.share_data_: (-1x1x1xf32) <- (-1x1x1xf32) + share_data__2 = sqrt_0.detach() + del sqrt_0 + + # pd_op.divide: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + divide_2 = paddle._C_ops.divide(subtract_0, share_data__2) + del subtract_0 + + # pd_op.slice: (-1x48x1xf32) <- (-1x96x1xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + divide_2, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-96] + + # pd_op.slice: (-1x96x1xf32) <- (-1x144x1xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_0, [1], full_int_array_6, full_int_array_3, [1], [] + ) + del concat_0, full_int_array_3, full_int_array_6 + + # pd_op.full_like: (-1x96x1xf32) <- (-1x96x1xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + slice_4, full_2, paddle.float32, 
paddle.framework._current_expected_place() + ) + del full_2, slice_4 + + # builtin.combine: ([-1x48x1xf32, -1x96x1xf32]) <- (-1x48x1xf32, -1x96x1xf32) + combine_2 = [slice_3, full_like_0] + del full_like_0, slice_3 + + # pd_op.concat: (-1x144x1xf32) <- ([-1x48x1xf32, -1x96x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_3) + del combine_2 + + # pd_op.assign: (-1x144x1xf32) <- (-1x144x1xf32) + assign_11 = concat_1 + del concat_1 + + # pd_op.shape64: (3xi64) <- (-1x96x1xf32) + shape64_0 = paddle._C_ops.shape64(share_data__0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_7 = [3, 4] + + # pd_op.unsqueeze: (-1x96x1x1x1xf32) <- (-1x96x1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(share_data__0, full_int_array_7) + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_8 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (-1x96x3x1x1xf32) <- (-1x96x1x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_0 + + # pd_op.squeeze: (-1x96x3xf32) <- (-1x96x3x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_7) + del pad3d_0 + + # pd_op.assign: (1x96x3xf32) <- (1x96x3xf32) + assign_12 = parameter_49 + del parameter_49 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_14 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_15 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_9 + + # pd_op.unsqueeze: (1x96x1x3xf32) <- (1x96x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_12, full_int_array_9) + + # pd_op.unsqueeze: (-1x96x1x3xf32) <- (-1x96x3xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_9) + del squeeze_0 + + # pd_op.conv2d: (-1x1x1x1xf32) <- (-1x96x1x3xf32, 1x96x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (-1x1x1xf32) <- (-1x1x1x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_9) + + # builtin.combine: ([-1x1x1xf32, -1x1x1xf32]) <- (-1x1x1xf32, -1x1x1xf32) + combine_3 = [squeeze_1, share_data__2] + + # pd_op.concat: (-1x2x1xf32) 
<- ([-1x1x1xf32, -1x1x1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_3, full_3) + del combine_3 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_4 = [slice_5, full_5] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x-1xf32) <- (-1x2x1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(concat_2, stack_1) + del stack_1 + + # pd_op.matmul: (-1x256xf32) <- (-1x-1xf32, 2x256xf32) + matmul_0 = paddle._C_ops.matmul(reshape_0, parameter_48, False, False) + del parameter_48 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_47) + del parameter_47 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.matmul: (-1x256xf32) <- (-1x256xf32, 256x256xf32) + matmul_1 = paddle._C_ops.matmul(relu_0, parameter_46, False, False) + del parameter_46 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_1 = paddle._C_ops.add(matmul_1, parameter_45) + del parameter_45 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_1 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.matmul: (-1x1xf32) <- (-1x256xf32, 256x1xf32) + matmul_2 = paddle._C_ops.matmul(relu_1, parameter_44, False, False) + del parameter_44 + + # pd_op.exp: (-1x1xf32) <- (-1x1xf32) + exp_0 = paddle._C_ops.exp(matmul_2) + del matmul_2 + + # pd_op.unsqueeze: (-1x96x1x1x1xf32) <- (-1x96x1xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(share_data__0, full_int_array_7) + del share_data__0 + + # pd_op.pad3d: (-1x96x3x1x1xf32) <- (-1x96x1x1x1xf32, 6xi64) + pad3d_1 = paddle._C_ops.pad3d( + unsqueeze_3, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_3 + + # pd_op.squeeze: (-1x96x3xf32) <- (-1x96x3x1x1xf32, 2xi64) + squeeze_2 = paddle._C_ops.squeeze(pad3d_1, full_int_array_7) + del pad3d_1 + + # pd_op.assign: (1x96x3xf32) <- (1x96x3xf32) + assign_32 = parameter_43 + del parameter_43 + + # pd_op.unsqueeze: (1x96x1x3xf32) <- (1x96x3xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(assign_32, full_int_array_9) + + # pd_op.unsqueeze: (-1x96x1x3xf32) <- (-1x96x3xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(squeeze_2, full_int_array_9) + del squeeze_2 + + # pd_op.conv2d: (-1x1x1x1xf32) <- (-1x96x1x3xf32, 1x96x1x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + unsqueeze_5, unsqueeze_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (-1x1x1xf32) <- (-1x1x1x1xf32, 1xi64) + squeeze_3 = paddle._C_ops.squeeze(conv2d_1, full_int_array_9) + + # builtin.combine: ([-1x1x1xf32, -1x1x1xf32]) <- (-1x1x1xf32, -1x1x1xf32) + combine_5 = [squeeze_3, share_data__1] + + # pd_op.concat: (-1x2x1xf32) <- ([-1x1x1xf32, -1x1x1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_5, full_3) + del combine_5, full_3 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_6 = [slice_5, full_5] + del slice_5 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (-1x-1xf32) <- (-1x2x1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(concat_3, stack_2) + del stack_2 + + # pd_op.matmul: (-1x256xf32) <- (-1x-1xf32, 2x256xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_42, False, False) + del parameter_42 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_2 = paddle._C_ops.add(matmul_3, parameter_41) + 
del parameter_41 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_2 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.matmul: (-1x256xf32) <- (-1x256xf32, 256x256xf32) + matmul_4 = paddle._C_ops.matmul(relu_2, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_3 = paddle._C_ops.add(matmul_4, parameter_39) + del parameter_39 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_3 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.matmul: (-1x96xf32) <- (-1x256xf32, 256x96xf32) + matmul_5 = paddle._C_ops.matmul(relu_3, parameter_38, False, False) + del parameter_38 + + # pd_op.transpose: (-1x1x96xf32) <- (-1x96x1xf32) + transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + + # pd_op.unsqueeze: (-1x1x96x1x1xf32) <- (-1x1x96xf32, 2xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_7) + del transpose_0 + + # pd_op.pad3d: (-1x1x98x1x1xf32) <- (-1x1x96x1x1xf32, 6xi64) + pad3d_2 = paddle._C_ops.pad3d( + unsqueeze_6, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_6 + + # pd_op.squeeze: (-1x1x98xf32) <- (-1x1x98x1x1xf32, 2xi64) + squeeze_4 = paddle._C_ops.squeeze(pad3d_2, full_int_array_7) + del pad3d_2 + + # pd_op.assign: (512x1x3xf32) <- (512x1x3xf32) + assign_33 = parameter_37 + del parameter_37 + + # pd_op.unsqueeze: (512x1x1x3xf32) <- (512x1x3xf32, 1xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(assign_33, full_int_array_9) + + # pd_op.unsqueeze: (-1x1x1x98xf32) <- (-1x1x98xf32, 1xi64) + unsqueeze_8 = paddle._C_ops.unsqueeze(squeeze_4, full_int_array_9) + del squeeze_4 + + # pd_op.conv2d: (-1x512x1x96xf32) <- (-1x1x1x98xf32, 512x1x1x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + unsqueeze_8, unsqueeze_7, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (-1x512x96xf32) <- (-1x512x1x96xf32, 1xi64) + squeeze_5 = paddle._C_ops.squeeze(conv2d_2, full_int_array_9) + + # pd_op.transpose: (-1x96x512xf32) <- (-1x512x96xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_5, [0, 2, 1]) + del squeeze_5 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x4xf32, 4x512xf32) + matmul_6 = paddle._C_ops.matmul(slice_0, parameter_36, False, False) + del parameter_36 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_4 = paddle._C_ops.add(transpose_1, matmul_6) + + # pd_op.shape64: (3xi64) <- (-1x96x1xf32) + shape64_1 = paddle._C_ops.shape64(divide_2) + del divide_2 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.slice: (1x96x512xf32) <- (1x5000x512xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_3, [1], full_int_array_0, full_int_array_1, [1], [] + ) + del data_3, full_int_array_1 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 1x96x512xf32) + add_5 = paddle._C_ops.add(add_4, slice_7) + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_34 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_35 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_36 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_37 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_38 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_39 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_40 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_41 = full_6 + + # pd_op.assign: (1xf32) <- (1xf32) 
+ assign_42 = full_6 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_1, dropout_2 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.shape64: (3xi64) <- (-1x96x512xf32) + shape64_2 = paddle._C_ops.shape64(dropout_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_2 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_7 = paddle._C_ops.matmul(dropout_1, parameter_35, False, False) + del parameter_35 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_6 = paddle._C_ops.add(matmul_7, parameter_34) + del parameter_34 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_7 = [slice_8, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_6, stack_3) + del stack_3 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_8 = paddle._C_ops.matmul(dropout_1, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_7 = paddle._C_ops.add(matmul_8, parameter_32) + del parameter_32 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_8 = [slice_8, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(add_7, stack_4) + del stack_4 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_9 = paddle._C_ops.matmul(dropout_1, parameter_31, False, False) + del parameter_31 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_8 = paddle._C_ops.add(matmul_9, parameter_30) + del parameter_30 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_9 = [slice_8, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_8, stack_5) + del stack_5 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_3 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_4 = paddle._C_ops.shape64(reshape_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_10 = [3] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_11 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_5 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = 
paddle._C_ops.slice( + shape64_5, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_5 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_6 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del shape64_6 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_9 = paddle._C_ops.unsqueeze(exp_0, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_10 = paddle._C_ops.unsqueeze(unsqueeze_9, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x96xf32) <- (-1x96xf32, 1xi64) + unsqueeze_11 = paddle._C_ops.unsqueeze(matmul_5, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x96xf32) <- (-1x1x96xf32, 1xi64) + unsqueeze_12 = paddle._C_ops.unsqueeze(unsqueeze_11, full_int_array_5) + + # builtin.combine: ([-1x96x8x-1xf32, -1x96x8x-1xf32]) <- (-1x96x8x-1xf32, -1x96x8x-1xf32) + combine_10 = [reshape_2, reshape_3] + del reshape_2, reshape_3 + + # pd_op.einsum: (-1x8x96x96xf32, [0xf32, 0xf32], [-1x96x8x-1xf32, -1x96x8x-1xf32]) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_10, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_10 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (-1x96x8x-1xf32, -1x96x8x-1xf32) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + ( + split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_10) + + # pd_op.add: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x96xf32) + add_9 = paddle._C_ops.add(multiply_0, unsqueeze_12) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_43 = full_8 + + # pd_op.scale: (-1x8x96x96xf32) <- (-1x8x96x96xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_9, full_8, float("0"), True) + del add_9 + + # pd_op.softmax: (-1x8x96x96xf32) <- (-1x8x96x96xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.dropout: (-1x8x96x96xf32, -1x8x96x96xui8) <- (-1x8x96x96xf32, None, 1xf32) + dropout_3, dropout_4 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # builtin.combine: ([-1x8x96x96xf32, -1x96x8x-1xf32]) <- (-1x8x96x96xf32, -1x96x8x-1xf32) + combine_11 = [dropout_3, reshape_4] + del dropout_3, reshape_4 + + # pd_op.einsum: (-1x96x8x-1xf32, [0xf32, 0xf32], [-1x8x96x96xf32, -1x96x8x-1xf32]) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + einsum_3, einsum_4, einsum_5 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_11, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_11 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_4, + split_5, + ) = einsum_4 + del einsum_4 + + # builtin.split: (-1x8x96x96xf32, -1x96x8x-1xf32) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + ( + split_6, + split_7, + ) = einsum_5 + del einsum_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_12 = [slice_8, full_0, full_5] + del slice_8 + + # pd_op.stack: (3xi64) <- 
([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_12, 0) + del combine_12 + + # pd_op.reshape: (-1x96x-1xf32) <- (-1x96x8x-1xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(einsum_3, stack_6) + del stack_6 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x-1xf32, 512x512xf32) + matmul_10 = paddle._C_ops.matmul(reshape_5, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_10, parameter_28) + del parameter_28 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_5, dropout_6 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_10, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_10 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_11 = paddle._C_ops.add(dropout_1, dropout_5) + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, parameter_27, parameter_26, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_26, parameter_27 + + # pd_op.transpose: (-1x512x96xf32) <- (-1x96x512xf32) + transpose_2 = paddle._C_ops.transpose(layer_norm_1, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_44 = parameter_25 + del parameter_25 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_13 = paddle._C_ops.unsqueeze(assign_44, full_int_array_9) + + # pd_op.unsqueeze: (-1x512x1x96xf32) <- (-1x512x96xf32, 1xi64) + unsqueeze_14 = paddle._C_ops.unsqueeze(transpose_2, full_int_array_9) + + # pd_op.conv2d: (-1x2048x1x96xf32) <- (-1x512x1x96xf32, 2048x512x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + unsqueeze_14, unsqueeze_13, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_12 = [1, 2048, 1, 1] + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_24, full_int_array_12) + del parameter_24 + + # pd_op.add: (-1x2048x1x96xf32) <- (-1x2048x1x96xf32, 1x2048x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_3, reshape_6) + + # pd_op.squeeze: (-1x2048x96xf32) <- (-1x2048x1x96xf32, 1xi64) + squeeze_6 = paddle._C_ops.squeeze(add_12, full_int_array_9) + + # pd_op.gelu: (-1x2048x96xf32) <- (-1x2048x96xf32) + gelu_0 = paddle._C_ops.gelu(squeeze_6, False) + + # pd_op.dropout: (-1x2048x96xf32, -1x2048x96xui8) <- (-1x2048x96xf32, None, 1xf32) + dropout_7, dropout_8 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_45 = parameter_23 + del parameter_23 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_15 = paddle._C_ops.unsqueeze(assign_45, full_int_array_9) + + # pd_op.unsqueeze: (-1x2048x1x96xf32) <- (-1x2048x96xf32, 1xi64) + unsqueeze_16 = paddle._C_ops.unsqueeze(dropout_7, full_int_array_9) + + # pd_op.conv2d: (-1x512x1x96xf32) <- (-1x2048x1x96xf32, 512x2048x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + unsqueeze_16, unsqueeze_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_13 = [1, 512, 1, 1] + + # 
pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_22, full_int_array_13) + del parameter_22 + + # pd_op.add: (-1x512x1x96xf32) <- (-1x512x1x96xf32, 1x512x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_4, reshape_7) + + # pd_op.squeeze: (-1x512x96xf32) <- (-1x512x1x96xf32, 1xi64) + squeeze_7 = paddle._C_ops.squeeze(add_13, full_int_array_9) + + # pd_op.transpose: (-1x96x512xf32) <- (-1x512x96xf32) + transpose_3 = paddle._C_ops.transpose(squeeze_7, [0, 2, 1]) + del squeeze_7 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_9, dropout_10 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_3, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_3 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_14 = paddle._C_ops.add(layer_norm_1, dropout_9) + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_21, parameter_20, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_20, parameter_21 + + # pd_op.shape64: (3xi64) <- (-1x96x512xf32) + shape64_7 = paddle._C_ops.shape64(layer_norm_4) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_7 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_11 = paddle._C_ops.matmul(layer_norm_4, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_15 = paddle._C_ops.add(matmul_11, parameter_18) + del parameter_18 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_13 = [slice_13, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_13, 0) + del combine_13 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_15, stack_7) + del stack_7 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_4, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_16 = paddle._C_ops.add(matmul_12, parameter_16) + del parameter_16 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [slice_13, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_16, stack_8) + del stack_8 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_13 = paddle._C_ops.matmul(layer_norm_4, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_13, parameter_14) + del parameter_14 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_15 = [slice_13, full_0, full_7, full_5] + del full_7 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x96x8x-1xf32) <- 
(-1x96x512xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_17, stack_9) + del stack_9 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_8 = paddle._C_ops.shape64(reshape_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_8 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_9 = paddle._C_ops.shape64(reshape_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del shape64_9 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_10 = paddle._C_ops.shape64(reshape_10) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_10 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_11 = paddle._C_ops.shape64(reshape_10) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del full_int_array_10, full_int_array_11, shape64_11 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_17 = paddle._C_ops.unsqueeze(exp_0, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_18 = paddle._C_ops.unsqueeze(unsqueeze_17, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x96xf32) <- (-1x96xf32, 1xi64) + unsqueeze_19 = paddle._C_ops.unsqueeze(matmul_5, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x96xf32) <- (-1x1x96xf32, 1xi64) + unsqueeze_20 = paddle._C_ops.unsqueeze(unsqueeze_19, full_int_array_5) + + # builtin.combine: ([-1x96x8x-1xf32, -1x96x8x-1xf32]) <- (-1x96x8x-1xf32, -1x96x8x-1xf32) + combine_16 = [reshape_8, reshape_9] + del reshape_8, reshape_9 + + # pd_op.einsum: (-1x8x96x96xf32, [0xf32, 0xf32], [-1x96x8x-1xf32, -1x96x8x-1xf32]) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + einsum_6, einsum_7, einsum_8 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_16, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_16 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_8, + split_9, + ) = einsum_7 + del einsum_7 + + # builtin.split: (-1x96x8x-1xf32, -1x96x8x-1xf32) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + ( + split_10, + split_11, + ) = einsum_8 + del einsum_8 + + # pd_op.multiply: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(einsum_6, unsqueeze_18) + + # pd_op.add: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x96xf32) + add_18 = paddle._C_ops.add(multiply_1, unsqueeze_20) + + # pd_op.scale: (-1x8x96x96xf32) <- (-1x8x96x96xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_18, full_8, float("0"), True) + del add_18 + + # pd_op.softmax: (-1x8x96x96xf32) <- (-1x8x96x96xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (-1x8x96x96xf32, -1x8x96x96xui8) <- (-1x8x96x96xf32, None, 1xf32) + dropout_11, dropout_12 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # builtin.combine: ([-1x8x96x96xf32, -1x96x8x-1xf32]) <- (-1x8x96x96xf32, -1x96x8x-1xf32) + combine_17 = [dropout_11, reshape_10] + del dropout_11, reshape_10 + + # pd_op.einsum: (-1x96x8x-1xf32, [0xf32, 0xf32], [-1x8x96x96xf32, -1x96x8x-1xf32]) <- 
([-1x8x96x96xf32, -1x96x8x-1xf32]) + einsum_9, einsum_10, einsum_11 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_17, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_17 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_12, + split_13, + ) = einsum_10 + del einsum_10 + + # builtin.split: (-1x8x96x96xf32, -1x96x8x-1xf32) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + ( + split_14, + split_15, + ) = einsum_11 + del einsum_11 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [slice_13, full_0, full_5] + del full_0, full_5, slice_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x96x-1xf32) <- (-1x96x8x-1xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(einsum_9, stack_10) + del stack_10 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x-1xf32, 512x512xf32) + matmul_14 = paddle._C_ops.matmul(reshape_11, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_19 = paddle._C_ops.add(matmul_14, parameter_12) + del parameter_12 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_13, dropout_14 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_19, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_19 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_20 = paddle._C_ops.add(layer_norm_4, dropout_13) + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_11, parameter_10, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_10, parameter_11 + + # pd_op.transpose: (-1x512x96xf32) <- (-1x96x512xf32) + transpose_4 = paddle._C_ops.transpose(layer_norm_7, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_46 = parameter_9 + del parameter_9 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_21 = paddle._C_ops.unsqueeze(assign_46, full_int_array_9) + + # pd_op.unsqueeze: (-1x512x1x96xf32) <- (-1x512x96xf32, 1xi64) + unsqueeze_22 = paddle._C_ops.unsqueeze(transpose_4, full_int_array_9) + + # pd_op.conv2d: (-1x2048x1x96xf32) <- (-1x512x1x96xf32, 2048x512x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + unsqueeze_22, unsqueeze_21, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_8, full_int_array_12) + del full_int_array_12, parameter_8 + + # pd_op.add: (-1x2048x1x96xf32) <- (-1x2048x1x96xf32, 1x2048x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_5, reshape_12) + + # pd_op.squeeze: (-1x2048x96xf32) <- (-1x2048x1x96xf32, 1xi64) + squeeze_8 = paddle._C_ops.squeeze(add_21, full_int_array_9) + + # pd_op.gelu: (-1x2048x96xf32) <- (-1x2048x96xf32) + gelu_1 = paddle._C_ops.gelu(squeeze_8, False) + + # pd_op.dropout: (-1x2048x96xf32, -1x2048x96xui8) <- (-1x2048x96xf32, None, 1xf32) + dropout_15, dropout_16 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.assign: 
(512x2048x1xf32) <- (512x2048x1xf32) + assign_47 = parameter_7 + del parameter_7 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_23 = paddle._C_ops.unsqueeze(assign_47, full_int_array_9) + + # pd_op.unsqueeze: (-1x2048x1x96xf32) <- (-1x2048x96xf32, 1xi64) + unsqueeze_24 = paddle._C_ops.unsqueeze(dropout_15, full_int_array_9) + + # pd_op.conv2d: (-1x512x1x96xf32) <- (-1x2048x1x96xf32, 512x2048x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + unsqueeze_24, unsqueeze_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_6, full_int_array_13) + del full_int_array_13, parameter_6 + + # pd_op.add: (-1x512x1x96xf32) <- (-1x512x1x96xf32, 1x512x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_6, reshape_13) + + # pd_op.squeeze: (-1x512x96xf32) <- (-1x512x1x96xf32, 1xi64) + squeeze_9 = paddle._C_ops.squeeze(add_22, full_int_array_9) + + # pd_op.transpose: (-1x96x512xf32) <- (-1x512x96xf32) + transpose_5 = paddle._C_ops.transpose(squeeze_9, [0, 2, 1]) + del squeeze_9 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_17, dropout_18 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_5, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_5 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_23 = paddle._C_ops.add(layer_norm_7, dropout_17) + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_5, parameter_4, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_4, parameter_5 + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_10, parameter_3, parameter_2, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_2, parameter_3 + + # pd_op.transpose: (-1x1x144xf32) <- (-1x144x1xf32) + transpose_6 = paddle._C_ops.transpose(assign_11, [0, 2, 1]) + + # pd_op.unsqueeze: (-1x1x144x1x1xf32) <- (-1x1x144xf32, 2xi64) + unsqueeze_25 = paddle._C_ops.unsqueeze(transpose_6, full_int_array_7) + del transpose_6 + + # pd_op.pad3d: (-1x1x146x1x1xf32) <- (-1x1x144x1x1xf32, 6xi64) + pad3d_3 = paddle._C_ops.pad3d( + unsqueeze_25, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del full_int_array_8, unsqueeze_25 + + # pd_op.squeeze: (-1x1x146xf32) <- (-1x1x146x1x1xf32, 2xi64) + squeeze_10 = paddle._C_ops.squeeze(pad3d_3, full_int_array_7) + del full_int_array_7, pad3d_3 + + # pd_op.assign: (512x1x3xf32) <- (512x1x3xf32) + assign_48 = parameter_1 + del parameter_1 + + # pd_op.unsqueeze: (512x1x1x3xf32) <- (512x1x3xf32, 1xi64) + unsqueeze_26 = paddle._C_ops.unsqueeze(assign_48, full_int_array_9) + + # pd_op.unsqueeze: (-1x1x1x146xf32) <- (-1x1x146xf32, 1xi64) + unsqueeze_27 = paddle._C_ops.unsqueeze(squeeze_10, full_int_array_9) + del squeeze_10 + + # pd_op.conv2d: (-1x512x1x144xf32) <- (-1x1x1x146xf32, 512x1x1x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + unsqueeze_27, unsqueeze_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (-1x512x144xf32) <- (-1x512x1x144xf32, 
1xi64) + squeeze_11 = paddle._C_ops.squeeze(conv2d_7, full_int_array_9) + + # pd_op.transpose: (-1x144x512xf32) <- (-1x512x144xf32) + transpose_7 = paddle._C_ops.transpose(squeeze_11, [0, 2, 1]) + del squeeze_11 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x4xf32, 4x512xf32) + matmul_15 = paddle._C_ops.matmul(slice_2, parameter_0, False, False) + del parameter_0 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_24 = paddle._C_ops.add(transpose_7, matmul_15) + + # pd_op.shape64: (3xi64) <- (-1x144x1xf32) + shape64_12 = paddle._C_ops.shape64(assign_11) + del assign_11 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del full_int_array_5, shape64_12 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_14 = [144] + + # pd_op.slice: (1x144x512xf32) <- (1x5000x512xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_4, [1], full_int_array_0, full_int_array_14, [1], [] + ) + del data_4, full_int_array_0, full_int_array_14 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 1x144x512xf32) + add_25 = paddle._C_ops.add(add_24, slice_19) + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_0, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_6, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del ( + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_4, + add_6, + add_7, + add_8, + assign_0, + assign_1, + assign_10, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + concat_2, + concat_3, + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + dropout_1, + dropout_10, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_2, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + einsum_0, + einsum_3, + einsum_6, + einsum_9, + exp_0, + full_6, + full_8, + full_int_array_9, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_1, + matmul_10, + matmul_11, + matmul_12, + matmul_13, + matmul_14, + matmul_15, + matmul_3, + matmul_4, + matmul_5, + matmul_6, + matmul_7, + matmul_8, + matmul_9, + multiply_0, + multiply_1, + relu_0, + relu_1, + relu_2, + relu_3, + reshape_0, + reshape_1, + reshape_11, + reshape_12, + reshape_13, + reshape_5, + reshape_6, + reshape_7, + share_data__1, + share_data__2, + slice_0, + slice_19, + slice_2, + slice_7, + softmax_0, + softmax_1, + squeeze_1, + squeeze_3, + squeeze_6, + squeeze_8, + transpose_1, + transpose_2, + transpose_4, + transpose_7, + unsqueeze_1, + unsqueeze_10, + unsqueeze_11, + 
unsqueeze_12, + unsqueeze_13, + unsqueeze_14, + unsqueeze_15, + unsqueeze_16, + unsqueeze_17, + unsqueeze_18, + unsqueeze_19, + unsqueeze_2, + unsqueeze_20, + unsqueeze_21, + unsqueeze_22, + unsqueeze_23, + unsqueeze_24, + unsqueeze_26, + unsqueeze_27, + unsqueeze_4, + unsqueeze_5, + unsqueeze_7, + unsqueeze_8, + unsqueeze_9, + ) + + return ( + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + split_8, + split_9, + split_10, + split_11, + split_12, + split_13, + split_14, + split_15, + dropout_0, + layer_norm_0, + ) diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_10/weight_meta.py new file mode 100644 index 000000000..622407ca0 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_10/weight_meta.py @@ -0,0 +1,548 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [4, 512] + dtype = "float32" + min_val = float("-0.516648") + max_val = float("0.532824") + mean = float("0.0111749") + std = float("0.29202") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 1, 3] + dtype = "float32" + min_val = float("-2.48717") + max_val = float("2.78685") + mean = float("0.00692592") + std = float("0.834177") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.00299459") + max_val = float("0.0031647") + mean = float("5.9072e-05") + std = float("0.000919968") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("0.990188") + max_val = float("1.00172") + mean = float("0.996878") + std = float("0.00200131") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512] + dtype = "float32" + min_val = float("-0.00234111") + max_val = float("0.00162345") + mean = float("2.57671e-06") + std = float("0.000503227") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512] + dtype = "float32" + min_val = float("0.992512") + max_val = float("1.00992") + mean = float("0.99987") + std = float("0.00231439") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + min_val = float("-0.0022546") + max_val = float("0.0015276") + mean = float("5.94719e-06") + std = float("0.000459151") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.142078") + max_val = float("0.164992") + mean = float("7.18134e-05") + std = float("0.0313107") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [2048] + dtype = "float32" + min_val = float("-0.00586633") + max_val = float("0.00310216") + mean = float("-0.000668817") + std = float("0.00102179") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.289242") + max_val = float("0.306056") + mean = float("5.57143e-05") + std = float("0.0625597") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + min_val = float("-0.00225662") + max_val = float("0.00291557") + mean = float("-1.14119e-05") + std = float("0.000577578") + data = None + + +class Program_weight_tensor_parameter_11: + name = 
"parameter_11" + shape = [512] + dtype = "float32" + min_val = float("0.99061") + max_val = float("1.00831") + mean = float("0.999616") + std = float("0.00193167") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-0.0435265") + max_val = float("0.0449864") + mean = float("0.000492803") + std = float("0.0249498") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.054725") + max_val = float("0.0524777") + mean = float("9.68339e-05") + std = float("0.0255546") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-0.0448786") + max_val = float("0.044449") + mean = float("-0.000110036") + std = float("0.026214") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0552115") + max_val = float("0.0546495") + mean = float("-5.58243e-05") + std = float("0.0255326") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.0437686") + max_val = float("0.0437947") + mean = float("0.000960962") + std = float("0.0258335") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0580188") + max_val = float("0.0566592") + mean = float("4.22353e-05") + std = float("0.0256431") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512] + dtype = "float32" + min_val = float("-0.0439347") + max_val = float("0.0461479") + mean = float("0.00087395") + std = float("0.0248537") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0580408") + max_val = float("0.0555589") + mean = float("4.2071e-05") + std = float("0.0255645") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512] + dtype = "float32" + min_val = float("-0.00212544") + max_val = float("0.00177377") + mean = float("-1.2209e-05") + std = float("0.000639135") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512] + dtype = "float32" + min_val = float("0.989608") + max_val = float("1.00637") + mean = float("0.999456") + std = float("0.00193493") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [512] + dtype = "float32" + min_val = float("-0.00190863") + max_val = float("0.00169045") + mean = float("1.59626e-06") + std = float("0.000560233") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.159284") + max_val = float("0.154599") + mean = float("2.75316e-05") + std = float("0.0313226") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [2048] + dtype = "float32" + min_val = float("-0.00493774") + max_val = float("0.00509812") + mean = float("-0.00077856") + std = float("0.00109339") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.28545") + max_val = float("0.291971") + mean = float("-2.64044e-05") + std 
= float("0.0625971") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [512] + dtype = "float32" + min_val = float("-0.00392048") + max_val = float("0.00438787") + mean = float("-1.01022e-05") + std = float("0.001343") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512] + dtype = "float32" + min_val = float("0.992844") + max_val = float("1.01643") + mean = float("0.999781") + std = float("0.00260062") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [512] + dtype = "float32" + min_val = float("-0.0453684") + max_val = float("0.0462603") + mean = float("-0.000822091") + std = float("0.0253203") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0537899") + max_val = float("0.0554069") + mean = float("3.93107e-05") + std = float("0.0255622") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [512] + dtype = "float32" + min_val = float("-0.0444344") + max_val = float("0.0446174") + mean = float("-0.000132327") + std = float("0.0250294") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0655739") + max_val = float("0.0609998") + mean = float("2.71429e-06") + std = float("0.0256491") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [512] + dtype = "float32" + min_val = float("-0.044085") + max_val = float("0.0441156") + mean = float("0.00325804") + std = float("0.0251241") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0670256") + max_val = float("0.0662077") + mean = float("-2.15194e-05") + std = float("0.0256724") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [512] + dtype = "float32" + min_val = float("-0.0448112") + max_val = float("0.0449485") + mean = float("-0.00040531") + std = float("0.0248306") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0651913") + max_val = float("0.0656894") + mean = float("1.28041e-05") + std = float("0.0257543") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [4, 512] + dtype = "float32" + min_val = float("-0.518061") + max_val = float("0.518531") + mean = float("0.00107684") + std = float("0.291142") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [512, 1, 3] + dtype = "float32" + min_val = float("-2.59087") + max_val = float("2.88376") + mean = float("0.0227843") + std = float("0.820644") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256, 96] + dtype = "float32" + min_val = float("-0.0788252") + max_val = float("0.0977688") + mean = float("-0.000558684") + std = float("0.0369141") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256] + dtype = "float32" + min_val = float("-0.0644514") + max_val = float("0.0700591") + mean = float("0.00538818") + std = float("0.0368006") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256, 256] + dtype = "float32" + min_val = 
float("-0.0735758") + max_val = float("0.0849012") + mean = float("0.00350236") + std = float("0.036879") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-0.705481") + max_val = float("0.706814") + mean = float("-0.010413") + std = float("0.414562") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [2, 256] + dtype = "float32" + min_val = float("-0.718397") + max_val = float("0.712294") + mean = float("0.00169281") + std = float("0.428688") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [1, 96, 3] + dtype = "float32" + min_val = float("-0.258786") + max_val = float("0.248498") + mean = float("0.0242957") + std = float("0.0870572") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256, 1] + dtype = "float32" + min_val = float("-0.0631182") + max_val = float("0.0632669") + mean = float("0.00105919") + std = float("0.032167") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = float("-0.06438") + max_val = float("0.0656383") + mean = float("-0.000643224") + std = float("0.0376659") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.0911308") + max_val = float("0.0913062") + mean = float("8.11175e-05") + std = float("0.036396") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("-0.703553") + max_val = float("0.707552") + mean = float("0.0166094") + std = float("0.417781") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [2, 256] + dtype = "float32" + min_val = float("-0.712464") + max_val = float("0.708446") + mean = float("-0.00239975") + std = float("0.402917") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [1, 96, 3] + dtype = "float32" + min_val = float("-0.24203") + max_val = float("0.243139") + mean = float("0.0111164") + std = float("0.08167") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..ff06dca70 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +f42f153332a0a4876d2eb1705722de3e3b5364eba1fc200390141a5e0cd731e9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_11/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_11/input_meta.py new file mode 100644 index 000000000..edda1e815 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_11/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_1: + name = "data_1" + 
shape = [16, 96, 1] + dtype = "float32" + min_val = float("-2.06142") + max_val = float("-0.30026") + mean = float("-1.28192") + std = float("0.34683") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 192, 4] + dtype = "float32" + min_val = float("-0.5") + max_val = float("0.533333") + mean = float("-0.0662077") + std = float("0.332569") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 5000, 512] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.13237") + std = float("0.694606") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 5000, 512] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.13237") + std = float("0.694606") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_11/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_11/model.py new file mode 100644 index 000000000..def7933e2 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_11/model.py @@ -0,0 +1,1322 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [96] + + # pd_op.slice: (-1x96x4xf32) <- (-1x192x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_2, [1], full_int_array_0, full_int_array_1, [1], [] + ) + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("96"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_0, full_0, full_1] + del data_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x96x1xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del stack_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-48] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (-1x48x1xf32) <- (-1x96x1xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [1], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, 
paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x48x1xf32, -1x96x1xf32]) <- (-1x48x1xf32, -1x96x1xf32) + combine_1 = [slice_1, full_with_tensor_0] + del full_with_tensor_0, slice_1 + + # pd_op.concat: (-1x144x1xf32) <- ([-1x48x1xf32, -1x96x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-144] + + # pd_op.slice: (-1x144x4xf32) <- (-1x192x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_3, [1], [] + ) + del data_2, full_int_array_4 + + # pd_op.assign: (-1x96x1xf32) <- (-1x96x1xf32) + assign_0 = data_1 + + # pd_op.share_data_: (-1x96x1xf32) <- (-1x96x1xf32) + share_data__0 = assign_0.detach() + del assign_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_1, full_int_array_5, True) + + # pd_op.share_data_: (-1x1x1xf32) <- (-1x1x1xf32) + share_data__1 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_0 = paddle._C_ops.subtract(data_1, share_data__1) + del data_1 + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_5, True) + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (-1x96x1xf32) <- (-1x96x1xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_5, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (-1x96x1xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (-1x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (-1x1x1xf32) <- (-1x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1x1xf32) <- (-1x1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_4, float("1e-05"), True) + del divide_1, full_4 + + # pd_op.sqrt: (-1x1x1xf32) <- (-1x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.share_data_: (-1x1x1xf32) <- (-1x1x1xf32) + share_data__2 = sqrt_0.detach() + del sqrt_0 + + # pd_op.divide: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + divide_2 = paddle._C_ops.divide(subtract_0, share_data__2) + del subtract_0 + + # pd_op.slice: (-1x48x1xf32) <- (-1x96x1xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + divide_2, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-96] + + # pd_op.slice: (-1x96x1xf32) <- (-1x144x1xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_0, [1], full_int_array_6, full_int_array_3, [1], 
[] + ) + del concat_0, full_int_array_3, full_int_array_6 + + # pd_op.full_like: (-1x96x1xf32) <- (-1x96x1xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + slice_4, full_2, paddle.float32, paddle.framework._current_expected_place() + ) + del full_2, slice_4 + + # builtin.combine: ([-1x48x1xf32, -1x96x1xf32]) <- (-1x48x1xf32, -1x96x1xf32) + combine_2 = [slice_3, full_like_0] + del full_like_0, slice_3 + + # pd_op.concat: (-1x144x1xf32) <- ([-1x48x1xf32, -1x96x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_3) + del combine_2 + + # pd_op.assign: (-1x144x1xf32) <- (-1x144x1xf32) + assign_1 = concat_1 + del concat_1 + + # pd_op.shape64: (3xi64) <- (-1x96x1xf32) + shape64_0 = paddle._C_ops.shape64(share_data__0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_7 = [3, 4] + + # pd_op.unsqueeze: (-1x96x1x1x1xf32) <- (-1x96x1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(share_data__0, full_int_array_7) + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_8 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (-1x96x3x1x1xf32) <- (-1x96x1x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_0 + + # pd_op.squeeze: (-1x96x3xf32) <- (-1x96x3x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_7) + del pad3d_0 + + # pd_op.assign: (1x96x3xf32) <- (1x96x3xf32) + assign_2 = parameter_49 + del parameter_49 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [-2] + + # pd_op.unsqueeze: (1x96x1x3xf32) <- (1x96x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_2, full_int_array_9) + del assign_2 + + # pd_op.unsqueeze: (-1x96x1x3xf32) <- (-1x96x3xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_9) + del squeeze_0 + + # pd_op.conv2d: (-1x1x1x1xf32) <- (-1x96x1x3xf32, 1x96x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_1, unsqueeze_2 + + # pd_op.squeeze: (-1x1x1xf32) <- (-1x1x1x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_9) + del conv2d_0 + + # builtin.combine: ([-1x1x1xf32, -1x1x1xf32]) <- (-1x1x1xf32, -1x1x1xf32) + combine_3 = [squeeze_1, share_data__2] + del squeeze_1 + + # pd_op.concat: (-1x2x1xf32) <- ([-1x1x1xf32, -1x1x1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_3, full_3) + del combine_3 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_4 = [slice_5, full_5] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x-1xf32) <- (-1x2x1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(concat_2, stack_1) + del concat_2, stack_1 + + # pd_op.matmul: (-1x256xf32) <- (-1x-1xf32, 2x256xf32) + matmul_0 = paddle._C_ops.matmul(reshape_0, parameter_48, False, False) + del parameter_48, reshape_0 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_47) + del matmul_0, parameter_47 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.matmul: (-1x256xf32) <- (-1x256xf32, 256x256xf32) + matmul_1 = paddle._C_ops.matmul(relu_0, 
parameter_46, False, False) + del parameter_46, relu_0 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_1 = paddle._C_ops.add(matmul_1, parameter_45) + del matmul_1, parameter_45 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_1 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.matmul: (-1x1xf32) <- (-1x256xf32, 256x1xf32) + matmul_2 = paddle._C_ops.matmul(relu_1, parameter_44, False, False) + del parameter_44, relu_1 + + # pd_op.exp: (-1x1xf32) <- (-1x1xf32) + exp_0 = paddle._C_ops.exp(matmul_2) + del matmul_2 + + # pd_op.unsqueeze: (-1x96x1x1x1xf32) <- (-1x96x1xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(share_data__0, full_int_array_7) + del share_data__0 + + # pd_op.pad3d: (-1x96x3x1x1xf32) <- (-1x96x1x1x1xf32, 6xi64) + pad3d_1 = paddle._C_ops.pad3d( + unsqueeze_3, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_3 + + # pd_op.squeeze: (-1x96x3xf32) <- (-1x96x3x1x1xf32, 2xi64) + squeeze_2 = paddle._C_ops.squeeze(pad3d_1, full_int_array_7) + del pad3d_1 + + # pd_op.assign: (1x96x3xf32) <- (1x96x3xf32) + assign_3 = parameter_43 + del parameter_43 + + # pd_op.unsqueeze: (1x96x1x3xf32) <- (1x96x3xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(assign_3, full_int_array_9) + del assign_3 + + # pd_op.unsqueeze: (-1x96x1x3xf32) <- (-1x96x3xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(squeeze_2, full_int_array_9) + del squeeze_2 + + # pd_op.conv2d: (-1x1x1x1xf32) <- (-1x96x1x3xf32, 1x96x1x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + unsqueeze_5, unsqueeze_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_4, unsqueeze_5 + + # pd_op.squeeze: (-1x1x1xf32) <- (-1x1x1x1xf32, 1xi64) + squeeze_3 = paddle._C_ops.squeeze(conv2d_1, full_int_array_9) + del conv2d_1 + + # builtin.combine: ([-1x1x1xf32, -1x1x1xf32]) <- (-1x1x1xf32, -1x1x1xf32) + combine_5 = [squeeze_3, share_data__1] + del squeeze_3 + + # pd_op.concat: (-1x2x1xf32) <- ([-1x1x1xf32, -1x1x1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_5, full_3) + del combine_5, full_3 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_6 = [slice_5, full_5] + del slice_5 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (-1x-1xf32) <- (-1x2x1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(concat_3, stack_2) + del concat_3, stack_2 + + # pd_op.matmul: (-1x256xf32) <- (-1x-1xf32, 2x256xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_42, False, False) + del parameter_42, reshape_1 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_2 = paddle._C_ops.add(matmul_3, parameter_41) + del matmul_3, parameter_41 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_2 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.matmul: (-1x256xf32) <- (-1x256xf32, 256x256xf32) + matmul_4 = paddle._C_ops.matmul(relu_2, parameter_40, False, False) + del parameter_40, relu_2 + + # pd_op.add: (-1x256xf32) <- (-1x256xf32, 256xf32) + add_3 = paddle._C_ops.add(matmul_4, parameter_39) + del matmul_4, parameter_39 + + # pd_op.relu: (-1x256xf32) <- (-1x256xf32) + relu_3 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.matmul: (-1x96xf32) <- (-1x256xf32, 256x96xf32) + matmul_5 = paddle._C_ops.matmul(relu_3, parameter_38, False, False) + del parameter_38, relu_3 + + # pd_op.transpose: (-1x1x96xf32) <- (-1x96x1xf32) + transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + + # pd_op.unsqueeze: (-1x1x96x1x1xf32) <- (-1x1x96xf32, 2xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(transpose_0, 
full_int_array_7) + del transpose_0 + + # pd_op.pad3d: (-1x1x98x1x1xf32) <- (-1x1x96x1x1xf32, 6xi64) + pad3d_2 = paddle._C_ops.pad3d( + unsqueeze_6, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_6 + + # pd_op.squeeze: (-1x1x98xf32) <- (-1x1x98x1x1xf32, 2xi64) + squeeze_4 = paddle._C_ops.squeeze(pad3d_2, full_int_array_7) + del pad3d_2 + + # pd_op.assign: (512x1x3xf32) <- (512x1x3xf32) + assign_4 = parameter_37 + del parameter_37 + + # pd_op.unsqueeze: (512x1x1x3xf32) <- (512x1x3xf32, 1xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(assign_4, full_int_array_9) + del assign_4 + + # pd_op.unsqueeze: (-1x1x1x98xf32) <- (-1x1x98xf32, 1xi64) + unsqueeze_8 = paddle._C_ops.unsqueeze(squeeze_4, full_int_array_9) + del squeeze_4 + + # pd_op.conv2d: (-1x512x1x96xf32) <- (-1x1x1x98xf32, 512x1x1x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + unsqueeze_8, unsqueeze_7, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_7, unsqueeze_8 + + # pd_op.squeeze: (-1x512x96xf32) <- (-1x512x1x96xf32, 1xi64) + squeeze_5 = paddle._C_ops.squeeze(conv2d_2, full_int_array_9) + del conv2d_2 + + # pd_op.transpose: (-1x96x512xf32) <- (-1x512x96xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_5, [0, 2, 1]) + del squeeze_5 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x4xf32, 4x512xf32) + matmul_6 = paddle._C_ops.matmul(slice_0, parameter_36, False, False) + del parameter_36, slice_0 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_4 = paddle._C_ops.add(transpose_1, matmul_6) + del matmul_6, transpose_1 + + # pd_op.shape64: (3xi64) <- (-1x96x1xf32) + shape64_1 = paddle._C_ops.shape64(divide_2) + del divide_2 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.slice: (1x96x512xf32) <- (1x5000x512xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_3, [1], full_int_array_0, full_int_array_1, [1], [] + ) + del data_3, full_int_array_1 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 1x96x512xf32) + add_5 = paddle._C_ops.add(add_4, slice_7) + del add_4, slice_7 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_1, dropout_2 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.shape64: (3xi64) <- (-1x96x512xf32) + shape64_2 = paddle._C_ops.shape64(dropout_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_2 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_7 = paddle._C_ops.matmul(dropout_1, parameter_35, False, False) + del parameter_35 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_6 = paddle._C_ops.add(matmul_7, parameter_34) + del matmul_7, parameter_34 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_7 = [slice_8, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: 
(-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_6, stack_3) + del add_6, stack_3 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_8 = paddle._C_ops.matmul(dropout_1, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_7 = paddle._C_ops.add(matmul_8, parameter_32) + del matmul_8, parameter_32 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_8 = [slice_8, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(add_7, stack_4) + del add_7, stack_4 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_9 = paddle._C_ops.matmul(dropout_1, parameter_31, False, False) + del parameter_31 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_8 = paddle._C_ops.add(matmul_9, parameter_30) + del matmul_9, parameter_30 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_9 = [slice_8, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_8, stack_5) + del add_8, stack_5 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_3 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_4 = paddle._C_ops.shape64(reshape_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_10 = [3] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_11 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_5 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_5 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_6 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del shape64_6 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_9 = paddle._C_ops.unsqueeze(exp_0, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_10 = paddle._C_ops.unsqueeze(unsqueeze_9, full_int_array_5) + del unsqueeze_9 + + # pd_op.unsqueeze: (-1x1x96xf32) <- (-1x96xf32, 1xi64) + unsqueeze_11 = paddle._C_ops.unsqueeze(matmul_5, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x96xf32) <- (-1x1x96xf32, 1xi64) + unsqueeze_12 = paddle._C_ops.unsqueeze(unsqueeze_11, full_int_array_5) + del unsqueeze_11 + + # builtin.combine: ([-1x96x8x-1xf32, -1x96x8x-1xf32]) <- (-1x96x8x-1xf32, -1x96x8x-1xf32) + combine_10 = [reshape_2, reshape_3] + del reshape_2, reshape_3 + + # pd_op.einsum: (-1x8x96x96xf32, [0xf32, 0xf32], [-1x96x8x-1xf32, -1x96x8x-1xf32]) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + einsum_0, einsum_1, einsum_2 = 
(lambda x, f: f(x))( + paddle._C_ops.einsum(combine_10, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_10 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (-1x96x8x-1xf32, -1x96x8x-1xf32) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + ( + split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_10) + del einsum_0, unsqueeze_10 + + # pd_op.add: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x96xf32) + add_9 = paddle._C_ops.add(multiply_0, unsqueeze_12) + del multiply_0, unsqueeze_12 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x96x96xf32) <- (-1x8x96x96xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_9, full_8, float("0"), True) + del add_9 + + # pd_op.softmax: (-1x8x96x96xf32) <- (-1x8x96x96xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.dropout: (-1x8x96x96xf32, -1x8x96x96xui8) <- (-1x8x96x96xf32, None, 1xf32) + dropout_3, dropout_4 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_0 + + # builtin.combine: ([-1x8x96x96xf32, -1x96x8x-1xf32]) <- (-1x8x96x96xf32, -1x96x8x-1xf32) + combine_11 = [dropout_3, reshape_4] + del dropout_3, reshape_4 + + # pd_op.einsum: (-1x96x8x-1xf32, [0xf32, 0xf32], [-1x8x96x96xf32, -1x96x8x-1xf32]) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + einsum_3, einsum_4, einsum_5 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_11, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_11 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_4, + split_5, + ) = einsum_4 + del einsum_4 + + # builtin.split: (-1x8x96x96xf32, -1x96x8x-1xf32) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + ( + split_6, + split_7, + ) = einsum_5 + del einsum_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_12 = [slice_8, full_0, full_5] + del slice_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_12, 0) + del combine_12 + + # pd_op.reshape: (-1x96x-1xf32) <- (-1x96x8x-1xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(einsum_3, stack_6) + del einsum_3, stack_6 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x-1xf32, 512x512xf32) + matmul_10 = paddle._C_ops.matmul(reshape_5, parameter_29, False, False) + del parameter_29, reshape_5 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_10, parameter_28) + del matmul_10, parameter_28 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_5, dropout_6 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_10, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_10 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_11 = paddle._C_ops.add(dropout_1, dropout_5) + del dropout_1, dropout_5 + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, 
parameter_27, parameter_26, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_11, parameter_26, parameter_27 + + # pd_op.transpose: (-1x512x96xf32) <- (-1x96x512xf32) + transpose_2 = paddle._C_ops.transpose(layer_norm_1, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_5 = parameter_25 + del parameter_25 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_13 = paddle._C_ops.unsqueeze(assign_5, full_int_array_9) + del assign_5 + + # pd_op.unsqueeze: (-1x512x1x96xf32) <- (-1x512x96xf32, 1xi64) + unsqueeze_14 = paddle._C_ops.unsqueeze(transpose_2, full_int_array_9) + del transpose_2 + + # pd_op.conv2d: (-1x2048x1x96xf32) <- (-1x512x1x96xf32, 2048x512x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + unsqueeze_14, unsqueeze_13, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_13, unsqueeze_14 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_12 = [1, 2048, 1, 1] + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_24, full_int_array_12) + del parameter_24 + + # pd_op.add: (-1x2048x1x96xf32) <- (-1x2048x1x96xf32, 1x2048x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_3, reshape_6) + del conv2d_3, reshape_6 + + # pd_op.squeeze: (-1x2048x96xf32) <- (-1x2048x1x96xf32, 1xi64) + squeeze_6 = paddle._C_ops.squeeze(add_12, full_int_array_9) + del add_12 + + # pd_op.gelu: (-1x2048x96xf32) <- (-1x2048x96xf32) + gelu_0 = paddle._C_ops.gelu(squeeze_6, False) + del squeeze_6 + + # pd_op.dropout: (-1x2048x96xf32, -1x2048x96xui8) <- (-1x2048x96xf32, None, 1xf32) + dropout_7, dropout_8 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_6 = parameter_23 + del parameter_23 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_15 = paddle._C_ops.unsqueeze(assign_6, full_int_array_9) + del assign_6 + + # pd_op.unsqueeze: (-1x2048x1x96xf32) <- (-1x2048x96xf32, 1xi64) + unsqueeze_16 = paddle._C_ops.unsqueeze(dropout_7, full_int_array_9) + del dropout_7 + + # pd_op.conv2d: (-1x512x1x96xf32) <- (-1x2048x1x96xf32, 512x2048x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + unsqueeze_16, unsqueeze_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_15, unsqueeze_16 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_13 = [1, 512, 1, 1] + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_22, full_int_array_13) + del parameter_22 + + # pd_op.add: (-1x512x1x96xf32) <- (-1x512x1x96xf32, 1x512x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_4, reshape_7) + del conv2d_4, reshape_7 + + # pd_op.squeeze: (-1x512x96xf32) <- (-1x512x1x96xf32, 1xi64) + squeeze_7 = paddle._C_ops.squeeze(add_13, full_int_array_9) + del add_13 + + # pd_op.transpose: (-1x96x512xf32) <- (-1x512x96xf32) + transpose_3 = paddle._C_ops.transpose(squeeze_7, [0, 2, 1]) + del squeeze_7 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_9, dropout_10 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_3, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_3 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + 
add_14 = paddle._C_ops.add(layer_norm_1, dropout_9) + del dropout_9, layer_norm_1 + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_21, parameter_20, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_14, parameter_20, parameter_21 + + # pd_op.shape64: (3xi64) <- (-1x96x512xf32) + shape64_7 = paddle._C_ops.shape64(layer_norm_4) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_7 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_11 = paddle._C_ops.matmul(layer_norm_4, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_15 = paddle._C_ops.add(matmul_11, parameter_18) + del matmul_11, parameter_18 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_13 = [slice_13, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_13, 0) + del combine_13 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_15, stack_7) + del add_15, stack_7 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_4, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_16 = paddle._C_ops.add(matmul_12, parameter_16) + del matmul_12, parameter_16 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [slice_13, full_0, full_7, full_5] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_16, stack_8) + del add_16, stack_8 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_13 = paddle._C_ops.matmul(layer_norm_4, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_13, parameter_14) + del matmul_13, parameter_14 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_15 = [slice_13, full_0, full_7, full_5] + del full_7 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_17, stack_9) + del add_17, stack_9 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_8 = paddle._C_ops.shape64(reshape_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_8 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_9 = paddle._C_ops.shape64(reshape_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del shape64_9 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_10 = paddle._C_ops.shape64(reshape_10) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_10, 
[0], full_int_array_0, full_int_array_5, [1], [0] + ) + del shape64_10 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_11 = paddle._C_ops.shape64(reshape_10) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_10, full_int_array_11, [1], [0] + ) + del full_int_array_10, full_int_array_11, shape64_11 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_17 = paddle._C_ops.unsqueeze(exp_0, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_18 = paddle._C_ops.unsqueeze(unsqueeze_17, full_int_array_5) + del unsqueeze_17 + + # pd_op.unsqueeze: (-1x1x96xf32) <- (-1x96xf32, 1xi64) + unsqueeze_19 = paddle._C_ops.unsqueeze(matmul_5, full_int_array_5) + + # pd_op.unsqueeze: (-1x1x1x96xf32) <- (-1x1x96xf32, 1xi64) + unsqueeze_20 = paddle._C_ops.unsqueeze(unsqueeze_19, full_int_array_5) + del unsqueeze_19 + + # builtin.combine: ([-1x96x8x-1xf32, -1x96x8x-1xf32]) <- (-1x96x8x-1xf32, -1x96x8x-1xf32) + combine_16 = [reshape_8, reshape_9] + del reshape_8, reshape_9 + + # pd_op.einsum: (-1x8x96x96xf32, [0xf32, 0xf32], [-1x96x8x-1xf32, -1x96x8x-1xf32]) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + einsum_6, einsum_7, einsum_8 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_16, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_16 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_8, + split_9, + ) = einsum_7 + del einsum_7 + + # builtin.split: (-1x96x8x-1xf32, -1x96x8x-1xf32) <- ([-1x96x8x-1xf32, -1x96x8x-1xf32]) + ( + split_10, + split_11, + ) = einsum_8 + del einsum_8 + + # pd_op.multiply: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(einsum_6, unsqueeze_18) + del einsum_6, unsqueeze_18 + + # pd_op.add: (-1x8x96x96xf32) <- (-1x8x96x96xf32, -1x1x1x96xf32) + add_18 = paddle._C_ops.add(multiply_1, unsqueeze_20) + del multiply_1, unsqueeze_20 + + # pd_op.scale: (-1x8x96x96xf32) <- (-1x8x96x96xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_18, full_8, float("0"), True) + del add_18, full_8 + + # pd_op.softmax: (-1x8x96x96xf32) <- (-1x8x96x96xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (-1x8x96x96xf32, -1x8x96x96xui8) <- (-1x8x96x96xf32, None, 1xf32) + dropout_11, dropout_12 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # builtin.combine: ([-1x8x96x96xf32, -1x96x8x-1xf32]) <- (-1x8x96x96xf32, -1x96x8x-1xf32) + combine_17 = [dropout_11, reshape_10] + del dropout_11, reshape_10 + + # pd_op.einsum: (-1x96x8x-1xf32, [0xf32, 0xf32], [-1x8x96x96xf32, -1x96x8x-1xf32]) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + einsum_9, einsum_10, einsum_11 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_17, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_17 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_12, + split_13, + ) = einsum_10 + del einsum_10 + + # builtin.split: (-1x8x96x96xf32, -1x96x8x-1xf32) <- ([-1x8x96x96xf32, -1x96x8x-1xf32]) + ( + split_14, + split_15, + ) = einsum_11 + del einsum_11 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [slice_13, full_0, full_5] + del full_0, full_5, slice_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + 
stack_10 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x96x-1xf32) <- (-1x96x8x-1xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(einsum_9, stack_10) + del einsum_9, stack_10 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x-1xf32, 512x512xf32) + matmul_14 = paddle._C_ops.matmul(reshape_11, parameter_13, False, False) + del parameter_13, reshape_11 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_19 = paddle._C_ops.add(matmul_14, parameter_12) + del matmul_14, parameter_12 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_13, dropout_14 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_19, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_19 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_20 = paddle._C_ops.add(layer_norm_4, dropout_13) + del dropout_13, layer_norm_4 + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_11, parameter_10, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_20, parameter_10, parameter_11 + + # pd_op.transpose: (-1x512x96xf32) <- (-1x96x512xf32) + transpose_4 = paddle._C_ops.transpose(layer_norm_7, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_7 = parameter_9 + del parameter_9 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_21 = paddle._C_ops.unsqueeze(assign_7, full_int_array_9) + del assign_7 + + # pd_op.unsqueeze: (-1x512x1x96xf32) <- (-1x512x96xf32, 1xi64) + unsqueeze_22 = paddle._C_ops.unsqueeze(transpose_4, full_int_array_9) + del transpose_4 + + # pd_op.conv2d: (-1x2048x1x96xf32) <- (-1x512x1x96xf32, 2048x512x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + unsqueeze_22, unsqueeze_21, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_21, unsqueeze_22 + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_8, full_int_array_12) + del full_int_array_12, parameter_8 + + # pd_op.add: (-1x2048x1x96xf32) <- (-1x2048x1x96xf32, 1x2048x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_5, reshape_12) + del conv2d_5, reshape_12 + + # pd_op.squeeze: (-1x2048x96xf32) <- (-1x2048x1x96xf32, 1xi64) + squeeze_8 = paddle._C_ops.squeeze(add_21, full_int_array_9) + del add_21 + + # pd_op.gelu: (-1x2048x96xf32) <- (-1x2048x96xf32) + gelu_1 = paddle._C_ops.gelu(squeeze_8, False) + del squeeze_8 + + # pd_op.dropout: (-1x2048x96xf32, -1x2048x96xui8) <- (-1x2048x96xf32, None, 1xf32) + dropout_15, dropout_16 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_8 = parameter_7 + del parameter_7 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_23 = paddle._C_ops.unsqueeze(assign_8, full_int_array_9) + del assign_8 + + # pd_op.unsqueeze: (-1x2048x1x96xf32) <- (-1x2048x96xf32, 1xi64) + unsqueeze_24 = paddle._C_ops.unsqueeze(dropout_15, full_int_array_9) + del dropout_15 + + # pd_op.conv2d: (-1x512x1x96xf32) <- (-1x2048x1x96xf32, 512x2048x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + unsqueeze_24, 
unsqueeze_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_23, unsqueeze_24 + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_6, full_int_array_13) + del full_int_array_13, parameter_6 + + # pd_op.add: (-1x512x1x96xf32) <- (-1x512x1x96xf32, 1x512x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_6, reshape_13) + del conv2d_6, reshape_13 + + # pd_op.squeeze: (-1x512x96xf32) <- (-1x512x1x96xf32, 1xi64) + squeeze_9 = paddle._C_ops.squeeze(add_22, full_int_array_9) + del add_22 + + # pd_op.transpose: (-1x96x512xf32) <- (-1x512x96xf32) + transpose_5 = paddle._C_ops.transpose(squeeze_9, [0, 2, 1]) + del squeeze_9 + + # pd_op.dropout: (-1x96x512xf32, -1x96x512xui8) <- (-1x96x512xf32, None, 1xf32) + dropout_17, dropout_18 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_5, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_5 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, -1x96x512xf32) + add_23 = paddle._C_ops.add(layer_norm_7, dropout_17) + del dropout_17, layer_norm_7 + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_5, parameter_4, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_23, parameter_4, parameter_5 + + # pd_op.layer_norm: (-1x96x512xf32, -1x96xf32, -1x96xf32) <- (-1x96x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_10, parameter_3, parameter_2, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del layer_norm_10, parameter_2, parameter_3 + + # pd_op.transpose: (-1x1x144xf32) <- (-1x144x1xf32) + transpose_6 = paddle._C_ops.transpose(assign_1, [0, 2, 1]) + + # pd_op.unsqueeze: (-1x1x144x1x1xf32) <- (-1x1x144xf32, 2xi64) + unsqueeze_25 = paddle._C_ops.unsqueeze(transpose_6, full_int_array_7) + del transpose_6 + + # pd_op.pad3d: (-1x1x146x1x1xf32) <- (-1x1x144x1x1xf32, 6xi64) + pad3d_3 = paddle._C_ops.pad3d( + unsqueeze_25, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del full_int_array_8, unsqueeze_25 + + # pd_op.squeeze: (-1x1x146xf32) <- (-1x1x146x1x1xf32, 2xi64) + squeeze_10 = paddle._C_ops.squeeze(pad3d_3, full_int_array_7) + del full_int_array_7, pad3d_3 + + # pd_op.assign: (512x1x3xf32) <- (512x1x3xf32) + assign_9 = parameter_1 + del parameter_1 + + # pd_op.unsqueeze: (512x1x1x3xf32) <- (512x1x3xf32, 1xi64) + unsqueeze_26 = paddle._C_ops.unsqueeze(assign_9, full_int_array_9) + del assign_9 + + # pd_op.unsqueeze: (-1x1x1x146xf32) <- (-1x1x146xf32, 1xi64) + unsqueeze_27 = paddle._C_ops.unsqueeze(squeeze_10, full_int_array_9) + del squeeze_10 + + # pd_op.conv2d: (-1x512x1x144xf32) <- (-1x1x1x146xf32, 512x1x1x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + unsqueeze_27, unsqueeze_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_26, unsqueeze_27 + + # pd_op.squeeze: (-1x512x144xf32) <- (-1x512x1x144xf32, 1xi64) + squeeze_11 = paddle._C_ops.squeeze(conv2d_7, full_int_array_9) + del conv2d_7, full_int_array_9 + + # pd_op.transpose: (-1x144x512xf32) <- (-1x512x144xf32) + transpose_7 = paddle._C_ops.transpose(squeeze_11, [0, 2, 1]) + del squeeze_11 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x4xf32, 4x512xf32) + 
matmul_15 = paddle._C_ops.matmul(slice_2, parameter_0, False, False) + del parameter_0, slice_2 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_24 = paddle._C_ops.add(transpose_7, matmul_15) + del matmul_15, transpose_7 + + # pd_op.shape64: (3xi64) <- (-1x144x1xf32) + shape64_12 = paddle._C_ops.shape64(assign_1) + del assign_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_0, full_int_array_5, [1], [0] + ) + del full_int_array_5, shape64_12 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_14 = [144] + + # pd_op.slice: (1x144x512xf32) <- (1x5000x512xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_4, [1], full_int_array_0, full_int_array_14, [1], [] + ) + del data_4, full_int_array_0, full_int_array_14 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 1x144x512xf32) + add_25 = paddle._C_ops.add(add_24, slice_19) + del add_24, slice_19 + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_0, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_6, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_25, exp_0, full_6, matmul_5, share_data__1, share_data__2 + + return dropout_0, layer_norm_0 diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_11/weight_meta.py new file mode 100644 index 000000000..9606078a4 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_11/weight_meta.py @@ -0,0 +1,548 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [4, 512] + dtype = "float32" + min_val = float("-0.516677") + max_val = float("0.532903") + mean = float("0.0111753") + std = float("0.292031") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 1, 3] + dtype = "float32" + min_val = float("-2.48719") + max_val = float("2.78684") + mean = float("0.00692642") + std = float("0.834176") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.00305979") + max_val = float("0.00322003") + mean = float("5.95252e-05") + std = float("0.000925967") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("0.990193") + max_val = float("1.00172") + mean = float("0.996864") + std = float("0.00200299") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512] + dtype = "float32" + min_val = float("-0.00235936") + max_val = float("0.00165742") + mean = float("2.45158e-06") + std = float("0.000508976") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512] + dtype = "float32" + min_val = float("0.992481") + max_val = float("1.00997") + mean = float("0.99987") + std = float("0.00231885") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + min_val = float("-0.00227089") + max_val = float("0.00154006") + mean = float("5.82205e-06") + std = float("0.000460681") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.142082") + max_val = float("0.165008") + mean = float("7.17457e-05") + std = 
float("0.0313108") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [2048] + dtype = "float32" + min_val = float("-0.00589507") + max_val = float("0.00314866") + mean = float("-0.00067046") + std = float("0.00102437") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.28923") + max_val = float("0.306061") + mean = float("5.56915e-05") + std = float("0.0625597") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + min_val = float("-0.00228257") + max_val = float("0.00295043") + mean = float("-1.27666e-05") + std = float("0.000580614") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512] + dtype = "float32" + min_val = float("0.99054") + max_val = float("1.00831") + mean = float("0.999614") + std = float("0.00193367") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-0.0434872") + max_val = float("0.0449703") + mean = float("0.000492302") + std = float("0.0249476") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0547241") + max_val = float("0.0524427") + mean = float("9.68225e-05") + std = float("0.025554") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-0.0448612") + max_val = float("0.0444284") + mean = float("-0.000109846") + std = float("0.0262145") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.055098") + max_val = float("0.0545562") + mean = float("-5.58127e-05") + std = float("0.0255321") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.0437686") + max_val = float("0.0437947") + mean = float("0.000960961") + std = float("0.0258335") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0580123") + max_val = float("0.0566037") + mean = float("4.22441e-05") + std = float("0.0256427") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512] + dtype = "float32" + min_val = float("-0.0438837") + max_val = float("0.0460991") + mean = float("0.000875206") + std = float("0.0248522") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0580541") + max_val = float("0.0554847") + mean = float("4.2117e-05") + std = float("0.025564") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512] + dtype = "float32" + min_val = float("-0.00218964") + max_val = float("0.00179418") + mean = float("-1.35697e-05") + std = float("0.000641896") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512] + dtype = "float32" + min_val = float("0.98961") + max_val = float("1.00635") + mean = float("0.999451") + std = float("0.00193429") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [512] + dtype = "float32" + min_val = 
float("-0.0019689") + max_val = float("0.0016809") + mean = float("8.09281e-07") + std = float("0.000560917") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.159276") + max_val = float("0.15462") + mean = float("2.70874e-05") + std = float("0.0313229") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [2048] + dtype = "float32" + min_val = float("-0.00498069") + max_val = float("0.00514892") + mean = float("-0.000779943") + std = float("0.00109583") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.285435") + max_val = float("0.292016") + mean = float("-2.6195e-05") + std = float("0.0625971") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [512] + dtype = "float32" + min_val = float("-0.00395776") + max_val = float("0.00440574") + mean = float("-9.06848e-06") + std = float("0.00135322") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512] + dtype = "float32" + min_val = float("0.992855") + max_val = float("1.01649") + mean = float("0.999779") + std = float("0.0025997") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [512] + dtype = "float32" + min_val = float("-0.0454154") + max_val = float("0.0462302") + mean = float("-0.000822526") + std = float("0.0253206") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0537288") + max_val = float("0.0553914") + mean = float("3.93139e-05") + std = float("0.0255616") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [512] + dtype = "float32" + min_val = float("-0.0444457") + max_val = float("0.0446904") + mean = float("-0.00012996") + std = float("0.0250311") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.065649") + max_val = float("0.0610422") + mean = float("3.76994e-06") + std = float("0.0256488") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [512] + dtype = "float32" + min_val = float("-0.044085") + max_val = float("0.0441156") + mean = float("0.00325804") + std = float("0.0251241") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0670579") + max_val = float("0.0662575") + mean = float("-2.14905e-05") + std = float("0.0256728") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [512] + dtype = "float32" + min_val = float("-0.0448819") + max_val = float("0.0448913") + mean = float("-0.000408059") + std = float("0.024829") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.065153") + max_val = float("0.0657418") + mean = float("1.16269e-05") + std = float("0.0257549") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [4, 512] + dtype = "float32" + min_val = float("-0.518066") + max_val = float("0.51862") + mean = float("0.00107445") + std = float("0.291154") + data = None + + +class 
Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [512, 1, 3] + dtype = "float32" + min_val = float("-2.59084") + max_val = float("2.88379") + mean = float("0.0227863") + std = float("0.820646") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256, 96] + dtype = "float32" + min_val = float("-0.0789143") + max_val = float("0.0978309") + mean = float("-0.000558854") + std = float("0.0369148") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256] + dtype = "float32" + min_val = float("-0.0644954") + max_val = float("0.0700944") + mean = float("0.00539506") + std = float("0.0368053") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.0736171") + max_val = float("0.0849385") + mean = float("0.00350442") + std = float("0.0368803") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-0.705511") + max_val = float("0.706881") + mean = float("-0.0104033") + std = float("0.414574") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [2, 256] + dtype = "float32" + min_val = float("-0.718449") + max_val = float("0.712251") + mean = float("0.00165324") + std = float("0.42869") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [1, 96, 3] + dtype = "float32" + min_val = float("-0.258775") + max_val = float("0.248503") + mean = float("0.0242983") + std = float("0.0870573") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256, 1] + dtype = "float32" + min_val = float("-0.0631358") + max_val = float("0.0632669") + mean = float("0.00105952") + std = float("0.0321693") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = float("-0.06438") + max_val = float("0.065633") + mean = float("-0.000642404") + std = float("0.0376653") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.0911485") + max_val = float("0.0913537") + mean = float("8.12422e-05") + std = float("0.0363969") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("-0.703553") + max_val = float("0.707552") + mean = float("0.0166121") + std = float("0.417778") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [2, 256] + dtype = "float32" + min_val = float("-0.712457") + max_val = float("0.708452") + mean = float("-0.0024006") + std = float("0.402919") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [1, 96, 3] + dtype = "float32" + min_val = float("-0.242026") + max_val = float("0.243149") + mean = float("0.011127") + std = float("0.0816704") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..020c10233 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +10f78c8c0374cfe39ccb3910595419b7d121f10d0a8acbb7bc2cc6ea8f82338f \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_2/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_2/input_meta.py new file mode 100644 index 000000000..6e54050e7 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_2/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [3, 144, 512] + dtype = "float32" + min_val = float("-10.158") + max_val = float("11.0643") + mean = float("0.327734") + std = float("1.03443") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3, 144, 512] + dtype = "float32" + min_val = float("-3.29943") + max_val = float("3.87578") + mean = float("-0.0144692") + std = float("0.415833") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [3, 96, 512] + dtype = "float32" + min_val = float("-4.29811") + max_val = float("4.52") + mean = float("5.9135e-05") + std = float("0.996844") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3, 1] + dtype = "float32" + data = [0.224591, 1.20468, 0.822164] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [3, 96] + dtype = "float32" + min_val = float("-17.7886") + max_val = float("34.2571") + mean = float("-0.361486") + std = float("6.0667") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_2/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_2/model.py new file mode 100644 index 000000000..63da4ce2a --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_2/model.py @@ -0,0 +1,544 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_0 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_1 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_2 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_0 + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + data_1, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_0 = paddle._C_ops.add(data_0, dropout_0) + del data_0 + + # pd_op.layer_norm: (-1x144x512xf32, -1x144xf32, -1x144xf32) <- (-1x144x512xf32, 512xf32, 512xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_0, parameter_17, parameter_16, float("1e-05"), 2 + ), + lambda out: out 
if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_16, parameter_17 + + # pd_op.shape64: (3xi64) <- (-1x144x512xf32) + shape64_0 = paddle._C_ops.shape64(layer_norm_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del shape64_0 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_1, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_14) + del parameter_14 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("144"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [slice_0, full_1, full_2, full_3] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x144x8x-1xf32) <- (-1x144x512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, stack_0) + del stack_0 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_1 = paddle._C_ops.matmul(data_2, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_1, parameter_12) + del parameter_12 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("96"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_1 = [slice_0, full_4, full_2, full_3] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, stack_1) + del stack_1 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(data_2, parameter_11, False, False) + del data_2, parameter_11 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_3 = paddle._C_ops.add(matmul_2, parameter_10) + del parameter_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [slice_0, full_4, full_2, full_3] + del full_2, full_4 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, stack_2) + del stack_2 + + # pd_op.shape64: (4xi64) <- (-1x144x8x-1xf32) + shape64_1 = paddle._C_ops.shape64(reshape_0) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del 
shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x144x8x-1xf32) + shape64_2 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_3 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0, shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_4 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_4 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_1) + del data_3 + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_1) + + # pd_op.unsqueeze: (-1x1x96xf32) <- (-1x96xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_1) + del data_4 + + # pd_op.unsqueeze: (-1x1x1x96xf32) <- (-1x1x96xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(unsqueeze_2, full_int_array_1) + del full_int_array_1 + + # builtin.combine: ([-1x144x8x-1xf32, -1x96x8x-1xf32]) <- (-1x144x8x-1xf32, -1x96x8x-1xf32) + combine_3 = [reshape_0, reshape_1] + del reshape_0, reshape_1 + + # pd_op.einsum: (-1x8x144x96xf32, [0xf32, 0xf32], [-1x144x8x-1xf32, -1x96x8x-1xf32]) <- ([-1x144x8x-1xf32, -1x96x8x-1xf32]) + einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_3, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_3 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (-1x144x8x-1xf32, -1x96x8x-1xf32) <- ([-1x144x8x-1xf32, -1x96x8x-1xf32]) + ( + split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (-1x8x144x96xf32) <- (-1x8x144x96xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_1) + + # pd_op.add: (-1x8x144x96xf32) <- (-1x8x144x96xf32, -1x1x1x96xf32) + add_4 = paddle._C_ops.add(multiply_0, unsqueeze_3) + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x144x96xf32) <- (-1x8x144x96xf32, 1xf32) + scale_0 = paddle._C_ops.scale(add_4, full_5, float("0"), True) + del add_4 + + # pd_op.softmax: (-1x8x144x96xf32) <- (-1x8x144x96xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.dropout: (-1x8x144x96xf32, -1x8x144x96xui8) <- (-1x8x144x96xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # builtin.combine: ([-1x8x144x96xf32, -1x96x8x-1xf32]) <- (-1x8x144x96xf32, -1x96x8x-1xf32) + combine_4 = [dropout_2, reshape_2] + del dropout_2, reshape_2 + + # pd_op.einsum: (-1x144x8x-1xf32, [0xf32, 0xf32], [-1x8x144x96xf32, -1x96x8x-1xf32]) <- ([-1x8x144x96xf32, -1x96x8x-1xf32]) + 
einsum_3, einsum_4, einsum_5 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_4, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_4 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_4, + split_5, + ) = einsum_4 + del einsum_4 + + # builtin.split: (-1x8x144x96xf32, -1x96x8x-1xf32) <- ([-1x8x144x96xf32, -1x96x8x-1xf32]) + ( + split_6, + split_7, + ) = einsum_5 + del einsum_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [slice_0, full_1, full_3] + del full_1, full_3, slice_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x144x-1xf32) <- (-1x144x8x-1xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(einsum_3, stack_3) + del stack_3 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x-1xf32, 512x512xf32) + matmul_3 = paddle._C_ops.matmul(reshape_3, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_5 = paddle._C_ops.add(matmul_3, parameter_8) + del parameter_8 + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_6 = paddle._C_ops.add(layer_norm_1, dropout_4) + + # pd_op.layer_norm: (-1x144x512xf32, -1x144xf32, -1x144xf32) <- (-1x144x512xf32, 512xf32, 512xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_7, parameter_6, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_6, parameter_7 + + # pd_op.transpose: (-1x512x144xf32) <- (-1x144x512xf32) + transpose_0 = paddle._C_ops.transpose(layer_norm_4, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_8 = parameter_5 + del parameter_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_4 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(assign_8, full_int_array_4) + + # pd_op.unsqueeze: (-1x512x1x144xf32) <- (-1x512x144xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_4) + + # pd_op.conv2d: (-1x2048x1x144xf32) <- (-1x512x1x144xf32, 2048x512x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_5, unsqueeze_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, 2048, 1, 1] + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_4, full_int_array_5) + del full_int_array_5, parameter_4 + + # pd_op.add: (-1x2048x1x144xf32) <- (-1x2048x1x144xf32, 1x2048x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_0, reshape_4) + + # pd_op.squeeze: (-1x2048x144xf32) <- (-1x2048x1x144xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(add_7, full_int_array_4) + + # 
pd_op.gelu: (-1x2048x144xf32) <- (-1x2048x144xf32) + gelu_0 = paddle._C_ops.gelu(squeeze_0, False) + + # pd_op.dropout: (-1x2048x144xf32, -1x2048x144xui8) <- (-1x2048x144xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_14 = parameter_3 + del parameter_3 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(assign_14, full_int_array_4) + + # pd_op.unsqueeze: (-1x2048x1x144xf32) <- (-1x2048x144xf32, 1xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(dropout_6, full_int_array_4) + + # pd_op.conv2d: (-1x512x1x144xf32) <- (-1x2048x1x144xf32, 512x2048x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + unsqueeze_7, unsqueeze_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [1, 512, 1, 1] + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_2, full_int_array_6) + del full_int_array_6, parameter_2 + + # pd_op.add: (-1x512x1x144xf32) <- (-1x512x1x144xf32, 1x512x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_1, reshape_5) + + # pd_op.squeeze: (-1x512x144xf32) <- (-1x512x1x144xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(add_8, full_int_array_4) + + # pd_op.transpose: (-1x144x512xf32) <- (-1x512x144xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_1, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_1 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_9 = paddle._C_ops.add(layer_norm_4, dropout_8) + + # pd_op.layer_norm: (-1x144x512xf32, -1x144xf32, -1x144xf32) <- (-1x144x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del ( + add_0, + add_1, + add_2, + add_3, + add_6, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + conv2d_0, + conv2d_1, + dropout_0, + dropout_1, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + einsum_0, + einsum_3, + full_0, + full_5, + full_int_array_4, + layer_norm_1, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + matmul_0, + matmul_1, + matmul_2, + matmul_3, + multiply_0, + parameter_0, + parameter_1, + reshape_3, + reshape_4, + reshape_5, + softmax_0, + squeeze_0, + transpose_0, + unsqueeze_0, + unsqueeze_1, + unsqueeze_2, + unsqueeze_3, + unsqueeze_4, + unsqueeze_5, + unsqueeze_6, + unsqueeze_7, + ) + + return ( + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + layer_norm_0, + ) diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_2/weight_meta.py new file mode 100644 index 
000000000..3ce132456 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_2/weight_meta.py @@ -0,0 +1,196 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-0.0192559") + max_val = float("0.0192842") + mean = float("3.58487e-05") + std = float("0.00406778") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512] + dtype = "float32" + min_val = float("0.994002") + max_val = float("1.0232") + mean = float("1.00077") + std = float("0.00505867") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.0170407") + max_val = float("0.0167192") + mean = float("2.6645e-05") + std = float("0.00341306") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.155084") + max_val = float("0.159871") + mean = float("2.50862e-05") + std = float("0.0314332") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [2048] + dtype = "float32" + min_val = float("-0.00572839") + max_val = float("0.00834493") + mean = float("-0.000443372") + std = float("0.00139635") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.299124") + max_val = float("0.325728") + mean = float("-2.79556e-05") + std = float("0.0625856") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + min_val = float("-0.00381649") + max_val = float("0.00324497") + mean = float("0.000129981") + std = float("0.000942344") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512] + dtype = "float32" + min_val = float("0.992858") + max_val = float("1.00861") + mean = float("1.00023") + std = float("0.00195684") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-0.045221") + max_val = float("0.0452425") + mean = float("-0.000881771") + std = float("0.0243775") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0537789") + max_val = float("0.0527819") + mean = float("-2.99114e-05") + std = float("0.0255011") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + min_val = float("-0.0440523") + max_val = float("0.0439555") + mean = float("0.000251341") + std = float("0.0249406") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0534313") + max_val = float("0.0515383") + mean = float("-0.000108755") + std = float("0.0254607") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-0.0440433") + max_val = float("0.0440464") + mean = float("-0.000675068") + std = float("0.0257587") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0572128") + max_val = float("0.0571846") + mean = float("5.18046e-06") + std = float("0.0255771") + data = None + + +class Program_weight_tensor_parameter_14: + name 
= "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-0.0455499") + max_val = float("0.0446236") + mean = float("0.00117061") + std = float("0.0252549") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0542096") + max_val = float("0.0599803") + mean = float("1.79409e-05") + std = float("0.0255749") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.00313741") + max_val = float("0.00266002") + mean = float("3.4261e-05") + std = float("0.000896825") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512] + dtype = "float32" + min_val = float("0.993905") + max_val = float("1.00938") + mean = float("1.00062") + std = float("0.00211296") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..b1842d61a --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +3142f6ef5efe6dbce5f20592af8b004888e7b7d8a80f99c7f117676ff6e70f50 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_3/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_3/input_meta.py new file mode 100644 index 000000000..acea6d301 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_3/input_meta.py @@ -0,0 +1,66 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-10.0219") + max_val = float("11.7087") + mean = float("0.33717") + std = float("1.02734") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-2.48603") + max_val = float("2.82118") + mean = float("-0.0153106") + std = float("0.28819") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 96, 512] + dtype = "float32" + min_val = float("-4.19954") + max_val = float("4.38491") + mean = float("1.15506e-10") + std = float("0.999995") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 1] + dtype = "float32" + data = [ + 0.990467, + 0.93202, + 0.954836, + 0.9056, + 0.656892, + 0.98248, + 0.871827, + 0.853196, + 0.999883, + 1.09015, + 0.937267, + 0.934102, + 0.593485, + 0.677648, + 1.12517, + 0.95942, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 96] + dtype = "float32" + min_val = float("-0.386679") + max_val = float("0.603709") + mean = float("0.00396474") + std = float("0.100583") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_3/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_3/model.py new file mode 100644 index 000000000..f07f24812 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_3/model.py @@ -0,0 +1,444 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + 
def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_0 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_1 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_2 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_0 + + # pd_op.dropout: (16x144x512xf32, 16x144x512xui8) <- (16x144x512xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + data_1, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 16x144x512xf32) + add_0 = paddle._C_ops.add(data_0, dropout_0) + del data_0 + + # pd_op.layer_norm: (16x144x512xf32, 16x144xf32, 16x144xf32) <- (16x144x512xf32, 512xf32, 512xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_0, parameter_17, parameter_16, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_16, parameter_17 + + # pd_op.matmul: (16x144x512xf32) <- (16x144x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_1, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 512xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_14) + del parameter_14 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [16, 144, 8, -1] + + # pd_op.reshape: (16x144x8x64xf32) <- (16x144x512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_0) + del full_int_array_0 + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_1 = paddle._C_ops.matmul(data_2, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_1, parameter_12) + del parameter_12 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [16, 96, 8, -1] + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, full_int_array_1) + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(data_2, parameter_11, False, False) + del data_2, parameter_11 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_3 = paddle._C_ops.add(matmul_2, parameter_10) + del parameter_10 + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, full_int_array_1) + del full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_2 + + # pd_op.unsqueeze: (16x1x1xf32) <- (16x1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) + del data_3 + + # pd_op.unsqueeze: (16x1x1x1xf32) <- (16x1x1xf32, 1xi64) + unsqueeze_1 = 
paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_2) + + # pd_op.unsqueeze: (16x1x96xf32) <- (16x96xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_2) + del data_4 + + # pd_op.unsqueeze: (16x1x1x96xf32) <- (16x1x96xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(unsqueeze_2, full_int_array_2) + + # builtin.combine: ([16x144x8x64xf32, 16x96x8x64xf32]) <- (16x144x8x64xf32, 16x96x8x64xf32) + combine_0 = [reshape_0, reshape_1] + del reshape_0, reshape_1 + + # pd_op.einsum: (16x8x144x96xf32, [0xf32, 0xf32], [16x144x8x64xf32, 16x96x8x64xf32]) <- ([16x144x8x64xf32, 16x96x8x64xf32]) + einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_0, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_0 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (16x144x8x64xf32, 16x96x8x64xf32) <- ([16x144x8x64xf32, 16x96x8x64xf32]) + ( + split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (16x8x144x96xf32) <- (16x8x144x96xf32, 16x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_1) + + # pd_op.add: (16x8x144x96xf32) <- (16x8x144x96xf32, 16x1x1x96xf32) + add_4 = paddle._C_ops.add(multiply_0, unsqueeze_3) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x8x144x96xf32) <- (16x8x144x96xf32, 1xf32) + scale_0 = paddle._C_ops.scale(add_4, full_1, float("0"), True) + del add_4 + + # pd_op.softmax: (16x8x144x96xf32) <- (16x8x144x96xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.dropout: (16x8x144x96xf32, 16x8x144x96xui8) <- (16x8x144x96xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # builtin.combine: ([16x8x144x96xf32, 16x96x8x64xf32]) <- (16x8x144x96xf32, 16x96x8x64xf32) + combine_1 = [dropout_2, reshape_2] + del dropout_2, reshape_2 + + # pd_op.einsum: (16x144x8x64xf32, [0xf32, 0xf32], [16x8x144x96xf32, 16x96x8x64xf32]) <- ([16x8x144x96xf32, 16x96x8x64xf32]) + einsum_3, einsum_4, einsum_5 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_1, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_1 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_4, + split_5, + ) = einsum_4 + del einsum_4 + + # builtin.split: (16x8x144x96xf32, 16x96x8x64xf32) <- ([16x8x144x96xf32, 16x96x8x64xf32]) + ( + split_6, + split_7, + ) = einsum_5 + del einsum_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [16, 144, -1] + + # pd_op.reshape: (16x144x512xf32) <- (16x144x8x64xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(einsum_3, full_int_array_3) + del full_int_array_3 + + # pd_op.matmul: (16x144x512xf32) <- (16x144x512xf32, 512x512xf32) + matmul_3 = paddle._C_ops.matmul(reshape_3, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 512xf32) + add_5 = paddle._C_ops.add(matmul_3, parameter_8) + del parameter_8 + + # pd_op.dropout: (16x144x512xf32, 16x144x512xui8) <- (16x144x512xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out 
if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 16x144x512xf32) + add_6 = paddle._C_ops.add(layer_norm_1, dropout_4) + + # pd_op.layer_norm: (16x144x512xf32, 16x144xf32, 16x144xf32) <- (16x144x512xf32, 512xf32, 512xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_7, parameter_6, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_6, parameter_7 + + # pd_op.transpose: (16x512x144xf32) <- (16x144x512xf32) + transpose_0 = paddle._C_ops.transpose(layer_norm_4, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_7 = parameter_5 + del parameter_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_4 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(assign_7, full_int_array_4) + + # pd_op.unsqueeze: (16x512x1x144xf32) <- (16x512x144xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_4) + + # pd_op.conv2d: (16x2048x1x144xf32) <- (16x512x1x144xf32, 2048x512x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_5, unsqueeze_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, 2048, 1, 1] + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_4, full_int_array_5) + del full_int_array_5, parameter_4 + + # pd_op.add: (16x2048x1x144xf32) <- (16x2048x1x144xf32, 1x2048x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_0, reshape_4) + + # pd_op.squeeze: (16x2048x144xf32) <- (16x2048x1x144xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(add_7, full_int_array_4) + + # pd_op.gelu: (16x2048x144xf32) <- (16x2048x144xf32) + gelu_0 = paddle._C_ops.gelu(squeeze_0, False) + + # pd_op.dropout: (16x2048x144xf32, 16x2048x144xui8) <- (16x2048x144xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_13 = parameter_3 + del parameter_3 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(assign_13, full_int_array_4) + + # pd_op.unsqueeze: (16x2048x1x144xf32) <- (16x2048x144xf32, 1xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(dropout_6, full_int_array_4) + + # pd_op.conv2d: (16x512x1x144xf32) <- (16x2048x1x144xf32, 512x2048x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + unsqueeze_7, unsqueeze_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [1, 512, 1, 1] + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_2, full_int_array_6) + del full_int_array_6, parameter_2 + + # pd_op.add: (16x512x1x144xf32) <- (16x512x1x144xf32, 1x512x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_1, reshape_5) + + # pd_op.squeeze: (16x512x144xf32) <- 
(16x512x1x144xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(add_8, full_int_array_4) + + # pd_op.transpose: (16x144x512xf32) <- (16x512x144xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.dropout: (16x144x512xf32, 16x144x512xui8) <- (16x144x512xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_1, None, full_0, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_1 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 16x144x512xf32) + add_9 = paddle._C_ops.add(layer_norm_4, dropout_8) + + # pd_op.layer_norm: (16x144x512xf32, 16x144xf32, 16x144xf32) <- (16x144x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del ( + add_0, + add_1, + add_2, + add_3, + add_6, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + conv2d_0, + conv2d_1, + dropout_0, + dropout_1, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + einsum_0, + einsum_3, + full_0, + full_1, + full_int_array_2, + full_int_array_4, + layer_norm_1, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + matmul_0, + matmul_1, + matmul_2, + matmul_3, + multiply_0, + parameter_0, + parameter_1, + reshape_3, + reshape_4, + reshape_5, + softmax_0, + squeeze_0, + transpose_0, + unsqueeze_0, + unsqueeze_1, + unsqueeze_2, + unsqueeze_3, + unsqueeze_4, + unsqueeze_5, + unsqueeze_6, + unsqueeze_7, + ) + + return ( + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + layer_norm_0, + ) diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_3/weight_meta.py new file mode 100644 index 000000000..86e0c4194 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_3/weight_meta.py @@ -0,0 +1,173 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.154566") + max_val = float("0.159472") + mean = float("1.05861e-05") + std = float("0.0312584") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [2048] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.299503") + max_val = float("0.327543") + mean = float("-4.4847e-05") + std = float("0.0625304") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512] + dtype = "float32" + min_val 
= float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-0.0441867") + max_val = float("0.0440204") + mean = float("-0.000902402") + std = float("0.0244242") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441936") + max_val = float("0.0441941") + mean = float("-2.84541e-05") + std = float("0.0255034") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + min_val = float("-0.0441153") + max_val = float("0.0441149") + mean = float("0.000268159") + std = float("0.0250131") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.044194") + max_val = float("0.044194") + mean = float("-0.000109504") + std = float("0.0254946") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-0.0440431") + max_val = float("0.0440465") + mean = float("-0.000675064") + std = float("0.0257587") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441941") + max_val = float("0.0441941") + mean = float("6.33192e-06") + std = float("0.0255113") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-0.044111") + max_val = float("0.0439952") + mean = float("0.00118007") + std = float("0.0252061") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441941") + max_val = float("0.0441941") + mean = float("2.10063e-05") + std = float("0.025508") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..a092416d1 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +032356cfa273f39af883975a649b9718eaf519bdc3eeb1bc91d47820972ce24b \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_4/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_4/input_meta.py new file mode 100644 index 000000000..aacd1738f --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_4/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [16] + + +class 
Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-9.67409") + max_val = float("10.7383") + mean = float("0.32802") + std = float("1.03805") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-9.67409") + max_val = float("10.7383") + mean = float("0.32802") + std = float("1.03805") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-9.67409") + max_val = float("10.7383") + mean = float("0.32802") + std = float("1.03805") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_4/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_4/model.py new file mode 100644 index 000000000..b780d2b93 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_4/model.py @@ -0,0 +1,80 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(data_1, parameter_5, False, False) + del data_1, parameter_5 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_4) + del matmul_0, parameter_4 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("144"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [data_0, full_0, full_1, full_2] + del data_0, full_0, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x144x8x-1xf32) <- (-1x144x512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_0, stack_0) + del add_0 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x512xf32, 512x512xf32) + matmul_1 = paddle._C_ops.matmul(data_2, parameter_3, False, False) + del data_2, parameter_3 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_1 = paddle._C_ops.add(matmul_1, parameter_2) + del matmul_1, parameter_2 + + # pd_op.reshape: (-1x144x8x-1xf32) <- (-1x144x512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_1, stack_0) + del add_1 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(data_3, parameter_1, False, False) + del data_3, parameter_1 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_2, parameter_0) + del matmul_2, parameter_0 + + # pd_op.reshape: (-1x144x8x-1xf32) <- (-1x144x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_2, stack_0) + del add_2, stack_0 + + return reshape_0, reshape_1, reshape_2 diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_4/weight_meta.py new file mode 100644 index 000000000..e1fce5af9 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_4/weight_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_parameter_0: + name = 
"parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-0.0446107") + max_val = float("0.0453598") + mean = float("0.000231714") + std = float("0.0262657") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0623724") + max_val = float("0.0630581") + mean = float("-2.09036e-05") + std = float("0.0256847") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.0438997") + max_val = float("0.0437803") + mean = float("0.00156957") + std = float("0.0257147") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0664537") + max_val = float("0.0706227") + mean = float("8.77122e-05") + std = float("0.0257932") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512] + dtype = "float32" + min_val = float("-0.0455267") + max_val = float("0.0482273") + mean = float("-0.000500988") + std = float("0.0249951") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0813498") + max_val = float("0.0821216") + mean = float("-3.02754e-05") + std = float("0.0258617") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..093bdb9f7 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +ba62857f1bb017eaa210bec0adeaa37a6bd0f04662fba6014acc1eda87914649 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_5/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_5/input_meta.py new file mode 100644 index 000000000..7e51ddcee --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_5/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 144, 8, 64] + dtype = "float32" + min_val = float("-6.13095") + max_val = float("6.49821") + mean = float("0.00859679") + std = float("0.491188") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_5/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_5/model.py new file mode 100644 index 000000000..7bc8e4c9d --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_5/model.py @@ -0,0 +1,39 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, parameter_1, data_0, data_1): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("144"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) 
<- (xi64, xi64, xi64) + combine_0 = [data_0, full_0, full_1] + del data_0, full_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x144x-1xf32) <- (-1x144x8x64xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x-1xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(reshape_0, parameter_1, False, False) + del parameter_1, reshape_0 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_0) + del matmul_0, parameter_0 + + return add_0 diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_5/weight_meta.py new file mode 100644 index 000000000..dc65488b8 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_5/weight_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-0.0452965") + max_val = float("0.0456096") + mean = float("-0.000771477") + std = float("0.025356") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0540168") + max_val = float("0.0539125") + mean = float("-0.00010208") + std = float("0.0255514") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..5275e779f --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +2cd6953494751b5fbf78191ecfcda8d8ae0a0e7998d59fff3da7ddf077a77bfa \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_6/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_6/input_meta.py new file mode 100644 index 000000000..ab744894b --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_6/input_meta.py @@ -0,0 +1,66 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-9.67409") + max_val = float("10.7383") + mean = float("0.32802") + std = float("1.03805") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-3.41957") + max_val = float("4.11716") + mean = float("-0.0142528") + std = float("0.341142") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 96, 512] + dtype = "float32" + min_val = float("-3.97367") + max_val = float("4.64107") + mean = float("5.88689e-05") + std = float("0.996903") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 1] + dtype = "float32" + data = [ + 0.0469126, + 0.067225, + 0.0486318, + 0.0365163, + 0.0623915, + 0.193965, + 0.0578003, + 0.140686, + 0.114084, + 0.233482, + 0.0638282, + 0.0418595, + 0.2652, + 0.0846259, + 0.0710645, + 0.0677776, + ] + + +class 
Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 96] + dtype = "float32" + min_val = float("-17.3932") + max_val = float("44.7827") + mean = float("-0.782251") + std = float("7.89213") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_6/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_6/model.py new file mode 100644 index 000000000..14ef2e687 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_6/model.py @@ -0,0 +1,445 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + data_1, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_0 = paddle._C_ops.add(data_0, dropout_0) + del data_0, dropout_0 + + # pd_op.layer_norm: (-1x144x512xf32, -1x144xf32, -1x144xf32) <- (-1x144x512xf32, 512xf32, 512xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_0, parameter_17, parameter_16, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_0, parameter_16, parameter_17 + + # pd_op.shape64: (3xi64) <- (-1x144x512xf32) + shape64_0 = paddle._C_ops.shape64(layer_norm_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del shape64_0 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_1, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_14) + del matmul_0, parameter_14 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("144"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [slice_0, full_1, full_2, full_3] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x144x8x-1xf32) <- (-1x144x512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, stack_0) + del add_1, stack_0 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_1 = paddle._C_ops.matmul(data_2, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (-1x96x512xf32) <- 
(-1x96x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_1, parameter_12) + del matmul_1, parameter_12 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("96"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_1 = [slice_0, full_4, full_2, full_3] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, stack_1) + del add_2, stack_1 + + # pd_op.matmul: (-1x96x512xf32) <- (-1x96x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(data_2, parameter_11, False, False) + del data_2, parameter_11 + + # pd_op.add: (-1x96x512xf32) <- (-1x96x512xf32, 512xf32) + add_3 = paddle._C_ops.add(matmul_2, parameter_10) + del matmul_2, parameter_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [slice_0, full_4, full_2, full_3] + del full_2, full_4 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x96x8x-1xf32) <- (-1x96x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, stack_2) + del add_3, stack_2 + + # pd_op.shape64: (4xi64) <- (-1x144x8x-1xf32) + shape64_1 = paddle._C_ops.shape64(reshape_0) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x144x8x-1xf32) + shape64_2 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_3 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0, shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x96x8x-1xf32) + shape64_4 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_4 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_1) + del data_3 + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_1) + del unsqueeze_0 + + # pd_op.unsqueeze: (-1x1x96xf32) <- (-1x96xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_1) + del data_4 + + # pd_op.unsqueeze: (-1x1x1x96xf32) <- (-1x1x96xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(unsqueeze_2, full_int_array_1) + del full_int_array_1, unsqueeze_2 + + # builtin.combine: ([-1x144x8x-1xf32, -1x96x8x-1xf32]) <- (-1x144x8x-1xf32, -1x96x8x-1xf32) + combine_3 = [reshape_0, reshape_1] + del reshape_0, reshape_1 + + # pd_op.einsum: (-1x8x144x96xf32, [0xf32, 0xf32], [-1x144x8x-1xf32, -1x96x8x-1xf32]) <- ([-1x144x8x-1xf32, -1x96x8x-1xf32]) + einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_3, "blhe,bshe->bhls"), + 
lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_3 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (-1x144x8x-1xf32, -1x96x8x-1xf32) <- ([-1x144x8x-1xf32, -1x96x8x-1xf32]) + ( + split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (-1x8x144x96xf32) <- (-1x8x144x96xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_1) + del einsum_0, unsqueeze_1 + + # pd_op.add: (-1x8x144x96xf32) <- (-1x8x144x96xf32, -1x1x1x96xf32) + add_4 = paddle._C_ops.add(multiply_0, unsqueeze_3) + del multiply_0, unsqueeze_3 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x144x96xf32) <- (-1x8x144x96xf32, 1xf32) + scale_0 = paddle._C_ops.scale(add_4, full_5, float("0"), True) + del add_4, full_5 + + # pd_op.softmax: (-1x8x144x96xf32) <- (-1x8x144x96xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.dropout: (-1x8x144x96xf32, -1x8x144x96xui8) <- (-1x8x144x96xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_0 + + # builtin.combine: ([-1x8x144x96xf32, -1x96x8x-1xf32]) <- (-1x8x144x96xf32, -1x96x8x-1xf32) + combine_4 = [dropout_2, reshape_2] + del dropout_2, reshape_2 + + # pd_op.einsum: (-1x144x8x-1xf32, [0xf32, 0xf32], [-1x8x144x96xf32, -1x96x8x-1xf32]) <- ([-1x8x144x96xf32, -1x96x8x-1xf32]) + einsum_3, einsum_4, einsum_5 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_4, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_4 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_4, + split_5, + ) = einsum_4 + del einsum_4 + + # builtin.split: (-1x8x144x96xf32, -1x96x8x-1xf32) <- ([-1x8x144x96xf32, -1x96x8x-1xf32]) + ( + split_6, + split_7, + ) = einsum_5 + del einsum_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [slice_0, full_1, full_3] + del full_1, full_3, slice_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x144x-1xf32) <- (-1x144x8x-1xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(einsum_3, stack_3) + del einsum_3, stack_3 + + # pd_op.matmul: (-1x144x512xf32) <- (-1x144x-1xf32, 512x512xf32) + matmul_3 = paddle._C_ops.matmul(reshape_3, parameter_9, False, False) + del parameter_9, reshape_3 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, 512xf32) + add_5 = paddle._C_ops.add(matmul_3, parameter_8) + del matmul_3, parameter_8 + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_6 = paddle._C_ops.add(layer_norm_1, dropout_4) + del dropout_4, layer_norm_1 + + # pd_op.layer_norm: (-1x144x512xf32, -1x144xf32, -1x144xf32) <- (-1x144x512xf32, 512xf32, 512xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_7, parameter_6, 
float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_6, parameter_6, parameter_7 + + # pd_op.transpose: (-1x512x144xf32) <- (-1x144x512xf32) + transpose_0 = paddle._C_ops.transpose(layer_norm_4, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_0 = parameter_5 + del parameter_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(assign_0, full_int_array_4) + del assign_0 + + # pd_op.unsqueeze: (-1x512x1x144xf32) <- (-1x512x144xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_4) + del transpose_0 + + # pd_op.conv2d: (-1x2048x1x144xf32) <- (-1x512x1x144xf32, 2048x512x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_5, unsqueeze_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_4, unsqueeze_5 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, 2048, 1, 1] + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_4, full_int_array_5) + del full_int_array_5, parameter_4 + + # pd_op.add: (-1x2048x1x144xf32) <- (-1x2048x1x144xf32, 1x2048x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_0, reshape_4) + del conv2d_0, reshape_4 + + # pd_op.squeeze: (-1x2048x144xf32) <- (-1x2048x1x144xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(add_7, full_int_array_4) + del add_7 + + # pd_op.gelu: (-1x2048x144xf32) <- (-1x2048x144xf32) + gelu_0 = paddle._C_ops.gelu(squeeze_0, False) + del squeeze_0 + + # pd_op.dropout: (-1x2048x144xf32, -1x2048x144xui8) <- (-1x2048x144xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_1 = parameter_3 + del parameter_3 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(assign_1, full_int_array_4) + del assign_1 + + # pd_op.unsqueeze: (-1x2048x1x144xf32) <- (-1x2048x144xf32, 1xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(dropout_6, full_int_array_4) + del dropout_6 + + # pd_op.conv2d: (-1x512x1x144xf32) <- (-1x2048x1x144xf32, 512x2048x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + unsqueeze_7, unsqueeze_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_6, unsqueeze_7 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [1, 512, 1, 1] + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_2, full_int_array_6) + del full_int_array_6, parameter_2 + + # pd_op.add: (-1x512x1x144xf32) <- (-1x512x1x144xf32, 1x512x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_1, reshape_5) + del conv2d_1, reshape_5 + + # pd_op.squeeze: (-1x512x144xf32) <- (-1x512x1x144xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(add_8, full_int_array_4) + del add_8, full_int_array_4 + + # pd_op.transpose: (-1x144x512xf32) <- (-1x512x144xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.dropout: (-1x144x512xf32, -1x144x512xui8) <- (-1x144x512xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_1, None, full_0, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + 
del full_0, transpose_1 + + # pd_op.add: (-1x144x512xf32) <- (-1x144x512xf32, -1x144x512xf32) + add_9 = paddle._C_ops.add(layer_norm_4, dropout_8) + del dropout_8, layer_norm_4 + + # pd_op.layer_norm: (-1x144x512xf32, -1x144xf32, -1x144xf32) <- (-1x144x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_9, parameter_0, parameter_1 + + return layer_norm_0 diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_6/weight_meta.py new file mode 100644 index 000000000..ddcef0358 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_6/weight_meta.py @@ -0,0 +1,196 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-0.0192397") + max_val = float("0.0192693") + mean = float("3.61048e-05") + std = float("0.00406395") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512] + dtype = "float32" + min_val = float("0.993993") + max_val = float("1.02318") + mean = float("1.00077") + std = float("0.00505466") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.0170297") + max_val = float("0.0167098") + mean = float("2.68068e-05") + std = float("0.00341074") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.155079") + max_val = float("0.159838") + mean = float("2.51708e-05") + std = float("0.0314331") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [2048] + dtype = "float32" + min_val = float("-0.00575413") + max_val = float("0.00833782") + mean = float("-0.000446487") + std = float("0.00139574") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.299145") + max_val = float("0.325746") + mean = float("-2.78938e-05") + std = float("0.0625855") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + min_val = float("-0.00381496") + max_val = float("0.00324015") + mean = float("0.000128955") + std = float("0.000941853") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512] + dtype = "float32" + min_val = float("0.992845") + max_val = float("1.00865") + mean = float("1.00023") + std = float("0.00195828") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-0.0452449") + max_val = float("0.0452441") + mean = float("-0.000882317") + std = float("0.0243787") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0537709") + max_val = float("0.0527951") + mean = float("-2.99185e-05") + std = float("0.0255009") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + min_val = float("-0.0440634") + max_val = float("0.0439666") + mean = float("0.000249585") + std = float("0.0249417") + data = None + + +class 
Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0534587") + max_val = float("0.0515689") + mean = float("-0.000108753") + std = float("0.0254603") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-0.0440433") + max_val = float("0.0440464") + mean = float("-0.000675067") + std = float("0.0257587") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0571011") + max_val = float("0.0572165") + mean = float("5.21896e-06") + std = float("0.0255767") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-0.0455331") + max_val = float("0.0446621") + mean = float("0.00117117") + std = float("0.0252566") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0541928") + max_val = float("0.059954") + mean = float("1.79621e-05") + std = float("0.0255748") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.00314124") + max_val = float("0.0026704") + mean = float("3.36496e-05") + std = float("0.000896563") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512] + dtype = "float32" + min_val = float("0.993901") + max_val = float("1.00941") + mean = float("1.00062") + std = float("0.00211496") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..d3eb17b0a --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +91f94b67dd9dbcbdd483b58641ca25ee392a56eaf1f957fff7eb9f32b7c3f848 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_7/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_7/input_meta.py new file mode 100644 index 000000000..51169a8be --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_7/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-10.0219") + max_val = float("11.7087") + mean = float("0.33717") + std = float("1.02734") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-10.0219") + max_val = float("11.7087") + mean = float("0.33717") + std = float("1.02734") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 144, 512] + dtype = "float32" + min_val = float("-10.0219") + max_val = float("11.7087") + mean = float("0.33717") + std = float("1.02734") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_7/model.py 
b/paddle_samples/PaddleX/Nonstationary/subgraph_7/model.py new file mode 100644 index 000000000..42c6bde19 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_7/model.py @@ -0,0 +1,57 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + data_0, + data_1, + data_2, + ): + # pd_op.matmul: (16x144x512xf32) <- (16x144x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(data_0, parameter_5, False, False) + del data_0, parameter_5 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 512xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_4) + del parameter_4 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [16, 144, 8, -1] + + # pd_op.reshape: (16x144x8x64xf32) <- (16x144x512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_0, full_int_array_0) + + # pd_op.matmul: (16x144x512xf32) <- (16x144x512xf32, 512x512xf32) + matmul_1 = paddle._C_ops.matmul(data_1, parameter_3, False, False) + del data_1, parameter_3 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 512xf32) + add_1 = paddle._C_ops.add(matmul_1, parameter_2) + del parameter_2 + + # pd_op.reshape: (16x144x8x64xf32) <- (16x144x512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_1, full_int_array_0) + + # pd_op.matmul: (16x144x512xf32) <- (16x144x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(data_2, parameter_1, False, False) + del data_2, parameter_1 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_2, parameter_0) + del parameter_0 + + # pd_op.reshape: (16x144x8x64xf32) <- (16x144x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_2, full_int_array_0) + del add_0, add_1, add_2, full_int_array_0, matmul_0, matmul_1, matmul_2 + + return reshape_0, reshape_1, reshape_2 diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_7/weight_meta.py new file mode 100644 index 000000000..b5dc7b802 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_7/weight_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-0.0441462") + max_val = float("0.0440855") + mean = float("0.000231583") + std = float("0.026297") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441934") + max_val = float("0.0441932") + mean = float("-7.20876e-06") + std = float("0.0255457") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.0439005") + max_val = float("0.0437794") + mean = float("0.00156979") + std = float("0.0257149") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441941") + max_val = float("0.0441941") + mean = float("7.97453e-05") + std = float("0.0255102") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512] + dtype = "float32" + min_val = float("-0.0439592") + max_val = float("0.0439638") + mean = float("-0.000514035") + std = float("0.0250928") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441942") + 
max_val = float("0.0441939") + mean = float("-3.68316e-05") + std = float("0.0255133") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..cb55cb03a --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +2cf15df08c3744d30d800212091f1fe0c4981e649a8d64a95a1aa11b2398afec \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_8/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_8/input_meta.py new file mode 100644 index 000000000..34bae8604 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_8/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 96, 1] + dtype = "float32" + min_val = float("-1.42933") + max_val = float("3.1545") + mean = float("0.408696") + std = float("0.978119") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 192, 4] + dtype = "float32" + min_val = float("-0.5") + max_val = float("0.533333") + mean = float("-0.00763242") + std = float("0.28448") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 5000, 512] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.13237") + std = float("0.694606") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 5000, 512] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.13237") + std = float("0.694606") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_8/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_8/model.py new file mode 100644 index 000000000..e764f06fd --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_8/model.py @@ -0,0 +1,1365 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [96] + + # pd_op.slice: (16x96x4xf32) <- (16x192x4xf32, 1xi64, 1xi64) + slice_0 = 
paddle._C_ops.slice( + data_1, [1], full_int_array_0, full_int_array_1, [1], [] + ) + + # pd_op.full: (16x96x1xf32) <- () + full_0 = paddle._C_ops.full( + [16, 96, 1], + float("0"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-48] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (16x48x1xf32) <- (16x96x1xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_1 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_1 + + # builtin.combine: ([16x48x1xf32, 16x96x1xf32]) <- (16x48x1xf32, 16x96x1xf32) + combine_0 = [slice_1, full_0] + del full_0, slice_1 + + # pd_op.concat: (16x144x1xf32) <- ([16x48x1xf32, 16x96x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_1) + del combine_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-144] + + # pd_op.slice: (16x144x4xf32) <- (16x192x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_1, [1], full_int_array_4, full_int_array_3, [1], [] + ) + del data_1, full_int_array_4 + + # pd_op.assign: (16x96x1xf32) <- (16x96x1xf32) + assign_2 = data_0 + + # pd_op.share_data_: (16x96x1xf32) <- (16x96x1xf32) + share_data__0 = assign_2.detach() + del assign_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_5 + + # pd_op.mean: (16x1x1xf32) <- (16x96x1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_5, True) + + # pd_op.share_data_: (16x1x1xf32) <- (16x1x1xf32) + share_data__1 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (16x96x1xf32) <- (16x96x1xf32, 16x1x1xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__1) + del data_0 + + # pd_op.mean: (16x1x1xf32) <- (16x96x1xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_5, True) + + # pd_op.subtract: (16x96x1xf32) <- (16x96x1xf32, 16x1x1xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (16x96x1xf32) <- (16x96x1xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (16x1x1xf32) <- (16x96x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_5, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (16x96x1xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (16x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, 
paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (16x1x1xf32) <- (16x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x1x1xf32) <- (16x1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_2, float("1e-05"), True) + del divide_1, full_2 + + # pd_op.sqrt: (16x1x1xf32) <- (16x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.share_data_: (16x1x1xf32) <- (16x1x1xf32) + share_data__2 = sqrt_0.detach() + del sqrt_0 + + # pd_op.divide: (16x96x1xf32) <- (16x96x1xf32, 16x1x1xf32) + divide_2 = paddle._C_ops.divide(subtract_0, share_data__2) + del subtract_0 + + # pd_op.slice: (16x48x1xf32) <- (16x96x1xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + divide_2, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-96] + + # pd_op.slice: (16x96x1xf32) <- (16x144x1xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_0, [1], full_int_array_6, full_int_array_3, [1], [] + ) + del concat_0, full_int_array_3, full_int_array_6 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (16x96x1xf32) <- (16x96x1xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + slice_4, full_3, paddle.float32, paddle.framework._current_expected_place() + ) + del full_3, slice_4 + + # builtin.combine: ([16x48x1xf32, 16x96x1xf32]) <- (16x48x1xf32, 16x96x1xf32) + combine_1 = [slice_3, full_like_0] + del full_like_0, slice_3 + + # pd_op.concat: (16x144x1xf32) <- ([16x48x1xf32, 16x96x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_1) + del combine_1 + + # pd_op.assign: (16x144x1xf32) <- (16x144x1xf32) + assign_11 = concat_1 + del concat_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_7 = [3, 4] + + # pd_op.unsqueeze: (16x96x1x1x1xf32) <- (16x96x1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(share_data__0, full_int_array_7) + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_8 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (16x96x3x1x1xf32) <- (16x96x1x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_0 + + # pd_op.squeeze: (16x96x3xf32) <- (16x96x3x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_7) + del pad3d_0 + + # pd_op.assign: (1x96x3xf32) <- (1x96x3xf32) + assign_12 = parameter_49 + del parameter_49 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_14 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_15 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_9 + + # 
pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_9 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_9 + + # pd_op.unsqueeze: (1x96x1x3xf32) <- (1x96x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_12, full_int_array_9) + + # pd_op.unsqueeze: (16x96x1x3xf32) <- (16x96x3xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_9) + del squeeze_0 + + # pd_op.conv2d: (16x1x1x1xf32) <- (16x96x1x3xf32, 1x96x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (16x1x1xf32) <- (16x1x1x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_9) + + # builtin.combine: ([16x1x1xf32, 16x1x1xf32]) <- (16x1x1xf32, 16x1x1xf32) + combine_2 = [squeeze_1, share_data__2] + + # pd_op.concat: (16x2x1xf32) <- ([16x1x1xf32, 16x1x1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [16, -1] + + # pd_op.reshape: (16x2xf32) <- (16x2x1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(concat_2, full_int_array_10) + + # pd_op.matmul: (16x256xf32) <- (16x2xf32, 2x256xf32) + matmul_0 = paddle._C_ops.matmul(reshape_0, parameter_48, False, False) + del parameter_48 + + # pd_op.add: (16x256xf32) <- (16x256xf32, 256xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_47) + del parameter_47 + + # pd_op.relu: (16x256xf32) <- (16x256xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.matmul: (16x256xf32) <- (16x256xf32, 256x256xf32) + matmul_1 = paddle._C_ops.matmul(relu_0, parameter_46, False, False) + del parameter_46 + + # pd_op.add: (16x256xf32) <- (16x256xf32, 256xf32) + add_1 = paddle._C_ops.add(matmul_1, parameter_45) + del parameter_45 + + # pd_op.relu: (16x256xf32) <- (16x256xf32) + relu_1 = paddle._C_ops.relu(add_1) + del add_1 + + # pd_op.matmul: (16x1xf32) <- (16x256xf32, 256x1xf32) + matmul_2 = paddle._C_ops.matmul(relu_1, parameter_44, False, False) + del parameter_44 + + # pd_op.exp: (16x1xf32) <- (16x1xf32) + exp_0 = paddle._C_ops.exp(matmul_2) + del matmul_2 + + # pd_op.unsqueeze: (16x96x1x1x1xf32) <- (16x96x1xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(share_data__0, full_int_array_7) + del share_data__0 + + # pd_op.pad3d: (16x96x3x1x1xf32) <- (16x96x1x1x1xf32, 6xi64) + pad3d_1 = paddle._C_ops.pad3d( + unsqueeze_3, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_3 + + # pd_op.squeeze: (16x96x3xf32) <- (16x96x3x1x1xf32, 2xi64) + squeeze_2 = paddle._C_ops.squeeze(pad3d_1, full_int_array_7) + del pad3d_1 + + # pd_op.assign: (1x96x3xf32) <- (1x96x3xf32) + assign_32 = parameter_43 + del parameter_43 + + # pd_op.unsqueeze: (1x96x1x3xf32) <- (1x96x3xf32, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(assign_32, full_int_array_9) + + # pd_op.unsqueeze: (16x96x1x3xf32) <- (16x96x3xf32, 1xi64) + unsqueeze_5 = paddle._C_ops.unsqueeze(squeeze_2, full_int_array_9) + del squeeze_2 + + # pd_op.conv2d: (16x1x1x1xf32) <- (16x96x1x3xf32, 
1x96x1x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + unsqueeze_5, unsqueeze_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (16x1x1xf32) <- (16x1x1x1xf32, 1xi64) + squeeze_3 = paddle._C_ops.squeeze(conv2d_1, full_int_array_9) + + # builtin.combine: ([16x1x1xf32, 16x1x1xf32]) <- (16x1x1xf32, 16x1x1xf32) + combine_3 = [squeeze_3, share_data__1] + + # pd_op.concat: (16x2x1xf32) <- ([16x1x1xf32, 16x1x1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_1) + del combine_3, full_1 + + # pd_op.reshape: (16x2xf32) <- (16x2x1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(concat_3, full_int_array_10) + del full_int_array_10 + + # pd_op.matmul: (16x256xf32) <- (16x2xf32, 2x256xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_42, False, False) + del parameter_42 + + # pd_op.add: (16x256xf32) <- (16x256xf32, 256xf32) + add_2 = paddle._C_ops.add(matmul_3, parameter_41) + del parameter_41 + + # pd_op.relu: (16x256xf32) <- (16x256xf32) + relu_2 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.matmul: (16x256xf32) <- (16x256xf32, 256x256xf32) + matmul_4 = paddle._C_ops.matmul(relu_2, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (16x256xf32) <- (16x256xf32, 256xf32) + add_3 = paddle._C_ops.add(matmul_4, parameter_39) + del parameter_39 + + # pd_op.relu: (16x256xf32) <- (16x256xf32) + relu_3 = paddle._C_ops.relu(add_3) + del add_3 + + # pd_op.matmul: (16x96xf32) <- (16x256xf32, 256x96xf32) + matmul_5 = paddle._C_ops.matmul(relu_3, parameter_38, False, False) + del parameter_38 + + # pd_op.transpose: (16x1x96xf32) <- (16x96x1xf32) + transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + del divide_2 + + # pd_op.unsqueeze: (16x1x96x1x1xf32) <- (16x1x96xf32, 2xi64) + unsqueeze_6 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_7) + del transpose_0 + + # pd_op.pad3d: (16x1x98x1x1xf32) <- (16x1x96x1x1xf32, 6xi64) + pad3d_2 = paddle._C_ops.pad3d( + unsqueeze_6, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del unsqueeze_6 + + # pd_op.squeeze: (16x1x98xf32) <- (16x1x98x1x1xf32, 2xi64) + squeeze_4 = paddle._C_ops.squeeze(pad3d_2, full_int_array_7) + del pad3d_2 + + # pd_op.assign: (512x1x3xf32) <- (512x1x3xf32) + assign_33 = parameter_37 + del parameter_37 + + # pd_op.unsqueeze: (512x1x1x3xf32) <- (512x1x3xf32, 1xi64) + unsqueeze_7 = paddle._C_ops.unsqueeze(assign_33, full_int_array_9) + + # pd_op.unsqueeze: (16x1x1x98xf32) <- (16x1x98xf32, 1xi64) + unsqueeze_8 = paddle._C_ops.unsqueeze(squeeze_4, full_int_array_9) + del squeeze_4 + + # pd_op.conv2d: (16x512x1x96xf32) <- (16x1x1x98xf32, 512x1x1x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + unsqueeze_8, unsqueeze_7, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (16x512x96xf32) <- (16x512x1x96xf32, 1xi64) + squeeze_5 = paddle._C_ops.squeeze(conv2d_2, full_int_array_9) + + # pd_op.transpose: (16x96x512xf32) <- (16x512x96xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_5, [0, 2, 1]) + del squeeze_5 + + # pd_op.matmul: (16x96x512xf32) <- (16x96x4xf32, 4x512xf32) + matmul_6 = paddle._C_ops.matmul(slice_0, parameter_36, False, False) + del parameter_36 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 16x96x512xf32) + add_4 = paddle._C_ops.add(transpose_1, matmul_6) + + # pd_op.slice: (1x96x512xf32) <- (1x5000x512xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_3, [1], full_int_array_0, full_int_array_1, [1], [] + ) + del data_3, full_int_array_1 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 1x96x512xf32) + add_5 = 
paddle._C_ops.add(add_4, slice_5) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_34 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_35 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_36 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_37 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_38 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_39 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_40 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_41 = full_4 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_42 = full_4 + + # pd_op.dropout: (16x96x512xf32, 16x96x512xui8) <- (16x96x512xf32, None, 1xf32) + dropout_1, dropout_2 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_7 = paddle._C_ops.matmul(dropout_1, parameter_35, False, False) + del parameter_35 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_6 = paddle._C_ops.add(matmul_7, parameter_34) + del parameter_34 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_11 = [16, 96, 8, -1] + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_6, full_int_array_11) + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_8 = paddle._C_ops.matmul(dropout_1, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_7 = paddle._C_ops.add(matmul_8, parameter_32) + del parameter_32 + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(add_7, full_int_array_11) + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_9 = paddle._C_ops.matmul(dropout_1, parameter_31, False, False) + del parameter_31 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_8 = paddle._C_ops.add(matmul_9, parameter_30) + del parameter_30 + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_8, full_int_array_11) + + # pd_op.unsqueeze: (16x1x1xf32) <- (16x1xf32, 1xi64) + unsqueeze_9 = paddle._C_ops.unsqueeze(exp_0, full_int_array_5) + + # pd_op.unsqueeze: (16x1x1x1xf32) <- (16x1x1xf32, 1xi64) + unsqueeze_10 = paddle._C_ops.unsqueeze(unsqueeze_9, full_int_array_5) + + # pd_op.unsqueeze: (16x1x96xf32) <- (16x96xf32, 1xi64) + unsqueeze_11 = paddle._C_ops.unsqueeze(matmul_5, full_int_array_5) + + # pd_op.unsqueeze: (16x1x1x96xf32) <- (16x1x96xf32, 1xi64) + unsqueeze_12 = paddle._C_ops.unsqueeze(unsqueeze_11, full_int_array_5) + + # builtin.combine: ([16x96x8x64xf32, 16x96x8x64xf32]) <- (16x96x8x64xf32, 16x96x8x64xf32) + combine_4 = [reshape_2, reshape_3] + del reshape_2, reshape_3 + + # pd_op.einsum: (16x8x96x96xf32, [0xf32, 0xf32], [16x96x8x64xf32, 16x96x8x64xf32]) <- ([16x96x8x64xf32, 16x96x8x64xf32]) + einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_4, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_4 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (16x96x8x64xf32, 16x96x8x64xf32) <- ([16x96x8x64xf32, 16x96x8x64xf32]) + ( 
+ split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (16x8x96x96xf32) <- (16x8x96x96xf32, 16x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_10) + + # pd_op.add: (16x8x96x96xf32) <- (16x8x96x96xf32, 16x1x1x96xf32) + add_9 = paddle._C_ops.add(multiply_0, unsqueeze_12) + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_43 = full_5 + + # pd_op.scale: (16x8x96x96xf32) <- (16x8x96x96xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_9, full_5, float("0"), True) + del add_9 + + # pd_op.softmax: (16x8x96x96xf32) <- (16x8x96x96xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.dropout: (16x8x96x96xf32, 16x8x96x96xui8) <- (16x8x96x96xf32, None, 1xf32) + dropout_3, dropout_4 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # builtin.combine: ([16x8x96x96xf32, 16x96x8x64xf32]) <- (16x8x96x96xf32, 16x96x8x64xf32) + combine_5 = [dropout_3, reshape_4] + del dropout_3, reshape_4 + + # pd_op.einsum: (16x96x8x64xf32, [0xf32, 0xf32], [16x8x96x96xf32, 16x96x8x64xf32]) <- ([16x8x96x96xf32, 16x96x8x64xf32]) + einsum_3, einsum_4, einsum_5 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_5, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_5 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_4, + split_5, + ) = einsum_4 + del einsum_4 + + # builtin.split: (16x8x96x96xf32, 16x96x8x64xf32) <- ([16x8x96x96xf32, 16x96x8x64xf32]) + ( + split_6, + split_7, + ) = einsum_5 + del einsum_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_12 = [16, 96, -1] + + # pd_op.reshape: (16x96x512xf32) <- (16x96x8x64xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(einsum_3, full_int_array_12) + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_10 = paddle._C_ops.matmul(reshape_5, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_10, parameter_28) + del parameter_28 + + # pd_op.dropout: (16x96x512xf32, 16x96x512xui8) <- (16x96x512xf32, None, 1xf32) + dropout_5, dropout_6 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_10, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_10 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 16x96x512xf32) + add_11 = paddle._C_ops.add(dropout_1, dropout_5) + + # pd_op.layer_norm: (16x96x512xf32, 16x96xf32, 16x96xf32) <- (16x96x512xf32, 512xf32, 512xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, parameter_27, parameter_26, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_26, parameter_27 + + # pd_op.transpose: (16x512x96xf32) <- (16x96x512xf32) + transpose_2 = paddle._C_ops.transpose(layer_norm_1, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_44 = parameter_25 + del parameter_25 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_13 = paddle._C_ops.unsqueeze(assign_44, full_int_array_9) + + # pd_op.unsqueeze: (16x512x1x96xf32) <- (16x512x96xf32, 1xi64) + unsqueeze_14 = 
paddle._C_ops.unsqueeze(transpose_2, full_int_array_9) + + # pd_op.conv2d: (16x2048x1x96xf32) <- (16x512x1x96xf32, 2048x512x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + unsqueeze_14, unsqueeze_13, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_13 = [1, 2048, 1, 1] + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_24, full_int_array_13) + del parameter_24 + + # pd_op.add: (16x2048x1x96xf32) <- (16x2048x1x96xf32, 1x2048x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_3, reshape_6) + + # pd_op.squeeze: (16x2048x96xf32) <- (16x2048x1x96xf32, 1xi64) + squeeze_6 = paddle._C_ops.squeeze(add_12, full_int_array_9) + + # pd_op.gelu: (16x2048x96xf32) <- (16x2048x96xf32) + gelu_0 = paddle._C_ops.gelu(squeeze_6, False) + + # pd_op.dropout: (16x2048x96xf32, 16x2048x96xui8) <- (16x2048x96xf32, None, 1xf32) + dropout_7, dropout_8 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_45 = parameter_23 + del parameter_23 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_15 = paddle._C_ops.unsqueeze(assign_45, full_int_array_9) + + # pd_op.unsqueeze: (16x2048x1x96xf32) <- (16x2048x96xf32, 1xi64) + unsqueeze_16 = paddle._C_ops.unsqueeze(dropout_7, full_int_array_9) + + # pd_op.conv2d: (16x512x1x96xf32) <- (16x2048x1x96xf32, 512x2048x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + unsqueeze_16, unsqueeze_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_14 = [1, 512, 1, 1] + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_22, full_int_array_14) + del parameter_22 + + # pd_op.add: (16x512x1x96xf32) <- (16x512x1x96xf32, 1x512x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_4, reshape_7) + + # pd_op.squeeze: (16x512x96xf32) <- (16x512x1x96xf32, 1xi64) + squeeze_7 = paddle._C_ops.squeeze(add_13, full_int_array_9) + + # pd_op.transpose: (16x96x512xf32) <- (16x512x96xf32) + transpose_3 = paddle._C_ops.transpose(squeeze_7, [0, 2, 1]) + del squeeze_7 + + # pd_op.dropout: (16x96x512xf32, 16x96x512xui8) <- (16x96x512xf32, None, 1xf32) + dropout_9, dropout_10 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_3, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_3 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 16x96x512xf32) + add_14 = paddle._C_ops.add(layer_norm_1, dropout_9) + + # pd_op.layer_norm: (16x96x512xf32, 16x96xf32, 16x96xf32) <- (16x96x512xf32, 512xf32, 512xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_21, parameter_20, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_20, parameter_21 + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_11 = paddle._C_ops.matmul(layer_norm_4, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_15 = paddle._C_ops.add(matmul_11, parameter_18) + del parameter_18 + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_15, full_int_array_11) 
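+        # Illustrative note (not part of the captured program): reshape_8 above is the query
+        # tensor split into 8 heads of width 64; the ops that follow build the matching key and
+        # value projections and then compute what appears to be the "de-stationary" attention of
+        # the Non-stationary Transformer. Roughly, with Q, K, V of shape (16 x 96 x 8 x 64):
+        #
+        #   scores = softmax((tau * einsum("blhe,bshe->bhls", Q, K) + delta) / sqrt(64))
+        #   out    = einsum("bhls,bshd->blhd", dropout(scores), V)
+        #
+        # where tau corresponds to exp_0 (a learned positive per-sample scale), delta to
+        # matmul_5 (a learned per-position shift), and 1/sqrt(64) is the 0.125 factor applied
+        # by the scale op below; all of these tensors are defined earlier in this graph.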
+ + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_4, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_16 = paddle._C_ops.add(matmul_12, parameter_16) + del parameter_16 + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_16, full_int_array_11) + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_13 = paddle._C_ops.matmul(layer_norm_4, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_13, parameter_14) + del parameter_14 + + # pd_op.reshape: (16x96x8x64xf32) <- (16x96x512xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_17, full_int_array_11) + del full_int_array_11 + + # pd_op.unsqueeze: (16x1x1xf32) <- (16x1xf32, 1xi64) + unsqueeze_17 = paddle._C_ops.unsqueeze(exp_0, full_int_array_5) + + # pd_op.unsqueeze: (16x1x1x1xf32) <- (16x1x1xf32, 1xi64) + unsqueeze_18 = paddle._C_ops.unsqueeze(unsqueeze_17, full_int_array_5) + + # pd_op.unsqueeze: (16x1x96xf32) <- (16x96xf32, 1xi64) + unsqueeze_19 = paddle._C_ops.unsqueeze(matmul_5, full_int_array_5) + + # pd_op.unsqueeze: (16x1x1x96xf32) <- (16x1x96xf32, 1xi64) + unsqueeze_20 = paddle._C_ops.unsqueeze(unsqueeze_19, full_int_array_5) + del full_int_array_5 + + # builtin.combine: ([16x96x8x64xf32, 16x96x8x64xf32]) <- (16x96x8x64xf32, 16x96x8x64xf32) + combine_6 = [reshape_8, reshape_9] + del reshape_8, reshape_9 + + # pd_op.einsum: (16x8x96x96xf32, [0xf32, 0xf32], [16x96x8x64xf32, 16x96x8x64xf32]) <- ([16x96x8x64xf32, 16x96x8x64xf32]) + einsum_6, einsum_7, einsum_8 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_6, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_6 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_8, + split_9, + ) = einsum_7 + del einsum_7 + + # builtin.split: (16x96x8x64xf32, 16x96x8x64xf32) <- ([16x96x8x64xf32, 16x96x8x64xf32]) + ( + split_10, + split_11, + ) = einsum_8 + del einsum_8 + + # pd_op.multiply: (16x8x96x96xf32) <- (16x8x96x96xf32, 16x1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(einsum_6, unsqueeze_18) + + # pd_op.add: (16x8x96x96xf32) <- (16x8x96x96xf32, 16x1x1x96xf32) + add_18 = paddle._C_ops.add(multiply_1, unsqueeze_20) + + # pd_op.scale: (16x8x96x96xf32) <- (16x8x96x96xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_18, full_5, float("0"), True) + del add_18 + + # pd_op.softmax: (16x8x96x96xf32) <- (16x8x96x96xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (16x8x96x96xf32, 16x8x96x96xui8) <- (16x8x96x96xf32, None, 1xf32) + dropout_11, dropout_12 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # builtin.combine: ([16x8x96x96xf32, 16x96x8x64xf32]) <- (16x8x96x96xf32, 16x96x8x64xf32) + combine_7 = [dropout_11, reshape_10] + del dropout_11, reshape_10 + + # pd_op.einsum: (16x96x8x64xf32, [0xf32, 0xf32], [16x8x96x96xf32, 16x96x8x64xf32]) <- ([16x8x96x96xf32, 16x96x8x64xf32]) + einsum_9, einsum_10, einsum_11 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_7, "bhls,bshd->blhd"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_7 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + 
split_12, + split_13, + ) = einsum_10 + del einsum_10 + + # builtin.split: (16x8x96x96xf32, 16x96x8x64xf32) <- ([16x8x96x96xf32, 16x96x8x64xf32]) + ( + split_14, + split_15, + ) = einsum_11 + del einsum_11 + + # pd_op.reshape: (16x96x512xf32) <- (16x96x8x64xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(einsum_9, full_int_array_12) + del full_int_array_12 + + # pd_op.matmul: (16x96x512xf32) <- (16x96x512xf32, 512x512xf32) + matmul_14 = paddle._C_ops.matmul(reshape_11, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 512xf32) + add_19 = paddle._C_ops.add(matmul_14, parameter_12) + del parameter_12 + + # pd_op.dropout: (16x96x512xf32, 16x96x512xui8) <- (16x96x512xf32, None, 1xf32) + dropout_13, dropout_14 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_19, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_19 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 16x96x512xf32) + add_20 = paddle._C_ops.add(layer_norm_4, dropout_13) + + # pd_op.layer_norm: (16x96x512xf32, 16x96xf32, 16x96xf32) <- (16x96x512xf32, 512xf32, 512xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_11, parameter_10, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_10, parameter_11 + + # pd_op.transpose: (16x512x96xf32) <- (16x96x512xf32) + transpose_4 = paddle._C_ops.transpose(layer_norm_7, [0, 2, 1]) + + # pd_op.assign: (2048x512x1xf32) <- (2048x512x1xf32) + assign_46 = parameter_9 + del parameter_9 + + # pd_op.unsqueeze: (2048x512x1x1xf32) <- (2048x512x1xf32, 1xi64) + unsqueeze_21 = paddle._C_ops.unsqueeze(assign_46, full_int_array_9) + + # pd_op.unsqueeze: (16x512x1x96xf32) <- (16x512x96xf32, 1xi64) + unsqueeze_22 = paddle._C_ops.unsqueeze(transpose_4, full_int_array_9) + + # pd_op.conv2d: (16x2048x1x96xf32) <- (16x512x1x96xf32, 2048x512x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + unsqueeze_22, unsqueeze_21, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.reshape: (1x2048x1x1xf32) <- (2048xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_8, full_int_array_13) + del full_int_array_13, parameter_8 + + # pd_op.add: (16x2048x1x96xf32) <- (16x2048x1x96xf32, 1x2048x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_5, reshape_12) + + # pd_op.squeeze: (16x2048x96xf32) <- (16x2048x1x96xf32, 1xi64) + squeeze_8 = paddle._C_ops.squeeze(add_21, full_int_array_9) + + # pd_op.gelu: (16x2048x96xf32) <- (16x2048x96xf32) + gelu_1 = paddle._C_ops.gelu(squeeze_8, False) + + # pd_op.dropout: (16x2048x96xf32, 16x2048x96xui8) <- (16x2048x96xf32, None, 1xf32) + dropout_15, dropout_16 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.assign: (512x2048x1xf32) <- (512x2048x1xf32) + assign_47 = parameter_7 + del parameter_7 + + # pd_op.unsqueeze: (512x2048x1x1xf32) <- (512x2048x1xf32, 1xi64) + unsqueeze_23 = paddle._C_ops.unsqueeze(assign_47, full_int_array_9) + + # pd_op.unsqueeze: (16x2048x1x96xf32) <- (16x2048x96xf32, 1xi64) + unsqueeze_24 = paddle._C_ops.unsqueeze(dropout_15, full_int_array_9) + + # pd_op.conv2d: (16x512x1x96xf32) <- (16x2048x1x96xf32, 512x2048x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + unsqueeze_24, unsqueeze_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + 
) + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_6, full_int_array_14) + del full_int_array_14, parameter_6 + + # pd_op.add: (16x512x1x96xf32) <- (16x512x1x96xf32, 1x512x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_6, reshape_13) + + # pd_op.squeeze: (16x512x96xf32) <- (16x512x1x96xf32, 1xi64) + squeeze_9 = paddle._C_ops.squeeze(add_22, full_int_array_9) + + # pd_op.transpose: (16x96x512xf32) <- (16x512x96xf32) + transpose_5 = paddle._C_ops.transpose(squeeze_9, [0, 2, 1]) + del squeeze_9 + + # pd_op.dropout: (16x96x512xf32, 16x96x512xui8) <- (16x96x512xf32, None, 1xf32) + dropout_17, dropout_18 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + transpose_5, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del transpose_5 + + # pd_op.add: (16x96x512xf32) <- (16x96x512xf32, 16x96x512xf32) + add_23 = paddle._C_ops.add(layer_norm_7, dropout_17) + + # pd_op.layer_norm: (16x96x512xf32, 16x96xf32, 16x96xf32) <- (16x96x512xf32, 512xf32, 512xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_5, parameter_4, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_4, parameter_5 + + # pd_op.layer_norm: (16x96x512xf32, 16x96xf32, 16x96xf32) <- (16x96x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + layer_norm_10, parameter_3, parameter_2, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_2, parameter_3 + + # pd_op.transpose: (16x1x144xf32) <- (16x144x1xf32) + transpose_6 = paddle._C_ops.transpose(assign_11, [0, 2, 1]) + del assign_11 + + # pd_op.unsqueeze: (16x1x144x1x1xf32) <- (16x1x144xf32, 2xi64) + unsqueeze_25 = paddle._C_ops.unsqueeze(transpose_6, full_int_array_7) + del transpose_6 + + # pd_op.pad3d: (16x1x146x1x1xf32) <- (16x1x144x1x1xf32, 6xi64) + pad3d_3 = paddle._C_ops.pad3d( + unsqueeze_25, full_int_array_8, "circular", float("0"), "NCDHW" + ) + del full_int_array_8, unsqueeze_25 + + # pd_op.squeeze: (16x1x146xf32) <- (16x1x146x1x1xf32, 2xi64) + squeeze_10 = paddle._C_ops.squeeze(pad3d_3, full_int_array_7) + del full_int_array_7, pad3d_3 + + # pd_op.assign: (512x1x3xf32) <- (512x1x3xf32) + assign_48 = parameter_1 + del parameter_1 + + # pd_op.unsqueeze: (512x1x1x3xf32) <- (512x1x3xf32, 1xi64) + unsqueeze_26 = paddle._C_ops.unsqueeze(assign_48, full_int_array_9) + + # pd_op.unsqueeze: (16x1x1x146xf32) <- (16x1x146xf32, 1xi64) + unsqueeze_27 = paddle._C_ops.unsqueeze(squeeze_10, full_int_array_9) + del squeeze_10 + + # pd_op.conv2d: (16x512x1x144xf32) <- (16x1x1x146xf32, 512x1x1x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + unsqueeze_27, unsqueeze_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (16x512x144xf32) <- (16x512x1x144xf32, 1xi64) + squeeze_11 = paddle._C_ops.squeeze(conv2d_7, full_int_array_9) + + # pd_op.transpose: (16x144x512xf32) <- (16x512x144xf32) + transpose_7 = paddle._C_ops.transpose(squeeze_11, [0, 2, 1]) + del squeeze_11 + + # pd_op.matmul: (16x144x512xf32) <- (16x144x4xf32, 4x512xf32) + matmul_15 = paddle._C_ops.matmul(slice_2, parameter_0, False, False) + del parameter_0 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 16x144x512xf32) + add_24 = paddle._C_ops.add(transpose_7, matmul_15) + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_15 = [144] + + # pd_op.slice: (1x144x512xf32) <- (1x5000x512xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_2, [1], full_int_array_0, full_int_array_15, [1], [] + ) + del data_2, full_int_array_0, full_int_array_15 + + # pd_op.add: (16x144x512xf32) <- (16x144x512xf32, 1x144x512xf32) + add_25 = paddle._C_ops.add(add_24, slice_6) + + # pd_op.dropout: (16x144x512xf32, 16x144x512xui8) <- (16x144x512xf32, None, 1xf32) + dropout_0, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del ( + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_4, + add_6, + add_7, + add_8, + assign_0, + assign_1, + assign_10, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + concat_2, + concat_3, + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + dropout_1, + dropout_10, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_2, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + einsum_0, + einsum_3, + einsum_6, + einsum_9, + exp_0, + full_4, + full_5, + full_int_array_9, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_1, + matmul_10, + matmul_11, + matmul_12, + matmul_13, + matmul_14, + matmul_15, + matmul_3, + matmul_4, + matmul_5, + matmul_6, + matmul_7, + matmul_8, + matmul_9, + multiply_0, + multiply_1, + relu_0, + relu_1, + relu_2, + relu_3, + reshape_0, + reshape_1, + reshape_11, + reshape_12, + reshape_13, + reshape_5, + reshape_6, + reshape_7, + share_data__1, + share_data__2, + slice_0, + slice_2, + slice_5, + slice_6, + softmax_0, + softmax_1, + squeeze_1, + squeeze_3, + squeeze_6, + squeeze_8, + transpose_1, + transpose_2, + transpose_4, + transpose_7, + unsqueeze_1, + unsqueeze_10, + unsqueeze_11, + unsqueeze_12, + unsqueeze_13, + unsqueeze_14, + unsqueeze_15, + unsqueeze_16, + unsqueeze_17, + unsqueeze_18, + unsqueeze_19, + unsqueeze_2, + unsqueeze_20, + unsqueeze_21, + unsqueeze_22, + unsqueeze_23, + unsqueeze_24, + unsqueeze_26, + unsqueeze_27, + unsqueeze_4, + unsqueeze_5, + unsqueeze_7, + unsqueeze_8, + unsqueeze_9, + ) + + return ( + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + split_8, + split_9, + split_10, + split_11, + split_12, + split_13, + split_14, + split_15, + dropout_0, + layer_norm_0, + ) diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_8/weight_meta.py new file mode 100644 index 000000000..3303a5334 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_8/weight_meta.py @@ 
-0,0 +1,507 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [4, 512] + dtype = "float32" + min_val = float("-0.499894") + max_val = float("0.499252") + mean = float("0.0112786") + std = float("0.288296") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 1, 3] + dtype = "float32" + min_val = float("-2.48586") + max_val = float("2.78747") + mean = float("0.00699817") + std = float("0.835026") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.145888") + max_val = float("0.166125") + mean = float("7.29192e-05") + std = float("0.0312405") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [2048] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.291176") + max_val = float("0.306408") + mean = float("5.23646e-05") + std = float("0.0625314") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-0.0438756") + max_val = float("0.0439059") + mean = float("0.000490117") + std = float("0.0249725") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.044194") + max_val = float("0.0441936") + mean = float("9.76686e-05") + std = float("0.0255299") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-0.0441873") + max_val = float("0.0437633") + mean = float("-9.40517e-05") + std = float("0.0263098") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441941") + max_val = float("0.0441937") + mean = float("-5.73687e-05") + std = float("0.0254951") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + dtype = "float32" + min_val = float("-0.0437687") + max_val = float("0.0437948") + mean = float("0.000960965") + std = float("0.0258335") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441933") + max_val = float("0.0441941") + mean = 
float("4.1616e-05") + std = float("0.0255313") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512] + dtype = "float32" + min_val = float("-0.043746") + max_val = float("0.0440048") + mean = float("0.000930274") + std = float("0.0250449") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441941") + max_val = float("0.0441937") + mean = float("4.15312e-05") + std = float("0.0255066") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [512, 2048, 1] + dtype = "float32" + min_val = float("-0.159572") + max_val = float("0.153425") + mean = float("2.41515e-05") + std = float("0.031245") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [2048] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [2048, 512, 1] + dtype = "float32" + min_val = float("-0.288011") + max_val = float("0.291769") + mean = float("-4.01817e-05") + std = float("0.0625515") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [512] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [512] + dtype = "float32" + min_val = float("-0.0438269") + max_val = float("0.043894") + mean = float("-0.000833528") + std = float("0.0253511") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.044194") + max_val = float("0.0441939") + mean = float("3.96966e-05") + std = float("0.0255083") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [512] + dtype = "float32" + min_val = float("-0.0439395") + max_val = float("0.044144") + mean = float("-0.000142293") + std = float("0.0250598") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.044194") + max_val = float("0.0441935") + mean = float("5.25482e-07") + std = float("0.0255035") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [512] + dtype = "float32" + min_val = float("-0.0440848") + max_val = float("0.0441149") + mean = float("0.00325801") + std = float("0.0251241") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441942") + max_val = float("0.0441936") + mean = float("-1.53034e-05") + std = float("0.0255184") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [512] + dtype = "float32" + min_val = float("-0.0441163") + max_val = float("0.0432088") + mean = 
float("-0.000306653") + std = float("0.0248385") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0441942") + max_val = float("0.044194") + mean = float("6.335e-05") + std = float("0.0255415") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [4, 512] + dtype = "float32" + min_val = float("-0.499228") + max_val = float("0.498545") + mean = float("0.00141352") + std = float("0.287449") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [512, 1, 3] + dtype = "float32" + min_val = float("-2.58358") + max_val = float("2.8859") + mean = float("0.0226969") + std = float("0.821275") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256, 96] + dtype = "float32" + min_val = float("-0.0624998") + max_val = float("0.062495") + mean = float("0.00023814") + std = float("0.0359437") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256] + dtype = "float32" + min_val = float("-0.0621955") + max_val = float("0.0623104") + mean = float("0.00219005") + std = float("0.0363308") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.062496") + max_val = float("0.0624993") + mean = float("-7.35133e-05") + std = float("0.0360347") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-0.703797") + max_val = float("0.698984") + mean = float("-0.0163471") + std = float("0.415462") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [2, 256] + dtype = "float32" + min_val = float("-0.701713") + max_val = float("0.69873") + mean = float("0.00392631") + std = float("0.422085") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [1, 96, 3] + dtype = "float32" + min_val = float("-0.278402") + max_val = float("0.226406") + mean = float("0.00267436") + std = float("0.086884") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256, 1] + dtype = "float32" + min_val = float("-0.0611586") + max_val = float("0.0622426") + mean = float("0.00190723") + std = float("0.0317402") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = float("-0.0623299") + max_val = float("0.0619163") + mean = float("-0.000484633") + std = float("0.0374979") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.062497") + max_val = float("0.0625") + mean = float("5.95283e-05") + std = float("0.036107") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("-0.703553") + max_val = float("0.706041") + mean = float("0.0169737") + std = float("0.41843") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [2, 256] + dtype = "float32" + min_val = float("-0.706659") + max_val = float("0.703765") + mean = float("-0.00195895") + std = float("0.40191") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [1, 96, 3] + dtype = "float32" + min_val 
= float("-0.243955") + max_val = float("0.244005") + mean = float("0.00894126") + std = float("0.0818693") + data = None diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..f5ec257f1 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +ac26e971dd1a4866631e372e4866262a726d356932ff89fe6c794066f3b1af23 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_net.json b/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_net.json new file mode 100644 index 000000000..1977ac2fa --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "Nonstationary", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_9/input_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_9/input_meta.py new file mode 100644 index 000000000..9a5db51a4 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_9/input_meta.py @@ -0,0 +1,27 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [3, 144, 8, 64] + dtype = "float32" + min_val = float("-5.30427") + max_val = float("6.20958") + mean = float("0.00490132") + std = float("0.776939") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3, 144, 8, 64] + dtype = "float32" + min_val = float("-7.91974") + max_val = float("9.62823") + mean = float("0.0194936") + std = float("0.688294") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [3, 1] + dtype = "float32" + data = [0.224591, 1.20468, 0.822164] diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_9/model.py b/paddle_samples/PaddleX/Nonstationary/subgraph_9/model.py new file mode 100644 index 000000000..c7e7862ef --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_9/model.py @@ -0,0 +1,67 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2 + + # pd_op.unsqueeze: (-1x1x1x1xf32) <- (-1x1x1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_0) + + # builtin.combine: ([-1x144x8x64xf32, -1x144x8x64xf32]) <- (-1x144x8x64xf32, -1x144x8x64xf32) + combine_0 = [data_0, data_1] + del data_0, data_1 + + # pd_op.einsum: (-1x8x144x144xf32, [0xf32, 0xf32], [-1x144x8x64xf32, -1x144x8x64xf32]) <- ([-1x144x8x64xf32, -1x144x8x64xf32]) + einsum_0, einsum_1, einsum_2 = (lambda x, f: f(x))( + paddle._C_ops.einsum(combine_0, "blhe,bshe->bhls"), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del combine_0 + + # builtin.split: (0xf32, 0xf32) <- ([0xf32, 0xf32]) + ( + split_0, + split_1, + ) = einsum_1 + del einsum_1 + + # builtin.split: (-1x144x8x64xf32, -1x144x8x64xf32) <- ([-1x144x8x64xf32, -1x144x8x64xf32]) + ( + split_2, + split_3, + ) = einsum_2 + del einsum_2 + + # pd_op.multiply: (-1x8x144x144xf32) <- (-1x8x144x144xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(einsum_0, unsqueeze_1) + + # pd_op.full: (1xf32) <- 
() + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x144x144xf32) <- (-1x8x144x144xf32, 1xf32) + scale_0 = paddle._C_ops.scale(multiply_0, full_0, float("0"), True) + del ( + assign_0, + einsum_0, + full_0, + full_int_array_0, + multiply_0, + unsqueeze_0, + unsqueeze_1, + ) + + return split_0, split_1, split_2, split_3, scale_0 diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/Nonstationary/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..d5ccc8aac --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +436f3f6f93dfde4c710404815f0db94b8f7b7393560fc52082f361c8700943a5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/input_meta.py new file mode 100644 index 000000000..06436bc4f --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/input_meta.py @@ -0,0 +1,43 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 2, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00105882") + std = float("0.0325223") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 2, 1] + dtype = "int32" + data = [1, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00211765") + std = float("0.0459692") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 2, 4] + dtype = "float32" + data = [85.7195, 7.11111, 592.507, 411.654, 129.738, 425.086, 551.964, 636.84] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 8500, 4] + dtype = "float32" + min_val = float("-226.295") + max_val = float("837.271") + mean = float("319.867") + std = float("189.622") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/model.py new file mode 100644 index 000000000..b3d5225ae --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/model.py @@ -0,0 +1,299 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x8500xi64) <- (1x2x8500xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: 
(1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_3, float("0"), True) + del full_3, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x8500xi64) <- (1x8500xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (2xi32) <- (1x2x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (8500xi64) <- (1x8500xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (8500xi32) <- (2xi32, 8500xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 8500] + + # pd_op.reshape: (1x8500xi32) <- (8500xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x8500xb) <- (1x8500xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_5) + del data_2, full_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x8500xi32) <- (1x8500xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x8500xi32) <- (1x8500xb, 1x8500xi32, 1x8500xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (2x4xf32) <- (1x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (8500x4xf32) <- (2x4xf32, 8500xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [1, 8500, 4] + + # pd_op.reshape: (1x8500x4xf32) <- (8500x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x8500x12xf32) <- (1x8500xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_7, where_0.dtype), full_7 + ) + del full_7 + + # pd_op.full: (11xi64) <- () + full_8 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 
= paddle._C_ops.assign_value_( + full_8, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x8500x11xf32) <- (1x8500x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (1x2x1x4xf32) <- (1x2x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (1x1x8500x4xf32) <- (1x8500x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (1x2x1x2xf32) <- (1x2x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (1x2x1x2xf32) <- (1x2x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x8500x2xf32) <- (1x1x8500x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (1x1x8500x2xf32) <- (1x1x8500x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (1x2x8500x2xf32) <- (1x2x1x2xf32, 1x1x8500x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x2x8500x2xf32) <- (1x2x1x2xf32, 1x1x8500x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x2x8500x2xf32) <- (1x2x8500x2xf32, 1x2x8500x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x2x8500x2xf32) <- (1x2x8500x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_9, full_10) + del subtract_0 + + # pd_op.prod: (1x2x8500xf32) <- (1x2x8500x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x2x1x2xf32) <- (1x2x1x2xf32, 1x2x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x2x1x2xf32) <- (1x2x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_9, full_10) + del subtract_1 + + # pd_op.prod: (1x2x1xf32) <- (1x2x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x8500x2xf32) <- (1x1x8500x2xf32, 1x1x8500x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x8500x2xf32) <- (1x1x8500x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_9, full_10) + del full_10, full_9, subtract_2 + + # pd_op.prod: (1x1x8500xf32) <- 
(1x1x8500x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x2x8500xf32) <- (1x2x1xf32, 1x1x8500xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x2x8500xf32) <- (1x2x8500xf32, 1x2x8500xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x2x8500xf32) <- (1x2x8500xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_11, float("1e-09"), True) + del full_11, subtract_3 + + # pd_op.divide: (1x2x8500xf32) <- (1x2x8500xf32, 1x2x8500xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (1x2x8500xf32) <- (1x2x8500xf32, 1x2x8500xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (1x8500xf32) <- (1x2x8500xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (1x8500x1xf32) <- (1x8500xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x8500x11xf32) <- (1x8500x11xf32, 1x8500x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..55e3629ce --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +488b13c55345f609c55fc67aab14fbf0b27ea2cb2c6485aa90a61da490901502 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/input_meta.py new file mode 100644 index 000000000..ef64fc6e6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/input_meta.py @@ -0,0 +1,118 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [13, 4] + dtype = "float32" + data = [ + 406.462, + 0.421381, + 502.567, + 7.47656, + 405.118, + 0.388063, + 515.454, + 7.32615, + 413.011, + 0.362506, + 523.364, + 7.34781, + 449.307, + 0.41976, + 527.918, + 7.42489, + 89.4492, + 199.603, + 449.884, + 271.877, + 90.8021, + 273.699, + 450.504, + 353.533, + 90.5937, + 273.473, + 450.758, + 353.04, + 90.8982, + 273.697, + 449.596, + 353.238, + 92.9324, + 274.74, + 449.81, + 354.295, + 92.6641, + 274.154, + 448.98, + 354.46, + 90.3059, + 272.729, + 449.483, + 354.648, + 90.4465, + 359.394, + 
451.058, + 455.503, + 90.2641, + 359.251, + 449.631, + 455.105, + ] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [13, 4] + dtype = "float32" + data = [ + 42.2711, + 0.0, + 74.2526, + 1.14583, + 42.2711, + 0.0, + 74.2526, + 1.14583, + 42.2711, + 0.0, + 74.2526, + 1.14583, + 42.2711, + 0.0, + 74.2526, + 1.14583, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + ] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/model.py new file mode 100644 index 000000000..e7eac0ce0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/model.py @@ -0,0 +1,219 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 4, full_0) + del data_0 + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_1, 4, full_0) + del data_1 + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_0 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_1 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_2, split_0) + + # 
pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_2, subtract_3) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_4, subtract_5) + del subtract_4, subtract_5 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_2 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_6, full_3, float("1e-10"), True) + del subtract_6 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_0, scale_1) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_7, subtract_8) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(scale_2, scale_1) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_1 = paddle._C_ops.divide(subtract_9, scale_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(divide_0, divide_1) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(subtract_10, full_4, float("1"), True) + del subtract_10 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(scale_3, full_5, float("0"), True) + del ( + add_0, + assign_0, + assign_1, + assign_2, + clip_0, + clip_1, + divide_0, + divide_1, + full_0, + full_1, + full_2, + full_3, + full_4, + full_5, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + scale_1, + scale_2, + scale_3, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + subtract_0, + subtract_1, + subtract_2, + subtract_3, + subtract_7, + subtract_8, + 
subtract_9, + ) + + return scale_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..12260efa8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +d2fca6868ae9755bedbb611af234f7404fba9dbd0d2d6b80c2558ec5c1f951b8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/input_meta.py new file mode 100644 index 000000000..6cd322590 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 3, 704, 704] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("2.04138") + std = float("1.11561") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8] + dtype = "float32" + data = [0.0, 1.0003, 2.0006, 3.00086, 4.00119, 5.00149, 6.00171, 7.00209] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/model.py new file mode 100644 index 000000000..13ffdf77e --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/model.py @@ -0,0 +1,6569 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + 
parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + 
parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + 
parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + data_0, + data_1, + ): + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_521, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_521 + + # pd_op.batch_norm_: (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_520, + parameter_519, + parameter_518, + parameter_517, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_517, parameter_518, parameter_519, parameter_520 + + # pd_op.hardswish: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32) + hardswish_0 = paddle._C_ops.hardswish(batch_norm__0) + + # pd_op.depthwise_conv2d: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 32x1x3x3xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + hardswish_0, parameter_516, [1, 1], [1, 1], "EXPLICIT", 32, [1, 1], "NCHW" + ) + del parameter_516 + + # pd_op.batch_norm_: (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_515, + parameter_514, + parameter_513, + parameter_512, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_512, parameter_513, parameter_514, parameter_515 + + # pd_op.hardswish: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32) + hardswish_1 = paddle._C_ops.hardswish(batch_norm__6) + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + hardswish_1, parameter_511, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_511 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_510, + parameter_509, + parameter_508, + parameter_507, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_507, parameter_508, parameter_509, parameter_510 + + # pd_op.hardswish: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + hardswish_2 = paddle._C_ops.hardswish(batch_norm__12) + + # pd_op.depthwise_conv2d: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32, 64x1x3x3xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + hardswish_2, parameter_506, [2, 2], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del parameter_506 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 
64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_505, + parameter_504, + parameter_503, + parameter_502, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_502, parameter_503, parameter_504, parameter_505 + + # pd_op.hardswish: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + hardswish_3 = paddle._C_ops.hardswish(batch_norm__18) + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x64x-1x-1xf32, 128x64x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + hardswish_3, parameter_501, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_501 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_500, + parameter_499, + parameter_498, + parameter_497, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_497, parameter_498, parameter_499, parameter_500 + + # pd_op.hardswish: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + hardswish_4 = paddle._C_ops.hardswish(batch_norm__24) + + # pd_op.depthwise_conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x1x3x3xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + hardswish_4, parameter_496, [1, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del parameter_496 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_495, + parameter_494, + parameter_493, + parameter_492, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_492, parameter_493, parameter_494, parameter_495 + + # pd_op.hardswish: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + hardswish_5 = paddle._C_ops.hardswish(batch_norm__30) + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x128x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + hardswish_5, parameter_491, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_491 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_490, + parameter_489, + parameter_488, + parameter_487, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_487, parameter_488, parameter_489, parameter_490 + + # pd_op.hardswish: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) 
+ hardswish_6 = paddle._C_ops.hardswish(batch_norm__36) + + # pd_op.depthwise_conv2d: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32, 128x1x3x3xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + hardswish_6, parameter_486, [2, 2], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del parameter_486 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_485, + parameter_484, + parameter_483, + parameter_482, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_482, parameter_483, parameter_484, parameter_485 + + # pd_op.hardswish: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + hardswish_7 = paddle._C_ops.hardswish(batch_norm__42) + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x128x-1x-1xf32, 256x128x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + hardswish_7, parameter_481, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_481 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_480, + parameter_479, + parameter_478, + parameter_477, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_477, parameter_478, parameter_479, parameter_480 + + # pd_op.hardswish: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + hardswish_8 = paddle._C_ops.hardswish(batch_norm__48) + + # pd_op.depthwise_conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x1x3x3xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + hardswish_8, parameter_476, [1, 1], [1, 1], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_476 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_475, + parameter_474, + parameter_473, + parameter_472, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_472, parameter_473, parameter_474, parameter_475 + + # pd_op.hardswish: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + hardswish_9 = paddle._C_ops.hardswish(batch_norm__54) + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x256x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + hardswish_9, parameter_471, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_471 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_5, + parameter_470, + parameter_469, + parameter_468, + parameter_467, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_467, parameter_468, parameter_469, parameter_470 + + # pd_op.hardswish: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + hardswish_10 = paddle._C_ops.hardswish(batch_norm__60) + + # pd_op.depthwise_conv2d: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32, 256x1x3x3xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + hardswish_10, parameter_466, [2, 2], [1, 1], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_466 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_465, + parameter_464, + parameter_463, + parameter_462, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_462, parameter_463, parameter_464, parameter_465 + + # pd_op.hardswish: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + hardswish_11 = paddle._C_ops.hardswish(batch_norm__66) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x256x-1x-1xf32, 512x256x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + hardswish_11, parameter_461, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_461 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_460, + parameter_459, + parameter_458, + parameter_457, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_457, parameter_458, parameter_459, parameter_460 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_12 = paddle._C_ops.hardswish(batch_norm__72) + + # pd_op.depthwise_conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + hardswish_12, parameter_456, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_456 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_455, + parameter_454, + parameter_453, + parameter_452, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_452, parameter_453, parameter_454, parameter_455 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_13 = paddle._C_ops.hardswish(batch_norm__78) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + hardswish_13, 
parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_451 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_14 = paddle._C_ops.hardswish(batch_norm__84) + + # pd_op.depthwise_conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + hardswish_14, parameter_446, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_15 = paddle._C_ops.hardswish(batch_norm__90) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + hardswish_15, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_16 = paddle._C_ops.hardswish(batch_norm__96) + + # pd_op.depthwise_conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + hardswish_16, parameter_436, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + 
if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_17 = paddle._C_ops.hardswish(batch_norm__102) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + hardswish_17, parameter_431, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_18 = paddle._C_ops.hardswish(batch_norm__108) + + # pd_op.depthwise_conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + hardswish_18, parameter_426, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_19 = paddle._C_ops.hardswish(batch_norm__114) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + hardswish_19, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_20 = paddle._C_ops.hardswish(batch_norm__120) + + # pd_op.depthwise_conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + hardswish_20, parameter_416, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) 
<- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_21 = paddle._C_ops.hardswish(batch_norm__126) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x512x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + hardswish_21, parameter_411, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_22 = paddle._C_ops.hardswish(batch_norm__132) + + # pd_op.depthwise_conv2d: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 512x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + hardswish_22, parameter_406, [2, 2], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.hardswish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + hardswish_23 = paddle._C_ops.hardswish(batch_norm__138) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_2 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_0 + + # pd_op.pool2d: (1x512x1x1xf32) <- (1x512x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + hardswish_23, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x128x1x1xf32) <- (1x512x1x1xf32, 128x512x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_0, parameter_401, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.full_int_array: (4xi64) 
<- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_400, full_int_array_1) + del parameter_400 + + # pd_op.add: (1x128x1x1xf32) <- (1x128x1x1xf32, 1x128x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_12, reshape_0) + + # pd_op.relu: (1x128x1x1xf32) <- (1x128x1x1xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.conv2d: (1x512x1x1xf32) <- (1x128x1x1xf32, 512x128x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_0, parameter_399, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_399 + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_398, full_int_array_1) + del parameter_398 + + # pd_op.add: (1x512x1x1xf32) <- (1x512x1x1xf32, 1x512x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_13, reshape_1) + + # pd_op.hardsigmoid: (1x512x1x1xf32) <- (1x512x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_1, float("0.166667"), float("0.5") + ) + del add_1 + + # pd_op.multiply: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32, 1x512x1x1xf32) + multiply_0 = paddle._C_ops.multiply(hardswish_23, hardsigmoid_0) + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x512x-1x-1xf32, 1024x512x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + multiply_0, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_393, parameter_394, parameter_395, parameter_396 + + # pd_op.hardswish: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + hardswish_24 = paddle._C_ops.hardswish(batch_norm__144) + + # pd_op.depthwise_conv2d: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1024x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + hardswish_24, + parameter_392, + [1, 1], + [2, 2], + "EXPLICIT", + 1024, + [1, 1], + "NCHW", + ) + del parameter_392 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.hardswish: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + hardswish_25 = paddle._C_ops.hardswish(batch_norm__150) + + # pd_op.pool2d: (1x1024x1x1xf32) <- (1x1024x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + hardswish_25, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x256x1x1xf32) <- (1x1024x1x1xf32, 256x1024x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + 
pool2d_1, parameter_387, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_386, full_int_array_1) + del parameter_386 + + # pd_op.add: (1x256x1x1xf32) <- (1x256x1x1xf32, 1x256x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_15, reshape_2) + + # pd_op.relu: (1x256x1x1xf32) <- (1x256x1x1xf32) + relu_1 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.conv2d: (1x1024x1x1xf32) <- (1x256x1x1xf32, 1024x256x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_1, parameter_385, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_385 + + # pd_op.reshape: (1x1024x1x1xf32) <- (1024xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_384, full_int_array_1) + del parameter_384 + + # pd_op.add: (1x1024x1x1xf32) <- (1x1024x1x1xf32, 1x1024x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_16, reshape_3) + + # pd_op.hardsigmoid: (1x1024x1x1xf32) <- (1x1024x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_3, float("0.166667"), float("0.5") + ) + del add_3 + + # pd_op.multiply: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1x1024x1x1xf32) + multiply_1 = paddle._C_ops.multiply(hardswish_25, hardsigmoid_1) + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32, 1024x1024x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + multiply_1, parameter_383, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_383 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_382, + parameter_381, + parameter_380, + parameter_379, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_379, parameter_380, parameter_381, parameter_382 + + # pd_op.hardswish: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + hardswish_26 = paddle._C_ops.hardswish(batch_norm__156) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x256x-1x-1xf32, 160x256x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + hardswish_10, parameter_378, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_378 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_377, + parameter_376, + parameter_375, + parameter_374, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_374, parameter_375, parameter_376, parameter_377 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_27 = paddle._C_ops.hardswish(batch_norm__162) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x512x-1x-1xf32, 160x512x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + hardswish_22, parameter_373, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_373 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 
160xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_372, + parameter_371, + parameter_370, + parameter_369, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_369, parameter_370, parameter_371, parameter_372 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_28 = paddle._C_ops.hardswish(batch_norm__168) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x1024x-1x-1xf32, 160x1024x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + hardswish_26, parameter_368, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_368 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_367, + parameter_366, + parameter_365, + parameter_364, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_364, parameter_365, parameter_366, parameter_367 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_29 = paddle._C_ops.hardswish(batch_norm__174) + + # pd_op.nearest_interp: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + hardswish_29, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # builtin.combine: ([1x160x-1x-1xf32, 1x160x-1x-1xf32]) <- (1x160x-1x-1xf32, 1x160x-1x-1xf32) + combine_0 = [nearest_interp_0, hardswish_28] + + # pd_op.concat: (1x320x-1x-1xf32) <- ([1x160x-1x-1xf32, 1x160x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_13 = paddle._C_ops.depthwise_conv2d( + concat_3, parameter_363, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_363 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_13, + parameter_362, + parameter_361, + parameter_360, + parameter_359, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_359, parameter_360, parameter_361, parameter_362 + + # pd_op.hardswish: 
(1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_30 = paddle._C_ops.hardswish(batch_norm__180) + + # pd_op.conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x320x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + hardswish_30, parameter_358, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_358 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_357, + parameter_356, + parameter_355, + parameter_354, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_354, parameter_355, parameter_356, parameter_357 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_31 = paddle._C_ops.hardswish(batch_norm__186) + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_14 = paddle._C_ops.depthwise_conv2d( + hardswish_31, parameter_353, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_353 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_14, + parameter_352, + parameter_351, + parameter_350, + parameter_349, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_349, parameter_350, parameter_351, parameter_352 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_32 = paddle._C_ops.hardswish(batch_norm__192) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x320x-1x-1xf32, 160x320x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + hardswish_32, parameter_348, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_348 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_347, + parameter_346, + parameter_345, + parameter_344, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_344, parameter_345, parameter_346, parameter_347 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_33 = paddle._C_ops.hardswish(batch_norm__198) + + # pd_op.nearest_interp: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + hardswish_33, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x160x-1x-1xf32, 1x160x-1x-1xf32]) <- (1x160x-1x-1xf32, 1x160x-1x-1xf32) + combine_1 = [nearest_interp_1, hardswish_27] + + # pd_op.concat: (1x320x-1x-1xf32) <- ([1x160x-1x-1xf32, 1x160x-1x-1xf32], 1xi32) + 
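# The ops around this point read like a PAN/FPN-style top-down fusion step: a
# deeper 160-channel feature is upsampled 2x with nearest_interp, concatenated
# with the 1x1-projected shallower feature (320 channels total), then refined
# by depthwise-separable pairs (5x5 depthwise conv, groups=320, padding=2,
# followed by a 1x1 pointwise conv), each with batch_norm + hardswish. A rough
# paddle.nn equivalent of one such pair, assumed here only for readability
# (the generated code calls the C ops directly):
#     y = nn.Hardswish()(nn.BatchNorm2D(320)(
#             nn.Conv2D(320, 320, 5, padding=2, groups=320)(x)))   # depthwise 5x5
#     y = nn.Hardswish()(nn.BatchNorm2D(320)(
#             nn.Conv2D(320, 320, 1)(y)))                          # pointwise 1x1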
concat_4 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_15 = paddle._C_ops.depthwise_conv2d( + concat_4, parameter_343, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_343 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_15, + parameter_342, + parameter_341, + parameter_340, + parameter_339, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_339, parameter_340, parameter_341, parameter_342 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_34 = paddle._C_ops.hardswish(batch_norm__204) + + # pd_op.conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x320x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + hardswish_34, parameter_338, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_338 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_337, + parameter_336, + parameter_335, + parameter_334, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_334, parameter_335, parameter_336, parameter_337 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_35 = paddle._C_ops.hardswish(batch_norm__210) + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_16 = paddle._C_ops.depthwise_conv2d( + hardswish_35, parameter_333, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_333 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_16, + parameter_332, + parameter_331, + parameter_330, + parameter_329, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_329, parameter_330, parameter_331, parameter_332 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_36 = paddle._C_ops.hardswish(batch_norm__216) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x320x-1x-1xf32, 160x320x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + hardswish_36, parameter_328, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_328 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda 
x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_327, + parameter_326, + parameter_325, + parameter_324, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_324, parameter_325, parameter_326, parameter_327 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_37 = paddle._C_ops.hardswish(batch_norm__222) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_17 = paddle._C_ops.depthwise_conv2d( + hardswish_37, parameter_323, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_323 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_17, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_38 = paddle._C_ops.hardswish(batch_norm__228) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + hardswish_38, parameter_318, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_39 = paddle._C_ops.hardswish(batch_norm__234) + + # builtin.combine: ([1x160x-1x-1xf32, 1x160x-1x-1xf32]) <- (1x160x-1x-1xf32, 1x160x-1x-1xf32) + combine_2 = [hardswish_39, hardswish_33] + + # pd_op.concat: (1x320x-1x-1xf32) <- ([1x160x-1x-1xf32, 1x160x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_18 = paddle._C_ops.depthwise_conv2d( + concat_5, parameter_313, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_18, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_40 = paddle._C_ops.hardswish(batch_norm__240) + + # pd_op.conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x320x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + hardswish_40, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_41 = paddle._C_ops.hardswish(batch_norm__246) + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_19 = paddle._C_ops.depthwise_conv2d( + hardswish_41, parameter_303, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_19, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_42 = paddle._C_ops.hardswish(batch_norm__252) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x320x-1x-1xf32, 160x320x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + hardswish_42, parameter_298, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_43 = paddle._C_ops.hardswish(batch_norm__258) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_20 = paddle._C_ops.depthwise_conv2d( + hardswish_43, parameter_293, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_293 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 
160xf32, 160xf32, 160xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_20, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_44 = paddle._C_ops.hardswish(batch_norm__264) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + hardswish_44, parameter_288, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_45 = paddle._C_ops.hardswish(batch_norm__270) + + # builtin.combine: ([1x160x-1x-1xf32, 1x160x-1x-1xf32]) <- (1x160x-1x-1xf32, 1x160x-1x-1xf32) + combine_3 = [hardswish_45, hardswish_29] + + # pd_op.concat: (1x320x-1x-1xf32) <- ([1x160x-1x-1xf32, 1x160x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_21 = paddle._C_ops.depthwise_conv2d( + concat_6, parameter_283, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_283 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_21, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_46 = paddle._C_ops.hardswish(batch_norm__276) + + # pd_op.conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x320x1x1xf32) + conv2d_29 = paddle._C_ops.conv2d( + hardswish_46, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_47 = paddle._C_ops.hardswish(batch_norm__282) + + # pd_op.depthwise_conv2d: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32, 320x1x5x5xf32) + depthwise_conv2d_22 = paddle._C_ops.depthwise_conv2d( + hardswish_47, parameter_273, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_273 + + # pd_op.batch_norm_: (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x-1x-1xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_22, + parameter_272, + parameter_271, + parameter_270, + parameter_269, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_269, parameter_270, parameter_271, parameter_272 + + # pd_op.hardswish: (1x320x-1x-1xf32) <- (1x320x-1x-1xf32) + hardswish_48 = paddle._C_ops.hardswish(batch_norm__288) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x320x-1x-1xf32, 160x320x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + hardswish_48, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_49 = paddle._C_ops.hardswish(batch_norm__294) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_23 = paddle._C_ops.depthwise_conv2d( + hardswish_29, parameter_263, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_23, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_50 = paddle._C_ops.hardswish(batch_norm__300) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + hardswish_50, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_258 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_257, + parameter_256, + parameter_255, + parameter_254, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_254, parameter_255, parameter_256, parameter_257 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_51 = paddle._C_ops.hardswish(batch_norm__306) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_24 = paddle._C_ops.depthwise_conv2d( + hardswish_49, parameter_253, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_253 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_24, + parameter_252, + parameter_251, + parameter_250, + parameter_249, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_249, parameter_250, parameter_251, parameter_252 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_52 = paddle._C_ops.hardswish(batch_norm__312) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + hardswish_52, parameter_248, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_248 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_247, + parameter_246, + parameter_245, + parameter_244, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_244, parameter_245, parameter_246, parameter_247 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_53 = paddle._C_ops.hardswish(batch_norm__318) + + # pd_op.add: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x-1x-1xf32) + add_4 = paddle._C_ops.add(hardswish_51, hardswish_53) + + # pd_op.shape64: (4xi64) <- (1x160x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(hardswish_37) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + 
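# The shape64/slice sequence here just unpacks the dynamic shape of the
# 1x160x-1x-1 feature map: shape64 returns [N, C, H, W] as a 4xi64 tensor and
# each slice pulls out one scalar (full_int_array_2..6 are the constant bounds
# [0], [1], [2], [3], [4]). The spatial sizes stay as tensors, presumably
# because H and W are unknown (-1) at export time.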
full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_25 = paddle._C_ops.depthwise_conv2d( + hardswish_37, parameter_243, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_243 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_25, + parameter_242, + parameter_241, + parameter_240, + parameter_239, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_239, parameter_240, parameter_241, parameter_242 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_54 = paddle._C_ops.hardswish(batch_norm__324) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + hardswish_54, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_238 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_237, + parameter_236, + parameter_235, + parameter_234, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_234, parameter_235, parameter_236, parameter_237 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_55 = paddle._C_ops.hardswish(batch_norm__330) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_26 = paddle._C_ops.depthwise_conv2d( + hardswish_55, parameter_233, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_233 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_26, + parameter_232, + parameter_231, + parameter_230, + parameter_229, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_229, parameter_230, parameter_231, parameter_232 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_56 = paddle._C_ops.hardswish(batch_norm__336) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + hardswish_56, parameter_228, 
[1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_228 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_227, + parameter_226, + parameter_225, + parameter_224, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_224, parameter_225, parameter_226, parameter_227 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_57 = paddle._C_ops.hardswish(batch_norm__342) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_27 = paddle._C_ops.depthwise_conv2d( + hardswish_57, parameter_223, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_223 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_27, + parameter_222, + parameter_221, + parameter_220, + parameter_219, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_219, parameter_220, parameter_221, parameter_222 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_58 = paddle._C_ops.hardswish(batch_norm__348) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + hardswish_58, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_218 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_217, + parameter_216, + parameter_215, + parameter_214, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_214, parameter_215, parameter_216, parameter_217 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_59 = paddle._C_ops.hardswish(batch_norm__354) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_28 = paddle._C_ops.depthwise_conv2d( + hardswish_59, parameter_213, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_213 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_28, + parameter_212, + parameter_211, + parameter_210, + parameter_209, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_209, parameter_210, parameter_211, parameter_212 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_60 = paddle._C_ops.hardswish(batch_norm__360) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + hardswish_60, parameter_208, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_208 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_207, + parameter_206, + parameter_205, + parameter_204, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_204, parameter_205, parameter_206, parameter_207 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_61 = paddle._C_ops.hardswish(batch_norm__366) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + hardswish_61, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + pool2d_2, parameter_203, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_203 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_202, full_int_array_1) + del parameter_202 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_37, reshape_4) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x1x1xf32) + multiply_2 = paddle._C_ops.multiply(hardswish_61, sigmoid_0) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + multiply_2, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_201 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_62 = paddle._C_ops.hardswish(batch_norm__372) + + # pd_op.conv2d: (1x11x-1x-1xf32) <- (1x160x-1x-1xf32, 11x160x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + hardswish_62, parameter_196, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_195, full_int_array_1) + del parameter_195 + + # pd_op.add: 
(1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x11x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_39, reshape_5) + + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x160x-1x-1xf32, 32x160x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + hardswish_62, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_193, full_int_array_1) + del parameter_193 + + # pd_op.add: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_40, reshape_6) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x5x5xf32) + conv2d_41 = paddle._C_ops.conv2d( + hardswish_61, parameter_192, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_192 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_188, parameter_189, parameter_190, parameter_191 + + # pd_op.hardswish: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + hardswish_63 = paddle._C_ops.hardswish(batch_norm__378) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 1x1x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + hardswish_63, parameter_187, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.sigmoid: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(batch_norm__384) + del batch_norm__384 + + # pd_op.sigmoid: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_6) + del add_6 + + # pd_op.multiply: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x1x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(sigmoid_2, sigmoid_1) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_11 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_12 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_1 + + # pd_op.scale: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(multiply_3, full_1, float("1e-09"), True) + del multiply_3 + + # pd_op.sqrt: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.transpose: (1x-1x-1x11xf32) <- (1x11x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(sqrt_0, [0, 2, 3, 1]) + + # pd_op.transpose: (1x-1x-1x32xf32) <- (1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(add_7, [0, 2, 3, 1]) + + # pd_op.shape64: 
(4xi64) <- (1x-1x-1x11xf32) + shape64_1 = paddle._C_ops.shape64(transpose_0) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_2 = paddle._C_ops.shape64(transpose_0) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_2 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.cast: (xf32) <- (xi64) + cast_0 = paddle._C_ops.cast(slice_5, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_0 = paddle.arange(full_2, cast_0, full_1, dtype="float32") + del cast_0 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(arange_0, full_1, float("0.5"), True) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(scale_1, full_3, float("0"), True) + del scale_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_1 = paddle._C_ops.cast(slice_4, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_1 = paddle.arange(full_2, cast_1, full_1, dtype="float32") + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(arange_1, full_1, float("0.5"), True) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(scale_3, full_3, float("0"), True) + del scale_3 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_4, scale_2] + del scale_2, scale_4 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_5 = [flatten_1, flatten_0] + del flatten_0, flatten_1 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_0 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_7 = [1, -1, 11] + + # pd_op.reshape: (1x-1x11xf32) <- (1x-1x-1x11xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_0, full_int_array_7) + del transpose_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [-1, 8] + + # pd_op.reshape: (-1x8xf32) <- (1x-1x-1x32xf32, 2xi64) + reshape_8 = paddle._C_ops.reshape(transpose_1, full_int_array_8) + + # pd_op.softmax: (-1x8xf32) <- (-1x8xf32) + softmax_0 = paddle._C_ops.softmax(reshape_8, 1) + del reshape_8 + + # pd_op.matmul: (-1xf32) <- (-1x8xf32, 8xf32) + matmul_0 = paddle._C_ops.matmul(softmax_0, data_1, False, False) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_9 = paddle._C_ops.reshape(matmul_0, full_int_array_9) + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = 
paddle._C_ops.scale(reshape_9, full_3, float("0"), True) + del full_3, reshape_9 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_6 = [full_4, multiply_4, full_5] + del multiply_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_10 = paddle._C_ops.reshape(scale_5, stack_1) + del stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_15 = full_6 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_16 = full_6 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_17 = full_6 + + # pd_op.split_with_num: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_10, 2, full_6) + del reshape_10 + + # builtin.split: (1x-1x2xf32, 1x-1x2xf32) <- ([1x-1x2xf32, 1x-1x2xf32]) + ( + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_18 = full_7 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_19 = full_7 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_20 = full_7 + + # pd_op.scale: (1x-1x2xf32) <- (1x-1x2xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_2, full_7, float("0"), True) + del split_2 + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_8 = paddle._C_ops.add(scale_6, stack_0) + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_9 = paddle._C_ops.add(split_3, stack_0) + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_21 = full_8 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_22 = full_8 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_23 = full_8 + + # builtin.combine: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x2xf32, 1x-1x2xf32) + combine_7 = [add_8, add_9] + + # pd_op.concat: (1x-1x4xf32) <- ([1x-1x2xf32, 1x-1x2xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_8) + del combine_7 + + # pd_op.flatten: (1x11x-1xf32) <- (1x11x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sqrt_0, 2, 3) + + # pd_op.transpose: (1x-1x11xf32) <- (1x11x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (1x32x-1xf32) <- (1x32x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_7, 2, 3) + + # pd_op.transpose: (1x-1x32xf32) <- (1x32x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x4xf32) <- (1x-1x4xf32, 1xf32) + scale_7 = paddle._C_ops.scale(concat_7, full_9, float("0"), True) + del concat_7 + + # pd_op.shape64: (4xi64) <- (1x160x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(hardswish_43) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + 
shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_3 + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_29 = paddle._C_ops.depthwise_conv2d( + hardswish_43, parameter_182, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_182 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_29, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_178, parameter_179, parameter_180, parameter_181 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_64 = paddle._C_ops.hardswish(batch_norm__390) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_43 = paddle._C_ops.conv2d( + hardswish_64, parameter_177, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_177 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_176, + parameter_175, + parameter_174, + parameter_173, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_173, parameter_174, parameter_175, parameter_176 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_65 = paddle._C_ops.hardswish(batch_norm__396) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_30 = paddle._C_ops.depthwise_conv2d( + hardswish_65, parameter_172, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_172 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_30, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_168, parameter_169, parameter_170, parameter_171 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_66 = paddle._C_ops.hardswish(batch_norm__402) + + # 
pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_44 = paddle._C_ops.conv2d( + hardswish_66, parameter_167, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_167 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_163, parameter_164, parameter_165, parameter_166 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_67 = paddle._C_ops.hardswish(batch_norm__408) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_31 = paddle._C_ops.depthwise_conv2d( + hardswish_67, parameter_162, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_162 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_31, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_68 = paddle._C_ops.hardswish(batch_norm__414) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + hardswish_68, parameter_157, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_157 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_153, parameter_154, parameter_155, parameter_156 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_69 = paddle._C_ops.hardswish(batch_norm__420) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_32 = paddle._C_ops.depthwise_conv2d( + hardswish_69, parameter_152, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_152 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_32, + 
parameter_151, + parameter_150, + parameter_149, + parameter_148, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_70 = paddle._C_ops.hardswish(batch_norm__426) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + hardswish_70, parameter_147, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_147 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_146, + parameter_145, + parameter_144, + parameter_143, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_143, parameter_144, parameter_145, parameter_146 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_71 = paddle._C_ops.hardswish(batch_norm__432) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + hardswish_71, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + pool2d_3, parameter_142, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_142 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_141, full_int_array_1) + del parameter_141 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_47, reshape_11) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x1x1xf32) + multiply_5 = paddle._C_ops.multiply(hardswish_71, sigmoid_3) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + multiply_5, parameter_140, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_140 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_139, + parameter_138, + parameter_137, + parameter_136, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_136, parameter_137, parameter_138, parameter_139 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_72 = paddle._C_ops.hardswish(batch_norm__438) + + # pd_op.conv2d: (1x11x-1x-1xf32) <- (1x160x-1x-1xf32, 11x160x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + hardswish_72, parameter_135, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_135 + + # 
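`pool2d_3` -> `conv2d_47` (plus the reshaped bias) -> `sigmoid_3` -> `multiply_5` above is a squeeze-and-excitation style channel gate: global average pooling produces one statistic per channel, a biased 1x1 conv re-projects it, and the sigmoid output rescales the 160-channel feature map before the head's final 1x1 conv. A rough high-level equivalent, with illustrative names (not part of the generated subgraph):

import paddle
import paddle.nn as nn


class ChannelGate(nn.Layer):
    """Global-avg-pool -> 1x1 conv (with bias) -> sigmoid -> channel-wise rescale."""

    def __init__(self, channels=160):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2D(1)          # (N, C, H, W) -> (N, C, 1, 1)
        self.fc = nn.Conv2D(channels, channels, 1)   # bias enabled, as in the graph
        self.gate = nn.Sigmoid()

    def forward(self, x):
        w = self.gate(self.fc(self.pool(x)))         # per-channel weights in (0, 1)
        return x * w                                 # broadcast multiply over H, W


# e.g. gated = ChannelGate()(paddle.rand([1, 160, 20, 20]))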
pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_134, full_int_array_1) + del parameter_134 + + # pd_op.add: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x11x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_49, reshape_12) + + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x160x-1x-1xf32, 32x160x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + hardswish_72, parameter_133, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_133 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_132, full_int_array_1) + del parameter_132 + + # pd_op.add: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 1x32x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_50, reshape_13) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x5x5xf32) + conv2d_51 = paddle._C_ops.conv2d( + hardswish_71, parameter_131, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_131 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_130, + parameter_129, + parameter_128, + parameter_127, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_127, parameter_128, parameter_129, parameter_130 + + # pd_op.hardswish: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + hardswish_73 = paddle._C_ops.hardswish(batch_norm__444) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 1x1x1x1xf32) + conv2d_52 = paddle._C_ops.conv2d( + hardswish_73, parameter_126, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_126 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_125, + parameter_124, + parameter_123, + parameter_122, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_122, parameter_123, parameter_124, parameter_125 + + # pd_op.sigmoid: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(batch_norm__450) + del batch_norm__450 + + # pd_op.sigmoid: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_11) + del add_11 + + # pd_op.multiply: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x1x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(sigmoid_5, sigmoid_4) + + # pd_op.scale: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(multiply_6, full_1, float("1e-09"), True) + del multiply_6 + + # pd_op.sqrt: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sqrt_1 = paddle._C_ops.sqrt(scale_8) + del scale_8 + + # pd_op.transpose: (1x-1x-1x11xf32) <- (1x11x-1x-1xf32) + transpose_4 = paddle._C_ops.transpose(sqrt_1, [0, 2, 3, 1]) + + # pd_op.transpose: (1x-1x-1x32xf32) <- (1x32x-1x-1xf32) + transpose_5 = paddle._C_ops.transpose(add_12, [0, 2, 3, 1]) + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_4 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = 
paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_5 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_5 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(slice_11, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_2 = paddle.arange(full_2, cast_2, full_1, dtype="float32") + del cast_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(arange_2, full_1, float("0.5"), True) + del arange_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_24 = full_10 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(scale_9, full_10, float("0"), True) + del scale_9 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(slice_10, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_3 = paddle.arange(full_2, cast_3, full_1, dtype="float32") + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(arange_3, full_1, float("0.5"), True) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(scale_11, full_10, float("0"), True) + del scale_11 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_12, scale_10] + del scale_10, scale_12 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(split_4, 0, 1) + del split_4 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(split_5, 0, 1) + del split_5 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_9 = [flatten_5, flatten_4] + del flatten_4, flatten_5 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_2 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # pd_op.reshape: (1x-1x11xf32) <- (1x-1x-1x11xf32, 3xi64) + reshape_14 = paddle._C_ops.reshape(transpose_4, full_int_array_7) + del transpose_4 + + # pd_op.reshape: (-1x8xf32) <- (1x-1x-1x32xf32, 2xi64) + reshape_15 = paddle._C_ops.reshape(transpose_5, full_int_array_8) + + # pd_op.softmax: (-1x8xf32) <- (-1x8xf32) + softmax_1 = paddle._C_ops.softmax(reshape_15, 1) + del reshape_15 + + # pd_op.matmul: (-1xf32) <- (-1x8xf32, 8xf32) + matmul_1 = paddle._C_ops.matmul(softmax_1, data_1, False, False) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_16 = paddle._C_ops.reshape(matmul_1, full_int_array_9) + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_13 = paddle._C_ops.scale(reshape_16, full_10, float("0"), True) + del full_10, reshape_16 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_10, slice_11) + del slice_10, slice_11 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_10 = [full_4, multiply_7, full_5] + del multiply_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # pd_op.reshape: 
(1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_17 = paddle._C_ops.reshape(scale_13, stack_3) + del stack_3 + + # pd_op.split_with_num: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_17, 2, full_6) + del reshape_17 + + # builtin.split: (1x-1x2xf32, 1x-1x2xf32) <- ([1x-1x2xf32, 1x-1x2xf32]) + ( + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.scale: (1x-1x2xf32) <- (1x-1x2xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_6, full_7, float("0"), True) + del split_6 + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_13 = paddle._C_ops.add(scale_14, stack_2) + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_14 = paddle._C_ops.add(split_7, stack_2) + + # builtin.combine: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x2xf32, 1x-1x2xf32) + combine_11 = [add_13, add_14] + + # pd_op.concat: (1x-1x4xf32) <- ([1x-1x2xf32, 1x-1x2xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_11, full_8) + del combine_11 + + # pd_op.flatten: (1x11x-1xf32) <- (1x11x-1x-1xf32) + flatten_6 = paddle._C_ops.flatten(sqrt_1, 2, 3) + + # pd_op.transpose: (1x-1x11xf32) <- (1x11x-1xf32) + transpose_6 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.flatten: (1x32x-1xf32) <- (1x32x-1x-1xf32) + flatten_7 = paddle._C_ops.flatten(add_12, 2, 3) + + # pd_op.transpose: (1x-1x32xf32) <- (1x32x-1xf32) + transpose_7 = paddle._C_ops.transpose(flatten_7, [0, 2, 1]) + del flatten_7 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x4xf32) <- (1x-1x4xf32, 1xf32) + scale_15 = paddle._C_ops.scale(concat_8, full_11, float("0"), True) + del concat_8 + + # pd_op.shape64: (4xi64) <- (1x160x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(hardswish_49) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_6 + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_33 = paddle._C_ops.depthwise_conv2d( + hardswish_49, parameter_121, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_121 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_33, + parameter_120, + parameter_119, + parameter_118, + parameter_117, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_117, parameter_118, parameter_119, parameter_120 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_74 = paddle._C_ops.hardswish(batch_norm__456) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- 
(1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + hardswish_74, parameter_116, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_116 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_115, + parameter_114, + parameter_113, + parameter_112, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_112, parameter_113, parameter_114, parameter_115 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_75 = paddle._C_ops.hardswish(batch_norm__462) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_34 = paddle._C_ops.depthwise_conv2d( + hardswish_75, parameter_111, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_111 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_34, + parameter_110, + parameter_109, + parameter_108, + parameter_107, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_107, parameter_108, parameter_109, parameter_110 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_76 = paddle._C_ops.hardswish(batch_norm__468) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + hardswish_76, parameter_106, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_106 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_105, + parameter_104, + parameter_103, + parameter_102, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_102, parameter_103, parameter_104, parameter_105 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_77 = paddle._C_ops.hardswish(batch_norm__474) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_35 = paddle._C_ops.depthwise_conv2d( + hardswish_77, parameter_101, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_101 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_35, + parameter_100, + parameter_99, + parameter_98, + 
parameter_97, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_97, parameter_98, parameter_99 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_78 = paddle._C_ops.hardswish(batch_norm__480) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + hardswish_78, parameter_96, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_96 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_95, + parameter_94, + parameter_93, + parameter_92, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_92, parameter_93, parameter_94, parameter_95 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_79 = paddle._C_ops.hardswish(batch_norm__486) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_36 = paddle._C_ops.depthwise_conv2d( + hardswish_79, parameter_91, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_91 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_36, + parameter_90, + parameter_89, + parameter_88, + parameter_87, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_87, parameter_88, parameter_89, parameter_90 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_80 = paddle._C_ops.hardswish(batch_norm__492) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + hardswish_80, parameter_86, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_86 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_85, + parameter_84, + parameter_83, + parameter_82, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_82, parameter_83, parameter_84, parameter_85 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_81 = paddle._C_ops.hardswish(batch_norm__498) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + hardswish_81, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: 
(1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + pool2d_4, parameter_81, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_81 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_80, full_int_array_1) + del parameter_80 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_57, reshape_18) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_15) + del add_15 + + # pd_op.multiply: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x1x1xf32) + multiply_8 = paddle._C_ops.multiply(hardswish_81, sigmoid_6) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_58 = paddle._C_ops.conv2d( + multiply_8, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_82 = paddle._C_ops.hardswish(batch_norm__504) + + # pd_op.conv2d: (1x11x-1x-1xf32) <- (1x160x-1x-1xf32, 11x160x1x1xf32) + conv2d_59 = paddle._C_ops.conv2d( + hardswish_82, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_73, full_int_array_1) + del parameter_73 + + # pd_op.add: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x11x1x1xf32) + add_16 = paddle._C_ops.add(conv2d_59, reshape_19) + + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x160x-1x-1xf32, 32x160x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + hardswish_82, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(parameter_71, full_int_array_1) + del parameter_71 + + # pd_op.add: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 1x32x1x1xf32) + add_17 = paddle._C_ops.add(conv2d_60, reshape_20) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x5x5xf32) + conv2d_61 = paddle._C_ops.conv2d( + hardswish_81, parameter_70, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_70 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_69, + parameter_68, + parameter_67, + parameter_66, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_66, parameter_67, parameter_68, parameter_69 + + # pd_op.hardswish: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + hardswish_83 = paddle._C_ops.hardswish(batch_norm__510) + + # 
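As at the stride-8 and stride-16 levels, the 5x5 conv -> BN -> hardswish branch built from `conv2d_61` (and the 1x1 conv that follows) ends in a sigmoid that is fused with the 11-class sigmoid as `sqrt(cls * quality + 1e-9)` (compare `sigmoid_4`/`sigmoid_5`/`sqrt_1` earlier and `sigmoid_7`/`sigmoid_8`/`sqrt_2` just below). A compact sketch of that fusion, with tensor shapes chosen for illustration:

import paddle
import paddle.nn.functional as F

# Hypothetical raw head outputs for one feature level:
#   cls_logits     -> (1, 11, H, W)  per-class logits
#   quality_logits -> (1, 1,  H, W)  single-channel quality/centerness logits
cls_logits = paddle.randn([1, 11, 20, 20])
quality_logits = paddle.randn([1, 1, 20, 20])

cls_prob = F.sigmoid(cls_logits)
quality_prob = F.sigmoid(quality_logits)             # broadcasts over the 11 classes

# Geometric-mean style fusion; the 1e-9 offset keeps sqrt() away from exact zero.
score = paddle.sqrt(cls_prob * quality_prob + 1e-9)  # (1, 11, H, W)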
pd_op.conv2d: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 1x1x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + hardswish_83, parameter_65, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_65 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_64, + parameter_63, + parameter_62, + parameter_61, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_61, parameter_62, parameter_63, parameter_64 + + # pd_op.sigmoid: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(batch_norm__516) + del batch_norm__516 + + # pd_op.sigmoid: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_16) + del add_16 + + # pd_op.multiply: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x1x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(sigmoid_8, sigmoid_7) + + # pd_op.scale: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(multiply_9, full_1, float("1e-09"), True) + del multiply_9 + + # pd_op.sqrt: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sqrt_2 = paddle._C_ops.sqrt(scale_16) + del scale_16 + + # pd_op.transpose: (1x-1x-1x11xf32) <- (1x11x-1x-1xf32) + transpose_8 = paddle._C_ops.transpose(sqrt_2, [0, 2, 3, 1]) + + # pd_op.transpose: (1x-1x-1x32xf32) <- (1x32x-1x-1xf32) + transpose_9 = paddle._C_ops.transpose(add_17, [0, 2, 3, 1]) + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_7 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_7 + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_8 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_8 + + # pd_op.cast: (xf32) <- (xi64) + cast_4 = paddle._C_ops.cast(slice_17, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_4 = paddle.arange(full_2, cast_4, full_1, dtype="float32") + del cast_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(arange_4, full_1, float("0.5"), True) + del arange_4 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_25 = full_12 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(scale_17, full_12, float("0"), True) + del scale_17 + + # pd_op.cast: (xf32) <- (xi64) + cast_5 = paddle._C_ops.cast(slice_16, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_5 = paddle.arange(full_2, cast_5, full_1, dtype="float32") + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(arange_5, full_1, float("0.5"), True) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(scale_19, full_12, float("0"), True) + del scale_19 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_12 = [scale_20, scale_18] + del scale_18, scale_20 + + # pd_op.meshgrid: 
([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_12) + del combine_12 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_8, + split_9, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_8 = paddle._C_ops.flatten(split_8, 0, 1) + del split_8 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_9 = paddle._C_ops.flatten(split_9, 0, 1) + del split_9 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_13 = [flatten_9, flatten_8] + del flatten_8, flatten_9 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_4 = paddle._C_ops.stack(combine_13, -1) + del combine_13 + + # pd_op.reshape: (1x-1x11xf32) <- (1x-1x-1x11xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(transpose_8, full_int_array_7) + del transpose_8 + + # pd_op.reshape: (-1x8xf32) <- (1x-1x-1x32xf32, 2xi64) + reshape_22 = paddle._C_ops.reshape(transpose_9, full_int_array_8) + + # pd_op.softmax: (-1x8xf32) <- (-1x8xf32) + softmax_2 = paddle._C_ops.softmax(reshape_22, 1) + del reshape_22 + + # pd_op.matmul: (-1xf32) <- (-1x8xf32, 8xf32) + matmul_2 = paddle._C_ops.matmul(softmax_2, data_1, False, False) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_23 = paddle._C_ops.reshape(matmul_2, full_int_array_9) + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_21 = paddle._C_ops.scale(reshape_23, full_12, float("0"), True) + del full_12, reshape_23 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_16, slice_17) + del slice_16, slice_17 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_14 = [full_4, multiply_10, full_5] + del multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_24 = paddle._C_ops.reshape(scale_21, stack_5) + del stack_5 + + # pd_op.split_with_num: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(reshape_24, 2, full_6) + del reshape_24 + + # builtin.split: (1x-1x2xf32, 1x-1x2xf32) <- ([1x-1x2xf32, 1x-1x2xf32]) + ( + split_10, + split_11, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.scale: (1x-1x2xf32) <- (1x-1x2xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_10, full_7, float("0"), True) + del split_10 + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_18 = paddle._C_ops.add(scale_22, stack_4) + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_19 = paddle._C_ops.add(split_11, stack_4) + + # builtin.combine: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x2xf32, 1x-1x2xf32) + combine_15 = [add_18, add_19] + + # pd_op.concat: (1x-1x4xf32) <- ([1x-1x2xf32, 1x-1x2xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # pd_op.flatten: (1x11x-1xf32) <- (1x11x-1x-1xf32) + flatten_10 = paddle._C_ops.flatten(sqrt_2, 2, 3) + + # pd_op.transpose: (1x-1x11xf32) <- (1x11x-1xf32) + transpose_10 = paddle._C_ops.transpose(flatten_10, [0, 2, 1]) + del flatten_10 + + # pd_op.flatten: (1x32x-1xf32) <- (1x32x-1x-1xf32) + flatten_11 = paddle._C_ops.flatten(add_17, 2, 3) + + # pd_op.transpose: (1x-1x32xf32) <- (1x32x-1xf32) + transpose_11 = paddle._C_ops.transpose(flatten_11, [0, 2, 1]) + del flatten_11 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("0.03125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x4xf32) <- 
(1x-1x4xf32, 1xf32) + scale_23 = paddle._C_ops.scale(concat_9, full_13, float("0"), True) + del concat_9 + + # pd_op.shape64: (4xi64) <- (1x160x-1x-1xf32) + shape64_9 = paddle._C_ops.shape64(add_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_6, shape64_9 + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_37 = paddle._C_ops.depthwise_conv2d( + add_4, parameter_60, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_60 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_37, + parameter_59, + parameter_58, + parameter_57, + parameter_56, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_56, parameter_57, parameter_58, parameter_59 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_84 = paddle._C_ops.hardswish(batch_norm__522) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + hardswish_84, parameter_55, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_55 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_54, + parameter_53, + parameter_52, + parameter_51, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_51, parameter_52, parameter_53, parameter_54 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_85 = paddle._C_ops.hardswish(batch_norm__528) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_38 = paddle._C_ops.depthwise_conv2d( + hardswish_85, parameter_50, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_50 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_38, + parameter_49, + parameter_48, + parameter_47, + parameter_46, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_46, parameter_47, parameter_48, parameter_49 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_86 = paddle._C_ops.hardswish(batch_norm__534) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + hardswish_86, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_44, + parameter_43, + parameter_42, + parameter_41, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_41, parameter_42, parameter_43, parameter_44 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_87 = paddle._C_ops.hardswish(batch_norm__540) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_39 = paddle._C_ops.depthwise_conv2d( + hardswish_87, parameter_40, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_40 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_39, + parameter_39, + parameter_38, + parameter_37, + parameter_36, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_36, parameter_37, parameter_38, parameter_39 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_88 = paddle._C_ops.hardswish(batch_norm__546) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + hardswish_88, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_34, + parameter_33, + parameter_32, + parameter_31, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_31, parameter_32, parameter_33, parameter_34 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_89 = paddle._C_ops.hardswish(batch_norm__552) + + # pd_op.depthwise_conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x1x5x5xf32) + depthwise_conv2d_40 = paddle._C_ops.depthwise_conv2d( + hardswish_89, parameter_30, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_30 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 
160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_40, + parameter_29, + parameter_28, + parameter_27, + parameter_26, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_26, parameter_27, parameter_28, parameter_29 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_90 = paddle._C_ops.hardswish(batch_norm__558) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_66 = paddle._C_ops.conv2d( + hardswish_90, parameter_25, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_25 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_24, + parameter_23, + parameter_22, + parameter_21, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_21, parameter_22, parameter_23, parameter_24 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_91 = paddle._C_ops.hardswish(batch_norm__564) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + hardswish_91, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + pool2d_5, parameter_20, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_20 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_25 = paddle._C_ops.reshape(parameter_19, full_int_array_1) + del parameter_19 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_20 = paddle._C_ops.add(conv2d_67, reshape_25) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_9 = paddle._C_ops.sigmoid(add_20) + del add_20 + + # pd_op.multiply: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x1x1xf32) + multiply_11 = paddle._C_ops.multiply(hardswish_91, sigmoid_9) + + # pd_op.conv2d: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32, 160x160x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + multiply_11, parameter_18, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_18 + + # pd_op.batch_norm_: (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x-1x-1xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_17, + parameter_16, + parameter_15, + parameter_14, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_14, parameter_15, parameter_16, parameter_17 + + # pd_op.hardswish: (1x160x-1x-1xf32) <- (1x160x-1x-1xf32) + hardswish_92 = paddle._C_ops.hardswish(batch_norm__570) + + # pd_op.conv2d: 
(1x11x-1x-1xf32) <- (1x160x-1x-1xf32, 11x160x1x1xf32) + conv2d_69 = paddle._C_ops.conv2d( + hardswish_92, parameter_13, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_26 = paddle._C_ops.reshape(parameter_12, full_int_array_1) + del parameter_12 + + # pd_op.add: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x11x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_69, reshape_26) + + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x160x-1x-1xf32, 32x160x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + hardswish_92, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(parameter_10, full_int_array_1) + del full_int_array_1, parameter_10 + + # pd_op.add: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 1x32x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_70, reshape_27) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x160x-1x-1xf32, 1x160x5x5xf32) + conv2d_71 = paddle._C_ops.conv2d( + hardswish_91, parameter_9, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.hardswish: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + hardswish_93 = paddle._C_ops.hardswish(batch_norm__576) + + # pd_op.conv2d: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32, 1x1x1x1xf32) + conv2d_72 = paddle._C_ops.conv2d( + hardswish_93, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x-1x-1xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.sigmoid: (1x1x-1x-1xf32) <- (1x1x-1x-1xf32) + sigmoid_10 = paddle._C_ops.sigmoid(batch_norm__582) + del batch_norm__582 + + # pd_op.sigmoid: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sigmoid_11 = paddle._C_ops.sigmoid(add_21) + del add_21 + + # pd_op.multiply: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1x1x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(sigmoid_11, sigmoid_10) + + # pd_op.scale: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32, 1xf32) + scale_24 = paddle._C_ops.scale(multiply_12, full_1, float("1e-09"), True) + del multiply_12 + + # pd_op.sqrt: (1x11x-1x-1xf32) <- (1x11x-1x-1xf32) + sqrt_3 = paddle._C_ops.sqrt(scale_24) + del scale_24 + + # pd_op.transpose: (1x-1x-1x11xf32) <- (1x11x-1x-1xf32) + transpose_12 = paddle._C_ops.transpose(sqrt_3, [0, 2, 3, 1]) + + # pd_op.transpose: (1x-1x-1x32xf32) <- (1x32x-1x-1xf32) + transpose_13 = 
paddle._C_ops.transpose(add_22, [0, 2, 3, 1]) + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_10 = paddle._C_ops.shape64(transpose_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, shape64_10 + + # pd_op.shape64: (4xi64) <- (1x-1x-1x11xf32) + shape64_11 = paddle._C_ops.shape64(transpose_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4, full_int_array_5, shape64_11 + + # pd_op.cast: (xf32) <- (xi64) + cast_6 = paddle._C_ops.cast(slice_23, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_6 = paddle.arange(full_2, cast_6, full_1, dtype="float32") + del cast_6 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_25 = paddle._C_ops.scale(arange_6, full_1, float("0.5"), True) + del arange_6 + + # pd_op.full: (1xf32) <- () + full_14 = paddle._C_ops.full( + [1], float("64"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_26 = full_14 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_26 = paddle._C_ops.scale(scale_25, full_14, float("0"), True) + del scale_25 + + # pd_op.cast: (xf32) <- (xi64) + cast_7 = paddle._C_ops.cast(slice_22, paddle.float32) + + # pd_op.arange: (-1xf32) <- (1xf32, xf32, 1xf32) + arange_7 = paddle.arange(full_2, cast_7, full_1, dtype="float32") + del cast_7, full_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_27 = paddle._C_ops.scale(arange_7, full_1, float("0.5"), True) + del arange_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_28 = paddle._C_ops.scale(scale_27, full_14, float("0"), True) + del scale_27 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_16 = [scale_28, scale_26] + del scale_26, scale_28 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_3 = paddle._C_ops.meshgrid(combine_16) + del combine_16 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_12, + split_13, + ) = meshgrid_3 + del meshgrid_3 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_12 = paddle._C_ops.flatten(split_12, 0, 1) + del split_12 + + # pd_op.flatten: (-1xf32) <- (-1x-1xf32) + flatten_13 = paddle._C_ops.flatten(split_13, 0, 1) + del split_13 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_17 = [flatten_13, flatten_12] + del flatten_12, flatten_13 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_6 = paddle._C_ops.stack(combine_17, -1) + del combine_17 + + # pd_op.reshape: (1x-1x11xf32) <- (1x-1x-1x11xf32, 3xi64) + reshape_28 = paddle._C_ops.reshape(transpose_12, full_int_array_7) + del full_int_array_7, transpose_12 + + # pd_op.reshape: (-1x8xf32) <- (1x-1x-1x32xf32, 2xi64) + reshape_29 = paddle._C_ops.reshape(transpose_13, full_int_array_8) + del full_int_array_8 + + # pd_op.softmax: (-1x8xf32) <- (-1x8xf32) + softmax_3 = paddle._C_ops.softmax(reshape_29, 1) + del reshape_29 + + # pd_op.matmul: (-1xf32) <- (-1x8xf32, 8xf32) + matmul_3 = paddle._C_ops.matmul(softmax_3, data_1, False, False) + del data_1 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_30 = paddle._C_ops.reshape(matmul_3, full_int_array_9) + del full_int_array_9 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_29 = paddle._C_ops.scale(reshape_30, full_14, float("0"), True) + del full_14, reshape_30 + + # 
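The block above repeats, for the stride-64 level, the decoding recipe already used at strides 8, 16 and 32: grid-cell centers come from `(arange(...) + 0.5) * stride`, the 32-channel regression map is reshaped to `(-1, 8)`, softmax-ed and projected onto the 8-value vector `data_1` to get per-side distances, and those distances are subtracted from / added to the centers to form corner boxes (which the graph then rescales by `1/stride`, e.g. the 0.015625 constant below). A self-contained sketch of that decode for one level; the stride, feature size and projection vector are illustrative, not read from the graph:

import paddle
import paddle.nn.functional as F

stride = 64
h, w = 5, 5
reg = paddle.randn([1, 32, h, w])                    # 4 sides x 8 distance bins
project = paddle.arange(8, dtype="float32")          # stand-in for data_1

# Grid-cell centers, in input-image coordinates.
xs = (paddle.arange(w, dtype="float32") + 0.5) * stride
ys = (paddle.arange(h, dtype="float32") + 0.5) * stride
gy, gx = paddle.meshgrid(ys, xs)
centers = paddle.stack([gx.flatten(), gy.flatten()], axis=-1)   # (h*w, 2)

# Distribution-focal decode: softmax over the 8 bins, expectation via matmul.
dist = paddle.transpose(reg, [0, 2, 3, 1]).reshape([-1, 8])     # (h*w*4, 8)
dist = paddle.matmul(F.softmax(dist, axis=1), project)          # (h*w*4,)
dist = dist.reshape([1, -1, 4]) * stride                        # (1, h*w, 4) lt/rb

lt, rb = paddle.split(dist, 2, axis=-1)
boxes = paddle.concat([centers - lt, centers + rb], axis=-1)    # (1, h*w, 4) xyxy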
pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_13 = paddle._C_ops.multiply(slice_22, slice_23) + del slice_22, slice_23 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [full_4, multiply_13, full_5] + del full_4, full_5, multiply_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_31 = paddle._C_ops.reshape(scale_29, stack_7) + del stack_7 + + # pd_op.split_with_num: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x4xf32, 1xi32) + split_with_num_3 = paddle._C_ops.split_with_num(reshape_31, 2, full_6) + del reshape_31 + + # builtin.split: (1x-1x2xf32, 1x-1x2xf32) <- ([1x-1x2xf32, 1x-1x2xf32]) + ( + split_14, + split_15, + ) = split_with_num_3 + del split_with_num_3 + + # pd_op.scale: (1x-1x2xf32) <- (1x-1x2xf32, 1xf32) + scale_30 = paddle._C_ops.scale(split_14, full_7, float("0"), True) + del split_14 + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_23 = paddle._C_ops.add(scale_30, stack_6) + + # pd_op.add: (1x-1x2xf32) <- (1x-1x2xf32, -1x2xf32) + add_24 = paddle._C_ops.add(split_15, stack_6) + + # builtin.combine: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x2xf32, 1x-1x2xf32) + combine_19 = [add_23, add_24] + + # pd_op.concat: (1x-1x4xf32) <- ([1x-1x2xf32, 1x-1x2xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_19, full_8) + del combine_19 + + # pd_op.flatten: (1x11x-1xf32) <- (1x11x-1x-1xf32) + flatten_14 = paddle._C_ops.flatten(sqrt_3, 2, 3) + + # pd_op.transpose: (1x-1x11xf32) <- (1x11x-1xf32) + transpose_14 = paddle._C_ops.transpose(flatten_14, [0, 2, 1]) + del flatten_14 + + # pd_op.flatten: (1x32x-1xf32) <- (1x32x-1x-1xf32) + flatten_15 = paddle._C_ops.flatten(add_22, 2, 3) + + # pd_op.transpose: (1x-1x32xf32) <- (1x32x-1xf32) + transpose_15 = paddle._C_ops.transpose(flatten_15, [0, 2, 1]) + del flatten_15 + + # pd_op.full: (1xf32) <- () + full_15 = paddle._C_ops.full( + [1], float("0.015625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x4xf32) <- (1x-1x4xf32, 1xf32) + scale_31 = paddle._C_ops.scale(concat_10, full_15, float("0"), True) + del concat_10 + + # builtin.combine: ([1x-1x11xf32, 1x-1x11xf32, 1x-1x11xf32, 1x-1x11xf32]) <- (1x-1x11xf32, 1x-1x11xf32, 1x-1x11xf32, 1x-1x11xf32) + combine_20 = [transpose_2, transpose_6, transpose_10, transpose_14] + + # pd_op.concat: (1x-1x11xf32) <- ([1x-1x11xf32, 1x-1x11xf32, 1x-1x11xf32, 1x-1x11xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_0) + del combine_20 + + # builtin.combine: ([1x-1x4xf32, 1x-1x4xf32, 1x-1x4xf32, 1x-1x4xf32]) <- (1x-1x4xf32, 1x-1x4xf32, 1x-1x4xf32, 1x-1x4xf32) + combine_21 = [scale_7, scale_15, scale_23, scale_31] + + # pd_op.concat: (1x-1x4xf32) <- ([1x-1x4xf32, 1x-1x4xf32, 1x-1x4xf32, 1x-1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_21, full_0) + del combine_21 + + # builtin.combine: ([1x-1x32xf32, 1x-1x32xf32, 1x-1x32xf32, 1x-1x32xf32]) <- (1x-1x32xf32, 1x-1x32xf32, 1x-1x32xf32, 1x-1x32xf32) + combine_22 = [transpose_3, transpose_7, transpose_11, transpose_15] + + # pd_op.concat: (1x-1x32xf32) <- ([1x-1x32xf32, 1x-1x32xf32, 1x-1x32xf32, 1x-1x32xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_22, full_0) + del ( + add_12, + add_13, + add_14, + add_17, + add_18, + add_19, + add_22, + add_23, + add_24, + add_4, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + 
assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + 
batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + 
batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + 
batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + combine_22, + concat_3, + concat_4, + concat_5, + concat_6, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_8, + conv2d_9, + depthwise_conv2d_0, + depthwise_conv2d_1, + depthwise_conv2d_10, + depthwise_conv2d_11, + depthwise_conv2d_12, + depthwise_conv2d_13, + depthwise_conv2d_14, + depthwise_conv2d_15, + depthwise_conv2d_16, + depthwise_conv2d_17, + depthwise_conv2d_18, + depthwise_conv2d_19, + depthwise_conv2d_2, + depthwise_conv2d_20, + depthwise_conv2d_21, + depthwise_conv2d_22, + depthwise_conv2d_23, + depthwise_conv2d_24, + depthwise_conv2d_25, + depthwise_conv2d_26, + depthwise_conv2d_27, + depthwise_conv2d_28, + depthwise_conv2d_29, + depthwise_conv2d_3, + depthwise_conv2d_30, + depthwise_conv2d_31, + depthwise_conv2d_32, + depthwise_conv2d_33, + depthwise_conv2d_34, + depthwise_conv2d_35, + depthwise_conv2d_36, + depthwise_conv2d_37, + depthwise_conv2d_38, + depthwise_conv2d_39, + depthwise_conv2d_4, + depthwise_conv2d_40, + depthwise_conv2d_5, + depthwise_conv2d_6, + depthwise_conv2d_7, + depthwise_conv2d_8, + depthwise_conv2d_9, + full_0, + full_1, + full_11, + full_13, + full_15, + full_6, + full_7, + full_8, + full_9, + full_int_array_0, + hardsigmoid_0, + hardsigmoid_1, + hardswish_0, + hardswish_1, + hardswish_10, + hardswish_11, + hardswish_12, + hardswish_13, + hardswish_14, + hardswish_15, + hardswish_16, + hardswish_17, + hardswish_18, + hardswish_19, + hardswish_2, + hardswish_20, + hardswish_21, + hardswish_22, + hardswish_23, + hardswish_24, + hardswish_25, + hardswish_26, + hardswish_27, + hardswish_28, + hardswish_29, + hardswish_3, + hardswish_30, + hardswish_31, + hardswish_32, + hardswish_33, + hardswish_34, + hardswish_35, + hardswish_36, + hardswish_37, + hardswish_38, + hardswish_39, + hardswish_4, + hardswish_40, + hardswish_41, + hardswish_42, + hardswish_43, + hardswish_44, + hardswish_45, + hardswish_46, + hardswish_47, + hardswish_48, + hardswish_49, + hardswish_5, + hardswish_50, + hardswish_51, + hardswish_52, + hardswish_53, + hardswish_54, + hardswish_55, + hardswish_56, + hardswish_57, + hardswish_58, + hardswish_59, + hardswish_6, + hardswish_60, + hardswish_61, + hardswish_62, + 
hardswish_63, + hardswish_64, + hardswish_65, + hardswish_66, + hardswish_67, + hardswish_68, + hardswish_69, + hardswish_7, + hardswish_70, + hardswish_71, + hardswish_72, + hardswish_73, + hardswish_74, + hardswish_75, + hardswish_76, + hardswish_77, + hardswish_78, + hardswish_79, + hardswish_8, + hardswish_80, + hardswish_81, + hardswish_82, + hardswish_83, + hardswish_84, + hardswish_85, + hardswish_86, + hardswish_87, + hardswish_88, + hardswish_89, + hardswish_9, + hardswish_90, + hardswish_91, + hardswish_92, + hardswish_93, + matmul_0, + matmul_1, + matmul_2, + matmul_3, + multiply_0, + multiply_1, + multiply_11, + multiply_2, + multiply_5, + multiply_8, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + pool2d_3, + pool2d_4, + pool2d_5, + relu_0, + relu_1, + reshape_0, + reshape_1, + reshape_11, + reshape_12, + reshape_13, + reshape_18, + reshape_19, + reshape_2, + reshape_20, + reshape_25, + reshape_26, + reshape_27, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + scale_13, + scale_14, + scale_15, + scale_21, + scale_22, + scale_23, + scale_29, + scale_30, + scale_31, + scale_5, + scale_6, + scale_7, + sigmoid_0, + sigmoid_1, + sigmoid_10, + sigmoid_11, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + sigmoid_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + split_11, + split_15, + split_3, + split_7, + sqrt_0, + sqrt_1, + sqrt_2, + sqrt_3, + stack_0, + stack_2, + stack_4, + stack_6, + transpose_1, + transpose_10, + transpose_11, + transpose_13, + transpose_14, + transpose_15, + transpose_2, + transpose_3, + transpose_5, + transpose_6, + transpose_7, + transpose_9, + ) + + return concat_0, concat_1, concat_2 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/weight_meta.py new file mode 100644 index 000000000..dacd96db9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/weight_meta.py @@ -0,0 +1,5620 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.451672") + max_val = float("0.40211") + mean = float("-0.00152979") + std = float("0.0380717") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-0.904286") + max_val = float("0.925003") + mean = float("-4.9721e-07") + std = float("0.121513") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.037561") + max_val = float("0.0283741") + mean = float("-0.000202976") + std = float("0.00991333") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [160] + dtype = "float32" + min_val = float("-1.07182") + max_val = float("2.09901") + mean = float("0.324653") + std = float("0.396897") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [160] + dtype = "float32" + min_val = float("0.243511") + max_val = float("2.77916") + mean = float("1.22532") + std = float("0.461907") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [160] + dtype = "float32" + min_val = float("0.0126288") + max_val = float("1.24776") + mean = float("0.149609") + std = float("0.219869") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [160] + dtype = "float32" + min_val = float("-0.456847") + max_val = float("0.462937") + mean = float("-0.0487988") + std = float("0.109537") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.715144") + max_val = float("0.558618") + mean = float("-0.00170393") + std = float("0.0452849") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [160] + dtype = "float32" + min_val = float("-0.0755139") + max_val = float("0.0744949") + mean = float("-0.001172") + std = float("0.0264535") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.0921192") + max_val = float("0.0880416") + mean = float("-0.000425043") + std = float("0.00946252") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [160] + dtype = "float32" + min_val = float("-1.84983") + max_val = float("0.692488") + mean = float("-0.135279") + std = float("0.355861") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [160] + dtype = "float32" + min_val = float("0.71299") + max_val = float("2.80344") + mean = float("1.41886") + std = float("0.405432") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [160] + dtype = "float32" + min_val = float("0.0712181") + max_val = float("2.28274") + mean = float("0.486933") + std = float("0.382657") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [160] + dtype = 
"float32" + min_val = float("-1.07861") + max_val = float("0.6124") + mean = float("-0.0895206") + std = float("0.301753") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.593145") + max_val = float("0.47386") + mean = float("-0.000842452") + std = float("0.0461322") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [160] + dtype = "float32" + min_val = float("-0.442098") + max_val = float("1.22088") + mean = float("0.291083") + std = float("0.346339") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [160] + dtype = "float32" + min_val = float("0.639275") + max_val = float("2.66534") + mean = float("1.19184") + std = float("0.338602") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [160] + dtype = "float32" + min_val = float("0.00422663") + max_val = float("2.49617") + mean = float("0.121762") + std = float("0.278931") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [160] + dtype = "float32" + min_val = float("-0.428698") + max_val = float("1.35627") + mean = float("-0.0248452") + std = float("0.192813") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.677753") + max_val = float("0.719364") + mean = float("-0.00660615") + std = float("0.06683") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [160] + dtype = "float32" + min_val = float("-1.07356") + max_val = float("1.37528") + mean = float("0.0331453") + std = float("0.32668") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [160] + dtype = "float32" + min_val = float("0.292723") + max_val = float("2.19387") + mean = float("1.14408") + std = float("0.323399") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [160] + dtype = "float32" + min_val = float("0.0845633") + max_val = float("2.93402") + mean = float("0.448064") + std = float("0.368566") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [160] + dtype = "float32" + min_val = float("-0.599502") + max_val = float("0.419239") + mean = float("-0.0828125") + std = float("0.183433") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.710848") + max_val = float("0.405476") + mean = float("-0.00136703") + std = float("0.0437224") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [160] + dtype = "float32" + min_val = float("-0.360475") + max_val = float("0.890861") + mean = float("0.165199") + std = float("0.243672") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [160] + dtype = "float32" + min_val = float("0.610547") + max_val = float("2.44242") + mean = float("1.22628") + std = float("0.26549") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [160] + dtype = "float32" + min_val = float("0.00591797") + max_val = float("1.76176") + mean = float("0.116525") + std = float("0.179485") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [160] + dtype = 
"float32" + min_val = float("-0.582364") + max_val = float("0.97157") + mean = float("-0.019624") + std = float("0.172071") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.381607") + max_val = float("0.41571") + mean = float("-0.00621941") + std = float("0.0625369") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [160] + dtype = "float32" + min_val = float("-0.663131") + max_val = float("1.33511") + mean = float("0.100284") + std = float("0.237487") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [160] + dtype = "float32" + min_val = float("0.390136") + max_val = float("2.14002") + mean = float("1.15866") + std = float("0.284742") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [160] + dtype = "float32" + min_val = float("0.0847675") + max_val = float("1.01777") + mean = float("0.338751") + std = float("0.173489") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [160] + dtype = "float32" + min_val = float("-0.711895") + max_val = float("0.560778") + mean = float("-0.121034") + std = float("0.213729") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.510208") + max_val = float("0.374869") + mean = float("-0.00228807") + std = float("0.0421229") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [160] + dtype = "float32" + min_val = float("-0.543995") + max_val = float("0.800117") + mean = float("0.134102") + std = float("0.23574") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [160] + dtype = "float32" + min_val = float("0.74363") + max_val = float("1.94234") + mean = float("1.19489") + std = float("0.236186") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [160] + dtype = "float32" + min_val = float("0.00513067") + max_val = float("0.726009") + mean = float("0.105348") + std = float("0.114407") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [160] + dtype = "float32" + min_val = float("-0.445283") + max_val = float("0.509021") + mean = float("-0.0301795") + std = float("0.128069") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.367576") + max_val = float("0.410868") + mean = float("-0.00762043") + std = float("0.0621168") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [160] + dtype = "float32" + min_val = float("-0.383405") + max_val = float("0.833253") + mean = float("0.0922622") + std = float("0.209172") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [160] + dtype = "float32" + min_val = float("0.252386") + max_val = float("1.86165") + mean = float("1.12765") + std = float("0.309058") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [160] + dtype = "float32" + min_val = float("0.121087") + max_val = float("1.32705") + mean = float("0.412328") + std = float("0.222736") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [160] + dtype = 
"float32" + min_val = float("-0.836442") + max_val = float("0.184503") + mean = float("-0.231937") + std = float("0.212768") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.415422") + max_val = float("0.355092") + mean = float("-0.00417841") + std = float("0.0420669") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [160] + dtype = "float32" + min_val = float("-0.422273") + max_val = float("0.622014") + mean = float("0.0532281") + std = float("0.178263") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [160] + dtype = "float32" + min_val = float("0.90101") + max_val = float("1.87901") + mean = float("1.23585") + std = float("0.18692") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [160] + dtype = "float32" + min_val = float("0.000145575") + max_val = float("0.00890465") + mean = float("0.00152684") + std = float("0.00150321") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [160] + dtype = "float32" + min_val = float("-0.0655207") + max_val = float("0.0684921") + mean = float("0.0020443") + std = float("0.0162943") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.45956") + max_val = float("0.393938") + mean = float("0.000523722") + std = float("0.063645") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.424555") + max_val = float("0.422866") + mean = float("-0.0017929") + std = float("0.0369799") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-1.08544") + max_val = float("0.977937") + mean = float("1.5226e-06") + std = float("0.118597") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.0360483") + max_val = float("0.0314071") + mean = float("4.50498e-05") + std = float("0.00971069") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [160] + dtype = "float32" + min_val = float("-0.659654") + max_val = float("2.93605") + mean = float("0.350705") + std = float("0.462014") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [160] + dtype = "float32" + min_val = float("0.163973") + max_val = float("2.88254") + mean = float("1.06427") + std = float("0.437573") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [160] + dtype = "float32" + min_val = float("0.0129661") + max_val = float("1.65375") + mean = float("0.180274") + std = float("0.244067") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [160] + dtype = "float32" + min_val = float("-0.544308") + max_val = float("0.564071") + mean = float("-0.0721784") + std = float("0.118484") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.726226") + max_val = float("0.672749") + mean = float("-0.00225032") + std = float("0.0438389") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [160] + dtype = "float32" + min_val = float("-0.0909315") + max_val = float("0.064695") + mean = float("-0.00104904") + std = float("0.0246789") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.12545") + max_val = float("0.145251") + mean = float("0.000481838") + std = float("0.010268") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [160] + dtype = "float32" + min_val = float("-0.876391") + max_val = float("0.67437") + mean = float("-0.0779287") + std = float("0.308067") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [160] + dtype = "float32" + min_val = float("0.781682") + max_val = float("2.90871") + mean = float("1.37239") + std = float("0.381122") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [160] + dtype = "float32" + min_val = float("0.063769") + max_val = float("2.09966") + mean = float("0.368504") + std = float("0.323924") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [160] + dtype = "float32" + min_val = float("-0.89422") + max_val = float("0.80638") + mean = float("-0.0975022") + std = float("0.287247") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.520488") + max_val = float("0.472961") + mean = float("-0.000518464") + std = float("0.0439164") + data = None + + +class 
Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [160] + dtype = "float32" + min_val = float("-0.563763") + max_val = float("1.27755") + mean = float("0.259427") + std = float("0.315983") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [160] + dtype = "float32" + min_val = float("0.625383") + max_val = float("2.90957") + mean = float("1.15463") + std = float("0.306727") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [160] + dtype = "float32" + min_val = float("0.00370487") + max_val = float("0.686219") + mean = float("0.0971157") + std = float("0.099276") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [160] + dtype = "float32" + min_val = float("-0.594978") + max_val = float("0.530314") + mean = float("-0.0247584") + std = float("0.125835") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.645252") + max_val = float("0.479844") + mean = float("-0.00523502") + std = float("0.0623693") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [160] + dtype = "float32" + min_val = float("-1.04883") + max_val = float("1.0152") + mean = float("0.0902344") + std = float("0.288197") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [160] + dtype = "float32" + min_val = float("0.105184") + max_val = float("2.14707") + mean = float("1.06571") + std = float("0.312024") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [160] + dtype = "float32" + min_val = float("0.0578899") + max_val = float("1.92796") + mean = float("0.282636") + std = float("0.218511") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [160] + dtype = "float32" + min_val = float("-0.725693") + max_val = float("0.297995") + mean = float("-0.0691685") + std = float("0.17773") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.497628") + max_val = float("0.543799") + mean = float("-0.00114551") + std = float("0.0408437") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [160] + dtype = "float32" + min_val = float("-0.32822") + max_val = float("0.834667") + mean = float("0.159012") + std = float("0.20757") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [160] + dtype = "float32" + min_val = float("0.673088") + max_val = float("2.33987") + mean = float("1.10291") + std = float("0.246948") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [160] + dtype = "float32" + min_val = float("0.00779009") + max_val = float("0.646408") + mean = float("0.08371") + std = float("0.0933709") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [160] + dtype = "float32" + min_val = float("-0.230511") + max_val = float("0.518703") + mean = float("-0.00868607") + std = float("0.115737") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.410639") + max_val = float("0.43519") + mean = float("-0.00602301") + std = float("0.0589259") + data = None + 
+ +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [160] + dtype = "float32" + min_val = float("-0.399247") + max_val = float("0.831806") + mean = float("0.0927125") + std = float("0.188308") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [160] + dtype = "float32" + min_val = float("0.366654") + max_val = float("1.70207") + mean = float("1.0428") + std = float("0.247225") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [160] + dtype = "float32" + min_val = float("0.0849404") + max_val = float("0.9354") + mean = float("0.259847") + std = float("0.141539") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [160] + dtype = "float32" + min_val = float("-0.808483") + max_val = float("0.388932") + mean = float("-0.106369") + std = float("0.189447") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.458533") + max_val = float("0.461653") + mean = float("-0.00186406") + std = float("0.038434") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [160] + dtype = "float32" + min_val = float("-0.382439") + max_val = float("0.922934") + mean = float("0.149185") + std = float("0.186953") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [160] + dtype = "float32" + min_val = float("0.660535") + max_val = float("1.90937") + mean = float("1.09193") + std = float("0.235067") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [160] + dtype = "float32" + min_val = float("0.00632625") + max_val = float("1.30863") + mean = float("0.0772408") + std = float("0.134551") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [160] + dtype = "float32" + min_val = float("-0.201685") + max_val = float("0.875507") + mean = float("-0.017285") + std = float("0.118792") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.311941") + max_val = float("0.562423") + mean = float("-0.00621585") + std = float("0.0568137") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [160] + dtype = "float32" + min_val = float("-0.287732") + max_val = float("1.16097") + mean = float("0.132298") + std = float("0.183646") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [160] + dtype = "float32" + min_val = float("0.211215") + max_val = float("1.82402") + mean = float("1.00369") + std = float("0.242147") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [160] + dtype = "float32" + min_val = float("0.0472753") + max_val = float("0.682787") + mean = float("0.179299") + std = float("0.110573") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [160] + dtype = "float32" + min_val = float("-0.599751") + max_val = float("0.407623") + mean = float("-0.0981103") + std = float("0.185095") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.438028") + max_val = float("0.387091") + mean = float("-0.00204067") + std = 
float("0.0365478") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [160] + dtype = "float32" + min_val = float("-0.132661") + max_val = float("0.957096") + mean = float("0.21293") + std = float("0.187728") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [160] + dtype = "float32" + min_val = float("0.549461") + max_val = float("1.65925") + mean = float("0.94969") + std = float("0.19558") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [160] + dtype = "float32" + min_val = float("0.00809507") + max_val = float("0.553689") + mean = float("0.0654991") + std = float("0.065185") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [160] + dtype = "float32" + min_val = float("-0.40959") + max_val = float("0.325178") + mean = float("-0.0203344") + std = float("0.110218") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.355665") + max_val = float("0.485326") + mean = float("-0.00646158") + std = float("0.0551162") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.418585") + max_val = float("0.287794") + mean = float("-0.000902842") + std = float("0.0370888") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-1.00723") + max_val = float("0.946176") + mean = float("-1.37603e-07") + std = float("0.112668") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [11] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.033747") + max_val = float("0.033244") + mean = float("-0.000100177") + std = float("0.00976961") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [160] + dtype = "float32" + min_val = float("-0.473104") + max_val = float("2.28162") + mean = float("0.455859") + std = float("0.429429") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [160] + dtype = "float32" + min_val = float("0.153135") + max_val = float("2.77831") + mean = float("1.06032") + std = float("0.460141") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [160] + dtype = "float32" + min_val = float("0.028252") + max_val = float("1.57894") + mean = float("0.184387") + std = float("0.252445") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [160] + dtype = "float32" + min_val = float("-0.527912") + max_val = float("0.315128") + mean = float("-0.0796611") + std = float("0.120549") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.595928") + max_val = float("0.52681") + mean = float("-0.00444633") + std = float("0.0427452") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [160] + dtype = "float32" + min_val = float("-0.0941816") + max_val = float("0.044214") + mean = float("-0.00182823") + std = float("0.0210691") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.222857") + max_val = float("0.132849") + mean = float("4.68841e-05") + std = float("0.00998749") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [160] + dtype = "float32" + min_val = float("-1.02208") + max_val = float("0.824308") + mean = float("0.000618564") + std = float("0.267635") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [160] + dtype = "float32" + min_val = float("0.768214") + max_val = float("2.44401") + mean = float("1.30908") + std = float("0.278143") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [160] + dtype = "float32" + min_val = float("0.0739855") + max_val = float("2.13874") + mean = float("0.365692") + std = float("0.331082") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [160] + dtype = "float32" + min_val = float("-0.967535") + max_val = float("0.60134") + mean = float("-0.0942232") + std = float("0.287561") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.445387") + max_val = float("0.623432") + mean = float("-0.00143501") + std = float("0.0421979") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [160] + dtype = "float32" + min_val = float("-0.504627") + max_val = float("1.17406") + mean = float("0.241023") + std = float("0.263767") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [160] + dtype = "float32" + min_val = 
float("0.630854") + max_val = float("1.86667") + mean = float("1.13745") + std = float("0.273988") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [160] + dtype = "float32" + min_val = float("0.00724662") + max_val = float("1.88324") + mean = float("0.11524") + std = float("0.197324") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [160] + dtype = "float32" + min_val = float("-0.301491") + max_val = float("0.971739") + mean = float("0.00941707") + std = float("0.160519") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.315423") + max_val = float("0.38402") + mean = float("-0.00209902") + std = float("0.0552345") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [160] + dtype = "float32" + min_val = float("-0.503908") + max_val = float("1.21487") + mean = float("0.137872") + std = float("0.26326") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [160] + dtype = "float32" + min_val = float("0.172309") + max_val = float("1.70792") + mean = float("1.03854") + std = float("0.206768") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [160] + dtype = "float32" + min_val = float("0.0615198") + max_val = float("1.17115") + mean = float("0.253453") + std = float("0.186745") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [160] + dtype = "float32" + min_val = float("-0.646652") + max_val = float("0.524078") + mean = float("-0.0382538") + std = float("0.194998") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.424875") + max_val = float("0.534562") + mean = float("-0.00104369") + std = float("0.0389634") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [160] + dtype = "float32" + min_val = float("-0.234147") + max_val = float("0.731623") + mean = float("0.180003") + std = float("0.184631") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [160] + dtype = "float32" + min_val = float("0.625246") + max_val = float("2.02582") + mean = float("1.05933") + std = float("0.217853") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [160] + dtype = "float32" + min_val = float("0.00710329") + max_val = float("0.358749") + mean = float("0.0673231") + std = float("0.0610059") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [160] + dtype = "float32" + min_val = float("-0.319319") + max_val = float("0.324394") + mean = float("-0.00847934") + std = float("0.0960882") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.29462") + max_val = float("0.369228") + mean = float("-0.00270671") + std = float("0.0523691") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [160] + dtype = "float32" + min_val = float("-0.724819") + max_val = float("0.621361") + mean = float("0.132312") + std = float("0.17771") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = 
[160] + dtype = "float32" + min_val = float("0.203459") + max_val = float("1.53051") + mean = float("0.979382") + std = float("0.21821") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [160] + dtype = "float32" + min_val = float("0.0695702") + max_val = float("0.70513") + mean = float("0.222329") + std = float("0.121058") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [160] + dtype = "float32" + min_val = float("-0.585708") + max_val = float("0.662883") + mean = float("-0.0244697") + std = float("0.193993") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.392661") + max_val = float("0.438806") + mean = float("-0.000840363") + std = float("0.0359887") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [160] + dtype = "float32" + min_val = float("-0.253079") + max_val = float("0.894769") + mean = float("0.151453") + std = float("0.187756") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [160] + dtype = "float32" + min_val = float("0.617366") + max_val = float("1.68348") + mean = float("1.0562") + std = float("0.193084") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [160] + dtype = "float32" + min_val = float("0.00334686") + max_val = float("1.01018") + mean = float("0.0585245") + std = float("0.0988077") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [160] + dtype = "float32" + min_val = float("-0.457207") + max_val = float("0.551079") + mean = float("-0.00971541") + std = float("0.0988531") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.334558") + max_val = float("0.42574") + mean = float("-0.00336959") + std = float("0.050813") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [160] + dtype = "float32" + min_val = float("-0.176337") + max_val = float("0.738156") + mean = float("0.139001") + std = float("0.154649") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [160] + dtype = "float32" + min_val = float("0.214419") + max_val = float("1.54533") + mean = float("0.966559") + std = float("0.211777") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [160] + dtype = "float32" + min_val = float("0.0407934") + max_val = float("0.775923") + mean = float("0.176859") + std = float("0.113565") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [160] + dtype = "float32" + min_val = float("-0.617671") + max_val = float("0.332231") + mean = float("-0.0968964") + std = float("0.173349") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.395765") + max_val = float("0.340649") + mean = float("-0.00226354") + std = float("0.0347849") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [160] + dtype = "float32" + min_val = float("-0.0645632") + max_val = float("1.14964") + mean = float("0.204171") + std = float("0.170273") + data = None + + +class 
Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [160] + dtype = "float32" + min_val = float("0.592413") + max_val = float("1.50929") + mean = float("0.949999") + std = float("0.161298") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [160] + dtype = "float32" + min_val = float("0.00191379") + max_val = float("0.537653") + mean = float("0.0550924") + std = float("0.0612303") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [160] + dtype = "float32" + min_val = float("-0.377901") + max_val = float("0.208438") + mean = float("-0.0178993") + std = float("0.0913002") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.320478") + max_val = float("0.414538") + mean = float("-0.00386017") + std = float("0.0517183") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.204617") + max_val = float("0.361186") + mean = float("-0.00201719") + std = float("0.0371098") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-1.04587") + max_val = float("1.01364") + mean = float("-4.93601e-08") + std = float("0.103596") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.0296279") + max_val = float("0.0367415") + mean = float("-0.000376476") + std = 
float("0.0097298") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [160] + dtype = "float32" + min_val = float("-0.404605") + max_val = float("3.01833") + mean = float("0.510892") + std = float("0.553243") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [160] + dtype = "float32" + min_val = float("0.211654") + max_val = float("3.17106") + mean = float("0.94806") + std = float("0.46232") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [160] + dtype = "float32" + min_val = float("0.0230276") + max_val = float("1.45291") + mean = float("0.154709") + std = float("0.192249") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [160] + dtype = "float32" + min_val = float("-0.662855") + max_val = float("0.181998") + mean = float("-0.14296") + std = float("0.153176") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.776612") + max_val = float("0.511526") + mean = float("-0.00557896") + std = float("0.0436141") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [160] + dtype = "float32" + min_val = float("-0.0783804") + max_val = float("0.0532268") + mean = float("-0.00183669") + std = float("0.0204536") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.298999") + max_val = float("0.45266") + mean = float("-5.20495e-05") + std = float("0.0138978") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [160] + dtype = "float32" + min_val = float("-0.615485") + max_val = float("0.8689") + mean = float("0.0610505") + std = float("0.300753") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [160] + dtype = "float32" + min_val = float("0.716886") + max_val = float("2.35801") + mean = float("1.29558") + std = float("0.2676") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [160] + dtype = "float32" + min_val = float("0.0813626") + max_val = float("2.58888") + mean = float("0.370419") + std = float("0.331322") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [160] + dtype = "float32" + min_val = float("-1.12713") + max_val = float("1.17168") + mean = float("-0.182178") + std = float("0.3495") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.419544") + max_val = float("0.585562") + mean = float("-0.00221494") + std = float("0.0441223") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [160] + dtype = "float32" + min_val = float("-0.362493") + max_val = float("1.69386") + mean = float("0.338417") + std = float("0.401562") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [160] + dtype = "float32" + min_val = float("0.413905") + max_val = float("2.33428") + mean = float("1.04926") + std = float("0.317775") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [160] + dtype = "float32" + min_val = float("0.00226846") + max_val = float("1.17693") + 
mean = float("0.109769") + std = float("0.147545") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [160] + dtype = "float32" + min_val = float("-0.57416") + max_val = float("0.802464") + mean = float("0.0174315") + std = float("0.148545") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.336342") + max_val = float("0.468928") + mean = float("0.000807566") + std = float("0.0546926") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [160] + dtype = "float32" + min_val = float("-0.872361") + max_val = float("1.08374") + mean = float("0.181143") + std = float("0.27821") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [160] + dtype = "float32" + min_val = float("0.17523") + max_val = float("1.5892") + mean = float("0.975911") + std = float("0.226788") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [160] + dtype = "float32" + min_val = float("0.0594399") + max_val = float("1.00202") + mean = float("0.272614") + std = float("0.166785") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [160] + dtype = "float32" + min_val = float("-0.665054") + max_val = float("0.347149") + mean = float("-0.0513694") + std = float("0.183852") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.37568") + max_val = float("0.520283") + mean = float("-0.00145726") + std = float("0.0411825") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [160] + dtype = "float32" + min_val = float("-1.38304") + max_val = float("1.0455") + mean = float("0.204048") + std = float("0.252159") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [160] + dtype = "float32" + min_val = float("0.44976") + max_val = float("3.8715") + mean = float("1.05138") + std = float("0.361932") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [160] + dtype = "float32" + min_val = float("0.00707063") + max_val = float("0.53858") + mean = float("0.0704383") + std = float("0.0707053") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [160] + dtype = "float32" + min_val = float("-0.251593") + max_val = float("0.246237") + mean = float("0.00934997") + std = float("0.10303") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.300395") + max_val = float("0.444947") + mean = float("-0.00141088") + std = float("0.053944") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [160] + dtype = "float32" + min_val = float("-1.11263") + max_val = float("0.713805") + mean = float("0.135969") + std = float("0.23543") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [160] + dtype = "float32" + min_val = float("0.217977") + max_val = float("1.56454") + mean = float("0.970716") + std = float("0.20592") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [160] + dtype = "float32" + min_val = float("0.0813308") + 
max_val = float("0.855935") + mean = float("0.242214") + std = float("0.131087") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [160] + dtype = "float32" + min_val = float("-0.65524") + max_val = float("0.435905") + mean = float("-0.0677146") + std = float("0.193093") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.391635") + max_val = float("0.346344") + mean = float("-0.00172049") + std = float("0.040493") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [160] + dtype = "float32" + min_val = float("-0.172788") + max_val = float("0.901559") + mean = float("0.179591") + std = float("0.219919") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [160] + dtype = "float32" + min_val = float("0.589422") + max_val = float("2.04694") + mean = float("1.03347") + std = float("0.217318") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [160] + dtype = "float32" + min_val = float("0.00577088") + max_val = float("0.891107") + mean = float("0.076133") + std = float("0.08939") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [160] + dtype = "float32" + min_val = float("-0.31904") + max_val = float("0.425984") + mean = float("0.00616079") + std = float("0.0995321") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.307824") + max_val = float("0.455428") + mean = float("-0.000303266") + std = float("0.0582493") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [160] + dtype = "float32" + min_val = float("-1.29751") + max_val = float("0.618497") + mean = float("0.0908566") + std = float("0.224292") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [160] + dtype = "float32" + min_val = float("0.163975") + max_val = float("1.59063") + mean = float("0.988599") + std = float("0.196965") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [160] + dtype = "float32" + min_val = float("0.0980709") + max_val = float("0.954687") + mean = float("0.259283") + std = float("0.124802") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [160] + dtype = "float32" + min_val = float("-0.602555") + max_val = float("0.605119") + mean = float("-0.0989181") + std = float("0.207909") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.266592") + max_val = float("0.378634") + mean = float("-0.00244535") + std = float("0.0408722") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [160] + dtype = "float32" + min_val = float("-0.261416") + max_val = float("1.02063") + mean = float("0.194339") + std = float("0.204167") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [160] + dtype = "float32" + min_val = float("0.695953") + max_val = float("1.66219") + mean = float("1.00801") + std = float("0.189445") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [160] + dtype = 
"float32" + min_val = float("0.00390561") + max_val = float("0.320329") + mean = float("0.0614989") + std = float("0.0501493") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [160] + dtype = "float32" + min_val = float("-0.34389") + max_val = float("0.264256") + mean = float("0.0075132") + std = float("0.0876149") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.395024") + max_val = float("0.408529") + mean = float("0.00220735") + std = float("0.0604683") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [160] + dtype = "float32" + min_val = float("-0.0916471") + max_val = float("0.127688") + mean = float("0.0160653") + std = float("0.0399376") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [160] + dtype = "float32" + min_val = float("-0.000519209") + max_val = float("0.295861") + mean = float("0.112347") + std = float("0.0571506") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [160] + dtype = "float32" + min_val = float("0.00021794") + max_val = float("0.0154011") + mean = float("0.0023316") + std = float("0.00218119") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [160] + dtype = "float32" + min_val = float("-0.0354885") + max_val = float("0.030553") + mean = float("0.0010192") + std = float("0.0103486") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.420773") + max_val = float("0.378611") + mean = float("-0.000362605") + std = float("0.032585") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [160] + dtype = "float32" + min_val = float("-0.38247") + max_val = float("0.128987") + mean = float("-0.00144232") + std = float("0.0360031") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [160] + dtype = "float32" + min_val = float("0.0496768") + max_val = float("0.478814") + mean = float("0.141016") + std = float("0.0472787") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [160] + dtype = "float32" + min_val = float("0.00185439") + max_val = float("1.35649") + mean = float("0.0778274") + std = float("0.140828") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [160] + dtype = "float32" + min_val = float("-0.508238") + max_val = float("0.291324") + mean = float("0.00327011") + std = float("0.10682") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.286359") + max_val = float("0.318569") + mean = float("0.000947208") + std = float("0.0497441") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [160] + dtype = "float32" + min_val = float("-0.104424") + max_val = float("0.131362") + mean = float("0.00521792") + std = float("0.0379529") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [160] + dtype = "float32" + min_val = float("-0.000914956") + max_val = float("0.226723") + mean = float("0.0813318") + std = float("0.0622218") + data = None + + +class 
Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [160] + dtype = "float32" + min_val = float("8.87405e-05") + max_val = float("0.00461226") + mean = float("0.00112684") + std = float("0.00101658") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [160] + dtype = "float32" + min_val = float("-0.00706023") + max_val = float("0.00518772") + mean = float("-0.000453599") + std = float("0.00210728") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.238044") + max_val = float("0.257333") + mean = float("0.000115835") + std = float("0.0255221") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [160] + dtype = "float32" + min_val = float("-0.0598583") + max_val = float("0.0265863") + mean = float("-0.00141753") + std = float("0.010431") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [160] + dtype = "float32" + min_val = float("0.0318376") + max_val = float("0.226653") + mean = float("0.116231") + std = float("0.0381671") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [160] + dtype = "float32" + min_val = float("1.00323e-05") + max_val = float("0.0057148") + mean = float("0.000452559") + std = float("0.000777989") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [160] + dtype = "float32" + min_val = float("-0.0328169") + max_val = float("0.0308263") + mean = float("-0.000512141") + std = float("0.00568475") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.145296") + max_val = float("0.1908") + mean = float("0.00165077") + std = float("0.0324536") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [160] + dtype = "float32" + min_val = float("-0.366658") + max_val = float("0.788532") + mean = float("0.143149") + std = float("0.181068") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [160] + dtype = "float32" + min_val = float("0.293463") + max_val = float("2.18919") + mean = float("0.996796") + std = float("0.304708") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [160] + dtype = "float32" + min_val = float("0.165949") + max_val = float("1.35663") + mean = float("0.397061") + std = float("0.166115") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [160] + dtype = "float32" + min_val = float("-0.510099") + max_val = float("0.287998") + mean = float("-0.115685") + std = float("0.148159") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.335618") + max_val = float("0.340549") + mean = float("-0.00159872") + std = float("0.0336131") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [320] + dtype = "float32" + min_val = float("-0.325007") + max_val = float("0.579959") + mean = float("0.0603047") + std = float("0.11966") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [320] + dtype = "float32" + min_val = float("0.638979") + max_val = float("1.67871") + 
mean = float("1.02728") + std = float("0.180331") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [320] + dtype = "float32" + min_val = float("0.00765645") + max_val = float("0.320562") + mean = float("0.0492947") + std = float("0.0408075") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [320] + dtype = "float32" + min_val = float("-0.174864") + max_val = float("0.20281") + mean = float("-0.0061222") + std = float("0.0654713") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.303681") + max_val = float("0.371919") + mean = float("-0.00228842") + std = float("0.052015") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [320] + dtype = "float32" + min_val = float("-0.283963") + max_val = float("0.387259") + mean = float("0.0424627") + std = float("0.0900174") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [320] + dtype = "float32" + min_val = float("0.539323") + max_val = float("1.60164") + mean = float("1.01945") + std = float("0.133319") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [320] + dtype = "float32" + min_val = float("0.0723645") + max_val = float("0.810574") + mean = float("0.220159") + std = float("0.108775") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [320] + dtype = "float32" + min_val = float("-0.696386") + max_val = float("0.283183") + mean = float("-0.0887224") + std = float("0.145118") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.403743") + max_val = float("0.300503") + mean = float("-0.00119202") + std = float("0.0270677") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [320] + dtype = "float32" + min_val = float("-0.678081") + max_val = float("0.462785") + mean = float("0.0461409") + std = float("0.133269") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [320] + dtype = "float32" + min_val = float("0.602873") + max_val = float("1.90683") + mean = float("1.04791") + std = float("0.17735") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [320] + dtype = "float32" + min_val = float("2.42232e-05") + max_val = float("0.0209588") + mean = float("0.000617205") + std = float("0.00199324") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [320] + dtype = "float32" + min_val = float("-0.0466847") + max_val = float("0.0227657") + mean = float("0.000364594") + std = float("0.00601126") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.467103") + max_val = float("0.45422") + mean = float("-0.000468492") + std = float("0.0546429") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [160] + dtype = "float32" + min_val = float("-0.0955889") + max_val = float("0.0915525") + mean = float("0.00151717") + std = float("0.0268781") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [160] + dtype = "float32" 
+ min_val = float("0.0494931") + max_val = float("0.360592") + mean = float("0.0757804") + std = float("0.0325679") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [160] + dtype = "float32" + min_val = float("0.000674402") + max_val = float("0.0130887") + mean = float("0.00278946") + std = float("0.00203688") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [160] + dtype = "float32" + min_val = float("-0.0490614") + max_val = float("0.0589418") + mean = float("0.000897683") + std = float("0.0181164") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.434495") + max_val = float("0.287076") + mean = float("-0.000298666") + std = float("0.0345373") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [160] + dtype = "float32" + min_val = float("-0.451926") + max_val = float("0.167768") + mean = float("-0.0024945") + std = float("0.060907") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [160] + dtype = "float32" + min_val = float("0.0621479") + max_val = float("0.416915") + mean = float("0.15103") + std = float("0.0509463") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [160] + dtype = "float32" + min_val = float("0.000587313") + max_val = float("1.29081") + mean = float("0.0892401") + std = float("0.183271") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [160] + dtype = "float32" + min_val = float("-0.202486") + max_val = float("0.226446") + mean = float("-0.00146678") + std = float("0.0812405") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.261006") + max_val = float("0.274899") + mean = float("-0.000778058") + std = float("0.0529501") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [160] + dtype = "float32" + min_val = float("-0.729315") + max_val = float("0.954105") + mean = float("0.130556") + std = float("0.202177") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [160] + dtype = "float32" + min_val = float("0.0863276") + max_val = float("1.7885") + mean = float("0.96807") + std = float("0.290005") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [160] + dtype = "float32" + min_val = float("0.159542") + max_val = float("1.95533") + mean = float("0.452297") + std = float("0.224753") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [160] + dtype = "float32" + min_val = float("-0.675731") + max_val = float("0.36319") + mean = float("-0.0838155") + std = float("0.167693") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.599902") + max_val = float("0.259147") + mean = float("-0.00115435") + std = float("0.0341926") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [320] + dtype = "float32" + min_val = float("-0.92609") + max_val = float("0.673163") + mean = float("0.0527093") + std = float("0.116354") + data = None + + +class 
Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [320] + dtype = "float32" + min_val = float("0.638015") + max_val = float("1.80955") + mean = float("1.03439") + std = float("0.184484") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [320] + dtype = "float32" + min_val = float("0.00915332") + max_val = float("1.316") + mean = float("0.0603053") + std = float("0.108299") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [320] + dtype = "float32" + min_val = float("-0.262936") + max_val = float("0.331022") + mean = float("0.00570133") + std = float("0.0657631") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.308478") + max_val = float("0.467028") + mean = float("0.00120659") + std = float("0.0527592") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [320] + dtype = "float32" + min_val = float("-0.499203") + max_val = float("0.475238") + mean = float("0.0198587") + std = float("0.105267") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [320] + dtype = "float32" + min_val = float("0.613968") + max_val = float("1.96029") + mean = float("1.03858") + std = float("0.147808") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [320] + dtype = "float32" + min_val = float("0.0616902") + max_val = float("1.36857") + mean = float("0.279731") + std = float("0.187516") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [320] + dtype = "float32" + min_val = float("-0.525324") + max_val = float("0.311563") + mean = float("-0.0929486") + std = float("0.135111") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.592303") + max_val = float("0.342983") + mean = float("-0.00122471") + std = float("0.0278743") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [320] + dtype = "float32" + min_val = float("-0.558451") + max_val = float("0.610437") + mean = float("0.0479997") + std = float("0.10956") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [320] + dtype = "float32" + min_val = float("0.684348") + max_val = float("1.74172") + mean = float("1.06726") + std = float("0.178347") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [320] + dtype = "float32" + min_val = float("4.10526e-05") + max_val = float("0.932407") + mean = float("0.0259055") + std = float("0.0651789") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [320] + dtype = "float32" + min_val = float("-0.294994") + max_val = float("0.567246") + mean = float("0.00214224") + std = float("0.0692845") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.368087") + max_val = float("0.521582") + mean = float("0.000476397") + std = float("0.0552925") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [160] + dtype = "float32" + min_val = float("-0.064152") + max_val = float("0.102876") + mean = float("0.00285201") + std 
= float("0.0274364") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [160] + dtype = "float32" + min_val = float("0.0507335") + max_val = float("0.288115") + mean = float("0.0753076") + std = float("0.0260075") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [160] + dtype = "float32" + min_val = float("0.000758428") + max_val = float("0.0128635") + mean = float("0.00274818") + std = float("0.00226367") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [160] + dtype = "float32" + min_val = float("-0.0306867") + max_val = float("0.0173651") + mean = float("-0.000156114") + std = float("0.00872228") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.352594") + max_val = float("0.373132") + mean = float("0.000200218") + std = float("0.0358407") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [160] + dtype = "float32" + min_val = float("-0.192671") + max_val = float("0.130221") + mean = float("0.00757929") + std = float("0.0297546") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [160] + dtype = "float32" + min_val = float("0.0506338") + max_val = float("0.42458") + mean = float("0.145721") + std = float("0.0470026") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [160] + dtype = "float32" + min_val = float("0.000654232") + max_val = float("0.675557") + mean = float("0.0759739") + std = float("0.0974847") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [160] + dtype = "float32" + min_val = float("-0.240897") + max_val = float("0.284229") + mean = float("0.00318399") + std = float("0.0753754") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.287618") + max_val = float("0.278138") + mean = float("0.000243777") + std = float("0.0542145") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [160] + dtype = "float32" + min_val = float("-1.03285") + max_val = float("0.701057") + mean = float("-0.0119199") + std = float("0.250376") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [160] + dtype = "float32" + min_val = float("0.160164") + max_val = float("1.70375") + mean = float("1.02213") + std = float("0.248355") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [160] + dtype = "float32" + min_val = float("0.166161") + max_val = float("0.974438") + mean = float("0.466128") + std = float("0.176342") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [160] + dtype = "float32" + min_val = float("-0.563735") + max_val = float("0.328191") + mean = float("-0.147509") + std = float("0.167493") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.296923") + max_val = float("0.264572") + mean = float("-0.00240147") + std = float("0.0419501") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [320] + dtype = "float32" + min_val = 
float("-0.538052") + max_val = float("0.740734") + mean = float("0.0345543") + std = float("0.160809") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [320] + dtype = "float32" + min_val = float("0.61467") + max_val = float("1.89545") + mean = float("1.02775") + std = float("0.212336") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [320] + dtype = "float32" + min_val = float("0.014492") + max_val = float("0.577139") + mean = float("0.0762005") + std = float("0.0784563") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [320] + dtype = "float32" + min_val = float("-0.288792") + max_val = float("0.441012") + mean = float("0.00148707") + std = float("0.0806682") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.29373") + max_val = float("0.501427") + mean = float("0.000950971") + std = float("0.0658203") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [320] + dtype = "float32" + min_val = float("-1.01408") + max_val = float("0.351836") + mean = float("-0.117835") + std = float("0.221703") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [320] + dtype = "float32" + min_val = float("0.65583") + max_val = float("1.76944") + mean = float("1.08473") + std = float("0.157102") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [320] + dtype = "float32" + min_val = float("0.139012") + max_val = float("1.63438") + mean = float("0.421815") + std = float("0.242925") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [320] + dtype = "float32" + min_val = float("-0.818612") + max_val = float("0.403054") + mean = float("-0.155348") + std = float("0.22407") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.471786") + max_val = float("0.389691") + mean = float("-0.00206644") + std = float("0.0380648") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [320] + dtype = "float32" + min_val = float("-0.513633") + max_val = float("0.556947") + mean = float("0.0689954") + std = float("0.169924") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [320] + dtype = "float32" + min_val = float("0.601971") + max_val = float("1.8328") + mean = float("1.07716") + std = float("0.20982") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [320] + dtype = "float32" + min_val = float("0.000115374") + max_val = float("0.816909") + mean = float("0.0414171") + std = float("0.082505") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [320] + dtype = "float32" + min_val = float("-0.473273") + max_val = float("0.506943") + mean = float("-0.00231578") + std = float("0.0876511") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.571639") + max_val = float("0.559788") + mean = float("0.00222688") + std = float("0.0693936") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = 
[160] + dtype = "float32" + min_val = float("-0.303664") + max_val = float("0.766095") + mean = float("0.146005") + std = float("0.150233") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [160] + dtype = "float32" + min_val = float("0.322373") + max_val = float("1.79121") + mean = float("0.961733") + std = float("0.201222") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [160] + dtype = "float32" + min_val = float("0.167042") + max_val = float("1.26962") + mean = float("0.444816") + std = float("0.195355") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [160] + dtype = "float32" + min_val = float("-0.47987") + max_val = float("0.242786") + mean = float("-0.136228") + std = float("0.163235") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.473231") + max_val = float("0.430141") + mean = float("-0.00229155") + std = float("0.0362895") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [320] + dtype = "float32" + min_val = float("-0.463304") + max_val = float("0.40612") + mean = float("0.0464801") + std = float("0.104339") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [320] + dtype = "float32" + min_val = float("0.599446") + max_val = float("1.81672") + mean = float("1.03751") + std = float("0.188931") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [320] + dtype = "float32" + min_val = float("0.00567643") + max_val = float("0.412399") + mean = float("0.0607226") + std = float("0.0429332") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [320] + dtype = "float32" + min_val = float("-0.261185") + max_val = float("0.206648") + mean = float("-0.00222504") + std = float("0.0712637") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.266933") + max_val = float("0.561481") + mean = float("-0.000584022") + std = float("0.0532972") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [320] + dtype = "float32" + min_val = float("-0.525364") + max_val = float("0.371293") + mean = float("0.0164402") + std = float("0.0992592") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [320] + dtype = "float32" + min_val = float("0.406108") + max_val = float("1.53855") + mean = float("1.04307") + std = float("0.117238") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [320] + dtype = "float32" + min_val = float("0.0786736") + max_val = float("0.976154") + mean = float("0.268646") + std = float("0.140364") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [320] + dtype = "float32" + min_val = float("-0.677735") + max_val = float("0.258277") + mean = float("-0.131811") + std = float("0.157283") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.320809") + max_val = float("0.308654") + mean = float("-0.00194247") + std = float("0.0300297") + data = None + + +class 
Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [320] + dtype = "float32" + min_val = float("-0.650676") + max_val = float("1.19757") + mean = float("0.0316925") + std = float("0.144454") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [320] + dtype = "float32" + min_val = float("0.780626") + max_val = float("2.04192") + mean = float("1.06005") + std = float("0.150166") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [320] + dtype = "float32" + min_val = float("3.50585e-05") + max_val = float("0.0189308") + mean = float("0.000697837") + std = float("0.00182815") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [320] + dtype = "float32" + min_val = float("-0.0429151") + max_val = float("0.0221086") + mean = float("-0.00152369") + std = float("0.00734193") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.438594") + max_val = float("0.557348") + mean = float("-0.000597336") + std = float("0.0528026") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [160] + dtype = "float32" + min_val = float("-0.14466") + max_val = float("0.13358") + mean = float("0.00574996") + std = float("0.0414271") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [160] + dtype = "float32" + min_val = float("0.0615052") + max_val = float("0.442104") + mean = float("0.128543") + std = float("0.0637476") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [160] + dtype = "float32" + min_val = float("1.57758") + max_val = float("13.6128") + mean = float("4.57121") + std = float("2.05748") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [160] + dtype = "float32" + min_val = float("-2.08866") + max_val = float("1.31168") + mean = float("-0.0645157") + std = float("0.543478") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [160, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.20738") + max_val = float("0.283428") + mean = float("-0.000216307") + std = float("0.0259136") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [160] + dtype = "float32" + min_val = float("-0.174329") + max_val = float("0.0557949") + mean = float("-0.00199509") + std = float("0.0293572") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [160] + dtype = "float32" + min_val = float("0.0454392") + max_val = float("0.530007") + mean = float("0.0755805") + std = float("0.0397847") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [160] + dtype = "float32" + min_val = float("0.0681452") + max_val = float("0.632142") + mean = float("0.230693") + std = float("0.105456") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [160] + dtype = "float32" + min_val = float("-0.663757") + max_val = float("0.622029") + mean = float("-0.0115387") + std = float("0.205437") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [160, 512, 1, 1] + dtype = "float32" + min_val = float("-0.21389") + max_val = float("0.191285") + mean = 
float("-0.000540478") + std = float("0.0221755") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [160] + dtype = "float32" + min_val = float("-0.180738") + max_val = float("0.309843") + mean = float("-0.00620432") + std = float("0.0523734") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [160] + dtype = "float32" + min_val = float("0.0468802") + max_val = float("0.593814") + mean = float("0.115207") + std = float("0.0780899") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [160] + dtype = "float32" + min_val = float("0.0732588") + max_val = float("0.941683") + mean = float("0.23582") + std = float("0.136469") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [160] + dtype = "float32" + min_val = float("-0.964533") + max_val = float("0.691684") + mean = float("0.0248407") + std = float("0.250012") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [160, 256, 1, 1] + dtype = "float32" + min_val = float("-0.286741") + max_val = float("0.368002") + mean = float("-0.00153527") + std = float("0.0427747") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [1024] + dtype = "float32" + min_val = float("-4.68912") + max_val = float("4.42206") + mean = float("-2.15878") + std = float("0.606116") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("2.50671") + max_val = float("4.49836") + mean = float("3.42156") + std = float("0.272125") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("0.114807") + max_val = float("1.30505") + mean = float("0.252183") + std = float("0.0846458") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("-1.0941") + max_val = float("0.758985") + mean = float("-0.16949") + std = float("0.253669") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.249355") + max_val = float("0.262998") + mean = float("-0.000961395") + std = float("0.0170622") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024] + dtype = "float32" + min_val = float("-0.0482001") + max_val = float("0.059245") + mean = float("-0.00315341") + std = float("0.00853463") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.449643") + max_val = float("0.317283") + mean = float("-0.000775906") + std = float("0.00756664") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [256] + dtype = "float32" + min_val = float("-0.000764426") + max_val = float("0.0140249") + mean = float("0.00116222") + std = float("0.00201421") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.214264") + max_val = float("0.194266") + mean = float("0.000280634") + std = float("0.00702432") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [1024] 
+ dtype = "float32" + min_val = float("-3.63457") + max_val = float("3.58613") + mean = float("-0.0619608") + std = float("0.686025") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [1024] + dtype = "float32" + min_val = float("0.244121") + max_val = float("3.5711") + mean = float("1.71407") + std = float("0.411809") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [1024] + dtype = "float32" + min_val = float("0.000135832") + max_val = float("1.16449") + mean = float("0.0216118") + std = float("0.0679511") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [1024] + dtype = "float32" + min_val = float("-1.58342") + max_val = float("0.209772") + mean = float("-0.0678486") + std = float("0.0956894") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [1024, 1, 5, 5] + dtype = "float32" + min_val = float("-0.462205") + max_val = float("0.348257") + mean = float("0.00784729") + std = float("0.0436159") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [1024] + dtype = "float32" + min_val = float("-2.83062") + max_val = float("2.2952") + mean = float("-1.25227") + std = float("0.788125") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [1024] + dtype = "float32" + min_val = float("-0.223706") + max_val = float("2.34621") + mean = float("1.0473") + std = float("0.33231") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [1024] + dtype = "float32" + min_val = float("0.0188764") + max_val = float("0.439232") + mean = float("0.112459") + std = float("0.0527284") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [1024] + dtype = "float32" + min_val = float("-1.07302") + max_val = float("0.623486") + mean = float("-0.0674045") + std = float("0.227021") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [1024, 512, 1, 1] + dtype = "float32" + min_val = float("-0.316907") + max_val = float("0.413458") + mean = float("-0.000568661") + std = float("0.0234734") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [512] + dtype = "float32" + min_val = float("-0.0405434") + max_val = float("0.0506635") + mean = float("-0.00656755") + std = float("0.0107446") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.199278") + max_val = float("0.209998") + mean = float("-0.00413584") + std = float("0.0173349") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [128] + dtype = "float32" + min_val = float("-0.00203147") + max_val = float("0.008193") + mean = float("0.00277893") + std = float("0.00221782") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.206761") + max_val = float("0.236064") + mean = float("0.00202451") + std = float("0.0145962") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [512] + dtype = "float32" + min_val = float("-1.75388") + max_val = float("2.23245") + mean = float("0.474604") + std = float("0.511719") + data = None + + +class 
Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [512] + dtype = "float32" + min_val = float("0.60578") + max_val = float("3.435") + mean = float("1.3299") + std = float("0.392424") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [512] + dtype = "float32" + min_val = float("0.000190141") + max_val = float("2.24988") + mean = float("0.0374706") + std = float("0.113901") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [512] + dtype = "float32" + min_val = float("-1.06194") + max_val = float("0.749927") + mean = float("-0.0969963") + std = float("0.154428") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.163015") + max_val = float("0.227604") + mean = float("0.00464727") + std = float("0.0407666") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [512] + dtype = "float32" + min_val = float("-2.78678") + max_val = float("3.02329") + mean = float("-0.939681") + std = float("0.884703") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [512] + dtype = "float32" + min_val = float("-1.11576") + max_val = float("2.53483") + mean = float("0.995106") + std = float("0.474605") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [512] + dtype = "float32" + min_val = float("0.268917") + max_val = float("4.88345") + mean = float("0.765431") + std = float("0.393111") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [512] + dtype = "float32" + min_val = float("-3.97724") + max_val = float("2.59091") + mean = float("-0.423808") + std = float("1.01634") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.345357") + max_val = float("0.48923") + mean = float("-0.00131086") + std = float("0.0302391") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [512] + dtype = "float32" + min_val = float("-4.75827") + max_val = float("5.02772") + mean = float("0.869296") + std = float("1.15298") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [512] + dtype = "float32" + min_val = float("0.558573") + max_val = float("3.56154") + mean = float("1.12752") + std = float("0.431084") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [512] + dtype = "float32" + min_val = float("0.000611171") + max_val = float("3.33976") + mean = float("0.0606918") + std = float("0.18664") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [512] + dtype = "float32" + min_val = float("-2.47602") + max_val = float("0.298231") + mean = float("-0.0807941") + std = float("0.17569") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.520727") + max_val = float("0.617364") + mean = float("0.00329979") + std = float("0.0550278") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [512] + dtype = "float32" + min_val = float("-3.44931") + max_val = float("2.01942") + mean = float("-0.946755") + std = float("0.852542") + 
data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [512] + dtype = "float32" + min_val = float("-0.219734") + max_val = float("2.85208") + mean = float("1.04517") + std = float("0.419819") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [512] + dtype = "float32" + min_val = float("0.159668") + max_val = float("2.3567") + mean = float("0.795323") + std = float("0.29348") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [512] + dtype = "float32" + min_val = float("-4.49542") + max_val = float("2.63459") + mean = float("-0.521131") + std = float("1.03299") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.360096") + max_val = float("0.458412") + mean = float("-0.00139282") + std = float("0.0312964") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [512] + dtype = "float32" + min_val = float("-3.69947") + max_val = float("4.51505") + mean = float("0.914451") + std = float("1.10958") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [512] + dtype = "float32" + min_val = float("0.479124") + max_val = float("2.8155") + mean = float("1.06738") + std = float("0.416471") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [512] + dtype = "float32" + min_val = float("0.000135173") + max_val = float("1.29277") + mean = float("0.0789064") + std = float("0.153896") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [512] + dtype = "float32" + min_val = float("-2.05229") + max_val = float("0.833247") + mean = float("-0.0787151") + std = float("0.21077") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.682349") + max_val = float("0.569003") + mean = float("0.00184804") + std = float("0.0596565") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [512] + dtype = "float32" + min_val = float("-3.60406") + max_val = float("3.57973") + mean = float("-0.792669") + std = float("0.920174") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [512] + dtype = "float32" + min_val = float("0.0673074") + max_val = float("2.8763") + mean = float("1.10539") + std = float("0.461442") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [512] + dtype = "float32" + min_val = float("0.171814") + max_val = float("2.74686") + mean = float("0.773848") + std = float("0.351669") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [512] + dtype = "float32" + min_val = float("-3.44215") + max_val = float("2.52678") + mean = float("-0.199132") + std = float("0.906135") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.406502") + max_val = float("0.38124") + mean = float("-0.000941147") + std = float("0.030964") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [512] + dtype = "float32" + min_val = float("-2.99077") + max_val = float("4.07165") + mean = float("0.720978") + std = 
float("1.18939") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [512] + dtype = "float32" + min_val = float("0.532099") + max_val = float("3.00526") + mean = float("1.08374") + std = float("0.445429") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [512] + dtype = "float32" + min_val = float("0.000115899") + max_val = float("1.16053") + mean = float("0.0801732") + std = float("0.163527") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [512] + dtype = "float32" + min_val = float("-3.78834") + max_val = float("0.504813") + mean = float("-0.0724625") + std = float("0.25369") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.661221") + max_val = float("0.517245") + mean = float("0.00180059") + std = float("0.0604124") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [512] + dtype = "float32" + min_val = float("-3.389") + max_val = float("2.63184") + mean = float("-0.652219") + std = float("0.873705") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [512] + dtype = "float32" + min_val = float("0.0574002") + max_val = float("2.69881") + mean = float("1.04787") + std = float("0.520278") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [512] + dtype = "float32" + min_val = float("0.13844") + max_val = float("4.46918") + mean = float("0.956979") + std = float("0.466403") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [512] + dtype = "float32" + min_val = float("-3.92327") + max_val = float("2.23584") + mean = float("-0.520004") + std = float("1.0259") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.397854") + max_val = float("0.566234") + mean = float("-0.00126475") + std = float("0.0302577") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [512] + dtype = "float32" + min_val = float("-3.63696") + max_val = float("3.69657") + mean = float("0.75971") + std = float("1.19915") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [512] + dtype = "float32" + min_val = float("0.56869") + max_val = float("2.96831") + mean = float("1.16292") + std = float("0.471643") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [512] + dtype = "float32" + min_val = float("0.000121099") + max_val = float("1.41941") + mean = float("0.0803492") + std = float("0.164482") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [512] + dtype = "float32" + min_val = float("-1.52797") + max_val = float("1.03893") + mean = float("-0.0789841") + std = float("0.230021") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.599808") + max_val = float("0.547304") + mean = float("0.00183313") + std = float("0.061842") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.0674") + max_val = float("2.7392") + mean = 
float("-0.477427") + std = float("0.864982") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.066498") + max_val = float("3.75266") + mean = float("1.03868") + std = float("0.557475") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.325652") + max_val = float("5.07868") + mean = float("1.0389") + std = float("0.556766") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-2.62398") + max_val = float("1.54129") + mean = float("-0.330855") + std = float("0.757658") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.408078") + max_val = float("0.371991") + mean = float("-0.001306") + std = float("0.0291745") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [512] + dtype = "float32" + min_val = float("-6.01235") + max_val = float("3.82419") + mean = float("0.163311") + std = float("1.27245") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [512] + dtype = "float32" + min_val = float("0.596057") + max_val = float("4.72807") + mean = float("1.43227") + std = float("0.506725") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [512] + dtype = "float32" + min_val = float("8.18619e-05") + max_val = float("2.17777") + mean = float("0.0758148") + std = float("0.178966") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [512] + dtype = "float32" + min_val = float("-2.1815") + max_val = float("0.760654") + mean = float("-0.0578169") + std = float("0.229077") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.421493") + max_val = float("0.476019") + mean = float("-0.0016388") + std = float("0.0698465") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [512] + dtype = "float32" + min_val = float("-2.01371") + max_val = float("2.2049") + mean = float("-0.00690706") + std = float("0.636127") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [512] + dtype = "float32" + min_val = float("-0.510236") + max_val = float("3.78718") + mean = float("0.826749") + std = float("0.629704") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [512] + dtype = "float32" + min_val = float("0.172121") + max_val = float("2.04176") + mean = float("0.574756") + std = float("0.28068") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [512] + dtype = "float32" + min_val = float("-3.02785") + max_val = float("2.53578") + mean = float("-0.182285") + std = float("0.907337") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.330583") + max_val = float("0.293063") + mean = float("-0.000545234") + std = float("0.0343697") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [256] + dtype = "float32" + min_val = float("-2.24034") + max_val = 
float("3.88891") + mean = float("1.51435") + std = float("1.09445") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [256] + dtype = "float32" + min_val = float("0.438077") + max_val = float("3.00471") + mean = float("0.910995") + std = float("0.386137") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [256] + dtype = "float32" + min_val = float("0.000191022") + max_val = float("0.176179") + mean = float("0.0281432") + std = float("0.0242791") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [256] + dtype = "float32" + min_val = float("-0.815683") + max_val = float("0.569976") + mean = float("-0.0747333") + std = float("0.144234") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [256, 1, 3, 3] + dtype = "float32" + min_val = float("-0.276007") + max_val = float("0.242465") + mean = float("0.0194744") + std = float("0.0874246") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [256] + dtype = "float32" + min_val = float("-3.04801") + max_val = float("2.06684") + mean = float("-0.813741") + std = float("0.935085") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [256] + dtype = "float32" + min_val = float("0.0744769") + max_val = float("1.99524") + mean = float("1.06005") + std = float("0.343784") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [256] + dtype = "float32" + min_val = float("0.240087") + max_val = float("2.4245") + mean = float("0.851561") + std = float("0.339262") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [256] + dtype = "float32" + min_val = float("-4.71908") + max_val = float("2.6033") + mean = float("-0.351188") + std = float("1.21449") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.508823") + max_val = float("0.394891") + mean = float("-0.00235866") + std = float("0.0524808") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [256] + dtype = "float32" + min_val = float("-2.29341") + max_val = float("4.81048") + mean = float("0.455567") + std = float("1.56124") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [256] + dtype = "float32" + min_val = float("0.428622") + max_val = float("2.7123") + mean = float("1.1438") + std = float("0.452675") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [256] + dtype = "float32" + min_val = float("0.000102721") + max_val = float("1.16311") + mean = float("0.101359") + std = float("0.190415") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [256] + dtype = "float32" + min_val = float("-4.65168") + max_val = float("1.41005") + mean = float("-0.0649407") + std = float("0.446609") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [256, 1, 3, 3] + dtype = "float32" + min_val = float("-0.60328") + max_val = float("0.621462") + mean = float("0.00231052") + std = float("0.143026") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [256] + dtype = "float32" + min_val = 
float("-1.88236") + max_val = float("4.98003") + mean = float("0.373603") + std = float("0.954072") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [256] + dtype = "float32" + min_val = float("-0.352725") + max_val = float("3.32578") + mean = float("0.764436") + std = float("0.645437") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [256] + dtype = "float32" + min_val = float("0.176915") + max_val = float("4.17444") + mean = float("0.875024") + std = float("0.523132") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [256] + dtype = "float32" + min_val = float("-6.47036") + max_val = float("4.37525") + mean = float("-0.302532") + std = float("1.70356") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.607719") + max_val = float("0.697925") + mean = float("-0.00144516") + std = float("0.0647383") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [128] + dtype = "float32" + min_val = float("-4.34337") + max_val = float("6.0628") + mean = float("1.85413") + std = float("1.59139") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [128] + dtype = "float32" + min_val = float("-0.0415686") + max_val = float("3.54283") + mean = float("1.03066") + std = float("0.500108") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [128] + dtype = "float32" + min_val = float("0.000137833") + max_val = float("16.4231") + mean = float("0.304417") + std = float("1.48304") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [128] + dtype = "float32" + min_val = float("-2.11429") + max_val = float("6.09559") + mean = float("-0.0288573") + std = float("0.683772") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.36157") + max_val = float("0.309803") + mean = float("0.0367961") + std = float("0.128036") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [128] + dtype = "float32" + min_val = float("-2.51858") + max_val = float("7.231") + mean = float("-0.419334") + std = float("1.15109") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [128] + dtype = "float32" + min_val = float("-0.733515") + max_val = float("8.98718") + mean = float("1.12452") + std = float("0.947115") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [128] + dtype = "float32" + min_val = float("0.474707") + max_val = float("9.0736") + mean = float("3.87939") + std = float("1.84247") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [128] + dtype = "float32" + min_val = float("-6.69257") + max_val = float("6.09555") + mean = float("-0.512027") + std = float("2.22913") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [128, 128, 1, 1] + dtype = "float32" + min_val = float("-0.841678") + max_val = float("0.446547") + mean = float("-0.00545904") + std = float("0.0795739") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [128] + dtype = 
"float32" + min_val = float("-1.66964") + max_val = float("11.2954") + mean = float("1.22491") + std = float("1.99392") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [128] + dtype = "float32" + min_val = float("0.912473") + max_val = float("6.42073") + mean = float("2.16295") + std = float("0.796402") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [128] + dtype = "float32" + min_val = float("0.000502841") + max_val = float("7.62662") + mean = float("0.400167") + std = float("0.907557") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [128] + dtype = "float32" + min_val = float("-2.97772") + max_val = float("1.16672") + mean = float("-0.218247") + std = float("0.646007") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.937138") + max_val = float("0.729617") + mean = float("-0.0106197") + std = float("0.195346") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [128] + dtype = "float32" + min_val = float("-3.16602") + max_val = float("4.3529") + mean = float("0.818057") + std = float("1.27095") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [128] + dtype = "float32" + min_val = float("-0.552183") + max_val = float("5.70418") + mean = float("1.23673") + std = float("1.01597") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [128] + dtype = "float32" + min_val = float("0.971432") + max_val = float("19.4928") + mean = float("5.20099") + std = float("3.24513") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [128] + dtype = "float32" + min_val = float("-6.69532") + max_val = float("6.20892") + mean = float("-0.384992") + std = float("2.54447") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [128, 64, 1, 1] + dtype = "float32" + min_val = float("-0.515995") + max_val = float("0.75423") + mean = float("-0.00297671") + std = float("0.0924049") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.46415") + max_val = float("0.378679") + mean = float("0.0301599") + std = float("0.16447") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.980201") + max_val = float("0.644443") + mean = float("-0.0139947") + std = float("0.138057") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [32, 1, 3, 3] + dtype = "float32" + min_val = float("-0.983687") + max_val = float("1.10091") + mean = float("0.00523073") + std = float("0.305819") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.49613") + max_val = float("0.48646") + mean = float("-0.0016735") + std = float("0.17361") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..5db3fb5d4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +21b997a7ad24aecdd3287e4b13db6d4d7045b699c4f2f7d845f5802c2d5ea602 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/input_meta.py new file mode 100644 index 000000000..c4aae87c0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/input_meta.py @@ -0,0 +1,9 @@ +class 
Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 8500, 4] + dtype = "float32" + min_val = float("-5.9219") + max_val = float("80.4598") + mean = float("34.3851") + std = float("23.2196") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/model.py new file mode 100644 index 000000000..4fde1154b --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/model.py @@ -0,0 +1,482 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("80"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (80xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (80xf32) <- (80xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (80xf32) <- (80xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (80xf32) <- (80xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([80xf32, 80xf32]) <- (80xf32, 80xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([80x80xf32, 80x80xf32]) <- ([80xf32, 80xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (80x80xf32, 80x80xf32) <- ([80x80xf32, 80x80xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (80x80xf32) <- (80x80xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-20"), True) + + # pd_op.scale: (80x80xf32) <- (80x80xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-20"), True) + + # pd_op.scale: (80x80xf32) <- (80x80xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("20"), True) + + # pd_op.scale: (80x80xf32) <- (80x80xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("20"), True) + + # builtin.combine: ([80x80xf32, 80x80xf32, 80x80xf32, 80x80xf32]) <- (80x80xf32, 80x80xf32, 80x80xf32, 80x80xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (80x80x4xf32) <- ([80x80xf32, 80x80xf32, 80x80xf32, 80x80xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([80x80xf32, 80x80xf32]) <- (80x80xf32, 80x80xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (80x80x2xf32) <- ([80x80xf32, 80x80xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (6400x4xf32) <- (80x80x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_1, full_int_array_0) + del stack_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (6400x2xf32) <- (80x80x2xf32, 
2xi64) + reshape_1 = paddle._C_ops.reshape(stack_2, full_int_array_1) + del stack_2 + + # pd_op.full: (6400x1xf32) <- () + full_5 = paddle._C_ops.full( + [6400, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("40"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (40xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (40xf32) <- (40xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (40xf32) <- (40xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (40xf32) <- (40xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([40xf32, 40xf32]) <- (40xf32, 40xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([40x40xf32, 40x40xf32]) <- ([40xf32, 40xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (40x40xf32, 40x40xf32) <- ([40x40xf32, 40x40xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([40x40xf32, 40x40xf32, 40x40xf32, 40x40xf32]) <- (40x40xf32, 40x40xf32, 40x40xf32, 40x40xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (40x40x4xf32) <- ([40x40xf32, 40x40xf32, 40x40xf32, 40x40xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([40x40xf32, 40x40xf32]) <- (40x40xf32, 40x40xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (40x40x2xf32) <- ([40x40xf32, 40x40xf32]) + stack_4 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1600x4xf32) <- (40x40x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (1600x2xf32) <- (40x40x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.full: (1600x1xf32) <- () + full_8 = paddle._C_ops.full( + [1600, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("20"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (20xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_9 + + # pd_op.cast: (20xf32) <- (20xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (20xf32) <- (20xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # 
pd_op.scale: (20xf32) <- (20xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([20xf32, 20xf32]) <- (20xf32, 20xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([20x20xf32, 20x20xf32]) <- ([20xf32, 20xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (20x20xf32, 20x20xf32) <- ([20x20xf32, 20x20xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-80"), True) + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-80"), True) + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("80"), True) + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("80"), True) + + # builtin.combine: ([20x20xf32, 20x20xf32, 20x20xf32, 20x20xf32]) <- (20x20xf32, 20x20xf32, 20x20xf32, 20x20xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (20x20x4xf32) <- ([20x20xf32, 20x20xf32, 20x20xf32, 20x20xf32]) + stack_5 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([20x20xf32, 20x20xf32]) <- (20x20xf32, 20x20xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (20x20x2xf32) <- ([20x20xf32, 20x20xf32]) + stack_6 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (400x4xf32) <- (20x20x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_5, full_int_array_0) + del stack_5 + + # pd_op.reshape: (400x2xf32) <- (20x20x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_6, full_int_array_1) + del stack_6 + + # pd_op.full: (400x1xf32) <- () + full_11 = paddle._C_ops.full( + [400, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_12 = paddle._C_ops.full( + [1], float("10"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (10xi64) <- (1xf64, 1xf64, 1xf64) + arange_3 = paddle.arange(full_0, full_12, full_2, dtype="int64") + del full_0, full_12, full_2 + + # pd_op.cast: (10xf32) <- (10xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (10xf32) <- (10xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_3, full_3, float("0.5"), True) + del cast_3 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("64"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (10xf32) <- (10xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_13, float("0"), True) + del full_13, scale_18 + + # builtin.combine: ([10xf32, 10xf32]) <- (10xf32, 10xf32) + combine_9 = [scale_19, scale_19] + del scale_19 + + # pd_op.meshgrid: ([10x10xf32, 10x10xf32]) <- ([10xf32, 10xf32]) + meshgrid_3 = paddle._C_ops.meshgrid(combine_9) + del combine_9 + + # builtin.split: (10x10xf32, 10x10xf32) <- ([10x10xf32, 10x10xf32]) + ( + split_6, + split_7, + ) = meshgrid_3 + del meshgrid_3 + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_7, full_3, float("-160"), True) + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_6, full_3, float("-160"), True) + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_22 = 
paddle._C_ops.scale(split_7, full_3, float("160"), True) + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_6, full_3, float("160"), True) + del full_3 + + # builtin.combine: ([10x10xf32, 10x10xf32, 10x10xf32, 10x10xf32]) <- (10x10xf32, 10x10xf32, 10x10xf32, 10x10xf32) + combine_10 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (10x10x4xf32) <- ([10x10xf32, 10x10xf32, 10x10xf32, 10x10xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # builtin.combine: ([10x10xf32, 10x10xf32]) <- (10x10xf32, 10x10xf32) + combine_11 = [split_7, split_6] + del split_6, split_7 + + # pd_op.stack: (10x10x2xf32) <- ([10x10xf32, 10x10xf32]) + stack_8 = paddle._C_ops.stack(combine_11, -1) + del combine_11 + + # pd_op.reshape: (100x4xf32) <- (10x10x4xf32, 2xi64) + reshape_6 = paddle._C_ops.reshape(stack_7, full_int_array_0) + del full_int_array_0, stack_7 + + # pd_op.reshape: (100x2xf32) <- (10x10x2xf32, 2xi64) + reshape_7 = paddle._C_ops.reshape(stack_8, full_int_array_1) + del full_int_array_1, stack_8 + + # pd_op.full: (100x1xf32) <- () + full_14 = paddle._C_ops.full( + [100, 1], + float("64"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_15 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([6400x4xf32, 1600x4xf32, 400x4xf32, 100x4xf32]) <- (6400x4xf32, 1600x4xf32, 400x4xf32, 100x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4, reshape_6] + + # pd_op.concat: (8500x4xf32) <- ([6400x4xf32, 1600x4xf32, 400x4xf32, 100x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_15) + del combine_12 + + # builtin.combine: ([6400x2xf32, 1600x2xf32, 400x2xf32, 100x2xf32]) <- (6400x2xf32, 1600x2xf32, 400x2xf32, 100x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5, reshape_7] + del reshape_1, reshape_3, reshape_5, reshape_7 + + # pd_op.concat: (8500x2xf32) <- ([6400x2xf32, 1600x2xf32, 400x2xf32, 100x2xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_15) + del combine_13 + + # builtin.combine: ([6400x1xf32, 1600x1xf32, 400x1xf32, 100x1xf32]) <- (6400x1xf32, 1600x1xf32, 400x1xf32, 100x1xf32) + combine_14 = [full_5, full_8, full_11, full_14] + del full_11, full_14, full_5, full_8 + + # pd_op.concat: (8500x1xf32) <- ([6400x1xf32, 1600x1xf32, 400x1xf32, 100x1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_14, full_15) + del combine_14, full_15 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + concat_0, [1], full_int_array_2, full_int_array_3, [1], [1] + ) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + concat_0, [1], full_int_array_4, full_int_array_5, [1], [1] + ) + + # pd_op.add: (8500xf32) <- (8500xf32, 8500xf32) + add_0 = paddle._C_ops.add(slice_0, slice_1) + del slice_0, slice_1 + + # pd_op.full: (1xf32) <- () + full_16 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_24 = paddle._C_ops.scale(add_0, full_16, float("0"), True) + del add_0 + + # pd_op.slice: (8500xf32) <- 
(8500x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + concat_0, [1], full_int_array_3, full_int_array_4, [1], [1] + ) + del full_int_array_3, full_int_array_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + concat_0, [1], full_int_array_5, full_int_array_6, [1], [1] + ) + del full_int_array_5, full_int_array_6 + + # pd_op.add: (8500xf32) <- (8500xf32, 8500xf32) + add_1 = paddle._C_ops.add(slice_2, slice_3) + del slice_2, slice_3 + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_25 = paddle._C_ops.scale(add_1, full_16, float("0"), True) + del add_1, full_16 + + # builtin.combine: ([8500xf32, 8500xf32]) <- (8500xf32, 8500xf32) + combine_15 = [scale_24, scale_25] + del scale_24, scale_25 + + # pd_op.stack: (8500x2xf32) <- ([8500xf32, 8500xf32]) + stack_0 = paddle._C_ops.stack(combine_15, -1) + del combine_15 + + # pd_op.share_data_: (1x8500x4xf32) <- (1x8500x4xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.multiply: (1x8500x4xf32) <- (1x8500x4xf32, 8500x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, concat_2) + del ( + concat_0, + concat_2, + reshape_0, + reshape_2, + reshape_4, + reshape_6, + share_data__0, + ) + + return multiply_0, stack_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..6176437ef --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +ce455ccf6903179637c13dadc820764e7da7aa1812be7d120d5a836a1e851760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/input_meta.py new file mode 100644 index 000000000..b02782c87 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/input_meta.py @@ -0,0 +1,94 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00423529") + std = float("0.0649412") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1, 36] + dtype = "int64" + data = [ + 3155, + 3235, + 3154, + 3234, + 3156, + 3236, + 3075, + 3315, + 3074, + 7177, + 7217, + 7178, + 7218, + 7176, + 7216, + 7137, + 7257, + 7138, + 8188, + 8208, + 8189, + 8209, + 8187, + 8207, + 8168, + 8228, + 8169, + 8444, + 8454, + 8443, + 8453, + 8445, + 8455, + 8434, + 8464, + 8433, + ] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1, 8500] + dtype = "float32" + max_val = 
float("0.282967") + mean = float("0.0105599") + std = float("0.0244824") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8500, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("636.0") + mean = float("320.0") + std = float("184.709") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 1, 4] + dtype = "float32" + data = [0.0, 0.0, 565.437, 640.0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 1, 1] + dtype = "float32" + data = [1.0] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/model.py new file mode 100644 index 000000000..1cc9cb957 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/model.py @@ -0,0 +1,262 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.multiply: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + multiply_0 = paddle._C_ops.multiply(data_3, data_1) + del data_3 + + # pd_op.flatten: (-1x8500xf32) <- (1x-1x8500xf32) + flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) + + # pd_op.flatten: (-1x36xi64) <- (1x-1x36xi64) + flatten_1 = paddle._C_ops.flatten(data_2, 0, 1) + del data_2 + + # pd_op.index_sample: (-1x36xf32) <- (-1x8500xf32, -1x36xi64) + index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) + del flatten_0, flatten_1 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_0, data_0, full_1] + del data_0, full_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (1x-1x-1xf32) <- (-1x36xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(index_sample_0, stack_0) + del index_sample_0, stack_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.mean: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_0, True) + + # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) + + # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) + pow_0 = paddle._C_ops.pow(subtract_0, float("2")) + del subtract_0 + + # pd_op.sum: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (1x-1x-1xf32) + numel_0 = paddle._C_ops.numel(reshape_0) + del reshape_0 + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (1x-1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, 
paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_2, float("-1"), True) + del divide_0, full_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (xf32) <- (xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + scale_0, full_3, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.maximum: (xf32) <- (xf32, xf32) + maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) + del full_like_0, scale_0 + + # pd_op.divide: (1x-1x1xf32) <- (1x-1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, maximum_0) + del maximum_0, sum_0 + + # pd_op.sqrt: (1x-1x1xf32) <- (1x-1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(divide_1) + del divide_1 + + # pd_op.add: (1x-1x1xf32) <- (1x-1x1xf32, 1x-1x1xf32) + add_0 = paddle._C_ops.add(mean_0, sqrt_0) + del mean_0, sqrt_0 + + # pd_op.greater_than: (1x-1x8500xb) <- (1x-1x8500xf32, 1x-1x1xf32) + greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) + del add_0, multiply_0 + + # pd_op.full_like: (1x-1x8500xf32) <- (1x-1x8500xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + data_1, full_3, paddle.float32, paddle.framework._current_expected_place() + ) + del full_3 + + # pd_op.where: (1x-1x8500xf32) <- (1x-1x8500xb, 1x-1x8500xf32, 1x-1x8500xf32) + where_0 = paddle._C_ops.where(greater_than_1, data_1, full_like_1) + del data_1, full_like_1, greater_than_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [0, 1] + + # pd_op.unsqueeze: (1x1x8500x2xf32) <- (8500x2xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x8500x1xf32, 1x1x8500x1xf32]) <- (1x1x8500x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_4) + del unsqueeze_0 + + # builtin.split: (1x1x8500x1xf32, 1x1x8500x1xf32) <- ([1x1x8500x1xf32, 1x1x8500x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5, full_int_array_2 + + # pd_op.split_with_num: ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) <- (1x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_4) + del full_4, unsqueeze_1 + + # builtin.split: (1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32) <- ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x-1x8500x1xf32) <- (1x1x8500x1xf32, 1x-1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (1x-1x8500x1xf32) <- (1x1x8500x1xf32, 1x-1x1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (1x-1x8500x1xf32) <- (1x-1x1x1xf32, 1x1x8500x1xf32) + subtract_3 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (1x-1x8500x1xf32) <- (1x-1x1x1xf32, 1x1x8500x1xf32) + subtract_4 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, 
paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x-1x8500x1xf32, 1x-1x8500x1xf32, 1x-1x8500x1xf32, 1x-1x8500x1xf32]) <- (1x-1x8500x1xf32, 1x-1x8500x1xf32, 1x-1x8500x1xf32, 1x-1x8500x1xf32) + combine_1 = [subtract_1, subtract_2, subtract_3, subtract_4] + del subtract_1, subtract_2, subtract_3, subtract_4 + + # pd_op.concat: (1x-1x8500x4xf32) <- ([1x-1x8500x1xf32, 1x-1x8500x1xf32, 1x-1x8500x1xf32, 1x-1x8500x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1, full_5 + + # pd_op.min: (1x-1x8500xf32) <- (1x-1x8500x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_0, False) + del concat_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (1x-1x8500xb) <- (1x-1x8500xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_0, full_6) + del full_6, min_0 + + # pd_op.cast: (1x-1x8500xf32) <- (1x-1x8500xb) + cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + multiply_1 = paddle._C_ops.multiply(where_0, cast_4) + del cast_4, where_0 + + # pd_op.multiply: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(multiply_1, data_6) + del data_6, multiply_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.sum: (1x8500xf32) <- (1x-1x8500xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_3, None, False) + del full_int_array_3 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_4 = [] + + # pd_op.max: (xf32) <- (1x8500xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_4, False) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_7 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_7) + del full_7, max_0, multiply_2, sum_1 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..f332df0a1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +eba5e6b4ce972cd37ee97da2d7ea1a8c7ec9ff06a3e4294fb8b30fe4172c01d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/input_meta.py new file mode 100644 index 000000000..41e849b22 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/input_meta.py @@ -0,0 +1,26 @@ +class 
Program_weight_tensor_data_0: + name = "data_0" + shape = [8500, 11] + dtype = "float32" + min_val = float("3.36559e-05") + max_val = float("0.120135") + mean = float("0.040002") + std = float("0.0173712") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8500, 11] + dtype = "float32" + max_val = float("0.941289") + mean = float("0.000145668") + std = float("0.0108761") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [13.62] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/model.py new file mode 100644 index 000000000..5a595aaa8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/model.py @@ -0,0 +1,111 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.cast: (8500x11xf32) <- (8500x11xf32) + cast_0 = paddle._C_ops.cast(data_1, paddle.float32) + del data_1 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8500x11xb) <- (8500x11xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(cast_0, full_0) + + # pd_op.cast: (8500x11xf32) <- (8500x11xb) + cast_1 = paddle._C_ops.cast(greater_than_0, paddle.float32) + del greater_than_0 + + # pd_op.multiply: (8500x11xf32) <- (8500x11xf32, 8500x11xf32) + multiply_0 = paddle._C_ops.multiply(cast_0, cast_1) + del cast_1 + + # pd_op.subtract: (8500x11xf32) <- (8500x11xf32, 8500x11xf32) + subtract_0 = paddle._C_ops.subtract(data_0, cast_0) + + # pd_op.abs: (8500x11xf32) <- (8500x11xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.pow: (8500x11xf32) <- (8500x11xf32) + pow_0 = paddle._C_ops.pow(abs_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8500x11xf32) <- (8500x11xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.less_equal: (8500x11xb) <- (8500x11xf32, xf32) + less_equal_0 = paddle._C_ops.less_equal(cast_0, full_0) + del full_0 + + # pd_op.cast: (8500x11xf32) <- (8500x11xb) + cast_2 = paddle._C_ops.cast(less_equal_0, paddle.float32) + del less_equal_0 + + # pd_op.multiply: (8500x11xf32) <- (8500x11xf32, 8500x11xf32) + multiply_1 = paddle._C_ops.multiply(scale_0, cast_2) + + # pd_op.add: (8500x11xf32) <- (8500x11xf32, 8500x11xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (8500x11xf32) <- (8500x11xf32, 8500x11xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, cast_0) + del data_0 + + # pd_op.multiply: (8500x11xf32) <- (8500x11xf32, 8500x11xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.sum: (8500xf32) <- (8500x11xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_0, None, False) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_1 = paddle._C_ops.scale(sum_0, full_2, float("0"), True) + del sum_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_1 = [] + + # pd_op.sum: (xf32) <- (8500xf32, 0xi64) + sum_1 = paddle._C_ops.sum(scale_1, full_int_array_1, None, False) + + # pd_op.divide: 
(xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_2) + del ( + abs_0, + add_0, + bce_loss_0, + cast_0, + cast_2, + data_2, + full_1, + full_2, + full_int_array_0, + full_int_array_1, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + subtract_0, + sum_1, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..c2014dda9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +83d381d425942c055e716adac75a31fbb9f28b6a94ae9658d2af1c94b3966b54 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py new file mode 100644 index 000000000..8c7a3f029 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/input_meta.py @@ -0,0 +1,70 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [10285] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 3, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000356506") + std = float("0.018878") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 3, 1] + dtype = "int32" + data = [1, 5, 6] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 10285] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00106952") + std = float("0.032686") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 3, 4] + dtype = "float32" + data = [ + 93.0648, + 61.4989, + 115.856, + 74.446, + 89.8993, + 77.6828, + 690.072, + 652.211, + 263.367, + 658.685, + 515.971, + 672.441, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 10285, 4] + dtype = "float32" + min_val = float("-242.744") + max_val = float("874.992") + mean = float("352.037") + std = float("207.783") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py new file mode 100644 index 000000000..4271229a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/model.py @@ -0,0 +1,317 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + 
argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (1x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_5) + del data_4, full_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = 
paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x12xf32) <- (1x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (11xi64) <- () + full_9 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (1x-1x11xf32) <- (1x-1x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (1x1x-1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (1x-1x-1xf32) <- (1x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = 
paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (1x1x-1xf32) <- (1x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_12, float("1e-09"), True) + del full_12, subtract_3 + + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x-1x11xf32) <- (1x-1x11xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..f76ab3c75 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +e6b07390ecc264d34159afdf09f3372f66087988a412702a3ff90f50b5d89b97 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py new file mode 100644 index 000000000..3cc05a789 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/input_meta.py @@ -0,0 +1,50 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00105882") + std = float("0.0325223") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1, 1] + dtype = "int32" + data = [1] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00105882") + std = float("0.0325223") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 1, 4] + dtype = "float32" + data = [0.0, 0.0, 565.437, 640.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 8500, 4] + dtype = "float32" + min_val = float("-89.0917") + max_val = float("831.367") + mean = float("319.721") + std = float("189.73") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py new file mode 100644 index 000000000..ddf39ee9a --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/model.py @@ -0,0 +1,298 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x8500xi64) <- (1x-1x8500xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_2, dtype="int32") + del full_1, full_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x8500xi64) <- (1x8500xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (8500xi64) <- (1x8500xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (8500xi32) <- (-1xi32, 8500xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 8500] + + # pd_op.reshape: (1x8500xi32) <- (8500xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: 
(xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x8500xb) <- (1x8500xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_4) + del data_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x8500xi32) <- (1x8500xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del full_5 + + # pd_op.where: (1x8500xi32) <- (1x8500xb, 1x8500xi32, 1x8500xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (8500x4xf32) <- (-1x4xf32, 8500xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [1, 8500, 4] + + # pd_op.reshape: (1x8500x4xf32) <- (8500x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("12"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x8500x12xf32) <- (1x8500xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (11xi64) <- () + full_7 = paddle._C_ops.full( + [11], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (11xi64) <- (11xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [11], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (1x8500x11xf32) <- (1x8500x12xf32, 11xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_4) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (1x1x8500x4xf32) <- (1x8500x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_5, full_int_array_5) + del data_5, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (1x1x8500x2xf32) <- (1x1x8500x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (1x1x8500x2xf32) <- (1x1x8500x4xf32, 1xi64, 1xi64) + 
slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (1x-1x8500x2xf32) <- (1x-1x1x2xf32, 1x1x8500x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x-1x8500x2xf32) <- (1x-1x1x2xf32, 1x1x8500x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x-1x8500x2xf32) <- (1x-1x8500x2xf32, 1x-1x8500x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x-1x8500x2xf32) <- (1x-1x8500x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_8, full_9) + del subtract_0 + + # pd_op.prod: (1x-1x8500xf32) <- (1x-1x8500x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_8, full_9) + del subtract_1 + + # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (1x1x8500x2xf32) <- (1x1x8500x2xf32, 1x1x8500x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x8500x2xf32) <- (1x1x8500x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_8, full_9) + del full_8, full_9, subtract_2 + + # pd_op.prod: (1x1x8500xf32) <- (1x1x8500x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (1x-1x8500xf32) <- (1x-1x1xf32, 1x1x8500xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x8500xf32) <- (1x-1x8500xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_10, float("1e-09"), True) + del full_10, subtract_3 + + # pd_op.divide: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (1x-1x8500xf32) <- (1x-1x8500xf32, 1x-1x8500xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_1) + del data_1, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (1x8500xf32) <- (1x-1x8500xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_8, False) + del full_int_array_8, multiply_2 + + # pd_op.unsqueeze: (1x8500x1xf32) <- (1x8500xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (1x8500x11xf32) <- (1x8500x11xf32, 1x8500x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py 
b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..5af05ebd4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +09dcaa02c333400bdd1d08ee2b3fdcbe7a2c0d7ee1257834158218dacceb19c1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/input_meta.py new file mode 100644 index 000000000..72f79e56c --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/input_meta.py @@ -0,0 +1,37 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [7744] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [1936] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [484] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [121] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 3, 10285] + dtype = "float32" + min_val = float("2.19994") + max_val = float("868.404") + mean = float("364.969") + std = float("174.551") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/model.py new file mode 100644 index 000000000..8484c64f9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/model.py @@ -0,0 +1,35 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [data_0, data_1, data_2, data_3] + del data_0, data_1, data_2, data_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split: ([-1x-1x-1xf32, -1x-1x-1xf32, -1x-1x-1xf32, -1x-1x-1xf32]) <- (1x-1x-1xf32, 4xi64, 1xi32) + split_4 = paddle._C_ops.split(data_4, stack_0, full_0) + del data_4, full_0, stack_0 + + # builtin.split: (-1x-1x-1xf32, -1x-1x-1xf32, -1x-1x-1xf32, -1x-1x-1xf32) <- ([-1x-1x-1xf32, -1x-1x-1xf32, -1x-1x-1xf32, -1x-1x-1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_4 + del split_4 + + return split_0, split_1, split_2, split_3 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt new file mode 100644 index 000000000..e8a417f38 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt @@ -0,0 +1 @@ +179f1deb7156fb59284fad88a911712981fc00ef3d26a1900cca44f20d526583 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/input_meta.py new file mode 100644 index 000000000..14730e1ca --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/input_meta.py @@ -0,0 +1,52 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 2, 8500] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00423529") + std = float("0.0649412") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 2, 36] + dtype = "int64" + min_val = 1962 + max_val = 8496 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 2, 8500] + dtype = "float32" + max_val = float("0.546254") + mean = float("0.0101703") + std = float("0.0315443") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8500, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("636.0") + mean = float("320.0") + std = float("184.709") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 2, 4] + dtype = "float32" + data = [85.7195, 7.11111, 592.507, 411.654, 129.738, 425.086, 551.964, 636.84] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 2, 1] + dtype = "float32" + data = [1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/model.py new file mode 100644 index 000000000..cefbcaa80 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/model.py @@ -0,0 +1,247 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.multiply: (1x2x8500xf32) <- (1x2x8500xf32, 1x2x8500xf32) + multiply_0 = paddle._C_ops.multiply(data_2, data_0) + del data_2 + + # pd_op.flatten: (2x8500xf32) <- (1x2x8500xf32) + flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) + + # pd_op.flatten: (2x36xi64) <- (1x2x36xi64) + flatten_1 = paddle._C_ops.flatten(data_1, 0, 1) + del data_1 + + # pd_op.index_sample: (2x36xf32) <- (2x8500xf32, 2x36xi64) + index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) + del flatten_0, flatten_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [1, 2, -1] + + # pd_op.reshape: (1x2x36xf32) <- (2x36xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(index_sample_0, full_int_array_0) + del full_int_array_0, index_sample_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (1x2x1xf32) <- 
(1x2x36xf32, 1xi64) + mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_1, True) + + # pd_op.subtract: (1x2x36xf32) <- (1x2x36xf32, 1x2x1xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) + + # pd_op.pow: (1x2x36xf32) <- (1x2x36xf32) + pow_0 = paddle._C_ops.pow(subtract_0, float("2")) + del subtract_0 + + # pd_op.sum: (1x2x1xf32) <- (1x2x36xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_1, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (1x2x36xf32) + numel_0 = paddle._C_ops.numel(reshape_0) + del reshape_0 + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (1x2x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_0, float("-1"), True) + del divide_0, full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (xf32) <- (xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + scale_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.maximum: (xf32) <- (xf32, xf32) + maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) + del full_like_0, scale_0 + + # pd_op.divide: (1x2x1xf32) <- (1x2x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, maximum_0) + del maximum_0, sum_0 + + # pd_op.sqrt: (1x2x1xf32) <- (1x2x1xf32) + sqrt_0 = paddle._C_ops.sqrt(divide_1) + del divide_1 + + # pd_op.add: (1x2x1xf32) <- (1x2x1xf32, 1x2x1xf32) + add_0 = paddle._C_ops.add(mean_0, sqrt_0) + del mean_0, sqrt_0 + + # pd_op.greater_than: (1x2x8500xb) <- (1x2x8500xf32, 1x2x1xf32) + greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) + del add_0, multiply_0 + + # pd_op.full_like: (1x2x8500xf32) <- (1x2x8500xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + data_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (1x2x8500xf32) <- (1x2x8500xb, 1x2x8500xf32, 1x2x8500xf32) + where_0 = paddle._C_ops.where(greater_than_1, data_0, full_like_1) + del data_0, full_like_1, greater_than_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [0, 1] + + # pd_op.unsqueeze: (1x1x8500x2xf32) <- (8500x2xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) + del data_3, full_int_array_2 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x8500x1xf32, 1x1x8500x1xf32]) <- (1x1x8500x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_2) + del unsqueeze_0 + + # builtin.split: (1x1x8500x1xf32, 1x1x8500x1xf32) <- ([1x1x8500x1xf32, 1x1x8500x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.unsqueeze: (1x2x1x4xf32) <- (1x2x4xf32, 
1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_3) + del data_4, full_int_array_3 + + # pd_op.split_with_num: ([1x2x1x1xf32, 1x2x1x1xf32, 1x2x1x1xf32, 1x2x1x1xf32]) <- (1x2x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_2) + del full_2, unsqueeze_1 + + # builtin.split: (1x2x1x1xf32, 1x2x1x1xf32, 1x2x1x1xf32, 1x2x1x1xf32) <- ([1x2x1x1xf32, 1x2x1x1xf32, 1x2x1x1xf32, 1x2x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x2x8500x1xf32) <- (1x1x8500x1xf32, 1x2x1x1xf32) + subtract_1 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (1x2x8500x1xf32) <- (1x1x8500x1xf32, 1x2x1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (1x2x8500x1xf32) <- (1x2x1x1xf32, 1x1x8500x1xf32) + subtract_3 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (1x2x8500x1xf32) <- (1x2x1x1xf32, 1x1x8500x1xf32) + subtract_4 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x2x8500x1xf32, 1x2x8500x1xf32, 1x2x8500x1xf32, 1x2x8500x1xf32]) <- (1x2x8500x1xf32, 1x2x8500x1xf32, 1x2x8500x1xf32, 1x2x8500x1xf32) + combine_0 = [subtract_1, subtract_2, subtract_3, subtract_4] + del subtract_1, subtract_2, subtract_3, subtract_4 + + # pd_op.concat: (1x2x8500x4xf32) <- ([1x2x8500x1xf32, 1x2x8500x1xf32, 1x2x8500x1xf32, 1x2x8500x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_3) + del combine_0, full_3 + + # pd_op.min: (1x2x8500xf32) <- (1x2x8500x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_1, False) + del concat_0, full_int_array_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (1x2x8500xb) <- (1x2x8500xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_0, full_4) + del full_4, min_0 + + # pd_op.cast: (1x2x8500xf32) <- (1x2x8500xb) + cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (1x2x8500xf32) <- (1x2x8500xf32, 1x2x8500xf32) + multiply_1 = paddle._C_ops.multiply(where_0, cast_4) + del cast_4, where_0 + + # pd_op.multiply: (1x2x8500xf32) <- (1x2x8500xf32, 1x2x1xf32) + multiply_2 = paddle._C_ops.multiply(multiply_1, data_5) + del data_5, multiply_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.sum: (1x8500xf32) <- (1x2x8500xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, False) + del full_int_array_4 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_5 = [] + + # pd_op.max: (xf32) <- (1x8500xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_5, False) + del full_int_array_5 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_5) + del full_5, max_0, multiply_2, sum_1 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..7b269a4fc --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +6d6f2c9c9ef3af548cf88e1bf54264b303882c64b6c5a29f24bd261b46da7ed3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/input_meta.py new file mode 100644 index 000000000..119cf9de8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/input_meta.py @@ -0,0 +1,98 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [7744] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [9680] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [10164] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [7744] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [1936] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [484] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [] + dtype = "int64" + data = [121] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 3, 7744] + dtype = "float32" + min_val = float("2.19994") + max_val = float("868.404") + mean = float("364.991") + std = float("174.566") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 3, 1936] + dtype = "float32" + min_val = float("2.80659") + max_val = float("862.75") + mean = float("364.957") + std = float("174.545") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1, 3, 484] + dtype = "float32" + min_val = float("10.4694") + max_val = float("851.442") + mean = float("364.823") + std = float("174.458") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1, 3, 121] + dtype = "float32" + min_val = float("27.1063") + max_val = float("828.825") + mean = float("364.293") + std = float("174.098") + data = None + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1, 3, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/model.py new file mode 100644 index 000000000..1010035f3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/model.py @@ -0,0 +1,157 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + ): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], 
float("9"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_7, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_7 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x9xi64) <- (1x-1x9xi64, 1xf32) + scale_0 = paddle._C_ops.scale(topk_1, full_1, float("0"), True) + del full_1 + + # pd_op.one_hot: (1x-1x9x-1xf32) <- (1x-1x9xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(data_3, topk_1.dtype), data_3 + ) + del data_3, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-2] + + # pd_op.sum: (1x-1x-1xf32) <- (1x-1x9x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_0, None, False) + del one_hot_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(sum_0, data_11) + del sum_0 + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x-1xf32, 1xi32) + topk_2, topk_3 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_8, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_8 + + # pd_op.add: (1x-1x9xi64) <- (1x-1x9xi64, xi64) + add_0 = paddle._C_ops.add(topk_3, data_0) + del data_0 + + # pd_op.one_hot: (1x-1x9x-1xf32) <- (1x-1x9xi64, xi64) + one_hot_1 = paddle._C_ops.one_hot( + topk_3 % paddle.cast(data_4, topk_3.dtype), data_4 + ) + del data_4, topk_3 + + # pd_op.sum: (1x-1x-1xf32) <- (1x-1x9x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_1, full_int_array_0, None, False) + del one_hot_1 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(sum_1, data_11) + del sum_1 + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x-1xf32, 1xi32) + topk_4, topk_5 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_9, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_9 + + # pd_op.add: (1x-1x9xi64) <- (1x-1x9xi64, xi64) + add_1 = paddle._C_ops.add(topk_5, data_1) + del data_1 + + # pd_op.one_hot: (1x-1x9x-1xf32) <- (1x-1x9xi64, xi64) + one_hot_2 = paddle._C_ops.one_hot( + topk_5 % paddle.cast(data_5, topk_5.dtype), data_5 + ) + del data_5, topk_5 + + # pd_op.sum: (1x-1x-1xf32) <- (1x-1x9x-1xf32, 1xi64) + sum_2 = paddle._C_ops.sum(one_hot_2, full_int_array_0, None, False) + del one_hot_2 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_2, data_11) + del sum_2 + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x-1xf32, 1xi32) + topk_6, topk_7 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_10, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_10, full_0 + + # pd_op.add: (1x-1x9xi64) <- (1x-1x9xi64, xi64) + add_2 = paddle._C_ops.add(topk_7, data_2) + del data_2 + + # pd_op.one_hot: (1x-1x9x-1xf32) <- (1x-1x9xi64, xi64) + one_hot_3 = paddle._C_ops.one_hot( + topk_7 % paddle.cast(data_6, topk_7.dtype), data_6 + ) + del data_6, topk_7 + + # pd_op.sum: (1x-1x-1xf32) <- (1x-1x9x-1xf32, 1xi64) + sum_3 = paddle._C_ops.sum(one_hot_3, full_int_array_0, None, False) + del full_int_array_0, one_hot_3 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_3 = paddle._C_ops.multiply(sum_3, data_11) + del data_11, sum_3 + + # pd_op.full: 
(1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x-1x-1xf32, 1x-1x-1xf32, 1x-1x-1xf32, 1x-1x-1xf32]) <- (1x-1x-1xf32, 1x-1x-1xf32, 1x-1x-1xf32, 1x-1x-1xf32) + combine_0 = [multiply_0, multiply_1, multiply_2, multiply_3] + del multiply_0, multiply_1, multiply_2, multiply_3 + + # pd_op.concat: (1x-1x-1xf32) <- ([1x-1x-1xf32, 1x-1x-1xf32, 1x-1x-1xf32, 1x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # builtin.combine: ([1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64]) <- (1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64) + combine_1 = [scale_0, add_0, add_1, add_2] + del add_0, add_1, add_2, scale_0 + + # pd_op.concat: (1x-1x36xi64) <- ([1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..ba0a1cdaa --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +aee51f07341e98d452a2561b4036b470ba72ac38e5ff480edfedbf1b34ef856e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/input_meta.py new file mode 100644 index 000000000..195d6d2c5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/input_meta.py @@ -0,0 +1,40 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [7681] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [7681] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [7681, 11] + dtype = "float32" + min_val = float("3.96415e-05") + max_val = float("0.124407") + mean = float("0.0370727") + std = float("0.0167679") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [7681, 11] + dtype = "float32" + max_val = float("0.981685") + mean = float("0.000992179") + std = float("0.0289897") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [83.8302] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/model.py new file mode 100644 index 000000000..7048500ea --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/model.py @@ -0,0 +1,115 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.equal: (xb) <- (xi64, 
xi64) + equal_0 = paddle._C_ops.equal(data_0, data_1) + del data_0, data_1 + + # pd_op.cast: (-1x11xf32) <- (-1x11xf32) + cast_0 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (-1x11xb) <- (-1x11xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(cast_0, full_0) + + # pd_op.cast: (-1x11xf32) <- (-1x11xb) + cast_1 = paddle._C_ops.cast(greater_than_0, paddle.float32) + del greater_than_0 + + # pd_op.multiply: (-1x11xf32) <- (-1x11xf32, -1x11xf32) + multiply_0 = paddle._C_ops.multiply(cast_0, cast_1) + del cast_1 + + # pd_op.subtract: (-1x11xf32) <- (-1x11xf32, -1x11xf32) + subtract_0 = paddle._C_ops.subtract(data_2, cast_0) + + # pd_op.abs: (-1x11xf32) <- (-1x11xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.pow: (-1x11xf32) <- (-1x11xf32) + pow_0 = paddle._C_ops.pow(abs_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x11xf32) <- (-1x11xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.less_equal: (-1x11xb) <- (-1x11xf32, xf32) + less_equal_0 = paddle._C_ops.less_equal(cast_0, full_0) + del full_0 + + # pd_op.cast: (-1x11xf32) <- (-1x11xb) + cast_2 = paddle._C_ops.cast(less_equal_0, paddle.float32) + del less_equal_0 + + # pd_op.multiply: (-1x11xf32) <- (-1x11xf32, -1x11xf32) + multiply_1 = paddle._C_ops.multiply(scale_0, cast_2) + + # pd_op.add: (-1x11xf32) <- (-1x11xf32, -1x11xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (-1x11xf32) <- (-1x11xf32, -1x11xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_2, cast_0) + del data_2 + + # pd_op.multiply: (-1x11xf32) <- (-1x11xf32, -1x11xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.sum: (-1xf32) <- (-1x11xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_0, None, False) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(sum_0, full_2, float("0"), True) + del sum_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_1 = [] + + # pd_op.sum: (xf32) <- (-1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(scale_1, full_int_array_1, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + del ( + abs_0, + add_0, + bce_loss_0, + cast_0, + cast_2, + data_4, + full_1, + full_2, + full_int_array_0, + full_int_array_1, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + subtract_0, + sum_1, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..479c81f78 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +0abdc8831ff78ef2837b9d36e0b70316aaf4acebdffa350163a1da7b8df3099d 
\ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/input_meta.py new file mode 100644 index 000000000..66b8ee827 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/input_meta.py @@ -0,0 +1,92 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [13, 2] + dtype = "float32" + data = [ + 56.5, + 0.5, + 57.5, + 0.5, + 58.5, + 0.5, + 59.5, + 0.5, + 4.5, + 3.5, + 3.5, + 4.5, + 4.5, + 4.5, + 5.5, + 4.5, + 3.5, + 5.5, + 4.5, + 5.5, + 5.5, + 5.5, + 3.5, + 6.5, + 4.5, + 6.5, + ] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [13, 4] + dtype = "float32" + data = [ + 42.2711, + 0.0, + 74.2526, + 1.14583, + 42.2711, + 0.0, + 74.2526, + 1.14583, + 42.2711, + 0.0, + 74.2526, + 1.14583, + 42.2711, + 0.0, + 74.2526, + 1.14583, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + 1.43685, + 0.234375, + 7.04519, + 9.83073, + ] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/model.py new file mode 100644 index 000000000..374314688 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/model.py @@ -0,0 +1,109 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (-1xf32) <- (-1x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del full_int_array_0 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_0 = paddle._C_ops.subtract(slice_0, slice_1) + del slice_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (-1xf32) <- (-1x2xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_2, [1], [1] + ) + del data_0 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_1, [1], full_int_array_1, full_int_array_2, [1], [1] + ) + del full_int_array_1 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_1 = paddle._C_ops.subtract(slice_2, slice_3) + del slice_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_1, [1], full_int_array_2, full_int_array_3, [1], [1] + ) + del full_int_array_2 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + 
subtract_2 = paddle._C_ops.subtract(slice_4, slice_0) + del slice_0, slice_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [4] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_1, [1], full_int_array_3, full_int_array_4, [1], [1] + ) + del data_1, full_int_array_3, full_int_array_4 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_3 = paddle._C_ops.subtract(slice_5, slice_2) + del slice_2, slice_5 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("6.9"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (-1xf32) <- (-1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.clip: (-1xf32) <- (-1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.clip: (-1xf32) <- (-1xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del subtract_2 + + # pd_op.clip: (-1xf32) <- (-1xf32, 1xf32, 1xf32) + clip_3 = paddle._C_ops.clip(subtract_3, full_0, full_1) + del full_0, full_1, subtract_3 + + # builtin.combine: ([-1xf32, -1xf32, -1xf32, -1xf32]) <- (-1xf32, -1xf32, -1xf32, -1xf32) + combine_0 = [clip_0, clip_1, clip_2, clip_3] + del clip_0, clip_1, clip_2, clip_3 + + # pd_op.stack: (-1x4xf32) <- ([-1xf32, -1xf32, -1xf32, -1xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + return stack_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..ac7005705 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +7709ba2153f3126dda880c2badf2655d046d7ae517609e5321e99f32f288ce89 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/input_meta.py new file mode 100644 index 000000000..a5a077ebe --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 1, 6400] + dtype = "float32" + min_val = float("4.20028") + max_val = float("473.987") + mean = float("246.761") + std = float("93.567") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1, 1600] + dtype = "float32" + min_val = float("8.44926") + max_val = float("468.339") + mean = float("246.718") + std = float("93.5111") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1, 400] + dtype = "float32" + min_val = 
float("19.2584") + max_val = float("457.044") + mean = float("246.546") + std = float("93.2791") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1, 100] + dtype = "float32" + min_val = float("32.4329") + max_val = float("434.456") + mean = float("245.855") + std = float("92.3548") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 1, 1] + dtype = "float32" + data = [1.0] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/model.py new file mode 100644 index 000000000..9578de61a --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/model.py @@ -0,0 +1,160 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("9"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x6400xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_0, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x9xi64) <- (1x-1x9xi64, 1xf32) + scale_0 = paddle._C_ops.scale(topk_1, full_1, float("0"), True) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("6400"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x9x6400xf32) <- (1x-1x9xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_2, topk_1.dtype), full_2 + ) + del full_2, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-2] + + # pd_op.sum: (1x-1x6400xf32) <- (1x-1x9x6400xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_0, None, False) + del one_hot_0 + + # pd_op.multiply: (1x-1x6400xf32) <- (1x-1x6400xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(sum_0, data_4) + del sum_0 + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x1600xf32, 1xi32) + topk_2, topk_3 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_1, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.scale: (1x-1x9xi64) <- (1x-1x9xi64, 1xf32) + scale_1 = paddle._C_ops.scale(topk_3, full_1, float("6400"), True) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1600"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x9x1600xf32) <- (1x-1x9xi64, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + topk_3 % paddle.cast(full_3, topk_3.dtype), full_3 + ) + del full_3, topk_3 + + # pd_op.sum: (1x-1x1600xf32) <- (1x-1x9x1600xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_1, full_int_array_0, None, False) + del one_hot_1 + + # pd_op.multiply: (1x-1x1600xf32) <- (1x-1x1600xf32, 1x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(sum_1, data_4) + del sum_1 + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x400xf32, 1xi32) + topk_4, topk_5 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_2, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_2 + + # pd_op.scale: (1x-1x9xi64) <- (1x-1x9xi64, 1xf32) + scale_2 = paddle._C_ops.scale(topk_5, full_1, float("8000"), True) + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + 
[1], float("400"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x9x400xf32) <- (1x-1x9xi64, 1xi32) + one_hot_2 = paddle._C_ops.one_hot( + topk_5 % paddle.cast(full_4, topk_5.dtype), full_4 + ) + del full_4, topk_5 + + # pd_op.sum: (1x-1x400xf32) <- (1x-1x9x400xf32, 1xi64) + sum_2 = paddle._C_ops.sum(one_hot_2, full_int_array_0, None, False) + del one_hot_2 + + # pd_op.multiply: (1x-1x400xf32) <- (1x-1x400xf32, 1x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_2, data_4) + del sum_2 + + # pd_op.topk: (1x-1x9xf32, 1x-1x9xi64) <- (1x-1x100xf32, 1xi32) + topk_6, topk_7 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_3, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_3, full_0 + + # pd_op.scale: (1x-1x9xi64) <- (1x-1x9xi64, 1xf32) + scale_3 = paddle._C_ops.scale(topk_7, full_1, float("8400"), True) + del full_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("100"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x9x100xf32) <- (1x-1x9xi64, 1xi32) + one_hot_3 = paddle._C_ops.one_hot( + topk_7 % paddle.cast(full_5, topk_7.dtype), full_5 + ) + del full_5, topk_7 + + # pd_op.sum: (1x-1x100xf32) <- (1x-1x9x100xf32, 1xi64) + sum_3 = paddle._C_ops.sum(one_hot_3, full_int_array_0, None, False) + del full_int_array_0, one_hot_3 + + # pd_op.multiply: (1x-1x100xf32) <- (1x-1x100xf32, 1x-1x1xf32) + multiply_3 = paddle._C_ops.multiply(sum_3, data_4) + del data_4, sum_3 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x-1x6400xf32, 1x-1x1600xf32, 1x-1x400xf32, 1x-1x100xf32]) <- (1x-1x6400xf32, 1x-1x1600xf32, 1x-1x400xf32, 1x-1x100xf32) + combine_0 = [multiply_0, multiply_1, multiply_2, multiply_3] + del multiply_0, multiply_1, multiply_2, multiply_3 + + # pd_op.concat: (1x-1x8500xf32) <- ([1x-1x6400xf32, 1x-1x1600xf32, 1x-1x400xf32, 1x-1x100xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0 + + # builtin.combine: ([1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64]) <- (1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64) + combine_1 = [scale_0, scale_1, scale_2, scale_3] + del scale_0, scale_1, scale_2, scale_3 + + # pd_op.concat: (1x-1x36xi64) <- ([1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64, 1x-1x9xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_6) + del combine_1, full_6 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..0a5df33f3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +1aa239f70295a3da27b5ea248ef0f3277774d4ef2c07cab157b623e70787015e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + 
"num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/input_meta.py new file mode 100644 index 000000000..ae28c7c46 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8500, 4] + dtype = "float32" + min_val = float("-128.0") + max_val = float("768.0") + mean = float("320.0") + std = float("187.941") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 2, 4] + dtype = "float32" + data = [85.7195, 7.11111, 592.507, 411.654, 129.738, 425.086, 551.964, 636.84] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/model.py new file mode 100644 index 000000000..d4410710d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/model.py @@ -0,0 +1,263 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (2x4xf32) <- (1x2x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x4xf32) <- (2x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.unsqueeze: (1x8500x4xf32) <- (8500x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_0, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (2x1x2xf32) <- (2x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] + + # pd_op.slice: (2x1x2xf32) <- (2x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 + + # pd_op.slice: (1x8500x2xf32) <- (1x8500x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x8500x2xf32) <- (1x8500x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 + + # pd_op.maximum: (2x8500x2xf32) <- (2x1x2xf32, 1x8500x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x8500x2xf32) <- (2x1x2xf32, 1x8500x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x8500x2xf32) <- (2x8500x2xf32, 2x8500x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x8500x2xf32) <- (2x8500x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.prod: (2x8500xf32) <- (2x8500x2xf32, 1xi64) + prod_0 = 
paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 + + # pd_op.subtract: (2x1x2xf32) <- (2x1x2xf32, 2x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x1x2xf32) <- (2x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x1xf32) <- (2x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 + + # pd_op.subtract: (1x8500x2xf32) <- (1x8500x2xf32, 1x8500x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x8500x2xf32) <- (1x8500x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (1x8500xf32) <- (1x8500x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 + + # pd_op.add: (2x8500xf32) <- (2x1xf32, 1x8500xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x8500xf32) <- (2x8500xf32, 2x8500xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8500xf32) <- (2x8500xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-10"), True) + del full_2, subtract_3 + + # pd_op.divide: (2x8500xf32) <- (2x8500xf32, 2x8500xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [1, -1, 8500] + + # pd_op.reshape: (1x2x8500xf32) <- (2x8500xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0 + + # pd_op.slice: (2xf32) <- (2x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (2xf32) <- (2x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (2xf32) <- (2xf32, 2xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2xf32) <- (2xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_3, float("0"), True) + del add_1 + + # pd_op.slice: (2xf32) <- (2x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] + + # pd_op.slice: (2xf32) <- (2x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del reshape_2 + + # pd_op.add: (2xf32) <- (2xf32, 2xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (2xf32) <- (2xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_2, full_3, float("0"), True) + del add_2 + + # builtin.combine: ([2xf32, 2xf32]) <- (2xf32, 2xf32) + combine_0 = [scale_1, scale_2] + del scale_1, scale_2 + + # pd_op.stack: (2x2xf32) <- ([2xf32, 2xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.unsqueeze: (2x1x2xf32) <- (2x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) + del stack_0 + + # 
pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_0, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (8500xf32) <- (8500xf32, 8500xf32) + add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_3, float("0"), True) + del add_3 + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + del full_int_array_1, full_int_array_3 + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_0, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del data_0, full_int_array_7, full_int_array_8 + + # pd_op.add: (8500xf32) <- (8500xf32, 8500xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_3, float("0"), True) + del add_4, full_3 + + # builtin.combine: ([8500xf32, 8500xf32]) <- (8500xf32, 8500xf32) + combine_1 = [scale_3, scale_4] + del scale_3, scale_4 + + # pd_op.stack: (8500x2xf32) <- ([8500xf32, 8500xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.unsqueeze: (1x8500x2xf32) <- (8500x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) + del full_int_array_2 + + # pd_op.subtract: (2x8500x2xf32) <- (2x1x2xf32, 1x8500x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 + + # pd_op.p_norm: (2x8500xf32) <- (2x8500x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False + ) + del subtract_4 + + # pd_op.reshape: (1x2x8500xf32) <- (2x8500xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) + del full_int_array_6, p_norm_0, stack_1 + + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..cdc4ada80 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +797441c553dd07c3b490eba8366136b5dbc4ba06ae45da81e2789b745b250db3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/input_meta.py new file mode 100644 index 000000000..f33245de1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/input_meta.py @@ -0,0 +1,65 @@ +class 
Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [88] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [88] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [44] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [44] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [22] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [22] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [] + dtype = "int64" + data = [11] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [] + dtype = "int64" + data = [11] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 10285, 4] + dtype = "float32" + min_val = float("-5.77952") + max_val = float("90.7795") + mean = float("37.837") + std = float("25.4896") + data = None diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/model.py new file mode 100644 index 000000000..37df6f170 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/model.py @@ -0,0 +1,593 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- 
(-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("20"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_1, full_int_array_0) + del stack_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_2, full_int_array_1) + del stack_2 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_3, paddle.float32 + ) + del full_3, stack_3 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 
= paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_5 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del stack_4 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del stack_5 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_6, paddle.float32 + ) + del full_5, stack_6 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + 
del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_8 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_7, full_int_array_0) + del stack_7 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_8, full_int_array_1) + del stack_8 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_9, paddle.float32 + ) + del full_6, stack_9 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_6 = paddle.arange(full_0, data_7, full_1, dtype="int64") + del data_7 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_6 = paddle._C_ops.cast(arange_6, paddle.float32) + del arange_6 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_24 = paddle._C_ops.scale(cast_6, full_2, float("0.5"), True) + del cast_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("64"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_25 = paddle._C_ops.scale(scale_24, full_7, float("0"), True) + del scale_24 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_7 = paddle.arange(full_0, data_6, full_1, dtype="int64") + del data_6, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_7 = paddle._C_ops.cast(arange_7, paddle.float32) + del arange_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_26 = paddle._C_ops.scale(cast_7, full_2, float("0.5"), True) + del cast_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_27 = paddle._C_ops.scale(scale_26, full_7, float("0"), True) + del scale_26 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_12 = [scale_27, scale_25] + del scale_25, scale_27 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_3 = paddle._C_ops.meshgrid(combine_12) + del combine_12 
+ + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_6, + split_7, + ) = meshgrid_3 + del meshgrid_3 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_28 = paddle._C_ops.scale(split_7, full_2, float("-160"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_29 = paddle._C_ops.scale(split_6, full_2, float("-160"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_30 = paddle._C_ops.scale(split_7, full_2, float("160"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_31 = paddle._C_ops.scale(split_6, full_2, float("160"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_13 = [scale_28, scale_29, scale_30, scale_31] + del scale_28, scale_29, scale_30, scale_31 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_10 = paddle._C_ops.stack(combine_13, -1) + del combine_13 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_14 = [split_7, split_6] + del split_6, split_7 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_11 = paddle._C_ops.stack(combine_14, -1) + del combine_14 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_6 = paddle._C_ops.reshape(stack_10, full_int_array_0) + del full_int_array_0, stack_10 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_7 = paddle._C_ops.reshape(stack_11, full_int_array_1) + del full_int_array_1, stack_11 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_3 = paddle._C_ops.shape64(reshape_6) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_15 = [slice_3, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_12 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_3 = paddle._C_ops.full_with_tensor( + full_7, stack_12, paddle.float32 + ) + del full_7, stack_12 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32, -1x4xf32) + combine_16 = [reshape_0, reshape_2, reshape_4, reshape_6] + del reshape_0, reshape_2, reshape_4, reshape_6 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_16, full_8) + del combine_16 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32, -1x2xf32) + combine_17 = [reshape_1, reshape_3, reshape_5, reshape_7] + del reshape_1, reshape_3, reshape_5, reshape_7 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_17, full_8) + del combine_17 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) + combine_18 = [ + full_with_tensor_0, + full_with_tensor_1, + full_with_tensor_2, + full_with_tensor_3, + ] + del ( + full_with_tensor_0, + full_with_tensor_1, + full_with_tensor_2, + full_with_tensor_3, + ) + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_18, full_8) + del 
combine_18, full_8 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_0, [1], full_int_array_2, full_int_array_3, [1], [1] + ) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + concat_0, [1], full_int_array_4, full_int_array_5, [1], [1] + ) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_0 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_32 = paddle._C_ops.scale(add_0, full_9, float("0"), True) + del add_0 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + concat_0, [1], full_int_array_3, full_int_array_4, [1], [1] + ) + del full_int_array_3, full_int_array_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + concat_0, [1], full_int_array_5, full_int_array_6, [1], [1] + ) + del full_int_array_5, full_int_array_6 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_1 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_33 = paddle._C_ops.scale(add_1, full_9, float("0"), True) + del add_1, full_9 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_19 = [scale_32, scale_33] + del scale_32, scale_33 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_0 = paddle._C_ops.stack(combine_19, -1) + del combine_19 + + # pd_op.share_data_: (1x-1x4xf32) <- (1x-1x4xf32) + share_data__0 = data_8.detach() + del data_8 + + # pd_op.multiply: (1x-1x4xf32) <- (1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, concat_2) + del concat_0, concat_2, share_data__0, slice_0, slice_1, slice_2, slice_3 + + return multiply_0, stack_0 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..0c293ad87 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +54f6cde8ce78b1624a8ec310f07a47625f0d46e704ba48d40aa0581e4c58691e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/input_meta.py new file mode 100644 index 000000000..5c69dd630 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/input_meta.py @@ -0,0 +1,23 @@ +class 
Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8500, 4] + dtype = "float32" + min_val = float("-128.0") + max_val = float("768.0") + mean = float("320.0") + std = float("187.941") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1, 4] + dtype = "float32" + data = [0.0, 0.0, 565.437, 640.0] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/model.py new file mode 100644 index 000000000..67c827a96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/model.py @@ -0,0 +1,288 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + del data_0 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_2, full_int_array_0) + del data_2, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (-1x1x4xf32) <- (-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.unsqueeze: (1x8500x4xf32) <- (8500x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (-1x1x2xf32) <- (-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] + + # pd_op.slice: (-1x1x2xf32) <- (-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 + + # pd_op.slice: (1x8500x2xf32) <- (1x8500x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x8500x2xf32) <- (1x8500x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 + + # pd_op.maximum: (-1x8500x2xf32) <- (-1x1x2xf32, 1x8500x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (-1x8500x2xf32) <- (-1x1x2xf32, 1x8500x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (-1x8500x2xf32) <- (-1x8500x2xf32, -1x8500x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- 
() + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (-1x8500x2xf32) <- (-1x8500x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.prod: (-1x8500xf32) <- (-1x8500x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 + + # pd_op.subtract: (-1x1x2xf32) <- (-1x1x2xf32, -1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (-1x1x2xf32) <- (-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (-1x1xf32) <- (-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 + + # pd_op.subtract: (1x8500x2xf32) <- (1x8500x2xf32, 1x8500x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x8500x2xf32) <- (1x8500x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_1, full_2, subtract_2 + + # pd_op.prod: (1x8500xf32) <- (1x8500x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 + + # pd_op.add: (-1x8500xf32) <- (-1x1xf32, 1x8500xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (-1x8500xf32) <- (-1x8500xf32, -1x8500xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8500xf32) <- (-1x8500xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-10"), True) + del full_3, subtract_3 + + # pd_op.divide: (-1x8500xf32) <- (-1x8500xf32, -1x8500xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [1, -1, 8500] + + # pd_op.reshape: (1x-1x8500xf32) <- (-1x8500xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_4, float("0"), True) + del add_1 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del reshape_2 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = 
paddle._C_ops.scale(add_2, full_4, float("0"), True) + del add_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_1, scale_2] + del scale_1, scale_2 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) + del stack_0 + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_1, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_1, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (8500xf32) <- (8500xf32, 8500xf32) + add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_4, float("0"), True) + del add_3 + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_1, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + del full_int_array_1, full_int_array_3 + + # pd_op.slice: (8500xf32) <- (8500x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_1, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del data_1, full_int_array_7, full_int_array_8 + + # pd_op.add: (8500xf32) <- (8500xf32, 8500xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.scale: (8500xf32) <- (8500xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_4, float("0"), True) + del add_4, full_4 + + # builtin.combine: ([8500xf32, 8500xf32]) <- (8500xf32, 8500xf32) + combine_1 = [scale_3, scale_4] + del scale_3, scale_4 + + # pd_op.stack: (8500x2xf32) <- ([8500xf32, 8500xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.unsqueeze: (1x8500x2xf32) <- (8500x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) + del full_int_array_2 + + # pd_op.subtract: (-1x8500x2xf32) <- (-1x1x2xf32, 1x8500x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 + + # pd_op.p_norm: (-1x8500xf32) <- (-1x8500x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False + ) + del subtract_4 + + # pd_op.reshape: (1x-1x8500xf32) <- (-1x8500xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) + del full_int_array_6, p_norm_0, stack_1 + + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..ca87e2c42 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +0fa7d323d0622396f6da330ddbf59dc2309bb10b03202a7e2e4eab8d42603e7a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_net.json new file mode 100644 index 000000000..e9a5b932d --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_net.json @@ -0,0 
+1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-DocLayout-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/input_meta.py new file mode 100644 index 000000000..d21a36335 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 3, 640, 640] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("1.99634") + std = float("1.14742") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8] + dtype = "float32" + data = [0.0, 1.0003, 2.0006, 3.00086, 4.00119, 5.00149, 6.00171, 7.00209] diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/model.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/model.py new file mode 100644 index 000000000..c3420fb04 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/model.py @@ -0,0 +1,6406 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + 
parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + 
parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + data_0, + data_1, + ): + # pd_op.conv2d: (1x32x320x320xf32) <- (1x3x640x640xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_521, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_521 + + # pd_op.batch_norm_: (1x32x320x320xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x320x320xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_520, + parameter_519, + parameter_518, + parameter_517, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_517, parameter_518, parameter_519, parameter_520 + + # pd_op.hardswish: (1x32x320x320xf32) <- (1x32x320x320xf32) + hardswish_0 = paddle._C_ops.hardswish(batch_norm__0) + + # pd_op.depthwise_conv2d: (1x32x320x320xf32) <- (1x32x320x320xf32, 32x1x3x3xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + hardswish_0, parameter_516, [1, 1], [1, 1], "EXPLICIT", 32, [1, 1], "NCHW" + ) + del parameter_516 + + # pd_op.batch_norm_: (1x32x320x320xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x320x320xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_515, + parameter_514, + parameter_513, + parameter_512, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_512, parameter_513, parameter_514, parameter_515 + + # pd_op.hardswish: (1x32x320x320xf32) <- (1x32x320x320xf32) + hardswish_1 = paddle._C_ops.hardswish(batch_norm__6) + + # pd_op.conv2d: (1x64x320x320xf32) <- (1x32x320x320xf32, 64x32x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + hardswish_1, parameter_511, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_511 + + # pd_op.batch_norm_: (1x64x320x320xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x320x320xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_510, + parameter_509, + parameter_508, + parameter_507, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_507, parameter_508, parameter_509, parameter_510 + + # pd_op.hardswish: (1x64x320x320xf32) <- (1x64x320x320xf32) + hardswish_2 = paddle._C_ops.hardswish(batch_norm__12) + + # pd_op.depthwise_conv2d: (1x64x160x160xf32) <- (1x64x320x320xf32, 64x1x3x3xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + hardswish_2, parameter_506, [2, 2], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del parameter_506 + + # pd_op.batch_norm_: (1x64x160x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x160x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_505, + parameter_504, + parameter_503, + parameter_502, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_502, parameter_503, parameter_504, parameter_505 + + # pd_op.hardswish: (1x64x160x160xf32) <- (1x64x160x160xf32) + hardswish_3 = paddle._C_ops.hardswish(batch_norm__18) + + # pd_op.conv2d: (1x128x160x160xf32) <- (1x64x160x160xf32, 128x64x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + hardswish_3, parameter_501, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_501 + + # pd_op.batch_norm_: (1x128x160x160xf32, 128xf32, 
128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x160x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_500, + parameter_499, + parameter_498, + parameter_497, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_497, parameter_498, parameter_499, parameter_500 + + # pd_op.hardswish: (1x128x160x160xf32) <- (1x128x160x160xf32) + hardswish_4 = paddle._C_ops.hardswish(batch_norm__24) + + # pd_op.depthwise_conv2d: (1x128x160x160xf32) <- (1x128x160x160xf32, 128x1x3x3xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + hardswish_4, parameter_496, [1, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del parameter_496 + + # pd_op.batch_norm_: (1x128x160x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x160x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_495, + parameter_494, + parameter_493, + parameter_492, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_492, parameter_493, parameter_494, parameter_495 + + # pd_op.hardswish: (1x128x160x160xf32) <- (1x128x160x160xf32) + hardswish_5 = paddle._C_ops.hardswish(batch_norm__30) + + # pd_op.conv2d: (1x128x160x160xf32) <- (1x128x160x160xf32, 128x128x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + hardswish_5, parameter_491, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_491 + + # pd_op.batch_norm_: (1x128x160x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x160x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_490, + parameter_489, + parameter_488, + parameter_487, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_487, parameter_488, parameter_489, parameter_490 + + # pd_op.hardswish: (1x128x160x160xf32) <- (1x128x160x160xf32) + hardswish_6 = paddle._C_ops.hardswish(batch_norm__36) + + # pd_op.depthwise_conv2d: (1x128x80x80xf32) <- (1x128x160x160xf32, 128x1x3x3xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + hardswish_6, parameter_486, [2, 2], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del parameter_486 + + # pd_op.batch_norm_: (1x128x80x80xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x80x80xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_485, + parameter_484, + parameter_483, + parameter_482, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_482, parameter_483, 
parameter_484, parameter_485 + + # pd_op.hardswish: (1x128x80x80xf32) <- (1x128x80x80xf32) + hardswish_7 = paddle._C_ops.hardswish(batch_norm__42) + + # pd_op.conv2d: (1x256x80x80xf32) <- (1x128x80x80xf32, 256x128x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + hardswish_7, parameter_481, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_481 + + # pd_op.batch_norm_: (1x256x80x80xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x80x80xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_480, + parameter_479, + parameter_478, + parameter_477, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_477, parameter_478, parameter_479, parameter_480 + + # pd_op.hardswish: (1x256x80x80xf32) <- (1x256x80x80xf32) + hardswish_8 = paddle._C_ops.hardswish(batch_norm__48) + + # pd_op.depthwise_conv2d: (1x256x80x80xf32) <- (1x256x80x80xf32, 256x1x3x3xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + hardswish_8, parameter_476, [1, 1], [1, 1], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_476 + + # pd_op.batch_norm_: (1x256x80x80xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x80x80xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_475, + parameter_474, + parameter_473, + parameter_472, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_472, parameter_473, parameter_474, parameter_475 + + # pd_op.hardswish: (1x256x80x80xf32) <- (1x256x80x80xf32) + hardswish_9 = paddle._C_ops.hardswish(batch_norm__54) + + # pd_op.conv2d: (1x256x80x80xf32) <- (1x256x80x80xf32, 256x256x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + hardswish_9, parameter_471, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_471 + + # pd_op.batch_norm_: (1x256x80x80xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x80x80xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_470, + parameter_469, + parameter_468, + parameter_467, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_467, parameter_468, parameter_469, parameter_470 + + # pd_op.hardswish: (1x256x80x80xf32) <- (1x256x80x80xf32) + hardswish_10 = paddle._C_ops.hardswish(batch_norm__60) + + # pd_op.depthwise_conv2d: (1x256x40x40xf32) <- (1x256x80x80xf32, 256x1x3x3xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + hardswish_10, parameter_466, [2, 2], [1, 1], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_466 + + # pd_op.batch_norm_: (1x256x40x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x40x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + 
batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_465, + parameter_464, + parameter_463, + parameter_462, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_462, parameter_463, parameter_464, parameter_465 + + # pd_op.hardswish: (1x256x40x40xf32) <- (1x256x40x40xf32) + hardswish_11 = paddle._C_ops.hardswish(batch_norm__66) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x256x40x40xf32, 512x256x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + hardswish_11, parameter_461, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_461 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_460, + parameter_459, + parameter_458, + parameter_457, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_457, parameter_458, parameter_459, parameter_460 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_12 = paddle._C_ops.hardswish(batch_norm__72) + + # pd_op.depthwise_conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + hardswish_12, parameter_456, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_456 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_455, + parameter_454, + parameter_453, + parameter_452, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_452, parameter_453, parameter_454, parameter_455 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_13 = paddle._C_ops.hardswish(batch_norm__78) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + hardswish_13, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_451 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_14 = paddle._C_ops.hardswish(batch_norm__84) + + # pd_op.depthwise_conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 
512x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + hardswish_14, parameter_446, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_15 = paddle._C_ops.hardswish(batch_norm__90) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + hardswish_15, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_16 = paddle._C_ops.hardswish(batch_norm__96) + + # pd_op.depthwise_conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + hardswish_16, parameter_436, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_17 = paddle._C_ops.hardswish(batch_norm__102) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + hardswish_17, parameter_431, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_18 = paddle._C_ops.hardswish(batch_norm__108) + + # pd_op.depthwise_conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + hardswish_18, parameter_426, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_19 = paddle._C_ops.hardswish(batch_norm__114) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + hardswish_19, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_20 = paddle._C_ops.hardswish(batch_norm__120) + + # pd_op.depthwise_conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + hardswish_20, parameter_416, [1, 1], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_21 = paddle._C_ops.hardswish(batch_norm__126) + + # pd_op.conv2d: (1x512x40x40xf32) <- (1x512x40x40xf32, 512x512x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + hardswish_21, parameter_411, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # 
pd_op.batch_norm_: (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x40x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.hardswish: (1x512x40x40xf32) <- (1x512x40x40xf32) + hardswish_22 = paddle._C_ops.hardswish(batch_norm__132) + + # pd_op.depthwise_conv2d: (1x512x20x20xf32) <- (1x512x40x40xf32, 512x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + hardswish_22, parameter_406, [2, 2], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (1x512x20x20xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x20x20xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.hardswish: (1x512x20x20xf32) <- (1x512x20x20xf32) + hardswish_23 = paddle._C_ops.hardswish(batch_norm__138) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_2 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_0 + + # pd_op.pool2d: (1x512x1x1xf32) <- (1x512x20x20xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + hardswish_23, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x128x1x1xf32) <- (1x512x1x1xf32, 128x512x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_0, parameter_401, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_400, full_int_array_1) + del parameter_400 + + # pd_op.add: (1x128x1x1xf32) <- (1x128x1x1xf32, 1x128x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_12, reshape_0) + + # pd_op.relu: (1x128x1x1xf32) <- (1x128x1x1xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.conv2d: (1x512x1x1xf32) <- (1x128x1x1xf32, 512x128x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_0, parameter_399, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_399 + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_398, full_int_array_1) + del parameter_398 + + # pd_op.add: (1x512x1x1xf32) <- (1x512x1x1xf32, 1x512x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_13, reshape_1) + + # pd_op.hardsigmoid: 
(1x512x1x1xf32) <- (1x512x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_1, float("0.166667"), float("0.5") + ) + del add_1 + + # pd_op.multiply: (1x512x20x20xf32) <- (1x512x20x20xf32, 1x512x1x1xf32) + multiply_0 = paddle._C_ops.multiply(hardswish_23, hardsigmoid_0) + + # pd_op.conv2d: (1x1024x20x20xf32) <- (1x512x20x20xf32, 1024x512x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + multiply_0, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397 + + # pd_op.batch_norm_: (1x1024x20x20xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x20x20xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_393, parameter_394, parameter_395, parameter_396 + + # pd_op.hardswish: (1x1024x20x20xf32) <- (1x1024x20x20xf32) + hardswish_24 = paddle._C_ops.hardswish(batch_norm__144) + + # pd_op.depthwise_conv2d: (1x1024x20x20xf32) <- (1x1024x20x20xf32, 1024x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + hardswish_24, + parameter_392, + [1, 1], + [2, 2], + "EXPLICIT", + 1024, + [1, 1], + "NCHW", + ) + del parameter_392 + + # pd_op.batch_norm_: (1x1024x20x20xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x20x20xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.hardswish: (1x1024x20x20xf32) <- (1x1024x20x20xf32) + hardswish_25 = paddle._C_ops.hardswish(batch_norm__150) + + # pd_op.pool2d: (1x1024x1x1xf32) <- (1x1024x20x20xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + hardswish_25, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x256x1x1xf32) <- (1x1024x1x1xf32, 256x1024x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_1, parameter_387, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_386, full_int_array_1) + del parameter_386 + + # pd_op.add: (1x256x1x1xf32) <- (1x256x1x1xf32, 1x256x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_15, reshape_2) + + # pd_op.relu: (1x256x1x1xf32) <- (1x256x1x1xf32) + relu_1 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.conv2d: (1x1024x1x1xf32) <- (1x256x1x1xf32, 1024x256x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_1, parameter_385, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_385 + + # pd_op.reshape: (1x1024x1x1xf32) <- (1024xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_384, full_int_array_1) + del parameter_384 + + # pd_op.add: (1x1024x1x1xf32) <- (1x1024x1x1xf32, 1x1024x1x1xf32) + add_3 = 
paddle._C_ops.add(conv2d_16, reshape_3) + + # pd_op.hardsigmoid: (1x1024x1x1xf32) <- (1x1024x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_3, float("0.166667"), float("0.5") + ) + del add_3 + + # pd_op.multiply: (1x1024x20x20xf32) <- (1x1024x20x20xf32, 1x1024x1x1xf32) + multiply_1 = paddle._C_ops.multiply(hardswish_25, hardsigmoid_1) + + # pd_op.conv2d: (1x1024x20x20xf32) <- (1x1024x20x20xf32, 1024x1024x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + multiply_1, parameter_383, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_383 + + # pd_op.batch_norm_: (1x1024x20x20xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x20x20xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_382, + parameter_381, + parameter_380, + parameter_379, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_379, parameter_380, parameter_381, parameter_382 + + # pd_op.hardswish: (1x1024x20x20xf32) <- (1x1024x20x20xf32) + hardswish_26 = paddle._C_ops.hardswish(batch_norm__156) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x256x80x80xf32, 160x256x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + hardswish_10, parameter_378, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_378 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_377, + parameter_376, + parameter_375, + parameter_374, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_374, parameter_375, parameter_376, parameter_377 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_27 = paddle._C_ops.hardswish(batch_norm__162) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x512x40x40xf32, 160x512x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + hardswish_22, parameter_373, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_373 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_372, + parameter_371, + parameter_370, + parameter_369, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_369, parameter_370, parameter_371, parameter_372 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_28 = paddle._C_ops.hardswish(batch_norm__168) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x1024x20x20xf32, 160x1024x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + hardswish_26, parameter_368, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_368 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 
160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_367, + parameter_366, + parameter_365, + parameter_364, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_364, parameter_365, parameter_366, parameter_367 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_29 = paddle._C_ops.hardswish(batch_norm__174) + + # pd_op.nearest_interp: (1x160x40x40xf32) <- (1x160x20x20xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + hardswish_29, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # builtin.combine: ([1x160x40x40xf32, 1x160x40x40xf32]) <- (1x160x40x40xf32, 1x160x40x40xf32) + combine_0 = [nearest_interp_0, hardswish_28] + + # pd_op.concat: (1x320x40x40xf32) <- ([1x160x40x40xf32, 1x160x40x40xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.depthwise_conv2d: (1x320x40x40xf32) <- (1x320x40x40xf32, 320x1x5x5xf32) + depthwise_conv2d_13 = paddle._C_ops.depthwise_conv2d( + concat_3, parameter_363, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_363 + + # pd_op.batch_norm_: (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_13, + parameter_362, + parameter_361, + parameter_360, + parameter_359, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_359, parameter_360, parameter_361, parameter_362 + + # pd_op.hardswish: (1x320x40x40xf32) <- (1x320x40x40xf32) + hardswish_30 = paddle._C_ops.hardswish(batch_norm__180) + + # pd_op.conv2d: (1x320x40x40xf32) <- (1x320x40x40xf32, 320x320x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + hardswish_30, parameter_358, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_358 + + # pd_op.batch_norm_: (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_357, + parameter_356, + parameter_355, + parameter_354, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_354, parameter_355, parameter_356, parameter_357 + + # pd_op.hardswish: (1x320x40x40xf32) <- (1x320x40x40xf32) + hardswish_31 = paddle._C_ops.hardswish(batch_norm__186) + + # pd_op.depthwise_conv2d: (1x320x40x40xf32) <- (1x320x40x40xf32, 320x1x5x5xf32) + depthwise_conv2d_14 = paddle._C_ops.depthwise_conv2d( + hardswish_31, parameter_353, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_353 + + # pd_op.batch_norm_: (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_14, + parameter_352, + parameter_351, + parameter_350, + parameter_349, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_349, parameter_350, parameter_351, parameter_352 + + # pd_op.hardswish: (1x320x40x40xf32) <- (1x320x40x40xf32) + hardswish_32 = paddle._C_ops.hardswish(batch_norm__192) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x320x40x40xf32, 160x320x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + hardswish_32, parameter_348, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_348 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_347, + parameter_346, + parameter_345, + parameter_344, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_344, parameter_345, parameter_346, parameter_347 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_33 = paddle._C_ops.hardswish(batch_norm__198) + + # pd_op.nearest_interp: (1x160x80x80xf32) <- (1x160x40x40xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + hardswish_33, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x160x80x80xf32, 1x160x80x80xf32]) <- (1x160x80x80xf32, 1x160x80x80xf32) + combine_1 = [nearest_interp_1, hardswish_27] + + # pd_op.concat: (1x320x80x80xf32) <- ([1x160x80x80xf32, 1x160x80x80xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.depthwise_conv2d: (1x320x80x80xf32) <- (1x320x80x80xf32, 320x1x5x5xf32) + depthwise_conv2d_15 = paddle._C_ops.depthwise_conv2d( + concat_4, parameter_343, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_343 + + # pd_op.batch_norm_: (1x320x80x80xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x80x80xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_15, + parameter_342, + parameter_341, + parameter_340, + parameter_339, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) 
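    # --- Editor's note (illustrative only, not part of the generated graph) ---
    # The `(lambda x, f: f(x))(paddle._C_ops.batch_norm(...), lambda out: ...)`
    # wrapper repeated throughout this file does nothing to the math: it only
    # normalizes the op's return value so that the six `batch_norm__*` names can
    # always be unpacked, whether the op returns a bare tensor or the full
    # (output, mean_out, variance_out, saved_mean, saved_variance, reserve) tuple.
    # A minimal standalone sketch of the same idiom (plain Python, hypothetical
    # values, independent of the Paddle runtime):
    #
    #     def as_six_tuple(out):
    #         # Pad a single result to the 6-slot layout used above.
    #         return out if isinstance(out, (list, tuple)) else (
    #             out, None, None, None, None, None)
    #
    #     y, mean_out, var_out, saved_mean, saved_var, reserve = as_six_tuple(3.0)
    #     assert y == 3.0 and reserve is None
    # --------------------------------------------------------------------------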
+ del parameter_339, parameter_340, parameter_341, parameter_342 + + # pd_op.hardswish: (1x320x80x80xf32) <- (1x320x80x80xf32) + hardswish_34 = paddle._C_ops.hardswish(batch_norm__204) + + # pd_op.conv2d: (1x320x80x80xf32) <- (1x320x80x80xf32, 320x320x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + hardswish_34, parameter_338, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_338 + + # pd_op.batch_norm_: (1x320x80x80xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x80x80xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_337, + parameter_336, + parameter_335, + parameter_334, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_334, parameter_335, parameter_336, parameter_337 + + # pd_op.hardswish: (1x320x80x80xf32) <- (1x320x80x80xf32) + hardswish_35 = paddle._C_ops.hardswish(batch_norm__210) + + # pd_op.depthwise_conv2d: (1x320x80x80xf32) <- (1x320x80x80xf32, 320x1x5x5xf32) + depthwise_conv2d_16 = paddle._C_ops.depthwise_conv2d( + hardswish_35, parameter_333, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_333 + + # pd_op.batch_norm_: (1x320x80x80xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x80x80xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_16, + parameter_332, + parameter_331, + parameter_330, + parameter_329, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_329, parameter_330, parameter_331, parameter_332 + + # pd_op.hardswish: (1x320x80x80xf32) <- (1x320x80x80xf32) + hardswish_36 = paddle._C_ops.hardswish(batch_norm__216) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x320x80x80xf32, 160x320x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + hardswish_36, parameter_328, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_328 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_327, + parameter_326, + parameter_325, + parameter_324, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_324, parameter_325, parameter_326, parameter_327 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_37 = paddle._C_ops.hardswish(batch_norm__222) + + # pd_op.depthwise_conv2d: (1x160x40x40xf32) <- (1x160x80x80xf32, 160x1x5x5xf32) + depthwise_conv2d_17 = paddle._C_ops.depthwise_conv2d( + hardswish_37, parameter_323, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_323 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + 
batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_17, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_38 = paddle._C_ops.hardswish(batch_norm__228) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x160x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + hardswish_38, parameter_318, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_39 = paddle._C_ops.hardswish(batch_norm__234) + + # builtin.combine: ([1x160x40x40xf32, 1x160x40x40xf32]) <- (1x160x40x40xf32, 1x160x40x40xf32) + combine_2 = [hardswish_39, hardswish_33] + + # pd_op.concat: (1x320x40x40xf32) <- ([1x160x40x40xf32, 1x160x40x40xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.depthwise_conv2d: (1x320x40x40xf32) <- (1x320x40x40xf32, 320x1x5x5xf32) + depthwise_conv2d_18 = paddle._C_ops.depthwise_conv2d( + concat_5, parameter_313, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_18, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.hardswish: (1x320x40x40xf32) <- (1x320x40x40xf32) + hardswish_40 = paddle._C_ops.hardswish(batch_norm__240) + + # pd_op.conv2d: (1x320x40x40xf32) <- (1x320x40x40xf32, 320x320x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + hardswish_40, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308 + + # pd_op.batch_norm_: (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + False, + float("0.9"), + float("1e-05"), 
+ "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.hardswish: (1x320x40x40xf32) <- (1x320x40x40xf32) + hardswish_41 = paddle._C_ops.hardswish(batch_norm__246) + + # pd_op.depthwise_conv2d: (1x320x40x40xf32) <- (1x320x40x40xf32, 320x1x5x5xf32) + depthwise_conv2d_19 = paddle._C_ops.depthwise_conv2d( + hardswish_41, parameter_303, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x40x40xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_19, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.hardswish: (1x320x40x40xf32) <- (1x320x40x40xf32) + hardswish_42 = paddle._C_ops.hardswish(batch_norm__252) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x320x40x40xf32, 160x320x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + hardswish_42, parameter_298, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_43 = paddle._C_ops.hardswish(batch_norm__258) + + # pd_op.depthwise_conv2d: (1x160x20x20xf32) <- (1x160x40x40xf32, 160x1x5x5xf32) + depthwise_conv2d_20 = paddle._C_ops.depthwise_conv2d( + hardswish_43, parameter_293, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_293 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_20, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_44 = paddle._C_ops.hardswish(batch_norm__264) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x160x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + hardswish_44, parameter_288, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # 
pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_45 = paddle._C_ops.hardswish(batch_norm__270) + + # builtin.combine: ([1x160x20x20xf32, 1x160x20x20xf32]) <- (1x160x20x20xf32, 1x160x20x20xf32) + combine_3 = [hardswish_45, hardswish_29] + + # pd_op.concat: (1x320x20x20xf32) <- ([1x160x20x20xf32, 1x160x20x20xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.depthwise_conv2d: (1x320x20x20xf32) <- (1x320x20x20xf32, 320x1x5x5xf32) + depthwise_conv2d_21 = paddle._C_ops.depthwise_conv2d( + concat_6, parameter_283, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_283 + + # pd_op.batch_norm_: (1x320x20x20xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x20x20xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_21, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.hardswish: (1x320x20x20xf32) <- (1x320x20x20xf32) + hardswish_46 = paddle._C_ops.hardswish(batch_norm__276) + + # pd_op.conv2d: (1x320x20x20xf32) <- (1x320x20x20xf32, 320x320x1x1xf32) + conv2d_29 = paddle._C_ops.conv2d( + hardswish_46, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278 + + # pd_op.batch_norm_: (1x320x20x20xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x20x20xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.hardswish: (1x320x20x20xf32) <- (1x320x20x20xf32) + hardswish_47 = paddle._C_ops.hardswish(batch_norm__282) + + # pd_op.depthwise_conv2d: (1x320x20x20xf32) <- (1x320x20x20xf32, 320x1x5x5xf32) + depthwise_conv2d_22 = paddle._C_ops.depthwise_conv2d( + hardswish_47, parameter_273, [1, 1], [2, 2], "EXPLICIT", 320, [1, 1], "NCHW" + ) + del parameter_273 + + # pd_op.batch_norm_: (1x320x20x20xf32, 320xf32, 320xf32, 320xf32, 320xf32, -1xui8) <- (1x320x20x20xf32, 320xf32, 320xf32, 320xf32, 320xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda 
x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_22, + parameter_272, + parameter_271, + parameter_270, + parameter_269, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_269, parameter_270, parameter_271, parameter_272 + + # pd_op.hardswish: (1x320x20x20xf32) <- (1x320x20x20xf32) + hardswish_48 = paddle._C_ops.hardswish(batch_norm__288) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x320x20x20xf32, 160x320x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + hardswish_48, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_49 = paddle._C_ops.hardswish(batch_norm__294) + + # pd_op.depthwise_conv2d: (1x160x10x10xf32) <- (1x160x20x20xf32, 160x1x5x5xf32) + depthwise_conv2d_23 = paddle._C_ops.depthwise_conv2d( + hardswish_29, parameter_263, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_23, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_50 = paddle._C_ops.hardswish(batch_norm__300) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + hardswish_50, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_258 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_257, + parameter_256, + parameter_255, + parameter_254, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_254, parameter_255, parameter_256, parameter_257 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_51 = paddle._C_ops.hardswish(batch_norm__306) + + # pd_op.depthwise_conv2d: (1x160x10x10xf32) <- (1x160x20x20xf32, 160x1x5x5xf32) + 
depthwise_conv2d_24 = paddle._C_ops.depthwise_conv2d( + hardswish_49, parameter_253, [2, 2], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_253 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_24, + parameter_252, + parameter_251, + parameter_250, + parameter_249, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_249, parameter_250, parameter_251, parameter_252 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_52 = paddle._C_ops.hardswish(batch_norm__312) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + hardswish_52, parameter_248, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_248 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_247, + parameter_246, + parameter_245, + parameter_244, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_244, parameter_245, parameter_246, parameter_247 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_53 = paddle._C_ops.hardswish(batch_norm__318) + + # pd_op.add: (1x160x10x10xf32) <- (1x160x10x10xf32, 1x160x10x10xf32) + add_4 = paddle._C_ops.add(hardswish_51, hardswish_53) + + # pd_op.shape64: (4xi64) <- (1x160x80x80xf32) + shape64_0 = paddle._C_ops.shape64(hardswish_37) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.depthwise_conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x1x5x5xf32) + depthwise_conv2d_25 = paddle._C_ops.depthwise_conv2d( + hardswish_37, parameter_243, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_243 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + 
batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_25, + parameter_242, + parameter_241, + parameter_240, + parameter_239, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_239, parameter_240, parameter_241, parameter_242 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_54 = paddle._C_ops.hardswish(batch_norm__324) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x160x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + hardswish_54, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_238 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_237, + parameter_236, + parameter_235, + parameter_234, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_234, parameter_235, parameter_236, parameter_237 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_55 = paddle._C_ops.hardswish(batch_norm__330) + + # pd_op.depthwise_conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x1x5x5xf32) + depthwise_conv2d_26 = paddle._C_ops.depthwise_conv2d( + hardswish_55, parameter_233, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_233 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_26, + parameter_232, + parameter_231, + parameter_230, + parameter_229, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_229, parameter_230, parameter_231, parameter_232 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_56 = paddle._C_ops.hardswish(batch_norm__336) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x160x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + hardswish_56, parameter_228, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_228 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_227, + parameter_226, + parameter_225, + parameter_224, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_224, parameter_225, parameter_226, parameter_227 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_57 = paddle._C_ops.hardswish(batch_norm__342) + + # pd_op.depthwise_conv2d: (1x160x80x80xf32) 
<- (1x160x80x80xf32, 160x1x5x5xf32) + depthwise_conv2d_27 = paddle._C_ops.depthwise_conv2d( + hardswish_57, parameter_223, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_223 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_27, + parameter_222, + parameter_221, + parameter_220, + parameter_219, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_219, parameter_220, parameter_221, parameter_222 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_58 = paddle._C_ops.hardswish(batch_norm__348) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x160x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + hardswish_58, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_218 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_217, + parameter_216, + parameter_215, + parameter_214, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_214, parameter_215, parameter_216, parameter_217 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_59 = paddle._C_ops.hardswish(batch_norm__354) + + # pd_op.depthwise_conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x1x5x5xf32) + depthwise_conv2d_28 = paddle._C_ops.depthwise_conv2d( + hardswish_59, parameter_213, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_213 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_28, + parameter_212, + parameter_211, + parameter_210, + parameter_209, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_209, parameter_210, parameter_211, parameter_212 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_60 = paddle._C_ops.hardswish(batch_norm__360) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x160x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + hardswish_60, parameter_208, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_208 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_207, + parameter_206, + parameter_205, + 
parameter_204, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_204, parameter_205, parameter_206, parameter_207 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_61 = paddle._C_ops.hardswish(batch_norm__366) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x80x80xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + hardswish_61, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + pool2d_2, parameter_203, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_203 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_202, full_int_array_1) + del parameter_202 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_37, reshape_4) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (1x160x80x80xf32) <- (1x160x80x80xf32, 1x160x1x1xf32) + multiply_2 = paddle._C_ops.multiply(hardswish_61, sigmoid_0) + + # pd_op.conv2d: (1x160x80x80xf32) <- (1x160x80x80xf32, 160x160x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + multiply_2, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_201 + + # pd_op.batch_norm_: (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x80x80xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.hardswish: (1x160x80x80xf32) <- (1x160x80x80xf32) + hardswish_62 = paddle._C_ops.hardswish(batch_norm__372) + + # pd_op.conv2d: (1x11x80x80xf32) <- (1x160x80x80xf32, 11x160x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + hardswish_62, parameter_196, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_195, full_int_array_1) + del parameter_195 + + # pd_op.add: (1x11x80x80xf32) <- (1x11x80x80xf32, 1x11x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_39, reshape_5) + + # pd_op.conv2d: (1x32x80x80xf32) <- (1x160x80x80xf32, 32x160x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + hardswish_62, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_193, full_int_array_1) + del parameter_193 + + # pd_op.add: (1x32x80x80xf32) <- (1x32x80x80xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_40, reshape_6) + + # pd_op.conv2d: (1x1x80x80xf32) <- (1x160x80x80xf32, 1x160x5x5xf32) + conv2d_41 = paddle._C_ops.conv2d( + hardswish_61, parameter_192, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_192 + + # pd_op.batch_norm_: (1x1x80x80xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x80x80xf32, 1xf32, 
1xf32, 1xf32, 1xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_188, parameter_189, parameter_190, parameter_191 + + # pd_op.hardswish: (1x1x80x80xf32) <- (1x1x80x80xf32) + hardswish_63 = paddle._C_ops.hardswish(batch_norm__378) + + # pd_op.conv2d: (1x1x80x80xf32) <- (1x1x80x80xf32, 1x1x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + hardswish_63, parameter_187, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187 + + # pd_op.batch_norm_: (1x1x80x80xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x80x80xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.sigmoid: (1x1x80x80xf32) <- (1x1x80x80xf32) + sigmoid_1 = paddle._C_ops.sigmoid(batch_norm__384) + del batch_norm__384 + + # pd_op.sigmoid: (1x11x80x80xf32) <- (1x11x80x80xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_6) + del add_6 + + # pd_op.multiply: (1x11x80x80xf32) <- (1x11x80x80xf32, 1x1x80x80xf32) + multiply_3 = paddle._C_ops.multiply(sigmoid_2, sigmoid_1) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_11 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_12 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_1 + + # pd_op.scale: (1x11x80x80xf32) <- (1x11x80x80xf32, 1xf32) + scale_0 = paddle._C_ops.scale(multiply_3, full_1, float("1e-09"), True) + del multiply_3 + + # pd_op.sqrt: (1x11x80x80xf32) <- (1x11x80x80xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.transpose: (1x80x80x11xf32) <- (1x11x80x80xf32) + transpose_0 = paddle._C_ops.transpose(sqrt_0, [0, 2, 3, 1]) + + # pd_op.transpose: (1x80x80x32xf32) <- (1x32x80x80xf32) + transpose_1 = paddle._C_ops.transpose(add_7, [0, 2, 3, 1]) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("80"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (80xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_2, full_3, full_4, dtype="float32") + del full_3 + + # pd_op.scale: (80xf32) <- (80xf32, 1xf32) + scale_1 = paddle._C_ops.scale(arange_0, full_1, float("0.5"), True) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_5 + + # pd_op.scale: (80xf32) <- (80xf32, 1xf32) + scale_2 = 
paddle._C_ops.scale(scale_1, full_5, float("0"), True) + del scale_1 + + # builtin.combine: ([80xf32, 80xf32]) <- (80xf32, 80xf32) + combine_4 = [scale_2, scale_2] + del scale_2 + + # pd_op.meshgrid: ([80x80xf32, 80x80xf32]) <- ([80xf32, 80xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (80x80xf32, 80x80xf32) <- ([80x80xf32, 80x80xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.flatten: (6400xf32) <- (80x80xf32) + flatten_0 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.flatten: (6400xf32) <- (80x80xf32) + flatten_1 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # builtin.combine: ([6400xf32, 6400xf32]) <- (6400xf32, 6400xf32) + combine_5 = [flatten_1, flatten_0] + del flatten_0, flatten_1 + + # pd_op.stack: (6400x2xf32) <- ([6400xf32, 6400xf32]) + stack_0 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_7 = [1, -1, 11] + + # pd_op.reshape: (1x6400x11xf32) <- (1x80x80x11xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_0, full_int_array_7) + del transpose_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [-1, 8] + + # pd_op.reshape: (25600x8xf32) <- (1x80x80x32xf32, 2xi64) + reshape_8 = paddle._C_ops.reshape(transpose_1, full_int_array_8) + + # pd_op.softmax: (25600x8xf32) <- (25600x8xf32) + softmax_0 = paddle._C_ops.softmax(reshape_8, 1) + del reshape_8 + + # pd_op.matmul: (25600xf32) <- (25600x8xf32, 8xf32) + matmul_0 = paddle._C_ops.matmul(softmax_0, data_1, False, False) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [-1, 4] + + # pd_op.reshape: (6400x4xf32) <- (25600xf32, 2xi64) + reshape_9 = paddle._C_ops.reshape(matmul_0, full_int_array_9) + + # pd_op.scale: (6400x4xf32) <- (6400x4xf32, 1xf32) + scale_3 = paddle._C_ops.scale(reshape_9, full_5, float("0"), True) + del full_5, reshape_9 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_10 = [1, 6400, 4] + + # pd_op.reshape: (1x6400x4xf32) <- (6400x4xf32, 3xi64) + reshape_10 = paddle._C_ops.reshape(scale_3, full_int_array_10) + del full_int_array_10 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_15 = full_6 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_16 = full_6 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_17 = full_6 + + # pd_op.split_with_num: ([1x6400x2xf32, 1x6400x2xf32]) <- (1x6400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_10, 2, full_6) + del reshape_10 + + # builtin.split: (1x6400x2xf32, 1x6400x2xf32) <- ([1x6400x2xf32, 1x6400x2xf32]) + ( + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_18 = full_7 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_19 = full_7 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_20 = full_7 + + # pd_op.scale: (1x6400x2xf32) <- (1x6400x2xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_2, full_7, float("0"), True) + del split_2 + + # pd_op.add: (1x6400x2xf32) <- (1x6400x2xf32, 6400x2xf32) + add_8 = paddle._C_ops.add(scale_4, stack_0) + + # pd_op.add: (1x6400x2xf32) <- (1x6400x2xf32, 6400x2xf32) + add_9 = paddle._C_ops.add(split_3, stack_0) + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, 
paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_21 = full_8 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_22 = full_8 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_23 = full_8 + + # builtin.combine: ([1x6400x2xf32, 1x6400x2xf32]) <- (1x6400x2xf32, 1x6400x2xf32) + combine_6 = [add_8, add_9] + + # pd_op.concat: (1x6400x4xf32) <- ([1x6400x2xf32, 1x6400x2xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_6, full_8) + del combine_6 + + # pd_op.flatten: (1x11x6400xf32) <- (1x11x80x80xf32) + flatten_2 = paddle._C_ops.flatten(sqrt_0, 2, 3) + + # pd_op.transpose: (1x6400x11xf32) <- (1x11x6400xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (1x32x6400xf32) <- (1x32x80x80xf32) + flatten_3 = paddle._C_ops.flatten(add_7, 2, 3) + + # pd_op.transpose: (1x6400x32xf32) <- (1x32x6400xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x6400x4xf32) <- (1x6400x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(concat_7, full_9, float("0"), True) + del concat_7 + + # pd_op.shape64: (4xi64) <- (1x160x40x40xf32) + shape64_1 = paddle._C_ops.shape64(hardswish_43) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_1 + + # pd_op.depthwise_conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x1x5x5xf32) + depthwise_conv2d_29 = paddle._C_ops.depthwise_conv2d( + hardswish_43, parameter_182, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_182 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_29, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_178, parameter_179, parameter_180, parameter_181 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_64 = paddle._C_ops.hardswish(batch_norm__390) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x160x1x1xf32) + conv2d_43 = paddle._C_ops.conv2d( + hardswish_64, parameter_177, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_177 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_176, + parameter_175, + parameter_174, + 
parameter_173, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_173, parameter_174, parameter_175, parameter_176 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_65 = paddle._C_ops.hardswish(batch_norm__396) + + # pd_op.depthwise_conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x1x5x5xf32) + depthwise_conv2d_30 = paddle._C_ops.depthwise_conv2d( + hardswish_65, parameter_172, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_172 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_30, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_168, parameter_169, parameter_170, parameter_171 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_66 = paddle._C_ops.hardswish(batch_norm__402) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x160x1x1xf32) + conv2d_44 = paddle._C_ops.conv2d( + hardswish_66, parameter_167, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_167 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_163, parameter_164, parameter_165, parameter_166 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_67 = paddle._C_ops.hardswish(batch_norm__408) + + # pd_op.depthwise_conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x1x5x5xf32) + depthwise_conv2d_31 = paddle._C_ops.depthwise_conv2d( + hardswish_67, parameter_162, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_162 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_31, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_68 = paddle._C_ops.hardswish(batch_norm__414) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x160x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + hardswish_68, parameter_157, [1, 1], [0, 0], "EXPLICIT", [1, 
1], 1, "NCHW" + ) + del parameter_157 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_153, parameter_154, parameter_155, parameter_156 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_69 = paddle._C_ops.hardswish(batch_norm__420) + + # pd_op.depthwise_conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x1x5x5xf32) + depthwise_conv2d_32 = paddle._C_ops.depthwise_conv2d( + hardswish_69, parameter_152, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_152 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_32, + parameter_151, + parameter_150, + parameter_149, + parameter_148, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_70 = paddle._C_ops.hardswish(batch_norm__426) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x160x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + hardswish_70, parameter_147, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_147 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_146, + parameter_145, + parameter_144, + parameter_143, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_143, parameter_144, parameter_145, parameter_146 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_71 = paddle._C_ops.hardswish(batch_norm__432) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x40x40xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + hardswish_71, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + pool2d_3, parameter_142, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_142 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_141, full_int_array_1) + del parameter_141 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_47, reshape_11) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_3 = 
paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (1x160x40x40xf32) <- (1x160x40x40xf32, 1x160x1x1xf32) + multiply_4 = paddle._C_ops.multiply(hardswish_71, sigmoid_3) + + # pd_op.conv2d: (1x160x40x40xf32) <- (1x160x40x40xf32, 160x160x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + multiply_4, parameter_140, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_140 + + # pd_op.batch_norm_: (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x40x40xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_139, + parameter_138, + parameter_137, + parameter_136, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_136, parameter_137, parameter_138, parameter_139 + + # pd_op.hardswish: (1x160x40x40xf32) <- (1x160x40x40xf32) + hardswish_72 = paddle._C_ops.hardswish(batch_norm__438) + + # pd_op.conv2d: (1x11x40x40xf32) <- (1x160x40x40xf32, 11x160x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + hardswish_72, parameter_135, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_135 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_134, full_int_array_1) + del parameter_134 + + # pd_op.add: (1x11x40x40xf32) <- (1x11x40x40xf32, 1x11x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_49, reshape_12) + + # pd_op.conv2d: (1x32x40x40xf32) <- (1x160x40x40xf32, 32x160x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + hardswish_72, parameter_133, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_133 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_132, full_int_array_1) + del parameter_132 + + # pd_op.add: (1x32x40x40xf32) <- (1x32x40x40xf32, 1x32x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_50, reshape_13) + + # pd_op.conv2d: (1x1x40x40xf32) <- (1x160x40x40xf32, 1x160x5x5xf32) + conv2d_51 = paddle._C_ops.conv2d( + hardswish_71, parameter_131, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_131 + + # pd_op.batch_norm_: (1x1x40x40xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x40x40xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_130, + parameter_129, + parameter_128, + parameter_127, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_127, parameter_128, parameter_129, parameter_130 + + # pd_op.hardswish: (1x1x40x40xf32) <- (1x1x40x40xf32) + hardswish_73 = paddle._C_ops.hardswish(batch_norm__444) + + # pd_op.conv2d: (1x1x40x40xf32) <- (1x1x40x40xf32, 1x1x1x1xf32) + conv2d_52 = paddle._C_ops.conv2d( + hardswish_73, parameter_126, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_126 + + # pd_op.batch_norm_: (1x1x40x40xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x40x40xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( 
+ conv2d_52, + parameter_125, + parameter_124, + parameter_123, + parameter_122, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_122, parameter_123, parameter_124, parameter_125 + + # pd_op.sigmoid: (1x1x40x40xf32) <- (1x1x40x40xf32) + sigmoid_4 = paddle._C_ops.sigmoid(batch_norm__450) + del batch_norm__450 + + # pd_op.sigmoid: (1x11x40x40xf32) <- (1x11x40x40xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_11) + del add_11 + + # pd_op.multiply: (1x11x40x40xf32) <- (1x11x40x40xf32, 1x1x40x40xf32) + multiply_5 = paddle._C_ops.multiply(sigmoid_5, sigmoid_4) + + # pd_op.scale: (1x11x40x40xf32) <- (1x11x40x40xf32, 1xf32) + scale_6 = paddle._C_ops.scale(multiply_5, full_1, float("1e-09"), True) + del multiply_5 + + # pd_op.sqrt: (1x11x40x40xf32) <- (1x11x40x40xf32) + sqrt_1 = paddle._C_ops.sqrt(scale_6) + del scale_6 + + # pd_op.transpose: (1x40x40x11xf32) <- (1x11x40x40xf32) + transpose_4 = paddle._C_ops.transpose(sqrt_1, [0, 2, 3, 1]) + + # pd_op.transpose: (1x40x40x32xf32) <- (1x32x40x40xf32) + transpose_5 = paddle._C_ops.transpose(add_12, [0, 2, 3, 1]) + + # pd_op.full: (1xf64) <- () + full_10 = paddle._C_ops.full( + [1], float("40"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (40xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_2, full_10, full_4, dtype="float32") + del full_10 + + # pd_op.scale: (40xf32) <- (40xf32, 1xf32) + scale_7 = paddle._C_ops.scale(arange_1, full_1, float("0.5"), True) + del arange_1 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_24 = full_11 + + # pd_op.scale: (40xf32) <- (40xf32, 1xf32) + scale_8 = paddle._C_ops.scale(scale_7, full_11, float("0"), True) + del scale_7 + + # builtin.combine: ([40xf32, 40xf32]) <- (40xf32, 40xf32) + combine_7 = [scale_8, scale_8] + del scale_8 + + # pd_op.meshgrid: ([40x40xf32, 40x40xf32]) <- ([40xf32, 40xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_7) + del combine_7 + + # builtin.split: (40x40xf32, 40x40xf32) <- ([40x40xf32, 40x40xf32]) + ( + split_4, + split_5, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.flatten: (1600xf32) <- (40x40xf32) + flatten_4 = paddle._C_ops.flatten(split_4, 0, 1) + del split_4 + + # pd_op.flatten: (1600xf32) <- (40x40xf32) + flatten_5 = paddle._C_ops.flatten(split_5, 0, 1) + del split_5 + + # builtin.combine: ([1600xf32, 1600xf32]) <- (1600xf32, 1600xf32) + combine_8 = [flatten_5, flatten_4] + del flatten_4, flatten_5 + + # pd_op.stack: (1600x2xf32) <- ([1600xf32, 1600xf32]) + stack_1 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (1x1600x11xf32) <- (1x40x40x11xf32, 3xi64) + reshape_14 = paddle._C_ops.reshape(transpose_4, full_int_array_7) + del transpose_4 + + # pd_op.reshape: (6400x8xf32) <- (1x40x40x32xf32, 2xi64) + reshape_15 = paddle._C_ops.reshape(transpose_5, full_int_array_8) + + # pd_op.softmax: (6400x8xf32) <- (6400x8xf32) + softmax_1 = paddle._C_ops.softmax(reshape_15, 1) + del reshape_15 + + # pd_op.matmul: (6400xf32) <- (6400x8xf32, 8xf32) + matmul_1 = paddle._C_ops.matmul(softmax_1, data_1, False, False) + + # pd_op.reshape: (1600x4xf32) <- (6400xf32, 2xi64) + reshape_16 = paddle._C_ops.reshape(matmul_1, full_int_array_9) + + # pd_op.scale: (1600x4xf32) <- (1600x4xf32, 1xf32) + scale_9 = paddle._C_ops.scale(reshape_16, full_11, float("0"), True) + del 
full_11, reshape_16 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [1, 1600, 4] + + # pd_op.reshape: (1x1600x4xf32) <- (1600x4xf32, 3xi64) + reshape_17 = paddle._C_ops.reshape(scale_9, full_int_array_11) + del full_int_array_11 + + # pd_op.split_with_num: ([1x1600x2xf32, 1x1600x2xf32]) <- (1x1600x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_17, 2, full_6) + del reshape_17 + + # builtin.split: (1x1600x2xf32, 1x1600x2xf32) <- ([1x1600x2xf32, 1x1600x2xf32]) + ( + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.scale: (1x1600x2xf32) <- (1x1600x2xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_6, full_7, float("0"), True) + del split_6 + + # pd_op.add: (1x1600x2xf32) <- (1x1600x2xf32, 1600x2xf32) + add_13 = paddle._C_ops.add(scale_10, stack_1) + + # pd_op.add: (1x1600x2xf32) <- (1x1600x2xf32, 1600x2xf32) + add_14 = paddle._C_ops.add(split_7, stack_1) + + # builtin.combine: ([1x1600x2xf32, 1x1600x2xf32]) <- (1x1600x2xf32, 1x1600x2xf32) + combine_9 = [add_13, add_14] + + # pd_op.concat: (1x1600x4xf32) <- ([1x1600x2xf32, 1x1600x2xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_8) + del combine_9 + + # pd_op.flatten: (1x11x1600xf32) <- (1x11x40x40xf32) + flatten_6 = paddle._C_ops.flatten(sqrt_1, 2, 3) + + # pd_op.transpose: (1x1600x11xf32) <- (1x11x1600xf32) + transpose_6 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.flatten: (1x32x1600xf32) <- (1x32x40x40xf32) + flatten_7 = paddle._C_ops.flatten(add_12, 2, 3) + + # pd_op.transpose: (1x1600x32xf32) <- (1x32x1600xf32) + transpose_7 = paddle._C_ops.transpose(flatten_7, [0, 2, 1]) + del flatten_7 + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1600x4xf32) <- (1x1600x4xf32, 1xf32) + scale_11 = paddle._C_ops.scale(concat_8, full_12, float("0"), True) + del concat_8 + + # pd_op.shape64: (4xi64) <- (1x160x20x20xf32) + shape64_2 = paddle._C_ops.shape64(hardswish_49) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.depthwise_conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x1x5x5xf32) + depthwise_conv2d_33 = paddle._C_ops.depthwise_conv2d( + hardswish_49, parameter_121, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_121 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_33, + parameter_120, + parameter_119, + parameter_118, + parameter_117, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_117, parameter_118, 
parameter_119, parameter_120 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_74 = paddle._C_ops.hardswish(batch_norm__456) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x160x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + hardswish_74, parameter_116, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_116 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_115, + parameter_114, + parameter_113, + parameter_112, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_112, parameter_113, parameter_114, parameter_115 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_75 = paddle._C_ops.hardswish(batch_norm__462) + + # pd_op.depthwise_conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x1x5x5xf32) + depthwise_conv2d_34 = paddle._C_ops.depthwise_conv2d( + hardswish_75, parameter_111, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_111 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_34, + parameter_110, + parameter_109, + parameter_108, + parameter_107, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_107, parameter_108, parameter_109, parameter_110 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_76 = paddle._C_ops.hardswish(batch_norm__468) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x160x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + hardswish_76, parameter_106, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_106 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_105, + parameter_104, + parameter_103, + parameter_102, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_102, parameter_103, parameter_104, parameter_105 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_77 = paddle._C_ops.hardswish(batch_norm__474) + + # pd_op.depthwise_conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x1x5x5xf32) + depthwise_conv2d_35 = paddle._C_ops.depthwise_conv2d( + hardswish_77, parameter_101, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_101 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__480, + batch_norm__481, + 
batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_35, + parameter_100, + parameter_99, + parameter_98, + parameter_97, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_97, parameter_98, parameter_99 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_78 = paddle._C_ops.hardswish(batch_norm__480) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x160x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + hardswish_78, parameter_96, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_96 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_95, + parameter_94, + parameter_93, + parameter_92, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_92, parameter_93, parameter_94, parameter_95 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_79 = paddle._C_ops.hardswish(batch_norm__486) + + # pd_op.depthwise_conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x1x5x5xf32) + depthwise_conv2d_36 = paddle._C_ops.depthwise_conv2d( + hardswish_79, parameter_91, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_91 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_36, + parameter_90, + parameter_89, + parameter_88, + parameter_87, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_87, parameter_88, parameter_89, parameter_90 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_80 = paddle._C_ops.hardswish(batch_norm__492) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x160x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + hardswish_80, parameter_86, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_86 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_85, + parameter_84, + parameter_83, + parameter_82, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_82, parameter_83, parameter_84, parameter_85 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_81 = paddle._C_ops.hardswish(batch_norm__498) + + # pd_op.pool2d: (1x160x1x1xf32) <- 
(1x160x20x20xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + hardswish_81, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + pool2d_4, parameter_81, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_81 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_80, full_int_array_1) + del parameter_80 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_57, reshape_18) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_15) + del add_15 + + # pd_op.multiply: (1x160x20x20xf32) <- (1x160x20x20xf32, 1x160x1x1xf32) + multiply_6 = paddle._C_ops.multiply(hardswish_81, sigmoid_6) + + # pd_op.conv2d: (1x160x20x20xf32) <- (1x160x20x20xf32, 160x160x1x1xf32) + conv2d_58 = paddle._C_ops.conv2d( + multiply_6, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x20x20xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.hardswish: (1x160x20x20xf32) <- (1x160x20x20xf32) + hardswish_82 = paddle._C_ops.hardswish(batch_norm__504) + + # pd_op.conv2d: (1x11x20x20xf32) <- (1x160x20x20xf32, 11x160x1x1xf32) + conv2d_59 = paddle._C_ops.conv2d( + hardswish_82, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_73, full_int_array_1) + del parameter_73 + + # pd_op.add: (1x11x20x20xf32) <- (1x11x20x20xf32, 1x11x1x1xf32) + add_16 = paddle._C_ops.add(conv2d_59, reshape_19) + + # pd_op.conv2d: (1x32x20x20xf32) <- (1x160x20x20xf32, 32x160x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + hardswish_82, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(parameter_71, full_int_array_1) + del parameter_71 + + # pd_op.add: (1x32x20x20xf32) <- (1x32x20x20xf32, 1x32x1x1xf32) + add_17 = paddle._C_ops.add(conv2d_60, reshape_20) + + # pd_op.conv2d: (1x1x20x20xf32) <- (1x160x20x20xf32, 1x160x5x5xf32) + conv2d_61 = paddle._C_ops.conv2d( + hardswish_81, parameter_70, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_70 + + # pd_op.batch_norm_: (1x1x20x20xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x20x20xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_69, + parameter_68, + parameter_67, + parameter_66, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del parameter_66, parameter_67, parameter_68, parameter_69 + + # pd_op.hardswish: (1x1x20x20xf32) <- (1x1x20x20xf32) + hardswish_83 = paddle._C_ops.hardswish(batch_norm__510) + + # pd_op.conv2d: (1x1x20x20xf32) <- (1x1x20x20xf32, 1x1x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + hardswish_83, parameter_65, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_65 + + # pd_op.batch_norm_: (1x1x20x20xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x20x20xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_64, + parameter_63, + parameter_62, + parameter_61, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_61, parameter_62, parameter_63, parameter_64 + + # pd_op.sigmoid: (1x1x20x20xf32) <- (1x1x20x20xf32) + sigmoid_7 = paddle._C_ops.sigmoid(batch_norm__516) + del batch_norm__516 + + # pd_op.sigmoid: (1x11x20x20xf32) <- (1x11x20x20xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_16) + del add_16 + + # pd_op.multiply: (1x11x20x20xf32) <- (1x11x20x20xf32, 1x1x20x20xf32) + multiply_7 = paddle._C_ops.multiply(sigmoid_8, sigmoid_7) + + # pd_op.scale: (1x11x20x20xf32) <- (1x11x20x20xf32, 1xf32) + scale_12 = paddle._C_ops.scale(multiply_7, full_1, float("1e-09"), True) + del multiply_7 + + # pd_op.sqrt: (1x11x20x20xf32) <- (1x11x20x20xf32) + sqrt_2 = paddle._C_ops.sqrt(scale_12) + del scale_12 + + # pd_op.transpose: (1x20x20x11xf32) <- (1x11x20x20xf32) + transpose_8 = paddle._C_ops.transpose(sqrt_2, [0, 2, 3, 1]) + + # pd_op.transpose: (1x20x20x32xf32) <- (1x32x20x20xf32) + transpose_9 = paddle._C_ops.transpose(add_17, [0, 2, 3, 1]) + + # pd_op.full: (1xf64) <- () + full_13 = paddle._C_ops.full( + [1], float("20"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (20xf32) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_2, full_13, full_4, dtype="float32") + del full_13 + + # pd_op.scale: (20xf32) <- (20xf32, 1xf32) + scale_13 = paddle._C_ops.scale(arange_2, full_1, float("0.5"), True) + del arange_2 + + # pd_op.full: (1xf32) <- () + full_14 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_25 = full_14 + + # pd_op.scale: (20xf32) <- (20xf32, 1xf32) + scale_14 = paddle._C_ops.scale(scale_13, full_14, float("0"), True) + del scale_13 + + # builtin.combine: ([20xf32, 20xf32]) <- (20xf32, 20xf32) + combine_10 = [scale_14, scale_14] + del scale_14 + + # pd_op.meshgrid: ([20x20xf32, 20x20xf32]) <- ([20xf32, 20xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_10) + del combine_10 + + # builtin.split: (20x20xf32, 20x20xf32) <- ([20x20xf32, 20x20xf32]) + ( + split_8, + split_9, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.flatten: (400xf32) <- (20x20xf32) + flatten_8 = paddle._C_ops.flatten(split_8, 0, 1) + del split_8 + + # pd_op.flatten: (400xf32) <- (20x20xf32) + flatten_9 = paddle._C_ops.flatten(split_9, 0, 1) + del split_9 + + # builtin.combine: ([400xf32, 400xf32]) <- (400xf32, 400xf32) + combine_11 = [flatten_9, flatten_8] + del flatten_8, flatten_9 + + # pd_op.stack: (400x2xf32) <- ([400xf32, 400xf32]) + stack_2 = paddle._C_ops.stack(combine_11, -1) + del combine_11 + + # pd_op.reshape: (1x400x11xf32) <- (1x20x20x11xf32, 3xi64) + reshape_21 = 
paddle._C_ops.reshape(transpose_8, full_int_array_7) + del transpose_8 + + # pd_op.reshape: (1600x8xf32) <- (1x20x20x32xf32, 2xi64) + reshape_22 = paddle._C_ops.reshape(transpose_9, full_int_array_8) + + # pd_op.softmax: (1600x8xf32) <- (1600x8xf32) + softmax_2 = paddle._C_ops.softmax(reshape_22, 1) + del reshape_22 + + # pd_op.matmul: (1600xf32) <- (1600x8xf32, 8xf32) + matmul_2 = paddle._C_ops.matmul(softmax_2, data_1, False, False) + + # pd_op.reshape: (400x4xf32) <- (1600xf32, 2xi64) + reshape_23 = paddle._C_ops.reshape(matmul_2, full_int_array_9) + + # pd_op.scale: (400x4xf32) <- (400x4xf32, 1xf32) + scale_15 = paddle._C_ops.scale(reshape_23, full_14, float("0"), True) + del full_14, reshape_23 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_12 = [1, 400, 4] + + # pd_op.reshape: (1x400x4xf32) <- (400x4xf32, 3xi64) + reshape_24 = paddle._C_ops.reshape(scale_15, full_int_array_12) + del full_int_array_12 + + # pd_op.split_with_num: ([1x400x2xf32, 1x400x2xf32]) <- (1x400x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(reshape_24, 2, full_6) + del reshape_24 + + # builtin.split: (1x400x2xf32, 1x400x2xf32) <- ([1x400x2xf32, 1x400x2xf32]) + ( + split_10, + split_11, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.scale: (1x400x2xf32) <- (1x400x2xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_10, full_7, float("0"), True) + del split_10 + + # pd_op.add: (1x400x2xf32) <- (1x400x2xf32, 400x2xf32) + add_18 = paddle._C_ops.add(scale_16, stack_2) + + # pd_op.add: (1x400x2xf32) <- (1x400x2xf32, 400x2xf32) + add_19 = paddle._C_ops.add(split_11, stack_2) + + # builtin.combine: ([1x400x2xf32, 1x400x2xf32]) <- (1x400x2xf32, 1x400x2xf32) + combine_12 = [add_18, add_19] + + # pd_op.concat: (1x400x4xf32) <- ([1x400x2xf32, 1x400x2xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_12, full_8) + del combine_12 + + # pd_op.flatten: (1x11x400xf32) <- (1x11x20x20xf32) + flatten_10 = paddle._C_ops.flatten(sqrt_2, 2, 3) + + # pd_op.transpose: (1x400x11xf32) <- (1x11x400xf32) + transpose_10 = paddle._C_ops.transpose(flatten_10, [0, 2, 1]) + del flatten_10 + + # pd_op.flatten: (1x32x400xf32) <- (1x32x20x20xf32) + flatten_11 = paddle._C_ops.flatten(add_17, 2, 3) + + # pd_op.transpose: (1x400x32xf32) <- (1x32x400xf32) + transpose_11 = paddle._C_ops.transpose(flatten_11, [0, 2, 1]) + del flatten_11 + + # pd_op.full: (1xf32) <- () + full_15 = paddle._C_ops.full( + [1], float("0.03125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x400x4xf32) <- (1x400x4xf32, 1xf32) + scale_17 = paddle._C_ops.scale(concat_9, full_15, float("0"), True) + del concat_9 + + # pd_op.shape64: (4xi64) <- (1x160x10x10xf32) + shape64_3 = paddle._C_ops.shape64(add_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, full_int_array_6, shape64_3 + + # pd_op.depthwise_conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x1x5x5xf32) + 
depthwise_conv2d_37 = paddle._C_ops.depthwise_conv2d( + add_4, parameter_60, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_60 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_37, + parameter_59, + parameter_58, + parameter_57, + parameter_56, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_56, parameter_57, parameter_58, parameter_59 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_84 = paddle._C_ops.hardswish(batch_norm__522) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + hardswish_84, parameter_55, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_55 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_54, + parameter_53, + parameter_52, + parameter_51, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_51, parameter_52, parameter_53, parameter_54 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_85 = paddle._C_ops.hardswish(batch_norm__528) + + # pd_op.depthwise_conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x1x5x5xf32) + depthwise_conv2d_38 = paddle._C_ops.depthwise_conv2d( + hardswish_85, parameter_50, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_50 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_38, + parameter_49, + parameter_48, + parameter_47, + parameter_46, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_46, parameter_47, parameter_48, parameter_49 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_86 = paddle._C_ops.hardswish(batch_norm__534) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + hardswish_86, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_44, + parameter_43, + parameter_42, + parameter_41, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, 
+ ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_41, parameter_42, parameter_43, parameter_44 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_87 = paddle._C_ops.hardswish(batch_norm__540) + + # pd_op.depthwise_conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x1x5x5xf32) + depthwise_conv2d_39 = paddle._C_ops.depthwise_conv2d( + hardswish_87, parameter_40, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_40 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_39, + parameter_39, + parameter_38, + parameter_37, + parameter_36, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_36, parameter_37, parameter_38, parameter_39 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_88 = paddle._C_ops.hardswish(batch_norm__546) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + hardswish_88, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_34, + parameter_33, + parameter_32, + parameter_31, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_31, parameter_32, parameter_33, parameter_34 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_89 = paddle._C_ops.hardswish(batch_norm__552) + + # pd_op.depthwise_conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x1x5x5xf32) + depthwise_conv2d_40 = paddle._C_ops.depthwise_conv2d( + hardswish_89, parameter_30, [1, 1], [2, 2], "EXPLICIT", 160, [1, 1], "NCHW" + ) + del parameter_30 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_40, + parameter_29, + parameter_28, + parameter_27, + parameter_26, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_26, parameter_27, parameter_28, parameter_29 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_90 = paddle._C_ops.hardswish(batch_norm__558) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_66 = paddle._C_ops.conv2d( + hardswish_90, parameter_25, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_25 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, 
-1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_24, + parameter_23, + parameter_22, + parameter_21, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_21, parameter_22, parameter_23, parameter_24 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_91 = paddle._C_ops.hardswish(batch_norm__564) + + # pd_op.pool2d: (1x160x1x1xf32) <- (1x160x10x10xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + hardswish_91, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x160x1x1xf32) <- (1x160x1x1xf32, 160x160x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + pool2d_5, parameter_20, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_20 + + # pd_op.reshape: (1x160x1x1xf32) <- (160xf32, 4xi64) + reshape_25 = paddle._C_ops.reshape(parameter_19, full_int_array_1) + del parameter_19 + + # pd_op.add: (1x160x1x1xf32) <- (1x160x1x1xf32, 1x160x1x1xf32) + add_20 = paddle._C_ops.add(conv2d_67, reshape_25) + + # pd_op.sigmoid: (1x160x1x1xf32) <- (1x160x1x1xf32) + sigmoid_9 = paddle._C_ops.sigmoid(add_20) + del add_20 + + # pd_op.multiply: (1x160x10x10xf32) <- (1x160x10x10xf32, 1x160x1x1xf32) + multiply_8 = paddle._C_ops.multiply(hardswish_91, sigmoid_9) + + # pd_op.conv2d: (1x160x10x10xf32) <- (1x160x10x10xf32, 160x160x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + multiply_8, parameter_18, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_18 + + # pd_op.batch_norm_: (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32, -1xui8) <- (1x160x10x10xf32, 160xf32, 160xf32, 160xf32, 160xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_17, + parameter_16, + parameter_15, + parameter_14, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_14, parameter_15, parameter_16, parameter_17 + + # pd_op.hardswish: (1x160x10x10xf32) <- (1x160x10x10xf32) + hardswish_92 = paddle._C_ops.hardswish(batch_norm__570) + + # pd_op.conv2d: (1x11x10x10xf32) <- (1x160x10x10xf32, 11x160x1x1xf32) + conv2d_69 = paddle._C_ops.conv2d( + hardswish_92, parameter_13, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x11x1x1xf32) <- (11xf32, 4xi64) + reshape_26 = paddle._C_ops.reshape(parameter_12, full_int_array_1) + del parameter_12 + + # pd_op.add: (1x11x10x10xf32) <- (1x11x10x10xf32, 1x11x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_69, reshape_26) + + # pd_op.conv2d: (1x32x10x10xf32) <- (1x160x10x10xf32, 32x160x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + hardswish_92, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(parameter_10, full_int_array_1) + del full_int_array_1, parameter_10 + + # pd_op.add: (1x32x10x10xf32) <- (1x32x10x10xf32, 1x32x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_70, reshape_27) + + # 
pd_op.conv2d: (1x1x10x10xf32) <- (1x160x10x10xf32, 1x160x5x5xf32) + conv2d_71 = paddle._C_ops.conv2d( + hardswish_91, parameter_9, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x1x10x10xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x10x10xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.hardswish: (1x1x10x10xf32) <- (1x1x10x10xf32) + hardswish_93 = paddle._C_ops.hardswish(batch_norm__576) + + # pd_op.conv2d: (1x1x10x10xf32) <- (1x1x10x10xf32, 1x1x1x1xf32) + conv2d_72 = paddle._C_ops.conv2d( + hardswish_93, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x1x10x10xf32, 1xf32, 1xf32, 1xf32, 1xf32, -1xui8) <- (1x1x10x10xf32, 1xf32, 1xf32, 1xf32, 1xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.sigmoid: (1x1x10x10xf32) <- (1x1x10x10xf32) + sigmoid_10 = paddle._C_ops.sigmoid(batch_norm__582) + del batch_norm__582 + + # pd_op.sigmoid: (1x11x10x10xf32) <- (1x11x10x10xf32) + sigmoid_11 = paddle._C_ops.sigmoid(add_21) + del add_21 + + # pd_op.multiply: (1x11x10x10xf32) <- (1x11x10x10xf32, 1x1x10x10xf32) + multiply_9 = paddle._C_ops.multiply(sigmoid_11, sigmoid_10) + + # pd_op.scale: (1x11x10x10xf32) <- (1x11x10x10xf32, 1xf32) + scale_18 = paddle._C_ops.scale(multiply_9, full_1, float("1e-09"), True) + del multiply_9 + + # pd_op.sqrt: (1x11x10x10xf32) <- (1x11x10x10xf32) + sqrt_3 = paddle._C_ops.sqrt(scale_18) + del scale_18 + + # pd_op.transpose: (1x10x10x11xf32) <- (1x11x10x10xf32) + transpose_12 = paddle._C_ops.transpose(sqrt_3, [0, 2, 3, 1]) + + # pd_op.transpose: (1x10x10x32xf32) <- (1x32x10x10xf32) + transpose_13 = paddle._C_ops.transpose(add_22, [0, 2, 3, 1]) + + # pd_op.full: (1xf64) <- () + full_16 = paddle._C_ops.full( + [1], float("10"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (10xf32) <- (1xf64, 1xf64, 1xf64) + arange_3 = paddle.arange(full_2, full_16, full_4, dtype="float32") + del full_16, full_2, full_4 + + # pd_op.scale: (10xf32) <- (10xf32, 1xf32) + scale_19 = paddle._C_ops.scale(arange_3, full_1, float("0.5"), True) + del arange_3 + + # pd_op.full: (1xf32) <- () + full_17 = paddle._C_ops.full( + [1], float("64"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_26 = full_17 + + # pd_op.scale: (10xf32) <- (10xf32, 1xf32) + scale_20 = paddle._C_ops.scale(scale_19, full_17, float("0"), True) + del scale_19 + + # builtin.combine: ([10xf32, 10xf32]) <- (10xf32, 10xf32) + combine_13 = [scale_20, scale_20] + del scale_20 + + # pd_op.meshgrid: ([10x10xf32, 10x10xf32]) <- ([10xf32, 10xf32]) + meshgrid_3 = 
paddle._C_ops.meshgrid(combine_13) + del combine_13 + + # builtin.split: (10x10xf32, 10x10xf32) <- ([10x10xf32, 10x10xf32]) + ( + split_12, + split_13, + ) = meshgrid_3 + del meshgrid_3 + + # pd_op.flatten: (100xf32) <- (10x10xf32) + flatten_12 = paddle._C_ops.flatten(split_12, 0, 1) + del split_12 + + # pd_op.flatten: (100xf32) <- (10x10xf32) + flatten_13 = paddle._C_ops.flatten(split_13, 0, 1) + del split_13 + + # builtin.combine: ([100xf32, 100xf32]) <- (100xf32, 100xf32) + combine_14 = [flatten_13, flatten_12] + del flatten_12, flatten_13 + + # pd_op.stack: (100x2xf32) <- ([100xf32, 100xf32]) + stack_3 = paddle._C_ops.stack(combine_14, -1) + del combine_14 + + # pd_op.reshape: (1x100x11xf32) <- (1x10x10x11xf32, 3xi64) + reshape_28 = paddle._C_ops.reshape(transpose_12, full_int_array_7) + del full_int_array_7, transpose_12 + + # pd_op.reshape: (400x8xf32) <- (1x10x10x32xf32, 2xi64) + reshape_29 = paddle._C_ops.reshape(transpose_13, full_int_array_8) + del full_int_array_8 + + # pd_op.softmax: (400x8xf32) <- (400x8xf32) + softmax_3 = paddle._C_ops.softmax(reshape_29, 1) + del reshape_29 + + # pd_op.matmul: (400xf32) <- (400x8xf32, 8xf32) + matmul_3 = paddle._C_ops.matmul(softmax_3, data_1, False, False) + del data_1 + + # pd_op.reshape: (100x4xf32) <- (400xf32, 2xi64) + reshape_30 = paddle._C_ops.reshape(matmul_3, full_int_array_9) + del full_int_array_9 + + # pd_op.scale: (100x4xf32) <- (100x4xf32, 1xf32) + scale_21 = paddle._C_ops.scale(reshape_30, full_17, float("0"), True) + del full_17, reshape_30 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_13 = [1, 100, 4] + + # pd_op.reshape: (1x100x4xf32) <- (100x4xf32, 3xi64) + reshape_31 = paddle._C_ops.reshape(scale_21, full_int_array_13) + del full_int_array_13 + + # pd_op.split_with_num: ([1x100x2xf32, 1x100x2xf32]) <- (1x100x4xf32, 1xi32) + split_with_num_3 = paddle._C_ops.split_with_num(reshape_31, 2, full_6) + del reshape_31 + + # builtin.split: (1x100x2xf32, 1x100x2xf32) <- ([1x100x2xf32, 1x100x2xf32]) + ( + split_14, + split_15, + ) = split_with_num_3 + del split_with_num_3 + + # pd_op.scale: (1x100x2xf32) <- (1x100x2xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_14, full_7, float("0"), True) + del split_14 + + # pd_op.add: (1x100x2xf32) <- (1x100x2xf32, 100x2xf32) + add_23 = paddle._C_ops.add(scale_22, stack_3) + + # pd_op.add: (1x100x2xf32) <- (1x100x2xf32, 100x2xf32) + add_24 = paddle._C_ops.add(split_15, stack_3) + + # builtin.combine: ([1x100x2xf32, 1x100x2xf32]) <- (1x100x2xf32, 1x100x2xf32) + combine_15 = [add_23, add_24] + + # pd_op.concat: (1x100x4xf32) <- ([1x100x2xf32, 1x100x2xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # pd_op.flatten: (1x11x100xf32) <- (1x11x10x10xf32) + flatten_14 = paddle._C_ops.flatten(sqrt_3, 2, 3) + + # pd_op.transpose: (1x100x11xf32) <- (1x11x100xf32) + transpose_14 = paddle._C_ops.transpose(flatten_14, [0, 2, 1]) + del flatten_14 + + # pd_op.flatten: (1x32x100xf32) <- (1x32x10x10xf32) + flatten_15 = paddle._C_ops.flatten(add_22, 2, 3) + + # pd_op.transpose: (1x100x32xf32) <- (1x32x100xf32) + transpose_15 = paddle._C_ops.transpose(flatten_15, [0, 2, 1]) + del flatten_15 + + # pd_op.full: (1xf32) <- () + full_18 = paddle._C_ops.full( + [1], float("0.015625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x100x4xf32) <- (1x100x4xf32, 1xf32) + scale_23 = paddle._C_ops.scale(concat_10, full_18, float("0"), True) + del concat_10 + + # builtin.combine: ([1x6400x11xf32, 1x1600x11xf32, 1x400x11xf32, 1x100x11xf32]) <- 
(1x6400x11xf32, 1x1600x11xf32, 1x400x11xf32, 1x100x11xf32) + combine_16 = [transpose_2, transpose_6, transpose_10, transpose_14] + + # pd_op.concat: (1x8500x11xf32) <- ([1x6400x11xf32, 1x1600x11xf32, 1x400x11xf32, 1x100x11xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_16, full_0) + del combine_16 + + # builtin.combine: ([1x6400x4xf32, 1x1600x4xf32, 1x400x4xf32, 1x100x4xf32]) <- (1x6400x4xf32, 1x1600x4xf32, 1x400x4xf32, 1x100x4xf32) + combine_17 = [scale_5, scale_11, scale_17, scale_23] + + # pd_op.concat: (1x8500x4xf32) <- ([1x6400x4xf32, 1x1600x4xf32, 1x400x4xf32, 1x100x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_17, full_0) + del combine_17 + + # builtin.combine: ([1x6400x32xf32, 1x1600x32xf32, 1x400x32xf32, 1x100x32xf32]) <- (1x6400x32xf32, 1x1600x32xf32, 1x400x32xf32, 1x100x32xf32) + combine_18 = [transpose_3, transpose_7, transpose_11, transpose_15] + + # pd_op.concat: (1x8500x32xf32) <- ([1x6400x32xf32, 1x1600x32xf32, 1x400x32xf32, 1x100x32xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_18, full_0) + del ( + add_12, + add_13, + add_14, + add_17, + add_18, + add_19, + add_22, + add_23, + add_24, + add_4, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + 
batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + 
batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + 
batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + combine_18, + concat_3, + concat_4, + concat_5, + concat_6, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_8, + conv2d_9, + depthwise_conv2d_0, + depthwise_conv2d_1, + depthwise_conv2d_10, + depthwise_conv2d_11, + depthwise_conv2d_12, + depthwise_conv2d_13, + depthwise_conv2d_14, + depthwise_conv2d_15, + depthwise_conv2d_16, + depthwise_conv2d_17, + depthwise_conv2d_18, + depthwise_conv2d_19, + depthwise_conv2d_2, + depthwise_conv2d_20, + depthwise_conv2d_21, + depthwise_conv2d_22, + depthwise_conv2d_23, + depthwise_conv2d_24, + depthwise_conv2d_25, + depthwise_conv2d_26, + depthwise_conv2d_27, + depthwise_conv2d_28, + depthwise_conv2d_29, + depthwise_conv2d_3, + depthwise_conv2d_30, + depthwise_conv2d_31, + depthwise_conv2d_32, + depthwise_conv2d_33, + depthwise_conv2d_34, + depthwise_conv2d_35, + 
depthwise_conv2d_36, + depthwise_conv2d_37, + depthwise_conv2d_38, + depthwise_conv2d_39, + depthwise_conv2d_4, + depthwise_conv2d_40, + depthwise_conv2d_5, + depthwise_conv2d_6, + depthwise_conv2d_7, + depthwise_conv2d_8, + depthwise_conv2d_9, + full_0, + full_1, + full_12, + full_15, + full_18, + full_6, + full_7, + full_8, + full_9, + full_int_array_0, + hardsigmoid_0, + hardsigmoid_1, + hardswish_0, + hardswish_1, + hardswish_10, + hardswish_11, + hardswish_12, + hardswish_13, + hardswish_14, + hardswish_15, + hardswish_16, + hardswish_17, + hardswish_18, + hardswish_19, + hardswish_2, + hardswish_20, + hardswish_21, + hardswish_22, + hardswish_23, + hardswish_24, + hardswish_25, + hardswish_26, + hardswish_27, + hardswish_28, + hardswish_29, + hardswish_3, + hardswish_30, + hardswish_31, + hardswish_32, + hardswish_33, + hardswish_34, + hardswish_35, + hardswish_36, + hardswish_37, + hardswish_38, + hardswish_39, + hardswish_4, + hardswish_40, + hardswish_41, + hardswish_42, + hardswish_43, + hardswish_44, + hardswish_45, + hardswish_46, + hardswish_47, + hardswish_48, + hardswish_49, + hardswish_5, + hardswish_50, + hardswish_51, + hardswish_52, + hardswish_53, + hardswish_54, + hardswish_55, + hardswish_56, + hardswish_57, + hardswish_58, + hardswish_59, + hardswish_6, + hardswish_60, + hardswish_61, + hardswish_62, + hardswish_63, + hardswish_64, + hardswish_65, + hardswish_66, + hardswish_67, + hardswish_68, + hardswish_69, + hardswish_7, + hardswish_70, + hardswish_71, + hardswish_72, + hardswish_73, + hardswish_74, + hardswish_75, + hardswish_76, + hardswish_77, + hardswish_78, + hardswish_79, + hardswish_8, + hardswish_80, + hardswish_81, + hardswish_82, + hardswish_83, + hardswish_84, + hardswish_85, + hardswish_86, + hardswish_87, + hardswish_88, + hardswish_89, + hardswish_9, + hardswish_90, + hardswish_91, + hardswish_92, + hardswish_93, + matmul_0, + matmul_1, + matmul_2, + matmul_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_6, + multiply_8, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + pool2d_3, + pool2d_4, + pool2d_5, + relu_0, + relu_1, + reshape_0, + reshape_1, + reshape_11, + reshape_12, + reshape_13, + reshape_18, + reshape_19, + reshape_2, + reshape_20, + reshape_25, + reshape_26, + reshape_27, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + scale_10, + scale_11, + scale_15, + scale_16, + scale_17, + scale_21, + scale_22, + scale_23, + scale_3, + scale_4, + scale_5, + scale_9, + sigmoid_0, + sigmoid_1, + sigmoid_10, + sigmoid_11, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + sigmoid_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + split_11, + split_15, + split_3, + split_7, + sqrt_0, + sqrt_1, + sqrt_2, + sqrt_3, + stack_0, + stack_1, + stack_2, + stack_3, + transpose_1, + transpose_10, + transpose_11, + transpose_13, + transpose_14, + transpose_15, + transpose_2, + transpose_3, + transpose_5, + transpose_6, + transpose_7, + transpose_9, + ) + + return concat_0, concat_1, concat_2 diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/weight_meta.py new file mode 100644 index 000000000..37cbeda81 --- /dev/null +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/weight_meta.py @@ -0,0 +1,5620 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.451595") + max_val = float("0.4021") + mean = float("-0.00153001") + std = float("0.0380716") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-0.904308") + max_val = float("0.92502") + mean = float("-4.96977e-07") + std = float("0.121521") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.037527") + max_val = float("0.0283868") + mean = float("-0.000232368") + std = float("0.00991407") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [160] + dtype = "float32" + min_val = float("-1.07178") + max_val = float("2.09902") + mean = float("0.324677") + std = float("0.396898") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [160] + dtype = "float32" + min_val = float("0.243497") + max_val = float("2.77917") + mean = float("1.22535") + std = float("0.461906") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [160] + dtype = "float32" + min_val = float("0.0120656") + max_val = float("1.23335") + mean = float("0.149636") + std = float("0.218916") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [160] + dtype = "float32" + min_val = float("-0.454949") + max_val = float("0.459922") + mean = float("-0.0482915") + std = float("0.108747") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.715149") + max_val = float("0.558621") + mean = float("-0.00170533") + std = 
float("0.0452848") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [160] + dtype = "float32" + min_val = float("-0.075504") + max_val = float("0.0744955") + mean = float("-0.00117183") + std = float("0.0264521") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.0921045") + max_val = float("0.0880409") + mean = float("-0.000424996") + std = float("0.0094623") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [160] + dtype = "float32" + min_val = float("-1.84976") + max_val = float("0.692481") + mean = float("-0.135277") + std = float("0.355856") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [160] + dtype = "float32" + min_val = float("0.712962") + max_val = float("2.80358") + mean = float("1.41886") + std = float("0.405433") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [160] + dtype = "float32" + min_val = float("0.0718094") + max_val = float("2.30718") + mean = float("0.483394") + std = float("0.380966") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [160] + dtype = "float32" + min_val = float("-1.07675") + max_val = float("0.605769") + mean = float("-0.0895075") + std = float("0.30059") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.593163") + max_val = float("0.473895") + mean = float("-0.000843045") + std = float("0.0461322") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [160] + dtype = "float32" + min_val = float("-0.4421") + max_val = float("1.22088") + mean = float("0.291084") + std = float("0.346341") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [160] + dtype = "float32" + min_val = float("0.639288") + max_val = float("2.66534") + mean = float("1.19184") + std = float("0.338603") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [160] + dtype = "float32" + min_val = float("0.00400687") + max_val = float("2.51969") + mean = float("0.12224") + std = float("0.281775") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [160] + dtype = "float32" + min_val = float("-0.428301") + max_val = float("1.35734") + mean = float("-0.0244204") + std = float("0.192488") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.677757") + max_val = float("0.719355") + mean = float("-0.00660828") + std = float("0.0668298") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [160] + dtype = "float32" + min_val = float("-1.07362") + max_val = float("1.37528") + mean = float("0.0331456") + std = float("0.326679") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [160] + dtype = "float32" + min_val = float("0.292729") + max_val = float("2.19387") + mean = float("1.14408") + std = float("0.323398") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [160] + dtype = "float32" + min_val = float("0.0863873") + max_val = float("2.82279") + mean = float("0.445941") 
+ std = float("0.362833") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [160] + dtype = "float32" + min_val = float("-0.59466") + max_val = float("0.41755") + mean = float("-0.082489") + std = float("0.182495") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.71084") + max_val = float("0.405466") + mean = float("-0.00136743") + std = float("0.0437224") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [160] + dtype = "float32" + min_val = float("-0.360475") + max_val = float("0.890855") + mean = float("0.1652") + std = float("0.243673") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [160] + dtype = "float32" + min_val = float("0.610552") + max_val = float("2.44241") + mean = float("1.22628") + std = float("0.265486") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [160] + dtype = "float32" + min_val = float("0.0063109") + max_val = float("1.61363") + mean = float("0.115386") + std = float("0.171466") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [160] + dtype = "float32" + min_val = float("-0.587491") + max_val = float("0.963188") + mean = float("-0.0189979") + std = float("0.171152") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.381561") + max_val = float("0.415697") + mean = float("-0.00622026") + std = float("0.0625368") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [160] + dtype = "float32" + min_val = float("-0.663137") + max_val = float("1.33511") + mean = float("0.100284") + std = float("0.237486") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [160] + dtype = "float32" + min_val = float("0.390136") + max_val = float("2.14002") + mean = float("1.15866") + std = float("0.284741") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [160] + dtype = "float32" + min_val = float("0.090557") + max_val = float("0.974722") + mean = float("0.335565") + std = float("0.16787") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [160] + dtype = "float32" + min_val = float("-0.704229") + max_val = float("0.558713") + mean = float("-0.120141") + std = float("0.212415") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.510218") + max_val = float("0.374873") + mean = float("-0.00228796") + std = float("0.0421229") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [160] + dtype = "float32" + min_val = float("-0.543997") + max_val = float("0.800116") + mean = float("0.134102") + std = float("0.23574") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [160] + dtype = "float32" + min_val = float("0.743635") + max_val = float("1.94234") + mean = float("1.19489") + std = float("0.236186") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [160] + dtype = "float32" + min_val = float("0.00525689") + max_val = float("0.691049") + mean = float("0.104943") 
+ std = float("0.112071") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [160] + dtype = "float32" + min_val = float("-0.45391") + max_val = float("0.508523") + mean = float("-0.0294124") + std = float("0.127138") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.367578") + max_val = float("0.410874") + mean = float("-0.00761977") + std = float("0.0621169") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [160] + dtype = "float32" + min_val = float("-0.383412") + max_val = float("0.833254") + mean = float("0.0922629") + std = float("0.209171") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [160] + dtype = "float32" + min_val = float("0.252379") + max_val = float("1.86165") + mean = float("1.12765") + std = float("0.309058") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [160] + dtype = "float32" + min_val = float("0.123464") + max_val = float("1.24346") + mean = float("0.407322") + std = float("0.213542") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [160] + dtype = "float32" + min_val = float("-0.832224") + max_val = float("0.18598") + mean = float("-0.23071") + std = float("0.21175") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.415382") + max_val = float("0.355132") + mean = float("-0.00417828") + std = float("0.0420669") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [160] + dtype = "float32" + min_val = float("-0.422267") + max_val = float("0.622013") + mean = float("0.0532275") + std = float("0.178263") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [160] + dtype = "float32" + min_val = float("0.901004") + max_val = float("1.87902") + mean = float("1.23585") + std = float("0.186922") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [160] + dtype = "float32" + min_val = float("0.00014717") + max_val = float("0.00855975") + mean = float("0.00153716") + std = float("0.00151852") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [160] + dtype = "float32" + min_val = float("-0.0650518") + max_val = float("0.0690878") + mean = float("0.00209802") + std = float("0.0163539") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.459548") + max_val = float("0.393957") + mean = float("0.000524514") + std = float("0.0636449") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.424547") + max_val = float("0.422857") + mean = float("-0.00179283") + std = float("0.0369799") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-1.08544") + max_val = float("0.977937") + mean = float("1.52271e-06") + std = float("0.118597") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.0360191") + max_val = float("0.0314712") + mean = float("9.6451e-05") + std = float("0.00971237") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [160] + dtype = "float32" + min_val = float("-0.659654") + max_val = float("2.93605") + mean = float("0.350705") + std = float("0.462014") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [160] + dtype = "float32" + min_val = float("0.163972") + max_val = float("2.88255") + mean = float("1.06427") + std = float("0.437573") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [160] + dtype = "float32" + min_val = float("0.0136787") + max_val = float("1.63874") + mean = float("0.17972") + std = float("0.242427") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [160] + dtype = "float32" + min_val = float("-0.54537") + max_val = float("0.565826") + mean = float("-0.0720522") + std = float("0.118768") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.726226") + max_val = float("0.672749") + mean = float("-0.00225032") + std = float("0.0438389") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [160] + dtype = "float32" + min_val = float("-0.0909312") + max_val = float("0.0646948") + mean = float("-0.00104904") + std = float("0.0246789") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.12545") + max_val = 
float("0.14525") + mean = float("0.000481838") + std = float("0.010268") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [160] + dtype = "float32" + min_val = float("-0.876391") + max_val = float("0.674369") + mean = float("-0.0779286") + std = float("0.308067") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [160] + dtype = "float32" + min_val = float("0.781682") + max_val = float("2.90871") + mean = float("1.37239") + std = float("0.381122") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [160] + dtype = "float32" + min_val = float("0.0634601") + max_val = float("2.11083") + mean = float("0.367788") + std = float("0.322431") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [160] + dtype = "float32" + min_val = float("-0.892735") + max_val = float("0.802678") + mean = float("-0.0977457") + std = float("0.286664") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.520488") + max_val = float("0.472961") + mean = float("-0.000518472") + std = float("0.0439164") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [160] + dtype = "float32" + min_val = float("-0.563762") + max_val = float("1.27755") + mean = float("0.259427") + std = float("0.315983") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [160] + dtype = "float32" + min_val = float("0.625377") + max_val = float("2.90957") + mean = float("1.15463") + std = float("0.306727") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [160] + dtype = "float32" + min_val = float("0.00377191") + max_val = float("0.69521") + mean = float("0.0969833") + std = float("0.0995961") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [160] + dtype = "float32" + min_val = float("-0.598259") + max_val = float("0.530976") + mean = float("-0.0246196") + std = float("0.125785") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.645252") + max_val = float("0.479844") + mean = float("-0.00523488") + std = float("0.0623693") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [160] + dtype = "float32" + min_val = float("-1.04883") + max_val = float("1.0152") + mean = float("0.0902345") + std = float("0.288197") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [160] + dtype = "float32" + min_val = float("0.105183") + max_val = float("2.14707") + mean = float("1.06571") + std = float("0.312024") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [160] + dtype = "float32" + min_val = float("0.0561215") + max_val = float("1.91184") + mean = float("0.281873") + std = float("0.218096") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [160] + dtype = "float32" + min_val = float("-0.725109") + max_val = float("0.296995") + mean = float("-0.0687759") + std = float("0.17733") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.497628") + 
max_val = float("0.543799") + mean = float("-0.00114554") + std = float("0.0408437") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [160] + dtype = "float32" + min_val = float("-0.328221") + max_val = float("0.834667") + mean = float("0.159012") + std = float("0.20757") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [160] + dtype = "float32" + min_val = float("0.673087") + max_val = float("2.33987") + mean = float("1.10291") + std = float("0.246948") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [160] + dtype = "float32" + min_val = float("0.00797692") + max_val = float("0.667263") + mean = float("0.0840242") + std = float("0.0943149") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [160] + dtype = "float32" + min_val = float("-0.230316") + max_val = float("0.519622") + mean = float("-0.00854647") + std = float("0.115537") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.410639") + max_val = float("0.435192") + mean = float("-0.00602292") + std = float("0.0589259") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [160] + dtype = "float32" + min_val = float("-0.399247") + max_val = float("0.831806") + mean = float("0.0927125") + std = float("0.188308") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [160] + dtype = "float32" + min_val = float("0.366654") + max_val = float("1.70207") + mean = float("1.0428") + std = float("0.247225") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [160] + dtype = "float32" + min_val = float("0.0835412") + max_val = float("0.96566") + mean = float("0.259268") + std = float("0.141973") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [160] + dtype = "float32" + min_val = float("-0.805579") + max_val = float("0.387976") + mean = float("-0.105415") + std = float("0.188792") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.458535") + max_val = float("0.461653") + mean = float("-0.00186406") + std = float("0.038434") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [160] + dtype = "float32" + min_val = float("-0.38244") + max_val = float("0.922934") + mean = float("0.149185") + std = float("0.186953") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [160] + dtype = "float32" + min_val = float("0.660535") + max_val = float("1.90937") + mean = float("1.09193") + std = float("0.235067") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [160] + dtype = "float32" + min_val = float("0.006284") + max_val = float("1.27531") + mean = float("0.0766978") + std = float("0.132208") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [160] + dtype = "float32" + min_val = float("-0.199407") + max_val = float("0.88179") + mean = float("-0.0168355") + std = float("0.118902") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [160, 1, 5, 5] + dtype = "float32" + 
min_val = float("-0.311941") + max_val = float("0.562424") + mean = float("-0.00621571") + std = float("0.0568137") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [160] + dtype = "float32" + min_val = float("-0.287733") + max_val = float("1.16097") + mean = float("0.132298") + std = float("0.183646") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [160] + dtype = "float32" + min_val = float("0.211215") + max_val = float("1.82402") + mean = float("1.00369") + std = float("0.242147") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [160] + dtype = "float32" + min_val = float("0.0477814") + max_val = float("0.674193") + mean = float("0.178926") + std = float("0.109224") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [160] + dtype = "float32" + min_val = float("-0.600465") + max_val = float("0.406075") + mean = float("-0.0977937") + std = float("0.184528") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.438028") + max_val = float("0.387091") + mean = float("-0.00204067") + std = float("0.0365478") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [160] + dtype = "float32" + min_val = float("-0.132661") + max_val = float("0.957095") + mean = float("0.21293") + std = float("0.187728") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [160] + dtype = "float32" + min_val = float("0.549459") + max_val = float("1.65925") + mean = float("0.94969") + std = float("0.19558") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [160] + dtype = "float32" + min_val = float("0.00796613") + max_val = float("0.54475") + mean = float("0.0653861") + std = float("0.064643") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [160] + dtype = "float32" + min_val = float("-0.410477") + max_val = float("0.327404") + mean = float("-0.0203161") + std = float("0.109798") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.355665") + max_val = float("0.485326") + mean = float("-0.00646148") + std = float("0.0551162") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.418647") + max_val = float("0.287767") + mean = float("-0.000909641") + std = float("0.0370886") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-1.00723") + max_val = float("0.946177") + mean = float("-1.38069e-07") + std = float("0.112668") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.033657") + max_val = float("0.0334276") + mean = float("2.08047e-05") + std = float("0.00976998") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [160] + dtype = "float32" + min_val = float("-0.473108") + max_val = float("2.28162") + mean = float("0.455859") + std = float("0.429429") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [160] + dtype = "float32" + min_val = float("0.153133") + max_val = float("2.7783") + mean = float("1.06032") + std = float("0.46014") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [160] + dtype = "float32" + min_val = float("0.0290607") + max_val = float("1.55834") + mean = float("0.18257") + std = float("0.248202") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [160] + dtype = "float32" + min_val = float("-0.524463") + max_val = float("0.316494") + mean = float("-0.0793429") + std = float("0.120318") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.595928") + max_val = float("0.526811") + mean = float("-0.00444629") + std = float("0.0427452") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [160] + dtype = "float32" + min_val = float("-0.0941816") + max_val = float("0.0442137") + mean = float("-0.00182824") + std = float("0.0210691") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.222857") + max_val = float("0.13285") + mean = float("4.68833e-05") + std = float("0.0099875") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [160] + dtype = "float32" + min_val = float("-1.02208") + max_val = float("0.824311") + mean = float("0.00061999") + std = float("0.267635") + data = None + + +class 
Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [160] + dtype = "float32" + min_val = float("0.768213") + max_val = float("2.44401") + mean = float("1.30908") + std = float("0.278141") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [160] + dtype = "float32" + min_val = float("0.0735944") + max_val = float("2.1033") + mean = float("0.365304") + std = float("0.325035") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [160] + dtype = "float32" + min_val = float("-0.966134") + max_val = float("0.600229") + mean = float("-0.0942881") + std = float("0.286947") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.445389") + max_val = float("0.623432") + mean = float("-0.00143505") + std = float("0.0421979") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [160] + dtype = "float32" + min_val = float("-0.504626") + max_val = float("1.17406") + mean = float("0.241024") + std = float("0.263767") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [160] + dtype = "float32" + min_val = float("0.630852") + max_val = float("1.86668") + mean = float("1.13745") + std = float("0.273988") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [160] + dtype = "float32" + min_val = float("0.00695502") + max_val = float("1.92176") + mean = float("0.115302") + std = float("0.199392") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [160] + dtype = "float32" + min_val = float("-0.30106") + max_val = float("0.971667") + mean = float("0.00934262") + std = float("0.16052") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.31542") + max_val = float("0.384015") + mean = float("-0.00209939") + std = float("0.0552345") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [160] + dtype = "float32" + min_val = float("-0.503915") + max_val = float("1.21486") + mean = float("0.137873") + std = float("0.263259") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [160] + dtype = "float32" + min_val = float("0.172305") + max_val = float("1.70792") + mean = float("1.03854") + std = float("0.206767") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [160] + dtype = "float32" + min_val = float("0.0619054") + max_val = float("1.17397") + mean = float("0.253932") + std = float("0.186143") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [160] + dtype = "float32" + min_val = float("-0.646024") + max_val = float("0.523699") + mean = float("-0.0381296") + std = float("0.194742") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.424882") + max_val = float("0.534564") + mean = float("-0.00104379") + std = float("0.0389634") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [160] + dtype = "float32" + min_val = float("-0.234147") + max_val = float("0.731624") + mean = float("0.180003") + std = 
float("0.184631") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [160] + dtype = "float32" + min_val = float("0.625239") + max_val = float("2.02582") + mean = float("1.05933") + std = float("0.217853") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [160] + dtype = "float32" + min_val = float("0.00715657") + max_val = float("0.364319") + mean = float("0.0670786") + std = float("0.0607904") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [160] + dtype = "float32" + min_val = float("-0.321914") + max_val = float("0.325862") + mean = float("-0.00838343") + std = float("0.0961498") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.294619") + max_val = float("0.369225") + mean = float("-0.00270723") + std = float("0.0523691") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [160] + dtype = "float32" + min_val = float("-0.724822") + max_val = float("0.621359") + mean = float("0.132312") + std = float("0.17771") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [160] + dtype = "float32" + min_val = float("0.203459") + max_val = float("1.53051") + mean = float("0.979382") + std = float("0.21821") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [160] + dtype = "float32" + min_val = float("0.0695517") + max_val = float("0.699871") + mean = float("0.222173") + std = float("0.119696") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [160] + dtype = "float32" + min_val = float("-0.583648") + max_val = float("0.663122") + mean = float("-0.0243311") + std = float("0.193725") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.392661") + max_val = float("0.438802") + mean = float("-0.000840405") + std = float("0.0359887") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [160] + dtype = "float32" + min_val = float("-0.253081") + max_val = float("0.894769") + mean = float("0.151453") + std = float("0.187756") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [160] + dtype = "float32" + min_val = float("0.617366") + max_val = float("1.68347") + mean = float("1.0562") + std = float("0.193085") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [160] + dtype = "float32" + min_val = float("0.00335752") + max_val = float("1.02094") + mean = float("0.0584978") + std = float("0.0994048") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [160] + dtype = "float32" + min_val = float("-0.456837") + max_val = float("0.551568") + mean = float("-0.00975177") + std = float("0.0988193") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.334559") + max_val = float("0.425739") + mean = float("-0.00337037") + std = float("0.0508129") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [160] + dtype = "float32" + min_val = float("-0.176337") + max_val = 
float("0.738155") + mean = float("0.139002") + std = float("0.154649") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [160] + dtype = "float32" + min_val = float("0.214419") + max_val = float("1.54533") + mean = float("0.96656") + std = float("0.211777") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [160] + dtype = "float32" + min_val = float("0.0409094") + max_val = float("0.763185") + mean = float("0.177379") + std = float("0.113609") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [160] + dtype = "float32" + min_val = float("-0.616574") + max_val = float("0.332375") + mean = float("-0.0967199") + std = float("0.173105") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.395768") + max_val = float("0.340646") + mean = float("-0.00226353") + std = float("0.0347849") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [160] + dtype = "float32" + min_val = float("-0.0645622") + max_val = float("1.14964") + mean = float("0.204171") + std = float("0.170273") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [160] + dtype = "float32" + min_val = float("0.592415") + max_val = float("1.50928") + mean = float("0.95") + std = float("0.161297") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [160] + dtype = "float32" + min_val = float("0.00192433") + max_val = float("0.55464") + mean = float("0.0550412") + std = float("0.0616066") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [160] + dtype = "float32" + min_val = float("-0.37724") + max_val = float("0.208286") + mean = float("-0.0179695") + std = float("0.0912875") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.320486") + max_val = float("0.414527") + mean = float("-0.00385979") + std = float("0.0517184") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [1, 1, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [1] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [1, 160, 5, 5] + dtype = "float32" + min_val = float("-0.204577") + max_val = float("0.361338") + mean = float("-0.0019917") + std = float("0.0371112") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [32, 160, 1, 1] + dtype = "float32" + min_val = float("-1.04588") + max_val = float("1.01359") + mean = float("-4.91273e-08") + std = float("0.103596") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [11] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [11, 160, 1, 1] + dtype = "float32" + min_val = float("-0.0295732") + max_val = float("0.0368174") + mean = float("0.00019137") + std = float("0.00972313") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [160] + dtype = "float32" + min_val = float("-0.40461") + max_val = float("3.01835") + mean = float("0.510894") + std = float("0.553245") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [160] + dtype = "float32" + min_val = float("0.211663") + max_val = float("3.17107") + mean = float("0.94806") + std = float("0.462321") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [160] + dtype = "float32" + min_val = float("0.0232397") + max_val = float("1.43855") + mean = float("0.154275") + std = float("0.191715") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [160] + dtype = "float32" + min_val = float("-0.662656") + max_val = float("0.18405") + mean = float("-0.142926") + std = float("0.152868") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.776612") + max_val = float("0.511522") + mean = float("-0.00557876") + std = float("0.0436141") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [160] + dtype = "float32" + min_val = float("-0.0783832") + max_val = float("0.0532411") + mean = float("-0.00183671") + std = float("0.0204537") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.298999") + max_val = float("0.45266") + mean = float("-5.20553e-05") + std = float("0.0138978") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [160] + dtype = "float32" + min_val = float("-0.615482") + max_val = float("0.868905") + mean = float("0.0610547") + std = float("0.300753") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [160] + dtype = "float32" + min_val = float("0.716882") + max_val = float("2.35799") + mean = float("1.29558") + std = float("0.267605") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = 
[160] + dtype = "float32" + min_val = float("0.083188") + max_val = float("2.65914") + mean = float("0.372032") + std = float("0.333327") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [160] + dtype = "float32" + min_val = float("-1.12435") + max_val = float("1.16845") + mean = float("-0.181779") + std = float("0.348974") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.419544") + max_val = float("0.585558") + mean = float("-0.00221699") + std = float("0.0441222") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [160] + dtype = "float32" + min_val = float("-0.362492") + max_val = float("1.69386") + mean = float("0.338421") + std = float("0.401561") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [160] + dtype = "float32" + min_val = float("0.413909") + max_val = float("2.33426") + mean = float("1.04926") + std = float("0.317777") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [160] + dtype = "float32" + min_val = float("0.00224634") + max_val = float("1.17978") + mean = float("0.110018") + std = float("0.147794") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [160] + dtype = "float32" + min_val = float("-0.574895") + max_val = float("0.79606") + mean = float("0.0173074") + std = float("0.148241") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.336353") + max_val = float("0.468909") + mean = float("0.000810021") + std = float("0.0546926") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [160] + dtype = "float32" + min_val = float("-0.872403") + max_val = float("1.08374") + mean = float("0.181147") + std = float("0.278209") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [160] + dtype = "float32" + min_val = float("0.175234") + max_val = float("1.5892") + mean = float("0.975909") + std = float("0.226788") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [160] + dtype = "float32" + min_val = float("0.0591667") + max_val = float("1.02978") + mean = float("0.273829") + std = float("0.167329") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [160] + dtype = "float32" + min_val = float("-0.663119") + max_val = float("0.346237") + mean = float("-0.0510886") + std = float("0.183831") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.375691") + max_val = float("0.52028") + mean = float("-0.00145965") + std = float("0.0411824") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [160] + dtype = "float32" + min_val = float("-1.38307") + max_val = float("1.0455") + mean = float("0.20405") + std = float("0.252161") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [160] + dtype = "float32" + min_val = float("0.449775") + max_val = float("3.8715") + mean = float("1.05138") + std = float("0.361934") + data = None + + +class Program_weight_tensor_parameter_221: + name = 
"parameter_221" + shape = [160] + dtype = "float32" + min_val = float("0.00711313") + max_val = float("0.543542") + mean = float("0.0704574") + std = float("0.0705625") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [160] + dtype = "float32" + min_val = float("-0.251437") + max_val = float("0.245636") + mean = float("0.00937719") + std = float("0.10298") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.300366") + max_val = float("0.444968") + mean = float("-0.00141294") + std = float("0.053944") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [160] + dtype = "float32" + min_val = float("-1.11267") + max_val = float("0.713805") + mean = float("0.135969") + std = float("0.235432") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [160] + dtype = "float32" + min_val = float("0.217956") + max_val = float("1.56459") + mean = float("0.970716") + std = float("0.205923") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [160] + dtype = "float32" + min_val = float("0.0827311") + max_val = float("0.859647") + mean = float("0.242523") + std = float("0.130151") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [160] + dtype = "float32" + min_val = float("-0.653032") + max_val = float("0.43378") + mean = float("-0.0674586") + std = float("0.192522") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.391646") + max_val = float("0.346344") + mean = float("-0.00172099") + std = float("0.040493") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [160] + dtype = "float32" + min_val = float("-0.172776") + max_val = float("0.901561") + mean = float("0.179592") + std = float("0.219919") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [160] + dtype = "float32" + min_val = float("0.589408") + max_val = float("2.04693") + mean = float("1.03347") + std = float("0.217317") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [160] + dtype = "float32" + min_val = float("0.00590446") + max_val = float("0.893106") + mean = float("0.0761509") + std = float("0.0894425") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [160] + dtype = "float32" + min_val = float("-0.3194") + max_val = float("0.426533") + mean = float("0.00617158") + std = float("0.0993765") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.307816") + max_val = float("0.455446") + mean = float("-0.000301186") + std = float("0.0582493") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [160] + dtype = "float32" + min_val = float("-1.29753") + max_val = float("0.618502") + mean = float("0.0908567") + std = float("0.224292") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [160] + dtype = "float32" + min_val = float("0.163983") + max_val = float("1.59063") + mean = float("0.988601") + std = float("0.196965") + data = None + + +class 
Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [160] + dtype = "float32" + min_val = float("0.102488") + max_val = float("0.956838") + mean = float("0.260022") + std = float("0.124742") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [160] + dtype = "float32" + min_val = float("-0.599862") + max_val = float("0.603305") + mean = float("-0.0986995") + std = float("0.207313") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.266594") + max_val = float("0.378637") + mean = float("-0.00244562") + std = float("0.0408722") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [160] + dtype = "float32" + min_val = float("-0.261421") + max_val = float("1.02063") + mean = float("0.19434") + std = float("0.204167") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [160] + dtype = "float32" + min_val = float("0.695941") + max_val = float("1.66216") + mean = float("1.00801") + std = float("0.189448") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [160] + dtype = "float32" + min_val = float("0.00388842") + max_val = float("0.309695") + mean = float("0.0613038") + std = float("0.0494054") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [160] + dtype = "float32" + min_val = float("-0.344117") + max_val = float("0.265114") + mean = float("0.00756606") + std = float("0.0875486") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.395078") + max_val = float("0.408538") + mean = float("0.00220902") + std = float("0.0604682") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [160] + dtype = "float32" + min_val = float("-0.0916379") + max_val = float("0.127687") + mean = float("0.0160637") + std = float("0.0399332") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [160] + dtype = "float32" + min_val = float("-0.000505605") + max_val = float("0.295863") + mean = float("0.112348") + std = float("0.0571525") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [160] + dtype = "float32" + min_val = float("0.000227605") + max_val = float("0.015509") + mean = float("0.00230275") + std = float("0.00215584") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [160] + dtype = "float32" + min_val = float("-0.0354876") + max_val = float("0.030556") + mean = float("0.00101946") + std = float("0.0103489") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.420773") + max_val = float("0.378626") + mean = float("-0.000362623") + std = float("0.032585") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [160] + dtype = "float32" + min_val = float("-0.382433") + max_val = float("0.128992") + mean = float("-0.00144208") + std = float("0.0360011") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [160] + dtype = "float32" + min_val = float("0.0496553") + max_val = float("0.478795") + mean = 
float("0.141015") + std = float("0.0472768") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [160] + dtype = "float32" + min_val = float("0.00178389") + max_val = float("1.43262") + mean = float("0.077785") + std = float("0.145136") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [160] + dtype = "float32" + min_val = float("-0.511786") + max_val = float("0.288908") + mean = float("0.00277755") + std = float("0.105934") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.286369") + max_val = float("0.31854") + mean = float("0.000947309") + std = float("0.0497441") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [160] + dtype = "float32" + min_val = float("-0.104416") + max_val = float("0.131351") + mean = float("0.00521624") + std = float("0.0379462") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [160] + dtype = "float32" + min_val = float("-0.000900399") + max_val = float("0.226709") + mean = float("0.0813371") + std = float("0.0622182") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [160] + dtype = "float32" + min_val = float("9.69059e-05") + max_val = float("0.00467925") + mean = float("0.00111553") + std = float("0.00100808") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [160] + dtype = "float32" + min_val = float("-0.00706138") + max_val = float("0.0051874") + mean = float("-0.000453506") + std = float("0.00210669") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.238001") + max_val = float("0.257363") + mean = float("0.000115835") + std = float("0.0255222") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [160] + dtype = "float32" + min_val = float("-0.0598688") + max_val = float("0.0265854") + mean = float("-0.00141752") + std = float("0.010431") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [160] + dtype = "float32" + min_val = float("0.0318432") + max_val = float("0.226651") + mean = float("0.11623") + std = float("0.0381688") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [160] + dtype = "float32" + min_val = float("1.07431e-05") + max_val = float("0.00579463") + mean = float("0.000454339") + std = float("0.000790822") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [160] + dtype = "float32" + min_val = float("-0.0328935") + max_val = float("0.0304081") + mean = float("-0.000507447") + std = float("0.00563087") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.145286") + max_val = float("0.190788") + mean = float("0.00165125") + std = float("0.0324536") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [160] + dtype = "float32" + min_val = float("-0.366667") + max_val = float("0.788532") + mean = float("0.143149") + std = float("0.181068") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [160] 
+ dtype = "float32" + min_val = float("0.293464") + max_val = float("2.18919") + mean = float("0.996796") + std = float("0.304708") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [160] + dtype = "float32" + min_val = float("0.160478") + max_val = float("1.30537") + mean = float("0.391958") + std = float("0.161208") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [160] + dtype = "float32" + min_val = float("-0.507931") + max_val = float("0.286665") + mean = float("-0.115189") + std = float("0.147372") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.335616") + max_val = float("0.340552") + mean = float("-0.00159865") + std = float("0.0336132") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [320] + dtype = "float32" + min_val = float("-0.32501") + max_val = float("0.579959") + mean = float("0.0603047") + std = float("0.11966") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [320] + dtype = "float32" + min_val = float("0.638982") + max_val = float("1.67871") + mean = float("1.02728") + std = float("0.180331") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [320] + dtype = "float32" + min_val = float("0.00808028") + max_val = float("0.311328") + mean = float("0.0491401") + std = float("0.0405209") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [320] + dtype = "float32" + min_val = float("-0.175342") + max_val = float("0.199997") + mean = float("-0.0059966") + std = float("0.0652882") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.30368") + max_val = float("0.371915") + mean = float("-0.00228873") + std = float("0.052015") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [320] + dtype = "float32" + min_val = float("-0.283968") + max_val = float("0.387259") + mean = float("0.0424626") + std = float("0.0900175") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [320] + dtype = "float32" + min_val = float("0.539321") + max_val = float("1.60164") + mean = float("1.01945") + std = float("0.133319") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [320] + dtype = "float32" + min_val = float("0.0723918") + max_val = float("0.755284") + mean = float("0.21827") + std = float("0.106583") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [320] + dtype = "float32" + min_val = float("-0.69234") + max_val = float("0.281942") + mean = float("-0.0885379") + std = float("0.144685") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.403733") + max_val = float("0.300506") + mean = float("-0.00119199") + std = float("0.0270677") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [320] + dtype = "float32" + min_val = float("-0.678085") + max_val = float("0.462785") + mean = float("0.0461409") + std = float("0.133269") + data = None + + +class Program_weight_tensor_parameter_280: 
+ name = "parameter_280" + shape = [320] + dtype = "float32" + min_val = float("0.60287") + max_val = float("1.90683") + mean = float("1.04791") + std = float("0.17735") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [320] + dtype = "float32" + min_val = float("2.45682e-05") + max_val = float("0.0212431") + mean = float("0.000619664") + std = float("0.00201091") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [320] + dtype = "float32" + min_val = float("-0.046868") + max_val = float("0.0229309") + mean = float("0.000356531") + std = float("0.00599546") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.467114") + max_val = float("0.454218") + mean = float("-0.000468411") + std = float("0.0546429") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [160] + dtype = "float32" + min_val = float("-0.0955883") + max_val = float("0.0915484") + mean = float("0.00151709") + std = float("0.026877") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [160] + dtype = "float32" + min_val = float("0.0494833") + max_val = float("0.360577") + mean = float("0.0757807") + std = float("0.0325665") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [160] + dtype = "float32" + min_val = float("0.000690535") + max_val = float("0.0131476") + mean = float("0.00277066") + std = float("0.00203283") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [160] + dtype = "float32" + min_val = float("-0.04906") + max_val = float("0.0589414") + mean = float("0.000897603") + std = float("0.0181168") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.434491") + max_val = float("0.287085") + mean = float("-0.00029868") + std = float("0.0345373") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [160] + dtype = "float32" + min_val = float("-0.451931") + max_val = float("0.167765") + mean = float("-0.00249438") + std = float("0.0609065") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [160] + dtype = "float32" + min_val = float("0.062161") + max_val = float("0.416893") + mean = float("0.151029") + std = float("0.0509449") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [160] + dtype = "float32" + min_val = float("0.000607419") + max_val = float("1.32689") + mean = float("0.0887302") + std = float("0.181612") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [160] + dtype = "float32" + min_val = float("-0.200206") + max_val = float("0.226249") + mean = float("-0.0015126") + std = float("0.0811605") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.261006") + max_val = float("0.2749") + mean = float("-0.000777804") + std = float("0.0529501") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [160] + dtype = "float32" + min_val = float("-0.72931") + max_val = float("0.9541") + mean = float("0.130556") + std = 
float("0.202177") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [160] + dtype = "float32" + min_val = float("0.0863203") + max_val = float("1.7885") + mean = float("0.96807") + std = float("0.290005") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [160] + dtype = "float32" + min_val = float("0.163719") + max_val = float("1.92833") + mean = float("0.450642") + std = float("0.221921") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [160] + dtype = "float32" + min_val = float("-0.674034") + max_val = float("0.36232") + mean = float("-0.0834642") + std = float("0.16703") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.599907") + max_val = float("0.259144") + mean = float("-0.00115431") + std = float("0.0341927") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [320] + dtype = "float32" + min_val = float("-0.92609") + max_val = float("0.673163") + mean = float("0.0527093") + std = float("0.116354") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [320] + dtype = "float32" + min_val = float("0.638014") + max_val = float("1.80955") + mean = float("1.03439") + std = float("0.184483") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [320] + dtype = "float32" + min_val = float("0.0094502") + max_val = float("1.29272") + mean = float("0.0599466") + std = float("0.106027") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [320] + dtype = "float32" + min_val = float("-0.263219") + max_val = float("0.335806") + mean = float("0.00573963") + std = float("0.0658468") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.308468") + max_val = float("0.46704") + mean = float("0.0012071") + std = float("0.0527592") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [320] + dtype = "float32" + min_val = float("-0.499203") + max_val = float("0.475236") + mean = float("0.0198586") + std = float("0.105267") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [320] + dtype = "float32" + min_val = float("0.613967") + max_val = float("1.96029") + mean = float("1.03858") + std = float("0.147808") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [320] + dtype = "float32" + min_val = float("0.060855") + max_val = float("1.37548") + mean = float("0.278212") + std = float("0.185533") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [320] + dtype = "float32" + min_val = float("-0.524064") + max_val = float("0.310286") + mean = float("-0.0926059") + std = float("0.134696") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.592306") + max_val = float("0.342981") + mean = float("-0.00122469") + std = float("0.0278743") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [320] + dtype = "float32" + min_val = float("-0.558454") + max_val = float("0.610437") + 
mean = float("0.0479998") + std = float("0.10956") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [320] + dtype = "float32" + min_val = float("0.68435") + max_val = float("1.74172") + mean = float("1.06726") + std = float("0.178347") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [320] + dtype = "float32" + min_val = float("4.1158e-05") + max_val = float("0.917983") + mean = float("0.0258504") + std = float("0.0645535") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [320] + dtype = "float32" + min_val = float("-0.294467") + max_val = float("0.566649") + mean = float("0.00211171") + std = float("0.0692516") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.368086") + max_val = float("0.521572") + mean = float("0.000476541") + std = float("0.0552926") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [160] + dtype = "float32" + min_val = float("-0.064149") + max_val = float("0.102866") + mean = float("0.0028512") + std = float("0.0274351") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [160] + dtype = "float32" + min_val = float("0.0507284") + max_val = float("0.288121") + mean = float("0.0753075") + std = float("0.0260079") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [160] + dtype = "float32" + min_val = float("0.000741774") + max_val = float("0.012938") + mean = float("0.00275531") + std = float("0.00225956") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [160] + dtype = "float32" + min_val = float("-0.0306915") + max_val = float("0.0173706") + mean = float("-0.000155398") + std = float("0.00872024") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [160, 160, 1, 1] + dtype = "float32" + min_val = float("-0.352596") + max_val = float("0.373128") + mean = float("0.000200183") + std = float("0.0358407") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [160] + dtype = "float32" + min_val = float("-0.192669") + max_val = float("0.130221") + mean = float("0.00757929") + std = float("0.0297545") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [160] + dtype = "float32" + min_val = float("0.0506295") + max_val = float("0.424586") + mean = float("0.145721") + std = float("0.0470034") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [160] + dtype = "float32" + min_val = float("0.000646154") + max_val = float("0.679004") + mean = float("0.0757932") + std = float("0.097401") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [160] + dtype = "float32" + min_val = float("-0.240246") + max_val = float("0.284417") + mean = float("0.00334975") + std = float("0.0750832") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [160, 1, 5, 5] + dtype = "float32" + min_val = float("-0.287613") + max_val = float("0.278133") + mean = float("0.000243886") + std = float("0.0542145") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [160] + dtype = 
"float32" + min_val = float("-1.03285") + max_val = float("0.701053") + mean = float("-0.0119197") + std = float("0.250376") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [160] + dtype = "float32" + min_val = float("0.160191") + max_val = float("1.70375") + mean = float("1.02213") + std = float("0.248354") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [160] + dtype = "float32" + min_val = float("0.175289") + max_val = float("0.994615") + mean = float("0.466752") + std = float("0.176612") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [160] + dtype = "float32" + min_val = float("-0.562465") + max_val = float("0.330563") + mean = float("-0.146764") + std = float("0.166904") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.296926") + max_val = float("0.264574") + mean = float("-0.00240144") + std = float("0.0419502") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [320] + dtype = "float32" + min_val = float("-0.538055") + max_val = float("0.740732") + mean = float("0.0345549") + std = float("0.160809") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [320] + dtype = "float32" + min_val = float("0.614715") + max_val = float("1.89546") + mean = float("1.02775") + std = float("0.212335") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [320] + dtype = "float32" + min_val = float("0.0139837") + max_val = float("0.558154") + mean = float("0.0756442") + std = float("0.0761005") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [320] + dtype = "float32" + min_val = float("-0.287771") + max_val = float("0.442059") + mean = float("0.00147959") + std = float("0.0805246") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.293724") + max_val = float("0.501427") + mean = float("0.000951543") + std = float("0.0658203") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [320] + dtype = "float32" + min_val = float("-1.01408") + max_val = float("0.351832") + mean = float("-0.117835") + std = float("0.221703") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [320] + dtype = "float32" + min_val = float("0.65583") + max_val = float("1.76944") + mean = float("1.08473") + std = float("0.157102") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [320] + dtype = "float32" + min_val = float("0.13452") + max_val = float("1.63293") + mean = float("0.422305") + std = float("0.238369") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [320] + dtype = "float32" + min_val = float("-0.81327") + max_val = float("0.401423") + mean = float("-0.154524") + std = float("0.222845") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.471797") + max_val = float("0.389692") + mean = float("-0.00206647") + std = float("0.0380648") + data = None + + +class Program_weight_tensor_parameter_339: + name = 
"parameter_339" + shape = [320] + dtype = "float32" + min_val = float("-0.513627") + max_val = float("0.55695") + mean = float("0.0689956") + std = float("0.169924") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [320] + dtype = "float32" + min_val = float("0.601976") + max_val = float("1.83278") + mean = float("1.07716") + std = float("0.20982") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [320] + dtype = "float32" + min_val = float("0.00011586") + max_val = float("0.801533") + mean = float("0.0413794") + std = float("0.0818433") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [320] + dtype = "float32" + min_val = float("-0.472503") + max_val = float("0.508798") + mean = float("-0.00228142") + std = float("0.087638") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.571613") + max_val = float("0.559811") + mean = float("0.00222669") + std = float("0.0693936") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [160] + dtype = "float32" + min_val = float("-0.303664") + max_val = float("0.766092") + mean = float("0.146005") + std = float("0.150233") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [160] + dtype = "float32" + min_val = float("0.322333") + max_val = float("1.79121") + mean = float("0.961734") + std = float("0.201223") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [160] + dtype = "float32" + min_val = float("0.165524") + max_val = float("1.22308") + mean = float("0.439521") + std = float("0.185691") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [160] + dtype = "float32" + min_val = float("-0.477878") + max_val = float("0.241652") + mean = float("-0.135919") + std = float("0.16279") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [160, 320, 1, 1] + dtype = "float32" + min_val = float("-0.473246") + max_val = float("0.430138") + mean = float("-0.00229145") + std = float("0.0362895") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [320] + dtype = "float32" + min_val = float("-0.463308") + max_val = float("0.406123") + mean = float("0.0464803") + std = float("0.104339") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [320] + dtype = "float32" + min_val = float("0.599446") + max_val = float("1.81676") + mean = float("1.0375") + std = float("0.188932") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [320] + dtype = "float32" + min_val = float("0.00580563") + max_val = float("0.39891") + mean = float("0.0605396") + std = float("0.0423213") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [320] + dtype = "float32" + min_val = float("-0.261078") + max_val = float("0.204192") + mean = float("-0.00226903") + std = float("0.071379") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.266913") + max_val = float("0.561484") + mean = float("-0.000583783") + std = float("0.0532972") + data = None + + +class 
Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [320] + dtype = "float32" + min_val = float("-0.525362") + max_val = float("0.371294") + mean = float("0.01644") + std = float("0.0992591") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [320] + dtype = "float32" + min_val = float("0.40611") + max_val = float("1.53855") + mean = float("1.04307") + std = float("0.117238") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [320] + dtype = "float32" + min_val = float("0.0816069") + max_val = float("0.973172") + mean = float("0.266395") + std = float("0.138108") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [320] + dtype = "float32" + min_val = float("-0.675713") + max_val = float("0.258371") + mean = float("-0.131476") + std = float("0.156946") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [320, 320, 1, 1] + dtype = "float32" + min_val = float("-0.320808") + max_val = float("0.308671") + mean = float("-0.00194222") + std = float("0.0300297") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [320] + dtype = "float32" + min_val = float("-0.650682") + max_val = float("1.19758") + mean = float("0.0316924") + std = float("0.144454") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [320] + dtype = "float32" + min_val = float("0.780624") + max_val = float("2.04193") + mean = float("1.06005") + std = float("0.150167") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [320] + dtype = "float32" + min_val = float("3.46888e-05") + max_val = float("0.0195151") + mean = float("0.000701378") + std = float("0.00185381") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [320] + dtype = "float32" + min_val = float("-0.0432573") + max_val = float("0.0218873") + mean = float("-0.00151796") + std = float("0.00731824") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [320, 1, 5, 5] + dtype = "float32" + min_val = float("-0.438592") + max_val = float("0.557346") + mean = float("-0.000597166") + std = float("0.0528026") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [160] + dtype = "float32" + min_val = float("-0.144647") + max_val = float("0.133568") + mean = float("0.00574974") + std = float("0.0414225") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [160] + dtype = "float32" + min_val = float("0.0615571") + max_val = float("0.442063") + mean = float("0.128542") + std = float("0.0637464") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [160] + dtype = "float32" + min_val = float("1.55719") + max_val = float("12.1319") + mean = float("4.54038") + std = float("1.99311") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [160] + dtype = "float32" + min_val = float("-2.07294") + max_val = float("1.28115") + mean = float("-0.0651892") + std = float("0.538161") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [160, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.207378") + max_val = float("0.283423") + mean = 
float("-0.000215875") + std = float("0.0259136") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [160] + dtype = "float32" + min_val = float("-0.17433") + max_val = float("0.0557937") + mean = float("-0.00199308") + std = float("0.0293569") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [160] + dtype = "float32" + min_val = float("0.0454461") + max_val = float("0.530007") + mean = float("0.0755806") + std = float("0.0397851") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [160] + dtype = "float32" + min_val = float("0.0698261") + max_val = float("0.634801") + mean = float("0.230696") + std = float("0.105205") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [160] + dtype = "float32" + min_val = float("-0.66241") + max_val = float("0.620589") + mean = float("-0.0116559") + std = float("0.205011") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [160, 512, 1, 1] + dtype = "float32" + min_val = float("-0.213893") + max_val = float("0.191304") + mean = float("-0.000540441") + std = float("0.0221756") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [160] + dtype = "float32" + min_val = float("-0.18074") + max_val = float("0.309844") + mean = float("-0.006202") + std = float("0.052372") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [160] + dtype = "float32" + min_val = float("0.0468643") + max_val = float("0.59381") + mean = float("0.115209") + std = float("0.0780897") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [160] + dtype = "float32" + min_val = float("0.0773253") + max_val = float("0.926812") + mean = float("0.238091") + std = float("0.136017") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [160] + dtype = "float32" + min_val = float("-0.96255") + max_val = float("0.691255") + mean = float("0.0258696") + std = float("0.249829") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [160, 256, 1, 1] + dtype = "float32" + min_val = float("-0.28672") + max_val = float("0.368023") + mean = float("-0.00153536") + std = float("0.0427747") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [1024] + dtype = "float32" + min_val = float("-4.68912") + max_val = float("4.42206") + mean = float("-2.15878") + std = float("0.606116") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("2.50671") + max_val = float("4.49836") + mean = float("3.42156") + std = float("0.272125") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("0.121325") + max_val = float("1.288") + mean = float("0.251977") + std = float("0.080401") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("-1.0843") + max_val = float("0.754751") + mean = float("-0.167818") + std = float("0.252002") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024, 1024, 1, 1] + dtype = "float32" + min_val = 
float("-0.249354") + max_val = float("0.262995") + mean = float("-0.000961384") + std = float("0.0170622") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024] + dtype = "float32" + min_val = float("-0.048199") + max_val = float("0.059245") + mean = float("-0.0031534") + std = float("0.00853464") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [1024, 256, 1, 1] + dtype = "float32" + min_val = float("-0.449643") + max_val = float("0.317283") + mean = float("-0.000775903") + std = float("0.00756664") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [256] + dtype = "float32" + min_val = float("-0.000764597") + max_val = float("0.0140234") + mean = float("0.00116219") + std = float("0.00201413") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [256, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.214265") + max_val = float("0.194265") + mean = float("0.00028062") + std = float("0.00702431") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [1024] + dtype = "float32" + min_val = float("-3.63458") + max_val = float("3.58613") + mean = float("-0.0619609") + std = float("0.686025") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [1024] + dtype = "float32" + min_val = float("0.244139") + max_val = float("3.5711") + mean = float("1.71407") + std = float("0.411809") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [1024] + dtype = "float32" + min_val = float("0.00014465") + max_val = float("1.16713") + mean = float("0.0220346") + std = float("0.0691749") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [1024] + dtype = "float32" + min_val = float("-1.57648") + max_val = float("0.209733") + mean = float("-0.0677034") + std = float("0.0955235") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [1024, 1, 5, 5] + dtype = "float32" + min_val = float("-0.46224") + max_val = float("0.348286") + mean = float("0.00784727") + std = float("0.043616") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [1024] + dtype = "float32" + min_val = float("-2.83061") + max_val = float("2.2952") + mean = float("-1.25227") + std = float("0.788125") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [1024] + dtype = "float32" + min_val = float("-0.223704") + max_val = float("2.34621") + mean = float("1.0473") + std = float("0.33231") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [1024] + dtype = "float32" + min_val = float("0.0202182") + max_val = float("0.472374") + mean = float("0.111202") + std = float("0.0509513") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [1024] + dtype = "float32" + min_val = float("-1.07234") + max_val = float("0.619637") + mean = float("-0.0675654") + std = float("0.226296") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [1024, 512, 1, 1] + dtype = "float32" + min_val = float("-0.316908") + max_val = float("0.413461") + mean = float("-0.000568572") + std = float("0.0234734") + data = None + + +class 
Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [512] + dtype = "float32" + min_val = float("-0.0405464") + max_val = float("0.0506636") + mean = float("-0.00656753") + std = float("0.0107445") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.199267") + max_val = float("0.210001") + mean = float("-0.00413583") + std = float("0.0173349") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [128] + dtype = "float32" + min_val = float("-0.00203298") + max_val = float("0.00819582") + mean = float("0.00277849") + std = float("0.00221781") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-0.20676") + max_val = float("0.236063") + mean = float("0.00202426") + std = float("0.0145962") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [512] + dtype = "float32" + min_val = float("-1.75388") + max_val = float("2.23245") + mean = float("0.474604") + std = float("0.511719") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [512] + dtype = "float32" + min_val = float("0.605777") + max_val = float("3.435") + mean = float("1.3299") + std = float("0.392424") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [512] + dtype = "float32" + min_val = float("0.000198543") + max_val = float("2.14367") + mean = float("0.0374683") + std = float("0.109853") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [512] + dtype = "float32" + min_val = float("-1.0605") + max_val = float("0.749574") + mean = float("-0.096829") + std = float("0.154603") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.163016") + max_val = float("0.2276") + mean = float("0.00464678") + std = float("0.0407667") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [512] + dtype = "float32" + min_val = float("-2.78677") + max_val = float("3.02329") + mean = float("-0.939681") + std = float("0.884703") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [512] + dtype = "float32" + min_val = float("-1.11576") + max_val = float("2.53483") + mean = float("0.995107") + std = float("0.474606") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [512] + dtype = "float32" + min_val = float("0.26737") + max_val = float("4.86589") + mean = float("0.758846") + std = float("0.378929") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [512] + dtype = "float32" + min_val = float("-3.97279") + max_val = float("2.59093") + mean = float("-0.424021") + std = float("1.01594") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.345356") + max_val = float("0.489219") + mean = float("-0.00131064") + std = float("0.0302391") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [512] + dtype = "float32" + min_val = float("-4.75827") + max_val = float("5.02772") + mean = float("0.869295") 
+ std = float("1.15298") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [512] + dtype = "float32" + min_val = float("0.558574") + max_val = float("3.56156") + mean = float("1.12752") + std = float("0.431084") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [512] + dtype = "float32" + min_val = float("0.000617491") + max_val = float("3.52162") + mean = float("0.0615511") + std = float("0.19362") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [512] + dtype = "float32" + min_val = float("-2.46371") + max_val = float("0.294574") + mean = float("-0.0807288") + std = float("0.17548") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.520688") + max_val = float("0.61736") + mean = float("0.00329998") + std = float("0.0550278") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [512] + dtype = "float32" + min_val = float("-3.44932") + max_val = float("2.01943") + mean = float("-0.946755") + std = float("0.852542") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [512] + dtype = "float32" + min_val = float("-0.219732") + max_val = float("2.85209") + mean = float("1.04517") + std = float("0.41982") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [512] + dtype = "float32" + min_val = float("0.160321") + max_val = float("2.23545") + mean = float("0.78881") + std = float("0.282903") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [512] + dtype = "float32" + min_val = float("-4.49583") + max_val = float("2.63401") + mean = float("-0.521434") + std = float("1.03277") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.360098") + max_val = float("0.458427") + mean = float("-0.00139261") + std = float("0.0312964") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [512] + dtype = "float32" + min_val = float("-3.69947") + max_val = float("4.51505") + mean = float("0.914451") + std = float("1.10958") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [512] + dtype = "float32" + min_val = float("0.479123") + max_val = float("2.81553") + mean = float("1.06738") + std = float("0.416471") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [512] + dtype = "float32" + min_val = float("0.000134811") + max_val = float("1.34325") + mean = float("0.0796572") + std = float("0.15635") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [512] + dtype = "float32" + min_val = float("-2.05075") + max_val = float("0.839398") + mean = float("-0.0787515") + std = float("0.2112") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.682357") + max_val = float("0.568999") + mean = float("0.00184801") + std = float("0.0596565") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [512] + dtype = "float32" + min_val = float("-3.60406") + max_val = float("3.57973") + mean = 
float("-0.792668") + std = float("0.920174") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [512] + dtype = "float32" + min_val = float("0.0673131") + max_val = float("2.8763") + mean = float("1.10539") + std = float("0.461442") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [512] + dtype = "float32" + min_val = float("0.173978") + max_val = float("2.55223") + mean = float("0.76863") + std = float("0.339075") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [512] + dtype = "float32" + min_val = float("-3.43966") + max_val = float("2.52436") + mean = float("-0.199641") + std = float("0.905775") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.406505") + max_val = float("0.381226") + mean = float("-0.000941229") + std = float("0.030964") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [512] + dtype = "float32" + min_val = float("-2.99076") + max_val = float("4.07165") + mean = float("0.720978") + std = float("1.18939") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [512] + dtype = "float32" + min_val = float("0.532104") + max_val = float("3.00528") + mean = float("1.08374") + std = float("0.445429") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [512] + dtype = "float32" + min_val = float("0.000115808") + max_val = float("1.23084") + mean = float("0.0804388") + std = float("0.163612") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [512] + dtype = "float32" + min_val = float("-3.77949") + max_val = float("0.502876") + mean = float("-0.0723556") + std = float("0.253454") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.661218") + max_val = float("0.51724") + mean = float("0.00180051") + std = float("0.0604124") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [512] + dtype = "float32" + min_val = float("-3.389") + max_val = float("2.63183") + mean = float("-0.652218") + std = float("0.873705") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [512] + dtype = "float32" + min_val = float("0.0574043") + max_val = float("2.69881") + mean = float("1.04787") + std = float("0.520278") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [512] + dtype = "float32" + min_val = float("0.14157") + max_val = float("4.05711") + mean = float("0.944815") + std = float("0.439006") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [512] + dtype = "float32" + min_val = float("-3.9207") + max_val = float("2.23825") + mean = float("-0.52007") + std = float("1.02526") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.397851") + max_val = float("0.566243") + mean = float("-0.0012648") + std = float("0.0302577") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [512] + dtype = "float32" + min_val = float("-3.63696") + max_val = 
float("3.69657") + mean = float("0.75971") + std = float("1.19915") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [512] + dtype = "float32" + min_val = float("0.568696") + max_val = float("2.96831") + mean = float("1.16292") + std = float("0.471643") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [512] + dtype = "float32" + min_val = float("0.000119312") + max_val = float("1.25746") + mean = float("0.0810436") + std = float("0.163652") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [512] + dtype = "float32" + min_val = float("-1.52232") + max_val = float("1.04033") + mean = float("-0.0791394") + std = float("0.230396") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.59981") + max_val = float("0.547293") + mean = float("0.00183321") + std = float("0.061842") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.0674") + max_val = float("2.7392") + mean = float("-0.477426") + std = float("0.864982") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.0665085") + max_val = float("3.75266") + mean = float("1.03868") + std = float("0.557475") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.347504") + max_val = float("4.96759") + mean = float("1.02142") + std = float("0.527618") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-2.61913") + max_val = float("1.52864") + mean = float("-0.330152") + std = float("0.756747") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.408078") + max_val = float("0.371987") + mean = float("-0.00130599") + std = float("0.0291745") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [512] + dtype = "float32" + min_val = float("-6.01234") + max_val = float("3.82419") + mean = float("0.163311") + std = float("1.27245") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [512] + dtype = "float32" + min_val = float("0.596046") + max_val = float("4.72808") + mean = float("1.43227") + std = float("0.506726") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [512] + dtype = "float32" + min_val = float("7.77682e-05") + max_val = float("2.30813") + mean = float("0.077999") + std = float("0.188346") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [512] + dtype = "float32" + min_val = float("-2.18564") + max_val = float("0.764279") + mean = float("-0.0578748") + std = float("0.228934") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.421494") + max_val = float("0.476019") + mean = float("-0.00163924") + std = float("0.0698465") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [512] + dtype = "float32" + min_val = 
float("-2.01371") + max_val = float("2.2049") + mean = float("-0.00690624") + std = float("0.636127") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [512] + dtype = "float32" + min_val = float("-0.510235") + max_val = float("3.78718") + mean = float("0.826748") + std = float("0.629705") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [512] + dtype = "float32" + min_val = float("0.182133") + max_val = float("2.09328") + mean = float("0.571429") + std = float("0.274745") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [512] + dtype = "float32" + min_val = float("-3.02528") + max_val = float("2.5369") + mean = float("-0.181741") + std = float("0.907362") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.330579") + max_val = float("0.293061") + mean = float("-0.000545162") + std = float("0.0343697") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [256] + dtype = "float32" + min_val = float("-2.24034") + max_val = float("3.88891") + mean = float("1.51435") + std = float("1.09444") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [256] + dtype = "float32" + min_val = float("0.438089") + max_val = float("3.00471") + mean = float("0.910995") + std = float("0.386137") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [256] + dtype = "float32" + min_val = float("0.000199445") + max_val = float("0.204878") + mean = float("0.0283828") + std = float("0.0246572") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [256] + dtype = "float32" + min_val = float("-0.817275") + max_val = float("0.569823") + mean = float("-0.0753346") + std = float("0.144339") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [256, 1, 3, 3] + dtype = "float32" + min_val = float("-0.27611") + max_val = float("0.242466") + mean = float("0.0194738") + std = float("0.0874247") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [256] + dtype = "float32" + min_val = float("-3.04801") + max_val = float("2.06684") + mean = float("-0.813742") + std = float("0.935085") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [256] + dtype = "float32" + min_val = float("0.0744735") + max_val = float("1.99523") + mean = float("1.06005") + std = float("0.343784") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [256] + dtype = "float32" + min_val = float("0.242457") + max_val = float("2.35436") + mean = float("0.851146") + std = float("0.329938") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [256] + dtype = "float32" + min_val = float("-4.71901") + max_val = float("2.60086") + mean = float("-0.350544") + std = float("1.21365") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.508837") + max_val = float("0.394904") + mean = float("-0.00235872") + std = float("0.0524808") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [256] + 
dtype = "float32" + min_val = float("-2.2934") + max_val = float("4.81048") + mean = float("0.455567") + std = float("1.56124") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [256] + dtype = "float32" + min_val = float("0.428587") + max_val = float("2.71229") + mean = float("1.1438") + std = float("0.452676") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [256] + dtype = "float32" + min_val = float("0.000102636") + max_val = float("1.11159") + mean = float("0.10047") + std = float("0.191548") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [256] + dtype = "float32" + min_val = float("-4.65106") + max_val = float("1.41037") + mean = float("-0.0645947") + std = float("0.445777") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [256, 1, 3, 3] + dtype = "float32" + min_val = float("-0.603309") + max_val = float("0.621434") + mean = float("0.00230773") + std = float("0.143026") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [256] + dtype = "float32" + min_val = float("-1.88235") + max_val = float("4.98003") + mean = float("0.373603") + std = float("0.954073") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [256] + dtype = "float32" + min_val = float("-0.352723") + max_val = float("3.32578") + mean = float("0.764437") + std = float("0.645436") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [256] + dtype = "float32" + min_val = float("0.192664") + max_val = float("3.87646") + mean = float("0.849903") + std = float("0.496296") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [256] + dtype = "float32" + min_val = float("-6.4627") + max_val = float("4.37849") + mean = float("-0.301674") + std = float("1.70311") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.607689") + max_val = float("0.697928") + mean = float("-0.00144523") + std = float("0.0647383") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [128] + dtype = "float32" + min_val = float("-4.34337") + max_val = float("6.0628") + mean = float("1.85413") + std = float("1.59139") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [128] + dtype = "float32" + min_val = float("-0.0415774") + max_val = float("3.54284") + mean = float("1.03066") + std = float("0.500108") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [128] + dtype = "float32" + min_val = float("0.000133025") + max_val = float("16.2629") + mean = float("0.29932") + std = float("1.46668") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [128] + dtype = "float32" + min_val = float("-2.09559") + max_val = float("6.10358") + mean = float("-0.0303657") + std = float("0.682803") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.361578") + max_val = float("0.309794") + mean = float("0.0367976") + std = float("0.128036") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + 
shape = [128] + dtype = "float32" + min_val = float("-2.51857") + max_val = float("7.231") + mean = float("-0.419332") + std = float("1.15109") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [128] + dtype = "float32" + min_val = float("-0.733518") + max_val = float("8.98718") + mean = float("1.12452") + std = float("0.947115") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [128] + dtype = "float32" + min_val = float("0.491515") + max_val = float("8.88929") + mean = float("3.78231") + std = float("1.79243") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [128] + dtype = "float32" + min_val = float("-6.68292") + max_val = float("6.13744") + mean = float("-0.499404") + std = float("2.21961") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [128, 128, 1, 1] + dtype = "float32" + min_val = float("-0.841652") + max_val = float("0.446523") + mean = float("-0.00545897") + std = float("0.0795739") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [128] + dtype = "float32" + min_val = float("-1.66964") + max_val = float("11.2954") + mean = float("1.22491") + std = float("1.99392") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [128] + dtype = "float32" + min_val = float("0.912487") + max_val = float("6.42073") + mean = float("2.16295") + std = float("0.796402") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [128] + dtype = "float32" + min_val = float("0.00051609") + max_val = float("8.10835") + mean = float("0.399141") + std = float("0.951141") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [128] + dtype = "float32" + min_val = float("-2.98104") + max_val = float("1.16637") + mean = float("-0.21698") + std = float("0.644739") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.937187") + max_val = float("0.729625") + mean = float("-0.0106142") + std = float("0.195347") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [128] + dtype = "float32" + min_val = float("-3.16602") + max_val = float("4.35291") + mean = float("0.818057") + std = float("1.27095") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [128] + dtype = "float32" + min_val = float("-0.552193") + max_val = float("5.70418") + mean = float("1.23673") + std = float("1.01597") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [128] + dtype = "float32" + min_val = float("0.91897") + max_val = float("19.0801") + mean = float("5.1176") + std = float("3.24316") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [128] + dtype = "float32" + min_val = float("-6.65551") + max_val = float("6.18423") + mean = float("-0.380706") + std = float("2.53671") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [128, 64, 1, 1] + dtype = "float32" + min_val = float("-0.516007") + max_val = float("0.754238") + mean = float("-0.00297702") + std = float("0.0924049") + data = None + + +class Program_weight_tensor_parameter_502: + name = 
"parameter_502" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.464146") + max_val = float("0.37868") + mean = float("0.03016") + std = float("0.16447") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.980215") + max_val = float("0.64433") + mean = float("-0.0139954") + std = float("0.138057") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [32, 1, 3, 3] + dtype = "float32" + min_val = float("-0.983707") + max_val = float("1.10087") + mean = float("0.00522956") + std = float("0.305819") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.496125") + 
max_val = float("0.486462") + mean = float("-0.00167543") + std = float("0.17361") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..4a6bb8a80 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +6e14247884f18d61a5175ae39e8a13548e42ca059d5bb8dae132d71d507d8d18 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/input_meta.py new file mode 100644 index 000000000..c508fd524 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 3, 48, 320] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.0340321") + std = float("0.412725") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/model.py new file mode 100644 index 000000000..bb736947f --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/model.py @@ -0,0 +1,1526 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + 
parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + data_0, + ): + # pd_op.conv2d: (8x16x24x160xf32) <- (8x3x48x320xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_142, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_142 + + # pd_op.batch_norm_: (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_141, + parameter_140, + parameter_139, + parameter_138, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_138, parameter_139, parameter_140, parameter_141 + + # pd_op.hardswish: (8x16x24x160xf32) <- (8x16x24x160xf32) + hardswish_0 = paddle._C_ops.hardswish(batch_norm__0) + + # pd_op.depthwise_conv2d: (8x16x24x160xf32) <- (8x16x24x160xf32, 16x1x3x3xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + hardswish_0, parameter_137, [1, 1], [1, 1], "EXPLICIT", 16, [1, 1], "NCHW" + ) + del parameter_137 + + # pd_op.batch_norm_: (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_136, + parameter_135, + parameter_134, + parameter_133, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_133, parameter_134, parameter_135, parameter_136 + + # pd_op.hardswish: (8x16x24x160xf32) <- (8x16x24x160xf32) + hardswish_1 = paddle._C_ops.hardswish(batch_norm__6) + + # pd_op.conv2d: (8x32x24x160xf32) <- (8x16x24x160xf32, 32x16x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + hardswish_1, parameter_132, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_132 + + # pd_op.batch_norm_: (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_131, + parameter_130, + parameter_129, + parameter_128, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_128, parameter_129, parameter_130, parameter_131 + + # pd_op.hardswish: (8x32x24x160xf32) <- (8x32x24x160xf32) + hardswish_2 = 
paddle._C_ops.hardswish(batch_norm__12) + + # pd_op.depthwise_conv2d: (8x32x24x160xf32) <- (8x32x24x160xf32, 32x1x3x3xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + hardswish_2, parameter_127, [1, 1], [1, 1], "EXPLICIT", 32, [1, 1], "NCHW" + ) + del parameter_127 + + # pd_op.batch_norm_: (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_126, + parameter_125, + parameter_124, + parameter_123, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_123, parameter_124, parameter_125, parameter_126 + + # pd_op.hardswish: (8x32x24x160xf32) <- (8x32x24x160xf32) + hardswish_3 = paddle._C_ops.hardswish(batch_norm__18) + + # pd_op.conv2d: (8x64x24x160xf32) <- (8x32x24x160xf32, 64x32x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + hardswish_3, parameter_122, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_122 + + # pd_op.batch_norm_: (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_121, + parameter_120, + parameter_119, + parameter_118, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_118, parameter_119, parameter_120, parameter_121 + + # pd_op.hardswish: (8x64x24x160xf32) <- (8x64x24x160xf32) + hardswish_4 = paddle._C_ops.hardswish(batch_norm__24) + + # pd_op.depthwise_conv2d: (8x64x24x160xf32) <- (8x64x24x160xf32, 64x1x3x3xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + hardswish_4, parameter_117, [1, 1], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del parameter_117 + + # pd_op.batch_norm_: (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_116, + parameter_115, + parameter_114, + parameter_113, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_113, parameter_114, parameter_115, parameter_116 + + # pd_op.hardswish: (8x64x24x160xf32) <- (8x64x24x160xf32) + hardswish_5 = paddle._C_ops.hardswish(batch_norm__30) + + # pd_op.conv2d: (8x64x24x160xf32) <- (8x64x24x160xf32, 64x64x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + hardswish_5, parameter_112, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_112 + + # pd_op.batch_norm_: (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_111, + parameter_110, + parameter_109, 
+ parameter_108, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_108, parameter_109, parameter_110, parameter_111 + + # pd_op.hardswish: (8x64x24x160xf32) <- (8x64x24x160xf32) + hardswish_6 = paddle._C_ops.hardswish(batch_norm__36) + + # pd_op.depthwise_conv2d: (8x64x12x160xf32) <- (8x64x24x160xf32, 64x1x3x3xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + hardswish_6, parameter_107, [2, 1], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del parameter_107 + + # pd_op.batch_norm_: (8x64x12x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x12x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_106, + parameter_105, + parameter_104, + parameter_103, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_103, parameter_104, parameter_105, parameter_106 + + # pd_op.hardswish: (8x64x12x160xf32) <- (8x64x12x160xf32) + hardswish_7 = paddle._C_ops.hardswish(batch_norm__42) + + # pd_op.conv2d: (8x128x12x160xf32) <- (8x64x12x160xf32, 128x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + hardswish_7, parameter_102, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_102 + + # pd_op.batch_norm_: (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_101, + parameter_100, + parameter_99, + parameter_98, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_98, parameter_99 + + # pd_op.hardswish: (8x128x12x160xf32) <- (8x128x12x160xf32) + hardswish_8 = paddle._C_ops.hardswish(batch_norm__48) + + # pd_op.depthwise_conv2d: (8x128x12x160xf32) <- (8x128x12x160xf32, 128x1x3x3xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + hardswish_8, parameter_97, [1, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del parameter_97 + + # pd_op.batch_norm_: (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_96, + parameter_95, + parameter_94, + parameter_93, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_93, parameter_94, parameter_95, parameter_96 + + # pd_op.hardswish: (8x128x12x160xf32) <- (8x128x12x160xf32) + hardswish_9 = paddle._C_ops.hardswish(batch_norm__54) + + # pd_op.conv2d: (8x128x12x160xf32) <- (8x128x12x160xf32, 128x128x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + hardswish_9, parameter_92, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_92 + + # 
pd_op.batch_norm_: (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_91, + parameter_90, + parameter_89, + parameter_88, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_88, parameter_89, parameter_90, parameter_91 + + # pd_op.hardswish: (8x128x12x160xf32) <- (8x128x12x160xf32) + hardswish_10 = paddle._C_ops.hardswish(batch_norm__60) + + # pd_op.depthwise_conv2d: (8x128x6x160xf32) <- (8x128x12x160xf32, 128x1x3x3xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + hardswish_10, parameter_87, [2, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del parameter_87 + + # pd_op.batch_norm_: (8x128x6x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x6x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_86, + parameter_85, + parameter_84, + parameter_83, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_83, parameter_84, parameter_85, parameter_86 + + # pd_op.hardswish: (8x128x6x160xf32) <- (8x128x6x160xf32) + hardswish_11 = paddle._C_ops.hardswish(batch_norm__66) + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x128x6x160xf32, 256x128x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + hardswish_11, parameter_82, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_82 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_81, + parameter_80, + parameter_79, + parameter_78, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_78, parameter_79, parameter_80, parameter_81 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_12 = paddle._C_ops.hardswish(batch_norm__72) + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + hardswish_12, parameter_77, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_77 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_76, + parameter_75, + parameter_74, + parameter_73, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_73, parameter_74, 
parameter_75, parameter_76 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_13 = paddle._C_ops.hardswish(batch_norm__78) + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + hardswish_13, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_71, + parameter_70, + parameter_69, + parameter_68, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_68, parameter_69, parameter_70, parameter_71 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_14 = paddle._C_ops.hardswish(batch_norm__84) + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + hardswish_14, parameter_67, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_67 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_66, + parameter_65, + parameter_64, + parameter_63, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_63, parameter_64, parameter_65, parameter_66 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_15 = paddle._C_ops.hardswish(batch_norm__90) + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + hardswish_15, parameter_62, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_62 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_61, + parameter_60, + parameter_59, + parameter_58, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_58, parameter_59, parameter_60, parameter_61 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_16 = paddle._C_ops.hardswish(batch_norm__96) + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + hardswish_16, parameter_57, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_57 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, 
+ ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_56, + parameter_55, + parameter_54, + parameter_53, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_53, parameter_54, parameter_55, parameter_56 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_17 = paddle._C_ops.hardswish(batch_norm__102) + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + hardswish_17, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_18 = paddle._C_ops.hardswish(batch_norm__108) + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + hardswish_18, parameter_47, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_47 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_19 = paddle._C_ops.hardswish(batch_norm__114) + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + hardswish_19, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_20 = paddle._C_ops.hardswish(batch_norm__120) + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_10 = 
paddle._C_ops.depthwise_conv2d( + hardswish_20, parameter_37, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_37 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_36, + parameter_35, + parameter_34, + parameter_33, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_33, parameter_34, parameter_35, parameter_36 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_21 = paddle._C_ops.hardswish(batch_norm__126) + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + hardswish_21, parameter_32, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_32 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_31, + parameter_30, + parameter_29, + parameter_28, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_28, parameter_29, parameter_30, parameter_31 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_22 = paddle._C_ops.hardswish(batch_norm__132) + + # pd_op.depthwise_conv2d: (8x256x3x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + hardswish_22, parameter_27, [2, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del parameter_27 + + # pd_op.batch_norm_: (8x256x3x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x3x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_26, + parameter_25, + parameter_24, + parameter_23, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_23, parameter_24, parameter_25, parameter_26 + + # pd_op.hardswish: (8x256x3x160xf32) <- (8x256x3x160xf32) + hardswish_23 = paddle._C_ops.hardswish(batch_norm__138) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_0 + + # pd_op.pool2d: (8x256x1x1xf32) <- (8x256x3x160xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + hardswish_23, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x64x1x1xf32) <- (8x256x1x1xf32, 64x256x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_1, parameter_22, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_22 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 
4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_21, full_int_array_1) + del parameter_21 + + # pd_op.add: (8x64x1x1xf32) <- (8x64x1x1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_12, reshape_0) + + # pd_op.relu: (8x64x1x1xf32) <- (8x64x1x1xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.conv2d: (8x256x1x1xf32) <- (8x64x1x1xf32, 256x64x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_0, parameter_20, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_20 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_19, full_int_array_1) + del parameter_19 + + # pd_op.add: (8x256x1x1xf32) <- (8x256x1x1xf32, 1x256x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_13, reshape_1) + + # pd_op.hardsigmoid: (8x256x1x1xf32) <- (8x256x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_1, float("0.166667"), float("0.5") + ) + del add_1 + + # pd_op.multiply: (8x256x3x160xf32) <- (8x256x3x160xf32, 8x256x1x1xf32) + multiply_0 = paddle._C_ops.multiply(hardswish_23, hardsigmoid_0) + + # pd_op.conv2d: (8x512x3x160xf32) <- (8x256x3x160xf32, 512x256x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + multiply_0, parameter_18, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_18 + + # pd_op.batch_norm_: (8x512x3x160xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x3x160xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_17, + parameter_16, + parameter_15, + parameter_14, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_14, parameter_15, parameter_16, parameter_17 + + # pd_op.hardswish: (8x512x3x160xf32) <- (8x512x3x160xf32) + hardswish_24 = paddle._C_ops.hardswish(batch_norm__144) + + # pd_op.depthwise_conv2d: (8x512x3x80xf32) <- (8x512x3x160xf32, 512x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + hardswish_24, parameter_13, [1, 2], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del parameter_13 + + # pd_op.batch_norm_: (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_12, + parameter_11, + parameter_10, + parameter_9, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_9 + + # pd_op.hardswish: (8x512x3x80xf32) <- (8x512x3x80xf32) + hardswish_25 = paddle._C_ops.hardswish(batch_norm__150) + + # pd_op.pool2d: (8x512x1x1xf32) <- (8x512x3x80xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + hardswish_25, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x128x1x1xf32) <- (8x512x1x1xf32, 128x512x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_2 = 
paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (8x128x1x1xf32) <- (8x128x1x1xf32, 1x128x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_15, reshape_2) + + # pd_op.relu: (8x128x1x1xf32) <- (8x128x1x1xf32) + relu_1 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.conv2d: (8x512x1x1xf32) <- (8x128x1x1xf32, 512x128x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_1, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) + del full_int_array_1, parameter_5 + + # pd_op.add: (8x512x1x1xf32) <- (8x512x1x1xf32, 1x512x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_16, reshape_3) + + # pd_op.hardsigmoid: (8x512x1x1xf32) <- (8x512x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_3, float("0.166667"), float("0.5") + ) + del add_3 + + # pd_op.multiply: (8x512x3x80xf32) <- (8x512x3x80xf32, 8x512x1x1xf32) + multiply_1 = paddle._C_ops.multiply(hardswish_25, hardsigmoid_1) + + # pd_op.conv2d: (8x512x3x80xf32) <- (8x512x3x80xf32, 512x512x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + multiply_1, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.hardswish: (8x512x3x80xf32) <- (8x512x3x80xf32) + hardswish_26 = paddle._C_ops.hardswish(batch_norm__156) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [2, 2] + + # pd_op.pool2d: (8x512x1x40xf32) <- (8x512x3x80xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + hardswish_26, + full_int_array_2, + [2, 2], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + False, + "EXPLICIT", + ) + del ( + assign_0, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + 
batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + depthwise_conv2d_0, + depthwise_conv2d_1, + depthwise_conv2d_10, + depthwise_conv2d_11, + depthwise_conv2d_12, + depthwise_conv2d_2, + depthwise_conv2d_3, + depthwise_conv2d_4, + depthwise_conv2d_5, + depthwise_conv2d_6, + depthwise_conv2d_7, + depthwise_conv2d_8, + depthwise_conv2d_9, + full_int_array_0, + full_int_array_2, + hardsigmoid_0, + hardsigmoid_1, + hardswish_0, + hardswish_1, + hardswish_10, + hardswish_11, + hardswish_12, + hardswish_13, + hardswish_14, + hardswish_15, + hardswish_16, + hardswish_17, + hardswish_18, + hardswish_19, + hardswish_2, + hardswish_20, + hardswish_21, + hardswish_22, + hardswish_23, + hardswish_24, + hardswish_25, + hardswish_26, + hardswish_3, + hardswish_4, + hardswish_5, + hardswish_6, + hardswish_7, + hardswish_8, + hardswish_9, + multiply_0, + multiply_1, + pool2d_1, + pool2d_2, + relu_0, + relu_1, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + ) + + return pool2d_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/weight_meta.py new file mode 100644 index 000000000..82e9029aa --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/weight_meta.py @@ -0,0 +1,1505 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-6.94319") + max_val = float("5.17603") + mean = float("-2.04463") + std = float("1.47288") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512] + dtype = 
"float32" + min_val = float("0.91668") + max_val = float("10.6529") + mean = float("6.39943") + std = float("2.46457") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("0.0326672") + max_val = float("2.06565") + mean = float("0.10573") + std = float("0.113225") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("-0.748073") + max_val = float("0.533052") + mean = float("-0.0542231") + std = float("0.0965589") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.932133") + max_val = float("0.914642") + mean = float("-0.00485539") + std = float("0.0376103") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512] + dtype = "float32" + min_val = float("-0.128571") + max_val = float("0.0929558") + mean = float("-0.0164776") + std = float("0.0184942") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.566549") + max_val = float("0.482768") + mean = float("-0.00409204") + std = float("0.0367954") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [128] + dtype = "float32" + min_val = float("-0.0356446") + max_val = float("0.423091") + mean = float("0.0111736") + std = float("0.0435538") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-1.33388") + max_val = float("0.743168") + mean = float("-0.00034766") + std = float("0.0342186") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [512] + dtype = "float32" + min_val = float("-2.99667") + max_val = float("1.90818") + mean = float("0.0119501") + std = float("0.445246") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [512] + dtype = "float32" + min_val = float("0.32574") + max_val = float("7.14605") + mean = float("1.3028") + std = float("0.811836") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [512] + dtype = "float32" + min_val = float("2.32749e-05") + max_val = float("0.871712") + mean = float("0.0288379") + std = float("0.0664999") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-1.66049") + max_val = float("0.449001") + mean = float("-0.096636") + std = float("0.143014") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.267002") + max_val = float("0.342505") + mean = float("0.0197413") + std = float("0.0487993") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("-2.14961") + max_val = float("2.63401") + mean = float("-0.893266") + std = float("0.776839") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512] + dtype = "float32" + min_val = float("0.0272625") + max_val = float("2.68512") + mean = float("0.991836") + std = float("0.398711") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512] + 
dtype = "float32" + min_val = float("0.0101882") + max_val = float("0.347264") + mean = float("0.049557") + std = float("0.0252963") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512] + dtype = "float32" + min_val = float("-0.976618") + max_val = float("0.845487") + mean = float("-0.187257") + std = float("0.244652") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.495357") + max_val = float("0.857577") + mean = float("-0.00338318") + std = float("0.0472778") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [256] + dtype = "float32" + min_val = float("-0.300612") + max_val = float("0.042457") + mean = float("-0.0277206") + std = float("0.0292676") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.72997") + max_val = float("0.492072") + mean = float("-0.00464223") + std = float("0.0517841") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-1.28963") + max_val = float("0.677936") + mean = float("-5.13005e-05") + std = float("0.0502358") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("-2.31421") + max_val = float("2.18386") + mean = float("0.625297") + std = float("0.572412") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("0.205862") + max_val = float("9.09855") + mean = float("1.39652") + std = float("0.855538") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256] + dtype = "float32" + min_val = float("4.48508e-05") + max_val = float("1.95411") + mean = float("0.0784968") + std = float("0.173011") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [256] + dtype = "float32" + min_val = float("-1.75732") + max_val = float("1.0666") + mean = float("-0.0631418") + std = float("0.221398") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.244326") + max_val = float("0.269266") + mean = float("0.0218083") + std = float("0.0587576") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-2.39331") + max_val = float("1.9342") + mean = float("-0.704796") + std = float("0.648597") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.0256136") + max_val = float("2.33034") + mean = float("0.998246") + std = float("0.339898") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("0.156744") + max_val = float("2.04102") + mean = float("0.678008") + std = float("0.266174") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [256] + dtype = "float32" + min_val = float("-3.49483") 
+ max_val = float("2.47609") + mean = float("-0.504286") + std = float("0.980487") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.607927") + max_val = float("0.614272") + mean = float("-0.00409529") + std = float("0.0521949") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [256] + dtype = "float32" + min_val = float("-1.30504") + max_val = float("5.34659") + mean = float("1.05266") + std = float("0.933283") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256] + dtype = "float32" + min_val = float("0.445302") + max_val = float("2.71212") + mean = float("0.815956") + std = float("0.326587") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("6.85753e-05") + max_val = float("2.58092") + mean = float("0.0676935") + std = float("0.201834") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-0.936704") + max_val = float("0.695813") + mean = float("-0.0458424") + std = float("0.159304") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.407595") + max_val = float("0.560091") + mean = float("0.00653966") + std = float("0.0681099") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256] + dtype = "float32" + min_val = float("-1.95136") + max_val = float("2.22404") + mean = float("-0.471577") + std = float("0.670012") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256] + dtype = "float32" + min_val = float("0.0352452") + max_val = float("2.53813") + mean = float("0.972082") + std = float("0.450492") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256] + dtype = "float32" + min_val = float("0.254569") + max_val = float("1.99994") + mean = float("0.663126") + std = float("0.235663") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-3.27074") + max_val = float("2.25992") + mean = float("-0.411536") + std = float("1.01829") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.66418") + max_val = float("0.452469") + mean = float("-0.00342402") + std = float("0.049564") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [256] + dtype = "float32" + min_val = float("-1.56579") + max_val = float("8.77965") + mean = float("1.24159") + std = float("1.03282") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256] + dtype = "float32" + min_val = float("0.402129") + max_val = float("2.38821") + mean = float("0.855833") + std = float("0.334832") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = float("5.1793e-05") + max_val = float("2.45013") + mean = float("0.0868208") + std = float("0.207413") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-1.52627") + 
max_val = float("0.861808") + mean = float("-0.0348122") + std = float("0.218834") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.522663") + max_val = float("0.466708") + mean = float("0.00866161") + std = float("0.0698837") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [256] + dtype = "float32" + min_val = float("-1.95166") + max_val = float("2.38586") + mean = float("-0.432737") + std = float("0.779229") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [256] + dtype = "float32" + min_val = float("0.0368856") + max_val = float("3.1372") + mean = float("1.08352") + std = float("0.503") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [256] + dtype = "float32" + min_val = float("0.290007") + max_val = float("1.87551") + mean = float("0.601254") + std = float("0.222449") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [256] + dtype = "float32" + min_val = float("-4.26529") + max_val = float("2.70649") + mean = float("-0.648219") + std = float("1.4046") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.749264") + max_val = float("0.461327") + mean = float("-0.00328299") + std = float("0.0491899") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [256] + dtype = "float32" + min_val = float("-1.65685") + max_val = float("4.78233") + mean = float("1.33167") + std = float("1.06158") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [256] + dtype = "float32" + min_val = float("0.44531") + max_val = float("2.89709") + mean = float("0.825132") + std = float("0.347385") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [256] + dtype = "float32" + min_val = float("6.01146e-05") + max_val = float("2.48392") + mean = float("0.0894386") + std = float("0.236991") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [256] + dtype = "float32" + min_val = float("-1.13379") + max_val = float("1.63063") + mean = float("0.011671") + std = float("0.240934") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.51334") + max_val = float("0.541044") + mean = float("0.00986753") + std = float("0.0731283") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-1.70988") + max_val = float("2.64219") + mean = float("-0.358473") + std = float("0.804285") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256] + dtype = "float32" + min_val = float("0.0403382") + max_val = float("2.95073") + mean = float("0.968669") + std = float("0.509248") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("0.252804") + max_val = float("1.30246") + mean = float("0.505577") + std = float("0.172513") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [256] + dtype = "float32" + min_val = float("-3.07056") + max_val = 
float("2.95945") + mean = float("-0.456218") + std = float("0.997669") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.39501") + max_val = float("0.537323") + mean = float("-0.00325754") + std = float("0.0493134") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [256] + dtype = "float32" + min_val = float("-3.66503") + max_val = float("4.69864") + mean = float("0.879424") + std = float("1.16992") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("0.394586") + max_val = float("3.18929") + mean = float("0.874726") + std = float("0.396993") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [256] + dtype = "float32" + min_val = float("4.37723e-05") + max_val = float("0.916947") + mean = float("0.0756878") + std = float("0.149113") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [256] + dtype = "float32" + min_val = float("-1.62452") + max_val = float("1.18981") + mean = float("-0.0154176") + std = float("0.243659") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.476315") + max_val = float("0.503636") + mean = float("0.0128233") + std = float("0.0706266") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [256] + dtype = "float32" + min_val = float("-1.82696") + max_val = float("2.42262") + mean = float("-0.305415") + std = float("0.809663") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [256] + dtype = "float32" + min_val = float("0.0323944") + max_val = float("3.31951") + mean = float("0.977617") + std = float("0.588473") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [256] + dtype = "float32" + min_val = float("0.117855") + max_val = float("1.06645") + mean = float("0.358032") + std = float("0.136061") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [256] + dtype = "float32" + min_val = float("-3.4732") + max_val = float("3.05095") + mean = float("-0.176997") + std = float("0.897862") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.549812") + max_val = float("0.637032") + mean = float("-0.00303072") + std = float("0.0512142") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [256] + dtype = "float32" + min_val = float("-3.67292") + max_val = float("3.23776") + mean = float("0.328735") + std = float("1.28391") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [256] + dtype = "float32" + min_val = float("0.321789") + max_val = float("5.7278") + mean = float("0.958016") + std = float("0.567403") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [256] + dtype = "float32" + min_val = float("2.6138e-05") + max_val = float("4.2714") + mean = float("0.0980561") + std = float("0.320494") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [256] + dtype = "float32" + min_val = float("-1.86519") + max_val = 
float("0.953119") + mean = float("-0.0063947") + std = float("0.252769") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.655892") + max_val = float("0.81842") + mean = float("0.00456383") + std = float("0.0843338") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [256] + dtype = "float32" + min_val = float("-2.63476") + max_val = float("3.0744") + mean = float("0.166387") + std = float("0.816252") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [256] + dtype = "float32" + min_val = float("0.0222258") + max_val = float("3.12478") + mean = float("0.630971") + std = float("0.566956") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [256] + dtype = "float32" + min_val = float("0.0690042") + max_val = float("1.81149") + mean = float("0.297713") + std = float("0.224961") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [256] + dtype = "float32" + min_val = float("-4.0926") + max_val = float("3.27831") + mean = float("-0.0214641") + std = float("1.01075") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.459044") + max_val = float("0.538959") + mean = float("-0.00102192") + std = float("0.0604132") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [128] + dtype = "float32" + min_val = float("-1.47659") + max_val = float("3.23497") + mean = float("0.87685") + std = float("1.39232") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [128] + dtype = "float32" + min_val = float("0.430426") + max_val = float("1.84802") + mean = float("0.856598") + std = float("0.343875") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [128] + dtype = "float32" + min_val = float("2.57932e-05") + max_val = float("3.08077") + mean = float("0.0972277") + std = float("0.297298") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [128] + dtype = "float32" + min_val = float("-0.760124") + max_val = float("2.64278") + mean = float("0.0807185") + std = float("0.362666") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.477131") + max_val = float("0.465011") + mean = float("0.0181202") + std = float("0.116914") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [128] + dtype = "float32" + min_val = float("-1.02909") + max_val = float("3.48264") + mean = float("0.351608") + std = float("0.817034") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [128] + dtype = "float32" + min_val = float("0.0270081") + max_val = float("3.08189") + mean = float("0.848281") + std = float("0.661586") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [128] + dtype = "float32" + min_val = float("0.165669") + max_val = float("4.11912") + mean = float("0.819655") + std = float("0.533822") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [128] + dtype = "float32" + min_val = float("-3.24276") + max_val = 
float("2.79273") + mean = float("-0.329515") + std = float("0.976574") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [128, 128, 1, 1] + dtype = "float32" + min_val = float("-0.624493") + max_val = float("0.593812") + mean = float("-0.00521636") + std = float("0.0722369") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [128] + dtype = "float32" + min_val = float("-1.87529") + max_val = float("3.28116") + mean = float("0.409486") + std = float("1.13061") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [128] + dtype = "float32" + min_val = float("0.486559") + max_val = float("4.41081") + mean = float("1.40591") + std = float("0.673133") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [128] + dtype = "float32" + min_val = float("3.97898e-05") + max_val = float("3.25727") + mean = float("0.180373") + std = float("0.446829") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [128] + dtype = "float32" + min_val = float("-1.78793") + max_val = float("1.18222") + mean = float("-0.00379003") + std = float("0.389631") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.835371") + max_val = float("0.724183") + mean = float("0.00010151") + std = float("0.152803") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [128] + dtype = "float32" + min_val = float("-1.4216") + max_val = float("4.45798") + mean = float("0.928233") + std = float("1.15296") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [128] + dtype = "float32" + min_val = float("0.036503") + max_val = float("4.59785") + mean = float("0.978381") + std = float("0.897321") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [128] + dtype = "float32" + min_val = float("0.233016") + max_val = float("13.8349") + mean = float("1.47608") + std = float("1.77682") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [128] + dtype = "float32" + min_val = float("-4.91118") + max_val = float("5.9741") + mean = float("-0.14533") + std = float("1.8069") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [128, 64, 1, 1] + dtype = "float32" + min_val = float("-0.534151") + max_val = float("0.582044") + mean = float("-0.000887954") + std = float("0.0838576") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.399308") + max_val = 
float("0.339626") + mean = float("0.0277174") + std = float("0.119061") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-0.663003") + max_val = float("0.721306") + mean = float("-0.00230788") + std = float("0.097668") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.434568") + max_val = float("0.481051") + mean = float("0.0138099") + std = float("0.137683") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.609567") + max_val = float("0.701145") + mean = float("-0.00106658") + std = float("0.108867") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [32, 1, 3, 3] + dtype = "float32" + min_val = float("-0.641973") + max_val = float("0.792394") + mean = float("0.00418543") + std = float("0.189472") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [32, 16, 1, 1] + dtype = "float32" + min_val = float("-0.682061") + max_val = float("0.756466") + mean = float("0.00285836") + std = float("0.160627") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [16, 1, 3, 3] + dtype = "float32" + min_val = float("-1.09535") + max_val = float("0.837732") + mean = float("-0.020074") + std = float("0.311435") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.70616") + max_val = float("0.829714") + mean = float("0.00703774") + std = float("0.198013") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..5799af452 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +64e2e08846acb3c913ee71113176ef47ee987044b7c7f694e7569e92bde81ce7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_net.json 
b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/input_meta.py new file mode 100644 index 000000000..c2a232658 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 512, 1, 40] + dtype = "float32" + min_val = float("-0.374952") + max_val = float("21.0197") + mean = float("1.42975") + std = float("2.85595") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8] + dtype = "float64" + data = [0.909375, 0.990625, 0.646875, 0.81875, 0.55, 0.3375, 0.565625, 0.29375] diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/model.py new file mode 100644 index 000000000..f6fb1ec26 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/model.py @@ -0,0 +1,711 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + data_0, + data_1, + ): + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.pool2d: (8x512x1x40xf32) <- (8x512x1x40xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del data_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [2] + + # pd_op.squeeze: (8x512x40xf32) <- (8x512x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(pool2d_0, full_int_array_1) + + # pd_op.transpose: (8x40x512xf32) <- (8x512x40xf32) + transpose_0 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.full: (2x8x512xf32) <- () + full_0 = paddle._C_ops.full( + [2, 8, 512], + float("0"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.assign: (2x8x512xf32) <- (2x8x512xf32) + assign_0 = full_0 + + # pd_op.transpose: (40x8x512xf32) <- (8x40x512xf32) + transpose_1 = paddle._C_ops.transpose(transpose_0, [1, 0, 2]) + del transpose_0 + + # builtin.combine: ([2x8x512xf32, 2x8x512xf32]) <- (2x8x512xf32, 2x8x512xf32) + combine_0 = [full_0, full_0] + + # builtin.combine: ([2048x512xf32, 2048x512xf32, 2048x512xf32, 2048x512xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32]) <- (2048x512xf32, 2048x512xf32, 2048x512xf32, 2048x512xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + combine_1 = [ + parameter_9, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + ] + del ( + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + ) + + # pd_op.rnn: (40x8x512xf32, 0xf32, [2x8x512xf32, 2x8x512xf32], 0xf32) <- (40x8x512xf32, [2x8x512xf32, 2x8x512xf32], [2048x512xf32, 2048x512xf32, 2048x512xf32, 2048x512xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32], None, xui8) + rnn_1, rnn_0, rnn_2, 
rnn_3 = (lambda x, f: f(x))( + paddle._C_ops.rnn( + transpose_1, + combine_0, + combine_1, + None, + parameter_10, + float("0.1"), + False, + 512, + 512, + 2, + "LSTM", + 0, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None), + ) + (None,) + del combine_0, combine_1, parameter_10 + + # builtin.split: (2x8x512xf32, 2x8x512xf32) <- ([2x8x512xf32, 2x8x512xf32]) + ( + split_0, + split_1, + ) = rnn_2 + del rnn_2 + + # pd_op.transpose: (8x40x512xf32) <- (40x8x512xf32) + transpose_2 = paddle._C_ops.transpose(rnn_1, [1, 0, 2]) + + # pd_op.shape64: (3xi64) <- (8x40x512xf32) + shape64_0 = paddle._C_ops.shape64(transpose_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_1, [1], [0] + ) + del shape64_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [0], full_int_array_3, full_int_array_2, [1], [0] + ) + del full_int_array_3 + + # pd_op.cast: (xf64) <- (xi64) + cast_0 = paddle._C_ops.cast(slice_0, paddle.float64) + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_0 = paddle._C_ops.multiply(slice_1, cast_0) + del slice_1 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_0 = paddle._C_ops.ceil(multiply_0) + del multiply_0 + + # pd_op.cast: (xi64) <- (xf64) + cast_1 = paddle._C_ops.cast(ceil_0, paddle.int64) + del ceil_0 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_0 = paddle._C_ops.minimum(slice_0, cast_1) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(minimum_0, full_1, float("-1"), True) + del minimum_0 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_4 = [] + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_0 = paddle._C_ops.reshape(full_2, full_int_array_4) + del full_2 + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_1 = paddle._C_ops.reshape(full_3, full_int_array_4) + del full_3 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_2 = [reshape_0, scale_0] + del reshape_0, scale_0 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [reshape_1, scale_1] + del scale_1 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_2 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_0, stack_1, [1, -1], [0, 1] + ) + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_1, [0], full_int_array_2, full_int_array_1, [1], [0] + ) + del full_int_array_2 + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_1 = paddle._C_ops.multiply(slice_3, cast_0) + del slice_3 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_1 = paddle._C_ops.ceil(multiply_1) + del multiply_1 + + # pd_op.cast: 
(xi64) <- (xf64) + cast_2 = paddle._C_ops.cast(ceil_1, paddle.int64) + del ceil_1 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_1 = paddle._C_ops.minimum(slice_0, cast_2) + del cast_2 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_2 = paddle._C_ops.scale(minimum_1, full_1, float("-1"), True) + del minimum_1 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_2 = paddle._C_ops.reshape(full_4, full_int_array_4) + del full_4 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_4 = [reshape_1, scale_2] + del reshape_1, scale_2 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_5 = [reshape_2, scale_3] + del scale_3 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_4 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_2, stack_3, [1, -1], [0, 1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_1, [0], full_int_array_1, full_int_array_5, [1], [0] + ) + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_2 = paddle._C_ops.multiply(slice_5, cast_0) + del slice_5 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_2 = paddle._C_ops.ceil(multiply_2) + del multiply_2 + + # pd_op.cast: (xi64) <- (xf64) + cast_3 = paddle._C_ops.cast(ceil_2, paddle.int64) + del ceil_2 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_2 = paddle._C_ops.minimum(slice_0, cast_3) + del cast_3 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_4 = paddle._C_ops.scale(minimum_2, full_1, float("-1"), True) + del minimum_2 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_5 = paddle._C_ops.scale(scale_4, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_5 = paddle._C_ops.full( + [1], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_3 = paddle._C_ops.reshape(full_5, full_int_array_4) + del full_5 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_6 = [reshape_2, scale_4] + del reshape_2, scale_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [reshape_3, scale_5] + del scale_5 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_6 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_4, stack_5, [1, -1], [0, 1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_1, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5 + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_3 = paddle._C_ops.multiply(slice_7, cast_0) + del slice_7 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_3 = paddle._C_ops.ceil(multiply_3) + del multiply_3 + + # pd_op.cast: (xi64) <- (xf64) + cast_4 = paddle._C_ops.cast(ceil_3, paddle.int64) + del ceil_3 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + 
minimum_3 = paddle._C_ops.minimum(slice_0, cast_4) + del cast_4 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_6 = paddle._C_ops.scale(minimum_3, full_1, float("-1"), True) + del minimum_3 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_4 = paddle._C_ops.reshape(full_6, full_int_array_4) + del full_6 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_8 = [reshape_3, scale_6] + del reshape_3, scale_6 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_9 = [reshape_4, scale_7] + del scale_7 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_8 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_6, stack_7, [1, -1], [0, 1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [5] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6 + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_4 = paddle._C_ops.multiply(slice_9, cast_0) + del slice_9 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_4 = paddle._C_ops.ceil(multiply_4) + del multiply_4 + + # pd_op.cast: (xi64) <- (xf64) + cast_5 = paddle._C_ops.cast(ceil_4, paddle.int64) + del ceil_4 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_4 = paddle._C_ops.minimum(slice_0, cast_5) + del cast_5 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_8 = paddle._C_ops.scale(minimum_4, full_1, float("-1"), True) + del minimum_4 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_5 = paddle._C_ops.reshape(full_7, full_int_array_4) + del full_7 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_10 = [reshape_4, scale_8] + del reshape_4, scale_8 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [reshape_5, scale_9] + del scale_9 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_10 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_8, stack_9, [1, -1], [0, 1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [6] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_1, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del full_int_array_7 + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_5 = paddle._C_ops.multiply(slice_11, cast_0) + del slice_11 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_5 = paddle._C_ops.ceil(multiply_5) + del multiply_5 + + # pd_op.cast: (xi64) <- (xf64) + cast_6 = paddle._C_ops.cast(ceil_5, paddle.int64) + del ceil_5 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_5 = paddle._C_ops.minimum(slice_0, cast_6) + del cast_6 + + # pd_op.scale: (xi64) <- 
(xi64, 1xf32) + scale_10 = paddle._C_ops.scale(minimum_5, full_1, float("-1"), True) + del minimum_5 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_8 = paddle._C_ops.full( + [1], float("6"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_6 = paddle._C_ops.reshape(full_8, full_int_array_4) + del full_8 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_12 = [reshape_5, scale_10] + del reshape_5, scale_10 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_12, 0) + del combine_12 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_13 = [reshape_6, scale_11] + del scale_11 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_11 = paddle._C_ops.stack(combine_13, 0) + del combine_13 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_12 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_10, stack_11, [1, -1], [0, 1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [7] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_1, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del full_int_array_8 + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_6 = paddle._C_ops.multiply(slice_13, cast_0) + del slice_13 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_6 = paddle._C_ops.ceil(multiply_6) + del multiply_6 + + # pd_op.cast: (xi64) <- (xf64) + cast_7 = paddle._C_ops.cast(ceil_6, paddle.int64) + del ceil_6 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_6 = paddle._C_ops.minimum(slice_0, cast_7) + del cast_7 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_12 = paddle._C_ops.scale(minimum_6, full_1, float("-1"), True) + del minimum_6 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_1, float("1"), True) + + # pd_op.full: (1xi64) <- () + full_9 = paddle._C_ops.full( + [1], float("7"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_7 = paddle._C_ops.reshape(full_9, full_int_array_4) + del full_9 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_14 = [reshape_6, scale_12] + del reshape_6, scale_12 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_12 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_15 = [reshape_7, scale_13] + del scale_13 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_13 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_14 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_12, stack_13, [1, -1], [0, 1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_10 = [8] + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_1, [0], full_int_array_9, full_int_array_10, [1], [0] + ) + del data_1, full_int_array_10, full_int_array_9 + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_7 = paddle._C_ops.multiply(slice_15, cast_0) + del cast_0, slice_15 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_7 = paddle._C_ops.ceil(multiply_7) + del multiply_7 + + # pd_op.cast: (xi64) <- (xf64) + cast_8 = paddle._C_ops.cast(ceil_7, paddle.int64) + del ceil_7 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_7 = paddle._C_ops.minimum(slice_0, cast_8) + del cast_8, slice_0 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + 
scale_14 = paddle._C_ops.scale(minimum_7, full_1, float("-1"), True) + del minimum_7 + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_15 = paddle._C_ops.scale(scale_14, full_1, float("1"), True) + del full_1 + + # pd_op.full: (1xi64) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.reshape: (xi64) <- (1xi64, 0xi64) + reshape_8 = paddle._C_ops.reshape(full_10, full_int_array_4) + del full_10, full_int_array_4 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_16 = [reshape_7, scale_14] + del reshape_7, scale_14 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_14 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_17 = [reshape_8, scale_15] + del reshape_8, scale_15 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_15 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.slice: (512xf32) <- (8x40x512xf32, 2xi64, 2xi64) + slice_16 = paddle._C_ops.slice( + transpose_2, [0, 1], stack_14, stack_15, [1, -1], [0, 1] + ) + + # builtin.combine: ([512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32]) <- (512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32) + combine_18 = [ + slice_2, + slice_4, + slice_6, + slice_8, + slice_10, + slice_12, + slice_14, + slice_16, + ] + + # pd_op.stack: (8x512xf32) <- ([512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32, 512xf32]) + stack_16 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.matmul: (8x512xf32) <- (8x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(stack_16, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (8x512xf32) <- (8x512xf32, 512xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_0) + del ( + assign_0, + full_0, + full_int_array_0, + full_int_array_1, + matmul_0, + parameter_0, + pool2d_0, + rnn_1, + slice_10, + slice_12, + slice_14, + slice_16, + slice_2, + slice_4, + slice_6, + slice_8, + stack_0, + stack_1, + stack_10, + stack_11, + stack_12, + stack_13, + stack_14, + stack_15, + stack_16, + stack_2, + stack_3, + stack_4, + stack_5, + stack_6, + stack_7, + stack_8, + stack_9, + transpose_1, + transpose_2, + ) + + return rnn_0, split_0, split_1, add_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/weight_meta.py new file mode 100644 index 000000000..f724a46b0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/weight_meta.py @@ -0,0 +1,117 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-3.31668") + max_val = float("0.234877") + mean = float("-0.00649724") + std = float("0.149464") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 512] + dtype = "float32" + min_val = float("-2.86775") + max_val = float("1.46244") + mean = float("7.60437e-06") + std = float("0.0332225") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [2048] + dtype = "float32" + min_val = float("-0.542653") + max_val = float("0.575263") + mean = float("0.016312") + std = float("0.0928475") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [2048] + dtype = "float32" + min_val = float("-0.542931") + max_val = float("0.575263") + mean = float("0.0163444") + std = float("0.0929558") + data = None + + +class 
Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [2048] + dtype = "float32" + min_val = float("-0.109009") + max_val = float("0.0975981") + mean = float("0.0105575") + std = float("0.0165411") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [2048] + dtype = "float32" + min_val = float("-0.108211") + max_val = float("0.0975994") + mean = float("0.0105572") + std = float("0.0165437") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [2048, 512] + dtype = "float32" + min_val = float("-1.14314") + max_val = float("1.6061") + mean = float("5.50146e-05") + std = float("0.0222411") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.829402") + max_val = float("0.637158") + mean = float("-3.60381e-05") + std = float("0.0156146") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.848953") + max_val = float("0.797453") + mean = float("9.63144e-05") + std = float("0.0217754") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.54106") + max_val = float("0.573123") + mean = float("3.06617e-05") + std = float("0.0250425") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [] + dtype = "uint8" + min_val = 0 + max_val = 3 + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..73ab95847 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +a3e8dc68e40a1e1a56c892531c38bfa9d35e4080ff571434f85919fdc4ec5df2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/input_meta.py new file mode 100644 index 000000000..8573d2aac --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [40] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8] + dtype = "float64" + data = [0.909375, 0.990625, 0.646875, 0.81875, 0.55, 0.3375, 0.565625, 0.29375] diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/model.py new file mode 100644 index 000000000..53d786f4f --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/model.py @@ -0,0 +1,61 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, 
data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(data_0, full_0, float("1"), True) + del full_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_0 = [data_0] + del data_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_1 = [scale_0] + del scale_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (xf64) <- (8xf64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice(data_2, [0], stack_0, stack_1, [-1], [0]) + del data_2, stack_0, stack_1 + + # pd_op.cast: (xf64) <- (xi64) + cast_0 = paddle._C_ops.cast(data_1, paddle.float64) + + # pd_op.multiply: (xf64) <- (xf64, xf64) + multiply_0 = paddle._C_ops.multiply(slice_0, cast_0) + del cast_0, slice_0 + + # pd_op.ceil: (xf64) <- (xf64) + ceil_0 = paddle._C_ops.ceil(multiply_0) + del multiply_0 + + # pd_op.cast: (xi64) <- (xf64) + cast_1 = paddle._C_ops.cast(ceil_0, paddle.int64) + del ceil_0 + + # pd_op.minimum: (xi64) <- (xi64, xi64) + minimum_0 = paddle._C_ops.minimum(data_1, cast_1) + del cast_1 + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(minimum_0, data_1) + del data_1, minimum_0 + + return less_than_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..d05b1513f --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +048de6a944fa418cb405e398255ca8f81c765fe973ea3963e1293e80a8956ed7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/input_meta.py new file mode 100644 index 000000000..91da800d6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/input_meta.py @@ -0,0 +1,18 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 512] + dtype = "float32" + min_val = float("-13.7732") + max_val = float("1.4484") + mean = float("-0.0286131") + std = float("0.599776") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 25] + dtype = "int64" + min_val = 25 + max_val = 6626 + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/model.py new file mode 100644 index 000000000..8d36c711a --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/model.py @@ -0,0 +1,32 @@ 
+import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1): + # pd_op.embedding: (8x25x512xf32) <- (8x25xi64, 6627x512xf32) + embedding_0 = paddle._C_ops.embedding(data_1, parameter_0, 6626, False) + del data_1, parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (8x1x512xf32) <- (8x512xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x512xf32, 8x25x512xf32]) <- (8x1x512xf32, 8x25x512xf32) + combine_0 = [unsqueeze_0, embedding_0] + + # pd_op.concat: (8x26x512xf32) <- ([8x1x512xf32, 8x25x512xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0, embedding_0, full_0, full_int_array_0, unsqueeze_0 + + return concat_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8c3f47570 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/weight_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6627, 512] + dtype = "float32" + min_val = float("-4.29271") + max_val = float("4.55126") + mean = float("-1.98214e-07") + std = float("0.0156426") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..7a360ecf8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +3a97b5d27d67627d5a107385b311cd467dd48e1126e5e0d26d29e7b648cbc6a2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/input_meta.py new file mode 100644 index 000000000..99d8b4e82 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 26, 1, 40, 1] + dtype = "float32" + min_val = float("-inf") + max_val = float("6.34192") + mean = float("-inf") + std = float("nan") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [26] diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/model.py new file mode 100644 index 000000000..690ec848b --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/model.py @@ -0,0 +1,61 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # 
pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(data_0, full_0, float("1"), True) + del full_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("2.14748e+09"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2.14748e+09"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [data_0, data_2] + del data_0, data_2 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [scale_0, full_2] + del full_2, scale_0 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.set_value_: (8x26x1x40x1xf32) <- (8x26x1x40x1xf32, 2xi64, 2xi64, 2xi64) + set_value__0 = paddle._C_ops.set_value_( + data_1, + stack_0, + stack_1, + full_int_array_0, + [0, 3], + [0], + [], + [1], + [float("-inf")], + ) + del data_1, full_int_array_0, stack_0, stack_1 + + return set_value__0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..9c8d9277e --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +8c102e7dc9047680a4acce979cd377f1163f5202a978bc3717cc7d91e985a3cb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/input_meta.py new file mode 100644 index 000000000..fae4f0276 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/input_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 26, 512] + dtype = "float32" + min_val = float("-13.7732") + max_val = float("4.55126") + mean = float("-0.000303846") + std = float("0.143678") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 512, 1, 40] + dtype = "float32" + min_val = float("-0.374952") + max_val = float("21.0197") + mean = float("1.42975") + std = float("2.85595") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/model.py new file mode 100644 index 000000000..77588c1a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/model.py @@ -0,0 +1,232 @@ +import paddle + + +class 
GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + data_0, + data_1, + ): + # pd_op.full: (2x8x512xf32) <- () + full_0 = paddle._C_ops.full( + [2, 8, 512], + float("0"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.assign: (2x8x512xf32) <- (2x8x512xf32) + assign_0 = full_0 + + # pd_op.transpose: (26x8x512xf32) <- (8x26x512xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [1, 0, 2]) + del data_0 + + # builtin.combine: ([2x8x512xf32, 2x8x512xf32]) <- (2x8x512xf32, 2x8x512xf32) + combine_0 = [full_0, full_0] + + # builtin.combine: ([2048x512xf32, 2048x512xf32, 2048x512xf32, 2048x512xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32]) <- (2048x512xf32, 2048x512xf32, 2048x512xf32, 2048x512xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32) + combine_1 = [ + parameter_13, + parameter_12, + parameter_11, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + parameter_6, + ] + del ( + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + ) + + # pd_op.rnn: (26x8x512xf32, 0xf32, [2x8x512xf32, 2x8x512xf32], 0xf32) <- (26x8x512xf32, [2x8x512xf32, 2x8x512xf32], [2048x512xf32, 2048x512xf32, 2048x512xf32, 2048x512xf32, 2048xf32, 2048xf32, 2048xf32, 2048xf32], None, xui8) + rnn_1, rnn_0, rnn_2, rnn_3 = (lambda x, f: f(x))( + paddle._C_ops.rnn( + transpose_0, + combine_0, + combine_1, + None, + parameter_14, + float("0"), + False, + 512, + 512, + 2, + "LSTM", + 0, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None), + ) + (None,) + del combine_0, combine_1, parameter_14 + + # builtin.split: (2x8x512xf32, 2x8x512xf32) <- ([2x8x512xf32, 2x8x512xf32]) + ( + split_0, + split_1, + ) = rnn_2 + del rnn_2 + + # pd_op.transpose: (8x26x512xf32) <- (26x8x512xf32) + transpose_1 = paddle._C_ops.transpose(rnn_1, [1, 0, 2]) + + # pd_op.matmul: (8x26x512xf32) <- (8x26x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(transpose_1, parameter_5, False, False) + del parameter_5 + + # pd_op.add: (8x26x512xf32) <- (8x26x512xf32, 512xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_4) + del parameter_4 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 4] + + # pd_op.unsqueeze: (8x26x512x1x1xf32) <- (8x26x512xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(add_0, full_int_array_0) + + # pd_op.conv2d: (8x512x1x40xf32) <- (8x512x1x40xf32, 512x512x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_1, parameter_3, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_1, parameter_3 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_2, full_int_array_1) + del full_int_array_1, parameter_2 + + # pd_op.add: (8x512x1x40xf32) <- (8x512x1x40xf32, 1x512x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_0, reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (8x1x512x1x40xf32) <- (8x512x1x40xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(add_1, full_int_array_2) + + # pd_op.add: (8x26x512x1x40xf32) <- (8x1x512x1x40xf32, 8x26x512x1x1xf32) + add_2 = paddle._C_ops.add(unsqueeze_1, unsqueeze_0) + + # 
pd_op.tanh: (8x26x512x1x40xf32) <- (8x26x512x1x40xf32) + tanh_0 = paddle._C_ops.tanh(add_2) + del add_2 + + # pd_op.transpose: (8x26x1x40x512xf32) <- (8x26x512x1x40xf32) + transpose_2 = paddle._C_ops.transpose(tanh_0, [0, 1, 3, 4, 2]) + + # pd_op.matmul: (8x26x1x40x1xf32) <- (8x26x1x40x512xf32, 512x1xf32) + matmul_1 = paddle._C_ops.matmul(transpose_2, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (8x26x1x40x1xf32) <- (8x26x1x40x1xf32, 1xf32) + add_3 = paddle._C_ops.add(matmul_1, parameter_0) + del parameter_0 + + # pd_op.shape64: (5xi64) <- (8x26x1x40x1xf32) + shape64_0 = paddle._C_ops.shape64(add_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.slice: (xi64) <- (5xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_2, [1], [0] + ) + del full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.slice: (xi64) <- (5xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_4, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (5xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (5xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [5] + + # pd_op.slice: (xi64) <- (5xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(slice_4, full_1) + del ( + add_0, + add_1, + add_3, + assign_0, + conv2d_0, + full_0, + full_1, + full_int_array_0, + full_int_array_2, + matmul_0, + matmul_1, + reshape_0, + rnn_1, + slice_4, + tanh_0, + transpose_0, + transpose_1, + transpose_2, + unsqueeze_0, + unsqueeze_1, + ) + + return rnn_0, split_0, split_1, equal_0, slice_0, slice_1, slice_2, slice_3 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/weight_meta.py new file mode 100644 index 000000000..a37a488fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/weight_meta.py @@ -0,0 +1,159 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512, 1] + dtype = "float32" + min_val = float("-1.0492") + max_val = float("0.658404") + mean = float("-0.00579252") + std = float("0.0962193") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("-0.0292232") + max_val = float("0.0243092") + mean = float("4.50228e-06") + std = float("0.00290474") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512, 512, 3, 3] + dtype = "float32" + min_val = float("-0.442805") + max_val = float("0.249616") + mean = 
float("5.35911e-06") + std = float("0.00248864") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512] + dtype = "float32" + min_val = float("-0.0292233") + max_val = float("0.0243092") + mean = float("4.50216e-06") + std = float("0.00290473") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.181493") + max_val = float("0.177804") + mean = float("-2.79389e-05") + std = float("0.00555581") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [2048] + dtype = "float32" + min_val = float("-0.0646618") + max_val = float("0.0926821") + mean = float("0.0299549") + std = float("0.0354666") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [2048] + dtype = "float32" + min_val = float("-0.0646619") + max_val = float("0.0926821") + mean = float("0.0299549") + std = float("0.0354665") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [2048] + dtype = "float32" + min_val = float("-0.172956") + max_val = float("0.153334") + mean = float("0.0380417") + std = float("0.057575") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [2048] + dtype = "float32" + min_val = float("-0.172927") + max_val = float("0.159116") + mean = float("0.0380411") + std = float("0.0575742") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.497651") + max_val = float("0.87957") + mean = float("0.000121814") + std = float("0.0218334") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.933299") + max_val = float("0.94736") + mean = float("-6.25459e-05") + std = float("0.0454653") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [2048, 512] + dtype = "float32" + min_val = float("-1.23974") + max_val = float("1.10193") + mean = float("-3.08467e-05") + std = float("0.0388619") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [2048, 512] + dtype = "float32" + min_val = float("-2.06299") + max_val = float("1.77387") + mean = float("-0.000142565") + std = float("0.0326453") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [] + dtype = "uint8" + min_val = 0 + max_val = 3 + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..dba345193 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +3a4d2102f9f1e0529e1231b28ed12842864d2c11bf66dc7fe8d52582972a2faf \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/input_meta.py 
b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/input_meta.py new file mode 100644 index 000000000..e8cc5cbd4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 3, 48, 320] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("-0.0236384") + std = float("0.223792") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/model.py new file mode 100644 index 000000000..70c0c885f --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/model.py @@ -0,0 +1,2013 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + 
parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + data_0, + ): + # pd_op.conv2d: (8x16x24x160xf32) <- (8x3x48x320xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_195, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_195 + + # pd_op.batch_norm_: (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_194, + parameter_193, + parameter_192, + parameter_191, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_191, parameter_192, parameter_193, parameter_194 + + # pd_op.hardswish: (8x16x24x160xf32) <- (8x16x24x160xf32) + hardswish_0 = paddle._C_ops.hardswish(batch_norm__0) + del batch_norm__0 + + # pd_op.depthwise_conv2d: (8x16x24x160xf32) <- (8x16x24x160xf32, 16x1x3x3xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + hardswish_0, parameter_190, [1, 1], [1, 1], "EXPLICIT", 16, [1, 1], "NCHW" + ) + del hardswish_0, parameter_190 + + # pd_op.batch_norm_: (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (8x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_189, + parameter_188, + parameter_187, + parameter_186, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_0, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + ) + + # pd_op.hardswish: (8x16x24x160xf32) <- (8x16x24x160xf32) + hardswish_1 = paddle._C_ops.hardswish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (8x32x24x160xf32) <- (8x16x24x160xf32, 32x16x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + hardswish_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_1, parameter_185 + + # pd_op.batch_norm_: (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_184, + parameter_183, + parameter_182, + parameter_181, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_181, parameter_182, parameter_183, parameter_184 + + # pd_op.hardswish: (8x32x24x160xf32) <- (8x32x24x160xf32) + hardswish_2 = paddle._C_ops.hardswish(batch_norm__12) + del 
batch_norm__12 + + # pd_op.depthwise_conv2d: (8x32x24x160xf32) <- (8x32x24x160xf32, 32x1x3x3xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + hardswish_2, parameter_180, [1, 1], [1, 1], "EXPLICIT", 32, [1, 1], "NCHW" + ) + del hardswish_2, parameter_180 + + # pd_op.batch_norm_: (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_179, + parameter_178, + parameter_177, + parameter_176, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_1, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + ) + + # pd_op.hardswish: (8x32x24x160xf32) <- (8x32x24x160xf32) + hardswish_3 = paddle._C_ops.hardswish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (8x64x24x160xf32) <- (8x32x24x160xf32, 64x32x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + hardswish_3, parameter_175, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_3, parameter_175 + + # pd_op.batch_norm_: (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_174, + parameter_173, + parameter_172, + parameter_171, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_171, parameter_172, parameter_173, parameter_174 + + # pd_op.hardswish: (8x64x24x160xf32) <- (8x64x24x160xf32) + hardswish_4 = paddle._C_ops.hardswish(batch_norm__24) + del batch_norm__24 + + # pd_op.depthwise_conv2d: (8x64x24x160xf32) <- (8x64x24x160xf32, 64x1x3x3xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + hardswish_4, parameter_170, [1, 1], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del hardswish_4, parameter_170 + + # pd_op.batch_norm_: (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_169, + parameter_168, + parameter_167, + parameter_166, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_2, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + ) + + # pd_op.hardswish: (8x64x24x160xf32) <- (8x64x24x160xf32) + hardswish_5 = paddle._C_ops.hardswish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (8x64x24x160xf32) <- (8x64x24x160xf32, 64x64x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + hardswish_5, parameter_165, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_5, parameter_165 + + # pd_op.batch_norm_: (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + 
batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_164, + parameter_163, + parameter_162, + parameter_161, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_161, parameter_162, parameter_163, parameter_164 + + # pd_op.hardswish: (8x64x24x160xf32) <- (8x64x24x160xf32) + hardswish_6 = paddle._C_ops.hardswish(batch_norm__36) + del batch_norm__36 + + # pd_op.depthwise_conv2d: (8x64x12x160xf32) <- (8x64x24x160xf32, 64x1x3x3xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + hardswish_6, parameter_160, [2, 1], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del hardswish_6, parameter_160 + + # pd_op.batch_norm_: (8x64x12x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x12x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_159, + parameter_158, + parameter_157, + parameter_156, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_3, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + ) + + # pd_op.hardswish: (8x64x12x160xf32) <- (8x64x12x160xf32) + hardswish_7 = paddle._C_ops.hardswish(batch_norm__42) + del batch_norm__42 + + # pd_op.conv2d: (8x128x12x160xf32) <- (8x64x12x160xf32, 128x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + hardswish_7, parameter_155, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_7, parameter_155 + + # pd_op.batch_norm_: (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_154, + parameter_153, + parameter_152, + parameter_151, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_151, parameter_152, parameter_153, parameter_154 + + # pd_op.hardswish: (8x128x12x160xf32) <- (8x128x12x160xf32) + hardswish_8 = paddle._C_ops.hardswish(batch_norm__48) + del batch_norm__48 + + # pd_op.depthwise_conv2d: (8x128x12x160xf32) <- (8x128x12x160xf32, 128x1x3x3xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + hardswish_8, parameter_150, [1, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del hardswish_8, parameter_150 + + # pd_op.batch_norm_: (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_149, + parameter_148, + parameter_147, + parameter_146, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_4, + parameter_146, + parameter_147, + 
parameter_148, + parameter_149, + ) + + # pd_op.hardswish: (8x128x12x160xf32) <- (8x128x12x160xf32) + hardswish_9 = paddle._C_ops.hardswish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (8x128x12x160xf32) <- (8x128x12x160xf32, 128x128x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + hardswish_9, parameter_145, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_9, parameter_145 + + # pd_op.batch_norm_: (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_144, + parameter_143, + parameter_142, + parameter_141, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_141, parameter_142, parameter_143, parameter_144 + + # pd_op.hardswish: (8x128x12x160xf32) <- (8x128x12x160xf32) + hardswish_10 = paddle._C_ops.hardswish(batch_norm__60) + del batch_norm__60 + + # pd_op.depthwise_conv2d: (8x128x6x160xf32) <- (8x128x12x160xf32, 128x1x3x3xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + hardswish_10, parameter_140, [2, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del hardswish_10, parameter_140 + + # pd_op.batch_norm_: (8x128x6x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x6x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_139, + parameter_138, + parameter_137, + parameter_136, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_5, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + ) + + # pd_op.hardswish: (8x128x6x160xf32) <- (8x128x6x160xf32) + hardswish_11 = paddle._C_ops.hardswish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x128x6x160xf32, 256x128x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + hardswish_11, parameter_135, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_11, parameter_135 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_134, + parameter_133, + parameter_132, + parameter_131, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_131, parameter_132, parameter_133, parameter_134 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_12 = paddle._C_ops.hardswish(batch_norm__72) + del batch_norm__72 + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + hardswish_12, parameter_130, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_12, parameter_130 + + # 
pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_129, + parameter_128, + parameter_127, + parameter_126, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_6, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + ) + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_13 = paddle._C_ops.hardswish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + hardswish_13, parameter_125, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_13, parameter_125 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_124, + parameter_123, + parameter_122, + parameter_121, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_121, parameter_122, parameter_123, parameter_124 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_14 = paddle._C_ops.hardswish(batch_norm__84) + del batch_norm__84 + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + hardswish_14, parameter_120, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_14, parameter_120 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_119, + parameter_118, + parameter_117, + parameter_116, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_7, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + ) + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_15 = paddle._C_ops.hardswish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + hardswish_15, parameter_115, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_15, parameter_115 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_114, + parameter_113, + parameter_112, + parameter_111, + True, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_111, parameter_112, parameter_113, parameter_114 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_16 = paddle._C_ops.hardswish(batch_norm__96) + del batch_norm__96 + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + hardswish_16, parameter_110, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_16, parameter_110 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_109, + parameter_108, + parameter_107, + parameter_106, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_8, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + ) + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_17 = paddle._C_ops.hardswish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + hardswish_17, parameter_105, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_17, parameter_105 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_104, + parameter_103, + parameter_102, + parameter_101, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_101, parameter_102, parameter_103, parameter_104 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_18 = paddle._C_ops.hardswish(batch_norm__108) + del batch_norm__108 + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + hardswish_18, parameter_100, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_18, parameter_100 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_99, + parameter_98, + parameter_97, + parameter_96, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_9, parameter_96, parameter_97, parameter_98, parameter_99 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_19 = paddle._C_ops.hardswish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: 
(8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + hardswish_19, parameter_95, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_19, parameter_95 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_94, + parameter_93, + parameter_92, + parameter_91, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_91, parameter_92, parameter_93, parameter_94 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_20 = paddle._C_ops.hardswish(batch_norm__120) + del batch_norm__120 + + # pd_op.depthwise_conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + hardswish_20, parameter_90, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_20, parameter_90 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_89, + parameter_88, + parameter_87, + parameter_86, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_10, parameter_86, parameter_87, parameter_88, parameter_89 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_21 = paddle._C_ops.hardswish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (8x256x6x160xf32) <- (8x256x6x160xf32, 256x256x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + hardswish_21, parameter_85, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_21, parameter_85 + + # pd_op.batch_norm_: (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_84, + parameter_83, + parameter_82, + parameter_81, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_81, parameter_82, parameter_83, parameter_84 + + # pd_op.hardswish: (8x256x6x160xf32) <- (8x256x6x160xf32) + hardswish_22 = paddle._C_ops.hardswish(batch_norm__132) + del batch_norm__132 + + # pd_op.depthwise_conv2d: (8x256x3x160xf32) <- (8x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + hardswish_22, parameter_80, [2, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_22, parameter_80 + + # pd_op.batch_norm_: (8x256x3x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x3x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + 
batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_79, + parameter_78, + parameter_77, + parameter_76, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_11, parameter_76, parameter_77, parameter_78, parameter_79 + + # pd_op.hardswish: (8x256x3x160xf32) <- (8x256x3x160xf32) + hardswish_23 = paddle._C_ops.hardswish(batch_norm__138) + del batch_norm__138 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.pool2d: (8x256x1x1xf32) <- (8x256x3x160xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + hardswish_23, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x64x1x1xf32) <- (8x256x1x1xf32, 64x256x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_0, parameter_75, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_75, pool2d_0 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_74, full_int_array_1) + del parameter_74 + + # pd_op.add: (8x64x1x1xf32) <- (8x64x1x1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_12, reshape_0) + del conv2d_12, reshape_0 + + # pd_op.relu: (8x64x1x1xf32) <- (8x64x1x1xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.conv2d: (8x256x1x1xf32) <- (8x64x1x1xf32, 256x64x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_0, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73, relu_0 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_72, full_int_array_1) + del parameter_72 + + # pd_op.add: (8x256x1x1xf32) <- (8x256x1x1xf32, 1x256x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_13, reshape_1) + del conv2d_13, reshape_1 + + # pd_op.hardsigmoid: (8x256x1x1xf32) <- (8x256x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_1, float("0.166667"), float("0.5") + ) + del add_1 + + # pd_op.multiply: (8x256x3x160xf32) <- (8x256x3x160xf32, 8x256x1x1xf32) + multiply_0 = paddle._C_ops.multiply(hardswish_23, hardsigmoid_0) + del hardsigmoid_0, hardswish_23 + + # pd_op.conv2d: (8x512x3x160xf32) <- (8x256x3x160xf32, 512x256x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + multiply_0, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_71 + + # pd_op.batch_norm_: (8x512x3x160xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x3x160xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_70, + parameter_69, + parameter_68, + parameter_67, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_67, parameter_68, parameter_69, parameter_70 + + # pd_op.hardswish: (8x512x3x160xf32) <- (8x512x3x160xf32) + hardswish_24 = paddle._C_ops.hardswish(batch_norm__144) + del batch_norm__144 + + # pd_op.depthwise_conv2d: (8x512x3x80xf32) <- (8x512x3x160xf32, 512x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + hardswish_24, parameter_66, 
[1, 2], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del hardswish_24, parameter_66 + + # pd_op.batch_norm_: (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_65, + parameter_64, + parameter_63, + parameter_62, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_12, parameter_62, parameter_63, parameter_64, parameter_65 + + # pd_op.hardswish: (8x512x3x80xf32) <- (8x512x3x80xf32) + hardswish_25 = paddle._C_ops.hardswish(batch_norm__150) + del batch_norm__150 + + # pd_op.pool2d: (8x512x1x1xf32) <- (8x512x3x80xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + hardswish_25, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_0 + + # pd_op.conv2d: (8x128x1x1xf32) <- (8x512x1x1xf32, 128x512x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_1, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61, pool2d_1 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_60, full_int_array_1) + del parameter_60 + + # pd_op.add: (8x128x1x1xf32) <- (8x128x1x1xf32, 1x128x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_15, reshape_2) + del conv2d_15, reshape_2 + + # pd_op.relu: (8x128x1x1xf32) <- (8x128x1x1xf32) + relu_1 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.conv2d: (8x512x1x1xf32) <- (8x128x1x1xf32, 512x128x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_1, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59, relu_1 + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_58, full_int_array_1) + del full_int_array_1, parameter_58 + + # pd_op.add: (8x512x1x1xf32) <- (8x512x1x1xf32, 1x512x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_16, reshape_3) + del conv2d_16, reshape_3 + + # pd_op.hardsigmoid: (8x512x1x1xf32) <- (8x512x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_3, float("0.166667"), float("0.5") + ) + del add_3 + + # pd_op.multiply: (8x512x3x80xf32) <- (8x512x3x80xf32, 8x512x1x1xf32) + multiply_1 = paddle._C_ops.multiply(hardswish_25, hardsigmoid_1) + del hardsigmoid_1, hardswish_25 + + # pd_op.conv2d: (8x512x3x80xf32) <- (8x512x3x80xf32, 512x512x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + multiply_1, parameter_57, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_57 + + # pd_op.batch_norm_: (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_56, + parameter_55, + parameter_54, + parameter_53, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_53, parameter_54, parameter_55, parameter_56 + + # pd_op.hardswish: (8x512x3x80xf32) <- (8x512x3x80xf32) + hardswish_26 = 
paddle._C_ops.hardswish(batch_norm__156) + del batch_norm__156 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [2, 2] + + # pd_op.pool2d: (8x512x1x40xf32) <- (8x512x3x80xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + hardswish_26, + full_int_array_2, + [2, 2], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + False, + "EXPLICIT", + ) + del full_int_array_2, hardswish_26 + + # pd_op.assign: (8x512x1x40xf32) <- (8x512x1x40xf32) + assign_0 = pool2d_2 + del pool2d_2 + + # pd_op.conv2d: (8x64x1x40xf32) <- (8x512x1x40xf32, 64x512x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + assign_0, parameter_52, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (8x64x1x40xf32) <- (8x64x1x40xf32) + swish_0 = paddle._C_ops.swish(batch_norm__162) + del batch_norm__162 + + # pd_op.conv2d: (8x120x1x40xf32) <- (8x64x1x40xf32, 120x64x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_47, swish_0 + + # pd_op.batch_norm_: (8x120x1x40xf32, 120xf32, 120xf32, 120xf32, 120xf32, -1xui8) <- (8x120x1x40xf32, 120xf32, 120xf32, 120xf32, 120xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.swish: (8x120x1x40xf32) <- (8x120x1x40xf32) + swish_1 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.flatten: (8x120x40xf32) <- (8x120x1x40xf32) + flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) + del swish_1 + + # pd_op.transpose: (8x40x120xf32) <- (8x120x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_42, parameter_41, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_41, parameter_42 + + # pd_op.matmul: (8x40x360xf32) <- (8x40x120xf32, 120x360xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) + del layer_norm_0, parameter_40 + + # pd_op.add: (8x40x360xf32) <- (8x40x360xf32, 360xf32) + add_4 = paddle._C_ops.add(matmul_0, parameter_39) + del matmul_0, parameter_39 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_3 = [0, -1, 3, 8, 15] + + # pd_op.reshape: (8x40x3x8x15xf32) <- (8x40x360xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_4, 
full_int_array_3) + del add_4 + + # pd_op.transpose: (3x8x8x40x15xf32) <- (8x40x3x8x15xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.258199"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x8x40x15xf32) <- (8x8x40x15xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_0, full_0, float("0"), True) + del slice_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2] + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del transpose_1 + + # pd_op.transpose: (8x8x15x40xf32) <- (8x8x40x15xf32) + transpose_2 = paddle._C_ops.transpose(slice_1, [0, 1, 3, 2]) + del slice_1 + + # pd_op.matmul: (8x8x40x40xf32) <- (8x8x40x15xf32, 8x8x15x40xf32) + matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) + del scale_0, transpose_2 + + # pd_op.softmax: (8x8x40x40xf32) <- (8x8x40x40xf32) + softmax_1 = paddle._C_ops.softmax(matmul_1, -1) + del matmul_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (8x8x40x40xf32, 8x8x40x40xui8) <- (8x8x40x40xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (8x8x40x15xf32) <- (8x8x40x40xf32, 8x8x40x15xf32) + matmul_2 = paddle._C_ops.matmul(dropout_0, slice_2, False, False) + del dropout_0, slice_2 + + # pd_op.transpose: (8x40x8x15xf32) <- (8x8x40x15xf32) + transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, -1, 120] + + # pd_op.reshape: (8x40x120xf32) <- (8x40x8x15xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_3, full_int_array_8) + del transpose_3 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x120xf32, 120x120xf32) + matmul_3 = paddle._C_ops.matmul(reshape_5, parameter_38, False, False) + del parameter_38, reshape_5 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_5 = paddle._C_ops.add(matmul_3, parameter_37) + del matmul_3, parameter_37 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_6 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + 
paddle._C_ops.layer_norm( + add_6, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.matmul: (8x40x240xf32) <- (8x40x120xf32, 120x240xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) + del layer_norm_3, parameter_34 + + # pd_op.add: (8x40x240xf32) <- (8x40x240xf32, 240xf32) + add_7 = paddle._C_ops.add(matmul_4, parameter_33) + del matmul_4, parameter_33 + + # pd_op.swish: (8x40x240xf32) <- (8x40x240xf32) + swish_2 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.dropout: (8x40x240xf32, 8x40x240xui8) <- (8x40x240xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_2, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_2 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x240xf32, 240x120xf32) + matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) + del dropout_4, parameter_32 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_8 = paddle._C_ops.add(matmul_5, parameter_31) + del matmul_5, parameter_31 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_9 = paddle._C_ops.add(add_6, dropout_6) + del add_6, dropout_6 + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_30, parameter_29, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_29, parameter_30 + + # pd_op.matmul: (8x40x360xf32) <- (8x40x120xf32, 120x360xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) + del layer_norm_6, parameter_28 + + # pd_op.add: (8x40x360xf32) <- (8x40x360xf32, 360xf32) + add_10 = paddle._C_ops.add(matmul_6, parameter_27) + del matmul_6, parameter_27 + + # pd_op.reshape: (8x40x3x8x15xf32) <- (8x40x360xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_10, full_int_array_3) + del add_10, full_int_array_3 + + # pd_op.transpose: (3x8x8x40x15xf32) <- (8x40x3x8x15xf32) + transpose_4 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4 + + # pd_op.scale: (8x8x40x15xf32) <- (8x8x40x15xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_3, full_0, float("0"), True) + del full_0, slice_3 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_7, transpose_4 + + # pd_op.transpose: (8x8x15x40xf32) <- (8x8x40x15xf32) + transpose_5 = paddle._C_ops.transpose(slice_4, [0, 1, 3, 2]) + del slice_4 + + # pd_op.matmul: 
(8x8x40x40xf32) <- (8x8x40x15xf32, 8x8x15x40xf32) + matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) + del scale_1, transpose_5 + + # pd_op.softmax: (8x8x40x40xf32) <- (8x8x40x40xf32) + softmax_2 = paddle._C_ops.softmax(matmul_7, -1) + del matmul_7 + + # pd_op.dropout: (8x8x40x40xf32, 8x8x40x40xui8) <- (8x8x40x40xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (8x8x40x15xf32) <- (8x8x40x40xf32, 8x8x40x15xf32) + matmul_8 = paddle._C_ops.matmul(dropout_8, slice_5, False, False) + del dropout_8, slice_5 + + # pd_op.transpose: (8x40x8x15xf32) <- (8x8x40x15xf32) + transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (8x40x120xf32) <- (8x40x8x15xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_6, full_int_array_8) + del full_int_array_8, transpose_6 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x120xf32, 120x120xf32) + matmul_9 = paddle._C_ops.matmul(reshape_7, parameter_26, False, False) + del parameter_26, reshape_7 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_11 = paddle._C_ops.add(matmul_9, parameter_25) + del matmul_9, parameter_25 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_12 = paddle._C_ops.add(add_9, dropout_10) + del add_9, dropout_10 + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_24, parameter_23, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_23, parameter_24 + + # pd_op.matmul: (8x40x240xf32) <- (8x40x120xf32, 120x240xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) + del layer_norm_9, parameter_22 + + # pd_op.add: (8x40x240xf32) <- (8x40x240xf32, 240xf32) + add_13 = paddle._C_ops.add(matmul_10, parameter_21) + del matmul_10, parameter_21 + + # pd_op.swish: (8x40x240xf32) <- (8x40x240xf32) + swish_3 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.dropout: (8x40x240xf32, 8x40x240xui8) <- (8x40x240xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_3, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_3 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x240xf32, 240x120xf32) + matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) + del dropout_12, parameter_20 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_14 = paddle._C_ops.add(matmul_11, parameter_19) + del matmul_11, parameter_19 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_14, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_14, full_1 + + # pd_op.add: (8x40x120xf32) <- 
(8x40x120xf32, 8x40x120xf32) + add_15 = paddle._C_ops.add(add_12, dropout_14) + del add_12, dropout_14 + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_15, parameter_18, parameter_17, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_15, parameter_17, parameter_18 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [0, 1, 40, 120] + + # pd_op.reshape: (8x1x40x120xf32) <- (8x40x120xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(layer_norm_12, full_int_array_9) + del full_int_array_9, layer_norm_12 + + # pd_op.transpose: (8x120x1x40xf32) <- (8x1x40x120xf32) + transpose_7 = paddle._C_ops.transpose(reshape_8, [0, 3, 1, 2]) + del reshape_8 + + # pd_op.conv2d: (8x512x1x40xf32) <- (8x120x1x40xf32, 512x120x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16, transpose_7 + + # pd_op.batch_norm_: (8x512x1x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x1x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.swish: (8x512x1x40xf32) <- (8x512x1x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x512x1x40xf32, 8x512x1x40xf32]) <- (8x512x1x40xf32, 8x512x1x40xf32) + combine_0 = [assign_0, swish_4] + del assign_0, swish_4 + + # pd_op.concat: (8x1024x1x40xf32) <- ([8x512x1x40xf32, 8x512x1x40xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0, full_2 + + # pd_op.conv2d: (8x64x1x40xf32) <- (8x1024x1x40xf32, 64x1024x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + concat_0, parameter_11, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_0, parameter_11 + + # pd_op.batch_norm_: (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.swish: (8x64x1x40xf32) <- (8x64x1x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (8x64x1x40xf32) <- (8x64x1x40xf32, 64x64x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6, swish_5 + + # pd_op.batch_norm_: (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- 
(8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x64x1x40xf32) <- (8x64x1x40xf32) + swish_6 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.squeeze: (8x64x40xf32) <- (8x64x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_6) + del full_int_array_6, swish_6 + + # pd_op.transpose: (8x40x64xf32) <- (8x64x40xf32) + transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.matmul: (8x40x6625xf32) <- (8x40x64xf32, 64x6625xf32) + matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) + del parameter_1, transpose_8 + + # pd_op.add: (8x40x6625xf32) <- (8x40x6625xf32, 6625xf32) + add_16 = paddle._C_ops.add(matmul_12, parameter_0) + del matmul_12, parameter_0 + + # pd_op.softmax: (8x40x6625xf32) <- (8x40x6625xf32) + softmax_0 = paddle._C_ops.softmax(add_16, 2) + del add_16 + + return softmax_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/weight_meta.py new file mode 100644 index 000000000..cc53150f2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/weight_meta.py @@ -0,0 +1,2064 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6625] + dtype = "float32" + min_val = float("-0.379237") + max_val = float("1.41695") + mean = float("-0.0230814") + std = float("0.040638") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [64, 6625] + dtype = "float32" + min_val = float("-2.61517") + max_val = float("2.32152") + mean = float("-0.0789") + std = float("0.144839") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-1.09516") + max_val = float("1.56997") + mean = float("0.00672384") + std = float("0.257359") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val 
= float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [64, 1024, 3, 3] + dtype = "float32" + min_val = float("-1.16106") + max_val = float("1.13951") + mean = float("-0.00198152") + std = float("0.0470016") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-1.37464") + max_val = float("1.49706") + mean = float("-0.370973") + std = float("0.241875") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512] + dtype = "float32" + min_val = float("-0.0107127") + max_val = float("4.69329") + mean = float("0.890954") + std = float("0.38084") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("0.000220463") + max_val = float("0.345001") + mean = float("0.0825053") + std = float("0.0431813") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512] + dtype = "float32" + min_val = float("-0.811102") + max_val = float("1.19008") + mean = float("0.0398876") + std = float("0.1579") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512, 120, 1, 1] + dtype = "float32" + min_val = float("-0.718116") + max_val = float("0.894727") + mean = float("-0.00233383") + std = float("0.0873386") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [120] + dtype = "float32" + min_val = float("-0.00113343") + max_val = float("0.000961629") + mean = float("-2.89278e-05") + std = float("0.000386525") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [120] + dtype = "float32" + min_val = float("0.0873664") + max_val = float("0.724352") + mean = float("0.379606") + std = float("0.0967956") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [120] + dtype = "float32" + min_val = float("-0.470022") + max_val = float("0.748517") + mean = float("0.00565079") + std = float("0.130134") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [240, 120] + dtype = "float32" + min_val = float("-0.84189") + max_val = float("0.792543") + mean = float("-0.00079115") + std = float("0.0670632") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [240] + dtype = "float32" + min_val = float("-0.275399") + max_val = float("0.0526904") + mean = float("-0.0684576") + std = float("0.0479141") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [120, 240] + dtype = "float32" + min_val = float("-0.857653") + max_val = float("0.807613") + mean = float("-0.00987257") + std = float("0.107461") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [120] + dtype = "float32" + min_val = float("-2.50945") + max_val = float("1.87882") + mean = float("0.162663") + std = float("0.400249") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [120] + dtype = "float32" + min_val = float("0.52825") + max_val = float("1.80602") + mean = float("1.25228") + std = float("0.154194") + data = None + + +class 
Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [120] + dtype = "float32" + min_val = float("-0.258214") + max_val = float("0.234695") + mean = float("0.00728996") + std = float("0.085847") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [120, 120] + dtype = "float32" + min_val = float("-0.474166") + max_val = float("0.383756") + mean = float("-0.000269616") + std = float("0.0854085") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [360] + dtype = "float32" + min_val = float("-0.803533") + max_val = float("0.874276") + mean = float("0.00735413") + std = float("0.142553") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [120, 360] + dtype = "float32" + min_val = float("-0.581426") + max_val = float("0.644047") + mean = float("0.000391687") + std = float("0.0957814") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [120] + dtype = "float32" + min_val = float("-1.08672") + max_val = float("0.788303") + mean = float("0.0833729") + std = float("0.241456") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [120] + dtype = "float32" + min_val = float("0.515904") + max_val = float("1.10272") + mean = float("0.90528") + std = float("0.0924082") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [120] + dtype = "float32" + min_val = float("-0.225951") + max_val = float("0.217649") + mean = float("-0.000199204") + std = float("0.0834969") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [240, 120] + dtype = "float32" + min_val = float("-0.759846") + max_val = float("0.959335") + mean = float("-0.000140495") + std = float("0.0606466") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [240] + dtype = "float32" + min_val = float("-0.267075") + max_val = float("0.0761767") + mean = float("-0.0710567") + std = float("0.0445584") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [120, 240] + dtype = "float32" + min_val = float("-0.626377") + max_val = float("0.777438") + mean = float("-0.00135008") + std = float("0.113605") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [120] + dtype = "float32" + min_val = float("-3.14406") + max_val = float("1.23497") + mean = float("0.0377958") + std = float("0.467852") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [120] + dtype = "float32" + min_val = float("0.895688") + max_val = float("1.55573") + mean = float("1.21581") + std = float("0.0976649") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [120] + dtype = "float32" + min_val = float("-0.16761") + max_val = float("0.149008") + mean = float("0.00504008") + std = float("0.0479561") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [120, 120] + dtype = "float32" + min_val = float("-0.47844") + max_val = float("0.327888") + mean = float("-2.93668e-05") + std = float("0.0733842") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [360] + dtype = "float32" + min_val = float("-1.05283") + max_val = float("0.973596") + mean = float("-0.00142021") + std = 
float("0.174564") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [120, 360] + dtype = "float32" + min_val = float("-0.802997") + max_val = float("0.911171") + mean = float("-7.62152e-05") + std = float("0.0964538") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [120] + dtype = "float32" + min_val = float("-1.36655") + max_val = float("0.84029") + mean = float("0.0631498") + std = float("0.304378") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [120] + dtype = "float32" + min_val = float("-0.0132848") + max_val = float("1.33382") + mean = float("0.885975") + std = float("0.225385") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [120] + dtype = "float32" + min_val = float("-0.896143") + max_val = float("2.06732") + mean = float("0.216446") + std = float("0.419651") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [120] + dtype = "float32" + min_val = float("0.320767") + max_val = float("2.80154") + mean = float("1.46118") + std = float("0.308907") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [120] + dtype = "float32" + min_val = float("0.00570793") + max_val = float("0.373694") + mean = float("0.0491769") + std = float("0.0392193") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [120] + dtype = "float32" + min_val = float("-1.20662") + max_val = float("0.786439") + mean = float("-0.028425") + std = float("0.365683") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [120, 64, 1, 1] + dtype = "float32" + min_val = float("-1.2714") + max_val = float("1.29797") + mean = float("0.000784841") + std = float("0.199535") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [64, 512, 3, 3] + dtype = "float32" + min_val = float("-1.40611") + max_val = float("1.10038") + mean = float("-0.00245528") + std = float("0.05029") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [512] + dtype = "float32" + min_val = float("-6.9423") + max_val = float("5.17735") + mean = float("-2.04504") + std = float("1.47297") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [512] + dtype = "float32" + min_val = float("0.915076") + max_val = float("10.6534") + mean = float("6.39917") + std = float("2.46428") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [512] + dtype = "float32" + min_val = float("0.0322723") + max_val = float("2.0221") + mean = float("0.110115") + std = float("0.109804") + data = None + + +class 
Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [512] + dtype = "float32" + min_val = float("-0.714717") + max_val = float("0.616614") + mean = float("-0.061605") + std = float("0.100287") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.927952") + max_val = float("0.912147") + mean = float("-0.00487282") + std = float("0.0375902") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [512] + dtype = "float32" + min_val = float("-0.131056") + max_val = float("0.0922238") + mean = float("-0.0166377") + std = float("0.0186343") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.563108") + max_val = float("0.482628") + mean = float("-0.00421348") + std = float("0.0367703") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [128] + dtype = "float32" + min_val = float("-0.0351852") + max_val = float("0.4196") + mean = float("0.0130297") + std = float("0.0433383") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-1.3301") + max_val = float("0.743327") + mean = float("0.000846266") + std = float("0.0344146") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [512] + dtype = "float32" + min_val = float("-2.99653") + max_val = float("1.90625") + mean = float("0.0115887") + std = float("0.444884") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [512] + dtype = "float32" + min_val = float("0.33003") + max_val = float("7.14161") + mean = float("1.30241") + std = float("0.811664") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [512] + dtype = "float32" + min_val = float("1.60311e-05") + max_val = float("0.873089") + mean = float("0.0314933") + std = float("0.0676433") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [512] + dtype = "float32" + min_val = float("-1.67511") + max_val = float("0.427997") + mean = float("-0.0939804") + std = float("0.142749") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = float("-0.265693") + max_val = float("0.34381") + mean = float("0.01971") + std = float("0.0488623") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [512] + dtype = "float32" + min_val = float("-2.15339") + max_val = float("2.63358") + mean = float("-0.893422") + std = float("0.776881") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [512] + dtype = "float32" + min_val = float("0.0274539") + max_val = float("2.683") + mean = float("0.991873") + std = float("0.398585") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [512] + dtype = "float32" + min_val = float("0.00839155") + max_val = float("0.453825") + mean = float("0.0464475") + std = float("0.0287356") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [512] + dtype = "float32" + min_val = float("-0.953258") + max_val = float("0.850259") + mean = float("-0.182058") + std = 
float("0.244472") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.492484") + max_val = float("0.855615") + mean = float("-0.00331369") + std = float("0.0473135") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [256] + dtype = "float32" + min_val = float("-0.300282") + max_val = float("0.0431338") + mean = float("-0.0277728") + std = float("0.0293606") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.732785") + max_val = float("0.488966") + mean = float("-0.00470549") + std = float("0.0517995") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-1.29047") + max_val = float("0.677073") + mean = float("0.000834517") + std = float("0.0502748") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [256] + dtype = "float32" + min_val = float("-2.31693") + max_val = float("2.18567") + mean = float("0.624999") + std = float("0.572842") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [256] + dtype = "float32" + min_val = float("0.203989") + max_val = float("9.10141") + mean = float("1.39641") + std = float("0.855435") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [256] + dtype = "float32" + min_val = float("4.94662e-05") + max_val = float("1.67876") + mean = float("0.0754494") + std = float("0.160558") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [256] + dtype = "float32" + min_val = float("-1.75122") + max_val = float("1.0715") + mean = float("-0.0645766") + std = float("0.219713") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.247528") + max_val = float("0.267467") + mean = float("0.0217321") + std = float("0.0588233") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [256] + dtype = "float32" + min_val = float("-2.38884") + max_val = float("1.92813") + mean = float("-0.704959") + std = float("0.648384") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [256] + dtype = "float32" + min_val = float("0.0248943") + max_val = float("2.32932") + mean = float("0.99848") + std = float("0.339812") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [256] + dtype = "float32" + min_val = float("0.164861") + max_val = float("2.33681") + mean = float("0.670703") + std = float("0.278408") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [256] + dtype = "float32" + min_val = float("-3.52098") + max_val = float("2.53263") + mean = float("-0.501457") + std = float("0.981916") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.604677") + max_val = float("0.616501") + mean = float("-0.00409391") + std = float("0.0522357") + data = None + 
+ +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [256] + dtype = "float32" + min_val = float("-1.30931") + max_val = float("5.34531") + mean = float("1.05255") + std = float("0.933432") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [256] + dtype = "float32" + min_val = float("0.444702") + max_val = float("2.71313") + mean = float("0.815764") + std = float("0.326504") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [256] + dtype = "float32" + min_val = float("5.02145e-05") + max_val = float("2.61581") + mean = float("0.065821") + std = float("0.198044") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [256] + dtype = "float32" + min_val = float("-0.911464") + max_val = float("0.709433") + mean = float("-0.0463051") + std = float("0.156301") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.404726") + max_val = float("0.561177") + mean = float("0.00650608") + std = float("0.0681317") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [256] + dtype = "float32" + min_val = float("-1.94837") + max_val = float("2.22191") + mean = float("-0.47184") + std = float("0.66991") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [256] + dtype = "float32" + min_val = float("0.0310262") + max_val = float("2.54106") + mean = float("0.972144") + std = float("0.450682") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [256] + dtype = "float32" + min_val = float("0.266377") + max_val = float("1.99195") + mean = float("0.66369") + std = float("0.23844") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [256] + dtype = "float32" + min_val = float("-3.18553") + max_val = float("2.31156") + mean = float("-0.410527") + std = float("1.01772") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.661761") + max_val = float("0.454845") + mean = float("-0.00341439") + std = float("0.0496098") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [256] + dtype = "float32" + min_val = float("-1.55942") + max_val = float("8.78174") + mean = float("1.24145") + std = float("1.033") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [256] + dtype = "float32" + min_val = float("0.399551") + max_val = float("2.38777") + mean = float("0.855712") + std = float("0.335078") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [256] + dtype = "float32" + min_val = float("4.54132e-05") + max_val = float("2.30389") + mean = float("0.0852217") + std = float("0.19819") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [256] + dtype = "float32" + min_val = float("-1.53342") + max_val = float("0.894373") + mean = float("-0.034722") + std = float("0.21968") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.52163") + max_val = float("0.468327") + mean = float("0.00847092") + std = float("0.0699048") + data = None + + +class 
Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [256] + dtype = "float32" + min_val = float("-1.94866") + max_val = float("2.38269") + mean = float("-0.433062") + std = float("0.779102") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [256] + dtype = "float32" + min_val = float("0.0353002") + max_val = float("3.13606") + mean = float("1.08369") + std = float("0.502936") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [256] + dtype = "float32" + min_val = float("0.267775") + max_val = float("1.84571") + mean = float("0.597777") + std = float("0.222868") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [256] + dtype = "float32" + min_val = float("-4.21206") + max_val = float("2.74549") + mean = float("-0.655925") + std = float("1.41117") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.745881") + max_val = float("0.458491") + mean = float("-0.00328742") + std = float("0.0492368") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [256] + dtype = "float32" + min_val = float("-1.6526") + max_val = float("4.78229") + mean = float("1.33182") + std = float("1.06161") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [256] + dtype = "float32" + min_val = float("0.443923") + max_val = float("2.89615") + mean = float("0.825036") + std = float("0.347257") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [256] + dtype = "float32" + min_val = float("5.1616e-05") + max_val = float("2.44101") + mean = float("0.0898679") + std = float("0.239694") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [256] + dtype = "float32" + min_val = float("-1.14129") + max_val = float("1.65247") + mean = float("0.0116179") + std = float("0.242916") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.506726") + max_val = float("0.541296") + mean = float("0.00976092") + std = float("0.0731665") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [256] + dtype = "float32" + min_val = float("-1.7087") + max_val = float("2.63905") + mean = float("-0.358703") + std = float("0.80408") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [256] + dtype = "float32" + min_val = float("0.0369452") + max_val = float("2.95182") + mean = float("0.968821") + std = float("0.509186") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [256] + dtype = "float32" + min_val = float("0.223303") + max_val = float("1.40342") + mean = float("0.515294") + std = float("0.194983") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [256] + dtype = "float32" + min_val = float("-3.10657") + max_val = float("2.92865") + mean = float("-0.456729") + std = float("1.00126") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.393396") + max_val = float("0.540101") + mean = float("-0.00325585") + std = float("0.049361") + 
data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [256] + dtype = "float32" + min_val = float("-3.66286") + max_val = float("4.69908") + mean = float("0.87936") + std = float("1.16991") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [256] + dtype = "float32" + min_val = float("0.396038") + max_val = float("3.18987") + mean = float("0.874766") + std = float("0.39682") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [256] + dtype = "float32" + min_val = float("4.42589e-05") + max_val = float("1.07897") + mean = float("0.0780041") + std = float("0.157112") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [256] + dtype = "float32" + min_val = float("-1.60242") + max_val = float("1.13025") + mean = float("-0.016689") + std = float("0.242318") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.477884") + max_val = float("0.502954") + mean = float("0.0126727") + std = float("0.0707076") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [256] + dtype = "float32" + min_val = float("-1.83087") + max_val = float("2.42642") + mean = float("-0.305437") + std = float("0.809707") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [256] + dtype = "float32" + min_val = float("0.0313435") + max_val = float("3.32059") + mean = float("0.97765") + std = float("0.588425") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [256] + dtype = "float32" + min_val = float("0.124685") + max_val = float("1.21656") + mean = float("0.352423") + std = float("0.147248") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [256] + dtype = "float32" + min_val = float("-3.4848") + max_val = float("3.07169") + mean = float("-0.178261") + std = float("0.898995") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.550557") + max_val = float("0.634779") + mean = float("-0.00302642") + std = float("0.051265") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [256] + dtype = "float32" + min_val = float("-3.67435") + max_val = float("3.23799") + mean = float("0.328727") + std = float("1.28371") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [256] + dtype = "float32" + min_val = float("0.316049") + max_val = float("5.72779") + mean = float("0.957953") + std = float("0.567461") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [256] + dtype = "float32" + min_val = float("3.20013e-05") + max_val = float("4.24667") + mean = float("0.0981319") + std = float("0.317482") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [256] + dtype = "float32" + min_val = float("-1.87702") + max_val = float("1.02751") + mean = float("-0.0059837") + std = float("0.253696") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.651681") + max_val = float("0.818378") + mean = float("0.00447852") + std = 
float("0.0843552") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [256] + dtype = "float32" + min_val = float("-2.63124") + max_val = float("3.07714") + mean = float("0.166551") + std = float("0.816235") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [256] + dtype = "float32" + min_val = float("0.0253762") + max_val = float("3.12846") + mean = float("0.630966") + std = float("0.566876") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [256] + dtype = "float32" + min_val = float("0.0732147") + max_val = float("2.00221") + mean = float("0.306021") + std = float("0.233483") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [256] + dtype = "float32" + min_val = float("-4.15919") + max_val = float("3.24149") + mean = float("-0.0216345") + std = float("1.01416") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.458854") + max_val = float("0.539943") + mean = float("-0.00102917") + std = float("0.0604539") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [128] + dtype = "float32" + min_val = float("-1.47661") + max_val = float("3.23365") + mean = float("0.876993") + std = float("1.3921") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [128] + dtype = "float32" + min_val = float("0.433581") + max_val = float("1.85128") + mean = float("0.856498") + std = float("0.343666") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [128] + dtype = "float32" + min_val = float("2.15841e-05") + max_val = float("2.94433") + mean = float("0.0955551") + std = float("0.287347") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [128] + dtype = "float32" + min_val = float("-0.772058") + max_val = float("2.64243") + mean = float("0.0802506") + std = float("0.363494") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.474235") + max_val = float("0.46202") + mean = float("0.0180255") + std = float("0.116947") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [128] + dtype = "float32" + min_val = float("-1.03079") + max_val = float("3.48196") + mean = float("0.351474") + std = float("0.816892") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [128] + dtype = "float32" + min_val = float("0.0290844") + max_val = float("3.08291") + mean = float("0.848079") + std = float("0.661486") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [128] + dtype = "float32" + min_val = float("0.167354") + max_val = float("4.1238") + mean = float("0.792821") + std = float("0.536443") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [128] + dtype = "float32" + min_val = float("-3.23238") + max_val = float("2.81434") + mean = float("-0.331955") + std = float("0.970932") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [128, 128, 1, 1] + dtype = "float32" + min_val = float("-0.623159") + max_val = float("0.594043") + mean = 
float("-0.00524257") + std = float("0.0722632") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [128] + dtype = "float32" + min_val = float("-1.87488") + max_val = float("3.28169") + mean = float("0.409411") + std = float("1.12985") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [128] + dtype = "float32" + min_val = float("0.482481") + max_val = float("4.41269") + mean = float("1.40579") + std = float("0.673597") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [128] + dtype = "float32" + min_val = float("4.09592e-05") + max_val = float("3.48636") + mean = float("0.182057") + std = float("0.467894") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [128] + dtype = "float32" + min_val = float("-1.76651") + max_val = float("1.15869") + mean = float("-0.00627822") + std = float("0.385738") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.835065") + max_val = float("0.724195") + mean = float("2.3867e-05") + std = float("0.152833") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [128] + dtype = "float32" + min_val = float("-1.41918") + max_val = float("4.45937") + mean = float("0.928172") + std = float("1.15334") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [128] + dtype = "float32" + min_val = float("0.0387113") + max_val = float("4.59836") + mean = float("0.978259") + std = float("0.897297") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [128] + dtype = "float32" + min_val = float("0.220187") + max_val = float("13.906") + mean = float("1.47607") + std = float("1.77322") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [128] + dtype = "float32" + min_val = float("-4.88648") + max_val = float("5.978") + mean = float("-0.143999") + std = float("1.80874") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [128, 64, 1, 1] + dtype = "float32" + min_val = float("-0.531509") + max_val = float("0.582687") + mean = float("-0.000919442") + std = float("0.083879") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.402881") + max_val = float("0.339908") + mean = float("0.0274269") + std = float("0.119171") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-0.663992") + max_val = float("0.719946") + mean = float("-0.00240824") + std = float("0.097702") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.435785") + max_val = float("0.482322") + mean = float("0.0138155") + std = float("0.137694") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.61147") + max_val = float("0.700056") + mean = float("-0.0010653") + std = float("0.108931") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [32, 1, 3, 3] + dtype = "float32" + min_val = float("-0.643489") + max_val = float("0.790969") + mean = float("0.00377921") + std = float("0.189285") + data = None + + +class 
Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [32, 16, 1, 1] + dtype = "float32" + min_val = float("-0.683211") + max_val = float("0.764207") + mean = float("0.00309465") + std = float("0.160636") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [16, 1, 3, 3] + dtype = "float32" + min_val = float("-1.09594") + max_val = float("0.835851") + mean = float("-0.0208495") + std = float("0.311673") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.702156") + max_val = float("0.832108") + mean = float("0.00730695") + std = float("0.198025") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..e846e4adb --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +55d8f61d74e448d5127ce01028deb67fb3999d01653aa5f82373f65dceeb71ff \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + 
"num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/input_meta.py new file mode 100644 index 000000000..e48f12da0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 3, 48, 320] + dtype = "float32" + min_val = float("-0.968627") + max_val = float("1.0") + mean = float("-0.0390442") + std = float("0.192124") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/model.py new file mode 100644 index 000000000..8acf9e777 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/model.py @@ -0,0 +1,2029 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + 
parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + data_0, + ): + # pd_op.conv2d: (-1x16x24x160xf32) <- (-1x3x48x320xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_195, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_195 + + # pd_op.batch_norm_: (-1x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (-1x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_194, + parameter_193, + parameter_192, + parameter_191, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_191, parameter_192, parameter_193, parameter_194 + + # pd_op.hardswish: (-1x16x24x160xf32) <- (-1x16x24x160xf32) + hardswish_0 = paddle._C_ops.hardswish(batch_norm__0) + del batch_norm__0 + + # pd_op.depthwise_conv2d: (-1x16x24x160xf32) <- (-1x16x24x160xf32, 16x1x3x3xf32) + depthwise_conv2d_0 = paddle._C_ops.depthwise_conv2d( + hardswish_0, parameter_190, [1, 1], [1, 1], "EXPLICIT", 16, [1, 1], "NCHW" + ) + del hardswish_0, parameter_190 + + # pd_op.batch_norm_: (-1x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (-1x16x24x160xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_0, + parameter_189, + parameter_188, + parameter_187, + parameter_186, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_0, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + ) + + # pd_op.hardswish: (-1x16x24x160xf32) <- (-1x16x24x160xf32) + hardswish_1 = paddle._C_ops.hardswish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x32x24x160xf32) <- (-1x16x24x160xf32, 32x16x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + hardswish_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_1, parameter_185 + + # pd_op.batch_norm_: (-1x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_184, + parameter_183, + parameter_182, + parameter_181, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, 
parameter_181, parameter_182, parameter_183, parameter_184 + + # pd_op.hardswish: (-1x32x24x160xf32) <- (-1x32x24x160xf32) + hardswish_2 = paddle._C_ops.hardswish(batch_norm__12) + del batch_norm__12 + + # pd_op.depthwise_conv2d: (-1x32x24x160xf32) <- (-1x32x24x160xf32, 32x1x3x3xf32) + depthwise_conv2d_1 = paddle._C_ops.depthwise_conv2d( + hardswish_2, parameter_180, [1, 1], [1, 1], "EXPLICIT", 32, [1, 1], "NCHW" + ) + del hardswish_2, parameter_180 + + # pd_op.batch_norm_: (-1x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x24x160xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_1, + parameter_179, + parameter_178, + parameter_177, + parameter_176, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_1, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + ) + + # pd_op.hardswish: (-1x32x24x160xf32) <- (-1x32x24x160xf32) + hardswish_3 = paddle._C_ops.hardswish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x64x24x160xf32) <- (-1x32x24x160xf32, 64x32x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + hardswish_3, parameter_175, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_3, parameter_175 + + # pd_op.batch_norm_: (-1x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_174, + parameter_173, + parameter_172, + parameter_171, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_171, parameter_172, parameter_173, parameter_174 + + # pd_op.hardswish: (-1x64x24x160xf32) <- (-1x64x24x160xf32) + hardswish_4 = paddle._C_ops.hardswish(batch_norm__24) + del batch_norm__24 + + # pd_op.depthwise_conv2d: (-1x64x24x160xf32) <- (-1x64x24x160xf32, 64x1x3x3xf32) + depthwise_conv2d_2 = paddle._C_ops.depthwise_conv2d( + hardswish_4, parameter_170, [1, 1], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del hardswish_4, parameter_170 + + # pd_op.batch_norm_: (-1x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_2, + parameter_169, + parameter_168, + parameter_167, + parameter_166, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_2, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + ) + + # pd_op.hardswish: (-1x64x24x160xf32) <- (-1x64x24x160xf32) + hardswish_5 = paddle._C_ops.hardswish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x64x24x160xf32) <- (-1x64x24x160xf32, 64x64x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + hardswish_5, parameter_165, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_5, 
parameter_165 + + # pd_op.batch_norm_: (-1x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x24x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_164, + parameter_163, + parameter_162, + parameter_161, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_161, parameter_162, parameter_163, parameter_164 + + # pd_op.hardswish: (-1x64x24x160xf32) <- (-1x64x24x160xf32) + hardswish_6 = paddle._C_ops.hardswish(batch_norm__36) + del batch_norm__36 + + # pd_op.depthwise_conv2d: (-1x64x12x160xf32) <- (-1x64x24x160xf32, 64x1x3x3xf32) + depthwise_conv2d_3 = paddle._C_ops.depthwise_conv2d( + hardswish_6, parameter_160, [2, 1], [1, 1], "EXPLICIT", 64, [1, 1], "NCHW" + ) + del hardswish_6, parameter_160 + + # pd_op.batch_norm_: (-1x64x12x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x12x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_3, + parameter_159, + parameter_158, + parameter_157, + parameter_156, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_3, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + ) + + # pd_op.hardswish: (-1x64x12x160xf32) <- (-1x64x12x160xf32) + hardswish_7 = paddle._C_ops.hardswish(batch_norm__42) + del batch_norm__42 + + # pd_op.conv2d: (-1x128x12x160xf32) <- (-1x64x12x160xf32, 128x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + hardswish_7, parameter_155, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_7, parameter_155 + + # pd_op.batch_norm_: (-1x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_154, + parameter_153, + parameter_152, + parameter_151, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_151, parameter_152, parameter_153, parameter_154 + + # pd_op.hardswish: (-1x128x12x160xf32) <- (-1x128x12x160xf32) + hardswish_8 = paddle._C_ops.hardswish(batch_norm__48) + del batch_norm__48 + + # pd_op.depthwise_conv2d: (-1x128x12x160xf32) <- (-1x128x12x160xf32, 128x1x3x3xf32) + depthwise_conv2d_4 = paddle._C_ops.depthwise_conv2d( + hardswish_8, parameter_150, [1, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del hardswish_8, parameter_150 + + # pd_op.batch_norm_: (-1x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_4, + parameter_149, + parameter_148, + parameter_147, + parameter_146, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_4, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + ) + + # pd_op.hardswish: (-1x128x12x160xf32) <- (-1x128x12x160xf32) + hardswish_9 = paddle._C_ops.hardswish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x128x12x160xf32) <- (-1x128x12x160xf32, 128x128x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + hardswish_9, parameter_145, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_9, parameter_145 + + # pd_op.batch_norm_: (-1x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x12x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_144, + parameter_143, + parameter_142, + parameter_141, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_141, parameter_142, parameter_143, parameter_144 + + # pd_op.hardswish: (-1x128x12x160xf32) <- (-1x128x12x160xf32) + hardswish_10 = paddle._C_ops.hardswish(batch_norm__60) + del batch_norm__60 + + # pd_op.depthwise_conv2d: (-1x128x6x160xf32) <- (-1x128x12x160xf32, 128x1x3x3xf32) + depthwise_conv2d_5 = paddle._C_ops.depthwise_conv2d( + hardswish_10, parameter_140, [2, 1], [1, 1], "EXPLICIT", 128, [1, 1], "NCHW" + ) + del hardswish_10, parameter_140 + + # pd_op.batch_norm_: (-1x128x6x160xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x6x160xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_5, + parameter_139, + parameter_138, + parameter_137, + parameter_136, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_5, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + ) + + # pd_op.hardswish: (-1x128x6x160xf32) <- (-1x128x6x160xf32) + hardswish_11 = paddle._C_ops.hardswish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (-1x256x6x160xf32) <- (-1x128x6x160xf32, 256x128x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + hardswish_11, parameter_135, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_11, parameter_135 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_134, + parameter_133, + parameter_132, + parameter_131, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_131, parameter_132, parameter_133, parameter_134 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_12 = paddle._C_ops.hardswish(batch_norm__72) + del batch_norm__72 + + # 
pd_op.depthwise_conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_6 = paddle._C_ops.depthwise_conv2d( + hardswish_12, parameter_130, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_12, parameter_130 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_6, + parameter_129, + parameter_128, + parameter_127, + parameter_126, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_6, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + ) + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_13 = paddle._C_ops.hardswish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x256x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + hardswish_13, parameter_125, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_13, parameter_125 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_124, + parameter_123, + parameter_122, + parameter_121, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_121, parameter_122, parameter_123, parameter_124 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_14 = paddle._C_ops.hardswish(batch_norm__84) + del batch_norm__84 + + # pd_op.depthwise_conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_7 = paddle._C_ops.depthwise_conv2d( + hardswish_14, parameter_120, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_14, parameter_120 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_7, + parameter_119, + parameter_118, + parameter_117, + parameter_116, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_7, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + ) + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_15 = paddle._C_ops.hardswish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x256x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + hardswish_15, parameter_115, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_15, parameter_115 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + 
batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_114, + parameter_113, + parameter_112, + parameter_111, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_111, parameter_112, parameter_113, parameter_114 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_16 = paddle._C_ops.hardswish(batch_norm__96) + del batch_norm__96 + + # pd_op.depthwise_conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_8 = paddle._C_ops.depthwise_conv2d( + hardswish_16, parameter_110, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_16, parameter_110 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_8, + parameter_109, + parameter_108, + parameter_107, + parameter_106, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del ( + depthwise_conv2d_8, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + ) + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_17 = paddle._C_ops.hardswish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x256x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + hardswish_17, parameter_105, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_17, parameter_105 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_104, + parameter_103, + parameter_102, + parameter_101, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_101, parameter_102, parameter_103, parameter_104 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_18 = paddle._C_ops.hardswish(batch_norm__108) + del batch_norm__108 + + # pd_op.depthwise_conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_9 = paddle._C_ops.depthwise_conv2d( + hardswish_18, parameter_100, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_18, parameter_100 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_9, + parameter_99, + parameter_98, + parameter_97, + parameter_96, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, 
None, None, None, None, None), + ) + del depthwise_conv2d_9, parameter_96, parameter_97, parameter_98, parameter_99 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_19 = paddle._C_ops.hardswish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x256x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + hardswish_19, parameter_95, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_19, parameter_95 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_94, + parameter_93, + parameter_92, + parameter_91, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_91, parameter_92, parameter_93, parameter_94 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_20 = paddle._C_ops.hardswish(batch_norm__120) + del batch_norm__120 + + # pd_op.depthwise_conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_10 = paddle._C_ops.depthwise_conv2d( + hardswish_20, parameter_90, [1, 1], [2, 2], "EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_20, parameter_90 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_10, + parameter_89, + parameter_88, + parameter_87, + parameter_86, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_10, parameter_86, parameter_87, parameter_88, parameter_89 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_21 = paddle._C_ops.hardswish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (-1x256x6x160xf32) <- (-1x256x6x160xf32, 256x256x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + hardswish_21, parameter_85, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del hardswish_21, parameter_85 + + # pd_op.batch_norm_: (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x6x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_84, + parameter_83, + parameter_82, + parameter_81, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_81, parameter_82, parameter_83, parameter_84 + + # pd_op.hardswish: (-1x256x6x160xf32) <- (-1x256x6x160xf32) + hardswish_22 = paddle._C_ops.hardswish(batch_norm__132) + del batch_norm__132 + + # pd_op.depthwise_conv2d: (-1x256x3x160xf32) <- (-1x256x6x160xf32, 256x1x5x5xf32) + depthwise_conv2d_11 = paddle._C_ops.depthwise_conv2d( + hardswish_22, parameter_80, [2, 1], [2, 2], 
"EXPLICIT", 256, [1, 1], "NCHW" + ) + del hardswish_22, parameter_80 + + # pd_op.batch_norm_: (-1x256x3x160xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x3x160xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_11, + parameter_79, + parameter_78, + parameter_77, + parameter_76, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_11, parameter_76, parameter_77, parameter_78, parameter_79 + + # pd_op.hardswish: (-1x256x3x160xf32) <- (-1x256x3x160xf32) + hardswish_23 = paddle._C_ops.hardswish(batch_norm__138) + del batch_norm__138 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 1] + + # pd_op.pool2d: (-1x256x1x1xf32) <- (-1x256x3x160xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + hardswish_23, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x64x1x1xf32) <- (-1x256x1x1xf32, 64x256x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_0, parameter_75, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_75, pool2d_0 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_74, full_int_array_1) + del parameter_74 + + # pd_op.add: (-1x64x1x1xf32) <- (-1x64x1x1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_12, reshape_0) + del conv2d_12, reshape_0 + + # pd_op.relu: (-1x64x1x1xf32) <- (-1x64x1x1xf32) + relu_0 = paddle._C_ops.relu(add_0) + del add_0 + + # pd_op.conv2d: (-1x256x1x1xf32) <- (-1x64x1x1xf32, 256x64x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + relu_0, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73, relu_0 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_72, full_int_array_1) + del parameter_72 + + # pd_op.add: (-1x256x1x1xf32) <- (-1x256x1x1xf32, 1x256x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_13, reshape_1) + del conv2d_13, reshape_1 + + # pd_op.hardsigmoid: (-1x256x1x1xf32) <- (-1x256x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_1, float("0.166667"), float("0.5") + ) + del add_1 + + # pd_op.multiply: (-1x256x3x160xf32) <- (-1x256x3x160xf32, -1x256x1x1xf32) + multiply_0 = paddle._C_ops.multiply(hardswish_23, hardsigmoid_0) + del hardsigmoid_0, hardswish_23 + + # pd_op.conv2d: (-1x512x3x160xf32) <- (-1x256x3x160xf32, 512x256x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + multiply_0, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_71 + + # pd_op.batch_norm_: (-1x512x3x160xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x3x160xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_70, + parameter_69, + parameter_68, + parameter_67, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_67, parameter_68, parameter_69, 
parameter_70 + + # pd_op.hardswish: (-1x512x3x160xf32) <- (-1x512x3x160xf32) + hardswish_24 = paddle._C_ops.hardswish(batch_norm__144) + del batch_norm__144 + + # pd_op.depthwise_conv2d: (-1x512x3x80xf32) <- (-1x512x3x160xf32, 512x1x5x5xf32) + depthwise_conv2d_12 = paddle._C_ops.depthwise_conv2d( + hardswish_24, parameter_66, [1, 2], [2, 2], "EXPLICIT", 512, [1, 1], "NCHW" + ) + del hardswish_24, parameter_66 + + # pd_op.batch_norm_: (-1x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + depthwise_conv2d_12, + parameter_65, + parameter_64, + parameter_63, + parameter_62, + True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del depthwise_conv2d_12, parameter_62, parameter_63, parameter_64, parameter_65 + + # pd_op.hardswish: (-1x512x3x80xf32) <- (-1x512x3x80xf32) + hardswish_25 = paddle._C_ops.hardswish(batch_norm__150) + del batch_norm__150 + + # pd_op.pool2d: (-1x512x1x1xf32) <- (-1x512x3x80xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + hardswish_25, + full_int_array_0, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_0 + + # pd_op.conv2d: (-1x128x1x1xf32) <- (-1x512x1x1xf32, 128x512x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_1, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61, pool2d_1 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_60, full_int_array_1) + del parameter_60 + + # pd_op.add: (-1x128x1x1xf32) <- (-1x128x1x1xf32, 1x128x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_15, reshape_2) + del conv2d_15, reshape_2 + + # pd_op.relu: (-1x128x1x1xf32) <- (-1x128x1x1xf32) + relu_1 = paddle._C_ops.relu(add_2) + del add_2 + + # pd_op.conv2d: (-1x512x1x1xf32) <- (-1x128x1x1xf32, 512x128x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + relu_1, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59, relu_1 + + # pd_op.reshape: (1x512x1x1xf32) <- (512xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_58, full_int_array_1) + del full_int_array_1, parameter_58 + + # pd_op.add: (-1x512x1x1xf32) <- (-1x512x1x1xf32, 1x512x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_16, reshape_3) + del conv2d_16, reshape_3 + + # pd_op.hardsigmoid: (-1x512x1x1xf32) <- (-1x512x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_3, float("0.166667"), float("0.5") + ) + del add_3 + + # pd_op.multiply: (-1x512x3x80xf32) <- (-1x512x3x80xf32, -1x512x1x1xf32) + multiply_1 = paddle._C_ops.multiply(hardswish_25, hardsigmoid_1) + del hardsigmoid_1, hardswish_25 + + # pd_op.conv2d: (-1x512x3x80xf32) <- (-1x512x3x80xf32, 512x512x1x1xf32) + conv2d_17 = paddle._C_ops.conv2d( + multiply_1, parameter_57, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_57 + + # pd_op.batch_norm_: (-1x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x3x80xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_56, + parameter_55, + parameter_54, + parameter_53, + 
True, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_53, parameter_54, parameter_55, parameter_56 + + # pd_op.hardswish: (-1x512x3x80xf32) <- (-1x512x3x80xf32) + hardswish_26 = paddle._C_ops.hardswish(batch_norm__156) + del batch_norm__156 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [2, 2] + + # pd_op.pool2d: (-1x512x1x40xf32) <- (-1x512x3x80xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + hardswish_26, + full_int_array_2, + [2, 2], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + False, + "EXPLICIT", + ) + del full_int_array_2, hardswish_26 + + # pd_op.assign: (-1x512x1x40xf32) <- (-1x512x1x40xf32) + assign_0 = pool2d_2 + del pool2d_2 + + # pd_op.conv2d: (-1x64x1x40xf32) <- (-1x512x1x40xf32, 64x512x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + assign_0, parameter_52, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (-1x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x64x1x40xf32) <- (-1x64x1x40xf32) + swish_0 = paddle._C_ops.swish(batch_norm__162) + del batch_norm__162 + + # pd_op.conv2d: (-1x120x1x40xf32) <- (-1x64x1x40xf32, 120x64x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_47, swish_0 + + # pd_op.batch_norm_: (-1x120x1x40xf32, 120xf32, 120xf32, 120xf32, 120xf32, -1xui8) <- (-1x120x1x40xf32, 120xf32, 120xf32, 120xf32, 120xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.swish: (-1x120x1x40xf32) <- (-1x120x1x40xf32) + swish_1 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.shape64: (4xi64) <- (-1x120x1x40xf32) + shape64_0 = paddle._C_ops.shape64(swish_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_0 + + # pd_op.flatten: (-1x120x40xf32) <- (-1x120x1x40xf32) + flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) + del swish_1 + + # pd_op.transpose: (-1x40x120xf32) <- (-1x120x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (-1x40x120xf32, -1x40xf32, -1x40xf32) <- (-1x40x120xf32, 120xf32, 120xf32) + layer_norm_0, layer_norm_1, 
layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_42, parameter_41, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_41, parameter_42 + + # pd_op.matmul: (-1x40x360xf32) <- (-1x40x120xf32, 120x360xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) + del layer_norm_0, parameter_40 + + # pd_op.add: (-1x40x360xf32) <- (-1x40x360xf32, 360xf32) + add_4 = paddle._C_ops.add(matmul_0, parameter_39) + del matmul_0, parameter_39 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_5 = [0, -1, 3, 8, 15] + + # pd_op.reshape: (-1x-1x3x8x15xf32) <- (-1x40x360xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_4, full_int_array_5) + del add_4 + + # pd_op.transpose: (3x-1x8x-1x15xf32) <- (-1x-1x3x8x15xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.slice: (-1x8x-1x15xf32) <- (3x-1x8x-1x15xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.258199"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x-1x15xf32) <- (-1x8x-1x15xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_1, full_0, float("0"), True) + del slice_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2] + + # pd_op.slice: (-1x8x-1x15xf32) <- (3x-1x8x-1x15xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_4, full_int_array_6, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (-1x8x-1x15xf32) <- (3x-1x8x-1x15xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del transpose_1 + + # pd_op.transpose: (-1x8x15x-1xf32) <- (-1x8x-1x15xf32) + transpose_2 = paddle._C_ops.transpose(slice_2, [0, 1, 3, 2]) + del slice_2 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x15xf32, -1x8x15x-1xf32) + matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) + del scale_0, transpose_2 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(matmul_1, -1) + del matmul_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (-1x8x-1x15xf32) <- (-1x8x-1x-1xf32, -1x8x-1x15xf32) + matmul_2 = paddle._C_ops.matmul(dropout_0, slice_3, False, False) + del dropout_0, slice_3 + + # pd_op.transpose: (-1x-1x8x15xf32) <- (-1x8x-1x15xf32) + transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, -1, 120] + + # pd_op.reshape: (-1x-1x120xf32) <- (-1x-1x8x15xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_3, full_int_array_8) + del transpose_3 + + # pd_op.matmul: (-1x-1x120xf32) <- (-1x-1x120xf32, 120x120xf32) + matmul_3 = paddle._C_ops.matmul(reshape_5, parameter_38, False, False) + del parameter_38, reshape_5 + + # pd_op.add: (-1x-1x120xf32) <- (-1x-1x120xf32, 120xf32) + add_5 = paddle._C_ops.add(matmul_3, 
parameter_37) + del matmul_3, parameter_37 + + # pd_op.dropout: (-1x-1x120xf32, -1x-1x120xui8) <- (-1x-1x120xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (-1x40x120xf32) <- (-1x40x120xf32, -1x-1x120xf32) + add_6 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + + # pd_op.layer_norm: (-1x40x120xf32, -1x40xf32, -1x40xf32) <- (-1x40x120xf32, 120xf32, 120xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.matmul: (-1x40x240xf32) <- (-1x40x120xf32, 120x240xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) + del layer_norm_3, parameter_34 + + # pd_op.add: (-1x40x240xf32) <- (-1x40x240xf32, 240xf32) + add_7 = paddle._C_ops.add(matmul_4, parameter_33) + del matmul_4, parameter_33 + + # pd_op.swish: (-1x40x240xf32) <- (-1x40x240xf32) + swish_2 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.dropout: (-1x40x240xf32, -1x40x240xui8) <- (-1x40x240xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_2, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_2 + + # pd_op.matmul: (-1x40x120xf32) <- (-1x40x240xf32, 240x120xf32) + matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) + del dropout_4, parameter_32 + + # pd_op.add: (-1x40x120xf32) <- (-1x40x120xf32, 120xf32) + add_8 = paddle._C_ops.add(matmul_5, parameter_31) + del matmul_5, parameter_31 + + # pd_op.dropout: (-1x40x120xf32, -1x40x120xui8) <- (-1x40x120xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (-1x40x120xf32) <- (-1x40x120xf32, -1x40x120xf32) + add_9 = paddle._C_ops.add(add_6, dropout_6) + del add_6, dropout_6 + + # pd_op.layer_norm: (-1x40x120xf32, -1x40xf32, -1x40xf32) <- (-1x40x120xf32, 120xf32, 120xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_30, parameter_29, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_29, parameter_30 + + # pd_op.matmul: (-1x40x360xf32) <- (-1x40x120xf32, 120x360xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) + del layer_norm_6, parameter_28 + + # pd_op.add: (-1x40x360xf32) <- (-1x40x360xf32, 360xf32) + add_10 = paddle._C_ops.add(matmul_6, parameter_27) + del matmul_6, parameter_27 + + # pd_op.reshape: (-1x-1x3x8x15xf32) <- (-1x40x360xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_10, full_int_array_5) + del add_10, full_int_array_5 + + # pd_op.transpose: (3x-1x8x-1x15xf32) <- (-1x-1x3x8x15xf32) + transpose_4 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (-1x8x-1x15xf32) <- (3x-1x8x-1x15xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.scale: (-1x8x-1x15xf32) <- 
(-1x8x-1x15xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_4, full_0, float("0"), True) + del full_0, slice_4 + + # pd_op.slice: (-1x8x-1x15xf32) <- (3x-1x8x-1x15xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_4, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (-1x8x-1x15xf32) <- (3x-1x8x-1x15xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_7, transpose_4 + + # pd_op.transpose: (-1x8x15x-1xf32) <- (-1x8x-1x15xf32) + transpose_5 = paddle._C_ops.transpose(slice_5, [0, 1, 3, 2]) + del slice_5 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x15xf32, -1x8x15x-1xf32) + matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) + del scale_1, transpose_5 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(matmul_7, -1) + del matmul_7 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (-1x8x-1x15xf32) <- (-1x8x-1x-1xf32, -1x8x-1x15xf32) + matmul_8 = paddle._C_ops.matmul(dropout_8, slice_6, False, False) + del dropout_8, slice_6 + + # pd_op.transpose: (-1x-1x8x15xf32) <- (-1x8x-1x15xf32) + transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (-1x-1x120xf32) <- (-1x-1x8x15xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_6, full_int_array_8) + del full_int_array_8, transpose_6 + + # pd_op.matmul: (-1x-1x120xf32) <- (-1x-1x120xf32, 120x120xf32) + matmul_9 = paddle._C_ops.matmul(reshape_7, parameter_26, False, False) + del parameter_26, reshape_7 + + # pd_op.add: (-1x-1x120xf32) <- (-1x-1x120xf32, 120xf32) + add_11 = paddle._C_ops.add(matmul_9, parameter_25) + del matmul_9, parameter_25 + + # pd_op.dropout: (-1x-1x120xf32, -1x-1x120xui8) <- (-1x-1x120xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.add: (-1x40x120xf32) <- (-1x40x120xf32, -1x-1x120xf32) + add_12 = paddle._C_ops.add(add_9, dropout_10) + del add_9, dropout_10 + + # pd_op.layer_norm: (-1x40x120xf32, -1x40xf32, -1x40xf32) <- (-1x40x120xf32, 120xf32, 120xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_24, parameter_23, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_23, parameter_24 + + # pd_op.matmul: (-1x40x240xf32) <- (-1x40x120xf32, 120x240xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) + del layer_norm_9, parameter_22 + + # pd_op.add: (-1x40x240xf32) <- (-1x40x240xf32, 240xf32) + add_13 = paddle._C_ops.add(matmul_10, parameter_21) + del matmul_10, parameter_21 + + # pd_op.swish: (-1x40x240xf32) <- (-1x40x240xf32) + swish_3 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.dropout: (-1x40x240xf32, -1x40x240xui8) <- (-1x40x240xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_3, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else 
(out, None), + ) + del swish_3 + + # pd_op.matmul: (-1x40x120xf32) <- (-1x40x240xf32, 240x120xf32) + matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) + del dropout_12, parameter_20 + + # pd_op.add: (-1x40x120xf32) <- (-1x40x120xf32, 120xf32) + add_14 = paddle._C_ops.add(matmul_11, parameter_19) + del matmul_11, parameter_19 + + # pd_op.dropout: (-1x40x120xf32, -1x40x120xui8) <- (-1x40x120xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_14, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_14, full_1 + + # pd_op.add: (-1x40x120xf32) <- (-1x40x120xf32, -1x40x120xf32) + add_15 = paddle._C_ops.add(add_12, dropout_14) + del add_12, dropout_14 + + # pd_op.layer_norm: (-1x40x120xf32, -1x40xf32, -1x40xf32) <- (-1x40x120xf32, 120xf32, 120xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_15, parameter_18, parameter_17, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_15, parameter_17, parameter_18 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [0, 1, 40, 120] + + # pd_op.reshape: (-1x1x40x120xf32) <- (-1x40x120xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(layer_norm_12, full_int_array_9) + del full_int_array_9, layer_norm_12 + + # pd_op.transpose: (-1x120x1x40xf32) <- (-1x1x40x120xf32) + transpose_7 = paddle._C_ops.transpose(reshape_8, [0, 3, 1, 2]) + del reshape_8 + + # pd_op.conv2d: (-1x512x1x40xf32) <- (-1x120x1x40xf32, 512x120x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16, transpose_7 + + # pd_op.batch_norm_: (-1x512x1x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x1x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.swish: (-1x512x1x40xf32) <- (-1x512x1x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x512x1x40xf32, -1x512x1x40xf32]) <- (-1x512x1x40xf32, -1x512x1x40xf32) + combine_0 = [assign_0, swish_4] + del assign_0, swish_4 + + # pd_op.concat: (-1x1024x1x40xf32) <- ([-1x512x1x40xf32, -1x512x1x40xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0, full_2 + + # pd_op.conv2d: (-1x64x1x40xf32) <- (-1x1024x1x40xf32, 64x1024x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + concat_0, parameter_11, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_0, parameter_11 + + # pd_op.batch_norm_: (-1x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + 
parameter_10, + parameter_9, + parameter_8, + parameter_7, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.swish: (-1x64x1x40xf32) <- (-1x64x1x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x64x1x40xf32) <- (-1x64x1x40xf32, 64x64x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6, swish_5 + + # pd_op.batch_norm_: (-1x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x64x1x40xf32) <- (-1x64x1x40xf32) + swish_6 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.shape64: (4xi64) <- (-1x64x1x40xf32) + shape64_1 = paddle._C_ops.shape64(swish_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4, shape64_1 + + # pd_op.squeeze: (-1x64x40xf32) <- (-1x64x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_6) + del full_int_array_6, swish_6 + + # pd_op.transpose: (-1x40x64xf32) <- (-1x64x40xf32) + transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.matmul: (-1x40x6625xf32) <- (-1x40x64xf32, 64x6625xf32) + matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) + del parameter_1, transpose_8 + + # pd_op.add: (-1x40x6625xf32) <- (-1x40x6625xf32, 6625xf32) + add_16 = paddle._C_ops.add(matmul_12, parameter_0) + del matmul_12, parameter_0 + + # pd_op.softmax: (-1x40x6625xf32) <- (-1x40x6625xf32) + softmax_0 = paddle._C_ops.softmax(add_16, 2) + del add_16 + + return softmax_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/weight_meta.py new file mode 100644 index 000000000..cc53150f2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/weight_meta.py @@ -0,0 +1,2064 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6625] + dtype = "float32" + min_val = float("-0.379237") + max_val = float("1.41695") + mean = float("-0.0230814") + std = float("0.040638") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [64, 6625] + dtype = "float32" + min_val = float("-2.61517") + max_val = float("2.32152") + mean = float("-0.0789") + std = float("0.144839") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-1.09516") + max_val = float("1.56997") + mean = float("0.00672384") + std = float("0.257359") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [64, 1024, 3, 3] + dtype = "float32" + min_val = float("-1.16106") + max_val = float("1.13951") + mean = float("-0.00198152") + std = float("0.0470016") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-1.37464") + max_val = float("1.49706") + mean = float("-0.370973") + std = float("0.241875") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512] + dtype = "float32" + min_val = float("-0.0107127") + max_val = float("4.69329") + mean = float("0.890954") + std = float("0.38084") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("0.000220463") + max_val = float("0.345001") + mean = float("0.0825053") + std = float("0.0431813") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512] + dtype = "float32" + min_val = float("-0.811102") + max_val = float("1.19008") + mean = float("0.0398876") + std = float("0.1579") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512, 120, 1, 1] + dtype = "float32" + min_val = float("-0.718116") + max_val = float("0.894727") + mean = float("-0.00233383") + std = float("0.0873386") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [120] + dtype = "float32" + min_val = float("-0.00113343") + max_val = float("0.000961629") + mean = float("-2.89278e-05") + std = float("0.000386525") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [120] + dtype = "float32" + min_val = float("0.0873664") + max_val = float("0.724352") + mean = float("0.379606") + std = float("0.0967956") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [120] + dtype = "float32" + min_val = float("-0.470022") + max_val = float("0.748517") + mean = float("0.00565079") + std = float("0.130134") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [240, 120] + dtype = "float32" + min_val = float("-0.84189") + max_val = 
float("0.792543") + mean = float("-0.00079115") + std = float("0.0670632") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [240] + dtype = "float32" + min_val = float("-0.275399") + max_val = float("0.0526904") + mean = float("-0.0684576") + std = float("0.0479141") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [120, 240] + dtype = "float32" + min_val = float("-0.857653") + max_val = float("0.807613") + mean = float("-0.00987257") + std = float("0.107461") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [120] + dtype = "float32" + min_val = float("-2.50945") + max_val = float("1.87882") + mean = float("0.162663") + std = float("0.400249") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [120] + dtype = "float32" + min_val = float("0.52825") + max_val = float("1.80602") + mean = float("1.25228") + std = float("0.154194") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [120] + dtype = "float32" + min_val = float("-0.258214") + max_val = float("0.234695") + mean = float("0.00728996") + std = float("0.085847") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [120, 120] + dtype = "float32" + min_val = float("-0.474166") + max_val = float("0.383756") + mean = float("-0.000269616") + std = float("0.0854085") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [360] + dtype = "float32" + min_val = float("-0.803533") + max_val = float("0.874276") + mean = float("0.00735413") + std = float("0.142553") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [120, 360] + dtype = "float32" + min_val = float("-0.581426") + max_val = float("0.644047") + mean = float("0.000391687") + std = float("0.0957814") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [120] + dtype = "float32" + min_val = float("-1.08672") + max_val = float("0.788303") + mean = float("0.0833729") + std = float("0.241456") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [120] + dtype = "float32" + min_val = float("0.515904") + max_val = float("1.10272") + mean = float("0.90528") + std = float("0.0924082") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [120] + dtype = "float32" + min_val = float("-0.225951") + max_val = float("0.217649") + mean = float("-0.000199204") + std = float("0.0834969") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [240, 120] + dtype = "float32" + min_val = float("-0.759846") + max_val = float("0.959335") + mean = float("-0.000140495") + std = float("0.0606466") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [240] + dtype = "float32" + min_val = float("-0.267075") + max_val = float("0.0761767") + mean = float("-0.0710567") + std = float("0.0445584") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [120, 240] + dtype = "float32" + min_val = float("-0.626377") + max_val = float("0.777438") + mean = float("-0.00135008") + std = float("0.113605") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [120] + dtype = "float32" + min_val = 
float("-3.14406") + max_val = float("1.23497") + mean = float("0.0377958") + std = float("0.467852") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [120] + dtype = "float32" + min_val = float("0.895688") + max_val = float("1.55573") + mean = float("1.21581") + std = float("0.0976649") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [120] + dtype = "float32" + min_val = float("-0.16761") + max_val = float("0.149008") + mean = float("0.00504008") + std = float("0.0479561") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [120, 120] + dtype = "float32" + min_val = float("-0.47844") + max_val = float("0.327888") + mean = float("-2.93668e-05") + std = float("0.0733842") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [360] + dtype = "float32" + min_val = float("-1.05283") + max_val = float("0.973596") + mean = float("-0.00142021") + std = float("0.174564") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [120, 360] + dtype = "float32" + min_val = float("-0.802997") + max_val = float("0.911171") + mean = float("-7.62152e-05") + std = float("0.0964538") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [120] + dtype = "float32" + min_val = float("-1.36655") + max_val = float("0.84029") + mean = float("0.0631498") + std = float("0.304378") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [120] + dtype = "float32" + min_val = float("-0.0132848") + max_val = float("1.33382") + mean = float("0.885975") + std = float("0.225385") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [120] + dtype = "float32" + min_val = float("-0.896143") + max_val = float("2.06732") + mean = float("0.216446") + std = float("0.419651") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [120] + dtype = "float32" + min_val = float("0.320767") + max_val = float("2.80154") + mean = float("1.46118") + std = float("0.308907") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [120] + dtype = "float32" + min_val = float("0.00570793") + max_val = float("0.373694") + mean = float("0.0491769") + std = float("0.0392193") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [120] + dtype = "float32" + min_val = float("-1.20662") + max_val = float("0.786439") + mean = float("-0.028425") + std = float("0.365683") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [120, 64, 1, 1] + dtype = "float32" + min_val = float("-1.2714") + max_val = float("1.29797") + mean = float("0.000784841") + std = float("0.199535") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [64] 
+ dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [64, 512, 3, 3] + dtype = "float32" + min_val = float("-1.40611") + max_val = float("1.10038") + mean = float("-0.00245528") + std = float("0.05029") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [512] + dtype = "float32" + min_val = float("-6.9423") + max_val = float("5.17735") + mean = float("-2.04504") + std = float("1.47297") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [512] + dtype = "float32" + min_val = float("0.915076") + max_val = float("10.6534") + mean = float("6.39917") + std = float("2.46428") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [512] + dtype = "float32" + min_val = float("0.0322723") + max_val = float("2.0221") + mean = float("0.110115") + std = float("0.109804") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [512] + dtype = "float32" + min_val = float("-0.714717") + max_val = float("0.616614") + mean = float("-0.061605") + std = float("0.100287") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [512, 512, 1, 1] + dtype = "float32" + min_val = float("-0.927952") + max_val = float("0.912147") + mean = float("-0.00487282") + std = float("0.0375902") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [512] + dtype = "float32" + min_val = float("-0.131056") + max_val = float("0.0922238") + mean = float("-0.0166377") + std = float("0.0186343") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [512, 128, 1, 1] + dtype = "float32" + min_val = float("-0.563108") + max_val = float("0.482628") + mean = float("-0.00421348") + std = float("0.0367703") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [128] + dtype = "float32" + min_val = float("-0.0351852") + max_val = float("0.4196") + mean = float("0.0130297") + std = float("0.0433383") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [128, 512, 1, 1] + dtype = "float32" + min_val = float("-1.3301") + max_val = float("0.743327") + mean = float("0.000846266") + std = float("0.0344146") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [512] + dtype = "float32" + min_val = float("-2.99653") + max_val = float("1.90625") + mean = float("0.0115887") + std = float("0.444884") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [512] + dtype = "float32" + min_val = float("0.33003") + max_val = float("7.14161") + mean = float("1.30241") + std = float("0.811664") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [512] + dtype = "float32" + min_val = float("1.60311e-05") + max_val = float("0.873089") + mean = float("0.0314933") + std = float("0.0676433") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [512] + dtype = "float32" + min_val = float("-1.67511") + max_val = float("0.427997") + mean = float("-0.0939804") + std = float("0.142749") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [512, 1, 5, 5] + dtype = "float32" + min_val = 
float("-0.265693") + max_val = float("0.34381") + mean = float("0.01971") + std = float("0.0488623") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [512] + dtype = "float32" + min_val = float("-2.15339") + max_val = float("2.63358") + mean = float("-0.893422") + std = float("0.776881") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [512] + dtype = "float32" + min_val = float("0.0274539") + max_val = float("2.683") + mean = float("0.991873") + std = float("0.398585") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [512] + dtype = "float32" + min_val = float("0.00839155") + max_val = float("0.453825") + mean = float("0.0464475") + std = float("0.0287356") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [512] + dtype = "float32" + min_val = float("-0.953258") + max_val = float("0.850259") + mean = float("-0.182058") + std = float("0.244472") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [512, 256, 1, 1] + dtype = "float32" + min_val = float("-0.492484") + max_val = float("0.855615") + mean = float("-0.00331369") + std = float("0.0473135") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [256] + dtype = "float32" + min_val = float("-0.300282") + max_val = float("0.0431338") + mean = float("-0.0277728") + std = float("0.0293606") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [256, 64, 1, 1] + dtype = "float32" + min_val = float("-0.732785") + max_val = float("0.488966") + mean = float("-0.00470549") + std = float("0.0517995") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [64, 256, 1, 1] + dtype = "float32" + min_val = float("-1.29047") + max_val = float("0.677073") + mean = float("0.000834517") + std = float("0.0502748") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [256] + dtype = "float32" + min_val = float("-2.31693") + max_val = float("2.18567") + mean = float("0.624999") + std = float("0.572842") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [256] + dtype = "float32" + min_val = float("0.203989") + max_val = float("9.10141") + mean = float("1.39641") + std = float("0.855435") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [256] + dtype = "float32" + min_val = float("4.94662e-05") + max_val = float("1.67876") + mean = float("0.0754494") + std = float("0.160558") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [256] + dtype = "float32" + min_val = float("-1.75122") + max_val = float("1.0715") + mean = float("-0.0645766") + std = float("0.219713") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.247528") + max_val = float("0.267467") + mean = float("0.0217321") + std = float("0.0588233") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [256] + dtype = "float32" + min_val = float("-2.38884") + max_val = 
float("1.92813") + mean = float("-0.704959") + std = float("0.648384") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [256] + dtype = "float32" + min_val = float("0.0248943") + max_val = float("2.32932") + mean = float("0.99848") + std = float("0.339812") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [256] + dtype = "float32" + min_val = float("0.164861") + max_val = float("2.33681") + mean = float("0.670703") + std = float("0.278408") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [256] + dtype = "float32" + min_val = float("-3.52098") + max_val = float("2.53263") + mean = float("-0.501457") + std = float("0.981916") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.604677") + max_val = float("0.616501") + mean = float("-0.00409391") + std = float("0.0522357") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [256] + dtype = "float32" + min_val = float("-1.30931") + max_val = float("5.34531") + mean = float("1.05255") + std = float("0.933432") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [256] + dtype = "float32" + min_val = float("0.444702") + max_val = float("2.71313") + mean = float("0.815764") + std = float("0.326504") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [256] + dtype = "float32" + min_val = float("5.02145e-05") + max_val = float("2.61581") + mean = float("0.065821") + std = float("0.198044") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [256] + dtype = "float32" + min_val = float("-0.911464") + max_val = float("0.709433") + mean = float("-0.0463051") + std = float("0.156301") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.404726") + max_val = float("0.561177") + mean = float("0.00650608") + std = float("0.0681317") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [256] + dtype = "float32" + min_val = float("-1.94837") + max_val = float("2.22191") + mean = float("-0.47184") + std = float("0.66991") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [256] + dtype = "float32" + min_val = float("0.0310262") + max_val = float("2.54106") + mean = float("0.972144") + std = float("0.450682") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [256] + dtype = "float32" + min_val = float("0.266377") + max_val = float("1.99195") + mean = float("0.66369") + std = float("0.23844") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [256] + dtype = "float32" + min_val = float("-3.18553") + max_val = float("2.31156") + mean = float("-0.410527") + std = float("1.01772") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.661761") + max_val = float("0.454845") + mean = float("-0.00341439") + std = float("0.0496098") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [256] + dtype = "float32" + min_val = float("-1.55942") + max_val = 
float("8.78174") + mean = float("1.24145") + std = float("1.033") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [256] + dtype = "float32" + min_val = float("0.399551") + max_val = float("2.38777") + mean = float("0.855712") + std = float("0.335078") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [256] + dtype = "float32" + min_val = float("4.54132e-05") + max_val = float("2.30389") + mean = float("0.0852217") + std = float("0.19819") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [256] + dtype = "float32" + min_val = float("-1.53342") + max_val = float("0.894373") + mean = float("-0.034722") + std = float("0.21968") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.52163") + max_val = float("0.468327") + mean = float("0.00847092") + std = float("0.0699048") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [256] + dtype = "float32" + min_val = float("-1.94866") + max_val = float("2.38269") + mean = float("-0.433062") + std = float("0.779102") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [256] + dtype = "float32" + min_val = float("0.0353002") + max_val = float("3.13606") + mean = float("1.08369") + std = float("0.502936") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [256] + dtype = "float32" + min_val = float("0.267775") + max_val = float("1.84571") + mean = float("0.597777") + std = float("0.222868") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [256] + dtype = "float32" + min_val = float("-4.21206") + max_val = float("2.74549") + mean = float("-0.655925") + std = float("1.41117") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.745881") + max_val = float("0.458491") + mean = float("-0.00328742") + std = float("0.0492368") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [256] + dtype = "float32" + min_val = float("-1.6526") + max_val = float("4.78229") + mean = float("1.33182") + std = float("1.06161") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [256] + dtype = "float32" + min_val = float("0.443923") + max_val = float("2.89615") + mean = float("0.825036") + std = float("0.347257") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [256] + dtype = "float32" + min_val = float("5.1616e-05") + max_val = float("2.44101") + mean = float("0.0898679") + std = float("0.239694") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [256] + dtype = "float32" + min_val = float("-1.14129") + max_val = float("1.65247") + mean = float("0.0116179") + std = float("0.242916") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.506726") + max_val = float("0.541296") + mean = float("0.00976092") + std = float("0.0731665") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [256] + dtype = "float32" + min_val = float("-1.7087") + 
max_val = float("2.63905") + mean = float("-0.358703") + std = float("0.80408") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [256] + dtype = "float32" + min_val = float("0.0369452") + max_val = float("2.95182") + mean = float("0.968821") + std = float("0.509186") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [256] + dtype = "float32" + min_val = float("0.223303") + max_val = float("1.40342") + mean = float("0.515294") + std = float("0.194983") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [256] + dtype = "float32" + min_val = float("-3.10657") + max_val = float("2.92865") + mean = float("-0.456729") + std = float("1.00126") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.393396") + max_val = float("0.540101") + mean = float("-0.00325585") + std = float("0.049361") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [256] + dtype = "float32" + min_val = float("-3.66286") + max_val = float("4.69908") + mean = float("0.87936") + std = float("1.16991") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [256] + dtype = "float32" + min_val = float("0.396038") + max_val = float("3.18987") + mean = float("0.874766") + std = float("0.39682") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [256] + dtype = "float32" + min_val = float("4.42589e-05") + max_val = float("1.07897") + mean = float("0.0780041") + std = float("0.157112") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [256] + dtype = "float32" + min_val = float("-1.60242") + max_val = float("1.13025") + mean = float("-0.016689") + std = float("0.242318") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.477884") + max_val = float("0.502954") + mean = float("0.0126727") + std = float("0.0707076") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [256] + dtype = "float32" + min_val = float("-1.83087") + max_val = float("2.42642") + mean = float("-0.305437") + std = float("0.809707") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [256] + dtype = "float32" + min_val = float("0.0313435") + max_val = float("3.32059") + mean = float("0.97765") + std = float("0.588425") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [256] + dtype = "float32" + min_val = float("0.124685") + max_val = float("1.21656") + mean = float("0.352423") + std = float("0.147248") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [256] + dtype = "float32" + min_val = float("-3.4848") + max_val = float("3.07169") + mean = float("-0.178261") + std = float("0.898995") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [256, 256, 1, 1] + dtype = "float32" + min_val = float("-0.550557") + max_val = float("0.634779") + mean = float("-0.00302642") + std = float("0.051265") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [256] + dtype = "float32" + min_val = 
float("-3.67435") + max_val = float("3.23799") + mean = float("0.328727") + std = float("1.28371") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [256] + dtype = "float32" + min_val = float("0.316049") + max_val = float("5.72779") + mean = float("0.957953") + std = float("0.567461") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [256] + dtype = "float32" + min_val = float("3.20013e-05") + max_val = float("4.24667") + mean = float("0.0981319") + std = float("0.317482") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [256] + dtype = "float32" + min_val = float("-1.87702") + max_val = float("1.02751") + mean = float("-0.0059837") + std = float("0.253696") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [256, 1, 5, 5] + dtype = "float32" + min_val = float("-0.651681") + max_val = float("0.818378") + mean = float("0.00447852") + std = float("0.0843552") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [256] + dtype = "float32" + min_val = float("-2.63124") + max_val = float("3.07714") + mean = float("0.166551") + std = float("0.816235") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [256] + dtype = "float32" + min_val = float("0.0253762") + max_val = float("3.12846") + mean = float("0.630966") + std = float("0.566876") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [256] + dtype = "float32" + min_val = float("0.0732147") + max_val = float("2.00221") + mean = float("0.306021") + std = float("0.233483") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [256] + dtype = "float32" + min_val = float("-4.15919") + max_val = float("3.24149") + mean = float("-0.0216345") + std = float("1.01416") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [256, 128, 1, 1] + dtype = "float32" + min_val = float("-0.458854") + max_val = float("0.539943") + mean = float("-0.00102917") + std = float("0.0604539") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [128] + dtype = "float32" + min_val = float("-1.47661") + max_val = float("3.23365") + mean = float("0.876993") + std = float("1.3921") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [128] + dtype = "float32" + min_val = float("0.433581") + max_val = float("1.85128") + mean = float("0.856498") + std = float("0.343666") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [128] + dtype = "float32" + min_val = float("2.15841e-05") + max_val = float("2.94433") + mean = float("0.0955551") + std = float("0.287347") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [128] + dtype = "float32" + min_val = float("-0.772058") + max_val = float("2.64243") + mean = float("0.0802506") + std = float("0.363494") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.474235") + max_val = float("0.46202") + mean = float("0.0180255") + std = float("0.116947") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [128] + dtype 
= "float32" + min_val = float("-1.03079") + max_val = float("3.48196") + mean = float("0.351474") + std = float("0.816892") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [128] + dtype = "float32" + min_val = float("0.0290844") + max_val = float("3.08291") + mean = float("0.848079") + std = float("0.661486") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [128] + dtype = "float32" + min_val = float("0.167354") + max_val = float("4.1238") + mean = float("0.792821") + std = float("0.536443") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [128] + dtype = "float32" + min_val = float("-3.23238") + max_val = float("2.81434") + mean = float("-0.331955") + std = float("0.970932") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [128, 128, 1, 1] + dtype = "float32" + min_val = float("-0.623159") + max_val = float("0.594043") + mean = float("-0.00524257") + std = float("0.0722632") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [128] + dtype = "float32" + min_val = float("-1.87488") + max_val = float("3.28169") + mean = float("0.409411") + std = float("1.12985") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [128] + dtype = "float32" + min_val = float("0.482481") + max_val = float("4.41269") + mean = float("1.40579") + std = float("0.673597") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [128] + dtype = "float32" + min_val = float("4.09592e-05") + max_val = float("3.48636") + mean = float("0.182057") + std = float("0.467894") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [128] + dtype = "float32" + min_val = float("-1.76651") + max_val = float("1.15869") + mean = float("-0.00627822") + std = float("0.385738") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [128, 1, 3, 3] + dtype = "float32" + min_val = float("-0.835065") + max_val = float("0.724195") + mean = float("2.3867e-05") + std = float("0.152833") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [128] + dtype = "float32" + min_val = float("-1.41918") + max_val = float("4.45937") + mean = float("0.928172") + std = float("1.15334") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [128] + dtype = "float32" + min_val = float("0.0387113") + max_val = float("4.59836") + mean = float("0.978259") + std = float("0.897297") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [128] + dtype = "float32" + min_val = float("0.220187") + max_val = float("13.906") + mean = float("1.47607") + std = float("1.77322") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [128] + dtype = "float32" + min_val = float("-4.88648") + max_val = float("5.978") + mean = float("-0.143999") + std = float("1.80874") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [128, 64, 1, 1] + dtype = "float32" + min_val = float("-0.531509") + max_val = float("0.582687") + mean = float("-0.000919442") + std = float("0.083879") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape 
= [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.402881") + max_val = float("0.339908") + mean = float("0.0274269") + std = float("0.119171") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-0.663992") + max_val = float("0.719946") + mean = float("-0.00240824") + std = float("0.097702") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [64, 1, 3, 3] + dtype = "float32" + min_val = float("-0.435785") + max_val = float("0.482322") + mean = float("0.0138155") + std = float("0.137694") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.61147") + max_val = 
float("0.700056") + mean = float("-0.0010653") + std = float("0.108931") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [32, 1, 3, 3] + dtype = "float32" + min_val = float("-0.643489") + max_val = float("0.790969") + mean = float("0.00377921") + std = float("0.189285") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [32, 16, 1, 1] + dtype = "float32" + min_val = float("-0.683211") + max_val = float("0.764207") + mean = float("0.00309465") + std = float("0.160636") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [16, 1, 3, 3] + dtype = "float32" + min_val = float("-1.09594") + max_val = float("0.835851") + mean = float("-0.0208495") + std = float("0.311673") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.702156") + max_val = float("0.832108") + mean = float("0.00730695") + std = float("0.198025") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..8954562a4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +3c0581957e4dc90efeb656504e6c756d99634bfd60a327fb25c4ab52f8ed9494 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/input_meta.py new file mode 100644 index 000000000..6cae46716 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/input_meta.py @@ -0,0 +1,77 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 26, 1, 40, 1] + dtype = "float32" + min_val = float("-inf") + max_val = float("6.34192") + mean = float("-inf") + std = float("nan") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [26] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [40] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 512, 1, 40] + dtype = "float32" + min_val = float("-0.374952") + max_val = float("21.0197") + mean = float("1.42975") + std = float("2.85595") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 1, 512] + dtype = "float32" + min_val = float("-13.7732") + max_val = float("1.4484") + mean = float("-0.0286131") + std = float("0.599776") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 26, 512] + dtype = "float32" + min_val = float("-0.985823") + max_val = float("0.992505") + mean = float("-0.00543335") + std = float("0.574291") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/model.py new file mode 100644 index 000000000..730a39e97 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/model.py @@ -0,0 +1,147 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + 
combine_0 = [data_1, data_2, full_0] + del full_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x-1xf32) <- (8x26x1x40x1xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.softmax: (-1x-1x-1xf32) <- (-1x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_1 = [data_1, data_2, data_3, data_4, data_5] + del data_2, data_3, data_4, data_5 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (-1x-1x-1x-1x-1xf32) <- (-1x-1x-1xf32, 5xi64) + reshape_1 = paddle._C_ops.reshape(softmax_0, stack_1) + del stack_1 + + # pd_op.transpose: (-1x-1x-1x-1x-1xf32) <- (-1x-1x-1x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_1, [0, 1, 4, 2, 3]) + del reshape_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (8x1x512x1x40xf32) <- (8x512x1x40xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_6, full_int_array_0) + del data_6 + + # pd_op.multiply: (8x-1x512x-1x40xf32) <- (8x1x512x1x40xf32, -1x-1x-1x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(unsqueeze_0, transpose_0) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.sum: (8x-1x512xf32) <- (8x-1x512x-1x40xf32, 2xi64) + sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_1, None, False) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("26"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("512"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [data_1, full_1, full_2] + del data_1, full_1, full_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.expand: (-1x26x512xf32) <- (8x1x512xf32, 3xi64) + expand_0 = paddle._C_ops.expand(data_7, stack_2) + del data_7 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x26x512xf32, 8x-1x512xf32, -1x26x512xf32]) <- (8x26x512xf32, 8x-1x512xf32, -1x26x512xf32) + combine_3 = [data_8, sum_0, expand_0] + del data_8 + + # pd_op.concat: (8x26x1536xf32) <- ([8x26x512xf32, 8x-1x512xf32, -1x26x512xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_3, full_3) + del combine_3 + + # pd_op.matmul: (8x26x6626xf32) <- (8x26x1536xf32, 1536x6626xf32) + matmul_0 = paddle._C_ops.matmul(concat_0, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (8x26x6626xf32) <- (8x26x6626xf32, 6626xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_0) + del parameter_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (8x26x6626xf32, 8x26x6626xui8) <- (8x26x6626xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_4, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del ( + add_0, + concat_0, + expand_0, + full_3, + full_4, + full_int_array_0, + full_int_array_1, + matmul_0, + multiply_0, + softmax_0, + stack_2, + sum_0, + transpose_0, + 
unsqueeze_0, + ) + + return dropout_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8575f12f8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/weight_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6626] + dtype = "float32" + min_val = float("-0.0303926") + max_val = float("0.0998053") + mean = float("-0.00364787") + std = float("0.00237118") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [1536, 6626] + dtype = "float32" + min_val = float("-1.32345") + max_val = float("2.33782") + mean = float("-0.000833732") + std = float("0.0219708") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..556425964 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +191b451ee9ab92acfcb4e6040792558084caccf2504f69966683a93f81fbf80b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/input_meta.py new file mode 100644 index 000000000..e26c51273 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 512, 1, 40] + dtype = "float32" + min_val = float("-0.374952") + max_val = float("21.0197") + mean = float("1.42975") + std = float("2.85595") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/model.py new file mode 100644 index 000000000..df81a3f70 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/model.py @@ -0,0 +1,804 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + data_0, + ): + # pd_op.assign: (8x512x1x40xf32) <- (8x512x1x40xf32) + assign_0 = data_0 + 
del data_0 + + # pd_op.conv2d: (8x64x1x40xf32) <- (8x512x1x40xf32, 64x512x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + assign_0, parameter_52, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (8x64x1x40xf32) <- (8x64x1x40xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (8x120x1x40xf32) <- (8x64x1x40xf32, 120x64x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_47 + + # pd_op.batch_norm_: (8x120x1x40xf32, 120xf32, 120xf32, 120xf32, 120xf32, -1xui8) <- (8x120x1x40xf32, 120xf32, 120xf32, 120xf32, 120xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.swish: (8x120x1x40xf32) <- (8x120x1x40xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.flatten: (8x120x40xf32) <- (8x120x1x40xf32) + flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) + + # pd_op.transpose: (8x40x120xf32) <- (8x120x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_42, parameter_41, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_41, parameter_42 + + # pd_op.matmul: (8x40x360xf32) <- (8x40x120xf32, 120x360xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (8x40x360xf32) <- (8x40x360xf32, 360xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_39) + del parameter_39 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_0 = [0, -1, 3, 8, 15] + + # pd_op.reshape: (8x40x3x8x15xf32) <- (8x40x360xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_0) + + # pd_op.transpose: (3x8x8x40x15xf32) <- (8x40x3x8x15xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_2 + + # pd_op.slice: (8x8x40x15xf32) <- 
(3x8x8x40x15xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.258199"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_0 + + # pd_op.scale: (8x8x40x15xf32) <- (8x8x40x15xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_0, full_0, float("0"), True) + del slice_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_3 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_4 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x8x15x40xf32) <- (8x8x40x15xf32) + transpose_2 = paddle._C_ops.transpose(slice_1, [0, 1, 3, 2]) + del slice_1 + + # pd_op.matmul: (8x8x40x40xf32) <- (8x8x40x15xf32, 8x8x15x40xf32) + matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) + + # pd_op.softmax: (8x8x40x40xf32) <- (8x8x40x40xf32) + softmax_0 = paddle._C_ops.softmax(matmul_1, -1) + del matmul_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_11 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_12 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_15 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_16 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_17 = full_1 + + # pd_op.dropout: (8x8x40x40xf32, 8x8x40x40xui8) <- (8x8x40x40xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (8x8x40x15xf32) <- (8x8x40x40xf32, 8x8x40x15xf32) + matmul_2 = paddle._C_ops.matmul(dropout_0, slice_2, False, False) + + # pd_op.transpose: (8x40x8x15xf32) <- (8x8x40x15xf32) + transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [0, -1, 120] + + # pd_op.reshape: (8x40x120xf32) <- (8x40x8x15xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) + + # pd_op.matmul: (8x40x120xf32) <- (8x40x120xf32, 120x120xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) + del parameter_38 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_2 = paddle._C_ops.add(matmul_3, parameter_37) + del parameter_37 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_2, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, 
(list, tuple)) else (out, None), + ) + del add_2 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_3 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_3, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.matmul: (8x40x240xf32) <- (8x40x120xf32, 120x240xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) + del parameter_34 + + # pd_op.add: (8x40x240xf32) <- (8x40x240xf32, 240xf32) + add_4 = paddle._C_ops.add(matmul_4, parameter_33) + del parameter_33 + + # pd_op.swish: (8x40x240xf32) <- (8x40x240xf32) + swish_2 = paddle._C_ops.swish(add_4) + + # pd_op.dropout: (8x40x240xf32, 8x40x240xui8) <- (8x40x240xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_2, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_2 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x240xf32, 240x120xf32) + matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) + del parameter_32 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_5 = paddle._C_ops.add(matmul_5, parameter_31) + del parameter_31 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_6 = paddle._C_ops.add(add_3, dropout_6) + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_30, parameter_29, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_29, parameter_30 + + # pd_op.matmul: (8x40x360xf32) <- (8x40x120xf32, 120x360xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) + del parameter_28 + + # pd_op.add: (8x40x360xf32) <- (8x40x360xf32, 360xf32) + add_7 = paddle._C_ops.add(matmul_6, parameter_27) + del parameter_27 + + # pd_op.reshape: (8x40x3x8x15xf32) <- (8x40x360xf32, 5xi64) + reshape_2 = paddle._C_ops.reshape(add_7, full_int_array_0) + del full_int_array_0 + + # pd_op.transpose: (3x8x8x40x15xf32) <- (8x40x3x8x15xf32) + transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) + del reshape_2 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.scale: (8x8x40x15xf32) <- (8x8x40x15xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_3, full_0, float("0"), True) + del slice_3 + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x8x40x15xf32) <- (3x8x8x40x15xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: 
(8x8x15x40xf32) <- (8x8x40x15xf32) + transpose_5 = paddle._C_ops.transpose(slice_4, [0, 1, 3, 2]) + del slice_4 + + # pd_op.matmul: (8x8x40x40xf32) <- (8x8x40x15xf32, 8x8x15x40xf32) + matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) + + # pd_op.softmax: (8x8x40x40xf32) <- (8x8x40x40xf32) + softmax_1 = paddle._C_ops.softmax(matmul_7, -1) + del matmul_7 + + # pd_op.dropout: (8x8x40x40xf32, 8x8x40x40xui8) <- (8x8x40x40xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (8x8x40x15xf32) <- (8x8x40x40xf32, 8x8x40x15xf32) + matmul_8 = paddle._C_ops.matmul(dropout_8, slice_5, False, False) + + # pd_op.transpose: (8x40x8x15xf32) <- (8x8x40x15xf32) + transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (8x40x120xf32) <- (8x40x8x15xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) + del full_int_array_5 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x120xf32, 120x120xf32) + matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) + del parameter_26 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_8 = paddle._C_ops.add(matmul_9, parameter_25) + del parameter_25 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_9 = paddle._C_ops.add(add_6, dropout_10) + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_24, parameter_23, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_23, parameter_24 + + # pd_op.matmul: (8x40x240xf32) <- (8x40x120xf32, 120x240xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) + del parameter_22 + + # pd_op.add: (8x40x240xf32) <- (8x40x240xf32, 240xf32) + add_10 = paddle._C_ops.add(matmul_10, parameter_21) + del parameter_21 + + # pd_op.swish: (8x40x240xf32) <- (8x40x240xf32) + swish_3 = paddle._C_ops.swish(add_10) + + # pd_op.dropout: (8x40x240xf32, 8x40x240xui8) <- (8x40x240xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_3, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_3 + + # pd_op.matmul: (8x40x120xf32) <- (8x40x240xf32, 240x120xf32) + matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) + del parameter_20 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 120xf32) + add_11 = paddle._C_ops.add(matmul_11, parameter_19) + del parameter_19 + + # pd_op.dropout: (8x40x120xf32, 8x40x120xui8) <- (8x40x120xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.add: (8x40x120xf32) <- (8x40x120xf32, 8x40x120xf32) + add_12 = 
paddle._C_ops.add(add_9, dropout_14) + + # pd_op.layer_norm: (8x40x120xf32, 8x40xf32, 8x40xf32) <- (8x40x120xf32, 120xf32, 120xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_18, parameter_17, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_17, parameter_18 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [0, 1, 40, 120] + + # pd_op.reshape: (8x1x40x120xf32) <- (8x40x120xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) + del full_int_array_6 + + # pd_op.transpose: (8x120x1x40xf32) <- (8x1x40x120xf32) + transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) + del reshape_4 + + # pd_op.conv2d: (8x512x1x40xf32) <- (8x120x1x40xf32, 512x120x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (8x512x1x40xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x1x40xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.swish: (8x512x1x40xf32) <- (8x512x1x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x512x1x40xf32, 8x512x1x40xf32]) <- (8x512x1x40xf32, 8x512x1x40xf32) + combine_0 = [assign_0, swish_4] + + # pd_op.concat: (8x1024x1x40xf32) <- ([8x512x1x40xf32, 8x512x1x40xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.conv2d: (8x64x1x40xf32) <- (8x1024x1x40xf32, 64x1024x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + concat_0, parameter_11, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.swish: (8x64x1x40xf32) <- (8x64x1x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x64x1x40xf32) <- (8x64x1x40xf32, 64x64x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x1x40xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_5, + 
parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x64x1x40xf32) <- (8x64x1x40xf32) + swish_6 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.squeeze: (8x64x40xf32) <- (8x64x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) + + # pd_op.transpose: (8x40x64xf32) <- (8x64x40xf32) + transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.matmul: (8x40x6625xf32) <- (8x40x64xf32, 64x6625xf32) + matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (8x40x6625xf32) <- (8x40x6625xf32, 6625xf32) + add_0 = paddle._C_ops.add(matmul_12, parameter_0) + del ( + add_1, + add_10, + add_12, + add_3, + add_4, + add_6, + add_7, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + concat_0, + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + conv2d_4, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_2, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_1, + full_2, + full_int_array_1, + full_int_array_2, + full_int_array_3, + full_int_array_4, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_10, + matmul_11, + matmul_12, + matmul_3, + matmul_4, + matmul_5, + matmul_6, + matmul_9, + parameter_0, + reshape_1, + reshape_3, + scale_0, + scale_1, + slice_2, + slice_5, + softmax_0, + softmax_1, + swish_0, + swish_1, + swish_4, + swish_5, + swish_6, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + ) + + return add_0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/weight_meta.py new file mode 100644 index 000000000..01b37c91b --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/weight_meta.py @@ -0,0 +1,557 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6625] + dtype = "float32" + min_val = float("-0.377916") + max_val = float("1.42216") + mean = float("-0.0189959") + std = float("0.0417399") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [64, 6625] + dtype = "float32" + min_val = float("-2.61296") + max_val = float("2.32764") + mean = 
float("-0.0787059") + std = float("0.148571") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [64, 64, 1, 1] + dtype = "float32" + min_val = float("-1.09304") + max_val = float("1.56807") + mean = float("0.00635252") + std = float("0.257309") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [64, 1024, 3, 3] + dtype = "float32" + min_val = float("-1.15726") + max_val = float("1.1427") + mean = float("-0.00208067") + std = float("0.0469813") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [512] + dtype = "float32" + min_val = float("-1.37588") + max_val = float("1.4867") + mean = float("-0.371081") + std = float("0.241713") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [512] + dtype = "float32" + min_val = float("-9.05239e-21") + max_val = float("4.69402") + mean = float("0.890169") + std = float("0.3807") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [512] + dtype = "float32" + min_val = float("5.60519e-45") + max_val = float("0.378778") + mean = float("0.0723262") + std = float("0.0465915") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [512] + dtype = "float32" + min_val = float("-0.881991") + max_val = float("1.24445") + mean = float("0.0585753") + std = float("0.153225") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [512, 120, 1, 1] + dtype = "float32" + min_val = float("-0.711734") + max_val = float("0.896147") + mean = float("-0.00231909") + std = float("0.0873188") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [120] + dtype = "float32" + min_val = float("-2.55902e-05") + max_val = float("2.47097e-05") + mean = float("-5.49631e-07") + std = float("7.93629e-06") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [120] + dtype = "float32" + min_val = float("0.0868589") + max_val = float("0.730986") + mean = float("0.379714") + std = float("0.0970573") + data = None 
+ + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [120] + dtype = "float32" + min_val = float("-0.466003") + max_val = float("0.75025") + mean = float("0.00573285") + std = float("0.130007") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [240, 120] + dtype = "float32" + min_val = float("-0.846989") + max_val = float("0.796952") + mean = float("-0.000801207") + std = float("0.0670105") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [240] + dtype = "float32" + min_val = float("-0.277188") + max_val = float("0.0515708") + mean = float("-0.0686214") + std = float("0.0476718") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [120, 240] + dtype = "float32" + min_val = float("-0.853996") + max_val = float("0.802498") + mean = float("-0.00989718") + std = float("0.107437") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [120] + dtype = "float32" + min_val = float("-2.5141") + max_val = float("1.87775") + mean = float("0.162905") + std = float("0.400739") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [120] + dtype = "float32" + min_val = float("0.534906") + max_val = float("1.80272") + mean = float("1.25191") + std = float("0.153295") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [120] + dtype = "float32" + min_val = float("-0.256357") + max_val = float("0.234697") + mean = float("0.00734568") + std = float("0.0857936") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [120, 120] + dtype = "float32" + min_val = float("-0.47502") + max_val = float("0.382786") + mean = float("-0.000270915") + std = float("0.0853847") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [360] + dtype = "float32" + min_val = float("-0.803446") + max_val = float("0.875539") + mean = float("0.00725798") + std = float("0.142288") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [120, 360] + dtype = "float32" + min_val = float("-0.580934") + max_val = float("0.64566") + mean = float("0.000383004") + std = float("0.0956631") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [120] + dtype = "float32" + min_val = float("-1.09111") + max_val = float("0.787922") + mean = float("0.083151") + std = float("0.241889") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [120] + dtype = "float32" + min_val = float("0.51279") + max_val = float("1.1023") + mean = float("0.904169") + std = float("0.0929786") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [120] + dtype = "float32" + min_val = float("-0.228875") + max_val = float("0.21483") + mean = float("-6.3478e-05") + std = float("0.0831831") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [240, 120] + dtype = "float32" + min_val = float("-0.76132") + max_val = float("0.960517") + mean = float("-0.000218174") + std = float("0.0606104") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [240] + dtype = "float32" + min_val = float("-0.266384") + max_val = float("0.075881") + mean = float("-0.0712071") + std = 
float("0.0445641") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [120, 240] + dtype = "float32" + min_val = float("-0.630314") + max_val = float("0.771772") + mean = float("-0.00134198") + std = float("0.113584") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [120] + dtype = "float32" + min_val = float("-3.14951") + max_val = float("1.23596") + mean = float("0.038144") + std = float("0.46898") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [120] + dtype = "float32" + min_val = float("0.901017") + max_val = float("1.55979") + mean = float("1.21469") + std = float("0.097717") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [120] + dtype = "float32" + min_val = float("-0.164768") + max_val = float("0.14499") + mean = float("0.00512634") + std = float("0.0474912") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [120, 120] + dtype = "float32" + min_val = float("-0.47646") + max_val = float("0.325474") + mean = float("-3.23615e-05") + std = float("0.0733312") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [360] + dtype = "float32" + min_val = float("-1.05056") + max_val = float("0.967884") + mean = float("-0.00157208") + std = float("0.174592") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [120, 360] + dtype = "float32" + min_val = float("-0.801017") + max_val = float("0.908275") + mean = float("-6.76613e-05") + std = float("0.0963535") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [120] + dtype = "float32" + min_val = float("-1.36757") + max_val = float("0.844466") + mean = float("0.0633027") + std = float("0.305119") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [120] + dtype = "float32" + min_val = float("-8.01928e-05") + max_val = float("1.32548") + mean = float("0.884662") + std = float("0.225001") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [120] + dtype = "float32" + min_val = float("-0.90043") + max_val = float("2.06854") + mean = float("0.216484") + std = float("0.419805") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [120] + dtype = "float32" + min_val = float("0.31917") + max_val = float("2.80272") + mean = float("1.46157") + std = float("0.309021") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [120] + dtype = "float32" + min_val = float("0.00564911") + max_val = float("0.304982") + mean = float("0.0348703") + std = float("0.0309459") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [120] + dtype = "float32" + min_val = float("-1.19566") + max_val = float("0.773057") + mean = float("-0.0313422") + std = float("0.363322") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [120, 64, 1, 1] + dtype = "float32" + min_val = float("-1.27055") + max_val = float("1.294") + mean = float("0.000513944") + std = float("0.199517") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [64, 512, 3, 3] + dtype = "float32" + min_val = float("-1.40578") + max_val = float("1.09913") + mean = float("-0.00247433") + std = float("0.050292") + data = None diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..b39968cf6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +52169c818b1f22c50ffd0e3682973a1a793a24b40927dc1481c9bbc7c7e736a3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_net.json new file mode 100644 index 000000000..6e8c4238c --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-OCRv3_mobile_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/input_meta.py new file mode 100644 index 000000000..dbf466523 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 26, 1, 40, 1] + dtype = "float32" + min_val = float("-6.07778") + max_val = float("6.34192") + mean = float("-0.398269") + std = float("1.96614") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [37] diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/model.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/model.py new file mode 100644 index 000000000..52fce5b77 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/model.py @@ -0,0 +1,47 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_1, data_1] + del data_1, full_1 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 2147483647] + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 1] + + # pd_op.set_value_: (8x26x1x40x1xf32) <- (8x26x1x40x1xf32, 2xi64, 2xi64, 2xi64) + set_value__0 = paddle._C_ops.set_value_( + data_0, + stack_0, + full_int_array_0, + full_int_array_1, + [0, 3], + [0], + [], + [1], + [float("-inf")], + ) + del data_0, full_int_array_0, full_int_array_1, 
stack_0 + + return set_value__0 diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..8a3ea7694 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +a29b29f75d677bfc4d3d22fe15a38544cc611ed4c5426ad85c9ccd866320ff3a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py new file mode 100644 index 000000000..eca445c75 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3024] + dtype = "int32" + min_val = 0 + max_val = 1 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py new file mode 100644 index 000000000..e57fc793b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (2x-1xb) <- (2x-1xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (2x-1xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py new file mode 100644 index 000000000..d890c95a4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.335797] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [1.17666] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [0.769235] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..abcd385f1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +eaac049364bed4d6c67fc3935e79acb80e63ba96c2283c6c4364c4ef58871728 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + 
"num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py new file mode 100644 index 000000000..30a284ba9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 384, 384] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.147246") + std = float("1.17749") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py new file mode 100644 index 000000000..36853978e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py @@ -0,0 +1,7396 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, 
+ parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + 
parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + 
parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + data_0, + ): + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_697 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_1, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) 
+ ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_661, + parameter_660, + parameter_659, + parameter_658, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_658, parameter_659, parameter_660, parameter_661 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_657 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_656, + parameter_655, + parameter_654, + parameter_653, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_653, parameter_654, parameter_655, parameter_656 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: 
(2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_652 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_651, + parameter_650, + parameter_649, + parameter_648, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_648, parameter_649, parameter_650, parameter_651 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_647 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_646, + parameter_645, + parameter_644, + parameter_643, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_643, parameter_644, parameter_645, parameter_646 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_642 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_641, + parameter_640, + parameter_639, + parameter_638, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_638, parameter_639, parameter_640, parameter_641 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_637 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_636, + parameter_635, + parameter_634, + parameter_633, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, 
+ False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_633, parameter_634, parameter_635, parameter_636 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_632 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_631, + parameter_630, + parameter_629, + parameter_628, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_628, parameter_629, parameter_630, parameter_631 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_627 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_626, + parameter_625, + parameter_624, + parameter_623, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_623, parameter_624, parameter_625, parameter_626 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) 
<- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_622 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) + del parameter_621 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_0, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( 
+ conv2d_18, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + 
batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + + # pd_op.swish: 
(2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + 
batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_549, + parameter_548, + parameter_547, + parameter_546, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_546, parameter_547, parameter_548, parameter_549 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_545 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_544, + parameter_543, + parameter_542, + parameter_541, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_541, parameter_542, parameter_543, parameter_544 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_540 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_539, + parameter_538, + parameter_537, + parameter_536, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_536, parameter_537, parameter_538, parameter_539 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- 
(2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_535 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_534, + parameter_533, + parameter_532, + parameter_531, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_531, parameter_532, parameter_533, parameter_534 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_530 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_529, + parameter_528, + parameter_527, + parameter_526, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_526, parameter_527, parameter_528, parameter_529 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_525 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + False, + float("0.9"), + float("1e-05"), 
+ "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) + del parameter_509 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_1, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: 
(2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out 
+ if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 
192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, 
parameter_451, parameter_452 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_54, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) 
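+    # The preceding stack of (3x3 conv -> batch_norm -> swish) with a parallel
+    # (1x1 conv -> batch_norm) shortcut, summed, re-activated with swish and
+    # then added back onto the running feature map appears to be a
+    # CSP/RepVGG-style basic block repeated along this 192-channel stage.
+    # The (lambda x, f: f(x)) wrapper only normalizes the return value of
+    # paddle._C_ops.batch_norm so it always unpacks into six outputs; it does
+    # not change the computation.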
+ swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_402, + parameter_401, + parameter_400, + parameter_399, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = 
paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_398 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) + del parameter_397 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_2, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, 
parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_370, + parameter_369, + parameter_368, + parameter_367, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_367, parameter_368, parameter_369, parameter_370 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_366 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_365, + parameter_364, + parameter_363, + parameter_362, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_362, parameter_363, parameter_364, parameter_365 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_361 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = 
paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_346 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = 
paddle._C_ops.conv2d( + mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_330, full_int_array_1) + del full_int_array_1, parameter_330 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_3, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + 
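+    # The block above (spatial mean -> 1x1 conv + bias -> hardsigmoid ->
+    # channel-wise multiply on the concatenated 768-channel features) acts as
+    # a squeeze-and-excitation-style channel gate. It is followed by a 1x1
+    # transition conv to 1024 channels and two parallel 1x1 convs that split
+    # the result into 384-channel branches, which looks like the start of the
+    # next CSP-style stage.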
swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(add_41) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_4 = 
paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_64 = paddle._C_ops.swish(add_42) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_5 = [swish_56, swish_64] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_45] + + # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + 
batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 
192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) 
+ add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(add_44) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_74 = paddle._C_ops.swish(add_45) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_7 = [swish_67, swish_74] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_199 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_29] + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 
= paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(add_47) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_84 = paddle._C_ops.swish(add_48) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_9 = [swish_77, swish_84] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: 
(2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_10 = [swish_86, swish_75] + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_10, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 
192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(add_50) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_94 = paddle._C_ops.swish(add_51) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_11 = [swish_87, swish_94] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) + combine_12 = [swish_96, swish_65] + + # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_12, parameter_59, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + 
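+ # [editorial note, not part of the generated code] Hedged reading of the surrounding ops: the 3x3 conv above (conv2d_133) and the 1x1 conv below (conv2d_134) both take swish_99 as input; their batch-norm outputs are summed into add_52 and passed through swish, a two-branch residual pattern (RepVGG-like) that repeats throughout this neck.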
+ # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(add_52) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(add_53) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_104 = paddle._C_ops.swish(add_54) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_13 = [swish_97, swish_104] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + 
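+ # [editorial note, not part of the generated code] concat_13 joins the two 2x384 branches (swish_97, swish_104) into a 2x768 feature; the 1x1 conv, batch_norm and swish that follow (conv2d_141 -> swish_0) appear to fuse it into the final 768-channel activation of this subgraph before the intermediate tensors are deleted.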
+ # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_53, + add_54, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + 
batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + 
batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + 
batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + 
batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + 
conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py new file mode 100644 index 000000000..133b954fb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py @@ -0,0 +1,7564 @@ +class Program_weight_tensor_parameter_0: 
+ name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.743033") + max_val = float("0.507361") + mean = float("0.020663") + std = float("0.126347") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.898341") + max_val = float("1.48377") + mean = float("0.987983") + std = float("0.0346382") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00273834") + max_val = float("0.117552") + mean = float("0.0109298") + std = float("0.0105056") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.37129") + max_val = float("0.129239") + mean = float("-0.0442188") + std = float("0.0445754") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0737572") + max_val = float("0.0366322") + mean = float("-0.000187312") + std = float("0.00262376") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.254357") + max_val = float("0.0598301") + mean = float("-0.0304517") + std = float("0.0365455") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.944225") + max_val = float("1.06993") + mean = float("0.98764") + std = float("0.0170711") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000787555") + max_val = float("0.0292413") + mean = float("0.00503152") + std = float("0.00299112") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0613106") + max_val = float("0.0666846") + mean = float("-0.0116626") + std = float("0.0163377") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0343938") + max_val = float("0.0285101") + mean = float("-0.000206475") + std = float("0.00196258") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.254357") + max_val = float("0.0598301") + mean = float("-0.0304517") + std = float("0.0365455") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.872284") + max_val = float("1.23739") + mean = float("1.03025") + std = float("0.03542") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00298812") + max_val = float("0.06913") + mean = float("0.0100035") + std = float("0.00526466") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.217798") + max_val = float("0.069544") + mean = float("-0.0277671") + std = float("0.0366238") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0371442") + max_val = float("0.0433044") + mean = float("-5.51279e-05") + std = float("0.00132722") + data = None + + +class 
Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.374067") + max_val = float("0.0523626") + mean = float("-0.0502448") + std = float("0.0490669") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.960272") + max_val = float("1.34778") + mean = float("1.02211") + std = float("0.0411657") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00848703") + max_val = float("0.127638") + mean = float("0.0290221") + std = float("0.0141029") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.267895") + max_val = float("0.386565") + mean = float("-0.0388769") + std = float("0.0578821") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.025652") + max_val = float("0.0582461") + mean = float("-5.85454e-05") + std = float("0.00148034") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.156742") + max_val = float("0.0105973") + mean = float("-0.0499453") + std = float("0.029959") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.910836") + max_val = float("1.05523") + mean = float("0.980981") + std = float("0.013256") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000818602") + max_val = float("0.015916") + mean = float("0.00425207") + std = float("0.00240053") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0553324") + max_val = float("0.0530611") + mean = float("-0.00573343") + std = float("0.0163695") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0380534") + max_val = float("0.0291262") + mean = float("-0.000145551") + std = float("0.00195161") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.156742") + max_val = float("0.0105973") + mean = float("-0.0499453") + std = float("0.029959") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.970749") + max_val = float("1.20629") + mean = float("1.02831") + std = float("0.0394471") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00291281") + max_val = float("0.0619909") + mean = float("0.0115855") + std = float("0.0064419") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.14316") + max_val = float("0.0743786") + mean = float("-0.0336338") + std = float("0.0347498") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0326419") + max_val = float("0.0530926") + mean = 
float("-6.71983e-05") + std = float("0.00132282") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.246103") + max_val = float("0.0162123") + mean = float("-0.0546987") + std = float("0.0390461") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.935276") + max_val = float("1.24725") + mean = float("1.0196") + std = float("0.0429498") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00642887") + max_val = float("0.14985") + mean = float("0.0275346") + std = float("0.0174424") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.213687") + max_val = float("0.100992") + mean = float("-0.0487334") + std = float("0.0524185") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0313679") + max_val = float("0.0549118") + mean = float("-6.83835e-05") + std = float("0.00148433") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.184919") + max_val = float("0.0322844") + mean = float("-0.053517") + std = float("0.0327273") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.904243") + max_val = float("1.01869") + mean = float("0.979303") + std = float("0.0159215") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00121268") + max_val = float("0.0185698") + mean = float("0.0049722") + std = float("0.00233631") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.130073") + max_val = float("0.0529869") + mean = float("-0.00273802") + std = float("0.0173354") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0417716") + max_val = float("0.0284739") + mean = float("-9.60982e-05") + std = float("0.00206623") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.184919") + max_val = float("0.0322844") + mean = float("-0.053517") + std = float("0.0327273") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.974999") + max_val = float("1.21351") + mean = float("1.03068") + std = float("0.0413997") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00469089") + max_val = float("0.0653512") + mean = float("0.0143986") + std = float("0.00774657") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.187123") + max_val = float("0.0984266") + mean = float("-0.0228005") + std = float("0.0323402") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = 
float("-0.0269551") + max_val = float("0.0469683") + mean = float("-4.80811e-05") + std = float("0.00136096") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.213445") + max_val = float("0.026485") + mean = float("-0.0541075") + std = float("0.0385587") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.92487") + max_val = float("1.16824") + mean = float("1.02125") + std = float("0.0372863") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00425419") + max_val = float("0.0987383") + mean = float("0.0223376") + std = float("0.0149169") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.187104") + max_val = float("0.162072") + mean = float("-0.0516316") + std = float("0.0556028") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0191691") + max_val = float("0.0361507") + mean = float("-8.79686e-05") + std = float("0.00151782") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.121349") + max_val = float("0.0778676") + mean = float("-0.0298326") + std = float("0.02283") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.963274") + max_val = float("1.12241") + mean = float("1.01457") + std = float("0.0265398") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00391785") + max_val = float("0.053385") + mean = float("0.00936041") + std = float("0.00450165") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.108579") + max_val = float("0.125758") + mean = float("-0.0221365") + std = float("0.0301141") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0419679") + max_val = float("0.0632127") + mean = float("-0.000100512") + std = float("0.00237333") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.116774") + max_val = float("0.0159135") + mean = float("-0.0176811") + std = float("0.0160398") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.940853") + max_val = float("1.23804") + mean = float("1.0135") + std = float("0.0258691") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00285411") + max_val = float("0.0532654") + mean = float("0.00768658") + std = float("0.00402365") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.101351") + max_val = float("0.0758357") + mean = float("-0.0327951") + std = float("0.029396") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + 
shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0323273") + max_val = float("0.0419256") + mean = float("-0.000154794") + std = float("0.00225172") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0965203") + max_val = float("0.00581689") + mean = float("-0.0225958") + std = float("0.0169398") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.958471") + max_val = float("1.19094") + mean = float("1.0392") + std = float("0.0317908") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00448496") + max_val = float("0.0556805") + mean = float("0.0128026") + std = float("0.00693747") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.233107") + max_val = float("0.160893") + mean = float("-0.0188617") + std = float("0.0553687") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0228928") + max_val = float("0.0361422") + mean = float("-2.08226e-05") + std = float("0.00120166") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.46246") + max_val = float("0.417289") + mean = float("0.0796105") + std = float("0.135843") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.861824") + max_val = float("1.37925") + mean = float("0.999753") + std = float("0.0519702") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.005019") + max_val = float("0.151152") + mean = float("0.0195404") + std = float("0.0157973") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.230007") + max_val = float("0.114525") + mean = float("-0.0460026") + std = float("0.0471628") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109087") + max_val = float("0.0690511") + mean = float("-0.000406287") + std = float("0.00575579") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.165038") + max_val = float("0.0547988") + mean = float("-0.0386347") + std = float("0.0421154") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.859852") + max_val = float("1.08355") + mean = float("0.961161") + std = float("0.0320714") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00155973") + max_val = float("0.0305334") + mean = float("0.00919111") + std = float("0.00640562") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0680704") + max_val = float("0.083547") + mean = float("-0.0167198") + std = float("0.027735") + data = None + + +class 
Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0553851") + max_val = float("0.0384638") + mean = float("-0.000583231") + std = float("0.00426065") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.165038") + max_val = float("0.0547988") + mean = float("-0.0386347") + std = float("0.0421154") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.909392") + max_val = float("1.22799") + mean = float("1.04913") + std = float("0.0517355") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00618387") + max_val = float("0.0601428") + mean = float("0.0162134") + std = float("0.00795116") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.21459") + max_val = float("0.292505") + mean = float("-0.0268324") + std = float("0.057947") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0437504") + max_val = float("0.0604602") + mean = float("-8.54298e-05") + std = float("0.00300512") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.30904") + max_val = float("0.0489039") + mean = float("-0.0760544") + std = float("0.0618318") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.911397") + max_val = float("1.34725") + mean = float("1.02609") + std = float("0.0557122") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00973595") + max_val = float("0.152819") + mean = float("0.0448439") + std = float("0.024794") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.186501") + max_val = float("0.249142") + mean = float("-0.0277286") + std = float("0.0570363") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0495299") + max_val = float("0.0581408") + mean = float("-0.000118023") + std = float("0.00333522") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.27082") + max_val = float("0.0055029") + mean = float("-0.0905993") + std = float("0.0443888") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.88134") + max_val = float("1.1226") + mean = float("0.962732") + std = float("0.0271064") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00249203") + max_val = float("0.0283251") + mean = float("0.00900569") + std = float("0.00424473") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0503456") + max_val = float("0.0503521") + mean = float("-0.0123481") 
+ std = float("0.015716") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0558178") + max_val = float("0.0423444") + mean = float("-0.000674284") + std = float("0.00453073") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.27082") + max_val = float("0.0055029") + mean = float("-0.0905993") + std = float("0.0443888") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.941929") + max_val = float("1.2385") + mean = float("1.04237") + std = float("0.0442231") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00587151") + max_val = float("0.0741055") + mean = float("0.0191318") + std = float("0.010681") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.148072") + max_val = float("0.0938185") + mean = float("-0.0237551") + std = float("0.0412601") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0454471") + max_val = float("0.0592261") + mean = float("-0.000107954") + std = float("0.00304373") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.307199") + max_val = float("0.0609234") + mean = float("-0.111087") + std = float("0.0621482") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.918678") + max_val = float("1.21815") + mean = float("1.02816") + std = float("0.0566506") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.0128713") + max_val = float("0.177337") + mean = float("0.0412506") + std = float("0.0235929") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.172019") + max_val = float("0.0591701") + mean = float("-0.0428226") + std = float("0.0398324") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0466526") + max_val = float("0.0663792") + mean = float("-0.000155177") + std = float("0.00347012") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.366754") + max_val = float("-0.0134222") + mean = float("-0.100255") + std = float("0.0548872") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.868592") + max_val = float("1.06216") + mean = float("0.959936") + std = float("0.0231102") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00298303") + max_val = float("0.0244492") + mean = float("0.00883176") + std = float("0.00373019") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = 
float("-0.072098") + max_val = float("0.042818") + mean = float("-0.0280475") + std = float("0.0244836") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0469252") + max_val = float("0.0444719") + mean = float("-0.00112462") + std = float("0.00488669") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.366754") + max_val = float("-0.0134222") + mean = float("-0.100255") + std = float("0.0548872") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.935173") + max_val = float("1.20371") + mean = float("1.03374") + std = float("0.043616") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00859096") + max_val = float("0.107539") + mean = float("0.0237503") + std = float("0.0166029") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.162328") + max_val = float("0.123377") + mean = float("-0.0156432") + std = float("0.0485434") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0520373") + max_val = float("0.0633059") + mean = float("-6.95137e-05") + std = float("0.00322718") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.516767") + max_val = float("-0.0203543") + mean = float("-0.126034") + std = float("0.0638195") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.849485") + max_val = float("1.2535") + mean = float("1.02927") + std = float("0.0639047") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.0148703") + max_val = float("0.241855") + mean = float("0.0348458") + std = float("0.0220686") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.185102") + max_val = float("0.184245") + mean = float("-0.0439409") + std = float("0.0540873") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0395493") + max_val = float("0.0583323") + mean = float("-0.000157674") + std = float("0.00373071") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.2713") + max_val = float("0.0560855") + mean = float("-0.0865079") + std = float("0.0441199") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.915361") + max_val = float("1.26769") + mean = float("1.02552") + std = float("0.0564528") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00711924") + max_val = float("0.0896093") + mean = float("0.0157688") + std = float("0.00827259") + data = None + + +class 
Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.126929") + max_val = float("0.130507") + mean = float("-0.026764") + std = float("0.0351332") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0617836") + max_val = float("0.0625637") + mean = float("-0.000234352") + std = float("0.00557708") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.162411") + max_val = float("0.0343933") + mean = float("-0.0146927") + std = float("0.026659") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.896164") + max_val = float("1.15119") + mean = float("1.0042") + std = float("0.0388476") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00389326") + max_val = float("0.0742727") + mean = float("0.0112922") + std = float("0.00784653") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0913461") + max_val = float("0.0634663") + mean = float("-0.0244524") + std = float("0.0247669") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0566803") + max_val = float("0.0710687") + mean = float("-0.000235042") + std = float("0.00475399") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.14785") + max_val = float("0.00919369") + mean = float("-0.043826") + std = float("0.0292536") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.94402") + max_val = float("1.18839") + mean = float("1.03601") + std = float("0.0346975") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00378721") + max_val = float("0.0744969") + mean = float("0.0185453") + std = float("0.0106259") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.529234") + max_val = float("0.400014") + mean = float("-0.0329347") + std = float("0.115644") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.033215") + max_val = float("0.0441369") + mean = float("-4.11432e-05") + std = float("0.0028055") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.797229") + max_val = float("1.67957") + mean = float("0.205711") + std = float("0.338337") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.583548") + max_val = float("1.53646") + mean = float("0.971373") + std = float("0.108279") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00968032") + max_val = float("0.326665") + mean 
= float("0.0366729") + std = float("0.0364719") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.384641") + max_val = float("0.143155") + mean = float("-0.0453675") + std = float("0.063487") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.162906") + max_val = float("0.103755") + mean = float("-0.000761071") + std = float("0.0121564") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.282247") + max_val = float("0.281851") + mean = float("0.00710046") + std = float("0.11669") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.77923") + max_val = float("1.27017") + mean = float("0.926291") + std = float("0.0711138") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00244381") + max_val = float("0.0367146") + mean = float("0.014305") + std = float("0.0077109") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0509503") + max_val = float("0.0931656") + mean = float("-0.0103877") + std = float("0.0225298") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.10638") + max_val = float("0.0638405") + mean = float("-0.00110258") + std = float("0.00883031") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.282247") + max_val = float("0.281851") + mean = float("0.00710046") + std = float("0.11669") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.686595") + max_val = float("1.34871") + mean = float("1.04697") + std = float("0.0915013") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00849613") + max_val = float("0.0476423") + mean = float("0.0241855") + std = float("0.00965118") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.176894") + max_val = float("0.124313") + mean = float("-0.0161684") + std = float("0.0511988") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.074123") + max_val = float("0.0992917") + mean = float("-0.000128278") + std = float("0.00660925") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.457141") + max_val = float("0.284175") + mean = float("-0.137858") + std = float("0.150133") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.828432") + max_val = float("1.7552") + mean = float("1.0098") + std = float("0.141163") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = 
float("0.0197657") + max_val = float("0.21109") + mean = float("0.0558951") + std = float("0.0328392") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.177509") + max_val = float("0.117139") + mean = float("-0.0272736") + std = float("0.0445487") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0848417") + max_val = float("0.0927675") + mean = float("-0.000388559") + std = float("0.00736544") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.394379") + max_val = float("0.00738925") + mean = float("-0.148925") + std = float("0.0838505") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.717037") + max_val = float("0.995563") + mean = float("0.881515") + std = float("0.0561052") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00228925") + max_val = float("0.0352953") + mean = float("0.0132225") + std = float("0.00553136") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0407708") + max_val = float("0.0474609") + mean = float("0.00908712") + std = float("0.0191192") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0741001") + max_val = float("0.0598664") + mean = float("-0.000306918") + std = float("0.00908011") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.394379") + max_val = float("0.00738925") + mean = float("-0.148925") + std = float("0.0838505") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.805436") + max_val = float("1.22568") + mean = float("1.05162") + std = float("0.0896051") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00804892") + max_val = float("0.0701516") + mean = float("0.0242923") + std = float("0.0134998") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.12638") + max_val = float("0.0436454") + mean = float("-0.0376916") + std = float("0.0295355") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0809044") + max_val = float("0.126721") + mean = float("-0.00048597") + std = float("0.00701167") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.497069") + max_val = float("0.0662031") + mean = float("-0.208094") + std = float("0.125894") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.70186") + max_val = float("1.54155") + mean = float("0.99652") + std = float("0.130955") + data = None + + +class Program_weight_tensor_parameter_162: + name = 
"parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0257306") + max_val = float("0.191174") + mean = float("0.0583405") + std = float("0.0285068") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.124903") + max_val = float("0.0136205") + mean = float("-0.0566505") + std = float("0.0340341") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0825468") + max_val = float("0.0998638") + mean = float("-0.000585607") + std = float("0.00815449") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.488308") + max_val = float("0.0969995") + mean = float("-0.1903") + std = float("0.108395") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.693424") + max_val = float("1.02499") + mean = float("0.878418") + std = float("0.0578356") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00563897") + max_val = float("0.0250253") + mean = float("0.0115188") + std = float("0.00399237") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0555785") + max_val = float("0.0182289") + mean = float("-0.0150525") + std = float("0.0144486") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0682249") + max_val = float("0.0726027") + mean = float("-0.00190267") + std = float("0.0112706") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.488308") + max_val = float("0.0969995") + mean = float("-0.1903") + std = float("0.108395") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.64497") + max_val = float("1.26041") + mean = float("1.02347") + std = float("0.096841") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0135838") + max_val = float("0.108919") + mean = float("0.0303804") + std = float("0.0148297") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.0953459") + max_val = float("0.045938") + mean = float("-0.0243831") + std = float("0.0289356") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0690141") + max_val = float("0.0907724") + mean = float("-0.000336498") + std = float("0.00782096") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.751265") + max_val = float("0.0462032") + mean = float("-0.246976") + std = float("0.15091") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.715124") + max_val = float("1.24904") + mean = float("0.989016") + std = float("0.0990378") + data = None + 
+ +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.019195") + max_val = float("0.100826") + mean = float("0.0410936") + std = float("0.0178574") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.191045") + max_val = float("0.167209") + mean = float("-0.0509906") + std = float("0.0585368") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.109709") + max_val = float("0.102873") + mean = float("-0.000302796") + std = float("0.00950446") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.682344") + max_val = float("0.560615") + mean = float("-0.183267") + std = float("0.262805") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.650671") + max_val = float("1.28138") + mean = float("0.926136") + std = float("0.127332") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0166935") + max_val = float("0.0740427") + mean = float("0.0351758") + std = float("0.0134938") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.139363") + max_val = float("0.124573") + mean = float("-0.0021096") + std = float("0.0513469") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.176164") + max_val = float("0.183176") + mean = float("-0.000547214") + std = float("0.0128837") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.163476") + max_val = float("0.175629") + mean = float("0.0382556") + std = float("0.0657315") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.668729") + max_val = float("1.38231") + mean = float("0.957819") + std = float("0.134079") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.0053491") + max_val = float("0.0408792") + mean = float("0.0111961") + std = float("0.00625857") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0998904") + max_val = float("0.112465") + mean = float("-0.0129474") + std = float("0.0345809") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.10245") + max_val = float("0.143631") + mean = float("-0.000321447") + std = float("0.0083028") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.339438") + max_val = float("0.0668367") + mean = float("-0.126421") + std = float("0.0576214") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.709181") + max_val = float("1.31558") + mean = 
float("0.850576") + std = float("0.0685498") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00982368") + max_val = float("0.120733") + mean = float("0.0222883") + std = float("0.0126702") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.158372") + max_val = float("0.119076") + mean = float("-0.0375286") + std = float("0.0397228") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0618403") + max_val = float("0.087686") + mean = float("-0.000670588") + std = float("0.00876173") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.246737") + max_val = float("0.0626357") + mean = float("-0.104556") + std = float("0.0398377") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.86139") + max_val = float("1.36552") + mean = float("1.03591") + std = float("0.0538437") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.0112363") + max_val = float("0.181727") + mean = float("0.0226795") + std = float("0.0127742") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.257774") + max_val = float("0.11719") + mean = float("-0.0706271") + std = float("0.0443703") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0766884") + max_val = float("0.0811282") + mean = float("-0.000905669") + std = float("0.00825814") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.163787") + max_val = float("-0.0178854") + mean = float("-0.0640593") + std = float("0.0247121") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.863464") + max_val = float("1.00313") + mean = float("0.954504") + std = float("0.0217909") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00426603") + max_val = float("0.0274319") + mean = float("0.00904264") + std = float("0.00343389") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.128303") + max_val = float("0.0856769") + mean = float("-0.0260934") + std = float("0.0402736") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0361114") + max_val = float("0.0350956") + mean = float("-0.000748032") + std = float("0.00552871") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.163787") + max_val = float("-0.0178854") + mean = float("-0.0640593") + std = float("0.0247121") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + 
dtype = "float32" + min_val = float("0.931794") + max_val = float("1.03438") + mean = float("0.986242") + std = float("0.0213097") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0162213") + max_val = float("0.15521") + mean = float("0.0388931") + std = float("0.0172201") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.27218") + max_val = float("0.153326") + mean = float("-0.0421112") + std = float("0.072068") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0286582") + max_val = float("0.0402364") + mean = float("-0.000141068") + std = float("0.00300371") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.26148") + max_val = float("0.0033202") + mean = float("-0.0610375") + std = float("0.0375229") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.955773") + max_val = float("1.12848") + mean = float("1.0249") + std = float("0.0296608") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0478933") + max_val = float("0.390037") + mean = float("0.10908") + std = float("0.0501276") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.460456") + max_val = float("0.58138") + mean = float("-0.0865142") + std = float("0.149413") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0310591") + max_val = float("0.0529931") + mean = float("-0.000143811") + std = float("0.00358108") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.141458") + max_val = float("-0.0079347") + mean = float("-0.0529687") + std = float("0.0249688") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.959304") + max_val = float("1.04419") + mean = float("0.9895") + std = float("0.0122273") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00246453") + max_val = float("0.0136491") + mean = float("0.00528701") + std = float("0.00174133") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0640118") + max_val = float("0.0474594") + mean = float("-0.0144983") + std = float("0.0208838") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.031823") + max_val = float("0.0528765") + mean = float("-0.000451702") + std = float("0.00561687") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.141458") + max_val = float("-0.0079347") + mean = float("-0.0529687") + std = float("0.0249688") + data = None + + +class 
Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.975086") + max_val = float("1.08421") + mean = float("1.01035") + std = float("0.0198136") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0102") + max_val = float("0.0531645") + mean = float("0.0191559") + std = float("0.0068919") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.158448") + max_val = float("0.0748376") + mean = float("-0.0355524") + std = float("0.0377289") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0272448") + max_val = float("0.0453462") + mean = float("-0.000119593") + std = float("0.00283721") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.176533") + max_val = float("-0.0140918") + mean = float("-0.0627926") + std = float("0.0255225") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.959453") + max_val = float("1.17829") + mean = float("1.01679") + std = float("0.0291818") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.049348") + max_val = float("0.418024") + mean = float("0.115489") + std = float("0.0507256") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.807392") + max_val = float("0.359917") + mean = float("-0.202071") + std = float("0.159679") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0318035") + max_val = float("0.0525248") + mean = float("-0.000289932") + std = float("0.00370175") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.101496") + max_val = float("0.00229944") + mean = float("-0.0432151") + std = float("0.017968") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.948209") + max_val = float("1.06033") + mean = float("0.999231") + std = float("0.0183132") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00265606") + max_val = float("0.0159575") + mean = float("0.00530049") + std = float("0.0019803") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0698203") + max_val = float("0.09676") + mean = float("-0.0136201") + std = float("0.0227753") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0267064") + max_val = float("0.0456144") + mean = float("-0.000432224") + std = float("0.0062217") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.101496") + max_val = float("0.00229944") + 
mean = float("-0.0432151") + std = float("0.017968") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.965303") + max_val = float("1.08528") + mean = float("1.00108") + std = float("0.0207208") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00866238") + max_val = float("0.0846476") + mean = float("0.0232237") + std = float("0.0107339") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.172128") + max_val = float("0.121261") + mean = float("-0.0385422") + std = float("0.048641") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0216114") + max_val = float("0.0354893") + mean = float("-0.00013956") + std = float("0.00290758") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.204286") + max_val = float("-0.0144314") + mean = float("-0.0931084") + std = float("0.0313584") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.924946") + max_val = float("1.13197") + mean = float("1.03248") + std = float("0.033681") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0171774") + max_val = float("0.151451") + mean = float("0.0404337") + std = float("0.0194303") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.214493") + max_val = float("0.18665") + mean = float("-0.031976") + std = float("0.0594646") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0401705") + max_val = float("0.0541615") + mean = float("-0.000152877") + std = float("0.00458084") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.283235") + max_val = float("-0.0399838") + mean = float("-0.14066") + std = float("0.0427785") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.926396") + max_val = float("1.23818") + mean = float("1.04777") + std = float("0.044691") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00760291") + max_val = float("0.0292301") + mean = float("0.0135064") + std = float("0.00386928") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.082307") + max_val = float("0.240184") + mean = float("0.034674") + std = float("0.0405637") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0633244") + max_val = float("0.0929278") + mean = float("-0.000269954") + std = float("0.00689158") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = 
"float32" + min_val = float("-0.259346") + max_val = float("0.089777") + mean = float("-0.101264") + std = float("0.0626651") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.942808") + max_val = float("1.43592") + mean = float("1.10635") + std = float("0.0692502") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00933576") + max_val = float("0.0689556") + mean = float("0.0172674") + std = float("0.00566401") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0892463") + max_val = float("0.100943") + mean = float("0.0114547") + std = float("0.0327796") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0570768") + max_val = float("0.156442") + mean = float("-0.000259374") + std = float("0.0074107") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.294876") + max_val = float("-0.0604914") + mean = float("-0.156798") + std = float("0.0456771") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.724694") + max_val = float("1.03284") + mean = float("0.862991") + std = float("0.0407095") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.0113886") + max_val = float("0.0802941") + mean = float("0.0276917") + std = float("0.0102752") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.162031") + max_val = float("0.204487") + mean = float("-0.0755192") + std = float("0.0459278") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0292396") + max_val = float("0.0392698") + mean = float("-0.000553119") + std = float("0.00501583") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.119028") + max_val = float("0.0118783") + mean = float("-0.0644734") + std = float("0.0183049") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.943984") + max_val = float("1.15965") + mean = float("1.02714") + std = float("0.0265502") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00981212") + max_val = float("0.0919141") + mean = float("0.019193") + std = float("0.00623331") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.163952") + max_val = float("0.165282") + mean = float("-0.0496244") + std = float("0.0363468") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0555945") + max_val = float("0.0656988") + mean = float("-0.000287705") + std = float("0.00431166") + data = None + + +class 
Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.185724") + max_val = float("0.101576") + mean = float("-0.0493899") + std = float("0.0232288") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.895849") + max_val = float("1.01586") + mean = float("0.977906") + std = float("0.0132131") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00471585") + max_val = float("0.0485148") + mean = float("0.0128933") + std = float("0.00549912") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0800717") + max_val = float("0.0775817") + mean = float("-0.0161284") + std = float("0.027053") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0257666") + max_val = float("0.0430036") + mean = float("-0.000213548") + std = float("0.0035453") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.185724") + max_val = float("0.101576") + mean = float("-0.0493899") + std = float("0.0232288") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.894262") + max_val = float("1.09303") + mean = float("0.980357") + std = float("0.0138837") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0282483") + max_val = float("0.349296") + mean = float("0.0853497") + std = float("0.0378846") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.301014") + max_val = float("0.153614") + mean = float("-0.0944117") + std = float("0.0767887") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0401543") + max_val = float("0.0468426") + mean = float("-0.000144951") + std = float("0.00128804") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0960581") + max_val = float("0.159253") + mean = float("-0.016533") + std = float("0.0220565") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.948727") + max_val = float("1.24141") + mean = float("1.01995") + std = float("0.03235") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0143644") + max_val = float("0.251509") + mean = float("0.0688272") + std = float("0.0333456") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.226292") + max_val = float("0.268143") + mean = float("-0.0368348") + std = float("0.087958") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0312562") + max_val = float("0.0430391") + mean 
= float("-5.96221e-05") + std = float("0.00162276") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.1174") + max_val = float("0.0562049") + mean = float("-0.0226758") + std = float("0.0171998") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.959883") + max_val = float("1.14924") + mean = float("1.02097") + std = float("0.0314417") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0684558") + max_val = float("0.632888") + mean = float("0.21699") + std = float("0.0818797") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-2.54336") + max_val = float("2.10932") + mean = float("-0.081684") + std = float("0.72804") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0302692") + max_val = float("0.0501981") + mean = float("3.98672e-05") + std = float("0.00292002") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0227048") + max_val = float("0.0393072") + mean = float("-0.00516438") + std = float("0.00791319") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.954966") + max_val = float("1.13128") + mean = float("0.992945") + std = float("0.0198847") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00285281") + max_val = float("0.0151266") + mean = float("0.00653893") + std = float("0.00215647") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.103457") + max_val = float("0.0644516") + mean = float("-0.0399251") + std = float("0.0284929") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0241085") + max_val = float("0.0314178") + mean = float("-0.000494811") + std = float("0.00322186") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0227048") + max_val = float("0.0393072") + mean = float("-0.00516438") + std = float("0.00791319") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.942703") + max_val = float("1.15329") + mean = float("1.00343") + std = float("0.0304117") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0131764") + max_val = float("0.0914844") + mean = float("0.038932") + std = float("0.0152216") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.27583") + max_val = float("0.129426") + mean = float("-0.115384") + std = float("0.0654124") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = 
"float32" + min_val = float("-0.00991428") + max_val = float("0.0235268") + mean = float("-0.000178382") + std = float("0.00124102") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0484464") + max_val = float("0.0216319") + mean = float("-0.00460097") + std = float("0.00967076") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.963084") + max_val = float("1.21833") + mean = float("1.01835") + std = float("0.0242792") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0846965") + max_val = float("0.477757") + mean = float("0.218773") + std = float("0.0720263") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.09295") + max_val = float("1.28986") + mean = float("-0.341772") + std = float("0.305414") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0119066") + max_val = float("0.018937") + mean = float("-0.000190711") + std = float("0.00143766") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0328709") + max_val = float("0.0296847") + mean = float("0.00167559") + std = float("0.0101838") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.982379") + max_val = float("1.06457") + mean = float("1.00463") + std = float("0.00875427") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00182393") + max_val = float("0.0117111") + mean = float("0.00446161") + std = float("0.00149495") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0690861") + max_val = float("0.16253") + mean = float("-0.0277446") + std = float("0.0287758") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0149167") + max_val = float("0.024544") + mean = float("-0.00035583") + std = float("0.00252494") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0328709") + max_val = float("0.0296847") + mean = float("0.00167559") + std = float("0.0101838") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.972532") + max_val = float("1.06753") + mean = float("1.00429") + std = float("0.0130159") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00916589") + max_val = float("0.0757382") + mean = float("0.026734") + std = float("0.00932033") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.212158") + max_val = float("0.398515") + mean = float("-0.0802837") + std = float("0.079264") + data = None + + +class 
Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0093195") + max_val = float("0.0136616") + mean = float("-0.000127923") + std = float("0.000958249") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0472265") + max_val = float("0.0147404") + mean = float("-0.0123072") + std = float("0.0107235") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.97566") + max_val = float("1.10735") + mean = float("1.01405") + std = float("0.0167737") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0120215") + max_val = float("0.0818828") + mean = float("0.0302745") + std = float("0.00902368") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.144457") + max_val = float("0.215418") + mean = float("-0.0320907") + std = float("0.0550216") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00932646") + max_val = float("0.0180928") + mean = float("-5.9912e-05") + std = float("0.00138574") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0636266") + max_val = float("0.0504044") + mean = float("-0.0321583") + std = float("0.01479") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.963145") + max_val = float("1.05901") + mean = float("1.01824") + std = float("0.0133966") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.017764") + max_val = float("0.0645009") + mean = float("0.0305608") + std = float("0.00719559") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.20154") + max_val = float("0.391038") + mean = float("-0.0613575") + std = float("0.0615632") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0154521") + max_val = float("0.0405892") + mean = float("-0.000219009") + std = float("0.00320931") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0181982") + max_val = float("0.0407238") + mean = float("0.0138314") + std = float("0.0107853") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("1.02428") + max_val = float("1.13089") + mean = float("1.07731") + std = float("0.018385") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0297022") + max_val = float("0.0861284") + mean = float("0.0459045") + std = float("0.0083673") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.214848") + max_val = float("0.119899") 
+ mean = float("-0.107077") + std = float("0.0483314") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0202669") + max_val = float("0.0292336") + mean = float("-0.000379697") + std = float("0.00443318") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.76738") + max_val = float("-0.74061") + mean = float("-2.19149") + std = float("0.429665") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("1.61677") + max_val = float("4.4298") + mean = float("3.07687") + std = float("0.251634") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("0.00318616") + max_val = float("0.0156112") + mean = float("0.00619051") + std = float("0.00132122") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [1024] + dtype = "float32" + min_val = float("-0.151272") + max_val = float("0.141612") + mean = float("-0.0486746") + std = float("0.0293873") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0373496") + max_val = float("0.0729797") + mean = float("-0.000461198") + std = float("0.00502166") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [768] + dtype = "float32" + min_val = float("-0.0138178") + max_val = float("0.000466847") + mean = float("-0.00284092") + std = float("0.00262746") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.119554") + max_val = float("0.123314") + mean = float("-0.00126139") + std = float("0.00352046") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("-1.77338") + max_val = float("0.412596") + mean = float("-0.274223") + std = float("0.294015") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("0.235525") + max_val = float("2.08023") + mean = float("0.662181") + std = float("0.284497") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384] + dtype = "float32" + min_val = float("0.000109661") + max_val = float("0.00172742") + mean = float("0.000430241") + std = float("0.000193304") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.130554") + max_val = float("0.0981204") + mean = float("0.0202639") + std = float("0.0336592") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0274341") + max_val = float("0.0353346") + mean = float("-0.000364024") + std = float("0.0038463") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("-1.77338") + max_val = float("0.412596") + mean = float("-0.274223") + std = float("0.294015") + data = None + + +class Program_weight_tensor_parameter_338: + name = 
"parameter_338" + shape = [384] + dtype = "float32" + min_val = float("0.350868") + max_val = float("2.87683") + mean = float("1.11168") + std = float("0.332422") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384] + dtype = "float32" + min_val = float("0.000733881") + max_val = float("0.0108386") + mean = float("0.002852") + std = float("0.0011359") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.119853") + max_val = float("0.177988") + mean = float("0.00969028") + std = float("0.0360493") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0215559") + max_val = float("0.0430067") + mean = float("-3.73056e-05") + std = float("0.00229731") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("-2.6275") + max_val = float("0.015896") + mean = float("-1.59098") + std = float("0.414959") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("0.47721") + max_val = float("1.87134") + mean = float("1.1266") + std = float("0.14685") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384] + dtype = "float32" + min_val = float("0.120369") + max_val = float("0.515457") + mean = float("0.241946") + std = float("0.0512298") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-1.59625") + max_val = float("1.32189") + mean = float("-0.654999") + std = float("0.281837") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0216367") + max_val = float("0.0531612") + mean = float("-0.000286661") + std = float("0.00308802") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("-1.93672") + max_val = float("1.08927") + mean = float("-0.561118") + std = float("0.377358") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("0.163384") + max_val = float("2.03243") + mean = float("0.568654") + std = float("0.23058") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384] + dtype = "float32" + min_val = float("0.000211366") + max_val = float("0.00209824") + mean = float("0.000563789") + std = float("0.000253525") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0508295") + max_val = float("0.116156") + mean = float("0.0281283") + std = float("0.0230871") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0278888") + max_val = float("0.0403076") + mean = float("-0.000537846") + std = float("0.00370404") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("-1.93672") + max_val = float("1.08927") + mean = float("-0.561118") + std = float("0.377358") + data = 
None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("0.579866") + max_val = float("2.2087") + mean = float("1.11942") + std = float("0.260049") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384] + dtype = "float32" + min_val = float("0.00147811") + max_val = float("0.014418") + mean = float("0.00450143") + std = float("0.00147944") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.198479") + max_val = float("0.184089") + mean = float("0.0300434") + std = float("0.0432483") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0243065") + max_val = float("0.0531256") + mean = float("-8.65998e-05") + std = float("0.00245086") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.40101") + max_val = float("0.85896") + mean = float("-1.44185") + std = float("0.356455") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.363523") + max_val = float("1.90593") + mean = float("1.14998") + std = float("0.141316") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("0.0902803") + max_val = float("0.317551") + mean = float("0.162194") + std = float("0.0353807") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.96643") + max_val = float("1.13281") + mean = float("-0.366938") + std = float("0.183667") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0266427") + max_val = float("0.0728291") + mean = float("-0.000293523") + std = float("0.00309007") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("-1.88671") + max_val = float("0.68709") + mean = float("-0.470258") + std = float("0.394922") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("0.081144") + max_val = float("2.10167") + mean = float("0.444389") + std = float("0.213461") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384] + dtype = "float32" + min_val = float("0.000181328") + max_val = float("0.00238775") + mean = float("0.000598608") + std = float("0.000263232") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.122912") + max_val = float("0.116066") + mean = float("0.0424948") + std = float("0.0250559") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.035118") + max_val = float("0.0374998") + mean = float("-0.000747404") + std = float("0.00319117") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("-1.88671") + max_val = float("0.68709") 
+ mean = float("-0.470258") + std = float("0.394922") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("0.542057") + max_val = float("2.23163") + mean = float("1.09037") + std = float("0.258504") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384] + dtype = "float32" + min_val = float("0.00171885") + max_val = float("0.0206226") + mean = float("0.00584658") + std = float("0.00207454") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.160396") + max_val = float("0.169188") + mean = float("0.0475079") + std = float("0.0427583") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0204974") + max_val = float("0.0381036") + mean = float("-0.000108104") + std = float("0.00256466") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-2.18087") + max_val = float("0.356852") + mean = float("-1.40712") + std = float("0.272169") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.61486") + max_val = float("1.63036") + mean = float("1.12182") + std = float("0.102341") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("0.0686106") + max_val = float("0.300719") + mean = float("0.120296") + std = float("0.0324319") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-1.00958") + max_val = float("0.205067") + mean = float("-0.252895") + std = float("0.152032") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0201776") + max_val = float("0.0515865") + mean = float("-0.000247363") + std = float("0.00287082") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-2.9181") + max_val = float("2.39818") + mean = float("-0.744825") + std = float("0.666127") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("1.00684") + max_val = float("2.89816") + mean = float("1.92298") + std = float("0.268903") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.00323555") + max_val = float("0.0164731") + mean = float("0.00686751") + std = float("0.00157055") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.29054") + max_val = float("0.150754") + mean = float("0.0807349") + std = float("0.0368912") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0488105") + max_val = float("0.0678759") + mean = float("-0.000895552") + std = float("0.00678193") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" 
+ min_val = float("-2.26258") + max_val = float("0.746331") + mean = float("-0.781383") + std = float("0.476609") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.922112") + max_val = float("2.88383") + mean = float("2.09535") + std = float("0.309922") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.0011051") + max_val = float("0.00520625") + mean = float("0.00250761") + std = float("0.00049518") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.0778026") + max_val = float("0.0934732") + mean = float("0.049973") + std = float("0.0213988") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0427813") + max_val = float("0.0835518") + mean = float("-0.000478385") + std = float("0.00455026") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [768] + dtype = "float32" + min_val = float("-2.45583") + max_val = float("0.559171") + mean = float("-0.951569") + std = float("0.337772") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [768] + dtype = "float32" + min_val = float("0.470726") + max_val = float("1.80127") + mean = float("0.915463") + std = float("0.147887") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [768] + dtype = "float32" + min_val = float("0.0124823") + max_val = float("0.0978023") + mean = float("0.0216824") + std = float("0.00583908") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [768] + dtype = "float32" + min_val = float("-0.476605") + max_val = float("0.316374") + mean = float("0.0538298") + std = float("0.0770574") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.033942") + max_val = float("0.048741") + mean = float("-0.00014743") + std = float("0.00292614") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [512] + dtype = "float32" + min_val = float("-3.41671") + max_val = float("1.90925") + mean = float("-1.24212") + std = float("0.518646") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [512] + dtype = "float32" + min_val = float("0.417792") + max_val = float("1.62807") + mean = float("1.12265") + std = float("0.144739") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [512] + dtype = "float32" + min_val = float("0.00686698") + max_val = float("0.0300204") + mean = float("0.0132355") + std = float("0.00263341") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [512] + dtype = "float32" + min_val = float("-0.204822") + max_val = float("0.0997006") + mean = float("-0.0635602") + std = float("0.0500124") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0779687") + max_val = float("0.204082") + mean = float("-0.000758003") + std = float("0.00949015") + data = None + + +class 
Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-0.010737") + max_val = float("0.00241083") + mean = float("-0.00328027") + std = float("0.00285957") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.214148") + max_val = float("0.108238") + mean = float("-0.00206829") + std = float("0.00564515") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [192] + dtype = "float32" + min_val = float("-1.9212") + max_val = float("0.337783") + mean = float("-0.37118") + std = float("0.319858") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [192] + dtype = "float32" + min_val = float("0.000863766") + max_val = float("2.25766") + mean = float("0.550683") + std = float("0.447335") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [192] + dtype = "float32" + min_val = float("5.24031e-07") + max_val = float("0.00212704") + mean = float("0.000573903") + std = float("0.000337641") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [192] + dtype = "float32" + min_val = float("-0.0361206") + max_val = float("0.0752975") + mean = float("0.0112954") + std = float("0.019226") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0324994") + max_val = float("0.0613531") + mean = float("-0.000532142") + std = float("0.00461667") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [192] + dtype = "float32" + min_val = float("-1.9212") + max_val = float("0.337783") + mean = float("-0.37118") + std = float("0.319858") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [192] + dtype = "float32" + min_val = float("0.331404") + max_val = float("2.85491") + mean = float("1.19184") + std = float("0.517709") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [192] + dtype = "float32" + min_val = float("0.00140744") + max_val = float("0.0176487") + mean = float("0.00594824") + std = float("0.00242133") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [192] + dtype = "float32" + min_val = float("-0.149957") + max_val = float("0.183059") + mean = float("0.0286224") + std = float("0.0529416") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0304763") + max_val = float("0.0467537") + mean = float("-0.000182961") + std = float("0.00351809") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [192] + dtype = "float32" + min_val = float("-2.92889") + max_val = float("-0.215128") + mean = float("-1.34775") + std = float("0.401955") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [192] + dtype = "float32" + min_val = float("0.691777") + max_val = float("2.01593") + mean = float("1.16727") + std = float("0.167346") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [192] + dtype = "float32" + min_val = float("0.12144") + max_val = float("0.513783") + mean 
= float("0.2084") + std = float("0.0576939") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [192] + dtype = "float32" + min_val = float("-3.52808") + max_val = float("1.58137") + mean = float("-0.298547") + std = float("0.409608") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0306673") + max_val = float("0.0446454") + mean = float("-0.000270107") + std = float("0.00414217") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [192] + dtype = "float32" + min_val = float("-1.93263") + max_val = float("0.441273") + mean = float("-0.309778") + std = float("0.3114") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [192] + dtype = "float32" + min_val = float("1.31901e-05") + max_val = float("1.74196") + mean = float("0.401521") + std = float("0.316685") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [192] + dtype = "float32" + min_val = float("2.4925e-10") + max_val = float("0.00266824") + mean = float("0.000571331") + std = float("0.000381815") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [192] + dtype = "float32" + min_val = float("-0.0718396") + max_val = float("0.0473328") + mean = float("0.0120177") + std = float("0.0141073") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0369971") + max_val = float("0.0474525") + mean = float("-0.000537935") + std = float("0.00432741") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [192] + dtype = "float32" + min_val = float("-1.93263") + max_val = float("0.441273") + mean = float("-0.309778") + std = float("0.3114") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [192] + dtype = "float32" + min_val = float("0.42444") + max_val = float("2.26483") + mean = float("1.12589") + std = float("0.378215") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [192] + dtype = "float32" + min_val = float("0.0024206") + max_val = float("0.0137874") + mean = float("0.00667971") + std = float("0.00197733") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [192] + dtype = "float32" + min_val = float("-0.156973") + max_val = float("0.138921") + mean = float("0.0395744") + std = float("0.0387458") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0297468") + max_val = float("0.061426") + mean = float("-0.000214963") + std = float("0.0038424") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [192] + dtype = "float32" + min_val = float("-2.52744") + max_val = float("-0.176752") + mean = float("-1.32827") + std = float("0.439675") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [192] + dtype = "float32" + min_val = float("0.648699") + max_val = float("1.69649") + mean = float("1.1831") + std = float("0.164999") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [192] + dtype = "float32" + 
min_val = float("0.0811482") + max_val = float("0.262619") + mean = float("0.148698") + std = float("0.0343621") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [192] + dtype = "float32" + min_val = float("-2.61184") + max_val = float("0.442873") + mean = float("-0.178286") + std = float("0.277058") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0353353") + max_val = float("0.0475915") + mean = float("-0.000311195") + std = float("0.00440006") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [192] + dtype = "float32" + min_val = float("-1.76929") + max_val = float("0.366299") + mean = float("-0.296944") + std = float("0.323923") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [192] + dtype = "float32" + min_val = float("1.65802e-05") + max_val = float("1.6632") + mean = float("0.314337") + std = float("0.259063") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [192] + dtype = "float32" + min_val = float("1.21154e-10") + max_val = float("0.00374479") + mean = float("0.000521225") + std = float("0.000463093") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [192] + dtype = "float32" + min_val = float("-0.052794") + max_val = float("0.0958051") + mean = float("0.0142202") + std = float("0.0171986") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0479075") + max_val = float("0.0473163") + mean = float("-0.000576931") + std = float("0.00422766") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [192] + dtype = "float32" + min_val = float("-1.76929") + max_val = float("0.366299") + mean = float("-0.296944") + std = float("0.323923") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [192] + dtype = "float32" + min_val = float("0.376374") + max_val = float("1.96514") + mean = float("1.05296") + std = float("0.33661") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [192] + dtype = "float32" + min_val = float("0.00319475") + max_val = float("0.0156312") + mean = float("0.0069418") + std = float("0.00223012") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [192] + dtype = "float32" + min_val = float("-0.138894") + max_val = float("0.117271") + mean = float("0.0385623") + std = float("0.0372869") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0312578") + max_val = float("0.0469487") + mean = float("-0.000193498") + std = float("0.00398813") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.54978") + max_val = float("0.123303") + mean = float("-1.28641") + std = float("0.420062") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.65748") + max_val = float("1.74041") + mean = float("1.14789") + std = float("0.166573") + data = None + + +class 
Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("0.0607768") + max_val = float("0.189401") + mean = float("0.103113") + std = float("0.0244967") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-1.77358") + max_val = float("0.232054") + mean = float("-0.181866") + std = float("0.205799") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.035669") + max_val = float("0.0555704") + mean = float("-0.000362505") + std = float("0.00447118") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-2.08733") + max_val = float("0.454803") + mean = float("-0.308331") + std = float("0.365117") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("4.74169e-06") + max_val = float("0.678347") + mean = float("0.185861") + std = float("0.145635") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("1.05153e-11") + max_val = float("0.00148836") + mean = float("0.000320402") + std = float("0.000237363") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.0594774") + max_val = float("0.0478072") + mean = float("0.00930335") + std = float("0.0146157") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0234346") + max_val = float("0.0490819") + mean = float("-0.000384362") + std = float("0.00376562") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.08733") + max_val = float("0.454803") + mean = float("-0.308331") + std = float("0.365117") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.39604") + max_val = float("1.9294") + mean = float("0.959521") + std = float("0.305318") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.00246386") + max_val = float("0.018286") + mean = float("0.00690414") + std = float("0.00229679") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-0.0963721") + max_val = float("0.139837") + mean = float("0.0375609") + std = float("0.0391308") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0382843") + max_val = float("0.0441675") + mean = float("-0.000189265") + std = float("0.00417807") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-2.82542") + max_val = float("-0.165072") + mean = float("-1.2819") + std = float("0.428551") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.761717") + max_val = float("1.48005") + 
mean = float("1.12799") + std = float("0.136186") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("0.0437385") + max_val = float("0.134822") + mean = float("0.0772096") + std = float("0.0171041") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-1.32712") + max_val = float("0.174196") + mean = float("-0.153249") + std = float("0.161134") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0470685") + max_val = float("0.0540297") + mean = float("-0.000367007") + std = float("0.00453239") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.21591") + max_val = float("0.408817") + mean = float("-0.268583") + std = float("0.326001") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("1.13966e-07") + max_val = float("0.671077") + mean = float("0.16575") + std = float("0.128785") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("4.36695e-14") + max_val = float("0.00121919") + mean = float("0.000288819") + std = float("0.000228404") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0449358") + max_val = float("0.0625559") + mean = float("0.00941777") + std = float("0.0159613") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0445329") + max_val = float("0.0469969") + mean = float("-0.000357929") + std = float("0.00375663") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-1.21591") + max_val = float("0.408817") + mean = float("-0.268583") + std = float("0.326001") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.342519") + max_val = float("1.57267") + mean = float("0.856133") + std = float("0.266894") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.00236989") + max_val = float("0.012972") + mean = float("0.0063056") + std = float("0.00193128") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-0.108302") + max_val = float("0.156299") + mean = float("0.0414147") + std = float("0.0423383") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0362289") + max_val = float("0.0435947") + mean = float("-0.000180448") + std = float("0.00414403") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-2.55484") + max_val = float("-0.225384") + mean = float("-1.29607") + std = float("0.412734") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + 
dtype = "float32" + min_val = float("0.652885") + max_val = float("1.46467") + mean = float("1.10229") + std = float("0.128509") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("0.0350575") + max_val = float("0.115971") + mean = float("0.0597254") + std = float("0.016366") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.464018") + max_val = float("0.240961") + mean = float("-0.124572") + std = float("0.139434") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0705414") + max_val = float("0.0741061") + mean = float("-0.000347681") + std = float("0.00450759") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.25114") + max_val = float("0.496985") + mean = float("-0.195696") + std = float("0.286609") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("6.01345e-05") + max_val = float("1.52688") + mean = float("0.227253") + std = float("0.219405") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("2.92882e-09") + max_val = float("0.00984128") + mean = float("0.00070971") + std = float("0.00095936") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0592165") + max_val = float("0.107792") + mean = float("0.0157093") + std = float("0.0208809") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.055296") + max_val = float("0.0451825") + mean = float("-0.000685397") + std = float("0.0046336") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-1.25114") + max_val = float("0.496985") + mean = float("-0.195696") + std = float("0.286609") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.361257") + max_val = float("1.38672") + mean = float("0.77414") + std = float("0.223602") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.00343985") + max_val = float("0.024217") + mean = float("0.0103742") + std = float("0.00341779") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-0.109521") + max_val = float("0.160426") + mean = float("0.0591583") + std = float("0.050798") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.052784") + max_val = float("0.0464242") + mean = float("-0.000289158") + std = float("0.00403919") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.93294") + max_val = float("-0.217802") + mean = float("-1.1784") + std = float("0.32417") + data = None + + +class 
Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.740846") + max_val = float("1.58017") + mean = float("1.10678") + std = float("0.141855") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("0.0251568") + max_val = float("0.110892") + mean = float("0.053406") + std = float("0.0180815") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-1.08676") + max_val = float("0.26685") + mean = float("-0.082947") + std = float("0.142479") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0681332") + max_val = float("0.0849403") + mean = float("-0.000286395") + std = float("0.00425012") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.8666") + max_val = float("1.61594") + mean = float("-0.0521098") + std = float("0.754176") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.385687") + max_val = float("2.01902") + mean = float("0.957509") + std = float("0.228151") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.0120072") + max_val = float("0.108766") + mean = float("0.0349151") + std = float("0.0150818") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.256396") + max_val = float("0.483456") + mean = float("-0.0666677") + std = float("0.0824818") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.115145") + max_val = float("0.117107") + mean = float("-0.000804966") + std = float("0.00946446") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.99057") + max_val = float("1.19128") + mean = float("0.0645638") + std = float("0.651107") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.897146") + max_val = float("5.48142") + mean = float("1.93592") + std = float("0.914038") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.00686488") + max_val = float("0.0692764") + mean = float("0.0235044") + std = float("0.0082593") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.189901") + max_val = float("0.13753") + mean = float("-0.032472") + std = float("0.0640675") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0766857") + max_val = float("0.113871") + mean = float("-0.00064456") + std = float("0.00873627") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [384] + dtype = "float32" + min_val = float("-2.88369") + max_val = float("1.29531") + mean = 
float("-0.324976") + std = float("0.576845") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [384] + dtype = "float32" + min_val = float("0.63927") + max_val = float("2.42129") + mean = float("1.16101") + std = float("0.261198") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [384] + dtype = "float32" + min_val = float("0.0160522") + max_val = float("0.14684") + mean = float("0.035732") + std = float("0.018691") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [384] + dtype = "float32" + min_val = float("-0.444625") + max_val = float("0.224227") + mean = float("0.0242203") + std = float("0.0770511") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0733418") + max_val = float("0.082808") + mean = float("-0.000120386") + std = float("0.00455694") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [256] + dtype = "float32" + min_val = float("-2.17306") + max_val = float("1.35014") + mean = float("-0.983635") + std = float("0.560538") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [256] + dtype = "float32" + min_val = float("0.525409") + max_val = float("1.72142") + mean = float("1.09457") + std = float("0.181495") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [256] + dtype = "float32" + min_val = float("0.00327494") + max_val = float("0.0357364") + mean = float("0.00983111") + std = float("0.00444547") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [256] + dtype = "float32" + min_val = float("-0.281973") + max_val = float("0.212193") + mean = float("-0.0584459") + std = float("0.0749526") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.228144") + max_val = float("0.18937") + mean = float("-0.000934833") + std = float("0.0150912") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-0.0160867") + max_val = float("0.00291467") + mean = float("-0.00513708") + std = float("0.00438686") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.301238") + max_val = float("0.279827") + mean = float("-0.0036602") + std = float("0.0109363") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [96] + dtype = "float32" + min_val = float("-1.88507") + max_val = float("0.408619") + mean = float("-0.264055") + std = float("0.417878") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [96] + dtype = "float32" + min_val = float("0.075817") + max_val = float("3.31686") + mean = float("0.585907") + std = float("0.696242") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [96] + dtype = "float32" + min_val = float("9.73872e-05") + max_val = float("0.00192041") + mean = float("0.000475576") + std = float("0.00034983") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [96] + dtype = "float32" + 
min_val = float("-0.0602378") + max_val = float("0.0724714") + mean = float("0.00884973") + std = float("0.025982") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0575483") + max_val = float("0.0998151") + mean = float("-0.00071542") + std = float("0.00806398") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [96] + dtype = "float32" + min_val = float("-1.88507") + max_val = float("0.408619") + mean = float("-0.264055") + std = float("0.417878") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [96] + dtype = "float32" + min_val = float("0.328165") + max_val = float("5.49297") + mean = float("1.04367") + std = float("0.907579") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [96] + dtype = "float32" + min_val = float("0.00063499") + max_val = float("0.0106201") + mean = float("0.00347763") + std = float("0.00169617") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [96] + dtype = "float32" + min_val = float("-0.207875") + max_val = float("0.204035") + mean = float("0.0224973") + std = float("0.0704149") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0433834") + max_val = float("0.0664612") + mean = float("-0.000206276") + std = float("0.00585214") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.41446") + max_val = float("-0.0351507") + mean = float("-1.27023") + std = float("0.444263") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.478655") + max_val = float("1.73921") + mean = float("0.924149") + std = float("0.17682") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("0.0485854") + max_val = float("0.347665") + mean = float("0.116634") + std = float("0.0397481") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-4.59321") + max_val = float("0.829715") + mean = float("-0.207516") + std = float("0.572839") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.143913") + max_val = float("0.105902") + mean = float("-0.000391481") + std = float("0.0073659") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [96] + dtype = "float32" + min_val = float("-1.40027") + max_val = float("0.433022") + mean = float("-0.187531") + std = float("0.338824") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [96] + dtype = "float32" + min_val = float("0.00378303") + max_val = float("1.87179") + mean = float("0.411102") + std = float("0.370104") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [96] + dtype = "float32" + min_val = float("7.53826e-06") + max_val = float("0.00354419") + mean = float("0.000777177") + std = float("0.000707669") + data = None + + +class Program_weight_tensor_parameter_529: + name = 
"parameter_529" + shape = [96] + dtype = "float32" + min_val = float("-0.0550316") + max_val = float("0.0548868") + mean = float("0.00763059") + std = float("0.0197358") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0516744") + max_val = float("0.091015") + mean = float("-0.000705691") + std = float("0.00756381") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [96] + dtype = "float32" + min_val = float("-1.40027") + max_val = float("0.433022") + mean = float("-0.187531") + std = float("0.338824") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [96] + dtype = "float32" + min_val = float("0.340683") + max_val = float("2.2745") + mean = float("0.862345") + std = float("0.431657") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [96] + dtype = "float32" + min_val = float("0.00255349") + max_val = float("0.0155132") + mean = float("0.00548608") + std = float("0.00239986") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [96] + dtype = "float32" + min_val = float("-0.17823") + max_val = float("0.137982") + mean = float("0.0322525") + std = float("0.046662") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0674981") + max_val = float("0.0570277") + mean = float("-0.00032752") + std = float("0.006034") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [96] + dtype = "float32" + min_val = float("-3.35834") + max_val = float("0.28772") + mean = float("-1.21666") + std = float("0.561104") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [96] + dtype = "float32" + min_val = float("0.416818") + max_val = float("1.89857") + mean = float("1.01497") + std = float("0.241794") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [96] + dtype = "float32" + min_val = float("0.0432234") + max_val = float("0.174066") + mean = float("0.0777276") + std = float("0.0209529") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [96] + dtype = "float32" + min_val = float("-1.16964") + max_val = float("0.598494") + mean = float("-0.139571") + std = float("0.296264") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.159737") + max_val = float("0.152684") + mean = float("-0.000521222") + std = float("0.00746838") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [96] + dtype = "float32" + min_val = float("-1.27259") + max_val = float("0.580805") + mean = float("-0.160154") + std = float("0.285669") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [96] + dtype = "float32" + min_val = float("1.78897e-05") + max_val = float("1.24216") + mean = float("0.291835") + std = float("0.195371") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [96] + dtype = "float32" + min_val = float("9.8845e-11") + max_val = float("0.00345974") + mean = float("0.000742287") + std = float("0.000570769") + data = None + + 
+class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [96] + dtype = "float32" + min_val = float("-0.0514278") + max_val = float("0.0447507") + mean = float("0.00739843") + std = float("0.0166169") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0512371") + max_val = float("0.058203") + mean = float("-0.000637488") + std = float("0.00765631") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [96] + dtype = "float32" + min_val = float("-1.27259") + max_val = float("0.580805") + mean = float("-0.160154") + std = float("0.285669") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [96] + dtype = "float32" + min_val = float("0.228783") + max_val = float("1.64106") + mean = float("0.722497") + std = float("0.264866") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [96] + dtype = "float32" + min_val = float("0.00213447") + max_val = float("0.0186589") + mean = float("0.00667835") + std = float("0.00312485") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [96] + dtype = "float32" + min_val = float("-0.0988952") + max_val = float("0.152065") + mean = float("0.0248752") + std = float("0.0430747") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0680949") + max_val = float("0.071738") + mean = float("-0.000264866") + std = float("0.00622902") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-3.6614") + max_val = float("0.205968") + mean = float("-1.16919") + std = float("0.589988") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.512145") + max_val = float("2.1309") + mean = float("1.02688") + std = float("0.241984") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("0.0345738") + max_val = float("0.101406") + mean = float("0.0616151") + std = float("0.0129723") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-1.10731") + max_val = float("0.608096") + mean = float("-0.0544066") + std = float("0.26029") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.107257") + max_val = float("0.139746") + mean = float("-0.000428188") + std = float("0.00759317") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-0.931411") + max_val = float("0.406494") + mean = float("-0.216229") + std = float("0.275567") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("2.7851e-05") + max_val = float("1.34666") + mean = float("0.301153") + std = float("0.211126") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("2.44855e-10") + max_val = float("0.00276123") + mean = 
float("0.000787333") + std = float("0.000529563") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.0511623") + max_val = float("0.0677894") + mean = float("0.00865361") + std = float("0.0199128") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0714816") + max_val = float("0.0528174") + mean = float("-0.00080202") + std = float("0.00805684") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-0.931411") + max_val = float("0.406494") + mean = float("-0.216229") + std = float("0.275567") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.141158") + max_val = float("1.7851") + mean = float("0.707756") + std = float("0.28548") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.0020027") + max_val = float("0.0193217") + mean = float("0.00726263") + std = float("0.00306558") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-0.118899") + max_val = float("0.18858") + mean = float("0.0361517") + std = float("0.0485395") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0623542") + max_val = float("0.0588107") + mean = float("-0.000362252") + std = float("0.00634091") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-3.19799") + max_val = float("0.0441781") + mean = float("-1.1066") + std = float("0.512528") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.556846") + max_val = float("1.70748") + mean = float("0.988099") + std = float("0.180513") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("0.0248385") + max_val = float("0.353521") + mean = float("0.0516013") + std = float("0.0336755") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-3.41151") + max_val = float("0.407488") + mean = float("-0.0719755") + std = float("0.386288") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0620146") + max_val = float("0.0633394") + mean = float("-0.000448888") + std = float("0.00731171") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-0.974533") + max_val = float("0.654029") + mean = float("-0.172382") + std = float("0.268825") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.0445082") + max_val = float("1.22303") + mean = float("0.290206") + std = float("0.187352") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = 
float("9.51178e-05") + max_val = float("0.00465114") + mean = float("0.00132027") + std = float("0.00087515") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0394857") + max_val = float("0.0556089") + mean = float("0.00847204") + std = float("0.0204605") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0591107") + max_val = float("0.084198") + mean = float("-0.000990773") + std = float("0.00883492") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-0.974533") + max_val = float("0.654028") + mean = float("-0.172382") + std = float("0.268825") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.207034") + max_val = float("1.47417") + mean = float("0.603427") + std = float("0.233027") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.00207586") + max_val = float("0.0251574") + mean = float("0.0107863") + std = float("0.00456461") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-0.110009") + max_val = float("0.147658") + mean = float("0.0285999") + std = float("0.050264") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070149") + max_val = float("0.0470186") + mean = float("-0.000362618") + std = float("0.00621856") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-3.53234") + max_val = float("0.173836") + mean = float("-1.04711") + std = float("0.571399") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.597874") + max_val = float("2.39872") + mean = float("1.0552") + std = float("0.205323") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("0.0185202") + max_val = float("0.112167") + mean = float("0.0429341") + std = float("0.0167429") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.443067") + max_val = float("0.447293") + mean = float("-0.0529377") + std = float("0.167364") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.063488") + max_val = float("0.083032") + mean = float("-0.000377604") + std = float("0.00741283") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-0.693542") + max_val = float("0.601255") + mean = float("-0.0971658") + std = float("0.280506") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.0450765") + max_val = float("1.29061") + mean = float("0.303813") + std = float("0.204412") + data = None + + +class Program_weight_tensor_parameter_588: + name = 
"parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.000388448") + max_val = float("0.0258625") + mean = float("0.00450419") + std = float("0.00431847") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0432957") + max_val = float("0.0269978") + mean = float("-0.00076361") + std = float("0.0138503") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.106785") + max_val = float("0.0776316") + mean = float("-0.00136769") + std = float("0.0107474") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-0.693542") + max_val = float("0.601255") + mean = float("-0.0971658") + std = float("0.280506") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.117864") + max_val = float("1.42918") + mean = float("0.534336") + std = float("0.282789") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.00952757") + max_val = float("0.13009") + mean = float("0.03226") + std = float("0.0210741") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.182934") + max_val = float("0.0815284") + mean = float("0.000193777") + std = float("0.0473769") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.09962") + max_val = float("0.0585965") + mean = float("-0.000472593") + std = float("0.0059293") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-2.17807") + max_val = float("0.517143") + mean = float("-0.845295") + std = float("0.487541") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.772289") + max_val = float("2.3588") + mean = float("1.25972") + std = float("0.22461") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("0.0176127") + max_val = float("0.188596") + mean = float("0.0443549") + std = float("0.0256738") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.978829") + max_val = float("0.469855") + mean = float("-0.0195263") + std = float("0.204639") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.191231") + max_val = float("0.182087") + mean = float("-0.000212875") + std = float("0.00772769") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-3.43026") + max_val = float("1.98535") + mean = float("0.505807") + std = float("0.879788") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.234711") + max_val = float("2.355") + mean = float("0.651409") + std = float("0.28777") + data = None + + +class 
Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.0114431") + max_val = float("0.213174") + mean = float("0.044291") + std = float("0.0324628") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.393192") + max_val = float("0.404316") + mean = float("-0.0345236") + std = float("0.111426") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.192313") + max_val = float("0.22459") + mean = float("-0.000851966") + std = float("0.016247") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-4.88256") + max_val = float("1.51768") + mean = float("0.319321") + std = float("1.02555") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.584545") + max_val = float("6.91604") + mean = float("1.75143") + std = float("1.27947") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.00474677") + max_val = float("0.161777") + mean = float("0.0349508") + std = float("0.0294242") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.246975") + max_val = float("0.361227") + mean = float("0.00345537") + std = float("0.112749") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.126516") + max_val = float("0.194387") + mean = float("-0.000239444") + std = float("0.0147307") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [192] + dtype = "float32" + min_val = float("-2.32829") + max_val = float("1.72494") + mean = float("-0.148208") + std = float("0.746996") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [192] + dtype = "float32" + min_val = float("0.622318") + max_val = float("2.80938") + mean = float("1.10417") + std = float("0.281992") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [192] + dtype = "float32" + min_val = float("0.0114845") + max_val = float("0.292646") + mean = float("0.0547784") + std = float("0.0457904") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [192] + dtype = "float32" + min_val = float("-0.529517") + max_val = float("0.374347") + mean = float("-0.0709328") + std = float("0.134011") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0839254") + max_val = float("0.110947") + mean = float("-0.000137724") + std = float("0.0074586") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [128] + dtype = "float32" + min_val = float("-2.79544") + max_val = float("1.93522") + mean = float("-0.737016") + std = float("0.682776") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [128] + dtype = "float32" + min_val = float("0.292294") + max_val = float("2.14222") + mean = float("1.05749") + std = 
float("0.241285") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [128] + dtype = "float32" + min_val = float("0.00168553") + max_val = float("0.0271169") + mean = float("0.00561304") + std = float("0.00326986") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [128] + dtype = "float32" + min_val = float("-0.250703") + max_val = float("0.2342") + mean = float("0.00559302") + std = float("0.0940671") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.161201") + max_val = float("0.174641") + mean = float("-0.00154525") + std = float("0.0227048") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-0.0177029") + max_val = float("0.000400182") + mean = float("-0.00757502") + std = float("0.00539744") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.328403") + max_val = float("0.138399") + mean = float("-0.00747706") + std = float("0.0181007") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0686131") + max_val = float("0.07745") + mean = float("-0.00194833") + std = float("0.0132318") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0503947") + max_val = float("0.0591655") + mean = float("-0.000553561") + std = float("0.0104874") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [48] + 
dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0666035") + max_val = float("0.0834523") + mean = float("-0.000534285") + std = float("0.0120052") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0891029") + max_val = float("0.0714005") + mean = float("-0.00154811") + std = float("0.0144626") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0625883") + max_val = float("0.0562251") + mean = float("-0.000735936") + std = float("0.0107194") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.103104") + max_val = float("0.0787725") + mean = float("-0.000430431") + std = float("0.0124082") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = 
[48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0876833") + max_val = float("0.07366") + mean = float("-0.00206219") + std = float("0.0169761") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0801655") + max_val = float("0.0747513") + mean = float("-0.000602095") + std = float("0.0109849") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0959795") + max_val = float("0.103953") + mean = float("-0.000426331") + std = float("0.0129461") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.164541") + max_val = float("0.137911") + mean = float("-0.0025117") + std = float("0.0243799") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = 
[48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.160023") + max_val = float("0.155941") + mean = float("-0.00065559") + std = float("0.0231529") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("-3.44006") + max_val = float("3.33808") + mean = float("0.314835") + std = float("1.15264") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("0.89902") + max_val = float("4.778") + mean = float("1.94257") + std = float("0.714568") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96] + dtype = "float32" + min_val = float("0.835229") + max_val = float("17.8402") + mean = float("2.94892") + std = float("2.46279") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-2.00554") + max_val = float("2.58848") + mean = float("-0.340428") + std = float("0.699341") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.120992") + max_val = float("0.107231") + mean = float("-0.000508647") + std = float("0.0129212") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.14708") + max_val = float("0.16461") + mean = float("-0.000709544") + std = float("0.0199361") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [32] 
+ dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.249357") + max_val = float("0.202774") + mean = float("-0.000201137") + std = float("0.0258896") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.28905") + max_val = float("0.284627") + mean = float("-0.00254561") + std = float("0.0658851") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..d7f7b18a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +7bb8a2b2502a471463ad03a6babccb8a2db42f0cbace538187cdc42cf672f3d5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py new file mode 100644 index 000000000..93b3f9bb6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py @@ -0,0 +1,156 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [23] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3024, 1] + dtype = "float32" + min_val = float("4.30403e-05") + max_val = float("0.88968") + mean = float("0.054047") + std = float("0.118534") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3024, 4] + dtype = "float32" + min_val = float("-300.913") + max_val = float("713.889") + mean = float("192.573") + std = float("133.469") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3024, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("380.0") + mean = float("192.0") + std = float("110.796") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 23, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class 
Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 23, 4] + dtype = "float32" + max_val = float("383.232") + mean = float("161.173") + std = float("98.87") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 23, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py new file mode 100644 index 000000000..c8d4c25a1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py @@ -0,0 +1,385 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) + del data_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + 
# pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x-1x-1xf32) <- (2x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_1, full_2, subtract_2 + + # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) + del full_3, subtract_3 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x1x-1xf32) <- (2x-1x1xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int32") + del full_4, full_5, full_6 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_7, data_0] + del data_0, full_7 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) + del stack_0 + + # pd_op.squeeze: (2x-1xi32) <- (2x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([2x-1xi32, 2x-1xi32]) <- (2x-1xi32, 2x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x-1x2xi32) <- 
([2x-1xi32, 2x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.gather_nd: (2x-1x-1xf32) <- (2x1x-1xf32, 2x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) + del data_3, full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_8) + del unsqueeze_3 + + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) <- (2x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_8) + del full_8, unsqueeze_0 + + # builtin.split: (2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32) <- ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) + combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x-1x-1x4xf32) <- ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_9) + del combine_2, full_9 + + # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_10) + del full_10, min_0 + + # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) + cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_1 = 
paddle._C_ops.multiply(multiply_0, cast_2) + + # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] + ) + del full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] + ) + del full_int_array_0, full_int_array_6, shape64_0 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x-1x13xf32, 2x-1x13xi64) <- (2x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, multiply_1 + + # pd_op.one_hot: (2x-1x13x-1xf32) <- (2x-1x13xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 + ) + del slice_5, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x-1x-1xf32) <- (2x-1x13x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_6) + del sum_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_2) + del cast_2, multiply_2 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_6) + del data_6, multiply_3 + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) + del divide_0, full_12, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..c50f83e98 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +167a64ff79f6c85c326535ed7c795e778140d55e3adb01cadae8d8f479d3f197 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + 
"num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py new file mode 100644 index 000000000..38dba74a4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py @@ -0,0 +1,121 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000933908") + std = float("0.0305456") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 29, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0270833") + std = float("0.162326") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 29, 4] + dtype = "float32" + max_val = float("640.0") + mean = float("225.221") + std = float("222.396") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("0.332125") + mean = float("6.7312e-05") + std = float("0.00215112") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("0.941182") + mean = float("0.00754317") + std = float("0.0517711") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py new file mode 100644 index 000000000..08f9efbb7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py @@ -0,0 +1,175 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x8400xi64) <- (2x29x8400xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("29"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x8400xi64) <- (2x8400xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (58xi32) <- (2x29x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (16800xi64) <- (2x8400xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (16800xi32) <- (58xi32, 16800xi64, 1xi32) + gather_0 = 
paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 8400] + + # pd_op.reshape: (2x8400xi32) <- (16800xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x8400xb) <- (2x8400xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x8400xi32) <- (2x8400xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x8400xi32) <- (2x8400xb, 2x8400xi32, 2x8400xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (58x4xf32) <- (2x29x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (16800x4xf32) <- (58x4xf32, 16800xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 8400, 4] + + # pd_op.reshape: (2x8400x4xf32) <- (16800x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x8400x2xf32) <- (2x8400xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x8400x1xf32) <- (2x8400x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x29x1xf32) <- (2x29x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, 
max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x8400xf32) <- (2x29x8400xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x8400x1xf32) <- (2x8400xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x8400x1xf32) <- (2x8400x1xf32, 2x8400x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..a0a4bdddf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/input_meta.py new file mode 100644 index 000000000..4a675438d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2100, 1] + dtype = "float32" + min_val = float("0.000301052") + max_val = float("0.902879") + mean = float("0.0454822") + std = float("0.116377") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 2100, 68] + dtype = "float32" + min_val = float("-6.9182") + max_val = float("13.8683") + mean = float("3.2119e-05") + std = float("1.64095") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2100, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("316.0") + mean = float("160.0") + std = float("92.31") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2100, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py new file mode 100644 index 000000000..c3a688445 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (2100x2xf32) <- (2100x2xf32, 2100x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (2x2100x68xf32) + 
shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x2100x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x2100x2xf32) <- (-1x-1x2xf32, 2100x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x2100x2xf32) <- (-1x-1x2xf32, 2100x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x2100x2xf32, -1x2100x2xf32]) <- (-1x2100x2xf32, -1x2100x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x2100x4xf32) <- ([-1x2100x2xf32, -1x2100x2xf32], 1xi32) + concat_0 = 
paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (2x2100x1xf32) <- (2x2100x1xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x2100x4xf32) <- (-1x2100x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x2100x4xf32) <- (-1x2100x4xf32, 2100x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..5b6734509 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +9e1e78a2b9529fe1a568cac2694d2e415d0a0de0add87039ed78bf63f19ac354 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py new file mode 100644 index 000000000..d779c49f5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py @@ -0,0 +1,141 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2100, 1] + dtype = "float32" + min_val = float("0.000301052") + max_val = float("0.902879") + mean = float("0.0454822") + std = float("0.116377") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 2100, 4] + dtype = "float32" + min_val = float("-292.871") + max_val = float("632.104") + mean = float("160.31") + std = float("118.239") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2100, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("316.0") + mean = float("160.0") + std = float("92.31") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 21, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 21, 4] + dtype = "float32" + max_val = float("320.0") + mean = float("97.1572") + std = float("87.9478") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 21, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, 
+ 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py new file mode 100644 index 000000000..db89bc12d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py @@ -0,0 +1,338 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x21x1x4xf32) <- (2x21x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x2100x4xf32) <- (2x2100x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x21x1x2xf32) <- (2x21x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x21x1x2xf32) <- (2x21x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x2100x2xf32) <- (2x1x2100x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x2100x2xf32) <- (2x1x2100x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x21x2100x2xf32) <- (2x21x1x2xf32, 2x1x2100x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x21x2100x2xf32) <- (2x21x1x2xf32, 2x1x2100x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x21x2100x2xf32) <- (2x21x2100x2xf32, 2x21x2100x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x21x2100x2xf32) <- (2x21x2100x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x21x2100xf32) <- (2x21x2100x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x21x1x2xf32) <- (2x21x1x2xf32, 2x21x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x21x1x2xf32) <- (2x21x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x21x1xf32) <- (2x21x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x2100x2xf32) <- 
(2x1x2100x2xf32, 2x1x2100x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x2100x2xf32) <- (2x1x2100x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (2x1x2100xf32) <- (2x1x2100x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x21x2100xf32) <- (2x21x1xf32, 2x1x2100xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x21x2100xf32) <- (2x21x2100xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del full_2, subtract_3 + + # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x1x2100xf32) <- (2x2100x1xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 21] + + # pd_op.tile: (2x21xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x21xi32) <- (2x21x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) + del data_3 + + # builtin.combine: ([2x21xi32, 2x21xi32]) <- (2x21xi32, 2x21xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x21x2xi32) <- ([2x21xi32, 2x21xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x21x2100xf32) <- (2x1x2100xf32, 2x21x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x21x2100xf32) <- (2x21x2100xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x21x2100xf32) <- (2x21x2100xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x2100x2xf32) <- (2100x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2, full_int_array_6 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x2100x1xf32, 1x1x2100x1xf32]) <- (1x1x2100x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # 
builtin.split: (1x1x2100x1xf32, 1x1x2100x1xf32) <- ([1x1x2100x1xf32, 1x1x2100x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32]) <- (2x21x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32) <- ([2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x21x2100x1xf32) <- (1x1x2100x1xf32, 2x21x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x21x2100x1xf32) <- (1x1x2100x1xf32, 2x21x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x21x2100x1xf32) <- (2x21x1x1xf32, 1x1x2100x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x21x2100x1xf32) <- (2x21x1x1xf32, 1x1x2100x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32]) <- (2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x21x2100x4xf32) <- ([2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1, full_7 + + # pd_op.min: (2x21x2100xf32) <- (2x21x2100x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x21x2100xb) <- (2x21x2100xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del full_8, min_0 + + # pd_op.cast: (2x21x2100xf32) <- (2x21x2100xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x21x13xf32, 2x21x13xi64) <- (2x21x2100xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_9, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_9, multiply_1 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("2100"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x21x13x2100xf32) <- (2x21x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 + ) + del full_10, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x21x2100xf32) <- (2x21x13x2100xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, 
data_5) + del sum_0 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) + del cast_0, multiply_2 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) + del data_5, multiply_3 + + # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x2100xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) + del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..0467b2504 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +dd67c9b14d6f4a8d4f7743618ef2dc8d751095eadf10338c08c3930ba5bb87f1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py new file mode 100644 index 000000000..71f1bf11a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py @@ -0,0 +1,105 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2100] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0607143") + std = float("0.274954") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("0.912588") + mean = float("0.02087") + std = float("0.0876462") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00289116") + std = float("0.0536917") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 21, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape 
= [2, 21, 4] + dtype = "float32" + max_val = float("320.0") + mean = float("97.1572") + std = float("87.9478") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("0.241469") + mean = float("0.000188227") + std = float("0.00363226") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py new file mode 100644 index 000000000..23048d518 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py @@ -0,0 +1,223 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x2100xf32) <- (2x2100xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x2100xb) <- (2x1x2100xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 21, 1] + + # pd_op.tile: (2x21x2100xb) <- (2x1x2100xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("21"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x2100x21xf32) <- (2x2100xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x21x2100xf32) <- (2x2100x21xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x21x2100xf32) <- (2x21x2100xb, 2x21x2100xf32, 2x21x2100xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("21"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x2100xi64) <- (2x2100xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (42xi32) <- (2x21x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (4200xi64) <- (2x2100xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], 
float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (4200xi32) <- (42xi32, 4200xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 2100] + + # pd_op.reshape: (2x2100xi32) <- (4200xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x2100xb) <- (2x2100xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x2100xi32) <- (2x2100xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x2100xi32) <- (2x2100xb, 2x2100xi32, 2x2100xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (42x4xf32) <- (2x21x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (4200x4xf32) <- (42x4xf32, 4200xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 2100, 4] + + # pd_op.reshape: (2x2100x4xf32) <- (4200x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x2100x2xf32) <- (2x2100xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (1xi64) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x2100x1xf32) <- (2x2100x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.scale: (2x21x1xf32) <- (2x21x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: 
(2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x2100x1xf32) <- (2x2100xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x2100x1xf32) <- (2x2100x1xf32, 2x2100x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py new file mode 100644 index 000000000..d8668962d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.170512") + max_val = float("14.8417") + mean = float("5.20202") + std = float("3.42596") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.20075, 0.802005, 1.74863, 1.16364] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = 
paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..8644d9e4f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +ab00f78367b398fd627ea53927a42dac99230f6540cf3006c528aebbab4be04a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No 
newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py new file mode 100644 index 000000000..9392746ec --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3024, 1] + dtype = "float32" + min_val = float("4.30403e-05") + max_val = float("0.88968") + mean = float("0.054047") + std = float("0.118534") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3024] + dtype = "int32" + min_val = 0 + max_val = 1 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3024, 1] + dtype = "float32" + max_val = float("0.909315") + mean = float("0.0306404") + std = float("0.125461") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py new file mode 100644 index 000000000..747380d08 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (2x-1x1xf32) <- (2x-1x1xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (2x-1x1xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (2x-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + 
[1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py new file mode 100644 index 000000000..8b3874e61 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 768, 14, 14] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.38042") + mean = float("0.198977") + std = float("0.492361") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 384, 28, 28] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.58115") + mean = float("0.212227") + std = float("0.589868") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 192, 56, 56] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.27989") + mean = float("0.302059") + std = float("0.556663") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py new file mode 100644 index 000000000..f7f899d11 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, 
-1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + 
scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = 
paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 
768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x768x-1x-1xf32, 1x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x384x-1x-1xf32, 1x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # 
pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x192x-1x-1xf32, 1x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: 
(2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32]) <- (2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x-1x1xf32) <- ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py new file mode 100644 index 000000000..e37ea3f3b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0120063") + max_val = float("0.0335161") + mean = float("1.74565e-07") + std = float("0.00787881") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.168682") + max_val = float("0.167259") + mean = float("7.71688e-08") + std = float("0.00779909") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.128229") + max_val = float("0.242306") + mean = float("0.03158") + std = float("0.0650427") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.74995") + max_val = float("1.60753") + mean = float("1.16227") + std = float("0.142938") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000266157") + max_val = float("0.0040105") + mean = float("0.0010684") + std = float("0.000708224") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0377588") + max_val = float("0.0284686") + mean = float("-0.00327812") + std = float("0.0104515") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.08141") + max_val = float("0.0877749") + mean = float("-0.00031094") + std = float("0.00761982") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00811822") + max_val = float("0.0146164") + mean = float("-0.000174084") + std = float("0.00426814") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0116131") + max_val = float("0.0197689") + mean = float("-0.000103125") + std = float("0.00169022") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0678163") + max_val = float("0.0273811") + mean = float("-0.000125764") + std = float("0.00819254") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.461256") + max_val = float("0.536796") + mean = float("0.114829") + std = float("0.141718") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.859577") + max_val = float("1.50378") + mean = float("1.09021") + std = float("0.0873597") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000431253") + max_val = float("0.0137074") + mean = float("0.00230506") + std = 
float("0.00181221") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.15411") + max_val = float("0.0351219") + mean = float("-0.0393248") + std = float("0.0376471") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0575188") + max_val = float("0.0739459") + mean = float("-0.00110288") + std = float("0.0076522") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00397738") + max_val = float("0.0119464") + mean = float("-0.000221189") + std = float("0.00218333") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0524122") + max_val = float("0.0693195") + mean = float("-9.53701e-05") + std = float("0.00173109") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00573793") + max_val = float("0.0217784") + mean = float("1.54891e-07") + std = float("0.00541763") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0981024") + max_val = float("0.124946") + mean = float("4.94838e-08") + std = float("0.00515546") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0868715") + max_val = float("0.0669849") + mean = float("0.0121538") + std = float("0.0210256") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.90122") + max_val = float("1.2108") + mean = float("1.07141") + std = float("0.0473601") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000115831") + max_val = float("0.00541574") + mean = float("0.000753911") + std = float("0.000632097") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0423513") + max_val = float("0.0117177") + mean = float("-0.00494504") + std = float("0.00661737") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0495158") + max_val = float("0.0695034") + mean = float("-0.000125359") + std = float("0.00350255") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00583976") + max_val = float("0.00760424") + mean = float("1.90588e-05") + std = float("0.0021531") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00305361") + max_val = float("0.00596843") + mean = float("-3.93664e-05") + std = float("0.000601011") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0633952") + max_val = 
float("0.0250402") + mean = float("-0.000143176") + std = float("0.00571183") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.167044") + max_val = float("0.241629") + mean = float("0.0667936") + std = float("0.0667169") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.949533") + max_val = float("1.31274") + mean = float("1.04702") + std = float("0.0484413") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000357813") + max_val = float("0.020638") + mean = float("0.00383275") + std = float("0.00340836") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0843473") + max_val = float("0.02507") + mean = float("-0.027169") + std = float("0.0211633") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0580925") + max_val = float("0.0555854") + mean = float("-0.000603587") + std = float("0.00353173") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00347224") + max_val = float("0.014402") + mean = float("-4.23883e-05") + std = float("0.0014551") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0163252") + max_val = float("0.0101556") + mean = float("-4.84765e-05") + std = float("0.000584142") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00369389") + max_val = float("0.00674372") + mean = float("1.39786e-07") + std = float("0.00287983") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0366324") + max_val = float("0.0462164") + mean = float("-3.25235e-09") + std = float("0.00290201") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0688838") + max_val = float("0.0553698") + mean = float("-0.000947437") + std = float("0.0161684") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("0.964207") + max_val = float("1.26838") + mean = float("1.05105") + std = float("0.0346631") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.12347e-05") + max_val = float("0.00203003") + mean = float("0.000380075") + std = float("0.00026984") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0153051") + max_val = float("0.0154829") + mean = float("-0.00229315") + std = float("0.00345522") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0373301") + max_val = float("0.0386579") + mean = float("-3.68871e-05") + std = float("0.00154547") + data = None + + +class Program_weight_tensor_parameter_43: + 
name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00311382") + max_val = float("0.00471731") + mean = float("1.99403e-05") + std = float("0.00111448") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.003459") + max_val = float("0.00512325") + mean = float("-2.05348e-05") + std = float("0.000302259") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0278575") + max_val = float("0.0206799") + mean = float("0.000226758") + std = float("0.00242893") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.270176") + max_val = float("0.251616") + mean = float("0.00962775") + std = float("0.0522785") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("0.914105") + max_val = float("1.28201") + mean = float("1.03121") + std = float("0.0519879") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("6.12283e-05") + max_val = float("0.00858465") + mean = float("0.00112119") + std = float("0.000817386") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0881042") + max_val = float("0.0255774") + mean = float("-0.0239762") + std = float("0.0175392") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0500488") + max_val = float("0.0289173") + mean = float("-0.000294642") + std = float("0.0016182") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.0131155") + max_val = float("0.00722008") + mean = float("-3.7653e-05") + std = float("0.000847186") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.026325") + max_val = float("0.0506588") + mean = float("7.82541e-06") + std = float("0.000366702") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..302d9614d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +03f141519fd2680b171e881b52c9f1915c126a7a8316d88046de2da35494a18e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/input_meta.py 
new file mode 100644 index 000000000..3b3f1d3f4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 768, 10, 10] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("4.94873") + mean = float("0.216488") + std = float("0.515754") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 384, 20, 20] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.24276") + mean = float("0.245113") + std = float("0.548865") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 192, 40, 40] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.95819") + mean = float("0.32061") + std = float("0.582481") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/model.py new file mode 100644 index 000000000..2091d6739 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("10"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (10xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (10xf32) <- (10xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (10xf32) <- (10xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (10xf32) <- (10xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([10xf32, 10xf32]) <- (10xf32, 10xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([10x10xf32, 10x10xf32]) <- ([10xf32, 10xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (10x10xf32, 
10x10xf32) <- ([10x10xf32, 10x10xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (10x10xf32) <- (10x10xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([10x10xf32, 10x10xf32, 10x10xf32, 10x10xf32]) <- (10x10xf32, 10x10xf32, 10x10xf32, 10x10xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (10x10x4xf32) <- ([10x10xf32, 10x10xf32, 10x10xf32, 10x10xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([10x10xf32, 10x10xf32]) <- (10x10xf32, 10x10xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (10x10x2xf32) <- ([10x10xf32, 10x10xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (100x4xf32) <- (10x10x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (100x2xf32) <- (10x10x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (100x1xf32) <- () + full_5 = paddle._C_ops.full( + [100, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("20"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (20xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (20xf32) <- (20xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (20xf32) <- (20xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (20xf32) <- (20xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([20xf32, 20xf32]) <- (20xf32, 20xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([20x20xf32, 20x20xf32]) <- ([20xf32, 20xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (20x20xf32, 20x20xf32) <- ([20x20xf32, 20x20xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (20x20xf32) <- (20x20xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([20x20xf32, 20x20xf32, 20x20xf32, 20x20xf32]) <- (20x20xf32, 20x20xf32, 20x20xf32, 20x20xf32) + combine_4 = [scale_8, scale_9, scale_10, 
scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (20x20x4xf32) <- ([20x20xf32, 20x20xf32, 20x20xf32, 20x20xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([20x20xf32, 20x20xf32]) <- (20x20xf32, 20x20xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (20x20x2xf32) <- ([20x20xf32, 20x20xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (400x4xf32) <- (20x20x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (400x2xf32) <- (20x20x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (400x1xf32) <- () + full_8 = paddle._C_ops.full( + [400, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("40"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (40xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (40xf32) <- (40xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (40xf32) <- (40xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (40xf32) <- (40xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([40xf32, 40xf32]) <- (40xf32, 40xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([40x40xf32, 40x40xf32]) <- ([40xf32, 40xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (40x40xf32, 40x40xf32) <- ([40x40xf32, 40x40xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (40x40xf32) <- (40x40xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([40x40xf32, 40x40xf32, 40x40xf32, 40x40xf32]) <- (40x40xf32, 40x40xf32, 40x40xf32, 40x40xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (40x40x4xf32) <- ([40x40xf32, 40x40xf32, 40x40xf32, 40x40xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([40x40xf32, 40x40xf32]) <- (40x40xf32, 40x40xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (40x40x2xf32) <- ([40x40xf32, 40x40xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (1600x4xf32) <- (40x40x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (1600x2xf32) <- (40x40x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (1600x1xf32) <- () + full_11 = paddle._C_ops.full( + [1600, 1], + 
float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([100x4xf32, 400x4xf32, 1600x4xf32]) <- (100x4xf32, 400x4xf32, 1600x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (2100x4xf32) <- ([100x4xf32, 400x4xf32, 1600x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([100x2xf32, 400x2xf32, 1600x2xf32]) <- (100x2xf32, 400x2xf32, 1600x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (2100x2xf32) <- ([100x2xf32, 400x2xf32, 1600x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([100x1xf32, 400x1xf32, 1600x1xf32]) <- (100x1xf32, 400x1xf32, 1600x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (2100x1xf32) <- ([100x1xf32, 400x1xf32, 1600x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x10x10xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x768x10x10xf32) <- (2x768x10x10xf32, 2x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x768x10x10xf32) <- (2x768x10x10xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x768x10x10xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x10x10xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x768x10x10xf32) <- (2x768x10x10xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x768x10x10xf32) <- (2x768x10x10xf32, 2x768x10x10xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x1x10x10xf32) <- (2x768x10x10xf32, 1x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, 
parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x1x10x10xf32) <- (2x1x10x10xf32, 1x1x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x768x10x10xf32) <- (2x768x10x10xf32, 2x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (2x768x10x10xf32) <- (2x768x10x10xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x768x10x10xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x10x10xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x768x10x10xf32) <- (2x768x10x10xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x10x10xf32) <- (2x768x10x10xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x68x10x10xf32) <- (2x68x10x10xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x1x10x10xf32) <- (2x1x10x10xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x1x100xf32) <- (2x1x10x10xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x100x1xf32) <- (2x1x100xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x100xf32) <- (2x68x10x10xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x100x68xf32) <- (2x68x100xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x20x20xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = 
paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x384x20x20xf32) <- (2x384x20x20xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x384x20x20xf32) <- (2x384x20x20xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x384x20x20xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x20x20xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x20x20xf32) <- (2x384x20x20xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x384x20x20xf32) <- (2x384x20x20xf32, 2x384x20x20xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x1x20x20xf32) <- (2x384x20x20xf32, 1x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x1x20x20xf32) <- (2x1x20x20xf32, 1x1x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x384x20x20xf32) <- (2x384x20x20xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x384x20x20xf32) <- (2x384x20x20xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x20x20xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x20x20xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x20x20xf32) <- 
(2x384x20x20xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x20x20xf32) <- (2x384x20x20xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x68x20x20xf32) <- (2x68x20x20xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x1x20x20xf32) <- (2x1x20x20xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x1x400xf32) <- (2x1x20x20xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x400x1xf32) <- (2x1x400xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x400xf32) <- (2x68x20x20xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x400x68xf32) <- (2x68x400xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x40x40xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x192x40x40xf32) <- (2x192x40x40xf32, 2x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x192x40x40xf32) <- (2x192x40x40xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x192x40x40xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x40x40xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x40x40xf32) <- (2x192x40x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x192x40x40xf32) <- (2x192x40x40xf32, 2x192x40x40xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x1x40x40xf32) <- (2x192x40x40xf32, 1x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x1x40x40xf32) <- (2x1x40x40xf32, 1x1x1x1xf32) + add_12 = 
paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x192x40x40xf32) <- (2x192x40x40xf32, 2x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x192x40x40xf32) <- (2x192x40x40xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x192x40x40xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x40x40xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x40x40xf32) <- (2x192x40x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x40x40xf32) <- (2x192x40x40xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x68x40x40xf32) <- (2x68x40x40xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x1x40x40xf32) <- (2x1x40x40xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x1x1600xf32) <- (2x1x40x40xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x1600x1xf32) <- (2x1x1600xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x1600xf32) <- (2x68x40x40xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x1600x68xf32) <- (2x68x1600xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x100x1xf32, 2x400x1xf32, 2x1600x1xf32]) <- (2x100x1xf32, 2x400x1xf32, 2x1600x1xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x2100x1xf32) <- ([2x100x1xf32, 2x400x1xf32, 2x1600x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x100x68xf32, 2x400x68xf32, 2x1600x68xf32]) <- (2x100x68xf32, 2x400x68xf32, 2x1600x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x2100x68xf32) <- ([2x100x68xf32, 2x400x68xf32, 2x1600x68xf32], 1xi32) + 
concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/weight_meta.py new file mode 100644 index 000000000..6ace17947 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0125949") + max_val = float("0.0320021") + mean = float("1.7506e-07") + std = float("0.00761307") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.168904") + max_val = float("0.169014") + mean = float("7.72561e-08") + std = float("0.00776969") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.1282") + max_val = float("0.242037") + mean = float("0.0315273") + std = float("0.0649783") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.749934") + max_val = float("1.60723") + mean = float("1.16226") + std = float("0.142889") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000125293") + max_val = float("0.00326057") + mean = float("0.000652126") + std = float("0.000508615") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0367297") + max_val = float("0.0227105") + mean = float("-0.00375554") + std = float("0.00919574") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = 
"float32" + min_val = float("-0.0802065") + max_val = float("0.0870637") + mean = float("-0.000277691") + std = float("0.00748032") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00803608") + max_val = float("0.0139589") + mean = float("-0.000171724") + std = float("0.00422841") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0114108") + max_val = float("0.0196577") + mean = float("-0.000102816") + std = float("0.00167728") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0737663") + max_val = float("0.0381556") + mean = float("-0.000233392") + std = float("0.00813758") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.461245") + max_val = float("0.539067") + mean = float("0.115313") + std = float("0.141982") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.859994") + max_val = float("1.5039") + mean = float("1.0904") + std = float("0.087357") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000180681") + max_val = float("0.0167448") + mean = float("0.00155933") + std = float("0.00171619") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.162108") + max_val = float("0.0318165") + mean = float("-0.0382413") + std = float("0.0371313") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0631619") + max_val = float("0.0730614") + mean = float("-0.00112433") + std = float("0.00747877") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00637065") + max_val = float("0.0126912") + mean = float("-0.000223424") + std = float("0.0021997") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.053909") + max_val = float("0.0684801") + mean = float("-0.000100672") + std = float("0.0017502") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00566615") + max_val = float("0.0214451") + mean = float("1.55502e-07") + std = float("0.00526503") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0979879") + max_val = float("0.125266") + mean = float("4.95638e-08") + std = float("0.00513519") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0865217") + max_val = float("0.0669948") + mean = float("0.012154") + std = float("0.0210022") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + 
shape = [384] + dtype = "float32" + min_val = float("0.900968") + max_val = float("1.21051") + mean = float("1.07139") + std = float("0.0473477") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("7.41786e-05") + max_val = float("0.0044514") + mean = float("0.000468042") + std = float("0.000472298") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0346561") + max_val = float("0.00824432") + mean = float("-0.00577785") + std = float("0.00551423") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.049033") + max_val = float("0.0710137") + mean = float("-0.000133784") + std = float("0.00343512") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00577836") + max_val = float("0.00744136") + mean = float("1.95538e-05") + std = float("0.0021323") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00302728") + max_val = float("0.00565068") + mean = float("-3.93425e-05") + std = float("0.000596316") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0603723") + max_val = float("0.0339025") + mean = float("3.23314e-05") + std = float("0.00523636") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.167038") + max_val = float("0.24192") + mean = float("0.0669682") + std = float("0.066759") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.949473") + max_val = float("1.31276") + mean = float("1.04714") + std = float("0.0484713") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000153128") + max_val = float("0.00911633") + mean = float("0.00178265") + std = float("0.00169809") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0907551") + max_val = float("0.0285552") + mean = float("-0.030032") + std = float("0.022836") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0582307") + max_val = float("0.0552916") + mean = float("-0.00061252") + std = float("0.00344303") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.0037973") + max_val = float("0.0145702") + mean = float("-4.21824e-05") + std = float("0.00146417") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0162584") + max_val = float("0.0101061") + mean = float("-4.89305e-05") + std = float("0.000586823") + data = None + + +class 
Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.0036888") + max_val = float("0.00622556") + mean = float("1.40266e-07") + std = float("0.00281352") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0367545") + max_val = float("0.0462386") + mean = float("-3.27418e-09") + std = float("0.002901") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0688678") + max_val = float("0.0553425") + mean = float("-0.000948868") + std = float("0.0161607") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("0.964192") + max_val = float("1.2684") + mean = float("1.05105") + std = float("0.0346617") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.71783e-05") + max_val = float("0.00243415") + mean = float("0.000262823") + std = float("0.000216483") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0157416") + max_val = float("0.00402676") + mean = float("-0.00257724") + std = float("0.00218349") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0373925") + max_val = float("0.0388143") + mean = float("-3.65769e-05") + std = float("0.00153555") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00312007") + max_val = float("0.00471227") + mean = float("1.99185e-05") + std = float("0.00111313") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00345829") + max_val = float("0.00512512") + mean = float("-2.05958e-05") + std = float("0.000302015") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0286119") + max_val = float("0.0237047") + mean = float("0.000189832") + std = float("0.00263222") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.270176") + max_val = float("0.251642") + mean = float("0.00964705") + std = float("0.0522824") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("0.914096") + max_val = float("1.28203") + mean = float("1.03123") + std = float("0.0519999") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("6.03729e-05") + max_val = float("0.0203602") + mean = float("0.00171387") + std = float("0.00183349") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0962161") + max_val = float("0.0281046") + mean = float("-0.0255357") + std = 
float("0.0185518") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0502056") + max_val = float("0.0287164") + mean = float("-0.000295195") + std = float("0.00161217") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.0130647") + max_val = float("0.00720863") + mean = float("-3.75881e-05") + std = float("0.000847582") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0263655") + max_val = float("0.050775") + mean = float("7.8706e-06") + std = float("0.000367284") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..887348c41 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +1e402c4f0d4d39dfe54d4f81fddd4df61c186704bf895e847864ff205620eab8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/input_meta.py new file mode 100644 index 000000000..0efbf63c7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 640, 640] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.558125") + std = float("1.3994") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/model.py new file mode 100644 index 000000000..450141608 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/model.py @@ -0,0 +1,7164 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + 
parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + 
parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + 
parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + 
parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + data_0, + ): + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_752 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_747, swish_0 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_742, swish_1 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_741, + parameter_740, + parameter_739, + 
parameter_738, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_737, swish_2 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727, swish_3 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 
48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712, swish_6 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + del batch_norm__42, batch_norm__48 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697, swish_8 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + del batch_norm__60, batch_norm__66 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- 
(2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682, swish_10 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + del batch_norm__78, batch_norm__84 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_677 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) + del parameter_676 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_0, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_675 + + # 
pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_670, swish_12 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_665 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660, swish_13 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_656, 
parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645, swish_16 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + del batch_norm__120, batch_norm__126 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630, swish_18 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + del batch_norm__138, batch_norm__144 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615, swish_20 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + del batch_norm__156, batch_norm__162 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: 
(2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600, swish_22 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) + del batch_norm__174, batch_norm__180 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585, swish_24 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + del batch_norm__192, batch_norm__198 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570, swish_26 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) + del batch_norm__210, batch_norm__216 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_565 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) + del parameter_564 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_1, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_563 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_558 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_553 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548, swish_29 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 + + # 
pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533, swish_32 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) + del batch_norm__252, batch_norm__258 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 
192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518, swish_34 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + del batch_norm__270, batch_norm__276 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503, swish_36 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488, swish_38 + + # 
pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + del batch_norm__306, batch_norm__312 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473, swish_40 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) + del batch_norm__324, batch_norm__330 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458, swish_42 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) + del batch_norm__342, 
batch_norm__348 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_453 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_452, full_int_array_1) + del parameter_452 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_2, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_451 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: 
(2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436, swish_45 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_425, + parameter_424, + parameter_423, + 
parameter_422, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421, swish_48 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) + del batch_norm__384, batch_norm__390 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_406, 
[1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406, swish_50 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) + del batch_norm__402, batch_norm__408 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391, swish_52 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, 
+ parameter_390, + parameter_389, + parameter_388, + parameter_387, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) + del batch_norm__420, batch_norm__426 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_386 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) + del parameter_385 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_3, parameter_384, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_384 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_383, + parameter_382, + parameter_381, + parameter_380, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_380, parameter_381, parameter_382, parameter_383 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, swish_54 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_359, swish_57 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_40) + del add_40 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_41) + del add_41 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_42) + del add_42 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_5 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_64 = 
paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (2x192x-1x-1xf32) <- 
(2x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, batch_norm__546 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(add_43) + del add_43 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + 
batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_44) + del add_44 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del 
batch_norm__570 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_45) + del add_45 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_7 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_249 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) 
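(Editor's note, not part of the generated patch: the subgraph above repeats one structural motif many times -- conv2d followed by batch_norm and swish, and a residual "basic block" in which the same activation feeds a 3x3 conv+BN branch and a 1x1 conv+BN shortcut that are summed, swish-activated, and added back to the block input (see add_26/add_27, add_28/add_29, etc.). The sketch below is a hedged illustration of that pattern using Paddle's public nn API; class names such as ConvBNSwish and ResBasicBlock are hypothetical and do not appear in the captured graph, which calls paddle._C_ops directly.)

import paddle
import paddle.nn as nn


class ConvBNSwish(nn.Layer):
    """conv2d -> batch_norm -> swish, mirroring the repeated pd_op sequence above."""

    def __init__(self, in_ch, out_ch, kernel_size, stride=1, padding=0):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, kernel_size, stride=stride,
                              padding=padding, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch, momentum=0.9, epsilon=1e-5)
        self.act = nn.Swish()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))


class ResBasicBlock(nn.Layer):
    """One residual unit of the captured graph: a 3x3 ConvBNSwish, then a 3x3
    conv+BN branch and a 1x1 conv+BN shortcut on its output, summed and
    swish-activated, with an outer skip connection back to the block input."""

    def __init__(self, ch):
        super().__init__()
        self.stem = ConvBNSwish(ch, ch, 3, padding=1)
        self.conv3x3 = nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False)
        self.bn3x3 = nn.BatchNorm2D(ch, momentum=0.9, epsilon=1e-5)
        self.conv1x1 = nn.Conv2D(ch, ch, 1, bias_attr=False)
        self.bn1x1 = nn.BatchNorm2D(ch, momentum=0.9, epsilon=1e-5)
        self.act = nn.Swish()

    def forward(self, x):
        y = self.stem(x)
        merged = self.bn3x3(self.conv3x3(y)) + self.bn1x1(self.conv1x1(y))
        return x + self.act(merged)  # outer residual, as in add_27/add_29 above


if __name__ == "__main__":
    # Spatial size is illustrative; the captured graph uses dynamic (-1) H/W.
    feat = paddle.randn([2, 192, 20, 20])
    print(ResBasicBlock(192)(feat).shape)  # [2, 192, 20, 20]

(The generated code also contains a squeeze-excitation-style gate -- global mean, 1x1 conv, hardsigmoid, channel-wise multiply -- and an SPP stage with 5/9/13 max pools concatenated before a 1x1 reduction; those follow the same conv/BN/act conventions and are omitted from the sketch for brevity.)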
+ del parameter_234, swish_77 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_46) + del add_46 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_47) + del add_47 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + 
batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_48) + del add_48 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_9 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + 
parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_10 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, 
parameter_167, parameter_168 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_49) + del add_49 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + 
batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_50) + del add_50 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_91 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_51) + del add_51 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_11 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) + 
combine_12 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_109 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104, swish_97 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 
384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_52) + del add_52 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- 
(2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_100 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_53) + del add_53 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_101 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_102 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + 
parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_54) + del add_54 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_13 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_1 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) + del slice_0, slice_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [1, 1] + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_55 = paddle._C_ops.add(conv2d_142, reshape_4) + del conv2d_142, reshape_4 + + # pd_op.sigmoid: 
(2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_55) + del add_55 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_5 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_5, parameter_52 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) + add_56 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x768x-1x-1xf32, 1x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_56, parameter_47 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_57 = paddle._C_ops.add(conv2d_144, reshape_5) + del conv2d_144, reshape_5 + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_58 = paddle._C_ops.add(conv2d_145, reshape_6) + del conv2d_145, reshape_6 + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_58) + del add_58 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_6 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_6, parameter_43 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_146, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: 
(2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_106 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_59 = paddle._C_ops.add(conv2d_147, reshape_7) + del conv2d_147, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_4] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_59, stack_0) + del add_59, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1] + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_9) + del conv2d_148 + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_4, multiply_4] + del multiply_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x1x-1xf32) <- (2x1x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_3 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) + del slice_2, slice_3 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # 
pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_60 = paddle._C_ops.add(conv2d_149, reshape_10) + del conv2d_149, reshape_10 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_60) + del add_60 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_8 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_150 = paddle._C_ops.conv2d( + multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_8, parameter_33 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_150, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_61 = paddle._C_ops.add(swish_107, swish_94) + del swish_107 + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x384x-1x-1xf32, 1x384x3x3xf32) + conv2d_151 = paddle._C_ops.conv2d( + add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_61, parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_62 = paddle._C_ops.add(conv2d_151, reshape_11) + del conv2d_151, reshape_11 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_63 = paddle._C_ops.add(conv2d_152, reshape_12) + del conv2d_152, reshape_12 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_63) + del add_63 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_9 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + 
batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_108 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_64 = paddle._C_ops.add(conv2d_154, reshape_13) + del conv2d_154, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_7] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_64, stack_2) + del add_64, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_9) + del conv2d_155 + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_4, multiply_7] + del multiply_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x1x-1xf32) <- (2x1x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_4 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + 
"EXPLICIT", + ) + del full_int_array_8 + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_65 = paddle._C_ops.add(conv2d_156, reshape_16) + del conv2d_156, reshape_16 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_65) + del add_65 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_157 = paddle._C_ops.conv2d( + multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_15 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_157, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_66 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x192x-1x-1xf32, 1x192x3x3xf32) + conv2d_158 = paddle._C_ops.conv2d( + add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_66, parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_67 = paddle._C_ops.add(conv2d_158, reshape_17) + del conv2d_158, reshape_17 + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_68 = paddle._C_ops.add(conv2d_159, reshape_18) + del conv2d_159, reshape_18 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_68) + del add_68 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_160 = paddle._C_ops.conv2d( + multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_6 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 
192xf32, 192xf32, 192xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_160, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_110 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_69 = paddle._C_ops.add(conv2d_161, reshape_19) + del conv2d_161, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_10] + del full_2, full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_69, stack_4) + del add_69, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_9) + del conv2d_162, full_int_array_9 + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_4, multiply_10] + del full_1, full_4, multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x1x-1xf32) <- (2x1x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x1x-1xf32, -1x1x-1xf32, -1x1x-1xf32]) <- (-1x1x-1xf32, -1x1x-1xf32, -1x1x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x1x-1xf32) <- ([-1x1x-1xf32, -1x1x-1xf32, -1x1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_5) + del combine_20, full_5 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = 
paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/weight_meta.py new file mode 100644 index 000000000..d3b0e946d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/weight_meta.py @@ -0,0 +1,8161 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0121641") + max_val = float("0.0333701") + mean = float("1.74754e-07") + std = float("0.00786353") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.16862") + max_val = float("0.167033") + mean = float("7.72125e-08") + std = float("0.00779619") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.128251") + max_val = float("0.242263") + mean = float("0.0315745") + std = float("0.0650394") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.750028") + max_val = float("1.60768") + mean = float("1.16244") + std = float("0.142952") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000273701") + max_val = float("0.00405646") + mean = float("0.00107088") + std = float("0.000695427") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0367661") + max_val = float("0.0272949") + mean = float("-0.00327525") + std = float("0.0101808") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0811281") + max_val = float("0.0878963") + mean = float("-0.000294008") + std = float("0.00760061") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00810131") + max_val = float("0.0145692") + mean = float("-0.000173901") + std = float("0.00426476") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0116778") + max_val = float("0.0197315") + mean = float("-0.00010309") + std = float("0.00168907") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0675271") + max_val = float("0.02687") + mean = float("-7.15423e-05") + std = float("0.00802438") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.4613") + max_val = float("0.53723") + mean = float("0.114887") + std = float("0.141768") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.859721") + max_val = float("1.50399") + mean = float("1.09037") + std = float("0.0873702") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = 
"float32" + min_val = float("0.000384752") + max_val = float("0.0131843") + mean = float("0.00225241") + std = float("0.00177847") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.156504") + max_val = float("0.0356246") + mean = float("-0.039832") + std = float("0.0378479") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0583474") + max_val = float("0.0738958") + mean = float("-0.00111519") + std = float("0.00763322") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00397422") + max_val = float("0.012043") + mean = float("-0.00022146") + std = float("0.00218294") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0525848") + max_val = float("0.0694109") + mean = float("-9.56083e-05") + std = float("0.00173277") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00570853") + max_val = float("0.0219015") + mean = float("1.55051e-07") + std = float("0.00540501") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0981608") + max_val = float("0.12553") + mean = float("4.95129e-08") + std = float("0.00515442") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0868007") + max_val = float("0.0669862") + mean = float("0.0121588") + std = float("0.0210241") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.901452") + max_val = float("1.21097") + mean = float("1.07157") + std = float("0.0473606") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000124814") + max_val = float("0.00490021") + mean = float("0.000716724") + std = float("0.000553742") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0426542") + max_val = float("0.0117692") + mean = float("-0.005064") + std = float("0.00640636") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0494409") + max_val = float("0.0693013") + mean = float("-0.0001264") + std = float("0.00349468") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00583917") + max_val = float("0.00759229") + mean = float("1.91238e-05") + std = float("0.00215072") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0030542") + max_val = float("0.00595636") + mean = float("-3.93566e-05") + std = float("0.000600581") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name 
= "parameter_28" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0624534") + max_val = float("0.0258413") + mean = float("-0.000163845") + std = float("0.0055211") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.167064") + max_val = float("0.24168") + mean = float("0.06682") + std = float("0.0667279") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.949665") + max_val = float("1.31293") + mean = float("1.04718") + std = float("0.0484473") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000374549") + max_val = float("0.0152991") + mean = float("0.00347468") + std = float("0.00302924") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0852089") + max_val = float("0.0240756") + mean = float("-0.0272279") + std = float("0.0212846") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.058167") + max_val = float("0.0555544") + mean = float("-0.000600445") + std = float("0.00352186") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00350437") + max_val = float("0.0144092") + mean = float("-4.23957e-05") + std = float("0.00145588") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0163149") + max_val = float("0.0101554") + mean = float("-4.85259e-05") + std = float("0.000584502") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00368977") + max_val = float("0.00660188") + mean = float("1.39771e-07") + std = float("0.00287114") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0367501") + max_val = float("0.0460144") + mean = float("-3.25963e-09") + std = float("0.00290222") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-0.068895") + max_val = float("0.0553675") + mean = float("-0.000948569") + std = float("0.0161696") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("0.964345") + max_val = float("1.26855") + mean = float("1.0512") + std = float("0.0346679") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("3.06014e-05") + max_val = float("0.00198479") + mean = float("0.000351474") + std = float("0.000240551") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768] + dtype = "float32" + min_val = float("-0.0146215") + max_val = float("0.0161621") + mean = float("-0.00225536") + std = float("0.00333205") + data = None + + +class 
Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0373657") + max_val = float("0.0387001") + mean = float("-3.59702e-05") + std = float("0.00154467") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768] + dtype = "float32" + min_val = float("-0.00311512") + max_val = float("0.00471713") + mean = float("1.99403e-05") + std = float("0.0011147") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00345896") + max_val = float("0.00512545") + mean = float("-2.05388e-05") + std = float("0.000302326") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [1, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0279547") + max_val = float("0.0209131") + mean = float("0.000246235") + std = float("0.00247216") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("-0.270215") + max_val = float("0.251652") + mean = float("0.00963056") + std = float("0.0522864") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("0.914237") + max_val = float("1.28219") + mean = float("1.03136") + std = float("0.0519961") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("6.30127e-05") + max_val = float("0.0091368") + mean = float("0.00113058") + std = float("0.000808467") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768] + dtype = "float32" + min_val = float("-0.0890634") + max_val = float("0.025694") + mean = float("-0.0241855") + std = float("0.0176791") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0500597") + max_val = float("0.0289301") + mean = float("-0.0002949") + std = float("0.00161818") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768] + dtype = "float32" + min_val = float("-0.0131038") + max_val = float("0.0072247") + mean = float("-3.76555e-05") + std = float("0.000847185") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0263292") + max_val = float("0.0506765") + mean = float("7.82918e-06") + std = float("0.000366786") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [768] + dtype = "float32" + min_val = float("-0.743252") + max_val = float("0.507408") + mean = float("0.0206483") + std = float("0.126366") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + min_val = float("0.898431") + max_val = float("1.48411") + mean = float("0.988118") + std = float("0.0346462") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("0.00143168") + max_val = float("0.218986") + mean = float("0.0161277") + std = 
float("0.0174729") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("-0.324566") + max_val = float("0.124823") + mean = float("-0.0415513") + std = float("0.0442598") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0729223") + max_val = float("0.0383852") + mean = float("-0.000183939") + std = float("0.00263752") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.254417") + max_val = float("0.0598548") + mean = float("-0.0304627") + std = float("0.0365547") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.944342") + max_val = float("1.07013") + mean = float("0.987779") + std = float("0.0170766") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.000781911") + max_val = float("0.0274863") + mean = float("0.00553515") + std = float("0.00283191") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.0647792") + max_val = float("0.0618622") + mean = float("-0.0137433") + std = float("0.0169653") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0337186") + max_val = float("0.0292072") + mean = float("-0.000215613") + std = float("0.00198244") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.254417") + max_val = float("0.0598548") + mean = float("-0.0304627") + std = float("0.0365547") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.87232") + max_val = float("1.23757") + mean = float("1.03039") + std = float("0.0354241") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00422365") + max_val = float("0.0712197") + mean = float("0.0152383") + std = float("0.00948895") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.223974") + max_val = float("0.0647828") + mean = float("-0.0300006") + std = float("0.0408773") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.037223") + max_val = float("0.043261") + mean = float("-5.79741e-05") + std = float("0.0013366") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [384] + dtype = "float32" + min_val = float("-0.374188") + max_val = float("0.0523552") + mean = float("-0.0502526") + std = float("0.0490786") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384] + dtype = "float32" + min_val = float("0.960412") + max_val = float("1.34791") + mean = float("1.02226") + std = float("0.0411683") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("0.010283") + max_val 
= float("0.254173") + mean = float("0.0359127") + std = float("0.0200239") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-0.27605") + max_val = float("0.395213") + mean = float("-0.0462748") + std = float("0.0687873") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0256572") + max_val = float("0.0583057") + mean = float("-6.19001e-05") + std = float("0.00149112") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384] + dtype = "float32" + min_val = float("-0.156775") + max_val = float("0.0106289") + mean = float("-0.0499544") + std = float("0.0299656") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [384] + dtype = "float32" + min_val = float("0.910978") + max_val = float("1.0554") + mean = float("0.981121") + std = float("0.0132607") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [384] + dtype = "float32" + min_val = float("0.00100225") + max_val = float("0.0211152") + mean = float("0.00600844") + std = float("0.00309617") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [384] + dtype = "float32" + min_val = float("-0.0557797") + max_val = float("0.0537013") + mean = float("-0.00659213") + std = float("0.0164185") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0378843") + max_val = float("0.0289645") + mean = float("-0.000143777") + std = float("0.00197098") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [384] + dtype = "float32" + min_val = float("-0.156775") + max_val = float("0.0106289") + mean = float("-0.0499544") + std = float("0.0299656") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [384] + dtype = "float32" + min_val = float("0.97086") + max_val = float("1.20651") + mean = float("1.02846") + std = float("0.0394551") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [384] + dtype = "float32" + min_val = float("0.00373931") + max_val = float("0.0816658") + mean = float("0.0156042") + std = float("0.0102651") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [384] + dtype = "float32" + min_val = float("-0.137162") + max_val = float("0.0833122") + mean = float("-0.0358381") + std = float("0.035943") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0325572") + max_val = float("0.0530747") + mean = float("-6.70622e-05") + std = float("0.00133181") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [384] + dtype = "float32" + min_val = float("-0.246152") + max_val = float("0.0162046") + mean = float("-0.0547072") + std = float("0.0390494") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [384] + dtype = "float32" + min_val = float("0.935508") + max_val = float("1.24745") + mean = float("1.01975") + std = float("0.0429522") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [384] + 
dtype = "float32" + min_val = float("0.0107172") + max_val = float("0.224052") + mean = float("0.0361907") + std = float("0.0244225") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("-0.261585") + max_val = float("0.098573") + mean = float("-0.0621459") + std = float("0.0653452") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0313703") + max_val = float("0.0549787") + mean = float("-7.65491e-05") + std = float("0.00149524") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-0.18497") + max_val = float("0.0322873") + mean = float("-0.0535274") + std = float("0.0327313") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384] + dtype = "float32" + min_val = float("0.904425") + max_val = float("1.01888") + mean = float("0.979445") + std = float("0.0159237") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("0.00126839") + max_val = float("0.022133") + mean = float("0.00585202") + std = float("0.00253785") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("-0.135948") + max_val = float("0.0577871") + mean = float("-0.00339615") + std = float("0.0176901") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0420657") + max_val = float("0.0292266") + mean = float("-9.33786e-05") + std = float("0.00208678") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-0.18497") + max_val = float("0.0322873") + mean = float("-0.0535274") + std = float("0.0327313") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384] + dtype = "float32" + min_val = float("0.975118") + max_val = float("1.21368") + mean = float("1.03083") + std = float("0.0414091") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("0.00527168") + max_val = float("0.111708") + mean = float("0.0189554") + std = float("0.0110883") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("-0.21361") + max_val = float("0.11623") + mean = float("-0.0238691") + std = float("0.0374207") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0271595") + max_val = float("0.0469736") + mean = float("-4.67456e-05") + std = float("0.00137009") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-0.21342") + max_val = float("0.0264741") + mean = float("-0.0541152") + std = float("0.0385625") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384] + dtype = "float32" + min_val = float("0.924962") + max_val = float("1.16836") + mean = float("1.02139") + std = float("0.0372896") + data = None + + +class 
Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("0.00496729") + max_val = float("0.140077") + mean = float("0.0244752") + std = float("0.0172182") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("-0.191476") + max_val = float("0.186117") + mean = float("-0.05164") + std = float("0.059301") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0192065") + max_val = float("0.0359823") + mean = float("-8.73946e-05") + std = float("0.00152888") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-0.121407") + max_val = float("0.0778681") + mean = float("-0.0298381") + std = float("0.0228376") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384] + dtype = "float32" + min_val = float("0.963361") + max_val = float("1.12251") + mean = float("1.01472") + std = float("0.026551") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("0.00345433") + max_val = float("0.0369605") + mean = float("0.00986699") + std = float("0.00450402") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("-0.10945") + max_val = float("0.11273") + mean = float("-0.0241149") + std = float("0.0306634") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.041425") + max_val = float("0.0658511") + mean = float("-0.000110638") + std = float("0.00239277") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-0.116772") + max_val = float("0.0158963") + mean = float("-0.0176804") + std = float("0.0160411") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384] + dtype = "float32" + min_val = float("0.941042") + max_val = float("1.23811") + mean = float("1.01365") + std = float("0.0258733") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("0.00232905") + max_val = float("0.0347195") + mean = float("0.00753712") + std = float("0.00352835") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("-0.101033") + max_val = float("0.0739513") + mean = float("-0.0323173") + std = float("0.0285796") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0325728") + max_val = float("0.0419523") + mean = float("-0.000152293") + std = float("0.0022599") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-0.0965388") + max_val = float("0.0058174") + mean = float("-0.0226018") + std = float("0.0169399") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [384] + dtype = "float32" + min_val = float("0.958665") + max_val = 
float("1.19121") + mean = float("1.03934") + std = float("0.0318009") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("0.0031369") + max_val = float("0.0781734") + mean = float("0.0161429") + std = float("0.0101672") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("-0.190589") + max_val = float("0.140504") + mean = float("-0.019138") + std = float("0.0505961") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0231751") + max_val = float("0.0359994") + mean = float("-2.40033e-05") + std = float("0.0012128") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.462787") + max_val = float("0.417372") + mean = float("0.0794253") + std = float("0.135822") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384] + dtype = "float32" + min_val = float("0.861745") + max_val = float("1.3792") + mean = float("0.999766") + std = float("0.0519782") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] + dtype = "float32" + min_val = float("0.00656037") + max_val = float("0.259205") + mean = float("0.0327912") + std = float("0.0213536") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("-0.205895") + max_val = float("0.124211") + mean = float("-0.0443792") + std = float("0.0489067") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109559") + max_val = float("0.0699117") + mean = float("-0.000394157") + std = float("0.00584597") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.164875") + max_val = float("0.0548568") + mean = float("-0.0386634") + std = float("0.0421332") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.859888") + max_val = float("1.08381") + mean = float("0.961303") + std = float("0.0320852") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00237067") + max_val = float("0.0444514") + mean = float("0.0122066") + std = float("0.00791997") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.0674783") + max_val = float("0.0904693") + mean = float("-0.0179738") + std = float("0.0268437") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0551751") + max_val = float("0.0362489") + mean = float("-0.000586875") + std = float("0.00435786") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.164875") + max_val = float("0.0548568") + mean = float("-0.0386634") + std = float("0.0421332") + data = None + + +class Program_weight_tensor_parameter_131: + name = 
"parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.90941") + max_val = float("1.22819") + mean = float("1.04928") + std = float("0.0517532") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.0108259") + max_val = float("0.114161") + mean = float("0.0298535") + std = float("0.0146571") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.169563") + max_val = float("0.279569") + mean = float("-0.0244124") + std = float("0.0606") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0436249") + max_val = float("0.0599502") + mean = float("-7.42007e-05") + std = float("0.00305484") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.309115") + max_val = float("0.0491253") + mean = float("-0.0761064") + std = float("0.0618768") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.911347") + max_val = float("1.34826") + mean = float("1.02627") + std = float("0.055741") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.0162916") + max_val = float("0.230424") + mean = float("0.0635955") + std = float("0.0341668") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.206388") + max_val = float("0.253518") + mean = float("-0.0305603") + std = float("0.0636566") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0489536") + max_val = float("0.0578266") + mean = float("-0.000107368") + std = float("0.00338174") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("-0.270884") + max_val = float("0.00566454") + mean = float("-0.090629") + std = float("0.0444102") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.880957") + max_val = float("1.12294") + mean = float("0.962849") + std = float("0.027118") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("0.00248917") + max_val = float("0.0254") + mean = float("0.00851717") + std = float("0.00392838") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192] + dtype = "float32" + min_val = float("-0.0544904") + max_val = float("0.0507639") + mean = float("-0.0137934") + std = float("0.0160176") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0557329") + max_val = float("0.0442933") + mean = float("-0.000665974") + std = float("0.00459727") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.270884") + max_val = float("0.00566454") + mean = float("-0.090629") + std = 
float("0.0444102") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.942586") + max_val = float("1.23879") + mean = float("1.04255") + std = float("0.0442743") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.00773896") + max_val = float("0.088285") + mean = float("0.0241641") + std = float("0.0121994") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.164934") + max_val = float("0.0952735") + mean = float("-0.0249949") + std = float("0.042689") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0456874") + max_val = float("0.057905") + mean = float("-0.000110896") + std = float("0.0030779") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.307293") + max_val = float("0.0608156") + mean = float("-0.111113") + std = float("0.0621663") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.918775") + max_val = float("1.21828") + mean = float("1.02832") + std = float("0.0566345") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.0164702") + max_val = float("0.234996") + mean = float("0.0519117") + std = float("0.0284291") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.272336") + max_val = float("0.0789161") + mean = float("-0.0542654") + std = float("0.0575364") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0465236") + max_val = float("0.0704404") + mean = float("-0.00016208") + std = float("0.0035069") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.366904") + max_val = float("-0.0134069") + mean = float("-0.100298") + std = float("0.0549012") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.868637") + max_val = float("1.06224") + mean = float("0.960063") + std = float("0.0231142") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.00330898") + max_val = float("0.0238657") + mean = float("0.00862685") + std = float("0.00314342") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.0763245") + max_val = float("0.0462065") + mean = float("-0.0293695") + std = float("0.0255824") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0469422") + max_val = float("0.0462969") + mean = float("-0.00112619") + std = float("0.00494161") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = 
float("-0.366904") + max_val = float("-0.0134069") + mean = float("-0.100298") + std = float("0.0549012") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.935067") + max_val = float("1.20384") + mean = float("1.03391") + std = float("0.0436551") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0102647") + max_val = float("0.0944033") + mean = float("0.0271519") + std = float("0.0150605") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.191706") + max_val = float("0.130996") + mean = float("-0.0170581") + std = float("0.0523357") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.053679") + max_val = float("0.0659301") + mean = float("-7.24105e-05") + std = float("0.00325663") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.51688") + max_val = float("-0.0204016") + mean = float("-0.126066") + std = float("0.0638447") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.850002") + max_val = float("1.25364") + mean = float("1.02943") + std = float("0.0638937") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0162259") + max_val = float("0.240876") + mean = float("0.0391802") + std = float("0.0225238") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.156443") + max_val = float("0.198934") + mean = float("-0.0443495") + std = float("0.0575356") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0397151") + max_val = float("0.0638021") + mean = float("-0.000153904") + std = float("0.00376403") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.271286") + max_val = float("0.0561678") + mean = float("-0.0865284") + std = float("0.0441388") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.916087") + max_val = float("1.26752") + mean = float("1.02568") + std = float("0.0564594") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.00734004") + max_val = float("0.107356") + mean = float("0.0181635") + std = float("0.00979573") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.137356") + max_val = float("0.149896") + mean = float("-0.0263279") + std = float("0.0371664") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0614514") + max_val = float("0.0653983") + mean = float("-0.00023038") + std = float("0.00562286") + data = None + + +class 
Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [192] + dtype = "float32" + min_val = float("-0.162315") + max_val = float("0.0343819") + mean = float("-0.0146986") + std = float("0.0266544") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192] + dtype = "float32" + min_val = float("0.895922") + max_val = float("1.15148") + mean = float("1.00435") + std = float("0.0388615") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("0.00332295") + max_val = float("0.0679395") + mean = float("0.0127088") + std = float("0.00797069") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("-0.0835811") + max_val = float("0.0528489") + mean = float("-0.0240515") + std = float("0.0239575") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0562021") + max_val = float("0.0724976") + mean = float("-0.000234749") + std = float("0.00480357") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-0.147874") + max_val = float("0.0091692") + mean = float("-0.0438342") + std = float("0.0292702") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192] + dtype = "float32" + min_val = float("0.944219") + max_val = float("1.18873") + mean = float("1.03618") + std = float("0.0347147") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("0.00778453") + max_val = float("0.0847991") + mean = float("0.024107") + std = float("0.0139533") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("-0.516924") + max_val = float("0.369412") + mean = float("-0.0347383") + std = float("0.116224") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0341945") + max_val = float("0.0437504") + mean = float("-4.80874e-05") + std = float("0.00283951") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.797629") + max_val = float("1.67476") + mean = float("0.20496") + std = float("0.337874") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192] + dtype = "float32" + min_val = float("0.58345") + max_val = float("1.5379") + mean = float("0.970985") + std = float("0.108354") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("0.0110326") + max_val = float("0.258651") + mean = float("0.0490295") + std = float("0.034123") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("-0.358034") + max_val = float("0.124888") + mean = float("-0.0482") + std = float("0.0595823") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.163791") + max_val = float("0.103768") + mean = 
float("-0.000863297") + std = float("0.0123312") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96] + dtype = "float32" + min_val = float("-0.282433") + max_val = float("0.282069") + mean = float("0.00710333") + std = float("0.116715") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("0.779394") + max_val = float("1.27113") + mean = float("0.92644") + std = float("0.0712489") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.00442567") + max_val = float("0.0353601") + mean = float("0.0151945") + std = float("0.00701794") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("-0.0517214") + max_val = float("0.0885269") + mean = float("-0.0106388") + std = float("0.021762") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.107754") + max_val = float("0.0652385") + mean = float("-0.00119794") + std = float("0.00899017") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96] + dtype = "float32" + min_val = float("-0.282433") + max_val = float("0.282069") + mean = float("0.00710333") + std = float("0.116715") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("0.688077") + max_val = float("1.34931") + mean = float("1.04714") + std = float("0.0915058") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.0162018") + max_val = float("0.111441") + mean = float("0.0411683") + std = float("0.0179849") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("-0.160526") + max_val = float("0.134332") + mean = float("-0.0154187") + std = float("0.0523237") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0771997") + max_val = float("0.0999289") + mean = float("-0.000134172") + std = float("0.00670866") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96] + dtype = "float32" + min_val = float("-0.457238") + max_val = float("0.284249") + mean = float("-0.137916") + std = float("0.150132") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("0.828351") + max_val = float("1.75545") + mean = float("1.00999") + std = float("0.141165") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.0275218") + max_val = float("0.241148") + mean = float("0.0762207") + std = float("0.0393366") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("-0.229059") + max_val = float("0.151439") + mean = float("-0.0459165") + std = float("0.0531543") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.086691") + max_val = float("0.0921687") + mean = float("-0.000442128") + std = float("0.00745447") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96] + dtype = "float32" + min_val = float("-0.394418") + max_val = float("0.00715098") + mean = float("-0.148941") + std = float("0.0839182") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("0.717638") + max_val = float("0.995679") + mean = float("0.881627") + std = float("0.0561071") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.00318176") + max_val = float("0.0326119") + mean = float("0.0119885") + std = float("0.00487886") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("-0.0399496") + max_val = float("0.0465444") + mean = float("0.00960277") + std = float("0.0194889") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.072261") + max_val = float("0.059046") + mean = float("-0.000366401") + std = float("0.00920863") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96] + dtype = "float32" + min_val = float("-0.394418") + max_val = float("0.00715098") + mean = float("-0.148941") + std = float("0.0839182") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("0.805485") + max_val = float("1.22609") + mean = float("1.05179") + std = float("0.0896267") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.014007") + max_val = float("0.0962489") + mean = float("0.0314367") + std = float("0.0153187") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("-0.123048") + max_val = float("0.0345517") + mean = float("-0.0406836") + std = float("0.0274139") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0827043") + max_val = float("0.130698") + mean = float("-0.000532638") + std = float("0.00708723") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96] + dtype = "float32" + min_val = float("-0.4971") + max_val = float("0.0666561") + mean = float("-0.208105") + std = float("0.125916") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("0.701779") + max_val = float("1.54173") + mean = float("0.996682") + std = float("0.130856") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0345198") + max_val = float("0.225098") + mean = float("0.0760202") + std = float("0.0354848") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("-0.162876") + max_val = float("0.0247422") + mean = float("-0.0705325") + std = float("0.0413251") + data = None + + +class Program_weight_tensor_parameter_219: + name = 
"parameter_219" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0817576") + max_val = float("0.109913") + mean = float("-0.000622453") + std = float("0.00823604") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.488691") + max_val = float("0.0970943") + mean = float("-0.190324") + std = float("0.108446") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("0.693616") + max_val = float("1.02467") + mean = float("0.878546") + std = float("0.0578259") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.00485397") + max_val = float("0.0229483") + mean = float("0.00968923") + std = float("0.00319375") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("-0.0542733") + max_val = float("0.018105") + mean = float("-0.0145726") + std = float("0.0141953") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0682492") + max_val = float("0.0733783") + mean = float("-0.00196267") + std = float("0.0113818") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-0.488691") + max_val = float("0.0970943") + mean = float("-0.190324") + std = float("0.108446") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.645063") + max_val = float("1.26063") + mean = float("1.02362") + std = float("0.09686") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.0173224") + max_val = float("0.0889882") + mean = float("0.0343411") + std = float("0.0136393") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("-0.105072") + max_val = float("0.0360949") + mean = float("-0.0231942") + std = float("0.0289781") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0745421") + max_val = float("0.0940337") + mean = float("-0.00035508") + std = float("0.00789677") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-0.751789") + max_val = float("0.0464603") + mean = float("-0.247051") + std = float("0.151011") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.714263") + max_val = float("1.24884") + mean = float("0.989188") + std = float("0.0989809") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0192381") + max_val = float("0.106687") + mean = float("0.0454445") + std = float("0.0173454") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("-0.193068") + max_val = float("0.162514") + mean = float("-0.0494989") + std = float("0.0621228") + data = None 
+ + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.1095") + max_val = float("0.106952") + mean = float("-0.000308593") + std = float("0.00958619") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.683455") + max_val = float("0.560512") + mean = float("-0.183278") + std = float("0.262885") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("0.648031") + max_val = float("1.28146") + mean = float("0.926267") + std = float("0.127366") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.016628") + max_val = float("0.10579") + mean = float("0.035372") + std = float("0.0158467") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("-0.157678") + max_val = float("0.14856") + mean = float("-0.00222391") + std = float("0.0540448") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.177188") + max_val = float("0.183871") + mean = float("-0.000515019") + std = float("0.0129704") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-0.163588") + max_val = float("0.175471") + mean = float("0.0382599") + std = float("0.0656847") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.669358") + max_val = float("1.3841") + mean = float("0.957927") + std = float("0.134075") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.00489376") + max_val = float("0.0629722") + mean = float("0.0138758") + std = float("0.0105147") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("-0.133469") + max_val = float("0.126109") + mean = float("-0.0143674") + std = float("0.0411788") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.102132") + max_val = float("0.144586") + mean = float("-0.000330012") + std = float("0.00839787") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.339551") + max_val = float("0.0664494") + mean = float("-0.126606") + std = float("0.0575959") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.709393") + max_val = float("1.3156") + mean = float("0.850458") + std = float("0.0685706") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.010404") + max_val = float("0.122053") + mean = float("0.0280258") + std = float("0.0156642") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.155749") + max_val = float("0.136232") + mean = 
float("-0.0322454") + std = float("0.0428806") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.061982") + max_val = float("0.0831281") + mean = float("-0.000581908") + std = float("0.00881389") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-0.246965") + max_val = float("0.0630617") + mean = float("-0.104599") + std = float("0.0398804") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384] + dtype = "float32" + min_val = float("0.861512") + max_val = float("1.36674") + mean = float("1.03605") + std = float("0.053899") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [384] + dtype = "float32" + min_val = float("0.0107051") + max_val = float("0.2004") + mean = float("0.0241877") + std = float("0.0136582") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [384] + dtype = "float32" + min_val = float("-0.229998") + max_val = float("0.107698") + mean = float("-0.0688903") + std = float("0.0443671") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0758349") + max_val = float("0.0805799") + mean = float("-0.000901133") + std = float("0.00830266") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.163884") + max_val = float("-0.0178385") + mean = float("-0.0640727") + std = float("0.0247279") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192] + dtype = "float32" + min_val = float("0.863587") + max_val = float("1.00334") + mean = float("0.954617") + std = float("0.0217918") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("0.0034545") + max_val = float("0.038045") + mean = float("0.0120641") + std = float("0.00476242") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("-0.122529") + max_val = float("0.0865108") + mean = float("-0.0250934") + std = float("0.0391884") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0358277") + max_val = float("0.0353334") + mean = float("-0.000735129") + std = float("0.00560004") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.163884") + max_val = float("-0.0178385") + mean = float("-0.0640727") + std = float("0.0247279") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.93174") + max_val = float("1.03463") + mean = float("0.986386") + std = float("0.0213307") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("0.0167477") + max_val = float("0.203134") + mean = float("0.052805") + std = float("0.0263321") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192] + dtype = 
"float32" + min_val = float("-0.296003") + max_val = float("0.159338") + mean = float("-0.0399017") + std = float("0.0729902") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0289266") + max_val = float("0.0434229") + mean = float("-0.000136219") + std = float("0.00303263") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [192] + dtype = "float32" + min_val = float("-0.261885") + max_val = float("0.00328351") + mean = float("-0.0610575") + std = float("0.0375462") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [192] + dtype = "float32" + min_val = float("0.956086") + max_val = float("1.12873") + mean = float("1.02506") + std = float("0.0296596") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [192] + dtype = "float32" + min_val = float("0.061243") + max_val = float("0.513914") + mean = float("0.150437") + std = float("0.0684615") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [192] + dtype = "float32" + min_val = float("-0.452041") + max_val = float("0.606505") + mean = float("-0.0982439") + std = float("0.151449") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0310959") + max_val = float("0.0540347") + mean = float("-0.00014844") + std = float("0.00361873") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [192] + dtype = "float32" + min_val = float("-0.141558") + max_val = float("-0.00800961") + mean = float("-0.0529786") + std = float("0.0249763") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [192] + dtype = "float32" + min_val = float("0.959372") + max_val = float("1.04442") + mean = float("0.989644") + std = float("0.0122297") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("0.00251893") + max_val = float("0.0152974") + mean = float("0.00580933") + std = float("0.00203393") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("-0.0626737") + max_val = float("0.0473693") + mean = float("-0.0138723") + std = float("0.02048") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0311982") + max_val = float("0.0518735") + mean = float("-0.000434417") + std = float("0.00568024") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-0.141558") + max_val = float("-0.00800961") + mean = float("-0.0529786") + std = float("0.0249763") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192] + dtype = "float32" + min_val = float("0.97528") + max_val = float("1.08483") + mean = float("1.0105") + std = float("0.0198465") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("0.0100197") + max_val = float("0.0811647") + mean = float("0.0260735") + std = float("0.0117168") + data = None + + +class 
Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + dtype = "float32" + min_val = float("-0.155834") + max_val = float("0.0633611") + mean = float("-0.0359147") + std = float("0.0411457") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0282102") + max_val = float("0.0458931") + mean = float("-0.00012004") + std = float("0.00286929") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-0.176698") + max_val = float("-0.0141152") + mean = float("-0.0628199") + std = float("0.0255403") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192] + dtype = "float32" + min_val = float("0.958955") + max_val = float("1.17844") + mean = float("1.01695") + std = float("0.0292023") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("0.0500735") + max_val = float("0.644105") + mean = float("0.154604") + std = float("0.0711127") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("-0.96903") + max_val = float("0.298499") + mean = float("-0.203069") + std = float("0.162208") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0311857") + max_val = float("0.0544745") + mean = float("-0.00028947") + std = float("0.00373957") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-0.10185") + max_val = float("0.00233393") + mean = float("-0.0432126") + std = float("0.0179711") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("0.948317") + max_val = float("1.06055") + mean = float("0.999383") + std = float("0.0183237") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.00292581") + max_val = float("0.0259082") + mean = float("0.00678114") + std = float("0.00302636") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("-0.0659671") + max_val = float("0.0956013") + mean = float("-0.0137909") + std = float("0.0224992") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0276943") + max_val = float("0.0493058") + mean = float("-0.000437529") + std = float("0.00627464") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-0.10185") + max_val = float("0.00233393") + mean = float("-0.0432126") + std = float("0.0179711") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192] + dtype = "float32" + min_val = float("0.965464") + max_val = float("1.08549") + mean = float("1.00121") + std = float("0.0207436") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("0.0115852") + max_val = 
float("0.138506") + mean = float("0.0305611") + std = float("0.0162962") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("-0.185733") + max_val = float("0.112438") + mean = float("-0.0408639") + std = float("0.0494237") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0215845") + max_val = float("0.0364201") + mean = float("-0.00014749") + std = float("0.00293375") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-0.204494") + max_val = float("-0.0144044") + mean = float("-0.093129") + std = float("0.031367") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192] + dtype = "float32" + min_val = float("0.925869") + max_val = float("1.13227") + mean = float("1.03263") + std = float("0.0337128") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("0.0195444") + max_val = float("0.229094") + mean = float("0.0494256") + std = float("0.028838") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("-0.206747") + max_val = float("0.185165") + mean = float("-0.0332686") + std = float("0.0597757") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0430718") + max_val = float("0.053963") + mean = float("-0.000150791") + std = float("0.00461238") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.283525") + max_val = float("-0.0399851") + mean = float("-0.140694") + std = float("0.0427984") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("0.926335") + max_val = float("1.23804") + mean = float("1.04793") + std = float("0.0447197") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("0.00689615") + max_val = float("0.0800207") + mean = float("0.0149961") + std = float("0.00715212") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("-0.0804451") + max_val = float("0.263811") + mean = float("0.0374362") + std = float("0.0442538") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0642343") + max_val = float("0.0939171") + mean = float("-0.000263371") + std = float("0.00693971") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-0.259614") + max_val = float("0.0895134") + mean = float("-0.101258") + std = float("0.0626699") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192] + dtype = "float32" + min_val = float("0.943118") + max_val = float("1.4362") + mean = float("1.10653") + std = float("0.0692581") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + 
shape = [192] + dtype = "float32" + min_val = float("0.00794024") + max_val = float("0.0595908") + mean = float("0.0173921") + std = float("0.00611972") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("-0.0920628") + max_val = float("0.110728") + mean = float("0.0118426") + std = float("0.0357492") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0567609") + max_val = float("0.156081") + mean = float("-0.000269107") + std = float("0.00743497") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.294804") + max_val = float("-0.060543") + mean = float("-0.156882") + std = float("0.0456804") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.724859") + max_val = float("1.03323") + mean = float("0.863032") + std = float("0.0407231") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00784397") + max_val = float("0.104905") + mean = float("0.0334223") + std = float("0.014198") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.164346") + max_val = float("0.22268") + mean = float("-0.0758449") + std = float("0.0478351") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0291496") + max_val = float("0.0400009") + mean = float("-0.000547305") + std = float("0.00504041") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [768] + dtype = "float32" + min_val = float("-0.119154") + max_val = float("0.0119942") + mean = float("-0.064486") + std = float("0.0183131") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [768] + dtype = "float32" + min_val = float("0.944047") + max_val = float("1.15971") + mean = float("1.02729") + std = float("0.0265504") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [768] + dtype = "float32" + min_val = float("0.00748782") + max_val = float("0.0591047") + mean = float("0.0177536") + std = float("0.00575632") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [768] + dtype = "float32" + min_val = float("-0.159061") + max_val = float("0.168433") + mean = float("-0.0463487") + std = float("0.03515") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0549868") + max_val = float("0.0669411") + mean = float("-0.000287029") + std = float("0.00433313") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.185685") + max_val = float("0.10164") + mean = float("-0.0494086") + std = float("0.0232351") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.896036") + max_val = float("1.01635") + mean = float("0.978029") + std = float("0.0132066") + data = 
None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.00273274") + max_val = float("0.0612585") + mean = float("0.0150774") + std = float("0.00712838") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.094619") + max_val = float("0.082197") + mean = float("-0.0164625") + std = float("0.0288433") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0261294") + max_val = float("0.0423599") + mean = float("-0.000213738") + std = float("0.00356891") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.185685") + max_val = float("0.10164") + mean = float("-0.0494086") + std = float("0.0232351") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.894376") + max_val = float("1.09337") + mean = float("0.980494") + std = float("0.0138921") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + min_val = float("0.0200498") + max_val = float("0.390994") + mean = float("0.0969056") + std = float("0.0410626") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-0.296537") + max_val = float("0.174422") + mean = float("-0.0931523") + std = float("0.078707") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0405513") + max_val = float("0.0477433") + mean = float("-0.000144236") + std = float("0.0012981") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.0960122") + max_val = float("0.159226") + mean = float("-0.0165351") + std = float("0.0220682") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.948717") + max_val = float("1.24156") + mean = float("1.02009") + std = float("0.0323589") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("0.0193796") + max_val = float("0.205494") + mean = float("0.0716991") + std = float("0.0322584") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-0.243859") + max_val = float("0.256559") + mean = float("-0.036955") + std = float("0.0934812") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0314824") + max_val = float("0.0433857") + mean = float("-5.92754e-05") + std = float("0.00163745") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.11726") + max_val = float("0.0562716") + mean = float("-0.022678") + std = float("0.0172073") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("0.959882") + max_val = 
float("1.14934") + mean = float("1.02111") + std = float("0.0314671") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("0.0640868") + max_val = float("0.478792") + mean = float("0.196997") + std = float("0.0748147") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-3.06654") + max_val = float("2.27858") + mean = float("-0.0660014") + std = float("0.854655") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0296671") + max_val = float("0.0506577") + mean = float("4.95696e-05") + std = float("0.0029479") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0227352") + max_val = float("0.0393382") + mean = float("-0.00516489") + std = float("0.00791584") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.955146") + max_val = float("1.13147") + mean = float("0.993086") + std = float("0.0198871") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.00282349") + max_val = float("0.0174546") + mean = float("0.00695172") + std = float("0.00230227") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-0.112602") + max_val = float("0.0675238") + mean = float("-0.0392805") + std = float("0.0295428") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0241173") + max_val = float("0.0329422") + mean = float("-0.00048881") + std = float("0.00325498") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0227352") + max_val = float("0.0393382") + mean = float("-0.00516489") + std = float("0.00791584") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("0.942877") + max_val = float("1.15339") + mean = float("1.00357") + std = float("0.0304213") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("0.0174238") + max_val = float("0.105417") + mean = float("0.0402829") + std = float("0.0149511") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-0.302856") + max_val = float("0.138938") + mean = float("-0.115265") + std = float("0.070667") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00989848") + max_val = float("0.0236966") + mean = float("-0.000177304") + std = float("0.00125558") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0484119") + max_val = float("0.0216706") + mean = float("-0.00460322") + std = float("0.00967884") + data = None + + +class Program_weight_tensor_parameter_351: + name = 
"parameter_351" + shape = [384] + dtype = "float32" + min_val = float("0.963329") + max_val = float("1.21855") + mean = float("1.0185") + std = float("0.0242884") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("0.086614") + max_val = float("0.596778") + mean = float("0.21882") + std = float("0.0755323") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-1.15975") + max_val = float("1.29731") + mean = float("-0.335725") + std = float("0.308056") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0119886") + max_val = float("0.0184089") + mean = float("-0.00019169") + std = float("0.00145043") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.0328394") + max_val = float("0.0297512") + mean = float("0.00167897") + std = float("0.010186") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.982533") + max_val = float("1.0647") + mean = float("1.00477") + std = float("0.00875161") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.00176664") + max_val = float("0.0152747") + mean = float("0.00489013") + std = float("0.00193295") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-0.0739742") + max_val = float("0.158979") + mean = float("-0.027123") + std = float("0.0289474") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0150124") + max_val = float("0.0242325") + mean = float("-0.000351008") + std = float("0.00254457") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.0328394") + max_val = float("0.0297512") + mean = float("0.00167897") + std = float("0.0101861") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.972682") + max_val = float("1.06774") + mean = float("1.00444") + std = float("0.0130159") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("0.00954045") + max_val = float("0.0874858") + mean = float("0.0287656") + std = float("0.0113341") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-0.207832") + max_val = float("0.392453") + mean = float("-0.0789175") + std = float("0.0796675") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0093616") + max_val = float("0.0137836") + mean = float("-0.000126532") + std = float("0.000967028") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.0472583") + max_val = float("0.014755") + mean = float("-0.0123119") + std = 
float("0.0107245") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype = "float32" + min_val = float("0.975868") + max_val = float("1.10759") + mean = float("1.0142") + std = float("0.0167785") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("0.0118693") + max_val = float("0.0905819") + mean = float("0.0338104") + std = float("0.0120183") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-0.15617") + max_val = float("0.226779") + mean = float("-0.0315867") + std = float("0.0593419") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00935706") + max_val = float("0.0179717") + mean = float("-5.73374e-05") + std = float("0.00139814") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.0636534") + max_val = float("0.0504377") + mean = float("-0.0321673") + std = float("0.0147943") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.963242") + max_val = float("1.05925") + mean = float("1.01839") + std = float("0.0133969") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("0.0177396") + max_val = float("0.0873128") + mean = float("0.0347121") + std = float("0.0104923") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-0.16412") + max_val = float("0.352551") + mean = float("-0.0559099") + std = float("0.0586702") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0159351") + max_val = float("0.0411332") + mean = float("-0.000215983") + std = float("0.00323815") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0181465") + max_val = float("0.0407222") + mean = float("0.0138467") + std = float("0.0107829") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("1.02453") + max_val = float("1.13107") + mean = float("1.07749") + std = float("0.0183879") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.0271516") + max_val = float("0.110982") + mean = float("0.0512444") + std = float("0.0114884") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-0.211969") + max_val = float("0.0900462") + mean = float("-0.105197") + std = float("0.0486625") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0203775") + max_val = float("0.0291291") + mean = float("-0.000383976") + std = float("0.00445592") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = 
float("-3.76792") + max_val = float("-0.740644") + mean = float("-2.1918") + std = float("0.429728") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("1.6171") + max_val = float("4.43044") + mean = float("3.07732") + std = float("0.251667") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("0.00329864") + max_val = float("0.015225") + mean = float("0.00606563") + std = float("0.00145377") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024] + dtype = "float32" + min_val = float("-0.14414") + max_val = float("0.140198") + mean = float("-0.0464258") + std = float("0.0301041") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0402323") + max_val = float("0.0735797") + mean = float("-0.000459907") + std = float("0.00505835") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [768] + dtype = "float32" + min_val = float("-0.0141909") + max_val = float("0.000475638") + mean = float("-0.00283804") + std = float("0.00262288") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.120882") + max_val = float("0.123349") + mean = float("-0.00126009") + std = float("0.00351789") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.77367") + max_val = float("0.412602") + mean = float("-0.274258") + std = float("0.294059") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.235561") + max_val = float("2.08056") + mean = float("0.662284") + std = float("0.284534") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("0.000113873") + max_val = float("0.00209633") + mean = float("0.0004665") + std = float("0.000255457") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.125764") + max_val = float("0.101893") + mean = float("0.0212292") + std = float("0.0340142") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0286965") + max_val = float("0.035629") + mean = float("-0.000367399") + std = float("0.00386218") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.77367") + max_val = float("0.412602") + mean = float("-0.274258") + std = float("0.294059") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.351006") + max_val = float("2.87728") + mean = float("1.11184") + std = float("0.332465") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.000701995") + max_val = float("0.0163921") + mean = float("0.0029206") + std = float("0.00157333") + data = None + + +class 
Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.116824") + max_val = float("0.200745") + mean = float("0.0140369") + std = float("0.0396593") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0220804") + max_val = float("0.04297") + mean = float("-4.22647e-05") + std = float("0.00231045") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.62795") + max_val = float("0.0158542") + mean = float("-1.59121") + std = float("0.415017") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.47743") + max_val = float("1.87138") + mean = float("1.12676") + std = float("0.146879") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.113593") + max_val = float("0.422227") + mean = float("0.214738") + std = float("0.0498719") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-1.45531") + max_val = float("1.18213") + mean = float("-0.59336") + std = float("0.263014") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.022066") + max_val = float("0.0536152") + mean = float("-0.000285658") + std = float("0.00310489") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.93699") + max_val = float("1.08947") + mean = float("-0.561202") + std = float("0.377413") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.163474") + max_val = float("2.03267") + mean = float("0.56873") + std = float("0.230607") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("0.000165769") + max_val = float("0.00154379") + mean = float("0.000510825") + std = float("0.000208377") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0544925") + max_val = float("0.122403") + mean = float("0.0297331") + std = float("0.0242308") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0248092") + max_val = float("0.0409625") + mean = float("-0.000538948") + std = float("0.00372246") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.93699") + max_val = float("1.08947") + mean = float("-0.561202") + std = float("0.377413") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.580104") + max_val = float("2.20902") + mean = float("1.11957") + std = float("0.260079") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00139025") + max_val = float("0.0159313") + mean = 
float("0.00398625") + std = float("0.00139688") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.244735") + max_val = float("0.186015") + mean = float("0.0310846") + std = float("0.0489882") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0252079") + max_val = float("0.0526455") + mean = float("-8.39502e-05") + std = float("0.0024655") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.40135") + max_val = float("0.859232") + mean = float("-1.44206") + std = float("0.356507") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.363926") + max_val = float("1.90623") + mean = float("1.15014") + std = float("0.141336") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0806588") + max_val = float("0.336866") + mean = float("0.15364") + std = float("0.0396176") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.87307") + max_val = float("1.09774") + mean = float("-0.335248") + std = float("0.182532") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0263525") + max_val = float("0.0731513") + mean = float("-0.000293573") + std = float("0.00310694") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-1.88701") + max_val = float("0.687197") + mean = float("-0.470322") + std = float("0.394983") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.0809799") + max_val = float("2.10197") + mean = float("0.444459") + std = float("0.213484") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.000162128") + max_val = float("0.00346214") + mean = float("0.000611306") + std = float("0.00031867") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.129635") + max_val = float("0.106917") + mean = float("0.0440525") + std = float("0.0262248") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0352524") + max_val = float("0.0365241") + mean = float("-0.000741488") + std = float("0.00320875") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-1.88701") + max_val = float("0.687197") + mean = float("-0.470322") + std = float("0.394983") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.542247") + max_val = float("2.23189") + mean = float("1.09053") + std = float("0.258538") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + 
min_val = float("0.00196335") + max_val = float("0.0206155") + mean = float("0.00537647") + std = float("0.00219218") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.17294") + max_val = float("0.186353") + mean = float("0.048691") + std = float("0.0496443") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0202656") + max_val = float("0.0367709") + mean = float("-0.000105801") + std = float("0.00257938") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-2.18119") + max_val = float("0.357178") + mean = float("-1.40732") + std = float("0.272214") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.615126") + max_val = float("1.63076") + mean = float("1.12199") + std = float("0.10235") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("0.0563996") + max_val = float("0.373366") + mean = float("0.118212") + std = float("0.0407772") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.905849") + max_val = float("0.191543") + mean = float("-0.240798") + std = float("0.148646") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.019732") + max_val = float("0.0506193") + mean = float("-0.000249295") + std = float("0.00288566") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-2.91853") + max_val = float("2.39851") + mean = float("-0.744932") + std = float("0.666223") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("1.00709") + max_val = float("2.8987") + mean = float("1.92326") + std = float("0.268933") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00320772") + max_val = float("0.0174641") + mean = float("0.00683319") + std = float("0.0019151") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("-0.29818") + max_val = float("0.152926") + mean = float("0.0837471") + std = float("0.0403423") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.048687") + max_val = float("0.0700561") + mean = float("-0.000900026") + std = float("0.0068164") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.26289") + max_val = float("0.746403") + mean = float("-0.781495") + std = float("0.476677") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.922204") + max_val = float("2.88427") + mean = float("2.09564") + std = float("0.309969") + data = None + + +class Program_weight_tensor_parameter_439: + name = 
"parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.00130773") + max_val = float("0.00698116") + mean = float("0.00267608") + std = float("0.000673612") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-0.0821142") + max_val = float("0.105567") + mean = float("0.0508792") + std = float("0.0249406") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0425887") + max_val = float("0.0837173") + mean = float("-0.000481791") + std = float("0.00459332") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [768] + dtype = "float32" + min_val = float("-2.45623") + max_val = float("0.558987") + mean = float("-0.951715") + std = float("0.337813") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [768] + dtype = "float32" + min_val = float("0.470427") + max_val = float("1.80148") + mean = float("0.915598") + std = float("0.147913") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [768] + dtype = "float32" + min_val = float("0.00952444") + max_val = float("0.0754958") + mean = float("0.0195973") + std = float("0.00656106") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [768] + dtype = "float32" + min_val = float("-0.488016") + max_val = float("0.343295") + mean = float("0.0603457") + std = float("0.0842927") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.034856") + max_val = float("0.0488879") + mean = float("-0.00014407") + std = float("0.00294789") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.41737") + max_val = float("1.90927") + mean = float("-1.24224") + std = float("0.518743") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.415685") + max_val = float("1.6278") + mean = float("1.1229") + std = float("0.144719") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.00694087") + max_val = float("0.0321495") + mean = float("0.0139897") + std = float("0.00350457") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-0.196539") + max_val = float("0.094235") + mean = float("-0.0610599") + std = float("0.0516363") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0778304") + max_val = float("0.202597") + mean = float("-0.000771845") + std = float("0.00954957") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-0.0109842") + max_val = float("0.00224846") + mean = float("-0.00328449") + std = float("0.00288318") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.21807") + max_val = float("0.106954") + mean = float("-0.00207131") + 
std = float("0.00565368") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.92149") + max_val = float("0.337952") + mean = float("-0.371234") + std = float("0.319908") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.000870638") + max_val = float("2.25812") + mean = float("0.550758") + std = float("0.447397") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("5.1348e-07") + max_val = float("0.00288291") + mean = float("0.00059132") + std = float("0.000373062") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0367649") + max_val = float("0.0808075") + mean = float("0.0118125") + std = float("0.020077") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0338493") + max_val = float("0.0598473") + mean = float("-0.00053451") + std = float("0.00465335") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.92149") + max_val = float("0.337952") + mean = float("-0.371234") + std = float("0.319908") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.331666") + max_val = float("2.85547") + mean = float("1.19201") + std = float("0.517776") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.0016658") + max_val = float("0.0215238") + mean = float("0.00634517") + std = float("0.00269256") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.163366") + max_val = float("0.179365") + mean = float("0.0293621") + std = float("0.056096") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0312573") + max_val = float("0.0467973") + mean = float("-0.000183183") + std = float("0.00354666") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.92904") + max_val = float("-0.215016") + mean = float("-1.34795") + std = float("0.402008") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.692101") + max_val = float("2.01606") + mean = float("1.16745") + std = float("0.167372") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0945369") + max_val = float("0.729788") + mean = float("0.230533") + std = float("0.0802558") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-3.63023") + max_val = float("1.63866") + mean = float("-0.27364") + std = float("0.413243") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = 
float("-0.0311555") + max_val = float("0.0439246") + mean = float("-0.000266211") + std = float("0.00418642") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.93292") + max_val = float("0.441324") + mean = float("-0.309813") + std = float("0.31146") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("1.21421e-05") + max_val = float("1.74219") + mean = float("0.40159") + std = float("0.316726") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("2.80133e-10") + max_val = float("0.00251043") + mean = float("0.000554603") + std = float("0.00035378") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0758057") + max_val = float("0.0487678") + mean = float("0.012343") + std = float("0.0146262") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0353994") + max_val = float("0.0460282") + mean = float("-0.000533793") + std = float("0.00437157") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.93292") + max_val = float("0.441324") + mean = float("-0.309813") + std = float("0.31146") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.42435") + max_val = float("2.26518") + mean = float("1.12607") + std = float("0.378251") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.00284822") + max_val = float("0.0152635") + mean = float("0.00699763") + std = float("0.00239705") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.167984") + max_val = float("0.143853") + mean = float("0.0389637") + std = float("0.0425518") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0286213") + max_val = float("0.0650864") + mean = float("-0.000209064") + std = float("0.00388037") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.52761") + max_val = float("-0.176828") + mean = float("-1.32846") + std = float("0.439749") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.648718") + max_val = float("1.69614") + mean = float("1.18327") + std = float("0.16502") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0748949") + max_val = float("0.496601") + mean = float("0.173") + std = float("0.0538659") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-2.67916") + max_val = float("0.470252") + mean = float("-0.16537") + std = float("0.290787") + data = None + + +class Program_weight_tensor_parameter_483: + name = 
"parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0343915") + max_val = float("0.0481415") + mean = float("-0.000316572") + std = float("0.00445199") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.76951") + max_val = float("0.366199") + mean = float("-0.296977") + std = float("0.323972") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("1.63141e-05") + max_val = float("1.66338") + mean = float("0.314389") + std = float("0.259103") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("1.00826e-10") + max_val = float("0.00233588") + mean = float("0.000436101") + std = float("0.000314553") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0580651") + max_val = float("0.107613") + mean = float("0.0149725") + std = float("0.01831") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0464034") + max_val = float("0.0464563") + mean = float("-0.000581242") + std = float("0.00427597") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-1.76951") + max_val = float("0.366199") + mean = float("-0.296977") + std = float("0.323972") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.376033") + max_val = float("1.96542") + mean = float("1.05311") + std = float("0.336647") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00281438") + max_val = float("0.0138807") + mean = float("0.00641947") + std = float("0.00197932") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.141749") + max_val = float("0.124064") + mean = float("0.0413732") + std = float("0.0403923") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0318205") + max_val = float("0.0481358") + mean = float("-0.000199073") + std = float("0.00403277") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.55005") + max_val = float("0.122959") + mean = float("-1.28661") + std = float("0.420119") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.65695") + max_val = float("1.74033") + mean = float("1.14805") + std = float("0.166602") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0600779") + max_val = float("0.27472") + mean = float("0.119173") + std = float("0.0382636") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.7121") + max_val = float("0.282664") + mean = float("-0.157492") + std = float("0.210747") + 
data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0337097") + max_val = float("0.0561925") + mean = float("-0.000355772") + std = float("0.00453268") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-2.0876") + max_val = float("0.454829") + mean = float("-0.308381") + std = float("0.365164") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("4.58655e-06") + max_val = float("0.678706") + mean = float("0.185877") + std = float("0.145677") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("8.47064e-12") + max_val = float("0.00164074") + mean = float("0.000297303") + std = float("0.000229035") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0618719") + max_val = float("0.0484143") + mean = float("0.009817") + std = float("0.0152548") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.022879") + max_val = float("0.047727") + mean = float("-0.000391334") + std = float("0.00380308") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.0876") + max_val = float("0.454829") + mean = float("-0.308381") + std = float("0.365164") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.395584") + max_val = float("1.92978") + mean = float("0.959637") + std = float("0.305366") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00220796") + max_val = float("0.0165287") + mean = float("0.00648062") + std = float("0.00216628") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0969257") + max_val = float("0.153187") + mean = float("0.0398628") + std = float("0.0461512") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0386304") + max_val = float("0.0466633") + mean = float("-0.000193407") + std = float("0.0042258") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.82558") + max_val = float("-0.1652") + mean = float("-1.28207") + std = float("0.428605") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.762073") + max_val = float("1.48094") + mean = float("1.12816") + std = float("0.13623") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0397243") + max_val = float("0.172477") + mean = float("0.0907787") + std = float("0.026348") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-1.35787") + max_val = 
float("0.21585") + mean = float("-0.146323") + std = float("0.174381") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0481387") + max_val = float("0.0518744") + mean = float("-0.000372327") + std = float("0.00459266") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21602") + max_val = float("0.408845") + mean = float("-0.268639") + std = float("0.326037") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("1.21361e-07") + max_val = float("0.671038") + mean = float("0.165778") + std = float("0.128782") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("5.10666e-14") + max_val = float("0.00122479") + mean = float("0.000286236") + std = float("0.000223959") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0473146") + max_val = float("0.0612585") + mean = float("0.00982243") + std = float("0.0167813") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0449688") + max_val = float("0.0471178") + mean = float("-0.000356672") + std = float("0.00380026") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21602") + max_val = float("0.408845") + mean = float("-0.268639") + std = float("0.326037") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.341979") + max_val = float("1.57313") + mean = float("0.856243") + std = float("0.26693") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00232583") + max_val = float("0.0129819") + mean = float("0.00609533") + std = float("0.00205265") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.127749") + max_val = float("0.195926") + mean = float("0.0466017") + std = float("0.051466") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0366939") + max_val = float("0.0434549") + mean = float("-0.000195847") + std = float("0.00419905") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-2.55529") + max_val = float("-0.225479") + mean = float("-1.29624") + std = float("0.412812") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.653378") + max_val = float("1.46586") + mean = float("1.10245") + std = float("0.128553") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0342008") + max_val = float("0.175266") + mean = float("0.0690084") + std = float("0.021438") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + 
shape = [192] + dtype = "float32" + min_val = float("-0.545223") + max_val = float("0.228578") + mean = float("-0.119097") + std = float("0.147888") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0706133") + max_val = float("0.0735169") + mean = float("-0.000353042") + std = float("0.00457192") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.25126") + max_val = float("0.497063") + mean = float("-0.19574") + std = float("0.286663") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("6.14606e-05") + max_val = float("1.52716") + mean = float("0.227275") + std = float("0.219438") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("2.64489e-09") + max_val = float("0.0123289") + mean = float("0.000814775") + std = float("0.00130349") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.0645179") + max_val = float("0.115089") + mean = float("0.0169266") + std = float("0.0221153") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0561284") + max_val = float("0.045268") + mean = float("-0.000693627") + std = float("0.00467943") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-1.25126") + max_val = float("0.497063") + mean = float("-0.19574") + std = float("0.286663") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.36186") + max_val = float("1.38711") + mean = float("0.774269") + std = float("0.223592") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00350209") + max_val = float("0.0281618") + mean = float("0.0105941") + std = float("0.00430879") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.112878") + max_val = float("0.226915") + mean = float("0.0615516") + std = float("0.0598904") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0529325") + max_val = float("0.0484814") + mean = float("-0.00028006") + std = float("0.00409236") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-1.93323") + max_val = float("-0.218148") + mean = float("-1.17854") + std = float("0.324229") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("0.740518") + max_val = float("1.5803") + mean = float("1.10697") + std = float("0.141844") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("0.0218969") + max_val = float("0.148975") + mean = float("0.0589305") + std = float("0.0225134") + data = None + + 
data = None +
+class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-1.10111") + max_val = float("0.212335") + mean = float("-0.0771266") + std = float("0.153293") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0698774") + max_val = float("0.0843485") + mean = float("-0.000278324") + std = float("0.00430405") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.86663") + max_val = float("1.61614") + mean = float("-0.052084") + std = float("0.754297") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.386311") + max_val = float("2.0202") + mean = float("0.957762") + std = float("0.228222") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.0141214") + max_val = float("0.117926") + mean = float("0.0377791") + std = float("0.016658") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-0.254428") + max_val = float("0.445126") + mean = float("-0.0589302") + std = float("0.0824672") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.116756") + max_val = float("0.117417") + mean = float("-0.000787399") + std = float("0.00957888") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.99102") + max_val = float("1.1915") + mean = float("0.0645639") + std = float("0.651199") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.897733") + max_val = float("5.48221") + mean = float("1.9362") + std = float("0.914173") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.00697074") + max_val = float("0.0731968") + mean = float("0.0245477") + std = float("0.00941073") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-0.184054") + max_val = float("0.137175") + mean = float("-0.0287211") + std = float("0.0644716") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0772494") + max_val = float("0.114244") + mean = float("-0.000650866") + std = float("0.00878871") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [384] + dtype = "float32" + min_val = float("-2.88389") + max_val = float("1.29563") + mean = float("-0.324997") + std = float("0.576948") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [384] + dtype = "float32" + min_val = float("0.638494") + max_val = float("2.42125") + mean = float("1.16117") + std = float("0.261241") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [384] + dtype = "float32" + min_val = float("0.0134979") + max_val = float("0.166246") + mean = 
float("0.0370424") + std = float("0.0197515") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [384] + dtype = "float32" + min_val = float("-0.453729") + max_val = float("0.277397") + mean = float("0.0337569") + std = float("0.0859813") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.072596") + max_val = float("0.0817311") + mean = float("-0.000132782") + std = float("0.00460665") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [256] + dtype = "float32" + min_val = float("-2.17322") + max_val = float("1.35074") + mean = float("-0.983607") + std = float("0.560679") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [256] + dtype = "float32" + min_val = float("0.526274") + max_val = float("1.72195") + mean = float("1.09496") + std = float("0.181389") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [256] + dtype = "float32" + min_val = float("0.00288251") + max_val = float("0.0395874") + mean = float("0.0112768") + std = float("0.00506366") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [256] + dtype = "float32" + min_val = float("-0.27031") + max_val = float("0.252979") + mean = float("-0.0499906") + std = float("0.075516") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.229823") + max_val = float("0.188979") + mean = float("-0.000927787") + std = float("0.0152934") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-0.0179576") + max_val = float("0.00298543") + mean = float("-0.00521571") + std = float("0.00462341") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.300325") + max_val = float("0.281667") + mean = float("-0.00372535") + std = float("0.0110562") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.88531") + max_val = float("0.409038") + mean = float("-0.263955") + std = float("0.418007") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.0749649") + max_val = float("3.31752") + mean = float("0.586006") + std = float("0.696357") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.15455e-05") + max_val = float("0.00164022") + mean = float("0.000529873") + std = float("0.00034909") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0612176") + max_val = float("0.0781642") + mean = float("0.00988991") + std = float("0.0269136") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0589256") + max_val = float("0.101755") + mean = float("-0.000750476") + std = float("0.00819084") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + 
dtype = "float32" + min_val = float("-1.88531") + max_val = float("0.409038") + mean = float("-0.263955") + std = float("0.418007") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.327865") + max_val = float("5.49328") + mean = float("1.04392") + std = float("0.907663") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.000859294") + max_val = float("0.0115223") + mean = float("0.00425077") + std = float("0.00202637") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.215658") + max_val = float("0.218361") + mean = float("0.0259055") + std = float("0.0809568") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0431934") + max_val = float("0.0666906") + mean = float("-0.000217486") + std = float("0.00594071") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-2.41465") + max_val = float("-0.0345912") + mean = float("-1.27033") + std = float("0.444347") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.478829") + max_val = float("1.74038") + mean = float("0.924321") + std = float("0.176848") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0809096") + max_val = float("0.403764") + mean = float("0.160369") + std = float("0.0549566") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-4.59399") + max_val = float("0.805277") + mean = float("-0.216618") + std = float("0.583175") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.144287") + max_val = float("0.1046") + mean = float("-0.000407875") + std = float("0.00749342") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.40062") + max_val = float("0.433276") + mean = float("-0.187401") + std = float("0.33895") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.00384237") + max_val = float("1.87219") + mean = float("0.411157") + std = float("0.370169") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("4.572e-06") + max_val = float("0.00121943") + mean = float("0.000468589") + std = float("0.000237229") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0585515") + max_val = float("0.070879") + mean = float("0.0112879") + std = float("0.0224624") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0491457") + max_val = float("0.0933655") + mean = float("-0.00077123") + std = float("0.0077478") + data = None + + +class 
Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.40062") + max_val = float("0.433276") + mean = float("-0.187401") + std = float("0.33895") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.340762") + max_val = float("2.27512") + mean = float("0.862526") + std = float("0.431743") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00205042") + max_val = float("0.0191049") + mean = float("0.00504139") + std = float("0.00228906") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.196305") + max_val = float("0.16606") + mean = float("0.0448386") + std = float("0.0616071") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.065221") + max_val = float("0.0604921") + mean = float("-0.000354783") + std = float("0.0061643") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.35876") + max_val = float("0.28762") + mean = float("-1.21687") + std = float("0.561261") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.417055") + max_val = float("1.89869") + mean = float("1.01507") + std = float("0.241827") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.0525853") + max_val = float("0.339799") + mean = float("0.115279") + std = float("0.0450278") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-1.14861") + max_val = float("0.724198") + mean = float("-0.1544") + std = float("0.333002") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.161457") + max_val = float("0.151403") + mean = float("-0.00059058") + std = float("0.00763228") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-1.27266") + max_val = float("0.581227") + mean = float("-0.160038") + std = float("0.285784") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("1.65857e-05") + max_val = float("1.24222") + mean = float("0.291878") + std = float("0.195391") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("4.12937e-11") + max_val = float("0.00213141") + mean = float("0.00070626") + std = float("0.000412402") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0536044") + max_val = float("0.0491687") + mean = float("0.00877341") + std = float("0.0177895") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0485857") + max_val = float("0.0605225") + mean = float("-0.000686591") + std 
= float("0.00777864") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-1.27266") + max_val = float("0.581227") + mean = float("-0.160038") + std = float("0.285784") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.228409") + max_val = float("1.64162") + mean = float("0.722635") + std = float("0.264964") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00119138") + max_val = float("0.0176354") + mean = float("0.00744115") + std = float("0.00313205") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.135314") + max_val = float("0.168187") + mean = float("0.0315588") + std = float("0.0526597") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0704485") + max_val = float("0.070591") + mean = float("-0.000300029") + std = float("0.00633475") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-3.66192") + max_val = float("0.205666") + mean = float("-1.16932") + std = float("0.590023") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.512036") + max_val = float("2.13116") + mean = float("1.02708") + std = float("0.242015") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.041166") + max_val = float("0.162924") + mean = float("0.0835089") + std = float("0.0213025") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.998543") + max_val = float("0.640256") + mean = float("-0.0591147") + std = float("0.277502") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.107751") + max_val = float("0.142301") + mean = float("-0.00045618") + std = float("0.00773742") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.931268") + max_val = float("0.407158") + mean = float("-0.216176") + std = float("0.275758") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("2.71831e-05") + max_val = float("1.34688") + mean = float("0.301193") + std = float("0.211171") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("2.23456e-10") + max_val = float("0.00194358") + mean = float("0.000595693") + std = float("0.000345868") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0477274") + max_val = float("0.0749525") + mean = float("0.0111517") + std = float("0.0213627") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0699687") + max_val = 
float("0.0559414") + mean = float("-0.000879362") + std = float("0.00824677") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.931268") + max_val = float("0.407158") + mean = float("-0.216176") + std = float("0.275758") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.141304") + max_val = float("1.78555") + mean = float("0.707751") + std = float("0.285502") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00138566") + max_val = float("0.0171802") + mean = float("0.0064867") + std = float("0.00280367") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.141781") + max_val = float("0.230423") + mean = float("0.0445566") + std = float("0.061652") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.062371") + max_val = float("0.0600486") + mean = float("-0.000387805") + std = float("0.00646949") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-3.19828") + max_val = float("0.0449738") + mean = float("-1.10671") + std = float("0.512578") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.557682") + max_val = float("1.70921") + mean = float("0.988261") + std = float("0.180548") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.0333738") + max_val = float("0.371229") + mean = float("0.0699845") + std = float("0.0453232") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-3.49796") + max_val = float("0.702257") + mean = float("-0.0770272") + std = float("0.422514") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0625886") + max_val = float("0.0641362") + mean = float("-0.000520221") + std = float("0.00749421") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.97494") + max_val = float("0.654939") + mean = float("-0.172363") + std = float("0.268928") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0441875") + max_val = float("1.22339") + mean = float("0.290183") + std = float("0.187519") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("8.90461e-05") + max_val = float("0.00484309") + mean = float("0.0013165") + std = float("0.000858291") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0430826") + max_val = float("0.0611931") + mean = float("0.0109949") + std = float("0.0221652") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype 
= "float32" + min_val = float("-0.0592519") + max_val = float("0.0816286") + mean = float("-0.00107855") + std = float("0.00899059") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.97494") + max_val = float("0.654938") + mean = float("-0.172363") + std = float("0.268928") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.207123") + max_val = float("1.47404") + mean = float("0.603234") + std = float("0.233008") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.00153436") + max_val = float("0.0321997") + mean = float("0.0113381") + std = float("0.00513998") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.126439") + max_val = float("0.186672") + mean = float("0.04017") + std = float("0.0613092") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0706592") + max_val = float("0.04784") + mean = float("-0.000434798") + std = float("0.00634623") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-3.53276") + max_val = float("0.173878") + mean = float("-1.04724") + std = float("0.571594") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.59827") + max_val = float("2.39877") + mean = float("1.05533") + std = float("0.205288") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0232114") + max_val = float("0.15752") + mean = float("0.0513191") + std = float("0.0222263") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.833358") + max_val = float("0.443566") + mean = float("-0.109677") + std = float("0.206745") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0623116") + max_val = float("0.0791851") + mean = float("-0.000467548") + std = float("0.00758332") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-0.693664") + max_val = float("0.60174") + mean = float("-0.0970031") + std = float("0.28066") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.0447252") + max_val = float("1.29095") + mean = float("0.303908") + std = float("0.204524") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.000328896") + max_val = float("0.0265135") + mean = float("0.0045151") + std = float("0.0050081") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.0354704") + max_val = float("0.0397808") + mean = float("0.00127535") + std = float("0.014201") + data = None + + +class Program_weight_tensor_parameter_645: + name = 
"parameter_645" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.106362") + max_val = float("0.0764622") + mean = float("-0.00137677") + std = float("0.0109433") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-0.693664") + max_val = float("0.60174") + mean = float("-0.0970031") + std = float("0.28066") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.114798") + max_val = float("1.42975") + mean = float("0.534319") + std = float("0.282908") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00898997") + max_val = float("0.141359") + mean = float("0.0314926") + std = float("0.023737") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.174314") + max_val = float("0.10829") + mean = float("0.00620304") + std = float("0.0462639") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0984757") + max_val = float("0.0575998") + mean = float("-0.000479545") + std = float("0.00605809") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-2.17816") + max_val = float("0.51698") + mean = float("-0.845377") + std = float("0.487645") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.77161") + max_val = float("2.3602") + mean = float("1.25988") + std = float("0.224696") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.015383") + max_val = float("0.178267") + mean = float("0.0472873") + std = float("0.0263198") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-0.979672") + max_val = float("0.598382") + mean = float("-0.00991227") + std = float("0.26453") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.18655") + max_val = float("0.184723") + mean = float("-0.000236166") + std = float("0.00788694") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-3.43148") + max_val = float("1.98617") + mean = float("0.506211") + std = float("0.880051") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("0.236311") + max_val = float("2.35362") + mean = float("0.651878") + std = float("0.28754") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.0117247") + max_val = float("0.231744") + mean = float("0.0510382") + std = float("0.0375861") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-0.343473") + max_val = float("0.411073") + mean = float("-0.0320342") + std = float("0.108969") + data = None + + +class 
Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.192027") + max_val = float("0.222182") + mean = float("-0.000918719") + std = float("0.0165388") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-4.88353") + max_val = float("1.51806") + mean = float("0.31948") + std = float("1.02577") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.586539") + max_val = float("6.9165") + mean = float("1.75181") + std = float("1.27953") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("0.00633015") + max_val = float("0.206462") + mean = float("0.0361831") + std = float("0.0337311") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-0.255696") + max_val = float("0.345495") + mean = float("0.0041729") + std = float("0.11182") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.127167") + max_val = float("0.194115") + mean = float("-0.000326139") + std = float("0.0148707") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [192] + dtype = "float32" + min_val = float("-2.32833") + max_val = float("1.72534") + mean = float("-0.148212") + std = float("0.74712") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [192] + dtype = "float32" + min_val = float("0.622468") + max_val = float("2.80886") + mean = float("1.10433") + std = float("0.28203") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [192] + dtype = "float32" + min_val = float("0.0148764") + max_val = float("0.296694") + mean = float("0.0602016") + std = float("0.0512632") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [192] + dtype = "float32" + min_val = float("-0.484334") + max_val = float("0.393152") + mean = float("-0.0615423") + std = float("0.141595") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0828645") + max_val = float("0.112082") + mean = float("-0.000180181") + std = float("0.00758201") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [128] + dtype = "float32" + min_val = float("-2.79585") + max_val = float("1.93556") + mean = float("-0.737006") + std = float("0.682944") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [128] + dtype = "float32" + min_val = float("0.285504") + max_val = float("2.14283") + mean = float("1.05763") + std = float("0.241319") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [128] + dtype = "float32" + min_val = float("0.00198275") + max_val = float("0.0308593") + mean = float("0.00749597") + std = float("0.0040783") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [128] + dtype = "float32" + min_val = float("-0.24466") + max_val = float("0.222894") + mean = float("0.00568367") + std = 
float("0.0965137") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.166405") + max_val = float("0.175163") + mean = float("-0.00156848") + std = float("0.0231405") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-0.0184228") + max_val = float("0.000685844") + mean = float("-0.00754499") + std = float("0.00536267") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.328782") + max_val = float("0.139858") + mean = float("-0.00742733") + std = float("0.0181488") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0694928") + max_val = float("0.0716001") + mean = float("-0.00189844") + std = float("0.013508") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0528373") + max_val = float("0.0588766") + mean = float("-0.000550354") + std = float("0.0107283") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0695434") + max_val = float("0.0819049") + mean = float("-0.000542189") + std = 
float("0.0123074") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0874883") + max_val = float("0.0747463") + mean = float("-0.00169657") + std = float("0.0148391") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0635524") + max_val = float("0.0580463") + mean = float("-0.000739745") + std = float("0.0110062") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.107019") + max_val = float("0.079425") + mean = float("-0.000460802") + std = float("0.0127916") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0858132") + max_val = float("0.0745154") + mean = float("-0.00238642") + std = float("0.0175588") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0846406") + max_val = float("0.0813299") + mean = float("-0.000758941") + std = float("0.0113941") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0937415") + max_val = float("0.100266") + mean = float("-0.000412186") + std = float("0.0133653") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.161222") + max_val = float("0.143543") + mean = float("-0.00258366") + std = float("0.0251244") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
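+# Note: the surrounding 48-channel records, and the 64- and 32-channel records
+# further below, carry only min_val = 0 and max_val = 0.5 with no mean, std, or
+# data fields; they appear to be placeholder statistics rather than measured
+# weight values.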
+class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.154232") + max_val = float("0.156181") + mean = float("-0.000860315") + std = float("0.0235534") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [96] + dtype = "float32" + min_val = float("-3.44046") + max_val = float("3.33878") + mean = float("0.314975") + std = float("1.15279") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [96] + dtype = "float32" + min_val = float("0.897762") + max_val = float("4.77852") + mean = float("1.94282") + std = float("0.714741") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [96] + dtype = "float32" + min_val = float("0.904221") + max_val = float("17.724") + mean = float("3.51757") + std = float("2.84263") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [96] + dtype = "float32" + min_val = float("-3.52556") + max_val = float("2.86509") + mean = float("-0.490399") + std = float("1.01715") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.119905") + max_val = float("0.107669") + mean = float("-0.000589641") + std = float("0.0132205") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.142947") + max_val = float("0.172539") + mean = float("-0.00107772") + std = float("0.0206893") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.249415") + max_val = float("0.211949") + mean = float("-0.000841281") + std = float("0.0267141") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [32] + 
dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name = "parameter_751" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.293046") + max_val = float("0.285575") + mean = float("-0.00286731") + std = float("0.0673331") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..babd6567f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py new file mode 100644 index 000000000..b762e8b43 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 4116, 1] + dtype = "float32" + min_val = float("4.23157e-09") + max_val = float("0.54831") + mean = float("0.0392222") + std = float("0.0590982") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4116, 68] + dtype = "float32" + min_val = float("-7.32965") + max_val = float("11.6819") + mean = float("3.41484e-05") + std = float("1.50283") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4116, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("444.0") + mean = float("224.0") + std = float("129.279") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4116, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py new file mode 100644 index 000000000..4bd73f18f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3, data_4): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_3, data_4) + del data_3 + + # pd_op.shape64: 
(3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_2, stack_0) + del data_2, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 
= paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (2x-1x1xf32) <- (2x-1x1xf32) + share_data__0 = data_1.detach() + del data_1 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_4, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..665cc1cb7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +4c194f1b47af22d5dbdc2dc8f63cad5abcfa9a3548b3439131bcdfe6c15f25bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py new file mode 100644 index 000000000..cf0c3a83e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py @@ -0,0 +1,130 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [23] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [3024] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [23] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 3024] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0825066") + std = float("0.285748") + data = None + + +class 
Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 23, 3024] + dtype = "float32" + max_val = float("0.909315") + mean = float("0.0172789") + std = float("0.0769687") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 23, 3024] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00358724") + std = float("0.0597861") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 23, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 23, 4] + dtype = "float32" + max_val = float("383.232") + mean = float("161.173") + std = float("98.87") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 23, 3024] + dtype = "float32" + max_val = float("0.356873") + mean = float("0.000194909") + std = float("0.00436737") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py new file mode 100644 index 000000000..0eefa8f8c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py @@ -0,0 +1,258 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 + ) + del argmax_0, data_2 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + 
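+        # Hedged reading of the remaining ops (reviewer comment; the assigner
+        # naming is an assumption, not taken from the exported graph):
+        # `where_0` selects, per anchor, a one-hot row over the ground-truth
+        # objects where `tile_0` is true and falls back to `data_5` elsewhere.
+        # The ops below sum it over the gt axis to flag anchors with a positive
+        # match, take argmax to get the matched gt index, offset that index by
+        # batch_index * num_gt, and then gather the matched gt class ids
+        # (data_7) and boxes (data_8) for every anchor, substituting a constant
+        # background value where no match exists.  This resembles the gather
+        # step of a task-aligned (TAL/ATSS-style) label assigner.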
+ # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) + del data_7 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + 
) + del full_8 + + # pd_op.full: (1xi64) <- () + full_9 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..ad26781d8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +99bd1d3460daced0b08a14965df1673734764218c3df6e74b7dd522b3b824b95 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py new file mode 100644 index 000000000..9285c370d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py @@ -0,0 +1,103 @@ +class Program_weight_tensor_data_0: + name = 
"data_0" + shape = [] + dtype = "int64" + data = [13] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6069] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00205965") + std = float("0.0453366") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 13, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0267754") + std = float("0.161426") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 13, 4] + dtype = "float32" + max_val = float("544.0") + mean = float("318.798") + std = float("154.54") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("0.254782") + mean = float("0.000128415") + std = float("0.00278824") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("0.906178") + mean = float("0.0215299") + std = float("0.0809394") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py new file mode 100644 index 000000000..2c208d4ad --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py @@ -0,0 +1,195 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + 
reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (1xi64) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 
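+
+        # At this point multiply_4 holds the masked score (data_7 * data_2)
+        # normalized by its per-ground-truth maximum over anchors (stabilized by
+        # the 1e-09 epsilon applied above) and rescaled by the corresponding
+        # maximum of the second masked score (data_8 * data_2). The ops that
+        # follow take its max over the ground-truth axis (-2) and use the result
+        # to weight the selected one-hot channel; interpreting this as the
+        # normalized alignment metric of a PP-YOLOE-style assigner is an
+        # assumption, not something stated in the captured graph.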
+ + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json new file mode 100644 index 000000000..cf4d2108b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/input_meta.py new file mode 100644 index 000000000..67b740844 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 2100] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 2100, 4] + dtype = "float32" + min_val = float("-9.15223") + max_val = float("47.8129") + mean = float("17.428") + std = float("12.8723") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 2100, 4] + dtype = "float32" + min_val = float("0.170455") + max_val = float("40.0") + mean = float("14.6217") + std = float("11.8845") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2100, 1] + dtype = "float32" + max_val = float("0.887648") + mean = float("0.0225166") + std = float("0.109165") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [94.5698] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 2100, 68] + dtype = "float32" + min_val = float("-6.9182") + max_val = float("13.8683") + mean = float("3.2119e-05") + std = float("1.64095") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2100, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("39.5") + mean = float("17.381") + std = float("11.4522") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py new file mode 100644 index 000000000..7adfad7cf --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py @@ -0,0 +1,509 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (2x2100xi32) <- (2x2100xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (2x2100x1xi32) <- (2x2100xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (2x2100x4xi32) <- (2x2100x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (2x2100x4xb) <- (2x2100x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (2x2100x4xf32, 2x2100x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (2x2100x4xf32, 2x2100x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (2x2100xf32) <- (2x2100x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (2x2100xf32, 2x2100xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, 
-1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + 
subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (2x2100x1xb) <- (2x2100xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (2x2100x1xi32) <- (2x2100x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (2x2100x68xi32) <- (2x2100x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (2x2100x68xb) <- (2x2100x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (2x2100x68xf32, 2x2100x68xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x2100x2xf32, 2x2100x2xf32]) <- (2x2100x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (2x2100x2xf32, 2x2100x2xf32) <- ([2x2100x2xf32, 2x2100x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (2x2100x2xf32) <- (2100x2xf32, 2x2100x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (2x2100x2xf32) <- (2x2100x2xf32, 2100x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x2100x2xf32, 2x2100x2xf32]) <- (2x2100x2xf32, 2x2100x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (2x2100x4xf32) <- ([2x2100x2xf32, 2x2100x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x2100x4xf32) <- (2x2100x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # 
pd_op.masked_select: (-1xf32) <- (2x2100x4xf32, 2x2100x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + 
cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py new file mode 100644 index 000000000..7bd7807fe --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 768, 14, 14] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.1806") + mean = float("0.192012") + std = float("0.569649") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 384, 28, 28] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("25.4887") + mean = float("0.232567") + std = float("0.845376") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 192, 56, 56] + dtype = "float32" + min_val = float("-0.278465") + 
max_val = float("28.7635") + mean = float("0.345133") + std = float("0.91854") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py new file mode 100644 index 000000000..bdcca8e53 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, 
float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # 
builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: 
(-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = 
full_int_array_4 + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x768x-1x-1xf32, 4x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- 
(2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + 
# pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- 
(2x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32]) <- (2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x-1x4xf32) <- ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + 
sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py new file mode 100644 index 000000000..c7e0aa45d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00925576") + max_val = float("0.0373948") + mean = float("8.72824e-08") + std = float("0.00862349") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.143827") + max_val = float("0.168304") + mean = float("5.68543e-08") + std = float("0.00711119") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0740156") + max_val = float("0.254426") + mean = float("0.0633438") + std = float("0.0574767") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.838238") + max_val = float("1.78552") + mean = float("1.2918") + std = float("0.191195") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000822901") + max_val = float("0.063123") + mean = float("0.0112077") + std = float("0.00842708") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.115789") + max_val = float("0.122016") + mean = float("-0.00147854") + std = float("0.0384486") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0602355") + max_val = float("0.0853349") + mean = float("-0.000554639") + std = float("0.00702224") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00462773") + max_val = float("0.0060156") + mean = float("4.24223e-05") + std = float("0.0019278") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0100771") + max_val = float("0.01135") + mean = float("-7.39524e-06") + std = float("0.00128718") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.344983") + max_val = float("0.0528926") + mean = float("-0.0184469") + std = float("0.0468232") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.445875") + max_val = float("1.50051") + mean = float("0.404315") + std = float("0.335424") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.959458") + max_val = float("2.22908") + mean 
= float("1.3733") + std = float("0.176915") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.00434037") + max_val = float("26.8554") + mean = float("0.524987") + std = float("2.35391") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.548344") + max_val = float("2.67378") + mean = float("0.0899683") + std = float("0.32715") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.725619") + max_val = float("0.454136") + mean = float("0.00160697") + std = float("0.0255239") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0117797") + max_val = float("0.0160663") + mean = float("-8.90954e-05") + std = float("0.00280426") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0350776") + max_val = float("0.0312972") + mean = float("2.94677e-05") + std = float("0.00183097") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00469375") + max_val = float("0.0194489") + mean = float("6.58692e-08") + std = float("0.00452283") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.082678") + max_val = float("0.109438") + mean = float("3.68454e-08") + std = float("0.00410717") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0150875") + max_val = float("0.107487") + mean = float("0.0329244") + std = float("0.0178576") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("1.00854") + max_val = float("1.29001") + mean = float("1.14543") + std = float("0.0510595") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000224402") + max_val = float("0.554167") + mean = float("0.0237249") + std = float("0.0506673") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.10498") + max_val = float("0.125234") + mean = float("-0.0096529") + std = float("0.0315707") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.046203") + max_val = float("0.0533854") + mean = float("-0.00024935") + std = float("0.00299432") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00255311") + max_val = float("0.00302266") + mean = float("9.24001e-05") + std = float("0.00102546") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00218781") + max_val = float("0.00368412") + mean = float("2.14576e-05") + std = float("0.000454436") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype 
= "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.408866") + max_val = float("0.0192085") + mean = float("-0.0556575") + std = float("0.0692171") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.232909") + max_val = float("0.549512") + mean = float("0.280237") + std = float("0.126665") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.99194") + max_val = float("1.51118") + mean = float("1.23481") + std = float("0.0724215") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.00162732") + max_val = float("157.489") + mean = float("2.37771") + std = float("10.0765") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-3.4158") + max_val = float("2.26802") + mean = float("0.0219363") + std = float("0.412554") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.303292") + max_val = float("0.19556") + mean = float("0.000605287") + std = float("0.0144313") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00209029") + max_val = float("0.00798783") + mean = float("1.7015e-06") + std = float("0.000935296") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00535335") + max_val = float("0.0106302") + mean = float("3.37412e-06") + std = float("0.000515341") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00525521") + max_val = float("0.0145446") + mean = float("1.87138e-08") + std = float("0.00475") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0328719") + max_val = float("0.0658116") + mean = float("1.06011e-08") + std = float("0.00223125") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0159444") + max_val = float("0.0716664") + mean = float("0.0156827") + std = float("0.0141851") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.02746") + max_val = float("1.22316") + mean = float("1.09258") + std = float("0.0262997") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("9.27059e-05") + max_val = float("0.0491799") + mean = float("0.00131356") + std = float("0.00220594") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.118335") + max_val = float("0.0502021") + mean = float("-0.00579341") + std = float("0.0133184") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = 
"float32" + min_val = float("-0.0245855") + max_val = float("0.0259771") + mean = float("-9.1685e-05") + std = float("0.00109901") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00228367") + max_val = float("0.0017984") + mean = float("9.02642e-05") + std = float("0.000464238") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00190114") + max_val = float("0.00166296") + mean = float("2.82487e-05") + std = float("0.000154769") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-0.398112") + max_val = float("0.0341335") + mean = float("-0.0183825") + std = float("0.0458581") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.149591") + max_val = float("0.25563") + mean = float("0.127271") + std = float("0.0546892") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.01586") + max_val = float("1.35031") + mean = float("1.10996") + std = float("0.0353314") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("7.61599e-05") + max_val = float("7.24991") + mean = float("0.129052") + std = float("0.546235") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.787154") + max_val = float("0.440341") + mean = float("-0.0324434") + std = float("0.112186") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0434972") + max_val = float("0.028497") + mean = float("-0.00050544") + std = float("0.0033397") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.0036321") + max_val = float("0.00249458") + mean = float("1.83682e-05") + std = float("0.000350058") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0129388") + max_val = float("0.040846") + mean = float("7.13241e-06") + std = float("0.000222922") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..e94f1ce5a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +e0ff4c70e120c4ec0e602d727b74e15e6f4e61525ade2ac5f305aac2c206752c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + 
"num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py new file mode 100644 index 000000000..c0dc17231 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py @@ -0,0 +1,69 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [7581] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 7581] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00995911") + std = float("0.10127") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("0.971142") + mean = float("0.00550478") + std = float("0.0532604") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000905374") + std = float("0.0300758") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 4] + dtype = "float32" + max_val = float("608.0") + mean = float("218.815") + std = float("214.701") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("0.00885437") + mean = float("1.14459e-05") + std = float("0.000226499") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py new file mode 100644 index 000000000..d6c3a388b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py @@ -0,0 +1,248 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 11, 1] + + # pd_op.tile: (2x11x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x11x-1xf32) <- 
(2x-1x11xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x11x-1xf32) <- (2x11x-1xb, 2x11x-1xf32, 2x11x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_3) + del data_3, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x11x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_4, full_3, float("0"), True) + del data_4, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_5, 0, 2) + del data_5 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (22xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_0] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_6) + del full_6, sum_0 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [-1, 4] + + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.gather: (-1x4xf32) <- (22x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_0, full_8] + del data_0, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: 
(2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_9, where_1.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_7, where_0) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_4, True) + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, where_0) + del data_2, where_0 + + # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_4, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_11, float("1e-09"), True) + del full_11, max_0 + + # pd_op.divide: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x11x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_4) + del full_int_array_4, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..a93514ba4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +be2fb57bd448a9ffeb7401288b396cc0d51942b463c2f34662d7485236768468 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/input_meta.py new file mode 100644 index 000000000..9fcc501d1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 768, 16, 16] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("7.58923") + mean = float("0.294479") + std = float("0.650884") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 384, 32, 32] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.9425") + mean = float("0.4045") + std = float("0.743228") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 192, 64, 64] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.8473") + mean = float("0.501229") + std = float("0.814411") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py new file mode 100644 index 000000000..0a0635ece --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("16"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (16xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (16xf32) <- (16xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16xf32) <- (16xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: 
(16xf32) <- (16xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([16xf32, 16xf32]) <- (16xf32, 16xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([16x16xf32, 16x16xf32]) <- ([16xf32, 16xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (16x16xf32, 16x16xf32) <- ([16x16xf32, 16x16xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (16x16x4xf32) <- ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (16x16x2xf32) <- ([16x16xf32, 16x16xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (256x4xf32) <- (16x16x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (256x2xf32) <- (16x16x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (256x1xf32) <- () + full_5 = paddle._C_ops.full( + [256, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("32"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (32xf32) <- (32xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([32xf32, 32xf32]) <- (32xf32, 32xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # 
pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (32x32x4xf32) <- ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1024x4xf32) <- (32x32x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1024x2xf32) <- (32x32x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1024x1xf32) <- () + full_8 = paddle._C_ops.full( + [1024, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("64"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (64xf32) <- (64xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (64x64x4xf32) <- ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + 
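+ # Note: the meshgrid blocks above and below enumerate the cell centres of
+ # the 16x16, 32x32 and 64x64 feature maps (strides 32, 16 and 8). Each
+ # level produces anchor boxes spanning centre +/- 2.5*stride, anchor
+ # centre points and a per-anchor stride tensor, which are concatenated
+ # below into the 5376-row outputs concat_2, concat_3 and concat_4.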
del combine_8 + + # pd_op.reshape: (4096x4xf32) <- (64x64x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (4096x2xf32) <- (64x64x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (4096x1xf32) <- () + full_11 = paddle._C_ops.full( + [4096, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([256x4xf32, 1024x4xf32, 4096x4xf32]) <- (256x4xf32, 1024x4xf32, 4096x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (5376x4xf32) <- ([256x4xf32, 1024x4xf32, 4096x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([256x2xf32, 1024x2xf32, 4096x2xf32]) <- (256x2xf32, 1024x2xf32, 4096x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (5376x2xf32) <- ([256x2xf32, 1024x2xf32, 4096x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([256x1xf32, 1024x1xf32, 4096x1xf32]) <- (256x1xf32, 1024x1xf32, 4096x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (5376x1xf32) <- ([256x1xf32, 1024x1xf32, 4096x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x16x16xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), 
+ ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x16x16xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x4x16x16xf32) <- (2x768x16x16xf32, 4x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x4x16x16xf32) <- (2x4x16x16xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x16x16xf32) <- (2x768x16x16xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x68x16x16xf32) <- (2x68x16x16xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x4x16x16xf32) <- (2x4x16x16xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x4x256xf32) <- (2x4x16x16xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x256x4xf32) <- (2x4x256xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x256xf32) <- (2x68x16x16xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x256x68xf32) <- (2x68x256xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x32x32xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, 
+ full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x32x32xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x4x32x32xf32) <- (2x384x32x32xf32, 4x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x4x32x32xf32) <- (2x4x32x32xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x32x32xf32) <- (2x384x32x32xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x68x32x32xf32) <- (2x68x32x32xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x4x32x32xf32) <- (2x4x32x32xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x4x1024xf32) <- (2x4x32x32xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x1024x4xf32) <- (2x4x1024xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x1024xf32) <- (2x68x32x32xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x1024x68xf32) <- (2x68x1024xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x64x64xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x64x64xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x4x64x64xf32) <- 
(2x192x64x64xf32, 4x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x4x64x64xf32) <- (2x4x64x64xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x64x64xf32) <- (2x192x64x64xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x68x64x64xf32) <- (2x68x64x64xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x4x64x64xf32) <- (2x4x64x64xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x4x4096xf32) <- (2x4x64x64xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x4096x4xf32) <- (2x4x4096xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x4096xf32) <- (2x68x64x64xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x4096x68xf32) <- (2x68x4096xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x256x4xf32, 2x1024x4xf32, 2x4096x4xf32]) <- (2x256x4xf32, 2x1024x4xf32, 2x4096x4xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x5376x4xf32) <- ([2x256x4xf32, 
2x1024x4xf32, 2x4096x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32]) <- (2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x5376x68xf32) <- ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/weight_meta.py new file mode 100644 index 000000000..e5a6e2138 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/weight_meta.py @@ -0,0 +1,574 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00617039") + max_val = float("0.0247471") + mean = float("8.76171e-08") + std = float("0.0058339") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.145996") + max_val = float("0.169864") + mean = float("5.68543e-08") + std = float("0.00689417") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0738349") + max_val = float("0.245316") + mean = float("0.0614159") + std = float("0.056228") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.838577") + max_val = float("1.78465") + mean = float("1.29147") + std = float("0.191143") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000103191") + max_val = float("0.00502981") + mean = float("0.00082096") + 
std = float("0.000779049") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0708353") + max_val = float("0.0386494") + mean = float("-0.00914949") + std = float("0.0186265") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0606701") + max_val = float("0.082399") + mean = float("-0.000256942") + std = float("0.00597583") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00445146") + max_val = float("0.00606764") + mean = float("4.20382e-05") + std = float("0.00190307") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0103112") + max_val = float("0.0114903") + mean = float("-1.30666e-05") + std = float("0.0012739") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.446263") + max_val = float("1.52163") + mean = float("0.405797") + std = float("0.337179") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.960207") + max_val = float("2.22997") + mean = float("1.3744") + std = float("0.177275") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000143504") + max_val = float("0.0121462") + mean = float("0.000852247") + std = float("0.00116876") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.16047") + max_val = float("0.0780924") + mean = float("-0.0110097") + std = float("0.0322318") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0792006") + max_val = float("0.0707992") + mean = float("-0.000280639") + std = float("0.00516747") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0037695") + max_val = float("0.00682135") + mean = float("-4.77828e-05") + std = float("0.00130604") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0252135") + max_val = float("0.0187257") + mean = float("1.59693e-06") + std = float("0.00117037") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00329662") + max_val = float("0.0167199") + mean = float("6.5862e-08") + std = float("0.00398608") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0833873") + max_val = float("0.108341") + mean = float("3.68746e-08") + std = float("0.00404361") + data = None + + +class Program_weight_tensor_parameter_20: 
+ name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0153616") + max_val = float("0.107275") + mean = float("0.0328367") + std = float("0.0178463") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("1.0083") + max_val = float("1.2901") + mean = float("1.1454") + std = float("0.0510579") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("5.11946e-05") + max_val = float("0.00501719") + mean = float("0.000416751") + std = float("0.000536564") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0829912") + max_val = float("0.00866225") + mean = float("-0.0107976") + std = float("0.0096376") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0456148") + max_val = float("0.0526446") + mean = float("-0.000134144") + std = float("0.00242607") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00253366") + max_val = float("0.00295965") + mean = float("9.22357e-05") + std = float("0.00102263") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00217157") + max_val = float("0.00357567") + mean = float("2.19639e-05") + std = float("0.000452488") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.232051") + max_val = float("0.549515") + mean = float("0.2807") + std = float("0.126784") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.992895") + max_val = float("1.51605") + mean = float("1.23523") + std = float("0.0725696") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("9.68094e-05") + max_val = float("0.00485121") + mean = float("0.000651078") + std = float("0.000626398") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0829515") + max_val = float("0.0463276") + mean = float("-0.0198775") + std = float("0.0205532") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0485744") + max_val = float("0.0331662") + mean = float("-0.000243223") + std = float("0.00217656") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.0012767") + max_val = float("0.00746438") + mean = float("5.25699e-06") + std = float("0.000736404") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = 
float("-0.00538541") + max_val = float("0.0108279") + mean = float("6.00139e-06") + std = float("0.000471235") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00218127") + max_val = float("0.00587206") + mean = float("1.84955e-08") + std = float("0.00209701") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0338328") + max_val = float("0.0647329") + mean = float("1.05956e-08") + std = float("0.00213904") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0169414") + max_val = float("0.0711969") + mean = float("0.0152725") + std = float("0.0139057") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.0272") + max_val = float("1.22279") + mean = float("1.09248") + std = float("0.0262909") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("2.38092e-05") + max_val = float("0.00168459") + mean = float("0.000130715") + std = float("0.000148661") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0437267") + max_val = float("0.00473501") + mean = float("-0.00537265") + std = float("0.00503777") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0243792") + max_val = float("0.0257345") + mean = float("-4.77602e-05") + std = float("0.000969163") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00225995") + max_val = float("0.00160769") + mean = float("9.00065e-05") + std = float("0.000460301") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00189167") + max_val = float("0.00164264") + mean = float("2.63137e-05") + std = float("0.000153203") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 768, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.151545") + max_val = float("0.254017") + mean = float("0.126634") + std = float("0.0547851") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.01503") + max_val = float("1.34968") + mean = float("1.11006") + std = float("0.0353393") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("5.78733e-05") + max_val = float("0.00166021") + mean = float("0.000339151") + std = float("0.000203657") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0459798") + max_val = float("0.0744545") + mean = float("-0.0129441") + std = 
float("0.00948883") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0236407") + max_val = float("0.0190883") + mean = float("-0.000113066") + std = float("0.000944275") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00397064") + max_val = float("0.00215154") + mean = float("1.9151e-05") + std = float("0.000320242") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0131335") + max_val = float("0.040808") + mean = float("6.49697e-06") + std = float("0.000218383") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py new file mode 100644 index 000000000..d1d51e8d6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3549, 4] + dtype = "float32" + min_val = float("-8.35609") + max_val = float("59.0717") + mean = float("22.5856") + std = float("15.7449") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3549, 4] + dtype = "float32" + max_val = float("42.0071") + mean = float("21.2277") + std = float("13.4889") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 3549, 4] + dtype = "float32" + max_val = float("0.923099") + mean = float("0.00145319") + std = float("0.0330365") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [41.2588] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 3549, 68] + dtype = "float32" + min_val = float("-3.91208") + max_val = float("10.707") + mean = float("3.11295e-05") + std = float("1.44354") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [3549, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("51.5") + mean = float("22.5952") + std = float("14.8898") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py new file mode 100644 index 000000000..5db76f83d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py @@ -0,0 +1,509 @@ 
+import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (2x-1xi32) <- (2x-1xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (2x-1xf32) <- (2x-1x4xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, 
-1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + 
divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (2x-1x68xi32) <- (2x-1x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (2x-1x68xb) <- (2x-1x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (2x-1x68xf32, 2x-1x68xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 
2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + 
maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..babd6567f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py new file mode 100644 index 000000000..22e6b559d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py @@ -0,0 +1,48 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 4116, 4] + dtype = "float32" + max_val = float("0.0452349") + mean = float("0.00261075") + std = float("0.00638883") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4116, 68] + dtype = "float32" + min_val = float("-13.9672") + max_val = float("27.3274") + mean = float("1.46976e-05") + std = float("0.86683") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4116, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("444.0") + mean = float("224.0") + std = float("129.279") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4116, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py new file mode 100644 index 000000000..2d525f6d3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, 
data_0, data_1, data_2, data_3, data_4): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_3, data_4) + del data_3 + + # pd_op.shape64: (3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_2, stack_0) + del data_2, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # 
builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) + share_data__0 = data_1.detach() + del data_1 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_4, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py new file mode 100644 index 000000000..2064cb376 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.72227") + max_val = float("10.3192") + mean = float("5.96735") + std = float("1.32539") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = 
[8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.18519, 0.666667, 1.18519, 0.666667] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = 
paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt new file mode 100644 index 000000000..1fb84b936 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt @@ -0,0 +1 @@ +7151a27ee13106ef0614d1a21e8ba0cf50c805a756c390f71a03d77fcff10b9f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/input_meta.py new file mode 100644 index 000000000..685cf9586 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/input_meta.py @@ -0,0 +1,63 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 11, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00186012") + std = float("0.043089") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0204613") + std = float("0.141572") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 11, 4] + dtype = "float32" + min_val = float("60.4005") + max_val = float("512.0") + mean = float("223.716") + std = float("117.023") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 5376] + dtype = "float32" + max_val = float("0.00924298") + mean = float("2.15111e-05") + std = float("0.000307684") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 5376] + dtype = "float32" + max_val = float("0.986966") + mean = float("0.00984969") + std = float("0.0727765") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/model.py new file mode 100644 index 000000000..e2e902cde --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/model.py @@ -0,0 +1,181 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x5376xi64) <- (2x11x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, 
full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10752xi32) <- (22xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 5376] + + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (10752x4xf32) <- (22x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 5376, 4] + + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x5xf32) <- (2x5376xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x5376x4xf32) <- (2x5376x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x11x5376xf32) 
<- (2x11x5376xf32, 2x11x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x11x1xf32) <- (2x11x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x11x1xf32) <- (2x11x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x5376xf32) <- (2x11x5376xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x5376x4xf32) <- (2x5376x4xf32, 2x5376x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..abcd385f1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +eaac049364bed4d6c67fc3935e79acb80e63ba96c2283c6c4364c4ef58871728 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py new file mode 100644 index 000000000..ec4e87e7b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 512, 512] + dtype = "float32" + min_val = float("-1.9517") + max_val = float("2.64") + mean = float("-0.133605") + std = float("0.697919") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py new 
file mode 100644 index 000000000..2a640202f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py @@ -0,0 +1,7396 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + 
parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + 
parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + 
parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + data_0, + ): + # pd_op.conv2d: (2x32x256x256xf32) <- (2x3x512x512xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_697 + + # pd_op.batch_norm_: (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.swish: (2x32x256x256xf32) <- (2x32x256x256xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x256x256xf32) <- (2x32x256x256xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x32x256x256xf32) <- (2x32x256x256xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x256x256xf32) <- (2x32x256x256xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x64x256x256xf32, 64xf32, 64xf32, 64xf32, 64xf32, 
-1xui8) <- (2x64x256x256xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.swish: (2x64x256x256xf32) <- (2x64x256x256xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x128x128xf32) <- (2x64x256x256xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (2x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.swish: (2x96x128x128xf32) <- (2x96x128x128xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x96x128x128xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x96x128x128xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 
48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_661, + parameter_660, + parameter_659, + parameter_658, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_658, parameter_659, parameter_660, parameter_661 + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_657 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_656, + parameter_655, + parameter_654, + parameter_653, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_653, parameter_654, parameter_655, parameter_656 + + # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_652 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_651, + parameter_650, + parameter_649, + parameter_648, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_648, parameter_649, parameter_650, parameter_651 + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_647 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_646, + parameter_645, + parameter_644, + parameter_643, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_643, parameter_644, parameter_645, parameter_646 + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_642 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_641, + parameter_640, + parameter_639, + parameter_638, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_638, parameter_639, parameter_640, parameter_641 + + # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) + add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_637 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_636, + parameter_635, + parameter_634, + parameter_633, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_633, parameter_634, parameter_635, parameter_636 + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_632 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 
48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_631, + parameter_630, + parameter_629, + parameter_628, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_628, parameter_629, parameter_630, parameter_631 + + # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_627 + + # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_626, + parameter_625, + parameter_624, + parameter_623, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_623, parameter_624, parameter_625, parameter_626 + + # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) + add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + + # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x48x128x128xf32, 2x48x128x128xf32]) <- (2x48x128x128xf32, 2x48x128x128xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x128x128xf32) <- ([2x48x128x128xf32, 2x48x128x128xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x128x128xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_622 + + # 
pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) + del parameter_621 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x128x128xf32) <- (2x96x128x128xf32, 2x96x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x128x128xf32) <- (2x96x128x128xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_0, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x128x128x128xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x128x128xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.swish: (2x128x128x128xf32) <- (2x128x128x128xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x64x64xf32) <- (2x128x128x128xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x192x64x64xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x192x64x64xf32, 
96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 
2x96x64x64xf32) + add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 
96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_23 
= paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_549, + parameter_548, + parameter_547, + parameter_546, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_546, parameter_547, parameter_548, parameter_549 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_545 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_544, + parameter_543, + parameter_542, + parameter_541, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_541, parameter_542, parameter_543, parameter_544 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_540 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_539, + parameter_538, + parameter_537, + parameter_536, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_536, parameter_537, parameter_538, parameter_539 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_535 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_534, + 
parameter_533, + parameter_532, + parameter_531, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_531, parameter_532, parameter_533, parameter_534 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_530 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_529, + parameter_528, + parameter_527, + parameter_526, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_526, parameter_527, parameter_528, parameter_529 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_525 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x64x64xf32, 2x96x64x64xf32]) <- (2x96x64x64xf32, 2x96x64x64xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x64x64xf32) <- ([2x96x64x64xf32, 2x96x64x64xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x64x64xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) + del parameter_509 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x256x64x64xf32) <- (2x192x64x64xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_1, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x256x64x64xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x64x64xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.swish: (2x256x64x64xf32) <- (2x256x64x64xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x256x64x64xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( 
+ conv2d_40, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: 
(2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, 
None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + 
batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + + # 
pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 
192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_402, + parameter_401, + parameter_400, + parameter_399, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x32x32xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_398 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) + del parameter_397 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, 
reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x32x32xf32) <- (2x384x32x32xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_2, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x512x32x32xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x32x32xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.swish: (2x512x32x32xf32) <- (2x512x32x32xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x512x32x32xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + 
batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_370, + parameter_369, + parameter_368, + parameter_367, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_367, parameter_368, parameter_369, parameter_370 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_366 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_365, + parameter_364, + parameter_363, + parameter_362, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_362, parameter_363, parameter_364, parameter_365 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 
2x384x16x16xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_361 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_346 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x16x16xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_330, full_int_array_1) + del full_int_array_1, parameter_330 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, 
float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x16x16xf32) <- (2x768x16x16xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_3, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (2x1024x16x16xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x16x16xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.swish: (2x1024x16x16xf32) <- (2x1024x16x16xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1024x16x16xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1024x16x16xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + 
) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_59 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 
1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_61 = paddle._C_ops.swish(add_41) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32) + combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x1536x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1536x16x16xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_64 = paddle._C_ops.swish(add_42) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) + combine_5 = [swish_56, swish_64] + + # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 
2x384x16x16xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (2x384x32x32xf32) <- (2x384x16x16xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x384x32x32xf32, 2x512x32x32xf32]) <- (2x384x32x32xf32, 2x512x32x32xf32) + combine_6 = [nearest_interp_0, swish_45] + + # pd_op.concat: (2x896x32x32xf32) <- ([2x384x32x32xf32, 2x512x32x32xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x896x32x32xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: 
(2x192x32x32xf32) <- (2x896x32x32xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, 
parameter_231, parameter_232, parameter_233 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_70 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_72 = paddle._C_ops.swish(add_44) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + 
batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_74 = paddle._C_ops.swish(add_45) + + # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) + combine_7 = [swish_67, swish_74] + + # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (2x192x64x64xf32) <- (2x192x32x32xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x64x64xf32, 2x256x64x64xf32]) <- (2x192x64x64xf32, 2x256x64x64xf32) + combine_8 = [nearest_interp_1, swish_29] + + # pd_op.concat: (2x448x64x64xf32) <- ([2x192x64x64xf32, 2x256x64x64xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x448x64x64xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x448x64x64xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x64x64xf32) <- 
(2x96x64x64xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_80 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), 
+ "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_82 = paddle._C_ops.swish(add_47) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + 
batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_84 = paddle._C_ops.swish(add_48) + + # builtin.combine: ([2x96x64x64xf32, 2x96x64x64xf32]) <- (2x96x64x64xf32, 2x96x64x64xf32) + combine_9 = [swish_77, swish_84] + + # pd_op.concat: (2x192x64x64xf32) <- ([2x96x64x64xf32, 2x96x64x64xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x64x64xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + 
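+ # The sequence above closes the 64x64 stage: the two 96-channel branches gathered in
+ # concat_9 are fused by the 1x1 conv2d_115 + batch_norm + swish, and conv2d_116 (3x3,
+ # stride [2, 2]) downsamples the 192-channel map from 64x64 to 32x32, likely feeding
+ # the bottom-up path of a PAN-style neck (it is concatenated with swish_75 below).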
del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([2x192x32x32xf32, 2x384x32x32xf32]) <- (2x192x32x32xf32, 2x384x32x32xf32) + combine_10 = [swish_86, swish_75] + + # pd_op.concat: (2x576x32x32xf32) <- ([2x192x32x32xf32, 2x384x32x32xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x576x32x32xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x576x32x32xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_10, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 
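+ # concat_10 joins the downsampled 192-channel feature with the 384-channel lateral
+ # feature swish_75 (576 channels total); the two parallel 1x1 convs on it, conv2d_117
+ # and conv2d_118, open what looks like a CSP-style split: swish_87 is kept as the
+ # shortcut branch while swish_88 feeds the residual-block stack (conv2d_119 onward),
+ # and the two branches are re-joined in concat_11.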
+ + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_90 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: 
(2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_92 = paddle._C_ops.swish(add_50) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_94 = paddle._C_ops.swish(add_51) + + # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) + combine_11 = [swish_87, swish_94] + + # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x32x32xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([2x384x16x16xf32, 2x768x16x16xf32]) <- (2x384x16x16xf32, 2x768x16x16xf32) + combine_12 = [swish_96, swish_65] + + # pd_op.concat: (2x1152x16x16xf32) <- ([2x384x16x16xf32, 2x768x16x16xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1152x16x16xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_12, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1152x16x16xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + 
parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_100 = paddle._C_ops.swish(add_52) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_102 = paddle._C_ops.swish(add_53) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 
1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_104 = paddle._C_ops.swish(add_54) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) + combine_13 = [swish_97, swish_104] + + # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + 
parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_53, + add_54, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + 
batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + 
batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + 
batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + 
batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + 
conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py new file mode 100644 index 000000000..25c54b0a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py @@ -0,0 +1,7564 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.241183") + max_val = float("0.339114") + mean = float("0.111688") + std = float("0.0760614") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.855286") + max_val = float("1.34027") + mean = float("1.09294") + std = float("0.0413897") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + 
min_val = float("0.000633402") + max_val = float("0.0240355") + mean = float("0.002005") + std = float("0.00159656") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.0975389") + max_val = float("0.0882256") + mean = float("-0.0156178") + std = float("0.0147547") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0352945") + max_val = float("0.027285") + mean = float("-9.50896e-05") + std = float("0.0014892") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.220092") + max_val = float("0.0360991") + mean = float("-0.0289701") + std = float("0.0325433") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.949185") + max_val = float("1.03882") + mean = float("0.984992") + std = float("0.0122396") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000285216") + max_val = float("0.00478627") + mean = float("0.00122854") + std = float("0.00061932") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0285195") + max_val = float("0.0323284") + mean = float("0.00097355") + std = float("0.01139") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.021078") + max_val = float("0.0140467") + mean = float("5.46721e-06") + std = float("0.00115269") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.220092") + max_val = float("0.0360991") + mean = float("-0.0289701") + std = float("0.0325433") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.853263") + max_val = float("1.12594") + mean = float("1.01951") + std = float("0.020693") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.000688955") + max_val = float("0.010658") + mean = float("0.00259363") + std = float("0.001307") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.0898062") + max_val = float("0.0697168") + mean = float("-0.0204018") + std = float("0.0193672") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0184926") + max_val = float("0.0231329") + mean = float("-3.89981e-05") + std = float("0.000775433") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.187634") + max_val = float("0.0397745") + mean = float("-0.0496692") + std = float("0.0341143") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.923384") + max_val = float("1.15592") + mean = float("1.01789") + std = float("0.0319458") + data = None + + +class Program_weight_tensor_parameter_17: + name = 
"parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00169357") + max_val = float("0.0414876") + mean = float("0.00683062") + std = float("0.00397382") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.144802") + max_val = float("0.086576") + mean = float("-0.0246949") + std = float("0.0237346") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0202825") + max_val = float("0.0290433") + mean = float("-4.22358e-05") + std = float("0.000884567") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.13611") + max_val = float("0.0209956") + mean = float("-0.0495068") + std = float("0.0271924") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.940917") + max_val = float("1.03841") + mean = float("0.986233") + std = float("0.0130345") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000300689") + max_val = float("0.00457802") + mean = float("0.00153576") + std = float("0.000801307") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.037531") + max_val = float("0.034864") + mean = float("0.000281664") + std = float("0.0103515") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.019341") + max_val = float("0.0169804") + mean = float("-1.66912e-05") + std = float("0.0011822") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.13611") + max_val = float("0.0209956") + mean = float("-0.0495068") + std = float("0.0271924") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.966625") + max_val = float("1.10386") + mean = float("1.01865") + std = float("0.0185725") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00102883") + max_val = float("0.0119425") + mean = float("0.00332718") + std = float("0.00160318") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.087175") + max_val = float("0.0661578") + mean = float("-0.0253732") + std = float("0.0194598") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0204281") + max_val = float("0.0272238") + mean = float("-4.94366e-05") + std = float("0.000795963") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.148287") + max_val = float("0.0257711") + mean = float("-0.0507511") + std = float("0.0264773") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.938638") + max_val = float("1.11514") + mean = float("1.0147") + std = float("0.0355173") + data 
= None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00170241") + max_val = float("0.0180036") + mean = float("0.00504014") + std = float("0.00243654") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.0935511") + max_val = float("0.0491545") + mean = float("-0.00834149") + std = float("0.0230017") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0202815") + max_val = float("0.0279557") + mean = float("-3.32955e-05") + std = float("0.000914897") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.153278") + max_val = float("0.0451778") + mean = float("-0.0555992") + std = float("0.0277362") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.932352") + max_val = float("1.05292") + mean = float("0.984355") + std = float("0.0159156") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.000433048") + max_val = float("0.0042084") + mean = float("0.00195875") + std = float("0.000692107") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0284852") + max_val = float("0.0252557") + mean = float("-0.00507782") + std = float("0.008977") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0207499") + max_val = float("0.0193701") + mean = float("-0.000100869") + std = float("0.00123961") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.153278") + max_val = float("0.0451778") + mean = float("-0.0555992") + std = float("0.0277362") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.963645") + max_val = float("1.1319") + mean = float("1.02215") + std = float("0.0263352") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.0016078") + max_val = float("0.0222238") + mean = float("0.00525679") + std = float("0.00277651") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.104951") + max_val = float("0.0448978") + mean = float("-0.0148056") + std = float("0.0210608") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0196928") + max_val = float("0.0250277") + mean = float("-3.1454e-05") + std = float("0.000855646") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.161193") + max_val = float("0.0517648") + mean = float("-0.053389") + std = float("0.0280913") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.917844") + max_val = 
float("1.15227") + mean = float("1.01538") + std = float("0.0358938") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00206138") + max_val = float("0.0284275") + mean = float("0.00546661") + std = float("0.0027318") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.0944194") + max_val = float("0.0675621") + mean = float("-0.0218124") + std = float("0.0260175") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0217502") + max_val = float("0.0223443") + mean = float("-4.13226e-05") + std = float("0.000970614") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.101781") + max_val = float("0.056876") + mean = float("-0.0401516") + std = float("0.0228938") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.963822") + max_val = float("1.11711") + mean = float("1.01317") + std = float("0.0243344") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00110984") + max_val = float("0.00597262") + mean = float("0.0020846") + std = float("0.000693443") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0495382") + max_val = float("0.0540655") + mean = float("-0.0114017") + std = float("0.0130666") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0410642") + max_val = float("0.0455197") + mean = float("-5.58216e-05") + std = float("0.0014784") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0709703") + max_val = float("0.0169244") + mean = float("-0.0176548") + std = float("0.0126642") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.913666") + max_val = float("1.1021") + mean = float("1.00906") + std = float("0.0166227") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.000694125") + max_val = float("0.00878753") + mean = float("0.00162036") + std = float("0.000892236") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.0470721") + max_val = float("0.0304015") + mean = float("-0.0113723") + std = float("0.0122988") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0379563") + max_val = float("0.0300735") + mean = float("-5.92181e-05") + std = float("0.00129426") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0787137") + max_val = float("0.0034001") + mean = float("-0.0252802") + std = float("0.0143999") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + 
shape = [384] + dtype = "float32" + min_val = float("0.98146") + max_val = float("1.12391") + mean = float("1.02741") + std = float("0.0212668") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00215154") + max_val = float("0.0252834") + mean = float("0.0067307") + std = float("0.00354134") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.277707") + max_val = float("0.114193") + mean = float("-0.0247189") + std = float("0.0461422") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.031519") + max_val = float("0.0263953") + mean = float("-1.72944e-05") + std = float("0.000847953") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.413088") + max_val = float("0.667853") + mean = float("0.255716") + std = float("0.158995") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.924404") + max_val = float("1.6694") + mean = float("1.17434") + std = float("0.0909085") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00152847") + max_val = float("0.0344264") + mean = float("0.00519486") + std = float("0.00331994") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.0946215") + max_val = float("0.0705854") + mean = float("-0.019859") + std = float("0.0214886") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.066096") + max_val = float("0.062936") + mean = float("-0.000217452") + std = float("0.00361837") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.257401") + max_val = float("0.0756873") + mean = float("-0.0386734") + std = float("0.0605711") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.913002") + max_val = float("1.05482") + mean = float("0.97016") + std = float("0.0253977") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.000555623") + max_val = float("0.0115202") + mean = float("0.00332226") + std = float("0.00222795") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0431262") + max_val = float("0.0415504") + mean = float("-0.00850673") + std = float("0.0137204") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0403293") + max_val = float("0.0259594") + mean = float("-0.00030975") + std = float("0.00273458") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.257401") + max_val = float("0.0756873") + mean = float("-0.0386734") + std = float("0.0605711") + data = None + + +class 
Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.67347") + max_val = float("1.16453") + mean = float("1.02581") + std = float("0.0489") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00163203") + max_val = float("0.0223524") + mean = float("0.00610887") + std = float("0.00292424") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.110504") + max_val = float("0.0737091") + mean = float("-0.0141394") + std = float("0.0249903") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0282871") + max_val = float("0.036242") + mean = float("-4.29571e-05") + std = float("0.00185962") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.254657") + max_val = float("0.0957903") + mean = float("-0.080577") + std = float("0.0593186") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.858086") + max_val = float("1.31453") + mean = float("1.01565") + std = float("0.0626041") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00360845") + max_val = float("0.0470869") + mean = float("0.0123099") + std = float("0.00713766") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.112606") + max_val = float("0.159199") + mean = float("-0.0134364") + std = float("0.0271641") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0377322") + max_val = float("0.0505119") + mean = float("-6.78957e-05") + std = float("0.00209937") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.2187") + max_val = float("0.0411154") + mean = float("-0.100397") + std = float("0.0464196") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.889894") + max_val = float("1.08345") + mean = float("0.97001") + std = float("0.0267277") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00101443") + max_val = float("0.00913514") + mean = float("0.00312222") + std = float("0.00149668") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0424533") + max_val = float("0.0299395") + mean = float("-0.00723978") + std = float("0.0103044") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0374012") + max_val = float("0.0307031") + mean = float("-0.000380153") + std = float("0.00281281") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.2187") + max_val = float("0.0411154") + mean = float("-0.100397") 
+ std = float("0.0464196") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.930532") + max_val = float("1.13532") + mean = float("1.02386") + std = float("0.0379141") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00223834") + max_val = float("0.0246658") + mean = float("0.00757584") + std = float("0.00405972") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.0893976") + max_val = float("0.0617831") + mean = float("-0.0197007") + std = float("0.0216284") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.03875") + max_val = float("0.0477189") + mean = float("-8.51208e-05") + std = float("0.0019455") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.230434") + max_val = float("0.0106795") + mean = float("-0.106056") + std = float("0.0515807") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.866582") + max_val = float("1.19747") + mean = float("1.01744") + std = float("0.0615714") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00388242") + max_val = float("0.0321491") + mean = float("0.00938872") + std = float("0.00493863") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.0911383") + max_val = float("0.0437329") + mean = float("-0.0127117") + std = float("0.0237587") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0428683") + max_val = float("0.060501") + mean = float("-7.44884e-05") + std = float("0.00218307") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.331729") + max_val = float("0.0596956") + mean = float("-0.122847") + std = float("0.0596309") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.864417") + max_val = float("1.08298") + mean = float("0.967068") + std = float("0.029395") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00115593") + max_val = float("0.00832892") + mean = float("0.00330322") + std = float("0.0012261") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.029956") + max_val = float("0.0246558") + mean = float("-0.00718649") + std = float("0.0115832") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0306946") + max_val = float("0.0748886") + mean = float("-0.000418031") + std = float("0.00305454") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = 
float("-0.331729") + max_val = float("0.0596956") + mean = float("-0.122847") + std = float("0.0596309") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.930062") + max_val = float("1.13715") + mean = float("1.02214") + std = float("0.0316853") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00308637") + max_val = float("0.0441344") + mean = float("0.00890809") + std = float("0.00562068") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.101372") + max_val = float("0.040673") + mean = float("-0.0117882") + std = float("0.0223792") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0356841") + max_val = float("0.0579415") + mean = float("-6.02996e-05") + std = float("0.00208653") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.348121") + max_val = float("0.134135") + mean = float("-0.132564") + std = float("0.0683482") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.883098") + max_val = float("1.33245") + mean = float("1.01684") + std = float("0.066269") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00419839") + max_val = float("0.0370941") + mean = float("0.0100871") + std = float("0.00543772") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.10148") + max_val = float("0.0669823") + mean = float("-0.0195411") + std = float("0.0241009") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0441361") + max_val = float("0.0879773") + mean = float("-7.14624e-05") + std = float("0.00242393") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.248584") + max_val = float("0.0643379") + mean = float("-0.0972664") + std = float("0.0449312") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.916261") + max_val = float("1.23422") + mean = float("1.01788") + std = float("0.0458154") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00242366") + max_val = float("0.0139676") + mean = float("0.00484591") + std = float("0.00171238") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0716758") + max_val = float("0.0467604") + mean = float("-0.01717") + std = float("0.0186884") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0539295") + max_val = float("0.0629534") + mean = float("-0.000154038") + std = float("0.00352968") + data = None + + +class 
Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.165453") + max_val = float("0.0408002") + mean = float("-0.033107") + std = float("0.0302826") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.914357") + max_val = float("1.29586") + mean = float("1.00068") + std = float("0.0391306") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00125772") + max_val = float("0.0253186") + mean = float("0.0035712") + std = float("0.00260193") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0483963") + max_val = float("0.0286654") + mean = float("-0.0102664") + std = float("0.0142139") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.052518") + max_val = float("0.0664707") + mean = float("-9.00415e-05") + std = float("0.00298878") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.156099") + max_val = float("0.0110499") + mean = float("-0.0538026") + std = float("0.030917") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.853169") + max_val = float("1.1744") + mean = float("1.00826") + std = float("0.0376149") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00348672") + max_val = float("0.0403159") + mean = float("0.0111201") + std = float("0.00590273") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.272761") + max_val = float("0.304479") + mean = float("-0.0311626") + std = float("0.0877005") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0436434") + max_val = float("0.0390175") + mean = float("-2.75791e-05") + std = float("0.00207461") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.731877") + max_val = float("1.79262") + mean = float("0.373936") + std = float("0.434119") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.625408") + max_val = float("1.70704") + mean = float("1.16439") + std = float("0.199739") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00270591") + max_val = float("0.0641962") + mean = float("0.0133792") + std = float("0.0107877") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.201918") + max_val = float("0.126312") + mean = float("-0.02014") + std = float("0.0395955") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.124441") + max_val = float("0.101297") + mean = 
float("-0.000428392") + std = float("0.00839381") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.646764") + max_val = float("0.287418") + mean = float("-0.036847") + std = float("0.203964") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.743727") + max_val = float("1.31816") + mean = float("0.928051") + std = float("0.0891314") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00126457") + max_val = float("0.0283809") + mean = float("0.00872036") + std = float("0.0074912") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0533211") + max_val = float("0.054261") + mean = float("-0.00777041") + std = float("0.0241502") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0662853") + max_val = float("0.0498423") + mean = float("-0.000819471") + std = float("0.00712985") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.646764") + max_val = float("0.287418") + mean = float("-0.036847") + std = float("0.203964") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.478759") + max_val = float("1.3967") + mean = float("1.04303") + std = float("0.135316") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00456047") + max_val = float("0.0957772") + mean = float("0.0187912") + std = float("0.0159306") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.189351") + max_val = float("0.148294") + mean = float("-0.00279725") + std = float("0.0509242") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0753107") + max_val = float("0.0647631") + mean = float("-7.46307e-05") + std = float("0.00493989") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.807012") + max_val = float("0.639205") + mean = float("-0.135788") + std = float("0.222961") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.500992") + max_val = float("1.52738") + mean = float("0.986272") + std = float("0.139979") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.00448636") + max_val = float("0.078761") + mean = float("0.0176369") + std = float("0.0131972") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.232282") + max_val = float("0.112253") + mean = float("0.00311495") + std = float("0.0583791") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.0632043") + max_val = float("0.0542453") + mean = float("-0.000286044") + std = float("0.00552684") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.375565") + max_val = float("0.21532") + mean = float("-0.182874") + std = float("0.131104") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.660313") + max_val = float("1.1612") + mean = float("0.867514") + std = float("0.0695054") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00209929") + max_val = float("0.0217899") + mean = float("0.00748623") + std = float("0.00298326") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0465666") + max_val = float("0.039584") + mean = float("-0.0139749") + std = float("0.0171722") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.063312") + max_val = float("0.0527997") + mean = float("-0.00133358") + std = float("0.00750932") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.375565") + max_val = float("0.21532") + mean = float("-0.182874") + std = float("0.131104") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.801681") + max_val = float("1.29162") + mean = float("1.01024") + std = float("0.0817165") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00485304") + max_val = float("0.0746222") + mean = float("0.0201209") + std = float("0.0145062") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.102996") + max_val = float("0.0561913") + mean = float("-0.0165577") + std = float("0.0346552") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0719449") + max_val = float("0.0662486") + mean = float("-0.000336108") + std = float("0.0053214") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.500131") + max_val = float("0.31631") + mean = float("-0.192663") + std = float("0.162844") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.725877") + max_val = float("1.31136") + mean = float("0.944603") + std = float("0.105848") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.00343356") + max_val = float("0.0319398") + mean = float("0.00955323") + std = float("0.00603822") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.114596") + max_val = float("0.088809") + mean = float("0.0165881") + std = float("0.0398273") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" 
+ shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0822766") + max_val = float("0.0772172") + mean = float("-0.000347346") + std = float("0.00616296") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.60397") + max_val = float("0.0882115") + mean = float("-0.234695") + std = float("0.141888") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.705014") + max_val = float("1.02807") + mean = float("0.909017") + std = float("0.0648434") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00465651") + max_val = float("0.0323499") + mean = float("0.0105922") + std = float("0.00456131") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0572868") + max_val = float("0.0528458") + mean = float("-0.0101465") + std = float("0.0295067") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0723145") + max_val = float("0.0736894") + mean = float("-0.0014416") + std = float("0.00966079") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.60397") + max_val = float("0.0882115") + mean = float("-0.234695") + std = float("0.141888") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.618521") + max_val = float("1.21767") + mean = float("0.958308") + std = float("0.106787") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.00884074") + max_val = float("0.0914745") + mean = float("0.0314621") + std = float("0.0183233") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.160652") + max_val = float("0.12782") + mean = float("-0.004871") + std = float("0.0578784") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0638916") + max_val = float("0.0818362") + mean = float("-0.000246873") + std = float("0.00600333") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.81651") + max_val = float("0.693971") + mean = float("-0.221258") + std = float("0.247267") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.652024") + max_val = float("1.50005") + mean = float("0.904973") + std = float("0.115969") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.00583431") + max_val = float("0.0907615") + mean = float("0.0165982") + std = float("0.0160351") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.201578") + max_val = float("0.211771") + mean = float("-0.00815532") + std = float("0.0732757") + data = None + + +class 
Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0998433") + max_val = float("0.159214") + mean = float("-0.000138757") + std = float("0.00702056") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.737142") + max_val = float("1.03848") + mean = float("-0.0970708") + std = float("0.357165") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.485726") + max_val = float("1.17258") + mean = float("0.785471") + std = float("0.141168") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.00479017") + max_val = float("0.0498613") + mean = float("0.0140149") + std = float("0.00706644") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.140604") + max_val = float("0.0620771") + mean = float("-0.00136434") + std = float("0.0304237") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.138366") + max_val = float("0.118931") + mean = float("-0.000333914") + std = float("0.00899348") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.100614") + max_val = float("0.279575") + mean = float("0.0624506") + std = float("0.0700954") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.726756") + max_val = float("1.15922") + mean = float("0.8968") + std = float("0.0750659") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00172884") + max_val = float("0.0284937") + mean = float("0.0037772") + std = float("0.00304651") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0587997") + max_val = float("0.0664821") + mean = float("-0.0051813") + std = float("0.0161551") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0960914") + max_val = float("0.114711") + mean = float("-0.000131248") + std = float("0.00503197") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.419878") + max_val = float("0.305803") + mean = float("-0.0843783") + std = float("0.0933839") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.660092") + max_val = float("1.60427") + mean = float("0.830813") + std = float("0.0949805") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00393118") + max_val = float("0.106249") + mean = float("0.0110337") + std = float("0.00808965") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.0959304") + max_val = float("0.0379394") + mean = 
float("-0.0249515") + std = float("0.0229359") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0812206") + max_val = float("0.0824483") + mean = float("-0.000421448") + std = float("0.00583855") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.373089") + max_val = float("0.1651") + mean = float("-0.0928755") + std = float("0.059282") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.877299") + max_val = float("1.57429") + mean = float("1.01597") + std = float("0.0852694") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00323605") + max_val = float("0.0532371") + mean = float("0.00771792") + std = float("0.00446179") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.165965") + max_val = float("0.109289") + mean = float("-0.031791") + std = float("0.0291846") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.130749") + max_val = float("0.0789119") + mean = float("-0.000445351") + std = float("0.00532961") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.256683") + max_val = float("0.0671899") + mean = float("-0.0839381") + std = float("0.0440531") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.819788") + max_val = float("0.987271") + mean = float("0.928926") + std = float("0.0272814") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00236419") + max_val = float("0.0177991") + mean = float("0.00505779") + std = float("0.00197103") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0602631") + max_val = float("0.0372194") + mean = float("-0.0145273") + std = float("0.0175294") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0359811") + max_val = float("0.0341578") + mean = float("-0.000499181") + std = float("0.00410174") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.256683") + max_val = float("0.0671899") + mean = float("-0.0839381") + std = float("0.0440531") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.900374") + max_val = float("1.08407") + mean = float("0.991603") + std = float("0.0256836") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.00869434") + max_val = float("0.103153") + mean = float("0.0195607") + std = float("0.00998195") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + 
dtype = "float32" + min_val = float("-0.148147") + max_val = float("0.083841") + mean = float("-0.0215099") + std = float("0.0357341") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0560753") + max_val = float("0.0798715") + mean = float("-6.90239e-05") + std = float("0.00228177") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.28296") + max_val = float("0.00624382") + mean = float("-0.108697") + std = float("0.054323") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.935362") + max_val = float("1.19786") + mean = float("1.03662") + std = float("0.044863") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0153049") + max_val = float("0.150077") + mean = float("0.0321082") + std = float("0.0130511") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.205475") + max_val = float("0.151818") + mean = float("-0.048646") + std = float("0.0468518") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0723395") + max_val = float("0.0757746") + mean = float("-0.000117075") + std = float("0.00276706") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.253549") + max_val = float("-0.0252667") + mean = float("-0.111396") + std = float("0.0515085") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.915269") + max_val = float("1.08382") + mean = float("0.975487") + std = float("0.0197657") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00131146") + max_val = float("0.00746504") + mean = float("0.00271604") + std = float("0.00086359") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0571962") + max_val = float("0.0503431") + mean = float("-0.0146694") + std = float("0.014805") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0237103") + max_val = float("0.0309154") + mean = float("-0.000535383") + std = float("0.00421215") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.253549") + max_val = float("-0.0252667") + mean = float("-0.111396") + std = float("0.0515085") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.93953") + max_val = float("1.1319") + mean = float("1.00446") + std = float("0.0346886") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.00555641") + max_val = float("0.0273444") + mean = float("0.0100098") + std = float("0.00301259") + data = None + + 
+class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.142862") + max_val = float("0.0842238") + mean = float("-0.0289718") + std = float("0.0284312") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0463499") + max_val = float("0.0648047") + mean = float("-0.000117258") + std = float("0.00236655") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.397852") + max_val = float("-0.024517") + mean = float("-0.135045") + std = float("0.0579008") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.935578") + max_val = float("1.28416") + mean = float("1.02712") + std = float("0.0581486") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0152128") + max_val = float("0.0761776") + mean = float("0.0305415") + std = float("0.00974539") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.258209") + max_val = float("0.302634") + mean = float("-0.0505409") + std = float("0.057312") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0333938") + max_val = float("0.0459033") + mean = float("-0.000119163") + std = float("0.00292916") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.291413") + max_val = float("-0.0234132") + mean = float("-0.113952") + std = float("0.046762") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.906693") + max_val = float("1.13942") + mean = float("0.996984") + std = float("0.036522") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.0014244") + max_val = float("0.00546528") + mean = float("0.00250251") + std = float("0.000770997") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0389427") + max_val = float("0.0867383") + mean = float("-0.00930065") + std = float("0.0135574") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0346133") + max_val = float("0.0524164") + mean = float("-0.000326125") + std = float("0.00484124") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.291413") + max_val = float("-0.0234132") + mean = float("-0.113952") + std = float("0.046762") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.909728") + max_val = float("1.14814") + mean = float("0.986983") + std = float("0.0373625") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00562666") + max_val = 
float("0.0219922") + mean = float("0.0109758") + std = float("0.00338237") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.21375") + max_val = float("0.0270211") + mean = float("-0.0354808") + std = float("0.0287563") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.024049") + max_val = float("0.0316937") + mean = float("-0.00014643") + std = float("0.00232213") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.370567") + max_val = float("-0.0105869") + mean = float("-0.161477") + std = float("0.0602506") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.905056") + max_val = float("1.22126") + mean = float("1.03042") + std = float("0.0494268") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.00753308") + max_val = float("0.0302844") + mean = float("0.0141411") + std = float("0.00443453") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.100325") + max_val = float("0.0739225") + mean = float("-0.0300805") + std = float("0.0307536") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0896984") + max_val = float("0.04215") + mean = float("-0.000139982") + std = float("0.00324088") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.397771") + max_val = float("0.0863893") + mean = float("-0.165521") + std = float("0.0737187") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.878799") + max_val = float("1.17577") + mean = float("1.01746") + std = float("0.0565899") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00278118") + max_val = float("0.0114293") + mean = float("0.00483493") + std = float("0.00146785") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.0560004") + max_val = float("0.0581845") + mean = float("0.0102661") + std = float("0.0202822") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.101477") + max_val = float("0.142492") + mean = float("-0.000156828") + std = float("0.00446408") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.151112") + max_val = float("0.501803") + mean = float("-0.00466305") + std = float("0.073986") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.932258") + max_val = float("1.23138") + mean = float("1.04643") + std = float("0.0627369") + data = None + + +class Program_weight_tensor_parameter_252: + name = 
"parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00250556") + max_val = float("0.0461735") + mean = float("0.00561131") + std = float("0.00346797") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0548003") + max_val = float("0.0495759") + mean = float("-0.000104753") + std = float("0.0205629") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.134457") + max_val = float("0.0882326") + mean = float("-0.000122861") + std = float("0.00449465") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.31215") + max_val = float("-0.0444473") + mean = float("-0.171263") + std = float("0.0441121") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.786726") + max_val = float("1.17526") + mean = float("0.885032") + std = float("0.0343599") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.00467646") + max_val = float("0.0521003") + mean = float("0.0100483") + std = float("0.00415136") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.0852832") + max_val = float("0.0645434") + mean = float("-0.0324354") + std = float("0.0213669") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0336805") + max_val = float("0.0404763") + mean = float("-0.000294351") + std = float("0.00351804") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.164743") + max_val = float("0.115333") + mean = float("-0.0875639") + std = float("0.0238268") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.935282") + max_val = float("1.27924") + mean = float("1.02951") + std = float("0.0299064") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00328574") + max_val = float("0.0307873") + mean = float("0.00666914") + std = float("0.00225792") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.13377") + max_val = float("0.10924") + mean = float("-0.0334799") + std = float("0.0256795") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0417249") + max_val = float("0.0674709") + mean = float("-0.000227656") + std = float("0.00301788") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.175755") + max_val = float("0.114994") + mean = float("-0.059758") + std = float("0.0316004") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.875498") + max_val = float("1.05486") + mean = float("0.97544") + std = 
float("0.0178167") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00187857") + max_val = float("0.0214535") + mean = float("0.0049821") + std = float("0.00205769") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.048337") + max_val = float("0.0454388") + mean = float("-0.00746495") + std = float("0.0193299") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0520415") + max_val = float("0.0516332") + mean = float("-0.000102198") + std = float("0.00275407") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.175755") + max_val = float("0.114994") + mean = float("-0.059758") + std = float("0.0316004") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.939765") + max_val = float("1.08488") + mean = float("0.993679") + std = float("0.0187977") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0136676") + max_val = float("0.24702") + mean = float("0.0361577") + std = float("0.0178113") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.193526") + max_val = float("0.128898") + mean = float("-0.0635078") + std = float("0.0592118") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0262406") + max_val = float("0.0342993") + mean = float("-0.000104765") + std = float("0.00102972") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.151307") + max_val = float("0.102932") + mean = float("-0.036051") + std = float("0.0243773") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.948438") + max_val = float("1.23653") + mean = float("1.02223") + std = float("0.0374692") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.00885489") + max_val = float("0.121229") + mean = float("0.0291848") + std = float("0.0116348") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.180611") + max_val = float("0.112007") + mean = float("-0.0377734") + std = float("0.047009") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0233099") + max_val = float("0.0327733") + mean = float("-6.46831e-05") + std = float("0.00135178") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.12739") + max_val = float("0.0293372") + mean = float("-0.0396596") + std = float("0.0204788") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = 
float("0.935426") + max_val = float("1.22654") + mean = float("1.02217") + std = float("0.0409336") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.026841") + max_val = float("0.276428") + mean = float("0.0890046") + std = float("0.0287197") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-0.988092") + max_val = float("1.24067") + mean = float("-0.0106803") + std = float("0.356169") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0437694") + max_val = float("0.0478719") + mean = float("3.92494e-05") + std = float("0.00226745") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0351363") + max_val = float("0.0448389") + mean = float("-0.00202565") + std = float("0.0112301") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.955166") + max_val = float("1.07949") + mean = float("0.99032") + std = float("0.0160701") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00173498") + max_val = float("0.00643269") + mean = float("0.00313556") + std = float("0.000799758") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.0717004") + max_val = float("0.0408329") + mean = float("-0.0299012") + std = float("0.0176195") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0228554") + max_val = float("0.0328209") + mean = float("-0.000375757") + std = float("0.00255745") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0351365") + max_val = float("0.0448389") + mean = float("-0.00202565") + std = float("0.0112301") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.957996") + max_val = float("1.12454") + mean = float("1.00467") + std = float("0.0255587") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0083014") + max_val = float("0.0352855") + mean = float("0.0175327") + std = float("0.00454233") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.247265") + max_val = float("0.101324") + mean = float("-0.0772083") + std = float("0.0418546") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259612") + max_val = float("0.0450877") + mean = float("-0.000119198") + std = float("0.00109556") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0757949") + max_val = float("0.0143529") + mean = float("-0.0184104") + std = float("0.0130086") + data = None + + +class 
Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.948442") + max_val = float("1.1913") + mean = float("1.02005") + std = float("0.0307435") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0341549") + max_val = float("0.134905") + mean = float("0.0680685") + std = float("0.0155983") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-0.699112") + max_val = float("0.534627") + mean = float("-0.130478") + std = float("0.14264") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0199475") + max_val = float("0.0262388") + mean = float("-8.6257e-05") + std = float("0.00132087") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0646182") + max_val = float("0.0263686") + mean = float("-0.0175816") + std = float("0.0121433") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.976895") + max_val = float("1.05226") + mean = float("0.998698") + std = float("0.0104278") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.000942471") + max_val = float("0.00502155") + mean = float("0.00201621") + std = float("0.000551618") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0479207") + max_val = float("0.0928967") + mean = float("-0.0128137") + std = float("0.0168058") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.019998") + max_val = float("0.0336093") + mean = float("-0.0001808") + std = float("0.00225336") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0646181") + max_val = float("0.0263686") + mean = float("-0.0175816") + std = float("0.0121433") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.976548") + max_val = float("1.1009") + mean = float("1.00714") + std = float("0.0202515") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00525835") + max_val = float("0.0300494") + mean = float("0.0114248") + std = float("0.0035076") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.129246") + max_val = float("0.173103") + mean = float("-0.0523329") + std = float("0.0390795") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0126416") + max_val = float("0.0247245") + mean = float("-9.22132e-05") + std = float("0.000953698") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0846385") + max_val = 
float("-0.0012331") + mean = float("-0.0378149") + std = float("0.0147169") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.961605") + max_val = float("1.1164") + mean = float("1.01863") + std = float("0.0258138") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00543412") + max_val = float("0.034") + mean = float("0.0114679") + std = float("0.00281078") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.105395") + max_val = float("0.0875829") + mean = float("-0.0276342") + std = float("0.0283604") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0131868") + max_val = float("0.0207652") + mean = float("-5.34705e-05") + std = float("0.00131228") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.107271") + max_val = float("0.0233646") + mean = float("-0.0562337") + std = float("0.0197346") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981244") + max_val = float("1.07279") + mean = float("1.02152") + std = float("0.0138952") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00227472") + max_val = float("0.00995654") + mean = float("0.00327401") + std = float("0.000751464") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.0410182") + max_val = float("0.0248085") + mean = float("0.0060818") + std = float("0.00760998") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0184221") + max_val = float("0.0482036") + mean = float("-0.000162631") + std = float("0.00257425") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.058929") + max_val = float("0.0342356") + mean = float("-0.00850635") + std = float("0.0113521") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("1.00879") + max_val = float("1.21413") + mean = float("1.05118") + std = float("0.0209519") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.00216519") + max_val = float("0.00959873") + mean = float("0.00317997") + std = float("0.000757622") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.0134693") + max_val = float("0.019212") + mean = float("0.00436546") + std = float("0.00543387") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0415669") + max_val = float("0.0455758") + mean = float("-0.000161607") + std = float("0.00274744") + data = None + + +class Program_weight_tensor_parameter_325: + name 
= "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.76504") + max_val = float("-0.731072") + mean = float("-2.1942") + std = float("0.428366") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("1.62418") + max_val = float("4.43435") + mean = float("3.07417") + std = float("0.255202") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("0.00545502") + max_val = float("0.0375841") + mean = float("0.0112259") + std = float("0.00369148") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [1024] + dtype = "float32" + min_val = float("-0.116582") + max_val = float("0.0584592") + mean = float("-0.0421243") + std = float("0.0189653") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0682658") + max_val = float("0.08076") + mean = float("-0.00031367") + std = float("0.00305502") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [768] + dtype = "float32" + min_val = float("-0.0121546") + max_val = float("0.00561678") + mean = float("-0.000568828") + std = float("0.00173927") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.104186") + max_val = float("0.108443") + mean = float("-0.000213298") + std = float("0.00130075") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("-1.77817") + max_val = float("0.498255") + mean = float("-0.30001") + std = float("0.296857") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("0.18984") + max_val = float("1.98158") + mean = float("0.620203") + std = float("0.2786") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384] + dtype = "float32" + min_val = float("3.45478e-05") + max_val = float("0.000862679") + mean = float("0.000152348") + std = float("8.47812e-05") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.0224284") + max_val = float("0.0569889") + mean = float("0.0173439") + std = float("0.0133472") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0196332") + max_val = float("0.0252926") + mean = float("-0.000291589") + std = float("0.00211517") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("-1.77817") + max_val = float("0.498255") + mean = float("-0.30001") + std = float("0.296857") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("0.36588") + max_val = float("2.77771") + mean = float("1.04742") + std = float("0.308644") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384] + dtype = "float32" + min_val = float("0.000357258") + max_val = float("0.00471916") + mean = float("0.00106354") + std = 
float("0.000429049") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.199773") + max_val = float("0.0734005") + mean = float("0.0127227") + std = float("0.0242137") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0170937") + max_val = float("0.0209283") + mean = float("-2.76977e-05") + std = float("0.0013628") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("-2.61203") + max_val = float("0.0551867") + mean = float("-1.58323") + std = float("0.416329") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("0.567765") + max_val = float("1.67644") + mean = float("1.1241") + std = float("0.146785") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384] + dtype = "float32" + min_val = float("0.0222405") + max_val = float("0.126805") + mean = float("0.0503725") + std = float("0.0157052") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.656718") + max_val = float("0.389056") + mean = float("-0.183173") + std = float("0.0983499") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0169074") + max_val = float("0.0427054") + mean = float("-0.000129063") + std = float("0.00175192") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("-1.93759") + max_val = float("0.733668") + mean = float("-0.570541") + std = float("0.365887") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("0.140501") + max_val = float("2.06358") + mean = float("0.563187") + std = float("0.226696") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384] + dtype = "float32" + min_val = float("4.15388e-05") + max_val = float("0.000906682") + mean = float("0.00019597") + std = float("9.97336e-05") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0259493") + max_val = float("0.0583295") + mean = float("0.0172821") + std = float("0.0111211") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0185543") + max_val = float("0.0208855") + mean = float("-0.000319504") + std = float("0.00201805") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("-1.93768") + max_val = float("0.733668") + mean = float("-0.570541") + std = float("0.365888") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("0.57953") + max_val = float("2.10385") + mean = float("1.09247") + std = float("0.254048") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384] + dtype = "float32" + min_val = 
float("0.000627586") + max_val = float("0.00452718") + mean = float("0.00162004") + std = float("0.000525088") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.0456402") + max_val = float("0.100243") + mean = float("0.0222313") + std = float("0.0223563") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017816") + max_val = float("0.0248569") + mean = float("-5.76546e-05") + std = float("0.00141105") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.4273") + max_val = float("0.839141") + mean = float("-1.42155") + std = float("0.360806") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.438688") + max_val = float("1.84355") + mean = float("1.15674") + std = float("0.142333") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("0.0169176") + max_val = float("0.0834605") + mean = float("0.0339828") + std = float("0.0113006") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.537141") + max_val = float("0.620008") + mean = float("-0.11866") + std = float("0.0759343") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0215698") + max_val = float("0.0331226") + mean = float("-0.000122919") + std = float("0.00173093") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("-1.88491") + max_val = float("0.489301") + mean = float("-0.478428") + std = float("0.384422") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("0.0963961") + max_val = float("2.12165") + mean = float("0.441644") + std = float("0.215765") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384] + dtype = "float32" + min_val = float("4.83567e-05") + max_val = float("0.00122351") + mean = float("0.000214336") + std = float("0.000121244") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.0604177") + max_val = float("0.0648457") + mean = float("0.0205045") + std = float("0.0130396") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0178981") + max_val = float("0.0199833") + mean = float("-0.000383584") + std = float("0.00172042") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("-1.88491") + max_val = float("0.489301") + mean = float("-0.478428") + std = float("0.384422") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("0.572695") + max_val = float("2.21595") + mean = float("1.06078") + std = float("0.254685") + data = None + + +class 
Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384] + dtype = "float32" + min_val = float("0.000726389") + max_val = float("0.00447183") + mean = float("0.00202693") + std = float("0.000650997") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.161605") + max_val = float("0.0850251") + mean = float("0.0261345") + std = float("0.0250145") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.016375") + max_val = float("0.0234752") + mean = float("-5.94736e-05") + std = float("0.00147549") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-2.15512") + max_val = float("0.42913") + mean = float("-1.38221") + std = float("0.277652") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.714131") + max_val = float("1.63294") + mean = float("1.13544") + std = float("0.0992085") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("0.0110827") + max_val = float("0.0639734") + mean = float("0.0257205") + std = float("0.00835153") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.516632") + max_val = float("0.224516") + mean = float("-0.0886359") + std = float("0.0647703") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0257925") + max_val = float("0.0417178") + mean = float("-0.000106541") + std = float("0.00163045") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-2.93083") + max_val = float("1.76129") + mean = float("-0.76492") + std = float("0.654104") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("0.973816") + max_val = float("2.91102") + mean = float("1.85247") + std = float("0.272485") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.00124108") + max_val = float("0.00551094") + mean = float("0.00251181") + std = float("0.000624682") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.154056") + max_val = float("0.0997407") + mean = float("0.0427987") + std = float("0.0198229") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0482202") + max_val = float("0.0395783") + mean = float("-0.000489332") + std = float("0.00371088") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.24474") + max_val = float("0.693989") + mean = float("-0.776354") + std = float("0.476138") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.973795") + max_val = float("2.89093") + 
mean = float("2.10265") + std = float("0.302973") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.000459391") + max_val = float("0.00364653") + mean = float("0.00103832") + std = float("0.000354294") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.036311") + max_val = float("0.0640884") + mean = float("0.0228094") + std = float("0.0124348") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.129421") + max_val = float("0.063091") + mean = float("-0.000254003") + std = float("0.00272646") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [768] + dtype = "float32" + min_val = float("-2.41043") + max_val = float("0.654464") + mean = float("-0.915927") + std = float("0.344484") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [768] + dtype = "float32" + min_val = float("0.519256") + max_val = float("1.87666") + mean = float("0.912499") + std = float("0.147147") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [768] + dtype = "float32" + min_val = float("0.00362407") + max_val = float("0.0346536") + mean = float("0.00806678") + std = float("0.00278567") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [768] + dtype = "float32" + min_val = float("-0.180838") + max_val = float("0.1547") + mean = float("0.0229543") + std = float("0.0362646") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.050247") + max_val = float("0.0427207") + mean = float("-6.42878e-05") + std = float("0.00168004") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [512] + dtype = "float32" + min_val = float("-3.38705") + max_val = float("1.66488") + mean = float("-1.17821") + std = float("0.526891") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [512] + dtype = "float32" + min_val = float("0.488671") + max_val = float("1.69914") + mean = float("1.10955") + std = float("0.15027") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [512] + dtype = "float32" + min_val = float("0.00126485") + max_val = float("0.00820921") + mean = float("0.00340923") + std = float("0.00106595") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [512] + dtype = "float32" + min_val = float("-0.11898") + max_val = float("0.0891435") + mean = float("-0.0369126") + std = float("0.0289632") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.225953") + max_val = float("0.196097") + mean = float("-0.000408665") + std = float("0.00566642") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-0.00866309") + max_val = float("0.000825011") + mean = float("-0.00232518") + std = float("0.0017594") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384, 
384, 1, 1] + dtype = "float32" + min_val = float("-0.202855") + max_val = float("0.135115") + mean = float("-0.00175899") + std = float("0.00410018") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [192] + dtype = "float32" + min_val = float("-1.95281") + max_val = float("0.504414") + mean = float("-0.323169") + std = float("0.341204") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [192] + dtype = "float32" + min_val = float("0.0702988") + max_val = float("2.23409") + mean = float("0.601912") + std = float("0.439789") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [192] + dtype = "float32" + min_val = float("6.06397e-05") + max_val = float("0.000857671") + mean = float("0.000292524") + std = float("0.000171414") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [192] + dtype = "float32" + min_val = float("-0.0341788") + max_val = float("0.0367911") + mean = float("0.0035144") + std = float("0.0110465") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0204711") + max_val = float("0.0537535") + mean = float("-0.000251412") + std = float("0.00321926") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [192] + dtype = "float32" + min_val = float("-1.95281") + max_val = float("0.504414") + mean = float("-0.323169") + std = float("0.341204") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [192] + dtype = "float32" + min_val = float("0.384773") + max_val = float("2.87161") + mean = float("1.22954") + std = float("0.52094") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [192] + dtype = "float32" + min_val = float("0.00047849") + max_val = float("0.0105565") + mean = float("0.00264743") + std = float("0.0011636") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [192] + dtype = "float32" + min_val = float("-0.0760633") + max_val = float("0.0960629") + mean = float("0.0132737") + std = float("0.0293694") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0214696") + max_val = float("0.0330379") + mean = float("-0.000105926") + std = float("0.00236944") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [192] + dtype = "float32" + min_val = float("-2.88997") + max_val = float("-0.124639") + mean = float("-1.33303") + std = float("0.398") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [192] + dtype = "float32" + min_val = float("0.720106") + max_val = float("2.09477") + mean = float("1.16316") + std = float("0.171513") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [192] + dtype = "float32" + min_val = float("0.0350146") + max_val = float("0.281849") + mean = float("0.0801621") + std = float("0.0325573") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [192] + dtype = "float32" + min_val = float("-2.97017") + max_val = float("2.23437") + mean = float("-0.175713") + std = float("0.385326") + data = None + + +class 
Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0303358") + max_val = float("0.0416184") + mean = float("-0.000150412") + std = float("0.00285456") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [192] + dtype = "float32" + min_val = float("-1.92902") + max_val = float("0.596353") + mean = float("-0.261587") + std = float("0.334409") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [192] + dtype = "float32" + min_val = float("0.0489169") + max_val = float("1.76734") + mean = float("0.453534") + std = float("0.302569") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [192] + dtype = "float32" + min_val = float("5.20947e-05") + max_val = float("0.00150981") + mean = float("0.000284881") + std = float("0.00019177") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [192] + dtype = "float32" + min_val = float("-0.0223658") + max_val = float("0.0363701") + mean = float("0.00787701") + std = float("0.00970774") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0239734") + max_val = float("0.0302964") + mean = float("-0.000361483") + std = float("0.00299226") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [192] + dtype = "float32" + min_val = float("-1.92902") + max_val = float("0.596353") + mean = float("-0.261587") + std = float("0.334409") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [192] + dtype = "float32" + min_val = float("0.419546") + max_val = float("2.27565") + mean = float("1.1481") + std = float("0.38095") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [192] + dtype = "float32" + min_val = float("0.00122219") + max_val = float("0.0063277") + mean = float("0.00282241") + std = float("0.000859264") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [192] + dtype = "float32" + min_val = float("-0.0745243") + max_val = float("0.0844685") + mean = float("0.0227952") + std = float("0.0246463") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0186838") + max_val = float("0.0254977") + mean = float("-0.000124378") + std = float("0.00251097") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [192] + dtype = "float32" + min_val = float("-2.53592") + max_val = float("-0.131672") + mean = float("-1.31612") + std = float("0.443757") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [192] + dtype = "float32" + min_val = float("0.717982") + max_val = float("1.65612") + mean = float("1.17887") + std = float("0.161137") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [192] + dtype = "float32" + min_val = float("0.019859") + max_val = float("0.109361") + mean = float("0.053725") + std = float("0.0174112") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [192] + dtype = "float32" + min_val = float("-1.6874") + max_val = float("0.715917") + 
mean = float("-0.0525487") + std = float("0.202935") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0424222") + max_val = float("0.0424072") + mean = float("-0.000164971") + std = float("0.00292092") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [192] + dtype = "float32" + min_val = float("-1.76329") + max_val = float("0.544922") + mean = float("-0.246307") + std = float("0.349367") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [192] + dtype = "float32" + min_val = float("0.00838759") + max_val = float("1.66366") + mean = float("0.357305") + std = float("0.246704") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [192] + dtype = "float32" + min_val = float("2.7519e-06") + max_val = float("0.00136124") + mean = float("0.000270016") + std = float("0.000205115") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [192] + dtype = "float32" + min_val = float("-0.0246477") + max_val = float("0.0419057") + mean = float("0.00937144") + std = float("0.00982806") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0257052") + max_val = float("0.0227599") + mean = float("-0.000402919") + std = float("0.00286132") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [192] + dtype = "float32" + min_val = float("-1.76329") + max_val = float("0.544922") + mean = float("-0.246307") + std = float("0.349367") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [192] + dtype = "float32" + min_val = float("0.385511") + max_val = float("1.96748") + mean = float("1.06974") + std = float("0.336052") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [192] + dtype = "float32" + min_val = float("0.00118411") + max_val = float("0.00608141") + mean = float("0.0031663") + std = float("0.000994709") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [192] + dtype = "float32" + min_val = float("-0.0656616") + max_val = float("0.0775184") + mean = float("0.0234637") + std = float("0.0196456") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0234567") + max_val = float("0.031622") + mean = float("-0.000127451") + std = float("0.0025894") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.51425") + max_val = float("0.158605") + mean = float("-1.26745") + std = float("0.427693") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.599436") + max_val = float("1.78145") + mean = float("1.14926") + std = float("0.161525") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("0.0150449") + max_val = float("0.0928161") + mean = float("0.0356273") + std = float("0.0112392") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + 
dtype = "float32" + min_val = float("-1.33124") + max_val = float("0.435574") + mean = float("-0.0281949") + std = float("0.145649") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368305") + max_val = float("0.0462592") + mean = float("-0.000170748") + std = float("0.0029624") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-2.0885") + max_val = float("0.649037") + mean = float("-0.259743") + std = float("0.386626") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("0.000296745") + max_val = float("0.72258") + mean = float("0.216526") + std = float("0.135201") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("8.11875e-09") + max_val = float("0.000665964") + mean = float("0.000158357") + std = float("9.40236e-05") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.0160817") + max_val = float("0.0306913") + mean = float("0.00616664") + std = float("0.0078993") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0133827") + max_val = float("0.0222178") + mean = float("-0.000251321") + std = float("0.00253254") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.0885") + max_val = float("0.649037") + mean = float("-0.259743") + std = float("0.386626") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.394568") + max_val = float("1.95749") + mean = float("0.953795") + std = float("0.30538") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.0012967") + max_val = float("0.00756705") + mean = float("0.00306035") + std = float("0.000987711") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-0.0322819") + max_val = float("0.0806911") + mean = float("0.0286436") + std = float("0.0227917") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0259952") + max_val = float("0.0272212") + mean = float("-0.000147606") + std = float("0.00265528") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-2.7731") + max_val = float("-0.0376525") + mean = float("-1.25978") + std = float("0.434928") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.744519") + max_val = float("1.562") + mean = float("1.13319") + std = float("0.139729") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("0.0113947") + max_val = float("0.0533248") + mean = float("0.0251346") + std = float("0.00756214") + data = None + + +class 
Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.991719") + max_val = float("0.260327") + mean = float("-0.0417286") + std = float("0.119228") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0456909") + max_val = float("0.0485113") + mean = float("-0.000180492") + std = float("0.00293373") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.20653") + max_val = float("0.515872") + mean = float("-0.218122") + std = float("0.352265") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("-3.46641e-06") + max_val = float("0.680648") + mean = float("0.195002") + std = float("0.117181") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("1.40005e-12") + max_val = float("0.000626638") + mean = float("0.000157763") + std = float("9.56391e-05") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0271548") + max_val = float("0.033376") + mean = float("0.0067476") + std = float("0.00878243") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0239047") + max_val = float("0.0291998") + mean = float("-0.00027316") + std = float("0.00260504") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-1.20653") + max_val = float("0.515872") + mean = float("-0.218122") + std = float("0.352265") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.398504") + max_val = float("1.57172") + mean = float("0.848038") + std = float("0.259251") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.00112489") + max_val = float("0.00610025") + mean = float("0.00301719") + std = float("0.000917632") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-0.0478429") + max_val = float("0.0836548") + mean = float("0.0242454") + std = float("0.0221161") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0240424") + max_val = float("0.0295695") + mean = float("-0.000120615") + std = float("0.00264505") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-2.48813") + max_val = float("-0.081453") + mean = float("-1.27066") + std = float("0.42002") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.694183") + max_val = float("1.5418") + mean = float("1.10669") + std = float("0.135408") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("0.00729749") + max_val = 
float("0.037708") + mean = float("0.0179887") + std = float("0.00554409") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.396006") + max_val = float("0.372514") + mean = float("-0.0446145") + std = float("0.0976403") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0442693") + max_val = float("0.0514986") + mean = float("-0.000169164") + std = float("0.00292394") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.23276") + max_val = float("0.509269") + mean = float("-0.153678") + std = float("0.304039") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.00230822") + max_val = float("1.53088") + mean = float("0.236662") + std = float("0.211142") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("1.5422e-06") + max_val = float("0.00552255") + mean = float("0.000356895") + std = float("0.000492703") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0329799") + max_val = float("0.071078") + mean = float("0.00817666") + std = float("0.0127163") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0505573") + max_val = float("0.0215882") + mean = float("-0.000354289") + std = float("0.00312677") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-1.23276") + max_val = float("0.509269") + mean = float("-0.153678") + std = float("0.304039") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.332725") + max_val = float("1.44096") + mean = float("0.751287") + std = float("0.218777") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.00203765") + max_val = float("0.010713") + mean = float("0.00478482") + std = float("0.00160695") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-0.0848523") + max_val = float("0.0954295") + mean = float("0.0308101") + std = float("0.029966") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0446459") + max_val = float("0.043586") + mean = float("-0.000155276") + std = float("0.00261469") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.86947") + max_val = float("-0.187775") + mean = float("-1.16389") + std = float("0.325632") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.751502") + max_val = float("1.6189") + mean = float("1.10951") + std = float("0.131899") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + 
shape = [192] + dtype = "float32" + min_val = float("0.00675098") + max_val = float("0.0288212") + mean = float("0.0150362") + std = float("0.00493022") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.630258") + max_val = float("0.15393") + mean = float("-0.0440278") + std = float("0.0837471") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0520183") + max_val = float("0.060427") + mean = float("-0.000134748") + std = float("0.00285085") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.8152") + max_val = float("1.61438") + mean = float("-0.0255439") + std = float("0.761416") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.478669") + max_val = float("2.07816") + mean = float("0.879851") + std = float("0.224413") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00476784") + max_val = float("0.0318025") + mean = float("0.0116175") + std = float("0.00470038") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.131857") + max_val = float("0.222727") + mean = float("-0.026891") + std = float("0.042331") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0777914") + max_val = float("0.0808518") + mean = float("-0.000351302") + std = float("0.00605248") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.91406") + max_val = float("2.11419") + mean = float("0.103692") + std = float("0.66786") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.856639") + max_val = float("5.70993") + mean = float("1.92505") + std = float("0.968671") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.00257245") + max_val = float("0.0283236") + mean = float("0.00901804") + std = float("0.00367608") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.109946") + max_val = float("0.115643") + mean = float("-0.0160117") + std = float("0.0391476") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0685027") + max_val = float("0.128425") + mean = float("-0.000355146") + std = float("0.0056262") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [384] + dtype = "float32" + min_val = float("-2.9274") + max_val = float("1.33653") + mean = float("-0.313448") + std = float("0.57221") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [384] + dtype = "float32" + min_val = float("0.700648") + max_val = float("2.45294") + mean = float("1.1469") + std = float("0.257954") + data = None + + +class 
Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [384] + dtype = "float32" + min_val = float("0.00449195") + max_val = float("0.0530031") + mean = float("0.0124585") + std = float("0.00683387") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [384] + dtype = "float32" + min_val = float("-0.1972") + max_val = float("0.152797") + mean = float("0.0131924") + std = float("0.044096") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0546488") + max_val = float("0.0575398") + mean = float("-6.44059e-05") + std = float("0.00299774") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [256] + dtype = "float32" + min_val = float("-2.08086") + max_val = float("1.23876") + mean = float("-0.929492") + std = float("0.560306") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [256] + dtype = "float32" + min_val = float("0.460814") + max_val = float("1.60591") + mean = float("1.03747") + std = float("0.187077") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [256] + dtype = "float32" + min_val = float("0.000450162") + max_val = float("0.0138338") + mean = float("0.00234151") + std = float("0.00124114") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [256] + dtype = "float32" + min_val = float("-0.15446") + max_val = float("0.117599") + mean = float("-0.0301123") + std = float("0.0510926") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.215021") + max_val = float("0.160071") + mean = float("-0.000576881") + std = float("0.00999512") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-0.0144925") + max_val = float("0.00179001") + mean = float("-0.00408812") + std = float("0.00285198") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.536113") + max_val = float("0.170091") + mean = float("-0.00352777") + std = float("0.00872308") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [96] + dtype = "float32" + min_val = float("-1.89842") + max_val = float("0.649199") + mean = float("-0.163374") + std = float("0.446007") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [96] + dtype = "float32" + min_val = float("0.118793") + max_val = float("3.45032") + mean = float("0.648909") + std = float("0.709122") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [96] + dtype = "float32" + min_val = float("3.48017e-05") + max_val = float("0.00129092") + mean = float("0.000353523") + std = float("0.000270336") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [96] + dtype = "float32" + min_val = float("-0.0383633") + max_val = float("0.0410815") + mean = float("0.00326046") + std = float("0.0151102") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0396602") + max_val = 
float("0.0712072") + mean = float("-0.000415922") + std = float("0.00589655") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [96] + dtype = "float32" + min_val = float("-1.89842") + max_val = float("0.649199") + mean = float("-0.163374") + std = float("0.446007") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [96] + dtype = "float32" + min_val = float("0.27629") + max_val = float("5.76398") + mean = float("1.11237") + std = float("0.941275") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [96] + dtype = "float32" + min_val = float("0.00035511") + max_val = float("0.00718247") + mean = float("0.0023089") + std = float("0.00133833") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [96] + dtype = "float32" + min_val = float("-0.0920791") + max_val = float("0.0842519") + mean = float("0.0093933") + std = float("0.0381936") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0346361") + max_val = float("0.0508237") + mean = float("-0.000158614") + std = float("0.00427877") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.47268") + max_val = float("-0.0395751") + mean = float("-1.25288") + std = float("0.438506") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.484514") + max_val = float("1.73163") + mean = float("0.919464") + std = float("0.175598") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("0.0182282") + max_val = float("0.116164") + mean = float("0.0425678") + std = float("0.017739") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-2.55824") + max_val = float("1.13418") + mean = float("-0.120129") + std = float("0.381008") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.127128") + max_val = float("0.0861504") + mean = float("-0.000218704") + std = float("0.00529549") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [96] + dtype = "float32" + min_val = float("-1.35987") + max_val = float("0.614259") + mean = float("-0.108904") + std = float("0.363421") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [96] + dtype = "float32" + min_val = float("0.00955654") + max_val = float("1.85224") + mean = float("0.454916") + std = float("0.359478") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [96] + dtype = "float32" + min_val = float("4.00506e-06") + max_val = float("0.00234563") + mean = float("0.000538477") + std = float("0.000453658") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [96] + dtype = "float32" + min_val = float("-0.0357937") + max_val = float("0.0432553") + mean = float("0.00761236") + std = float("0.014379") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [96, 96, 1, 1] + 
dtype = "float32" + min_val = float("-0.0375291") + max_val = float("0.0336522") + mean = float("-0.000679886") + std = float("0.00538009") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [96] + dtype = "float32" + min_val = float("-1.35987") + max_val = float("0.614259") + mean = float("-0.108904") + std = float("0.363421") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [96] + dtype = "float32" + min_val = float("0.381916") + max_val = float("2.31118") + mean = float("0.904033") + std = float("0.422742") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [96] + dtype = "float32" + min_val = float("0.00152583") + max_val = float("0.0130956") + mean = float("0.00360983") + std = float("0.00182045") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [96] + dtype = "float32" + min_val = float("-0.0602139") + max_val = float("0.0926286") + mean = float("0.0240804") + std = float("0.0293413") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0526704") + max_val = float("0.0617496") + mean = float("-0.00024044") + std = float("0.00425527") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [96] + dtype = "float32" + min_val = float("-3.30872") + max_val = float("0.356571") + mean = float("-1.21876") + std = float("0.556947") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [96] + dtype = "float32" + min_val = float("0.42521") + max_val = float("1.92533") + mean = float("1.00705") + std = float("0.236551") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [96] + dtype = "float32" + min_val = float("0.0150869") + max_val = float("0.0812702") + mean = float("0.0265418") + std = float("0.00908418") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [96] + dtype = "float32" + min_val = float("-0.89497") + max_val = float("0.555961") + mean = float("-0.0363605") + std = float("0.206065") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.132978") + max_val = float("0.135745") + mean = float("-0.000279374") + std = float("0.00518152") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [96] + dtype = "float32" + min_val = float("-1.22354") + max_val = float("0.655745") + mean = float("-0.0920703") + std = float("0.30607") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [96] + dtype = "float32" + min_val = float("0.0320682") + max_val = float("1.28684") + mean = float("0.312972") + std = float("0.193547") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [96] + dtype = "float32" + min_val = float("1.25242e-05") + max_val = float("0.00304423") + mean = float("0.000494706") + std = float("0.000483804") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [96] + dtype = "float32" + min_val = float("-0.0280188") + max_val = float("0.0438614") + mean = float("0.0066925") + std = float("0.0132689") + data = None + + +class 
Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0359891") + max_val = float("0.037578") + mean = float("-0.000549529") + std = float("0.00543459") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [96] + dtype = "float32" + min_val = float("-1.22354") + max_val = float("0.655745") + mean = float("-0.0920703") + std = float("0.30607") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [96] + dtype = "float32" + min_val = float("0.321517") + max_val = float("1.60435") + mean = float("0.742508") + std = float("0.256539") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [96] + dtype = "float32" + min_val = float("0.0010948") + max_val = float("0.0109667") + mean = float("0.00391925") + std = float("0.00180583") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [96] + dtype = "float32" + min_val = float("-0.0465221") + max_val = float("0.11619") + mean = float("0.0224584") + std = float("0.0290944") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0464603") + max_val = float("0.0403376") + mean = float("-0.000230737") + std = float("0.00435378") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-3.56355") + max_val = float("0.31361") + mean = float("-1.16302") + std = float("0.578576") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.516248") + max_val = float("2.22549") + mean = float("1.01872") + std = float("0.244167") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("0.00947191") + max_val = float("0.0394704") + mean = float("0.019952") + std = float("0.00531941") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-0.543762") + max_val = float("0.453745") + mean = float("-0.00190825") + std = float("0.159635") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.104936") + max_val = float("0.123929") + mean = float("-0.000230516") + std = float("0.00527145") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-0.914544") + max_val = float("0.549399") + mean = float("-0.147604") + std = float("0.291572") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.0335844") + max_val = float("1.37871") + mean = float("0.313952") + std = float("0.205577") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("2.17369e-05") + max_val = float("0.00299857") + mean = float("0.000531722") + std = float("0.000408053") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.0202019") + max_val = float("0.0401913") + mean = 
float("0.00828191") + std = float("0.0127605") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0457306") + max_val = float("0.0364814") + mean = float("-0.00067745") + std = float("0.00557391") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-0.914544") + max_val = float("0.549399") + mean = float("-0.147604") + std = float("0.291572") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.141171") + max_val = float("1.73846") + mean = float("0.702424") + std = float("0.28575") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.000436992") + max_val = float("0.0116807") + mean = float("0.00437206") + std = float("0.00181053") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-0.0611763") + max_val = float("0.0849162") + mean = float("0.0256109") + std = float("0.0259056") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0711506") + max_val = float("0.0585904") + mean = float("-0.000252199") + std = float("0.00442523") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-2.62308") + max_val = float("0.0475886") + mean = float("-1.09223") + std = float("0.492262") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.546141") + max_val = float("1.74737") + mean = float("0.990116") + std = float("0.183616") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("0.00780535") + max_val = float("0.0286571") + mean = float("0.0150858") + std = float("0.00396297") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.397648") + max_val = float("0.372501") + mean = float("-0.0250664") + std = float("0.127281") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0589674") + max_val = float("0.10126") + mean = float("-0.000249985") + std = float("0.00515185") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-0.982631") + max_val = float("0.556017") + mean = float("-0.128051") + std = float("0.290117") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.0693376") + max_val = float("1.15446") + mean = float("0.273121") + std = float("0.164903") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("6.83451e-05") + max_val = float("0.00211645") + mean = float("0.000639177") + std = float("0.000387327") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + 
min_val = float("-0.0410594") + max_val = float("0.0527878") + mean = float("0.00475022") + std = float("0.0156944") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0522607") + max_val = float("0.0599666") + mean = float("-0.000547462") + std = float("0.00617448") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-0.982631") + max_val = float("0.556015") + mean = float("-0.128051") + std = float("0.290117") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.179543") + max_val = float("1.52891") + mean = float("0.577267") + std = float("0.230524") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.00185316") + max_val = float("0.0151893") + mean = float("0.0053405") + std = float("0.00217932") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-0.062003") + max_val = float("0.105803") + mean = float("0.0194326") + std = float("0.0299898") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.052588") + max_val = float("0.0401134") + mean = float("-0.000216222") + std = float("0.00433571") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-3.34591") + max_val = float("0.217567") + mean = float("-1.0205") + std = float("0.542044") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.541294") + max_val = float("2.73375") + mean = float("1.0434") + std = float("0.234097") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("0.00623622") + max_val = float("0.0298161") + mean = float("0.0123004") + std = float("0.00410441") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.316894") + max_val = float("0.213951") + mean = float("-0.0247624") + std = float("0.0998134") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0787901") + max_val = float("0.0721749") + mean = float("-0.000275213") + std = float("0.00519315") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-0.603406") + max_val = float("0.46876") + mean = float("-0.0838298") + std = float("0.256426") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.0549309") + max_val = float("1.22997") + mean = float("0.285879") + std = float("0.196767") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00027077") + max_val = float("0.0173696") + mean = float("0.00296255") + std = float("0.00270399") + data = None + + +class Program_weight_tensor_parameter_589: + 
name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0278518") + max_val = float("0.0236883") + mean = float("0.000353097") + std = float("0.00822438") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0745539") + max_val = float("0.0558059") + mean = float("-0.000956811") + std = float("0.00693171") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-0.603406") + max_val = float("0.468759") + mean = float("-0.0838298") + std = float("0.256426") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.184847") + max_val = float("1.32269") + mean = float("0.519013") + std = float("0.258771") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.00359367") + max_val = float("0.0619606") + mean = float("0.0188235") + std = float("0.00993846") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.0720302") + max_val = float("0.0583683") + mean = float("-0.00652554") + std = float("0.0276525") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.077949") + max_val = float("0.0416607") + mean = float("-0.000187561") + std = float("0.00433555") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-2.41266") + max_val = float("0.498184") + mean = float("-0.836552") + std = float("0.475182") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.828403") + max_val = float("2.26623") + mean = float("1.25294") + std = float("0.215447") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("0.00371984") + max_val = float("0.0266859") + mean = float("0.00901981") + std = float("0.00402208") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.356962") + max_val = float("0.240426") + mean = float("-0.0246764") + std = float("0.102141") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.111863") + max_val = float("0.117189") + mean = float("-0.00012768") + std = float("0.00536188") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-3.19124") + max_val = float("1.93359") + mean = float("0.508377") + std = float("0.871805") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.236398") + max_val = float("2.59722") + mean = float("0.515435") + std = float("0.323564") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00484403") + max_val = float("0.0706225") + mean = float("0.0146031") + std = float("0.0110259") + 
data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.209383") + max_val = float("0.238176") + mean = float("-0.0128806") + std = float("0.0740322") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.169334") + max_val = float("0.152895") + mean = float("-0.000316404") + std = float("0.0110428") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-4.89364") + max_val = float("1.73104") + mean = float("0.4216") + std = float("1.0546") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.36737") + max_val = float("6.94832") + mean = float("1.69928") + std = float("1.37496") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.00220095") + max_val = float("0.0892683") + mean = float("0.0142797") + std = float("0.011982") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.162445") + max_val = float("0.294147") + mean = float("0.00715431") + std = float("0.0821284") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0897093") + max_val = float("0.18944") + mean = float("9.40014e-05") + std = float("0.0102431") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [192] + dtype = "float32" + min_val = float("-2.2631") + max_val = float("1.81642") + mean = float("-0.104199") + std = float("0.765409") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [192] + dtype = "float32" + min_val = float("0.55386") + max_val = float("3.06666") + mean = float("1.03418") + std = float("0.295019") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [192] + dtype = "float32" + min_val = float("0.00449313") + max_val = float("0.113476") + mean = float("0.0166471") + std = float("0.0141621") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [192] + dtype = "float32" + min_val = float("-0.370599") + max_val = float("0.182049") + mean = float("-0.036688") + std = float("0.0839067") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0617191") + max_val = float("0.0972102") + mean = float("-0.000107627") + std = float("0.00527157") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [128] + dtype = "float32" + min_val = float("-2.77261") + max_val = float("1.9501") + mean = float("-0.747738") + std = float("0.667569") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [128] + dtype = "float32" + min_val = float("0.286557") + max_val = float("2.12971") + mean = float("0.963078") + std = float("0.252374") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [128] + dtype = "float32" + min_val = float("0.000299552") + max_val = float("0.00717751") + mean = 
float("0.00205572") + std = float("0.00102826") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [128] + dtype = "float32" + min_val = float("-0.172927") + max_val = float("0.263946") + mean = float("0.00188719") + std = float("0.0703671") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.15476") + max_val = float("0.142506") + mean = float("-0.00109664") + std = float("0.0155992") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-0.0126476") + max_val = float("-0.000993819") + mean = float("-0.00607335") + std = float("0.00318226") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.201162") + max_val = float("0.136016") + mean = float("-0.00720846") + std = float("0.0140613") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0505865") + max_val = float("0.0461092") + mean = float("-0.00117516") + std = float("0.00977429") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0457659") + max_val = float("0.0697484") + mean = float("-0.00021736") + std = float("0.00778433") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [48] + dtype = "float32" + min_val = float("0") 
+ max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0742706") + max_val = float("0.0873867") + mean = float("-0.000456091") + std = float("0.00888736") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0664849") + max_val = float("0.0489036") + mean = float("-0.00150188") + std = float("0.0103773") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0407101") + max_val = float("0.050469") + mean = float("-0.000484431") + std = float("0.00782941") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0793737") + max_val = float("0.0613154") + mean = float("-0.000271202") + std = float("0.00895769") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [48] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0919037") + max_val = float("0.0493862") + mean = float("-0.00106003") + std = float("0.0128276") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0804163") + max_val = float("0.0565016") + mean = float("-0.000357967") + std = float("0.00821755") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0770235") + max_val = float("0.067704") + mean = float("-0.000344409") + std = float("0.00960955") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.118598") + max_val = float("0.0941444") + mean = float("-0.00163172") + std = float("0.0167723") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0929333") + max_val = float("0.130766") + mean = float("-0.000650292") + std = float("0.0158564") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("-3.11018") + max_val = float("3.25616") + mean = float("0.366533") + std = float("1.14177") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("0.792014") + max_val = float("4.97259") + mean = float("1.87775") + std = float("0.779521") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96] + dtype = "float32" + min_val = float("0.315668") + max_val = float("13.6419") + mean = float("1.47445") + std = float("1.58175") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-1.15247") + max_val = float("1.963") + mean = float("-0.208063") + std = float("0.507653") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.0933882") + max_val = float("0.0936018") + mean = float("-0.000299565") + std = float("0.00912066") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.111523") + max_val = float("0.120134") + mean = float("-0.000452619") + std = float("0.0141786") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [32, 32, 3, 3] + dtype 
= "float32" + min_val = float("-0.225894") + max_val = float("0.134795") + mean = float("-0.0001144") + std = float("0.0181569") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.214027") + max_val = float("0.221186") + mean = float("-0.00133857") + std = float("0.0524068") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..a0a4bdddf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py new file mode 100644 index 000000000..24ecd6576 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("9.31323e-10") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3549, 68] + dtype = "float32" + min_val = float("-3.91208") + max_val = float("10.707") + mean = float("3.11295e-05") + std = float("1.44354") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [3549, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("412.0") + mean = float("208.0") + std = float("120.038") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3549, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py new file mode 100644 index 000000000..4910e2ad1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + 
# pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- 
(-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..326177577 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +a359c7e1d53cf1fb3878706e33bab7ffa5b4304c0589d7e759965f8ab6ff7f98 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py new file mode 100644 index 000000000..f58dc071b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 640, 640] + dtype = "float32" + min_val = float("-2.01516") + max_val = float("2.64") + mean = float("0.187747") + std = float("0.681331") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py new file mode 100644 index 000000000..6bdb7ea24 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py @@ -0,0 +1,7159 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + 
parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, 
+ parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + 
parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + 
parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + data_0, + ): + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_752 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_747, swish_0 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_742, [1, 1], [1, 
1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_742, swish_1 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_741, + parameter_740, + parameter_739, + parameter_738, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_737, swish_2 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727, swish_3 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_723, 
parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712, swish_6 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + del batch_norm__42, batch_norm__48 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 
48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697, swish_8 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + del batch_norm__60, batch_norm__66 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_688, parameter_689, parameter_690, 
parameter_691 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682, swish_10 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + del batch_norm__78, batch_norm__84 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_677 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) + del parameter_676 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), 
float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_0, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_675 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_670, swish_12 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_665 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660, swish_13 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + 
batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645, swish_16 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + del batch_norm__120, batch_norm__126 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # 
pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630, swish_18 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + del batch_norm__138, batch_norm__144 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 
96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615, swish_20 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + del batch_norm__156, batch_norm__162 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, 
parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600, swish_22 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) + del batch_norm__174, batch_norm__180 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 
96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585, swish_24 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + del batch_norm__192, batch_norm__198 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570, swish_26 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) + del batch_norm__210, batch_norm__216 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_565 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) + del parameter_564 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_1, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_563 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 + + # pd_op.swish: 
(2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_558 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_553 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548, swish_29 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + 
batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533, swish_32 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) + del batch_norm__252, batch_norm__258 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 
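Note: the generated program in this hunk expands the same few building blocks over and over: a conv-BN-swish unit (pd_op.conv2d -> pd_op.batch_norm_ -> pd_op.swish), RepVGG-style residual pairs (a 3x3 and a 1x1 conv-BN branch summed, swish, then a shortcut add), and, at the end of each stage, an ESE-style channel attention (pd_op.mean over H/W -> 1x1 conv with bias -> pd_op.hardsigmoid -> pd_op.multiply) followed by a 1x1 transition conv. The `(lambda x, f: f(x))` wrapper around batch_norm only normalizes its return value into a 6-tuple so the unpacking works whether the op returns a tuple or a single tensor. Below is a minimal dygraph sketch of the two most common blocks, for orientation only; the class names `ConvBNLayer` and `EffectiveSELayer` are illustrative and are not taken from the patch.

    import paddle
    import paddle.nn as nn
    import paddle.nn.functional as F

    class ConvBNLayer(nn.Layer):
        # conv2d (no bias) -> batch_norm -> swish, the triplet repeated above.
        def __init__(self, in_ch, out_ch, kernel_size, stride=1, padding=0):
            super().__init__()
            self.conv = nn.Conv2D(in_ch, out_ch, kernel_size, stride=stride,
                                  padding=padding, bias_attr=False)
            self.bn = nn.BatchNorm2D(out_ch, momentum=0.9, epsilon=1e-5)

        def forward(self, x):
            return F.swish(self.bn(self.conv(x)))

    class EffectiveSELayer(nn.Layer):
        # global mean over H,W -> 1x1 conv with bias -> hardsigmoid -> re-weight,
        # the mean/conv2d/hardsigmoid/multiply sequence that closes each stage.
        def __init__(self, channels):
            super().__init__()
            self.fc = nn.Conv2D(channels, channels, 1)

        def forward(self, x):
            attn = x.mean(axis=[2, 3], keepdim=True)
            attn = F.hardsigmoid(self.fc(attn), slope=0.166667, offset=0.5)
            return x * attn

    # Example of one stage transition as it appears in the graph above
    # (concat of the two CSP branches, attention, then a 1x1 projection):
    #   y = paddle.concat([branch_a, branch_b], axis=1)
    #   y = EffectiveSELayer(y.shape[1])(y)
    #   y = ConvBNLayer(y.shape[1], out_channels, 1)(y)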
+ + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518, swish_34 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + del batch_norm__270, batch_norm__276 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 
192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503, swish_36 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488, swish_38 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + del batch_norm__306, batch_norm__312 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473, swish_40 + + # 
pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) + del batch_norm__324, batch_norm__330 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458, swish_42 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) + del batch_norm__342, batch_norm__348 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_453 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_452, full_int_array_1) + del parameter_452 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_2, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_451 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + 
True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436, swish_45 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_426 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421, swish_48 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) + del batch_norm__384, batch_norm__390 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_410, + parameter_409, + parameter_408, + 
parameter_407, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406, swish_50 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) + del batch_norm__402, batch_norm__408 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_391, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391, swish_52 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) + del batch_norm__420, batch_norm__426 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_386 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) + del parameter_385 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_3, parameter_384, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_384 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_383, + parameter_382, + parameter_381, + parameter_380, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_380, parameter_381, parameter_382, parameter_383 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 
= paddle._C_ops.conv2d( + swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, swish_54 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_359, swish_57 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_40) + del add_40 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 
384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_41) + del add_41 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_42) + del add_42 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_5 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, batch_norm__546 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(add_43) + del add_43 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- 
(2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_44) + del add_44 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + 
parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_45) + del add_45 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_7 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), 
+ ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234, swish_77 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_46) + del add_46 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + 
batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_47) + del add_47 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_82 = 
paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_48) + del add_48 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_9 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_184 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_10 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + 
batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_49) + del add_49 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_90 = 
paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_50) + del add_50 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_91 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, 
+ ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_51) + del add_51 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_11 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) + combine_12 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_109 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104, swish_97 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) 
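# --- Editor's note: explanatory comment, not produced by the graph exporter ---
# concat_14 above is split through two parallel 1x1 convolutions: conv2d_130 acts as
# the cross-stage shortcut branch, while conv2d_131 feeds the stack of basic blocks
# that follows; the two paths are concatenated again further down (combine_13 /
# concat_15), the CSP-style split-and-merge used throughout this PP-YOLOE-L_vehicle
# neck subgraph.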
+ swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_52) + del add_52 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, 
f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_100 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_53) + del add_53 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_101 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) 
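# --- Editor's note: explanatory comment, not produced by the graph exporter ---
# This is the recurring basic-block pattern of the stage: the same activation
# (here swish_102) feeds a 3x3 conv branch (conv2d_139) and a 1x1 conv branch
# (conv2d_140), each result is batch-normalized, and the two are summed and passed
# through swish just below (add_54 / swish_103), i.e. a RepVGG-style dual-branch
# block. The (lambda x, f: f(x)) wrapper around paddle._C_ops.batch_norm only
# normalizes the op's return value to a 6-tuple, so the unpacking works whether
# the call returns a list/tuple of outputs or a single tensor.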
+ del parameter_64, swish_102 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_54) + del add_54 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_13 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_1 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) + del slice_0, slice_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [1, 1] + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, 
parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_55 = paddle._C_ops.add(conv2d_142, reshape_4) + del conv2d_142, reshape_4 + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_55) + del add_55 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_5 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_5, parameter_52 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) + add_56 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x768x-1x-1xf32, 4x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_56, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_57 = paddle._C_ops.add(conv2d_144, reshape_5) + del conv2d_144, reshape_5 + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_58 = paddle._C_ops.add(conv2d_145, reshape_6) + del conv2d_145, reshape_6 + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_58) + del add_58 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_6 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_6, parameter_43 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, 
f: f(x))( + paddle._C_ops.batch_norm( + conv2d_146, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_106 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_59 = paddle._C_ops.add(conv2d_147, reshape_7) + del conv2d_147, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_4] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_59, stack_0) + del add_59, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1] + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_9) + del conv2d_148 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_4] + del multiply_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_3 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_2, 
slice_3) + del slice_2, slice_3 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_60 = paddle._C_ops.add(conv2d_149, reshape_10) + del conv2d_149, reshape_10 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_60) + del add_60 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_8 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_150 = paddle._C_ops.conv2d( + multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_8, parameter_33 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_150, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_61 = paddle._C_ops.add(swish_107, swish_94) + del swish_107 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_151 = paddle._C_ops.conv2d( + add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_61, parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_62 = paddle._C_ops.add(conv2d_151, reshape_11) + del conv2d_151, reshape_11 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_63 = paddle._C_ops.add(conv2d_152, reshape_12) + del conv2d_152, reshape_12 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_63) + del add_63 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_9 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + 
multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_108 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_64 = paddle._C_ops.add(conv2d_154, reshape_13) + del conv2d_154, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_7] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_64, stack_2) + del add_64, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_9) + del conv2d_155 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_7] + del multiply_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_4 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = 
paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_8 + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_65 = paddle._C_ops.add(conv2d_156, reshape_16) + del conv2d_156, reshape_16 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_65) + del add_65 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_157 = paddle._C_ops.conv2d( + multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_15 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_157, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_66 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_158 = paddle._C_ops.conv2d( + add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_66, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_67 = paddle._C_ops.add(conv2d_158, reshape_17) + del conv2d_158, reshape_17 + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_68 = paddle._C_ops.add(conv2d_159, reshape_18) + del conv2d_159, reshape_18 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_68) + del add_68 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 
192x192x1x1xf32) + conv2d_160 = paddle._C_ops.conv2d( + multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_6 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_160, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_110 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_69 = paddle._C_ops.add(conv2d_161, reshape_19) + del conv2d_161, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_10] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_69, stack_4) + del add_69, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_9) + del conv2d_162, full_int_array_9 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_10] + del full_1, full_2, multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 + + # builtin.combine: ([-1x-1x4xf32, 
-1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py new file mode 100644 index 000000000..bf4cf55ae --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py @@ -0,0 +1,8161 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00892735") + max_val = float("0.0359164") + mean = float("8.74861e-08") + std = float("0.00810027") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.14403") + max_val = float("0.16853") + mean = float("5.68543e-08") + std = float("0.00707932") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0740081") + max_val = float("0.252888") + mean = float("0.0629457") + std = float("0.0572415") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.838381") + max_val = float("1.78564") + mean = float("1.29191") + std = float("0.191219") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000988996") + max_val = float("0.0390925") + mean = float("0.00961489") + std = float("0.00656614") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.11659") + max_val = float("0.132114") + mean = float("0.00156193") + std = float("0.0387838") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.060318") + max_val = float("0.0853128") + mean = float("-0.000471342") + std = float("0.00698795") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00462196") + max_val = float("0.00603283") + mean = float("4.25491e-05") + std = float("0.00192614") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0100888") + max_val = float("0.0113319") + mean = float("-9.49891e-06") + std = float("0.00128433") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.336461") + max_val = float("0.0527773") + mean = float("-0.0165997") + std = float("0.0458695") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.446317") + max_val = float("1.5052") + mean = float("0.404532") + std = float("0.335901") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = 
[192] + dtype = "float32" + min_val = float("0.959521") + max_val = float("2.22919") + mean = float("1.3736") + std = float("0.176912") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.00279508") + max_val = float("23.2612") + mean = float("0.479151") + std = float("2.07157") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.559818") + max_val = float("2.59203") + mean = float("0.0883731") + std = float("0.322225") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.70809") + max_val = float("0.443594") + mean = float("0.00156292") + std = float("0.0251271") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0114725") + max_val = float("0.0157792") + mean = float("-8.72761e-05") + std = float("0.00276262") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348732") + max_val = float("0.0307751") + mean = float("2.79732e-05") + std = float("0.00181061") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00445757") + max_val = float("0.0190964") + mean = float("6.58329e-08") + std = float("0.00445574") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0827377") + max_val = float("0.109436") + mean = float("3.68891e-08") + std = float("0.00410645") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0151169") + max_val = float("0.107487") + mean = float("0.032917") + std = float("0.0178601") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("1.00868") + max_val = float("1.29019") + mean = float("1.1456") + std = float("0.0510674") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000383663") + max_val = float("0.550054") + mean = float("0.0230272") + std = float("0.0486734") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.106161") + max_val = float("0.123469") + mean = float("-0.010298") + std = float("0.0313141") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0462353") + max_val = float("0.0532962") + mean = float("-0.000249771") + std = float("0.00297506") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00255366") + max_val = float("0.00300366") + mean = float("9.24283e-05") + std = float("0.00102595") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00218811") + max_val = float("0.00368407") + mean = float("2.14446e-05") + std = float("0.000454602") + data = None 
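Note that the Program_weight_tensor_parameter_* records in this weight_meta.py carry only the tensor name, shape, dtype and summary statistics, with data = None, so any consumer that wants concrete arrays has to synthesize them. A minimal sketch of one way to materialize a stand-in tensor from such a record, assuming NumPy and a hypothetical materialize() helper that are not part of this patch:

import numpy as np

def materialize(meta):
    # Use explicit data when present; otherwise sample from the recorded statistics.
    if getattr(meta, "data", None) is not None:
        return np.asarray(meta.data, dtype=meta.dtype).reshape(meta.shape)
    mean = float(getattr(meta, "mean", 0.0))
    std = float(getattr(meta, "std", 1.0))
    sample = np.random.normal(mean, std, size=meta.shape).astype(meta.dtype)
    # Some records (e.g. parameter_9) only provide a min/max range; clip to it.
    lo, hi = getattr(meta, "min_val", None), getattr(meta, "max_val", None)
    if lo is not None and hi is not None:
        sample = np.clip(sample, lo, hi)
    return sample

A caller could then recreate, say, parameter_26 via materialize(Program_weight_tensor_parameter_26) and feed it to the exported program; the recorded statistics keep the stand-in weights in a realistic numeric range even though the exact values are not stored.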
+ + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.404012") + max_val = float("0.018674") + mean = float("-0.0535936") + std = float("0.0678225") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.23292") + max_val = float("0.549526") + mean = float("0.280285") + std = float("0.126685") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.992106") + max_val = float("1.5115") + mean = float("1.23501") + std = float("0.0724327") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.00193308") + max_val = float("158.649") + mean = float("2.35102") + std = float("10.1328") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-3.49971") + max_val = float("2.32388") + mean = float("0.0203273") + std = float("0.419635") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.299716") + max_val = float("0.19324") + mean = float("0.000565759") + std = float("0.0141589") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00201965") + max_val = float("0.00793739") + mean = float("1.85378e-06") + std = float("0.000924825") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0053256") + max_val = float("0.0106358") + mean = float("3.65302e-06") + std = float("0.000513084") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00483921") + max_val = float("0.0132204") + mean = float("1.85537e-08") + std = float("0.00434121") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.033019") + max_val = float("0.0657063") + mean = float("1.06083e-08") + std = float("0.00221797") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-0.0161648") + max_val = float("0.0716208") + mean = float("0.0156062") + std = float("0.014137") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("1.02758") + max_val = float("1.22328") + mean = float("1.09273") + std = float("0.0263012") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("9.07716e-05") + max_val = float("0.044646") + mean = float("0.00124304") + std = float("0.0020634") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape 
= [768] + dtype = "float32" + min_val = float("-0.110096") + max_val = float("0.0490828") + mean = float("-0.00556926") + std = float("0.0125766") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0245815") + max_val = float("0.0259975") + mean = float("-8.56304e-05") + std = float("0.00108959") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768] + dtype = "float32" + min_val = float("-0.00228552") + max_val = float("0.00179437") + mean = float("9.01848e-05") + std = float("0.000464165") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00190183") + max_val = float("0.00166425") + mean = float("2.80024e-05") + std = float("0.000154694") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-0.360602") + max_val = float("0.0321948") + mean = float("-0.017021") + std = float("0.0416566") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("-0.149733") + max_val = float("0.255597") + mean = float("0.127259") + std = float("0.0547028") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("1.01591") + max_val = float("1.35046") + mean = float("1.11013") + std = float("0.0353362") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("0.000190586") + max_val = float("5.78833") + mean = float("0.106439") + std = float("0.43971") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768] + dtype = "float32" + min_val = float("-0.732685") + max_val = float("0.378772") + mean = float("-0.0300328") + std = float("0.0991444") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0393304") + max_val = float("0.0266957") + mean = float("-0.000475184") + std = float("0.00312029") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768] + dtype = "float32" + min_val = float("-0.00365788") + max_val = float("0.00249573") + mean = float("1.83923e-05") + std = float("0.00034543") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0129539") + max_val = float("0.0408516") + mean = float("7.14053e-06") + std = float("0.000222292") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [768] + dtype = "float32" + min_val = float("-0.239693") + max_val = float("0.342023") + mean = float("0.111992") + std = float("0.075942") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + min_val = float("0.85597") + max_val = float("1.34121") + mean = float("1.0926") + std = float("0.041353") + data = None + + +class Program_weight_tensor_parameter_57: + name = 
"parameter_57" + shape = [768] + dtype = "float32" + min_val = float("0.00747408") + max_val = float("91.142") + mean = float("1.28785") + std = float("4.31139") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("-0.940587") + max_val = float("0.533912") + mean = float("-0.0459939") + std = float("0.109863") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0494613") + max_val = float("0.0342687") + mean = float("-0.00053839") + std = float("0.00305259") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.220176") + max_val = float("0.0362122") + mean = float("-0.0290593") + std = float("0.0325532") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.949198") + max_val = float("1.03905") + mean = float("0.985069") + std = float("0.0122127") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.000362336") + max_val = float("5.86749") + mean = float("0.0824461") + std = float("0.456655") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.0999696") + max_val = float("0.203239") + mean = float("0.00744766") + std = float("0.024564") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0202515") + max_val = float("0.0297371") + mean = float("0.000259053") + std = float("0.00196863") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.220176") + max_val = float("0.0362122") + mean = float("-0.0290593") + std = float("0.0325532") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.853608") + max_val = float("1.12594") + mean = float("1.01957") + std = float("0.0206557") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00676097") + max_val = float("25.3593") + mean = float("0.532137") + std = float("1.58471") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.311548") + max_val = float("0.516814") + mean = float("-0.00365134") + std = float("0.0774418") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.018036") + max_val = float("0.0229965") + mean = float("-1.03648e-05") + std = float("0.00100784") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [384] + dtype = "float32" + min_val = float("-0.187836") + max_val = float("0.0393232") + mean = float("-0.0497104") + std = float("0.0341181") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384] + dtype = "float32" + min_val = float("0.92338") + max_val = float("1.15649") + mean = float("1.01802") + std = float("0.0319672") + data = None + + +class 
Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("0.0260268") + max_val = float("9.39199") + mean = float("0.909698") + std = float("1.08737") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-0.352188") + max_val = float("0.490548") + mean = float("0.0139095") + std = float("0.119918") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0214424") + max_val = float("0.0293422") + mean = float("1.62345e-05") + std = float("0.00113367") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384] + dtype = "float32" + min_val = float("-0.136108") + max_val = float("0.0210026") + mean = float("-0.0495464") + std = float("0.0271927") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [384] + dtype = "float32" + min_val = float("0.94096") + max_val = float("1.03866") + mean = float("0.98639") + std = float("0.0130591") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [384] + dtype = "float32" + min_val = float("0.00109296") + max_val = float("0.435956") + mean = float("0.0262465") + std = float("0.047939") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [384] + dtype = "float32" + min_val = float("-0.0984515") + max_val = float("0.0774922") + mean = float("0.00451097") + std = float("0.020161") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0187966") + max_val = float("0.015677") + mean = float("0.000104981") + std = float("0.00175477") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [384] + dtype = "float32" + min_val = float("-0.136108") + max_val = float("0.0210026") + mean = float("-0.0495464") + std = float("0.0271927") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [384] + dtype = "float32" + min_val = float("0.966771") + max_val = float("1.104") + mean = float("1.01877") + std = float("0.0185739") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [384] + dtype = "float32" + min_val = float("0.006631") + max_val = float("4.5025") + mean = float("0.219574") + std = float("0.441772") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [384] + dtype = "float32" + min_val = float("-0.29354") + max_val = float("0.27892") + mean = float("-0.00342736") + std = float("0.0628133") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0202651") + max_val = float("0.0277646") + mean = float("-7.89595e-06") + std = float("0.000982729") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [384] + dtype = "float32" + min_val = float("-0.148346") + max_val = float("0.0258169") + mean = float("-0.0508039") + std = float("0.0264955") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [384] + dtype = "float32" + min_val = float("0.938732") + max_val = float("1.11539") + mean = float("1.01484") + std = 
float("0.0355445") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [384] + dtype = "float32" + min_val = float("0.00583479") + max_val = float("7.27592") + mean = float("0.403401") + std = float("0.610221") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("-0.416572") + max_val = float("0.519537") + mean = float("0.0156239") + std = float("0.100455") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0194111") + max_val = float("0.027243") + mean = float("1.68141e-05") + std = float("0.00118306") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-0.153224") + max_val = float("0.0447664") + mean = float("-0.0556235") + std = float("0.0277297") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384] + dtype = "float32" + min_val = float("0.932483") + max_val = float("1.05427") + mean = float("0.984506") + std = float("0.0159277") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("0.000534063") + max_val = float("0.217156") + mean = float("0.0168761") + std = float("0.0268172") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("-0.0656613") + max_val = float("0.0732472") + mean = float("-0.00097402") + std = float("0.0152174") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0201466") + max_val = float("0.0213735") + mean = float("-4.74338e-05") + std = float("0.00168772") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-0.153224") + max_val = float("0.0447664") + mean = float("-0.0556235") + std = float("0.0277297") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384] + dtype = "float32" + min_val = float("0.963627") + max_val = float("1.13194") + mean = float("1.02226") + std = float("0.0263426") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("0.0039872") + max_val = float("2.61582") + mean = float("0.128734") + std = float("0.223227") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("-0.158487") + max_val = float("0.16455") + mean = float("-0.00243845") + std = float("0.0415247") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0197083") + max_val = float("0.0251654") + mean = float("-8.82344e-06") + std = float("0.00104335") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-0.161053") + max_val = float("0.0517202") + mean = float("-0.0534173") + std = float("0.0280919") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384] + dtype = "float32" + min_val = float("0.918049") + max_val = 
float("1.15232") + mean = float("1.01552") + std = float("0.0359069") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("0.00751172") + max_val = float("4.27019") + mean = float("0.219163") + std = float("0.336004") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("-0.172824") + max_val = float("0.238297") + mean = float("0.000740969") + std = float("0.0528198") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0223627") + max_val = float("0.0225632") + mean = float("4.76008e-06") + std = float("0.00121998") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-0.10183") + max_val = float("0.0571021") + mean = float("-0.0401848") + std = float("0.0228972") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384] + dtype = "float32" + min_val = float("0.964009") + max_val = float("1.11744") + mean = float("1.01332") + std = float("0.0243487") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("0.0068207") + max_val = float("0.57439") + mean = float("0.0795897") + std = float("0.0829282") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("-0.0720079") + max_val = float("0.133674") + mean = float("0.00403881") + std = float("0.0281469") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.040986") + max_val = float("0.0442725") + mean = float("2.35921e-05") + std = float("0.00180591") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-0.0710209") + max_val = float("0.0171363") + mean = float("-0.0176361") + std = float("0.0126887") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384] + dtype = "float32" + min_val = float("0.915116") + max_val = float("1.10226") + mean = float("1.00939") + std = float("0.0165555") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("0.00101995") + max_val = float("1.60525") + mean = float("0.0940799") + std = float("0.172003") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("-0.106481") + max_val = float("0.109302") + mean = float("0.00224785") + std = float("0.0268252") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0370576") + max_val = float("0.0304661") + mean = float("3.8777e-05") + std = float("0.00152873") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-0.0787177") + max_val = float("0.00338451") + mean = float("-0.025285") + std = float("0.0143999") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + 
shape = [384] + dtype = "float32" + min_val = float("0.98141") + max_val = float("1.12417") + mean = float("1.02758") + std = float("0.0212745") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("0.00212422") + max_val = float("3.61176") + mean = float("0.161326") + std = float("0.310472") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("-0.339877") + max_val = float("0.322401") + mean = float("-0.00463237") + std = float("0.0920204") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0316566") + max_val = float("0.0261297") + mean = float("-3.55618e-06") + std = float("0.000915228") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.413396") + max_val = float("0.666571") + mean = float("0.255151") + std = float("0.15886") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384] + dtype = "float32" + min_val = float("0.924159") + max_val = float("1.66721") + mean = float("1.17398") + std = float("0.0907099") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] + dtype = "float32" + min_val = float("0.0144254") + max_val = float("122.174") + mean = float("3.89996") + std = float("13.8836") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("-0.852841") + max_val = float("0.566699") + mean = float("-0.0134303") + std = float("0.139115") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.136465") + max_val = float("0.10202") + mean = float("-0.000553339") + std = float("0.00889327") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.257665") + max_val = float("0.0752437") + mean = float("-0.0387492") + std = float("0.0605554") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.912244") + max_val = float("1.05536") + mean = float("0.97033") + std = float("0.0254714") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.000393611") + max_val = float("2.87493") + mean = float("0.111455") + std = float("0.279032") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.0556163") + max_val = float("0.0292508") + mean = float("-0.00194405") + std = float("0.0112362") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0439425") + max_val = float("0.0410791") + mean = float("-0.000352772") + std = float("0.00463785") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.257665") + max_val = float("0.0752437") + mean = float("-0.0387492") + std = float("0.0605554") + data = None + + 
+class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.673684") + max_val = float("1.16438") + mean = float("1.02596") + std = float("0.0488779") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.0133915") + max_val = float("25.98") + mean = float("1.07065") + std = float("2.9016") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.180444") + max_val = float("0.125675") + mean = float("-0.00551328") + std = float("0.0357889") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.031421") + max_val = float("0.0394767") + mean = float("-0.000121284") + std = float("0.0024869") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.255521") + max_val = float("0.0954508") + mean = float("-0.0805929") + std = float("0.0593622") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.856802") + max_val = float("1.31575") + mean = float("1.0158") + std = float("0.0628707") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.0279533") + max_val = float("27.6276") + mean = float("1.05945") + std = float("2.31666") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.134847") + max_val = float("0.218809") + mean = float("0.00283619") + std = float("0.0445478") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0375382") + max_val = float("0.0517003") + mean = float("-3.45177e-05") + std = float("0.0026056") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("-0.21883") + max_val = float("0.0413199") + mean = float("-0.100393") + std = float("0.0464424") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.8901") + max_val = float("1.08415") + mean = float("0.970219") + std = float("0.0268324") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("0.000612437") + max_val = float("0.233253") + mean = float("0.0124722") + std = float("0.0250989") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192] + dtype = "float32" + min_val = float("-0.0253723") + max_val = float("0.00984091") + mean = float("-0.0014133") + std = float("0.00517924") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0375517") + max_val = float("0.033268") + mean = float("-0.000569872") + std = float("0.00362479") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.21883") + max_val = float("0.0413199") + mean = 
float("-0.100393") + std = float("0.0464424") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.930194") + max_val = float("1.13524") + mean = float("1.02396") + std = float("0.0379187") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.00801575") + max_val = float("1.06604") + mean = float("0.0984606") + std = float("0.143442") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.0705495") + max_val = float("0.024401") + mean = float("-0.00514406") + std = float("0.0149741") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0383657") + max_val = float("0.0467087") + mean = float("-0.000148064") + std = float("0.00219098") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.230449") + max_val = float("0.0107375") + mean = float("-0.106032") + std = float("0.0515695") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.866584") + max_val = float("1.19769") + mean = float("1.0176") + std = float("0.0616183") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.0189783") + max_val = float("2.75021") + mean = float("0.20213") + std = float("0.25366") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.0862401") + max_val = float("0.0391613") + mean = float("-0.00692022") + std = float("0.0188185") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0440031") + max_val = float("0.0615391") + mean = float("-0.000144339") + std = float("0.00244733") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.331751") + max_val = float("0.0593176") + mean = float("-0.122839") + std = float("0.0596387") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.864681") + max_val = float("1.08356") + mean = float("0.967237") + std = float("0.0294572") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.000340173") + max_val = float("0.0775148") + mean = float("0.00814505") + std = float("0.0093693") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.0142046") + max_val = float("0.0108901") + mean = float("-0.000354694") + std = float("0.00406659") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0335535") + max_val = float("0.0763086") + mean = float("-0.000457842") + std = float("0.00358968") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype 
= "float32" + min_val = float("-0.331751") + max_val = float("0.0593176") + mean = float("-0.122839") + std = float("0.0596387") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.930627") + max_val = float("1.13713") + mean = float("1.02228") + std = float("0.0316434") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.00494727") + max_val = float("1.02902") + mean = float("0.085425") + std = float("0.136729") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.0599128") + max_val = float("0.0420675") + mean = float("-0.00217438") + std = float("0.0152992") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0370746") + max_val = float("0.0560801") + mean = float("-0.000137326") + std = float("0.00230186") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.348258") + max_val = float("0.134414") + mean = float("-0.132516") + std = float("0.0683607") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.882276") + max_val = float("1.33254") + mean = float("1.01699") + std = float("0.066298") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.00563114") + max_val = float("2.12348") + mean = float("0.105603") + std = float("0.186094") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.0852457") + max_val = float("0.107334") + mean = float("-0.00720781") + std = float("0.0193172") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0441365") + max_val = float("0.0884734") + mean = float("-8.53865e-05") + std = float("0.00277581") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.248486") + max_val = float("0.0638204") + mean = float("-0.0972066") + std = float("0.0449139") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.916404") + max_val = float("1.2336") + mean = float("1.01803") + std = float("0.0458357") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.00285938") + max_val = float("1.25761") + mean = float("0.0895332") + std = float("0.162617") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.096256") + max_val = float("0.113259") + mean = float("-0.00855981") + std = float("0.0239588") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.054491") + max_val = float("0.0593654") + mean = float("-0.000210848") + std = float("0.00429447") + data = None + + +class 
Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [192] + dtype = "float32" + min_val = float("-0.165888") + max_val = float("0.0407389") + mean = float("-0.0331057") + std = float("0.0303175") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192] + dtype = "float32" + min_val = float("0.914858") + max_val = float("1.29697") + mean = float("1.00082") + std = float("0.0391908") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("0.00305941") + max_val = float("1.43751") + mean = float("0.124248") + std = float("0.190205") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("-0.100544") + max_val = float("0.127463") + mean = float("0.00148309") + std = float("0.0308809") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0567269") + max_val = float("0.0677656") + mean = float("6.1846e-05") + std = float("0.00367772") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-0.15598") + max_val = float("0.0109365") + mean = float("-0.0537461") + std = float("0.0309099") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192] + dtype = "float32" + min_val = float("0.854102") + max_val = float("1.17499") + mean = float("1.00855") + std = float("0.0375922") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("0.0177937") + max_val = float("2.32847") + mean = float("0.20173") + std = float("0.23775") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("-0.582113") + max_val = float("0.599466") + mean = float("-0.0228734") + std = float("0.194881") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0460401") + max_val = float("0.0411278") + mean = float("-2.83696e-05") + std = float("0.00233677") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.727151") + max_val = float("1.77126") + mean = float("0.372279") + std = float("0.431884") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192] + dtype = "float32" + min_val = float("0.643738") + max_val = float("1.70973") + mean = float("1.1633") + std = float("0.19941") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("0.0175227") + max_val = float("24.367") + mean = float("0.996291") + std = float("2.34855") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("-0.649941") + max_val = float("0.526551") + mean = float("0.0190609") + std = float("0.138321") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.222576") + max_val = float("0.181966") + mean = float("0.000145706") + 
std = float("0.0205848") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96] + dtype = "float32" + min_val = float("-0.646041") + max_val = float("0.287621") + mean = float("-0.0371052") + std = float("0.203701") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("0.729587") + max_val = float("1.32793") + mean = float("0.928081") + std = float("0.0901068") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.00961121") + max_val = float("5.95736") + mean = float("0.233741") + std = float("0.717086") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("-0.0755287") + max_val = float("0.0554683") + mean = float("0.00113738") + std = float("0.0240865") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.538395") + max_val = float("0.204203") + mean = float("-0.00127837") + std = float("0.021114") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96] + dtype = "float32" + min_val = float("-0.646041") + max_val = float("0.287621") + mean = float("-0.0371052") + std = float("0.203701") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("0.482104") + max_val = float("1.39863") + mean = float("1.04313") + std = float("0.135699") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.0457488") + max_val = float("30.6283") + mean = float("1.69124") + std = float("4.42624") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("-0.384559") + max_val = float("0.181016") + mean = float("-0.0210207") + std = float("0.0873941") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.247075") + max_val = float("0.10887") + mean = float("-0.000316918") + std = float("0.00915013") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96] + dtype = "float32" + min_val = float("-0.806157") + max_val = float("0.642647") + mean = float("-0.135704") + std = float("0.223383") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("0.498225") + max_val = float("1.52038") + mean = float("0.986561") + std = float("0.14034") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.133909") + max_val = float("27.5573") + mean = float("1.63725") + std = float("2.96894") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("-0.14425") + max_val = float("0.110106") + mean = float("0.00161153") + std = float("0.0553599") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.217565") + max_val = float("0.114085") + 
mean = float("-0.000489408") + std = float("0.00910554") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96] + dtype = "float32" + min_val = float("-0.376161") + max_val = float("0.21441") + mean = float("-0.182742") + std = float("0.131052") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("0.657588") + max_val = float("1.16753") + mean = float("0.867648") + std = float("0.0697505") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.00284137") + max_val = float("1.11982") + mean = float("0.0593209") + std = float("0.123554") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("-0.0657001") + max_val = float("0.0660294") + mean = float("-0.0085817") + std = float("0.0166625") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.112441") + max_val = float("0.145551") + mean = float("-0.00185134") + std = float("0.0132855") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96] + dtype = "float32" + min_val = float("-0.376161") + max_val = float("0.21441") + mean = float("-0.182742") + std = float("0.131052") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("0.794636") + max_val = float("1.29933") + mean = float("1.01044") + std = float("0.0818406") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.0372437") + max_val = float("9.07593") + mean = float("0.686368") + std = float("1.14539") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("-0.192429") + max_val = float("0.242172") + mean = float("-0.0113233") + std = float("0.0649532") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0724886") + max_val = float("0.0676065") + mean = float("-0.000591651") + std = float("0.00719272") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96] + dtype = "float32" + min_val = float("-0.504724") + max_val = float("0.310511") + mean = float("-0.192469") + std = float("0.162887") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("0.729078") + max_val = float("1.32546") + mean = float("0.944974") + std = float("0.106132") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0420698") + max_val = float("6.71555") + mean = float("1.0677") + std = float("1.20063") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("-0.126845") + max_val = float("0.134215") + mean = float("0.0195531") + std = float("0.0565244") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.086667") + max_val = float("0.0891793") + mean = float("-0.000463108") + std = float("0.00828012") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.603289") + max_val = float("0.0867803") + mean = float("-0.234451") + std = float("0.142047") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("0.704426") + max_val = float("1.02604") + mean = float("0.909068") + std = float("0.0649952") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.001266") + max_val = float("1.28419") + mean = float("0.0888219") + std = float("0.188276") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("-0.0485395") + max_val = float("0.0643204") + mean = float("-0.00403548") + std = float("0.0232515") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0943453") + max_val = float("0.128335") + mean = float("-0.00194288") + std = float("0.0140302") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-0.603289") + max_val = float("0.0867803") + mean = float("-0.234451") + std = float("0.142047") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.622021") + max_val = float("1.21586") + mean = float("0.958698") + std = float("0.106219") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.00636808") + max_val = float("6.96019") + mean = float("0.706011") + std = float("1.27379") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("-0.265289") + max_val = float("0.2344") + mean = float("6.01998e-05") + std = float("0.078052") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0724794") + max_val = float("0.0855183") + mean = float("-0.000671817") + std = float("0.00725462") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-0.81596") + max_val = float("0.694539") + mean = float("-0.220603") + std = float("0.24757") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.653873") + max_val = float("1.50477") + mean = float("0.905064") + std = float("0.115987") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0138713") + max_val = float("5.6047") + mean = float("0.607674") + std = float("0.872345") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("-0.18801") + max_val = float("0.361114") + mean = float("0.00822761") + std = float("0.104183") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96, 96, 
3, 3] + dtype = "float32" + min_val = float("-0.112173") + max_val = float("0.152093") + mean = float("-0.000219951") + std = float("0.00849275") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.737721") + max_val = float("1.03803") + mean = float("-0.0965003") + std = float("0.357534") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("0.486178") + max_val = float("1.16474") + mean = float("0.785591") + std = float("0.140337") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.0220315") + max_val = float("1.90785") + mean = float("0.302731") + std = float("0.386768") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("-0.265742") + max_val = float("0.176522") + mean = float("0.0019327") + std = float("0.0895195") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.141003") + max_val = float("0.12526") + mean = float("-0.000255411") + std = float("0.0113464") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-0.101234") + max_val = float("0.280761") + mean = float("0.0626191") + std = float("0.0701217") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.728134") + max_val = float("1.1619") + mean = float("0.897257") + std = float("0.075542") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.00554226") + max_val = float("3.43284") + mean = float("0.265583") + std = float("0.559653") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("-0.350261") + max_val = float("0.171463") + mean = float("-0.00933972") + std = float("0.0751828") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0930981") + max_val = float("0.109019") + mean = float("-1.41361e-05") + std = float("0.00738557") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.420169") + max_val = float("0.304077") + mean = float("-0.0846625") + std = float("0.0933085") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.660034") + max_val = float("1.60045") + mean = float("0.830394") + std = float("0.0946997") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00802662") + max_val = float("1.1227") + mean = float("0.141375") + std = float("0.138512") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.0902271") + max_val = float("0.0555908") + mean = float("-0.00784275") + std = float("0.0256276") + data = None + + +class 
Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0796293") + max_val = float("0.0883018") + mean = float("-0.000481953") + std = float("0.00665654") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-0.373051") + max_val = float("0.163758") + mean = float("-0.0929626") + std = float("0.0592494") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384] + dtype = "float32" + min_val = float("0.87726") + max_val = float("1.57409") + mean = float("1.01609") + std = float("0.085249") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [384] + dtype = "float32" + min_val = float("0.00707231") + max_val = float("0.602354") + mean = float("0.080235") + std = float("0.0743166") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [384] + dtype = "float32" + min_val = float("-0.136249") + max_val = float("0.144427") + mean = float("-0.0149049") + std = float("0.0284274") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.129077") + max_val = float("0.0807547") + mean = float("-0.000487049") + std = float("0.00603461") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.256959") + max_val = float("0.0668543") + mean = float("-0.0840696") + std = float("0.0440415") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192] + dtype = "float32" + min_val = float("0.819568") + max_val = float("0.986962") + mean = float("0.929009") + std = float("0.0272595") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("0.000592244") + max_val = float("0.300787") + mean = float("0.0342886") + std = float("0.0384523") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("-0.0318971") + max_val = float("0.0175305") + mean = float("-0.00483448") + std = float("0.00913169") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.035315") + max_val = float("0.0383402") + mean = float("-0.000675809") + std = float("0.0047554") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.256959") + max_val = float("0.0668543") + mean = float("-0.0840696") + std = float("0.0440415") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.900377") + max_val = float("1.08413") + mean = float("0.991669") + std = float("0.0256929") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("0.0159625") + max_val = float("1.75258") + mean = float("0.264178") + std = float("0.327029") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192] + dtype = "float32" + min_val = float("-0.0846459") + max_val = float("0.0435916") 
+ mean = float("-0.01252") + std = float("0.0267311") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0581955") + max_val = float("0.0815388") + mean = float("-0.000159628") + std = float("0.00251421") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [192] + dtype = "float32" + min_val = float("-0.283023") + max_val = float("0.00609804") + mean = float("-0.108732") + std = float("0.0543265") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [192] + dtype = "float32" + min_val = float("0.93521") + max_val = float("1.19749") + mean = float("1.03676") + std = float("0.0449099") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [192] + dtype = "float32" + min_val = float("0.0382762") + max_val = float("5.57442") + mean = float("0.725692") + std = float("0.823782") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [192] + dtype = "float32" + min_val = float("-0.237511") + max_val = float("0.165957") + mean = float("-0.0152484") + std = float("0.0789669") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0710582") + max_val = float("0.0757265") + mean = float("-0.000116839") + std = float("0.00304633") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [192] + dtype = "float32" + min_val = float("-0.253576") + max_val = float("-0.0258551") + mean = float("-0.111423") + std = float("0.0514888") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [192] + dtype = "float32" + min_val = float("0.915196") + max_val = float("1.08387") + mean = float("0.975653") + std = float("0.0197955") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("0.000784494") + max_val = float("0.170735") + mean = float("0.0158213") + std = float("0.0237635") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("-0.0457468") + max_val = float("0.0201233") + mean = float("-0.00730915") + std = float("0.0101963") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0337886") + max_val = float("0.0331928") + mean = float("-0.000817084") + std = float("0.00496809") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-0.253576") + max_val = float("-0.0258551") + mean = float("-0.111423") + std = float("0.0514888") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192] + dtype = "float32" + min_val = float("0.940652") + max_val = float("1.13061") + mean = float("1.00459") + std = float("0.0346428") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("0.00912887") + max_val = float("1.74385") + mean = float("0.136466") + std = float("0.22376") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + 
dtype = "float32" + min_val = float("-0.104047") + max_val = float("0.047509") + mean = float("-0.0196895") + std = float("0.0277402") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0458249") + max_val = float("0.0663524") + mean = float("-0.000235445") + std = float("0.00258708") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-0.397813") + max_val = float("-0.0235897") + mean = float("-0.135031") + std = float("0.0579049") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192] + dtype = "float32" + min_val = float("0.935628") + max_val = float("1.28762") + mean = float("1.02726") + std = float("0.0582587") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("0.0350071") + max_val = float("3.67144") + mean = float("0.444898") + std = float("0.548341") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("-0.456827") + max_val = float("0.250861") + mean = float("-0.0445605") + std = float("0.110845") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.032414") + max_val = float("0.0474969") + mean = float("-0.000226297") + std = float("0.00328822") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-0.290864") + max_val = float("-0.0230562") + mean = float("-0.11391") + std = float("0.04676") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("0.906862") + max_val = float("1.13958") + mean = float("0.997132") + std = float("0.0365506") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.000630271") + max_val = float("0.126275") + mean = float("0.0156972") + std = float("0.0212032") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("-0.032696") + max_val = float("0.0205933") + mean = float("-0.00387507") + std = float("0.00785712") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348327") + max_val = float("0.0582176") + mean = float("-0.000388586") + std = float("0.0055446") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-0.290864") + max_val = float("-0.0230562") + mean = float("-0.11391") + std = float("0.04676") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192] + dtype = "float32" + min_val = float("0.90984") + max_val = float("1.14776") + mean = float("0.987138") + std = float("0.0373269") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("0.0068577") + max_val = float("3.15529") + mean = float("0.115454") + std = float("0.251449") + data = None + + +class 
Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("-0.157661") + max_val = float("0.0523813") + mean = float("-0.0154723") + std = float("0.0237568") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0229903") + max_val = float("0.0319766") + mean = float("-0.000196356") + std = float("0.00258864") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-0.370486") + max_val = float("-0.0106218") + mean = float("-0.161455") + std = float("0.0602386") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192] + dtype = "float32" + min_val = float("0.9051") + max_val = float("1.22165") + mean = float("1.03058") + std = float("0.049462") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("0.0103088") + max_val = float("3.73392") + mean = float("0.208286") + std = float("0.402475") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("-0.0463211") + max_val = float("0.0315376") + mean = float("-0.00913568") + std = float("0.0128318") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0895417") + max_val = float("0.0442348") + mean = float("-0.000211153") + std = float("0.0035822") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.397496") + max_val = float("0.0864231") + mean = float("-0.165482") + std = float("0.0737238") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("0.879897") + max_val = float("1.17541") + mean = float("1.01759") + std = float("0.0566345") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("0.00621631") + max_val = float("1.89483") + mean = float("0.0866904") + std = float("0.204768") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("-0.10704") + max_val = float("0.250321") + mean = float("0.0243905") + std = float("0.0576948") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.102693") + max_val = float("0.140514") + mean = float("-0.000271279") + std = float("0.00498004") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-0.150926") + max_val = float("0.502386") + mean = float("-0.00458928") + std = float("0.0740063") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192] + dtype = "float32" + min_val = float("0.932811") + max_val = float("1.23138") + mean = float("1.04668") + std = float("0.0628145") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [192] + dtype = "float32" + min_val = float("0.00500346") + max_val = float("0.658867") + mean = 
float("0.071691") + std = float("0.0871382") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("-0.179438") + max_val = float("0.127582") + mean = float("0.00911666") + std = float("0.0540771") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.135587") + max_val = float("0.0877653") + mean = float("-0.000157689") + std = float("0.00488948") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.312093") + max_val = float("-0.0453597") + mean = float("-0.171326") + std = float("0.0441224") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.787891") + max_val = float("1.17398") + mean = float("0.88519") + std = float("0.0343002") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00518361") + max_val = float("0.717927") + mean = float("0.0818779") + std = float("0.074952") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.112216") + max_val = float("0.0645384") + mean = float("-0.00977235") + std = float("0.0242007") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0335371") + max_val = float("0.0385938") + mean = float("-0.000215569") + std = float("0.00376159") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [768] + dtype = "float32" + min_val = float("-0.16492") + max_val = float("0.116104") + mean = float("-0.0876142") + std = float("0.0238545") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [768] + dtype = "float32" + min_val = float("0.935583") + max_val = float("1.28041") + mean = float("1.02965") + std = float("0.0299369") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [768] + dtype = "float32" + min_val = float("0.00577983") + max_val = float("0.559026") + mean = float("0.0765748") + std = float("0.0652625") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [768] + dtype = "float32" + min_val = float("-0.154632") + max_val = float("0.121681") + mean = float("-0.00883901") + std = float("0.0433664") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0409931") + max_val = float("0.0668") + mean = float("-0.000149374") + std = float("0.00323186") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.175711") + max_val = float("0.114937") + mean = float("-0.0598235") + std = float("0.031613") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.876243") + max_val = float("1.05485") + mean = float("0.975582") + std = float("0.0178294") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = 
"float32" + min_val = float("0.000782373") + max_val = float("0.603868") + mean = float("0.0317629") + std = float("0.0542092") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.0928944") + max_val = float("0.0888735") + mean = float("0.00210265") + std = float("0.0327669") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0523449") + max_val = float("0.052802") + mean = float("4.73102e-05") + std = float("0.00322689") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.175711") + max_val = float("0.114937") + mean = float("-0.0598235") + std = float("0.031613") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.939826") + max_val = float("1.08521") + mean = float("0.993811") + std = float("0.0188195") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + min_val = float("0.0100212") + max_val = float("3.79981") + mean = float("0.240529") + std = float("0.361857") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-0.247484") + max_val = float("0.338793") + mean = float("-0.0375155") + std = float("0.0861982") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0258772") + max_val = float("0.0360759") + mean = float("-7.49101e-05") + std = float("0.001213") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.151775") + max_val = float("0.102834") + mean = float("-0.0361192") + std = float("0.0243711") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.94838") + max_val = float("1.23657") + mean = float("1.02234") + std = float("0.0374976") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("0.0447625") + max_val = float("8.11219") + mean = float("0.62398") + std = float("1.02319") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-0.712325") + max_val = float("0.573616") + mean = float("0.00533148") + std = float("0.162416") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0241385") + max_val = float("0.0318638") + mean = float("1.18507e-05") + std = float("0.00161292") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.127682") + max_val = float("0.0290178") + mean = float("-0.0397355") + std = float("0.020477") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("0.935742") + max_val = float("1.22687") + mean = float("1.02229") + std = float("0.040962") + data = None + + +class 
Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("0.255072") + max_val = float("120.906") + mean = float("9.80837") + std = float("15.5408") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-4.13105") + max_val = float("6.0128") + mean = float("0.0669194") + std = float("1.73557") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0444577") + max_val = float("0.0503126") + mean = float("0.0001148") + std = float("0.00260266") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0351402") + max_val = float("0.0448126") + mean = float("-0.00203657") + std = float("0.0112334") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.955294") + max_val = float("1.07985") + mean = float("0.990475") + std = float("0.01608") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.00198763") + max_val = float("0.0635025") + mean = float("0.0138523") + std = float("0.00926475") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-0.0391059") + max_val = float("0.0393871") + mean = float("-0.0105951") + std = float("0.0131426") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0224049") + max_val = float("0.0332456") + mean = float("-0.000297944") + std = float("0.00282752") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0351403") + max_val = float("0.0448126") + mean = float("-0.00203657") + std = float("0.0112334") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("0.958435") + max_val = float("1.12455") + mean = float("1.0048") + std = float("0.0255586") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("0.0140512") + max_val = float("0.800076") + mean = float("0.108902") + std = float("0.0948212") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-0.115193") + max_val = float("0.079648") + mean = float("-0.0321172") + std = float("0.0341768") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0255902") + max_val = float("0.0443317") + mean = float("-0.00011048") + std = float("0.00120834") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0758443") + max_val = float("0.0143564") + mean = float("-0.018454") + std = float("0.013002") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384] + dtype = "float32" + min_val = float("0.948675") + max_val = float("1.19194") + mean = 
float("1.02021") + std = float("0.0307393") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("0.0278921") + max_val = float("3.5996") + mean = float("0.368141") + std = float("0.390009") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-0.273124") + max_val = float("0.208821") + mean = float("-0.0380423") + std = float("0.077889") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0198387") + max_val = float("0.0263313") + mean = float("-7.66766e-05") + std = float("0.00143257") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.0645948") + max_val = float("0.0263716") + mean = float("-0.0175864") + std = float("0.0121472") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.977031") + max_val = float("1.05231") + mean = float("0.998866") + std = float("0.0104315") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.000423634") + max_val = float("0.110283") + mean = float("0.0116174") + std = float("0.0134231") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-0.0211042") + max_val = float("0.0345802") + mean = float("-0.0023165") + std = float("0.00961282") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0201647") + max_val = float("0.0351393") + mean = float("-8.36299e-05") + std = float("0.00242893") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.0645947") + max_val = float("0.0263716") + mean = float("-0.0175864") + std = float("0.0121472") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.976794") + max_val = float("1.10103") + mean = float("1.00726") + std = float("0.0202551") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("0.00768908") + max_val = float("1.55127") + mean = float("0.109916") + std = float("0.159092") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-0.103719") + max_val = float("0.0862114") + mean = float("-0.0208973") + std = float("0.0271219") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0127177") + max_val = float("0.0247748") + mean = float("-9.78059e-05") + std = float("0.00103805") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.084671") + max_val = float("-0.001335") + mean = float("-0.0378383") + std = float("0.0147188") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype 
= "float32" + min_val = float("0.961734") + max_val = float("1.11654") + mean = float("1.01877") + std = float("0.0258236") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("0.00767187") + max_val = float("0.454337") + mean = float("0.0877939") + std = float("0.0760314") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-0.0506767") + max_val = float("0.0420926") + mean = float("-0.0059652") + std = float("0.018851") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0131907") + max_val = float("0.0210535") + mean = float("-3.41569e-05") + std = float("0.00141085") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.10718") + max_val = float("0.0233714") + mean = float("-0.0562477") + std = float("0.019742") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.981369") + max_val = float("1.07286") + mean = float("1.02166") + std = float("0.0138993") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("0.00628651") + max_val = float("2.59218") + mean = float("0.256612") + std = float("0.377235") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-0.10037") + max_val = float("0.159741") + mean = float("0.0326345") + std = float("0.0438385") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.017557") + max_val = float("0.0459387") + mean = float("-0.000156264") + std = float("0.00273104") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0589739") + max_val = float("0.0343006") + mean = float("-0.00850519") + std = float("0.0113599") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("1.00897") + max_val = float("1.21432") + mean = float("1.05133") + std = float("0.0209679") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.0034229") + max_val = float("0.869117") + mean = float("0.100891") + std = float("0.0914111") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-0.0875187") + max_val = float("0.134152") + mean = float("0.0286449") + std = float("0.0311893") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0404324") + max_val = float("0.0486178") + mean = float("-0.000170107") + std = float("0.00283689") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("-3.76561") + max_val = float("-0.731173") + mean = float("-2.19452") + std = float("0.428428") + data = None + + +class 
Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("1.62445") + max_val = float("4.43497") + mean = float("3.07461") + std = float("0.255239") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("0.00300033") + max_val = float("1.03235") + mean = float("0.0732599") + std = float("0.0918229") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024] + dtype = "float32" + min_val = float("-0.102441") + max_val = float("0.0886517") + mean = float("0.0176384") + std = float("0.0286918") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0683686") + max_val = float("0.079499") + mean = float("-0.000295847") + std = float("0.00320057") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [768] + dtype = "float32" + min_val = float("-0.0121386") + max_val = float("0.00527683") + mean = float("-0.00056885") + std = float("0.00174363") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.104283") + max_val = float("0.108395") + mean = float("-0.000212643") + std = float("0.00130148") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.77842") + max_val = float("0.498407") + mean = float("-0.300061") + std = float("0.296898") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.189938") + max_val = float("1.98187") + mean = float("0.620289") + std = float("0.278642") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("9.91892e-05") + max_val = float("0.0270208") + mean = float("0.00199739") + std = float("0.00264807") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0508396") + max_val = float("0.0837222") + mean = float("0.0276176") + std = float("0.0223824") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.019634") + max_val = float("0.026249") + mean = float("-0.000305565") + std = float("0.00221217") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.77842") + max_val = float("0.498407") + mean = float("-0.300061") + std = float("0.296898") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.365979") + max_val = float("2.77813") + mean = float("1.04754") + std = float("0.30869") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.000885633") + max_val = float("0.640413") + mean = float("0.0266485") + std = float("0.0528161") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.2607") + max_val = float("0.252739") + 
mean = float("0.0237469") + std = float("0.0710539") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0161625") + max_val = float("0.0237353") + mean = float("-2.77227e-05") + std = float("0.00143234") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.61241") + max_val = float("0.0552853") + mean = float("-1.58347") + std = float("0.416394") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.56794") + max_val = float("1.67647") + mean = float("1.12425") + std = float("0.146808") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0425016") + max_val = float("8.60413") + mean = float("0.715833") + std = float("0.95826") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-0.579806") + max_val = float("0.450828") + mean = float("0.0787798") + std = float("0.153293") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0172878") + max_val = float("0.0422016") + mean = float("-0.000115855") + std = float("0.00184295") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.93787") + max_val = float("0.733813") + mean = float("-0.570636") + std = float("0.36594") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.140643") + max_val = float("2.06386") + mean = float("0.563265") + std = float("0.226726") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("7.24306e-05") + max_val = float("0.0212711") + mean = float("0.00249773") + std = float("0.00263818") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0480177") + max_val = float("0.0906403") + mean = float("0.0290943") + std = float("0.0218768") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0187865") + max_val = float("0.0197971") + mean = float("-0.000317036") + std = float("0.00213102") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.93797") + max_val = float("0.733813") + mean = float("-0.570636") + std = float("0.365941") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.579568") + max_val = float("2.10408") + mean = float("1.0926") + std = float("0.254088") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00121035") + max_val = float("0.722758") + mean = float("0.0326783") + std = float("0.0604562") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + 
min_val = float("-0.233337") + max_val = float("0.235021") + mean = float("0.0433769") + std = float("0.0724029") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017636") + max_val = float("0.0242342") + mean = float("-5.98118e-05") + std = float("0.00148823") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.42769") + max_val = float("0.839301") + mean = float("-1.42177") + std = float("0.360858") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.438544") + max_val = float("1.84376") + mean = float("1.15689") + std = float("0.142353") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0203352") + max_val = float("3.181") + mean = float("0.368327") + std = float("0.465673") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.205922") + max_val = float("0.327803") + mean = float("0.0418561") + std = float("0.0756754") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0216487") + max_val = float("0.0329662") + mean = float("-0.000111566") + std = float("0.00182337") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-1.88519") + max_val = float("0.489282") + mean = float("-0.478509") + std = float("0.384475") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.0964864") + max_val = float("2.12186") + mean = float("0.441698") + std = float("0.215794") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.000190261") + max_val = float("0.0374628") + mean = float("0.00504513") + std = float("0.00528967") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.0833438") + max_val = float("0.106805") + mean = float("0.0319307") + std = float("0.0228581") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0196403") + max_val = float("0.0181013") + mean = float("-0.000367563") + std = float("0.00183188") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-1.88519") + max_val = float("0.489282") + mean = float("-0.478509") + std = float("0.384475") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.572754") + max_val = float("2.21625") + mean = float("1.0609") + std = float("0.254729") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.00314815") + max_val = float("0.609992") + mean = float("0.063887") + std = float("0.0854036") + data = None + + +class Program_weight_tensor_parameter_425: + 
name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.309743") + max_val = float("0.213002") + mean = float("0.0357878") + std = float("0.0857551") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0167874") + max_val = float("0.0235595") + mean = float("-4.10985e-05") + std = float("0.0015578") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-2.15541") + max_val = float("0.429211") + mean = float("-1.38242") + std = float("0.277701") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.714293") + max_val = float("1.63322") + mean = float("1.13559") + std = float("0.0992203") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("0.00874361") + max_val = float("1.63267") + mean = float("0.172849") + std = float("0.199136") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.262866") + max_val = float("0.171872") + mean = float("0.00667807") + std = float("0.0549583") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0255212") + max_val = float("0.0449234") + mean = float("-9.38033e-05") + std = float("0.00171679") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-2.9313") + max_val = float("1.76163") + mean = float("-0.765032") + std = float("0.654203") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("0.974162") + max_val = float("2.91141") + mean = float("1.85277") + std = float("0.272517") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00181718") + max_val = float("0.234308") + mean = float("0.0412232") + std = float("0.0430638") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("-0.234178") + max_val = float("0.245307") + mean = float("0.0695519") + std = float("0.0618743") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0469498") + max_val = float("0.0418139") + mean = float("-0.000433104") + std = float("0.00390566") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.24508") + max_val = float("0.69413") + mean = float("-0.776468") + std = float("0.476207") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.973977") + max_val = float("2.89139") + mean = float("2.10296") + std = float("0.303008") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.000334173") + max_val = float("0.779074") + mean = float("0.0234264") + std = float("0.0613748") + 
data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-0.246706") + max_val = float("0.197941") + mean = float("0.032747") + std = float("0.0469715") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.128826") + max_val = float("0.0634304") + mean = float("-0.000209218") + std = float("0.0029129") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [768] + dtype = "float32" + min_val = float("-2.41087") + max_val = float("0.654592") + mean = float("-0.916074") + std = float("0.344533") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [768] + dtype = "float32" + min_val = float("0.51965") + max_val = float("1.8768") + mean = float("0.91262") + std = float("0.147168") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [768] + dtype = "float32" + min_val = float("0.00391866") + max_val = float("6.24599") + mean = float("0.161767") + std = float("0.35048") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [768] + dtype = "float32" + min_val = float("-0.309254") + max_val = float("0.670901") + mean = float("0.0400058") + std = float("0.130768") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.049509") + max_val = float("0.0452558") + mean = float("-4.74502e-05") + std = float("0.00178091") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.38771") + max_val = float("1.66524") + mean = float("-1.17835") + std = float("0.526979") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.490699") + max_val = float("1.69897") + mean = float("1.10972") + std = float("0.150271") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.00174229") + max_val = float("1.5935") + mean = float("0.0661371") + std = float("0.125469") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-0.179287") + max_val = float("0.13044") + mean = float("-0.00872415") + std = float("0.0427454") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.228408") + max_val = float("0.195018") + mean = float("-0.000416299") + std = float("0.00607886") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-0.00934079") + max_val = float("0.000912873") + mean = float("-0.00232942") + std = float("0.00179213") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202973") + max_val = float("0.134442") + mean = float("-0.00176474") + std = float("0.00412056") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.95309") + max_val = 
float("0.504365") + mean = float("-0.323219") + std = float("0.341256") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0702496") + max_val = float("2.23431") + mean = float("0.601975") + std = float("0.439856") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("4.76391e-05") + max_val = float("0.0303151") + mean = float("0.00419476") + std = float("0.00513906") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0732794") + max_val = float("0.0514422") + mean = float("0.00968913") + std = float("0.0191895") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0205319") + max_val = float("0.0540422") + mean = float("-0.000288173") + std = float("0.00337745") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.95309") + max_val = float("0.504365") + mean = float("-0.323219") + std = float("0.341256") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.384813") + max_val = float("2.87188") + mean = float("1.22967") + std = float("0.52103") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.00334316") + max_val = float("0.649378") + mean = float("0.0680944") + std = float("0.0920336") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.149357") + max_val = float("0.210071") + mean = float("0.0335995") + std = float("0.0682833") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0206109") + max_val = float("0.0328944") + mean = float("-0.000105819") + std = float("0.00247448") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.89041") + max_val = float("-0.124973") + mean = float("-1.33321") + std = float("0.398058") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.719268") + max_val = float("2.09501") + mean = float("1.16332") + std = float("0.171594") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0195207") + max_val = float("8.32209") + mean = float("1.10631") + std = float("1.4008") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-1.98123") + max_val = float("1.21603") + mean = float("0.0516891") + std = float("0.330024") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0302761") + max_val = float("0.0413314") + mean = float("-9.04468e-05") + std = float("0.00300886") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] 
+ dtype = "float32" + min_val = float("-1.92933") + max_val = float("0.596271") + mean = float("-0.261624") + std = float("0.334454") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.0488552") + max_val = float("1.76755") + mean = float("0.453568") + std = float("0.30262") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("2.24294e-05") + max_val = float("0.164315") + mean = float("0.00587699") + std = float("0.0161003") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0490808") + max_val = float("0.0669156") + mean = float("0.0149264") + std = float("0.0217292") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0242882") + max_val = float("0.0299433") + mean = float("-0.000340263") + std = float("0.00326035") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.92933") + max_val = float("0.596271") + mean = float("-0.261624") + std = float("0.334454") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.419712") + max_val = float("2.27591") + mean = float("1.14823") + std = float("0.381008") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.00492381") + max_val = float("1.92887") + mean = float("0.107118") + std = float("0.20391") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.242602") + max_val = float("0.295724") + mean = float("0.0375102") + std = float("0.0955899") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0188034") + max_val = float("0.0253279") + mean = float("-8.278e-05") + std = float("0.00266932") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.53631") + max_val = float("-0.131674") + mean = float("-1.31632") + std = float("0.443813") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.718027") + max_val = float("1.65396") + mean = float("1.17902") + std = float("0.161151") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0311619") + max_val = float("12.4415") + mean = float("1.08409") + std = float("1.81585") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.09829") + max_val = float("0.629046") + mean = float("0.123942") + std = float("0.213918") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.042754") + max_val = float("0.0429227") + mean = float("-0.000109615") + std = float("0.00313016") + data = None + + +class 
Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.76349") + max_val = float("0.544904") + mean = float("-0.246355") + std = float("0.349411") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.00801797") + max_val = float("1.66398") + mean = float("0.357353") + std = float("0.246738") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("7.15677e-05") + max_val = float("0.105247") + mean = float("0.00410654") + std = float("0.0106252") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0643907") + max_val = float("0.0951448") + mean = float("0.0170309") + std = float("0.0236018") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.020302") + max_val = float("0.0261232") + mean = float("-0.00036246") + std = float("0.00323036") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-1.76349") + max_val = float("0.544905") + mean = float("-0.246355") + std = float("0.349411") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.385673") + max_val = float("1.96785") + mean = float("1.06985") + std = float("0.336108") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00226675") + max_val = float("1.91593") + mean = float("0.101595") + std = float("0.227642") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.261671") + max_val = float("0.282769") + mean = float("0.0457371") + std = float("0.101536") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0263202") + max_val = float("0.031635") + mean = float("-0.000104025") + std = float("0.00284282") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.51462") + max_val = float("0.158257") + mean = float("-1.26766") + std = float("0.427753") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.598242") + max_val = float("1.78157") + mean = float("1.1494") + std = float("0.161575") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0222714") + max_val = float("37.7966") + mean = float("1.49691") + std = float("3.97204") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.873886") + max_val = float("0.81748") + mean = float("0.0710219") + std = float("0.213003") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0380726") + max_val = float("0.047001") + mean = float("-0.000179735") 
+ std = float("0.00344855") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-2.08881") + max_val = float("0.648999") + mean = float("-0.259801") + std = float("0.386671") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("0.000298623") + max_val = float("0.722711") + mean = float("0.216526") + std = float("0.135243") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("4.96868e-08") + max_val = float("0.107631") + mean = float("0.00291956") + std = float("0.00896913") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0826544") + max_val = float("0.0880952") + mean = float("0.0119081") + std = float("0.0222372") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0383726") + max_val = float("0.0426544") + mean = float("-0.000299896") + std = float("0.00305223") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.08881") + max_val = float("0.648999") + mean = float("-0.259801") + std = float("0.386671") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.394983") + max_val = float("1.95775") + mean = float("0.953923") + std = float("0.305397") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00262196") + max_val = float("7.469") + mean = float("0.131262") + std = float("0.559292") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-1.11459") + max_val = float("0.413148") + mean = float("0.0497411") + std = float("0.149456") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0273404") + max_val = float("0.0675831") + mean = float("-0.000146946") + std = float("0.00314256") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.77342") + max_val = float("-0.0376017") + mean = float("-1.25998") + std = float("0.434996") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.744961") + max_val = float("1.56236") + mean = float("1.13335") + std = float("0.139709") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.00886955") + max_val = float("38.6321") + mean = float("1.16498") + std = float("4.2895") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.81896") + max_val = float("0.818165") + mean = float("0.0290114") + std = float("0.197181") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0472119") + 
max_val = float("0.0488221") + mean = float("-0.000229853") + std = float("0.00339264") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.20653") + max_val = float("0.515824") + mean = float("-0.218129") + std = float("0.352315") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("-3.41948e-06") + max_val = float("0.680384") + mean = float("0.195099") + std = float("0.117152") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("6.06307e-12") + max_val = float("0.398718") + mean = float("0.00738152") + std = float("0.0311542") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.129674") + max_val = float("0.071682") + mean = float("0.0112656") + std = float("0.0230303") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0270501") + max_val = float("0.0345417") + mean = float("-0.000283263") + std = float("0.00318819") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.20653") + max_val = float("0.515824") + mean = float("-0.218129") + std = float("0.352315") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.398135") + max_val = float("1.5715") + mean = float("0.848265") + std = float("0.259232") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00433799") + max_val = float("10.4863") + mean = float("0.263417") + std = float("0.977392") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.947376") + max_val = float("0.293492") + mean = float("0.0244436") + std = float("0.157364") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0235823") + max_val = float("0.0383498") + mean = float("-6.68298e-05") + std = float("0.00312995") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-2.48859") + max_val = float("-0.0816794") + mean = float("-1.27085") + std = float("0.420072") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.694565") + max_val = float("1.54206") + mean = float("1.10687") + std = float("0.135381") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.00545689") + max_val = float("4.02717") + mean = float("0.403302") + std = float("0.646807") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.588504") + max_val = float("0.746211") + mean = float("0.00482585") + std = float("0.188335") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 
192, 3, 3] + dtype = "float32" + min_val = float("-0.0454542") + max_val = float("0.0510382") + mean = float("-0.00015484") + std = float("0.00337127") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.23302") + max_val = float("0.509381") + mean = float("-0.153645") + std = float("0.30407") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.00227296") + max_val = float("1.53114") + mean = float("0.236795") + std = float("0.211228") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("2.14271e-05") + max_val = float("0.127867") + mean = float("0.00908259") + std = float("0.0177169") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.0935318") + max_val = float("0.137813") + mean = float("0.0162608") + std = float("0.0290513") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0512067") + max_val = float("0.0280295") + mean = float("-0.000438388") + std = float("0.00368151") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-1.23302") + max_val = float("0.509381") + mean = float("-0.153645") + std = float("0.30407") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.332594") + max_val = float("1.44107") + mean = float("0.751491") + std = float("0.218724") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00580545") + max_val = float("3.585") + mean = float("0.245801") + std = float("0.410758") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.654561") + max_val = float("0.520224") + mean = float("0.0637138") + std = float("0.132489") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0460525") + max_val = float("0.0463877") + mean = float("-0.000214011") + std = float("0.0030628") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-1.86975") + max_val = float("-0.187693") + mean = float("-1.16402") + std = float("0.325704") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("0.751831") + max_val = float("1.61753") + mean = float("1.10973") + std = float("0.131817") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("0.00623295") + max_val = float("7.47422") + mean = float("0.274923") + std = float("0.674046") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-0.566295") + max_val = float("0.711085") + mean = float("-0.0315813") + std = float("0.130281") + data = None + + +class 
Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0494038") + max_val = float("0.0562226") + mean = float("-0.000159684") + std = float("0.00334459") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.81555") + max_val = float("1.61423") + mean = float("-0.0254536") + std = float("0.761522") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.476452") + max_val = float("2.07853") + mean = float("0.880028") + std = float("0.224402") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.00413651") + max_val = float("6.56568") + mean = float("0.232782") + std = float("0.576315") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-0.211511") + max_val = float("0.111527") + mean = float("-0.00608796") + std = float("0.0521257") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0748107") + max_val = float("0.0760846") + mean = float("-0.000563234") + std = float("0.00701903") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.91449") + max_val = float("2.11472") + mean = float("0.103721") + std = float("0.667968") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.856446") + max_val = float("5.71069") + mean = float("1.92533") + std = float("0.968824") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.00176998") + max_val = float("0.489107") + mean = float("0.0413273") + std = float("0.0662281") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-0.089043") + max_val = float("0.117142") + mean = float("0.00920289") + std = float("0.0350583") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0673953") + max_val = float("0.128402") + mean = float("-0.000430991") + std = float("0.00603374") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [384] + dtype = "float32" + min_val = float("-2.92773") + max_val = float("1.33693") + mean = float("-0.313455") + std = float("0.572295") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [384] + dtype = "float32" + min_val = float("0.699301") + max_val = float("2.45328") + mean = float("1.14709") + std = float("0.257994") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [384] + dtype = "float32" + min_val = float("0.00380316") + max_val = float("33.1796") + mean = float("0.637666") + std = float("1.97202") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [384] + dtype = "float32" + min_val = float("-0.920201") + max_val = float("0.383577") + mean = 
float("0.0242312") + std = float("0.154423") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0550514") + max_val = float("0.0583738") + mean = float("-6.14001e-05") + std = float("0.00339831") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [256] + dtype = "float32" + min_val = float("-2.08101") + max_val = float("1.23907") + mean = float("-0.929071") + std = float("0.560762") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [256] + dtype = "float32" + min_val = float("0.460852") + max_val = float("1.60647") + mean = float("1.03822") + std = float("0.186473") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [256] + dtype = "float32" + min_val = float("0.00288698") + max_val = float("2.85435") + mean = float("0.124524") + std = float("0.278458") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [256] + dtype = "float32" + min_val = float("-0.513886") + max_val = float("0.320015") + mean = float("-0.0168315") + std = float("0.107427") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.22133") + max_val = float("0.170826") + mean = float("-0.000439134") + std = float("0.0126658") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-0.0149414") + max_val = float("0.00274012") + mean = float("-0.0039491") + std = float("0.00303571") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.518434") + max_val = float("0.174661") + mean = float("-0.00334445") + std = float("0.00882037") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.89866") + max_val = float("0.648435") + mean = float("-0.163531") + std = float("0.445964") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.118827") + max_val = float("3.45075") + mean = float("0.648866") + std = float("0.709221") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("5.72145e-05") + max_val = float("0.0305947") + mean = float("0.00350272") + std = float("0.00465155") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0537825") + max_val = float("0.0541921") + mean = float("0.010285") + std = float("0.0254635") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0435225") + max_val = float("0.0719533") + mean = float("-0.000638006") + std = float("0.00649986") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.89866") + max_val = float("0.648435") + mean = float("-0.163531") + std = float("0.445964") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = 
"float32" + min_val = float("0.274848") + max_val = float("5.76456") + mean = float("1.11235") + std = float("0.941375") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.00262636") + max_val = float("0.731286") + mean = float("0.0545356") + std = float("0.080615") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.403692") + max_val = float("0.191464") + mean = float("0.0207246") + std = float("0.102896") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.034156") + max_val = float("0.0485352") + mean = float("-0.000167324") + std = float("0.00457433") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-2.47237") + max_val = float("-0.039721") + mean = float("-1.25291") + std = float("0.438553") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.484422") + max_val = float("1.73309") + mean = float("0.919688") + std = float("0.175697") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0913526") + max_val = float("5.78304") + mean = float("1.20582") + std = float("1.11379") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-2.67749") + max_val = float("1.89478") + mean = float("-0.0280634") + std = float("0.649795") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.125626") + max_val = float("0.0867783") + mean = float("-0.000262143") + std = float("0.00565539") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.36009") + max_val = float("0.613418") + mean = float("-0.109119") + std = float("0.363358") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.00951907") + max_val = float("1.85237") + mean = float("0.454878") + std = float("0.359562") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("1.34543e-05") + max_val = float("0.100978") + mean = float("0.00659523") + std = float("0.0127025") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0733295") + max_val = float("0.0665844") + mean = float("0.0164669") + std = float("0.0261863") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0616569") + max_val = float("0.0533414") + mean = float("-0.000915536") + std = float("0.0063832") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.36009") + max_val = float("0.613418") + mean = float("-0.109119") + std = float("0.363358") + data = None + + +class Program_weight_tensor_parameter_587: + name = 
"parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.381804") + max_val = float("2.31148") + mean = float("0.90401") + std = float("0.422733") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.0028765") + max_val = float("1.96506") + mean = float("0.163957") + std = float("0.29453") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.169521") + max_val = float("0.382588") + mean = float("0.0552") + std = float("0.100991") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0539882") + max_val = float("0.0612803") + mean = float("-0.000352692") + std = float("0.0047621") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.30884") + max_val = float("0.356165") + mean = float("-1.21887") + std = float("0.556975") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.424695") + max_val = float("1.92592") + mean = float("1.00728") + std = float("0.236573") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.0237573") + max_val = float("6.63037") + mean = float("1.04207") + std = float("1.2277") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-1.1716") + max_val = float("1.33307") + mean = float("0.029565") + std = float("0.439463") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.134962") + max_val = float("0.136054") + mean = float("-0.000198739") + std = float("0.0057204") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-1.22361") + max_val = float("0.654799") + mean = float("-0.0922317") + std = float("0.305958") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0316927") + max_val = float("1.28685") + mean = float("0.312909") + std = float("0.193627") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("9.22096e-05") + max_val = float("0.267661") + mean = float("0.00780572") + std = float("0.0277819") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.127424") + max_val = float("0.0779931") + mean = float("0.00867301") + std = float("0.0281039") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0357805") + max_val = float("0.04937") + mean = float("-0.000468989") + std = float("0.00648871") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-1.22361") + max_val = float("0.654799") + mean = float("-0.0922317") + std = float("0.305958") + data = None + + +class 
Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.321215") + max_val = float("1.60468") + mean = float("0.742453") + std = float("0.256594") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00594866") + max_val = float("13.1468") + mean = float("0.280576") + std = float("1.35706") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.512798") + max_val = float("0.360902") + mean = float("0.024901") + std = float("0.131996") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0457283") + max_val = float("0.0529573") + mean = float("-0.000146386") + std = float("0.00507802") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-3.56417") + max_val = float("0.313374") + mean = float("-1.16309") + std = float("0.578663") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.515839") + max_val = float("2.22553") + mean = float("1.01898") + std = float("0.244192") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0566049") + max_val = float("8.50527") + mean = float("0.821018") + std = float("1.25589") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-1.3259") + max_val = float("0.986206") + mean = float("0.0226801") + std = float("0.384225") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.106683") + max_val = float("0.124295") + mean = float("-0.000180527") + std = float("0.00585965") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.914024") + max_val = float("0.548448") + mean = float("-0.147808") + std = float("0.291506") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0339392") + max_val = float("1.38054") + mean = float("0.313981") + std = float("0.205844") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("2.21066e-05") + max_val = float("0.136525") + mean = float("0.00723304") + std = float("0.0189755") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0806456") + max_val = float("0.0638613") + mean = float("0.013262") + std = float("0.0276148") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0590545") + max_val = float("0.0449412") + mean = float("-0.00069183") + std = float("0.00674782") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.914024") + max_val = float("0.548448") + mean = float("-0.147808") + std = 
float("0.291506") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.142128") + max_val = float("1.73988") + mean = float("0.702206") + std = float("0.285683") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00387817") + max_val = float("1.69108") + mean = float("0.132084") + std = float("0.265544") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.365992") + max_val = float("0.29138") + mean = float("0.0334006") + std = float("0.11377") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0709519") + max_val = float("0.0589113") + mean = float("-0.000196033") + std = float("0.00498706") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-2.6239") + max_val = float("0.0469523") + mean = float("-1.09235") + std = float("0.492344") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.546345") + max_val = float("1.74702") + mean = float("0.990286") + std = float("0.183688") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.036742") + max_val = float("4.90946") + mean = float("0.515044") + std = float("0.805591") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.8935") + max_val = float("0.923151") + mean = float("-0.0110131") + std = float("0.335925") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0557588") + max_val = float("0.0977312") + mean = float("-0.000280993") + std = float("0.00584596") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.982144") + max_val = float("0.555605") + mean = float("-0.128127") + std = float("0.289988") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0646262") + max_val = float("1.15632") + mean = float("0.273046") + std = float("0.165216") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000135618") + max_val = float("0.187931") + mean = float("0.0150782") + std = float("0.0310702") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0878502") + max_val = float("0.0810483") + mean = float("0.00536994") + std = float("0.0324391") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0693269") + max_val = float("0.0585016") + mean = float("-7.57148e-05") + std = float("0.00796075") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.982144") + max_val = 
float("0.555603") + mean = float("-0.128127") + std = float("0.289988") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.178989") + max_val = float("1.52642") + mean = float("0.577049") + std = float("0.230661") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.00908185") + max_val = float("6.25082") + mean = float("0.308875") + std = float("0.731709") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.503088") + max_val = float("0.306438") + mean = float("0.0144393") + std = float("0.128309") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0559299") + max_val = float("0.0452462") + mean = float("5.04339e-05") + std = float("0.0052732") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-3.34611") + max_val = float("0.216964") + mean = float("-1.02056") + std = float("0.542199") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.541273") + max_val = float("2.73475") + mean = float("1.04359") + std = float("0.234238") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.00839167") + max_val = float("10.3218") + mean = float("0.36125") + std = float("1.13641") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.676206") + max_val = float("2.05745") + mean = float("-0.000203486") + std = float("0.390477") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0823601") + max_val = float("0.0816342") + mean = float("-0.000177511") + std = float("0.00638281") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-0.60359") + max_val = float("0.468132") + mean = float("-0.0840719") + std = float("0.256343") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.0544126") + max_val = float("1.22927") + mean = float("0.286115") + std = float("0.196735") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.000143085") + max_val = float("0.359897") + mean = float("0.0363701") + std = float("0.0574546") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.032617") + max_val = float("0.0498112") + mean = float("0.0055907") + std = float("0.016775") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0773345") + max_val = float("0.0606068") + mean = float("-0.000857754") + std = float("0.00880052") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + 
min_val = float("-0.60359") + max_val = float("0.468132") + mean = float("-0.0840719") + std = float("0.256343") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.182862") + max_val = float("1.3226") + mean = float("0.518428") + std = float("0.258918") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00892751") + max_val = float("44.0063") + mean = float("0.942949") + std = float("4.46794") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.205703") + max_val = float("0.154926") + mean = float("0.000264955") + std = float("0.061708") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0785968") + max_val = float("0.0587748") + mean = float("6.78683e-05") + std = float("0.00555531") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-2.41369") + max_val = float("0.496388") + mean = float("-0.836608") + std = float("0.475164") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.830242") + max_val = float("2.26639") + mean = float("1.25319") + std = float("0.21549") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.00554491") + max_val = float("3.92354") + mean = float("0.225363") + std = float("0.491203") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-0.682011") + max_val = float("0.863938") + mean = float("-0.0559418") + std = float("0.30762") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.12871") + max_val = float("0.120943") + mean = float("-7.30879e-05") + std = float("0.00678624") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-3.19003") + max_val = float("1.93308") + mean = float("0.508604") + std = float("0.871957") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("0.229244") + max_val = float("2.60033") + mean = float("0.516748") + std = float("0.323392") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.00224007") + max_val = float("3.81285") + mean = float("0.214005") + std = float("0.462532") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-0.449601") + max_val = float("0.320966") + mean = float("-0.0199257") + std = float("0.135458") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.149955") + max_val = float("0.139515") + mean = float("-0.000577616") + std = float("0.0141809") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape 
= [96] + dtype = "float32" + min_val = float("-4.89436") + max_val = float("1.73118") + mean = float("0.421666") + std = float("1.05477") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.368666") + max_val = float("6.94933") + mean = float("1.70017") + std = float("1.37461") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("0.00483646") + max_val = float("0.772254") + mean = float("0.109227") + std = float("0.134964") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-0.28843") + max_val = float("0.305353") + mean = float("0.017579") + std = float("0.109216") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.085332") + max_val = float("0.209102") + mean = float("0.000131895") + std = float("0.0127058") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [192] + dtype = "float32" + min_val = float("-2.26416") + max_val = float("1.81781") + mean = float("-0.104368") + std = float("0.765541") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [192] + dtype = "float32" + min_val = float("0.557485") + max_val = float("3.06726") + mean = float("1.03433") + std = float("0.294829") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [192] + dtype = "float32" + min_val = float("0.0122193") + max_val = float("31.99") + mean = float("0.691469") + std = float("2.78351") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [192] + dtype = "float32" + min_val = float("-0.716405") + max_val = float("0.543476") + mean = float("-0.0205713") + std = float("0.180835") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.088438") + max_val = float("0.0989281") + mean = float("-0.000264084") + std = float("0.00696874") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [128] + dtype = "float32" + min_val = float("-2.77349") + max_val = float("1.94915") + mean = float("-0.748001") + std = float("0.667633") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [128] + dtype = "float32" + min_val = float("0.282119") + max_val = float("2.11163") + mean = float("0.963232") + std = float("0.25164") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [128] + dtype = "float32" + min_val = float("0.00348181") + max_val = float("0.641549") + mean = float("0.0466075") + std = float("0.0727225") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [128] + dtype = "float32" + min_val = float("-0.799433") + max_val = float("0.769186") + mean = float("-0.000783121") + std = float("0.234277") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.237331") + max_val = float("0.234872") + mean = float("-0.00058892") + std = float("0.0227538") + data = None + + +class Program_weight_tensor_parameter_676: 
+ name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-0.0196388") + max_val = float("0.00429926") + mean = float("-0.00604744") + std = float("0.00423631") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.202175") + max_val = float("0.139733") + mean = float("-0.00708148") + std = float("0.015418") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.113449") + max_val = float("0.116382") + mean = float("-8.18214e-05") + std = float("0.0174881") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.110314") + max_val = float("0.0737469") + mean = float("-6.02772e-05") + std = float("0.0116185") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.114734") + max_val = float("0.10347") + mean = float("-0.000467215") + std = float("0.0140058") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.195911") + max_val = float("0.178967") + mean = float("-0.00335025") + std = float("0.0224713") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.100652") + max_val = float("0.124988") + mean = float("-0.00151496") + std = float("0.0136224") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.177811") + max_val = float("0.145591") + mean = float("-0.00141726") + std = float("0.0186424") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.374308") + max_val = float("0.181152") + mean = float("-0.000464201") + std = float("0.0312118") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.310804") + max_val = float("0.0760952") + mean = float("-0.000399181") + std = float("0.0158955") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.156863") + max_val = float("0.102575") + mean = float("-0.000657703") + std = float("0.0180323") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.231057") + max_val = float("0.209028") + mean = float("-0.00477756") + std = float("0.0398542") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.096208") + max_val = float("0.142834") + mean = float("-0.000778624") + std = 
float("0.0186653") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [96] + dtype = "float32" + min_val = float("-3.10984") + max_val = float("3.25719") + mean = float("0.366741") + std = float("1.14155") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [96] + dtype = "float32" + min_val = float("0.799139") + max_val = float("4.98464") + mean = float("1.87865") + std = float("0.779024") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [96] + dtype = "float32" + min_val = float("1.76406") + max_val = float("1001.9") + mean = float("80.1803") + std = float("148.327") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [96] + dtype = "float32" + min_val = float("-12.59") + max_val = float("13.3036") + mean = float("-0.396251") + std = float("4.14618") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.0895351") + max_val = float("0.0977452") + mean = float("-0.000531484") + std = float("0.0157853") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.15482") + max_val = float("0.145416") + mean = float("-0.000966986") + std = float("0.0231232") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.27982") + max_val = float("0.160959") + mean = float("0.00175241") + std = float("0.0302711") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name = "parameter_751" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.231799") + max_val = float("0.266845") + mean = float("0.00703356") + std = float("0.0617717") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..e914be348 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +9da1bb687362f1446a27742d64e3f86d5a3d7446f117fa51faab14ad46d53591 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/input_meta.py new file mode 100644 index 000000000..f2d0e16fd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/input_meta.py @@ -0,0 +1,79 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("9.31323e-10") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("-280.15") + max_val = float("841.797") + mean = float("255.956") + std = float("162.848") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [5376, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("508.0") + mean = float("256.0") + std = float("147.76") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 11, 4] + dtype = "float32" + min_val = float("60.4005") + max_val = float("512.0") + mean = float("223.716") + std = float("117.023") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/model.py new file mode 100644 index 000000000..fa605edb7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/model.py @@ -0,0 +1,338 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x11x1x4xf32) <- (2x11x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, 
full_int_array_0) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x5376x4xf32) <- (2x5376x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x11x1x2xf32) <- (2x11x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x11x1x2xf32) <- (2x11x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x5376x2xf32) <- (2x1x5376x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x5376x2xf32) <- (2x1x5376x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x11x5376x2xf32) <- (2x11x1x2xf32, 2x1x5376x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x11x5376x2xf32) <- (2x11x1x2xf32, 2x1x5376x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x11x5376x2xf32) <- (2x11x5376x2xf32, 2x11x5376x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x11x5376x2xf32) <- (2x11x5376x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x11x5376xf32) <- (2x11x5376x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x11x1x2xf32) <- (2x11x1x2xf32, 2x11x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x11x1x2xf32) <- (2x11x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x11x1xf32) <- (2x11x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x5376x2xf32) <- (2x1x5376x2xf32, 2x1x5376x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x5376x2xf32) <- (2x1x5376x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (2x1x5376xf32) <- (2x1x5376x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x11x5376xf32) <- (2x11x1xf32, 2x1x5376xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x5376xf32) <- (2x11x5376xf32, 1xf32) + scale_0 = 
paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del full_2, subtract_3 + + # pd_op.divide: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x4x5376xf32) <- (2x5376x4xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 11] + + # pd_op.tile: (2x11xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x11xi32) <- (2x11x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) + del data_3 + + # builtin.combine: ([2x11xi32, 2x11xi32]) <- (2x11xi32, 2x11xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x11x2xi32) <- ([2x11xi32, 2x11xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x11x5376xf32) <- (2x4x5376xf32, 2x11x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x11x5376xf32) <- (2x11x5376xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x11x5376xf32) <- (2x11x5376xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x5376x2xf32) <- (5376x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2, full_int_array_6 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x5376x1xf32, 1x1x5376x1xf32]) <- (1x1x5376x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # builtin.split: (1x1x5376x1xf32, 1x1x5376x1xf32) <- ([1x1x5376x1xf32, 1x1x5376x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x11x1x1xf32, 2x11x1x1xf32, 2x11x1x1xf32, 2x11x1x1xf32]) <- (2x11x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x11x1x1xf32, 2x11x1x1xf32, 2x11x1x1xf32, 2x11x1x1xf32) <- ([2x11x1x1xf32, 2x11x1x1xf32, 2x11x1x1xf32, 2x11x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x11x5376x1xf32) <- (1x1x5376x1xf32, 2x11x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x11x5376x1xf32) <- (1x1x5376x1xf32, 2x11x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del 
split_3 + + # pd_op.subtract: (2x11x5376x1xf32) <- (2x11x1x1xf32, 1x1x5376x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x11x5376x1xf32) <- (2x11x1x1xf32, 1x1x5376x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x11x5376x1xf32, 2x11x5376x1xf32, 2x11x5376x1xf32, 2x11x5376x1xf32]) <- (2x11x5376x1xf32, 2x11x5376x1xf32, 2x11x5376x1xf32, 2x11x5376x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x11x5376x4xf32) <- ([2x11x5376x1xf32, 2x11x5376x1xf32, 2x11x5376x1xf32, 2x11x5376x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1, full_7 + + # pd_op.min: (2x11x5376xf32) <- (2x11x5376x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x11x5376xb) <- (2x11x5376xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del full_8, min_0 + + # pd_op.cast: (2x11x5376xf32) <- (2x11x5376xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x11x13xf32, 2x11x13xi64) <- (2x11x5376xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_9, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_9, multiply_1 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("5376"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x11x13x5376xf32) <- (2x11x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 + ) + del full_10, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x11x5376xf32) <- (2x11x13x5376xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_5) + del sum_0 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x5376xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) + del cast_0, multiply_2 + + # pd_op.multiply: (2x11x5376xf32) <- (2x11x5376xf32, 2x11x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) + del data_5, multiply_3 + + # pd_op.sum: (2x5376xf32) <- (2x11x5376xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x5376xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = 
paddle._C_ops.greater_than(max_0, full_11) + del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..8803a71d1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +caff3ce11eecc715bd3f3941781af9e1fe02099d04db57f533f2d44d2d0dd33f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json new file mode 100644 index 000000000..4a2e26ae4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-L_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py new file mode 100644 index 000000000..b47fda69a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py @@ -0,0 +1,62 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 12096] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00223214") + std = float("0.0480607") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("0.940076") + mean = float("0.000804856") + std = float("0.0222414") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000202922") + std = float("0.0142436") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 4] + dtype = "float32" + max_val = float("629.571") + mean = float("192.521") + std = float("244.622") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("0.00694391") + mean = float("1.95634e-06") + std = float("9.01196e-05") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py new file mode 100644 index 000000000..41382ee7e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py @@ -0,0 +1,229 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x12096xf32) <- (2x12096xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, 
full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x12096xb) <- (2x1x12096xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 11, 1] + + # pd_op.tile: (2x11x12096xb) <- (2x1x12096xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x12096x11xf32) <- (2x12096xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x11x12096xf32) <- (2x12096x11xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x11x12096xf32) <- (2x11x12096xb, 2x11x12096xf32, 2x11x12096xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x12096xf32) <- (2x11x12096xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x12096xi64) <- (2x12096xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (24192xi64) <- (2x12096xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (24192xi32) <- (22xi32, 24192xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 12096] + + # pd_op.reshape: (2x12096xi32) <- (24192xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x12096xb) <- (2x12096xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x12096xi32) <- (2x12096xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, 
paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x12096xi32) <- (2x12096xb, 2x12096xi32, 2x12096xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (24192x4xf32) <- (22x4xf32, 24192xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 12096, 4] + + # pd_op.reshape: (2x12096x4xf32) <- (24192x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x12096x5xf32) <- (2x12096xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x12096x4xf32) <- (2x12096x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x12096xf32) <- (2x11x12096xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x12096x1xf32) <- (2x12096xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x12096x4xf32) <- (2x12096x4xf32, 2x12096x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/weight_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..712096340 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +c0ac440995621c9e2a73dee011d634dac3ca35eca6918a25e9e939c3d6586f0b \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_net.json similarity index 72% rename from paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json rename to paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_net.json index ffc4d714a..93527f12f 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_net.json +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_net.json @@ -1,6 +1,6 @@ { "framework": "paddle", - "model_name": "TimesNet_cls", + "model_name": "PP-YOLOE-R-L", "num_devices_required": 1, "num_nodes_required": 1 } \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/input_meta.py new file mode 100644 index 000000000..94adc2577 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/input_meta.py @@ -0,0 +1,240 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.066085] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [2.4124] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.04525] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [-0.172959] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.474131] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.802381] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.0333048] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.622674] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.561124] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.834973] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.37214] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.770523] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.528534] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.601143] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [-0.259786] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [1.29801] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [1.50558] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + 
dtype = "float32" + data = [1.22346] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1] + dtype = "float32" + data = [1.03017] + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [1] + dtype = "float32" + data = [1.2033] + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1] + dtype = "float32" + data = [-3.96205] + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [1] + dtype = "float32" + data = [1.50687] + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1] + dtype = "float32" + data = [1.15877] + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [1] + dtype = "float32" + data = [1.67365] + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1] + dtype = "float32" + data = [1.14694] + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [1] + dtype = "float32" + data = [0.849779] + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [1] + dtype = "float32" + data = [2.04379] + + +class Program_weight_tensor_data_27: + name = "data_27" + shape = [1] + dtype = "float32" + data = [1.85278] + + +class Program_weight_tensor_data_28: + name = "data_28" + shape = [1] + dtype = "float32" + data = [0.964129] + + +class Program_weight_tensor_data_29: + name = "data_29" + shape = [1] + dtype = "float32" + data = [4.44911] + + +class Program_weight_tensor_data_30: + name = "data_30" + shape = [1] + dtype = "float32" + data = [0.615654] + + +class Program_weight_tensor_data_31: + name = "data_31" + shape = [1] + dtype = "float32" + data = [0.454356] + + +class Program_weight_tensor_data_32: + name = "data_32" + shape = [1] + dtype = "float32" + data = [3.84236] + + +class Program_weight_tensor_data_33: + name = "data_33" + shape = [6, 3, 1024, 1024] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("-0.615839") + std = float("0.88445") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/model.py new file mode 100644 index 000000000..74172bff2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/model.py @@ -0,0 +1,7879 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + 
parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + 
parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + 
parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + 
parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + parameter_753, + parameter_754, + parameter_755, + parameter_756, + parameter_757, + parameter_758, + parameter_759, + parameter_760, + parameter_761, + parameter_762, + parameter_763, + parameter_764, + parameter_765, + parameter_766, + parameter_767, + parameter_768, + parameter_769, + parameter_770, + parameter_771, + parameter_772, + parameter_773, + parameter_774, + parameter_775, + parameter_776, + parameter_777, + parameter_778, + parameter_779, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + data_27, + data_28, + data_29, + data_30, + data_31, + data_32, + data_33, + ): + # pd_op.conv2d: (-1x32x512x512xf32) <- (-1x3x1024x1024xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_33, parameter_779, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_33, parameter_779 + + # pd_op.batch_norm_: (-1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_778, + parameter_777, + parameter_776, + parameter_775, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_775, parameter_776, parameter_777, parameter_778 + + # pd_op.swish: (-1x32x512x512xf32) <- (-1x32x512x512xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x32x512x512xf32) <- (-1x32x512x512xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_774, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_774, swish_0 + + # pd_op.batch_norm_: (-1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_773, + parameter_772, + parameter_771, + parameter_770, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_770, parameter_771, parameter_772, parameter_773 + + # pd_op.swish: 
(-1x32x512x512xf32) <- (-1x32x512x512xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x64x512x512xf32) <- (-1x32x512x512xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_769, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_769, swish_1 + + # pd_op.batch_norm_: (-1x64x512x512xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x512x512xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_768, + parameter_767, + parameter_766, + parameter_765, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_765, parameter_766, parameter_767, parameter_768 + + # pd_op.swish: (-1x64x512x512xf32) <- (-1x64x512x512xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x96x256x256xf32) <- (-1x64x512x512xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_764, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_764, swish_2 + + # pd_op.batch_norm_: (-1x96x256x256xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x256x256xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_763, + parameter_762, + parameter_761, + parameter_760, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_760, parameter_761, parameter_762, parameter_763 + + # pd_op.swish: (-1x96x256x256xf32) <- (-1x96x256x256xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x96x256x256xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_759, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_759 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_758, + parameter_757, + parameter_756, + parameter_755, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_755, parameter_756, parameter_757, parameter_758 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x96x256x256xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_754, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_754, swish_3 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_753, + parameter_752, + parameter_751, + parameter_750, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_750, parameter_751, parameter_752, parameter_753 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_749, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_749 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_748, + parameter_747, + parameter_746, + parameter_745, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_745, parameter_746, parameter_747, parameter_748 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_744, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_744 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_743, + parameter_742, + parameter_741, + parameter_740, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_740, parameter_741, parameter_742, parameter_743 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_739, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_739, swish_6 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_738, + parameter_737, + parameter_736, + parameter_735, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_735, parameter_736, parameter_737, parameter_738 + + # pd_op.multiply: (-1x48x256x256xf32) <- (1xf32, -1x48x256x256xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x48x256x256xf32) <- (-1x48x256x256xf32, -1x48x256x256xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x48x256x256xf32) <- 
(-1x48x256x256xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x48x256x256xf32) <- (-1x48x256x256xf32, -1x48x256x256xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_734, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_734 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_733, + parameter_732, + parameter_731, + parameter_730, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_730, parameter_731, parameter_732, parameter_733 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_729, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_729 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_728, + parameter_727, + parameter_726, + parameter_725, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_725, parameter_726, parameter_727, parameter_728 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_724, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_724, swish_8 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_723, + parameter_722, + parameter_721, + parameter_720, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_720, parameter_721, parameter_722, parameter_723 + + # pd_op.multiply: (-1x48x256x256xf32) <- (1xf32, -1x48x256x256xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del batch_norm__66, data_1 + + # pd_op.add: (-1x48x256x256xf32) <- (-1x48x256x256xf32, -1x48x256x256xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (-1x48x256x256xf32) <- (-1x48x256x256xf32, -1x48x256x256xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 
48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_719, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_719 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_718, + parameter_717, + parameter_716, + parameter_715, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_715, parameter_716, parameter_717, parameter_718 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_714, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_714 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_713, + parameter_712, + parameter_711, + parameter_710, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_710, parameter_711, parameter_712, parameter_713 + + # pd_op.conv2d: (-1x48x256x256xf32) <- (-1x48x256x256xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_709, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_709, swish_10 + + # pd_op.batch_norm_: (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_708, + parameter_707, + parameter_706, + parameter_705, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_705, parameter_706, parameter_707, parameter_708 + + # pd_op.multiply: (-1x48x256x256xf32) <- (1xf32, -1x48x256x256xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del batch_norm__84, data_2 + + # pd_op.add: (-1x48x256x256xf32) <- (-1x48x256x256xf32, -1x48x256x256xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + del batch_norm__78, multiply_2 + + # pd_op.swish: (-1x48x256x256xf32) <- (-1x48x256x256xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (-1x48x256x256xf32) <- (-1x48x256x256xf32, -1x48x256x256xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x48x256x256xf32, -1x48x256x256xf32]) <- (-1x48x256x256xf32, -1x48x256x256xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (-1x96x256x256xf32) <- 
([-1x48x256x256xf32, -1x48x256x256xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x256x256xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_704, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_704 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_703, full_int_array_1) + del parameter_703 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (-1x96x256x256xf32) <- (-1x96x256x256xf32, -1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x128x256x256xf32) <- (-1x96x256x256xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_702, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_702 + + # pd_op.batch_norm_: (-1x128x256x256xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x256x256xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.swish: (-1x128x256x256xf32) <- (-1x128x256x256xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x192x128x128xf32) <- (-1x128x256x256xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697, swish_12 + + # pd_op.batch_norm_: (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.swish: (-1x192x128x128xf32) <- (-1x192x128x128xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x192x128x128xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_692, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x192x128x128xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_687, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687, swish_13 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_682, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_673, parameter_674, parameter_675, parameter_676 + + # 
pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672, swish_16 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del batch_norm__126, data_3 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_661, + parameter_660, + parameter_659, + parameter_658, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_658, parameter_659, parameter_660, parameter_661 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_657, swish_18 + + # pd_op.batch_norm_: 
(-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_656, + parameter_655, + parameter_654, + parameter_653, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_653, parameter_654, parameter_655, parameter_656 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del batch_norm__144, data_4 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_652 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_651, + parameter_650, + parameter_649, + parameter_648, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_648, parameter_649, parameter_650, parameter_651 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_647 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_646, + parameter_645, + parameter_644, + parameter_643, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_643, parameter_644, parameter_645, parameter_646 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_642, swish_20 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_641, + parameter_640, + parameter_639, + parameter_638, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_638, parameter_639, parameter_640, parameter_641 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del batch_norm__162, data_5 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_637 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_636, + parameter_635, + parameter_634, + parameter_633, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_633, parameter_634, parameter_635, parameter_636 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_632 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_631, + parameter_630, + parameter_629, + parameter_628, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_628, parameter_629, parameter_630, parameter_631 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_627, swish_22 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_626, + parameter_625, + parameter_624, + parameter_623, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_623, parameter_624, parameter_625, parameter_626 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del batch_norm__180, data_6 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + del batch_norm__174, multiply_7 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_622, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_622 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_621, + parameter_620, + parameter_619, + parameter_618, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_618, parameter_619, parameter_620, parameter_621 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_617, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_617 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_616, + parameter_615, + parameter_614, + parameter_613, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_613, parameter_614, parameter_615, parameter_616 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_612, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_612, swish_24 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_611, + parameter_610, + parameter_609, + parameter_608, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_608, parameter_609, parameter_610, parameter_611 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_8 
= paddle._C_ops.multiply(data_7, batch_norm__198) + del batch_norm__198, data_7 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + del batch_norm__192, multiply_8 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_607, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_607 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_606, + parameter_605, + parameter_604, + parameter_603, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_603, parameter_604, parameter_605, parameter_606 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_602, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_602 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_601, + parameter_600, + parameter_599, + parameter_598, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_598, parameter_599, parameter_600, parameter_601 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_597, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_597, swish_26 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_596, + parameter_595, + parameter_594, + parameter_593, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_593, parameter_594, parameter_595, parameter_596 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del batch_norm__216, data_8 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + del batch_norm__210, 
multiply_9 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([-1x96x128x128xf32, -1x96x128x128xf32]) <- (-1x96x128x128xf32, -1x96x128x128xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (-1x192x128x128xf32) <- ([-1x96x128x128xf32, -1x96x128x128xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x128x128xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_592, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_592 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_591, full_int_array_1) + del parameter_591 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (-1x192x128x128xf32) <- (-1x192x128x128xf32, -1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x256x128x128xf32) <- (-1x192x128x128xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_10, parameter_590 + + # pd_op.batch_norm_: (-1x256x128x128xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x128x128xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.swish: (-1x256x128x128xf32) <- (-1x256x128x128xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x384x64x64xf32) <- (-1x256x128x128xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_585, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.swish: (-1x384x64x64xf32) <- (-1x384x64x64xf32) + swish_29 = 
paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x384x64x64xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_580, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x384x64x64xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575, swish_29 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560, swish_32 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del batch_norm__258, data_9 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_549, + parameter_548, + parameter_547, + parameter_546, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_546, parameter_547, parameter_548, parameter_549 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_545, swish_34 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_544, + parameter_543, + parameter_542, + parameter_541, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_541, parameter_542, parameter_543, parameter_544 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del batch_norm__276, data_10 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + del batch_norm__270, multiply_12 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_540 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_539, + parameter_538, + parameter_537, + parameter_536, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_536, parameter_537, parameter_538, parameter_539 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_535 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_534, + parameter_533, + parameter_532, + parameter_531, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_531, parameter_532, parameter_533, parameter_534 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 
192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_530, swish_36 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_529, + parameter_528, + parameter_527, + parameter_526, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_526, parameter_527, parameter_528, parameter_529 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del batch_norm__294, data_11 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + del batch_norm__288, multiply_13 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_525 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515, swish_38 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 
192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del batch_norm__312, data_12 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + del batch_norm__306, multiply_14 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_510, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_509, + parameter_508, + parameter_507, + parameter_506, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_506, parameter_507, parameter_508, parameter_509 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_505, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_505 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_504, + parameter_503, + parameter_502, + parameter_501, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_501, parameter_502, parameter_503, parameter_504 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_40, parameter_500, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_500, swish_40 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( 
+ paddle._C_ops.batch_norm( + conv2d_57, + parameter_499, + parameter_498, + parameter_497, + parameter_496, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_496, parameter_497, parameter_498, parameter_499 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del batch_norm__330, data_13 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + del batch_norm__324, multiply_15 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_495, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_495 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_494, + parameter_493, + parameter_492, + parameter_491, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_491, parameter_492, parameter_493, parameter_494 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_490, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_490 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_489, + parameter_488, + parameter_487, + parameter_486, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_486, parameter_487, parameter_488, parameter_489 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_485, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_485, swish_42 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_484, + parameter_483, + parameter_482, + parameter_481, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_481, parameter_482, parameter_483, parameter_484 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del batch_norm__348, data_14 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + del batch_norm__342, multiply_16 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([-1x192x64x64xf32, -1x192x64x64xf32]) <- (-1x192x64x64xf32, -1x192x64x64xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (-1x384x64x64xf32) <- ([-1x192x64x64xf32, -1x192x64x64xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x64x64xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_480, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_480 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_479, full_int_array_1) + del parameter_479 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (-1x384x64x64xf32) <- (-1x384x64x64xf32, -1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x512x64x64xf32) <- (-1x384x64x64xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_478 + + # pd_op.batch_norm_: (-1x512x64x64xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x64x64xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.swish: (-1x512x64x64xf32) <- (-1x512x64x64xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (-1x768x32x32xf32) <- (-1x512x64x64xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_473, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_63, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.swish: (-1x768x32x32xf32) <- (-1x768x32x32xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x768x32x32xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_468, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x768x32x32xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463, swish_45 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 
384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448, swish_48 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del batch_norm__390, data_15 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + del batch_norm__384, multiply_18 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 
384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433, swish_50 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del batch_norm__408, data_16 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + del batch_norm__402, multiply_19 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_73, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418, swish_52 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del batch_norm__426, data_17 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + del batch_norm__420, multiply_20 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([-1x384x32x32xf32, -1x384x32x32xf32]) <- (-1x384x32x32xf32, -1x384x32x32xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (-1x768x32x32xf32) <- ([-1x384x32x32xf32, -1x384x32x32xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x32x32xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_413, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_413 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_412, full_int_array_1) + del parameter_412 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (-1x768x32x32xf32) <- (-1x768x32x32xf32, -1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x1024x32x32xf32) <- (-1x768x32x32xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_411, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_21, parameter_411 + + # pd_op.batch_norm_: (-1x1024x32x32xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x32x32xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + 
batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.swish: (-1x1024x32x32xf32) <- (-1x1024x32x32xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x1024x32x32xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_54, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x1024x32x32xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_54, parameter_401, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401, swish_54 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396, swish_56 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_57 = 
paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_391, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386, swish_57 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_22 = paddle._C_ops.multiply(data_18, batch_norm__462) + del batch_norm__462, data_18 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_40 = paddle._C_ops.add(batch_norm__456, multiply_22) + del batch_norm__456, multiply_22 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_58 = paddle._C_ops.swish(add_40) + del add_40 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381, swish_58 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371, swish_59 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_370, + parameter_369, + parameter_368, + parameter_367, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_367, parameter_368, parameter_369, parameter_370 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_23 = paddle._C_ops.multiply(data_19, batch_norm__480) + del batch_norm__480, data_19 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_41 = paddle._C_ops.add(batch_norm__474, multiply_23) + del batch_norm__474, multiply_23 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_60 = paddle._C_ops.swish(add_41) + del add_41 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([-1x384x32x32xf32, -1x384x32x32xf32, -1x384x32x32xf32, -1x384x32x32xf32]) <- (-1x384x32x32xf32, -1x384x32x32xf32, -1x384x32x32xf32, -1x384x32x32xf32) + combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (-1x1536x32x32xf32) <- ([-1x384x32x32xf32, -1x384x32x32xf32, -1x384x32x32xf32, -1x384x32x32xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x1536x32x32xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_366 + + # 
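# The three max pools above (kernel sizes 5, 9 and 13, stride 1, paddings
# 2/4/6) together with the concat and the following 1x1 conv form an SPP
# block over the 32x32 feature map. A compact sketch of the same
# computation, with `w_fuse` a hypothetical [384, 1536, 1, 1] fuse weight
# (the generated code additionally applies BN and swish afterwards):
import paddle
import paddle.nn.functional as F

def spp_block(x, w_fuse):
    pools = [F.max_pool2d(x, kernel_size=k, stride=1, padding=k // 2) for k in (5, 9, 13)]
    y = paddle.concat([x] + pools, axis=1)  # 384 * 4 = 1536 channels
    return F.conv2d(y, w_fuse)              # back to 384 channels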
pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_365, + parameter_364, + parameter_363, + parameter_362, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_362, parameter_363, parameter_364, parameter_365 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_361, swish_61 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351, swish_62 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.multiply: 
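# Every conv2d_* / batch_norm / swish triplet in this file is the same
# conv-BN-swish unit instantiated with different shapes. A minimal
# nn.Layer equivalent (hypothetical class, not part of the generated
# file), matching the momentum 0.9 and epsilon 1e-05 used above:
import paddle.nn as nn

class ConvBNSwish(nn.Layer):
    def __init__(self, in_ch, out_ch, kernel_size, stride=1):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, kernel_size, stride=stride,
                              padding=kernel_size // 2, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch, momentum=0.9, epsilon=1e-5)
        self.act = nn.Swish()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))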
(-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_24 = paddle._C_ops.multiply(data_20, batch_norm__504) + del batch_norm__504, data_20 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_42 = paddle._C_ops.add(batch_norm__498, multiply_24) + del batch_norm__498, multiply_24 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_63 = paddle._C_ops.swish(add_42) + del add_42 + + # builtin.combine: ([-1x384x32x32xf32, -1x384x32x32xf32]) <- (-1x384x32x32xf32, -1x384x32x32xf32) + combine_5 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (-1x768x32x32xf32) <- ([-1x384x32x32xf32, -1x384x32x32xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x768x32x32xf32) <- (-1x768x32x32xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_346, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_346 + + # pd_op.batch_norm_: (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (-1x768x32x32xf32) <- (-1x768x32x32xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x768x32x32xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_341, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (-1x384x64x64xf32) <- (-1x384x32x32xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([-1x384x64x64xf32, -1x512x64x64xf32]) <- (-1x384x64x64xf32, -1x512x64x64xf32) + combine_6 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (-1x896x64x64xf32) <- ([-1x384x64x64xf32, -1x512x64x64xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x896x64x64xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
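# nearest_interp_0 above is a plain 2x nearest-neighbour upsample of the
# 32x32 map, which is then concatenated channel-wise with the 64x64
# lateral feature (swish_44): the top-down step of the FPN half of the
# neck. Equivalent functional form (a sketch, assuming dynamic-graph
# paddle.nn.functional):
import paddle
import paddle.nn.functional as F

def top_down_merge(coarse_feat, lateral_feat):
    up = F.interpolate(coarse_feat, scale_factor=2, mode="nearest")
    return paddle.concat([up, lateral_feat], axis=1)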
parameter_336 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x896x64x64xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_331 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_330, + parameter_329, + parameter_328, + parameter_327, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_327, parameter_328, parameter_329, parameter_330 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_326, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_326, swish_67 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_325, + parameter_324, + parameter_323, + parameter_322, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_322, parameter_323, parameter_324, parameter_325 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_321, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_321 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_320, + parameter_319, + parameter_318, + parameter_317, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del conv2d_94, parameter_317, parameter_318, parameter_319, parameter_320 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_316, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_316, swish_68 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_315, + parameter_314, + parameter_313, + parameter_312, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_312, parameter_313, parameter_314, parameter_315 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_25 = paddle._C_ops.multiply(data_21, batch_norm__546) + del batch_norm__546, data_21 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_43 = paddle._C_ops.add(batch_norm__540, multiply_25) + del batch_norm__540, multiply_25 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_69 = paddle._C_ops.swish(add_43) + del add_43 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_311, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_311, swish_69 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_310, + parameter_309, + parameter_308, + parameter_307, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_307, parameter_308, parameter_309, parameter_310 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_306, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_306 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_305, + parameter_304, + parameter_303, + parameter_302, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_302, parameter_303, parameter_304, parameter_305 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_301, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_301, swish_70 + + # pd_op.batch_norm_: 
(-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_300, + parameter_299, + parameter_298, + parameter_297, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_297, parameter_298, parameter_299, parameter_300 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_26 = paddle._C_ops.multiply(data_22, batch_norm__564) + del batch_norm__564, data_22 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_44 = paddle._C_ops.add(batch_norm__558, multiply_26) + del batch_norm__558, multiply_26 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_71 = paddle._C_ops.swish(add_44) + del add_44 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_296, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_296, swish_71 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_295, + parameter_294, + parameter_293, + parameter_292, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_292, parameter_293, parameter_294, parameter_295 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_291, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_291 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_290, + parameter_289, + parameter_288, + parameter_287, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_287, parameter_288, parameter_289, parameter_290 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_286, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_286, swish_72 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_285, + parameter_284, + parameter_283, 
+ parameter_282, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_282, parameter_283, parameter_284, parameter_285 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_27 = paddle._C_ops.multiply(data_23, batch_norm__582) + del batch_norm__582, data_23 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_45 = paddle._C_ops.add(batch_norm__576, multiply_27) + del batch_norm__576, multiply_27 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_73 = paddle._C_ops.swish(add_45) + del add_45 + + # builtin.combine: ([-1x192x64x64xf32, -1x192x64x64xf32]) <- (-1x192x64x64xf32, -1x192x64x64xf32) + combine_7 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (-1x384x64x64xf32) <- ([-1x192x64x64xf32, -1x192x64x64xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x384x64x64xf32) <- (-1x384x64x64xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_281, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_281 + + # pd_op.batch_norm_: (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_280, + parameter_279, + parameter_278, + parameter_277, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_277, parameter_278, parameter_279, parameter_280 + + # pd_op.swish: (-1x384x64x64xf32) <- (-1x384x64x64xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x384x64x64xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_276, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_276 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_275, + parameter_274, + parameter_273, + parameter_272, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_272, parameter_273, parameter_274, parameter_275 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (-1x192x128x128xf32) <- (-1x192x64x64xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([-1x192x128x128xf32, -1x256x128x128xf32]) <- (-1x192x128x128xf32, -1x256x128x128xf32) + combine_8 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (-1x448x128x128xf32) <- 
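# The span from conv2d_91 to conv2d_102 is one CSP-style stage: the
# 896-channel input is split by two parallel 1x1 convs into a bypass
# branch (swish_66) and a main branch that runs through the scaled
# residual units, and the two halves are concatenated and fused by a
# final 1x1 conv. Schematic sketch with hypothetical module names
# (each conv in the generated code is really a conv-BN-swish unit):
import paddle
import paddle.nn as nn

class CSPStage(nn.Layer):
    def __init__(self, in_ch, mid_ch, blocks):
        super().__init__()
        self.split_bypass = nn.Conv2D(in_ch, mid_ch, 1)
        self.split_main = nn.Conv2D(in_ch, mid_ch, 1)
        self.blocks = nn.Sequential(*blocks)              # the residual units
        self.fuse = nn.Conv2D(2 * mid_ch, 2 * mid_ch, 1)

    def forward(self, x):
        bypass = self.split_bypass(x)
        main = self.blocks(self.split_main(x))
        return self.fuse(paddle.concat([bypass, main], axis=1))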
([-1x192x128x128xf32, -1x256x128x128xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x448x128x128xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_271, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_271 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_270, + parameter_269, + parameter_268, + parameter_267, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_267, parameter_268, parameter_269, parameter_270 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x448x128x128xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_266, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_266 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_265, + parameter_264, + parameter_263, + parameter_262, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_262, parameter_263, parameter_264, parameter_265 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_261, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_261, swish_77 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_260, + parameter_259, + parameter_258, + parameter_257, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_257, parameter_258, parameter_259, parameter_260 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_256, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + 
batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_255, + parameter_254, + parameter_253, + parameter_252, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_252, parameter_253, parameter_254, parameter_255 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_251, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_251, swish_78 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_250, + parameter_249, + parameter_248, + parameter_247, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_247, parameter_248, parameter_249, parameter_250 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_28 = paddle._C_ops.multiply(data_24, batch_norm__624) + del batch_norm__624, data_24 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_46 = paddle._C_ops.add(batch_norm__618, multiply_28) + del batch_norm__618, multiply_28 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_79 = paddle._C_ops.swish(add_46) + del add_46 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_246, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_246, swish_79 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_245, + parameter_244, + parameter_243, + parameter_242, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_242, parameter_243, parameter_244, parameter_245 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_241, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_241 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_240, + parameter_239, + parameter_238, + parameter_237, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), 
+ ) + del conv2d_110, parameter_237, parameter_238, parameter_239, parameter_240 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_236, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_236, swish_80 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_235, + parameter_234, + parameter_233, + parameter_232, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_232, parameter_233, parameter_234, parameter_235 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_29 = paddle._C_ops.multiply(data_25, batch_norm__642) + del batch_norm__642, data_25 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_47 = paddle._C_ops.add(batch_norm__636, multiply_29) + del batch_norm__636, multiply_29 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_81 = paddle._C_ops.swish(add_47) + del add_47 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_231, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_231, swish_81 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_230, + parameter_229, + parameter_228, + parameter_227, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_227, parameter_228, parameter_229, parameter_230 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_226, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_226 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_225, + parameter_224, + parameter_223, + parameter_222, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_222, parameter_223, parameter_224, parameter_225 + + # pd_op.conv2d: (-1x96x128x128xf32) <- (-1x96x128x128xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_221, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_221, swish_82 + + # pd_op.batch_norm_: (-1x96x128x128xf32, 96xf32, 96xf32, 
96xf32, 96xf32, -1xui8) <- (-1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_220, + parameter_219, + parameter_218, + parameter_217, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_217, parameter_218, parameter_219, parameter_220 + + # pd_op.multiply: (-1x96x128x128xf32) <- (1xf32, -1x96x128x128xf32) + multiply_30 = paddle._C_ops.multiply(data_26, batch_norm__660) + del batch_norm__660, data_26 + + # pd_op.add: (-1x96x128x128xf32) <- (-1x96x128x128xf32, -1x96x128x128xf32) + add_48 = paddle._C_ops.add(batch_norm__654, multiply_30) + del batch_norm__654, multiply_30 + + # pd_op.swish: (-1x96x128x128xf32) <- (-1x96x128x128xf32) + swish_83 = paddle._C_ops.swish(add_48) + del add_48 + + # builtin.combine: ([-1x96x128x128xf32, -1x96x128x128xf32]) <- (-1x96x128x128xf32, -1x96x128x128xf32) + combine_9 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (-1x192x128x128xf32) <- ([-1x96x128x128xf32, -1x96x128x128xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x192x128x128xf32) <- (-1x192x128x128xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_216 + + # pd_op.batch_norm_: (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_215, + parameter_214, + parameter_213, + parameter_212, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_212, parameter_213, parameter_214, parameter_215 + + # pd_op.swish: (-1x192x128x128xf32) <- (-1x192x128x128xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x128x128xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_211 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_210, + parameter_209, + parameter_208, + parameter_207, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_207, parameter_208, parameter_209, parameter_210 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([-1x192x64x64xf32, -1x384x64x64xf32]) <- (-1x192x64x64xf32, -1x384x64x64xf32) + combine_10 = [swish_85, swish_74] + del swish_74, 
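# conv2d_116 above uses stride [2, 2], halving the 128x128 map to 64x64;
# the result is concatenated with the 64x64 top-down feature (swish_74),
# which is the bottom-up (PAN) half of the neck. Functional sketch, with
# `w_down` a hypothetical 192x192x3x3 weight (the BN and swish applied in
# the generated code are omitted here):
import paddle
import paddle.nn.functional as F

def bottom_up_merge(fine_feat, top_down_feat, w_down):
    down = F.conv2d(fine_feat, w_down, stride=2, padding=1)
    return paddle.concat([down, top_down_feat], axis=1)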
swish_85 + + # pd_op.concat: (-1x576x64x64xf32) <- ([-1x192x64x64xf32, -1x384x64x64xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x576x64x64xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_206 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_205, + parameter_204, + parameter_203, + parameter_202, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_202, parameter_203, parameter_204, parameter_205 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x576x64x64xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_201 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196, swish_87 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_195, + parameter_194, + parameter_193, + parameter_192, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_192, parameter_193, parameter_194, parameter_195 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_191 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + 
batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_190, + parameter_189, + parameter_188, + parameter_187, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_187, parameter_188, parameter_189, parameter_190 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_186, swish_88 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_185, + parameter_184, + parameter_183, + parameter_182, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_182, parameter_183, parameter_184, parameter_185 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_31 = paddle._C_ops.multiply(data_27, batch_norm__702) + del batch_norm__702, data_27 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_49 = paddle._C_ops.add(batch_norm__696, multiply_31) + del batch_norm__696, multiply_31 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_89 = paddle._C_ops.swish(add_49) + del add_49 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_181, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_181, swish_89 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_180, + parameter_179, + parameter_178, + parameter_177, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_177, parameter_178, parameter_179, parameter_180 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_176, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_176 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_175, + parameter_174, + parameter_173, + parameter_172, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_172, parameter_173, parameter_174, parameter_175 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_171, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_171, swish_90 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_170, + parameter_169, + parameter_168, + parameter_167, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_167, parameter_168, parameter_169, parameter_170 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_32 = paddle._C_ops.multiply(data_28, batch_norm__720) + del batch_norm__720, data_28 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_50 = paddle._C_ops.add(batch_norm__714, multiply_32) + del batch_norm__714, multiply_32 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_91 = paddle._C_ops.swish(add_50) + del add_50 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_166, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_166, swish_91 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_165, + parameter_164, + parameter_163, + parameter_162, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, parameter_162, parameter_163, parameter_164, parameter_165 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_161, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_161 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_160, + parameter_159, + parameter_158, + parameter_157, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_157, parameter_158, parameter_159, parameter_160 + + # pd_op.conv2d: (-1x192x64x64xf32) <- (-1x192x64x64xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_156, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_156, swish_92 + + # pd_op.batch_norm_: (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_155, + parameter_154, + parameter_153, + parameter_152, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_152, parameter_153, parameter_154, parameter_155 + + # pd_op.multiply: (-1x192x64x64xf32) <- (1xf32, -1x192x64x64xf32) + multiply_33 = paddle._C_ops.multiply(data_29, batch_norm__738) + del batch_norm__738, data_29 + + # pd_op.add: (-1x192x64x64xf32) <- (-1x192x64x64xf32, -1x192x64x64xf32) + add_51 = paddle._C_ops.add(batch_norm__732, multiply_33) + del batch_norm__732, multiply_33 + + # pd_op.swish: (-1x192x64x64xf32) <- (-1x192x64x64xf32) + swish_93 = paddle._C_ops.swish(add_51) + del add_51 + + # builtin.combine: ([-1x192x64x64xf32, -1x192x64x64xf32]) <- (-1x192x64x64xf32, -1x192x64x64xf32) + combine_11 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (-1x384x64x64xf32) <- ([-1x192x64x64xf32, -1x192x64x64xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x384x64x64xf32) <- (-1x384x64x64xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_151, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_151 + + # pd_op.batch_norm_: (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_150, + parameter_149, + parameter_148, + parameter_147, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_147, parameter_148, parameter_149, parameter_150 + + # pd_op.swish: (-1x384x64x64xf32) <- (-1x384x64x64xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x64x64xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_146, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_146 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_145, + parameter_144, + parameter_143, + parameter_142, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_142, parameter_143, parameter_144, parameter_145 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([-1x384x32x32xf32, -1x768x32x32xf32]) <- (-1x384x32x32xf32, 
-1x768x32x32xf32) + combine_12 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (-1x1152x32x32xf32) <- ([-1x384x32x32xf32, -1x768x32x32xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x1152x32x32xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_141, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_141 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_140, + parameter_139, + parameter_138, + parameter_137, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_137, parameter_138, parameter_139, parameter_140 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x1152x32x32xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_136, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_136 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_135, + parameter_134, + parameter_133, + parameter_132, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_132, parameter_133, parameter_134, parameter_135 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_131, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_131, swish_97 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_130, + parameter_129, + parameter_128, + parameter_127, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_127, parameter_128, parameter_129, parameter_130 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_126, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_126 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, 
-1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_125, + parameter_124, + parameter_123, + parameter_122, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_122, parameter_123, parameter_124, parameter_125 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_121, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_121, swish_98 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_120, + parameter_119, + parameter_118, + parameter_117, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_117, parameter_118, parameter_119, parameter_120 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_34 = paddle._C_ops.multiply(data_30, batch_norm__780) + del batch_norm__780, data_30 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_52 = paddle._C_ops.add(batch_norm__774, multiply_34) + del batch_norm__774, multiply_34 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_99 = paddle._C_ops.swish(add_52) + del add_52 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_116, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_116, swish_99 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_115, + parameter_114, + parameter_113, + parameter_112, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_112, parameter_113, parameter_114, parameter_115 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_111, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_111 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_110, + parameter_109, + parameter_108, + parameter_107, + True, + float("0.9"), + 
float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_107, parameter_108, parameter_109, parameter_110 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_106, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_106, swish_100 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_105, + parameter_104, + parameter_103, + parameter_102, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_137, parameter_102, parameter_103, parameter_104, parameter_105 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_35 = paddle._C_ops.multiply(data_31, batch_norm__798) + del batch_norm__798, data_31 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_53 = paddle._C_ops.add(batch_norm__792, multiply_35) + del batch_norm__792, multiply_35 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_101 = paddle._C_ops.swish(add_53) + del add_53 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_101, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_101, swish_101 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_100, + parameter_99, + parameter_98, + parameter_97, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_100, parameter_97, parameter_98, parameter_99 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_96, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_96 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_95, + parameter_94, + parameter_93, + parameter_92, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_92, parameter_93, parameter_94, parameter_95 + + # pd_op.conv2d: (-1x384x32x32xf32) <- (-1x384x32x32xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, 
parameter_91, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_91, swish_102 + + # pd_op.batch_norm_: (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_90, + parameter_89, + parameter_88, + parameter_87, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_87, parameter_88, parameter_89, parameter_90 + + # pd_op.multiply: (-1x384x32x32xf32) <- (1xf32, -1x384x32x32xf32) + multiply_36 = paddle._C_ops.multiply(data_32, batch_norm__816) + del batch_norm__816, data_32 + + # pd_op.add: (-1x384x32x32xf32) <- (-1x384x32x32xf32, -1x384x32x32xf32) + add_54 = paddle._C_ops.add(batch_norm__810, multiply_36) + del batch_norm__810, multiply_36 + + # pd_op.swish: (-1x384x32x32xf32) <- (-1x384x32x32xf32) + swish_103 = paddle._C_ops.swish(add_54) + del add_54 + + # builtin.combine: ([-1x384x32x32xf32, -1x384x32x32xf32]) <- (-1x384x32x32xf32, -1x384x32x32xf32) + combine_13 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (-1x768x32x32xf32) <- ([-1x384x32x32xf32, -1x384x32x32xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x768x32x32xf32) <- (-1x768x32x32xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_86, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_86 + + # pd_op.batch_norm_: (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_85, + parameter_84, + parameter_83, + parameter_82, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_82, parameter_83, parameter_84, parameter_85 + + # pd_op.swish: (-1x768x32x32xf32) <- (-1x768x32x32xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (-1x768x32x32xf32) + shape64_0 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("32"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int64") + del full_2 + + # pd_op.cast: (32xf32) <- (32xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: 
(1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_4, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_5, float("0"), True) + del scale_0 + + # builtin.combine: ([32xf32, 32xf32]) <- (32xf32, 32xf32) + combine_14 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_14) + del combine_14 + + # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) + combine_15 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) + stack_0 = paddle._C_ops.stack(combine_15, -1) + del combine_15 + + # pd_op.cast: (32x32x2xf32) <- (32x32x2xf32) + cast_1 = paddle._C_ops.cast(stack_0, paddle.float32) + del stack_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_7 = [1, -1, 2] + + # pd_op.reshape: (1x1024x2xf32) <- (32x32x2xf32, 3xi64) + reshape_4 = paddle._C_ops.reshape(cast_1, full_int_array_7) + del cast_1 + + # pd_op.full: (1x1024x1xf32) <- () + full_6 = paddle._C_ops.full( + [1, 1024, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.shape64: (4xi64) <- (-1x384x64x64xf32) + shape64_1 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_1 + + # pd_op.full: (1xf64) <- () + full_7 = paddle._C_ops.full( + [1], float("64"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_1, full_7, full_3, dtype="int64") + del full_7 + + # pd_op.cast: (64xf32) <- (64xi64) + cast_2 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_2, full_4, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_8, float("0"), True) + del scale_2 + + # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) + combine_16 = [scale_3, scale_3] + del scale_3 + + # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_16) + del combine_16 + + # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) + combine_17 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) + stack_1 = paddle._C_ops.stack(combine_17, -1) + del combine_17 + + # pd_op.cast: (64x64x2xf32) <- (64x64x2xf32) + cast_3 = paddle._C_ops.cast(stack_1, paddle.float32) + del stack_1 + + # pd_op.reshape: (1x4096x2xf32) <- (64x64x2xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(cast_3, full_int_array_7) + del cast_3 + + # pd_op.full: 
(1x4096x1xf32) <- () + full_9 = paddle._C_ops.full( + [1, 4096, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.shape64: (4xi64) <- (-1x192x128x128xf32) + shape64_2 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, full_int_array_6, shape64_2 + + # pd_op.full: (1xf64) <- () + full_10 = paddle._C_ops.full( + [1], float("128"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (128xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_1, full_10, full_3, dtype="int64") + del full_1, full_10, full_3 + + # pd_op.cast: (128xf32) <- (128xi64) + cast_4 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_4, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_5 = paddle._C_ops.scale(scale_4, full_11, float("0"), True) + del scale_4 + + # builtin.combine: ([128xf32, 128xf32]) <- (128xf32, 128xf32) + combine_18 = [scale_5, scale_5] + del scale_5 + + # pd_op.meshgrid: ([128x128xf32, 128x128xf32]) <- ([128xf32, 128xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_18) + del combine_18 + + # builtin.split: (128x128xf32, 128x128xf32) <- ([128x128xf32, 128x128xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # builtin.combine: ([128x128xf32, 128x128xf32]) <- (128x128xf32, 128x128xf32) + combine_19 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (128x128x2xf32) <- ([128x128xf32, 128x128xf32]) + stack_2 = paddle._C_ops.stack(combine_19, -1) + del combine_19 + + # pd_op.cast: (128x128x2xf32) <- (128x128x2xf32) + cast_5 = paddle._C_ops.cast(stack_2, paddle.float32) + del stack_2 + + # pd_op.reshape: (1x16384x2xf32) <- (128x128x2xf32, 3xi64) + reshape_6 = paddle._C_ops.reshape(cast_5, full_int_array_7) + del cast_5, full_int_array_7 + + # pd_op.full: (1x16384x1xf32) <- () + full_12 = paddle._C_ops.full( + [1, 16384, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # builtin.combine: ([1x1024x2xf32, 1x4096x2xf32, 1x16384x2xf32]) <- (1x1024x2xf32, 1x4096x2xf32, 1x16384x2xf32) + combine_20 = [reshape_4, reshape_5, reshape_6] + del reshape_4, reshape_5, reshape_6 + + # pd_op.concat: (1x21504x2xf32) <- ([1x1024x2xf32, 1x4096x2xf32, 1x16384x2xf32], 1xi32) + concat_16 = paddle._C_ops.concat(combine_20, full_0) + del combine_20 + + # builtin.combine: ([1x1024x1xf32, 1x4096x1xf32, 1x16384x1xf32]) <- (1x1024x1xf32, 1x4096x1xf32, 1x16384x1xf32) + combine_21 = [full_6, full_9, full_12] + del full_12, full_6, full_9 + + # pd_op.concat: (1x21504x1xf32) <- ([1x1024x1xf32, 1x4096x1xf32, 1x16384x1xf32], 1xi32) + concat_17 = paddle._C_ops.concat(combine_21, full_0) + del combine_21 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [1, 1] + + # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x32x32xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_81, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_81 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_80, full_int_array_1) + del parameter_80 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_55 = paddle._C_ops.add(conv2d_142, reshape_7) + del conv2d_142, reshape_7 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_55) + del add_55 + + # pd_op.multiply: (-1x768x32x32xf32) <- (-1x768x32x32xf32, -1x768x1x1xf32) + multiply_37 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x768x32x32xf32) <- (-1x768x32x32xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_37, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_37, parameter_79 + + # pd_op.batch_norm_: (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.swish: (-1x768x32x32xf32) <- (-1x768x32x32xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (-1x768x32x32xf32) <- (-1x768x32x32xf32, -1x768x32x32xf32) + add_56 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (-1x15x32x32xf32) <- (-1x768x32x32xf32, 15x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_56, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_56, parameter_74 + + # pd_op.reshape: (1x15x1x1xf32) <- (15xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_73, full_int_array_1) + del parameter_73 + + # pd_op.add: (-1x15x32x32xf32) <- (-1x15x32x32xf32, 1x15x1x1xf32) + add_57 = paddle._C_ops.add(conv2d_144, reshape_8) + del conv2d_144, reshape_8 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_72, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_72 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_71, full_int_array_1) + del parameter_71 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_58 = paddle._C_ops.add(conv2d_145, reshape_9) + del conv2d_145, reshape_9 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_58) + del add_58 + + # pd_op.multiply: (-1x768x32x32xf32) <- (-1x768x32x32xf32, -1x768x1x1xf32) + multiply_38 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1 + + # pd_op.conv2d: (-1x768x32x32xf32) <- (-1x768x32x32xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_38, parameter_70, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_38, parameter_70 + + # pd_op.batch_norm_: (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_146, + parameter_69, + parameter_68, + parameter_67, + parameter_66, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_66, parameter_67, parameter_68, parameter_69 + + # pd_op.swish: (-1x768x32x32xf32) <- (-1x768x32x32xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 + + # pd_op.conv2d: (-1x4x32x32xf32) <- (-1x768x32x32xf32, 4x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_65, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_65, swish_106 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_64, full_int_array_1) + del parameter_64 + + # pd_op.add: (-1x4x32x32xf32) <- (-1x4x32x32xf32, 1x4x1x1xf32) + add_59 = paddle._C_ops.add(conv2d_147, reshape_10) + del conv2d_147, reshape_10 + + # pd_op.split_with_num: ([-1x2x32x32xf32, -1x2x32x32xf32]) <- (-1x4x32x32xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(add_59, 2, full_0) + del add_59 + + # builtin.split: (-1x2x32x32xf32, -1x2x32x32xf32) <- ([-1x2x32x32xf32, -1x2x32x32xf32]) + ( + split_6, + split_7, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.scale: (-1x2x32x32xf32) <- (-1x2x32x32xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_6, full_5, float("0"), True) + del split_6 + + # pd_op.elu: (-1x2x32x32xf32) <- (-1x2x32x32xf32) + elu_0 = paddle._C_ops.elu(split_7, float("1")) + del split_7 + + # pd_op.scale: (-1x2x32x32xf32) <- (-1x2x32x32xf32, 1xf32) + scale_7 = paddle._C_ops.scale(elu_0, full_4, float("1"), True) + del elu_0 + + # pd_op.scale: (-1x2x32x32xf32) <- (-1x2x32x32xf32, 1xf32) + scale_8 = paddle._C_ops.scale(scale_7, full_5, float("0"), True) + del full_5, scale_7 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + pool2d_3, parameter_63, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_63, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_62, full_int_array_1) + del parameter_62 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_60 = paddle._C_ops.add(conv2d_148, reshape_11) + del conv2d_148, reshape_11 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_60) + del add_60 + + # pd_op.multiply: (-1x768x32x32xf32) <- (-1x768x32x32xf32, -1x768x1x1xf32) + multiply_39 = paddle._C_ops.multiply(swish_104, sigmoid_2) + del sigmoid_2, swish_104 + + # pd_op.conv2d: (-1x768x32x32xf32) <- (-1x768x32x32xf32, 768x768x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + multiply_39, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_39, parameter_61 + + # pd_op.batch_norm_: (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_149, + parameter_60, + parameter_59, + parameter_58, + parameter_57, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_149, parameter_57, parameter_58, parameter_59, parameter_60 + + # pd_op.swish: 
(-1x768x32x32xf32) <- (-1x768x32x32xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.conv2d: (-1x91x32x32xf32) <- (-1x768x32x32xf32, 91x768x3x3xf32) + conv2d_150 = paddle._C_ops.conv2d( + swish_107, parameter_56, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_56, swish_107 + + # pd_op.reshape: (1x91x1x1xf32) <- (91xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_55, full_int_array_1) + del parameter_55 + + # pd_op.add: (-1x91x32x32xf32) <- (-1x91x32x32xf32, 1x91x1x1xf32) + add_61 = paddle._C_ops.add(conv2d_150, reshape_12) + del conv2d_150, reshape_12 + + # pd_op.softmax: (-1x91x32x32xf32) <- (-1x91x32x32xf32) + softmax_0 = paddle._C_ops.softmax(add_61, 1) + del add_61 + + # pd_op.conv2d: (-1x1x32x32xf32) <- (-1x91x32x32xf32, 1x91x1x1xf32) + conv2d_151 = paddle._C_ops.conv2d( + softmax_0, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # builtin.combine: ([-1x2x32x32xf32, -1x2x32x32xf32, -1x1x32x32xf32]) <- (-1x2x32x32xf32, -1x2x32x32xf32, -1x1x32x32xf32) + combine_22 = [scale_6, scale_8, conv2d_151] + del conv2d_151, scale_6, scale_8 + + # pd_op.concat: (-1x5x32x32xf32) <- ([-1x2x32x32xf32, -1x2x32x32xf32, -1x1x32x32xf32], 1xi32) + concat_18 = paddle._C_ops.concat(combine_22, full_0) + del combine_22 + + # pd_op.sigmoid: (-1x15x32x32xf32) <- (-1x15x32x32xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # pd_op.full: (xi64) <- () + full_13 = paddle._C_ops.full( + [], float("15"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_14 = paddle._C_ops.full( + [], float("1024"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_23 = [slice_0, full_13, full_14] + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_23, 0) + del combine_23 + + # pd_op.reshape: (-1x15x1024xf32) <- (-1x15x32x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(sigmoid_3, stack_3) + del sigmoid_3, stack_3 + + # pd_op.full: (xi64) <- () + full_15 = paddle._C_ops.full( + [], float("5"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_24 = [slice_0, full_15, full_14] + del full_14, slice_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_24, 0) + del combine_24 + + # pd_op.reshape: (-1x5x1024xf32) <- (-1x5x32x32xf32, 3xi64) + reshape_14 = paddle._C_ops.reshape(concat_18, stack_4) + del concat_18, stack_4 + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x64x64xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_52, full_int_array_1) + del parameter_52 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_62 = paddle._C_ops.add(conv2d_152, reshape_15) + del conv2d_152, reshape_15 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # pd_op.multiply: (-1x384x64x64xf32) <- (-1x384x64x64xf32, -1x384x1x1xf32) + multiply_40 = paddle._C_ops.multiply(swish_94, 
sigmoid_4) + del sigmoid_4 + + # pd_op.conv2d: (-1x384x64x64xf32) <- (-1x384x64x64xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + multiply_40, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_40, parameter_51 + + # pd_op.batch_norm_: (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (-1x384x64x64xf32) <- (-1x384x64x64xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.add: (-1x384x64x64xf32) <- (-1x384x64x64xf32, -1x384x64x64xf32) + add_63 = paddle._C_ops.add(swish_108, swish_94) + del swish_108 + + # pd_op.conv2d: (-1x15x64x64xf32) <- (-1x384x64x64xf32, 15x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + add_63, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_63, parameter_46 + + # pd_op.reshape: (1x15x1x1xf32) <- (15xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_45, full_int_array_1) + del parameter_45 + + # pd_op.add: (-1x15x64x64xf32) <- (-1x15x64x64xf32, 1x15x1x1xf32) + add_64 = paddle._C_ops.add(conv2d_154, reshape_16) + del conv2d_154, reshape_16 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + pool2d_4, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_43, full_int_array_1) + del parameter_43 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_65 = paddle._C_ops.add(conv2d_155, reshape_17) + del conv2d_155, reshape_17 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_65) + del add_65 + + # pd_op.multiply: (-1x384x64x64xf32) <- (-1x384x64x64xf32, -1x384x1x1xf32) + multiply_41 = paddle._C_ops.multiply(swish_94, sigmoid_5) + del sigmoid_5 + + # pd_op.conv2d: (-1x384x64x64xf32) <- (-1x384x64x64xf32, 384x384x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + multiply_41, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_41, parameter_42 + + # pd_op.batch_norm_: (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_156, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_156, parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (-1x384x64x64xf32) <- (-1x384x64x64xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.conv2d: (-1x4x64x64xf32) <- (-1x384x64x64xf32, 4x384x3x3xf32) + conv2d_157 = 
paddle._C_ops.conv2d( + swish_109, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37, swish_109 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_36, full_int_array_1) + del parameter_36 + + # pd_op.add: (-1x4x64x64xf32) <- (-1x4x64x64xf32, 1x4x1x1xf32) + add_66 = paddle._C_ops.add(conv2d_157, reshape_18) + del conv2d_157, reshape_18 + + # pd_op.split_with_num: ([-1x2x64x64xf32, -1x2x64x64xf32]) <- (-1x4x64x64xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(add_66, 2, full_0) + del add_66 + + # builtin.split: (-1x2x64x64xf32, -1x2x64x64xf32) <- ([-1x2x64x64xf32, -1x2x64x64xf32]) + ( + split_8, + split_9, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.scale: (-1x2x64x64xf32) <- (-1x2x64x64xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_8, full_8, float("0"), True) + del split_8 + + # pd_op.elu: (-1x2x64x64xf32) <- (-1x2x64x64xf32) + elu_1 = paddle._C_ops.elu(split_9, float("1")) + del split_9 + + # pd_op.scale: (-1x2x64x64xf32) <- (-1x2x64x64xf32, 1xf32) + scale_10 = paddle._C_ops.scale(elu_1, full_4, float("1"), True) + del elu_1 + + # pd_op.scale: (-1x2x64x64xf32) <- (-1x2x64x64xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_8, float("0"), True) + del full_8, scale_10 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_158 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_67 = paddle._C_ops.add(conv2d_158, reshape_19) + del conv2d_158, reshape_19 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # pd_op.multiply: (-1x384x64x64xf32) <- (-1x384x64x64xf32, -1x384x1x1xf32) + multiply_42 = paddle._C_ops.multiply(swish_94, sigmoid_6) + del sigmoid_6, swish_94 + + # pd_op.conv2d: (-1x384x64x64xf32) <- (-1x384x64x64xf32, 384x384x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + multiply_42, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_42, parameter_33 + + # pd_op.batch_norm_: (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_159, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_159, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x384x64x64xf32) <- (-1x384x64x64xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (-1x91x64x64xf32) <- (-1x384x64x64xf32, 91x384x3x3xf32) + conv2d_160 = paddle._C_ops.conv2d( + swish_110, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28, swish_110 + + # pd_op.reshape: (1x91x1x1xf32) <- (91xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x91x64x64xf32) <- (-1x91x64x64xf32, 1x91x1x1xf32) + add_68 = 
paddle._C_ops.add(conv2d_160, reshape_20) + del conv2d_160, reshape_20 + + # pd_op.softmax: (-1x91x64x64xf32) <- (-1x91x64x64xf32) + softmax_1 = paddle._C_ops.softmax(add_68, 1) + del add_68 + + # pd_op.conv2d: (-1x1x64x64xf32) <- (-1x91x64x64xf32, 1x91x1x1xf32) + conv2d_161 = paddle._C_ops.conv2d( + softmax_1, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # builtin.combine: ([-1x2x64x64xf32, -1x2x64x64xf32, -1x1x64x64xf32]) <- (-1x2x64x64xf32, -1x2x64x64xf32, -1x1x64x64xf32) + combine_25 = [scale_9, scale_11, conv2d_161] + del conv2d_161, scale_11, scale_9 + + # pd_op.concat: (-1x5x64x64xf32) <- ([-1x2x64x64xf32, -1x2x64x64xf32, -1x1x64x64xf32], 1xi32) + concat_19 = paddle._C_ops.concat(combine_25, full_0) + del combine_25 + + # pd_op.sigmoid: (-1x15x64x64xf32) <- (-1x15x64x64xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_64) + del add_64 + + # pd_op.full: (xi64) <- () + full_16 = paddle._C_ops.full( + [], float("4096"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_26 = [slice_1, full_13, full_16] + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_26, 0) + del combine_26 + + # pd_op.reshape: (-1x15x4096xf32) <- (-1x15x64x64xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_7, stack_5) + del sigmoid_7, stack_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_27 = [slice_1, full_15, full_16] + del full_16, slice_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_27, 0) + del combine_27 + + # pd_op.reshape: (-1x5x4096xf32) <- (-1x5x64x64xf32, 3xi64) + reshape_22 = paddle._C_ops.reshape(concat_19, stack_6) + del concat_19, stack_6 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x128x128xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_8 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + pool2d_5, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_23 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_69 = paddle._C_ops.add(conv2d_162, reshape_23) + del conv2d_162, reshape_23 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_69) + del add_69 + + # pd_op.multiply: (-1x192x128x128xf32) <- (-1x192x128x128xf32, -1x192x1x1xf32) + multiply_43 = paddle._C_ops.multiply(swish_84, sigmoid_8) + del sigmoid_8 + + # pd_op.conv2d: (-1x192x128x128xf32) <- (-1x192x128x128xf32, 192x192x1x1xf32) + conv2d_163 = paddle._C_ops.conv2d( + multiply_43, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_43, parameter_24 + + # pd_op.batch_norm_: (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__864, + batch_norm__865, + batch_norm__866, + batch_norm__867, + batch_norm__868, + batch_norm__869, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_163, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_163, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x192x128x128xf32) <- (-1x192x128x128xf32) + swish_111 = paddle._C_ops.swish(batch_norm__864) + del batch_norm__864 + + # pd_op.add: (-1x192x128x128xf32) <- (-1x192x128x128xf32, -1x192x128x128xf32) + add_70 = paddle._C_ops.add(swish_111, swish_84) + del swish_111 + + # pd_op.conv2d: (-1x15x128x128xf32) <- (-1x192x128x128xf32, 15x192x3x3xf32) + conv2d_164 = paddle._C_ops.conv2d( + add_70, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_70, parameter_19 + + # pd_op.reshape: (1x15x1x1xf32) <- (15xf32, 4xi64) + reshape_24 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x15x128x128xf32) <- (-1x15x128x128xf32, 1x15x1x1xf32) + add_71 = paddle._C_ops.add(conv2d_164, reshape_24) + del conv2d_164, reshape_24 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_165 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_25 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_72 = paddle._C_ops.add(conv2d_165, reshape_25) + del conv2d_165, reshape_25 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_9 = paddle._C_ops.sigmoid(add_72) + del add_72 + + # pd_op.multiply: (-1x192x128x128xf32) <- (-1x192x128x128xf32, -1x192x1x1xf32) + multiply_44 = paddle._C_ops.multiply(swish_84, sigmoid_9) + del sigmoid_9 + + # pd_op.conv2d: (-1x192x128x128xf32) <- (-1x192x128x128xf32, 192x192x1x1xf32) + conv2d_166 = paddle._C_ops.conv2d( + multiply_44, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_44, parameter_15 + + # pd_op.batch_norm_: (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__870, + batch_norm__871, + batch_norm__872, + batch_norm__873, + batch_norm__874, + batch_norm__875, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_166, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_166, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x192x128x128xf32) <- (-1x192x128x128xf32) + swish_112 = paddle._C_ops.swish(batch_norm__870) + del batch_norm__870 + + # pd_op.conv2d: (-1x4x128x128xf32) <- (-1x192x128x128xf32, 4x192x3x3xf32) + conv2d_167 = paddle._C_ops.conv2d( + swish_112, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10, swish_112 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_26 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x4x128x128xf32) <- (-1x4x128x128xf32, 1x4x1x1xf32) + add_73 = paddle._C_ops.add(conv2d_167, reshape_26) + del conv2d_167, reshape_26 + + # pd_op.split_with_num: ([-1x2x128x128xf32, -1x2x128x128xf32]) <- (-1x4x128x128xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(add_73, 2, full_0) + del add_73 + + # builtin.split: (-1x2x128x128xf32, -1x2x128x128xf32) <- ([-1x2x128x128xf32, -1x2x128x128xf32]) + ( + split_10, + split_11, + ) = 
split_with_num_2 + del split_with_num_2 + + # pd_op.scale: (-1x2x128x128xf32) <- (-1x2x128x128xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_10, full_11, float("0"), True) + del split_10 + + # pd_op.elu: (-1x2x128x128xf32) <- (-1x2x128x128xf32) + elu_2 = paddle._C_ops.elu(split_11, float("1")) + del split_11 + + # pd_op.scale: (-1x2x128x128xf32) <- (-1x2x128x128xf32, 1xf32) + scale_13 = paddle._C_ops.scale(elu_2, full_4, float("1"), True) + del elu_2, full_4 + + # pd_op.scale: (-1x2x128x128xf32) <- (-1x2x128x128xf32, 1xf32) + scale_14 = paddle._C_ops.scale(scale_13, full_11, float("0"), True) + del full_11, scale_13 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_168 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_74 = paddle._C_ops.add(conv2d_168, reshape_27) + del conv2d_168, reshape_27 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_10 = paddle._C_ops.sigmoid(add_74) + del add_74 + + # pd_op.multiply: (-1x192x128x128xf32) <- (-1x192x128x128xf32, -1x192x1x1xf32) + multiply_45 = paddle._C_ops.multiply(swish_84, sigmoid_10) + del sigmoid_10, swish_84 + + # pd_op.conv2d: (-1x192x128x128xf32) <- (-1x192x128x128xf32, 192x192x1x1xf32) + conv2d_169 = paddle._C_ops.conv2d( + multiply_45, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_45, parameter_6 + + # pd_op.batch_norm_: (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__876, + batch_norm__877, + batch_norm__878, + batch_norm__879, + batch_norm__880, + batch_norm__881, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_169, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_169, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x192x128x128xf32) <- (-1x192x128x128xf32) + swish_113 = paddle._C_ops.swish(batch_norm__876) + del batch_norm__876 + + # pd_op.conv2d: (-1x91x128x128xf32) <- (-1x192x128x128xf32, 91x192x3x3xf32) + conv2d_170 = paddle._C_ops.conv2d( + swish_113, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_113 + + # pd_op.reshape: (1x91x1x1xf32) <- (91xf32, 4xi64) + reshape_28 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x91x128x128xf32) <- (-1x91x128x128xf32, 1x91x1x1xf32) + add_75 = paddle._C_ops.add(conv2d_170, reshape_28) + del conv2d_170, reshape_28 + + # pd_op.softmax: (-1x91x128x128xf32) <- (-1x91x128x128xf32) + softmax_2 = paddle._C_ops.softmax(add_75, 1) + del add_75 + + # pd_op.conv2d: (-1x1x128x128xf32) <- (-1x91x128x128xf32, 1x91x1x1xf32) + conv2d_171 = paddle._C_ops.conv2d( + softmax_2, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54, softmax_2 + + # builtin.combine: ([-1x2x128x128xf32, -1x2x128x128xf32, -1x1x128x128xf32]) <- (-1x2x128x128xf32, -1x2x128x128xf32, -1x1x128x128xf32) + combine_28 = [scale_12, scale_14, conv2d_171] + del conv2d_171, scale_12, scale_14 + + # pd_op.concat: 
(-1x5x128x128xf32) <- ([-1x2x128x128xf32, -1x2x128x128xf32, -1x1x128x128xf32], 1xi32) + concat_20 = paddle._C_ops.concat(combine_28, full_0) + del combine_28, full_0 + + # pd_op.sigmoid: (-1x15x128x128xf32) <- (-1x15x128x128xf32) + sigmoid_11 = paddle._C_ops.sigmoid(add_71) + del add_71 + + # pd_op.full: (xi64) <- () + full_17 = paddle._C_ops.full( + [], float("16384"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_29 = [slice_2, full_13, full_17] + del full_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_29, 0) + del combine_29 + + # pd_op.reshape: (-1x15x16384xf32) <- (-1x15x128x128xf32, 3xi64) + reshape_29 = paddle._C_ops.reshape(sigmoid_11, stack_7) + del sigmoid_11, stack_7 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_30 = [slice_2, full_15, full_17] + del full_15, full_17, slice_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_30, 0) + del combine_30 + + # pd_op.reshape: (-1x5x16384xf32) <- (-1x5x128x128xf32, 3xi64) + reshape_30 = paddle._C_ops.reshape(concat_20, stack_8) + del concat_20, stack_8 + + # pd_op.full: (1xi32) <- () + full_18 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x15x1024xf32, -1x15x4096xf32, -1x15x16384xf32]) <- (-1x15x1024xf32, -1x15x4096xf32, -1x15x16384xf32) + combine_31 = [reshape_13, reshape_21, reshape_29] + del reshape_13, reshape_21, reshape_29 + + # pd_op.concat: (-1x15x21504xf32) <- ([-1x15x1024xf32, -1x15x4096xf32, -1x15x16384xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_31, full_18) + del combine_31 + + # builtin.combine: ([-1x5x1024xf32, -1x5x4096xf32, -1x5x16384xf32]) <- (-1x5x1024xf32, -1x5x4096xf32, -1x5x16384xf32) + combine_32 = [reshape_14, reshape_22, reshape_30] + del reshape_14, reshape_22, reshape_30 + + # pd_op.concat: (-1x5x21504xf32) <- ([-1x5x1024xf32, -1x5x4096xf32, -1x5x16384xf32], 1xi32) + concat_21 = paddle._C_ops.concat(combine_32, full_18) + del combine_32 + + # pd_op.transpose: (-1x21504x5xf32) <- (-1x5x21504xf32) + transpose_0 = paddle._C_ops.transpose(concat_21, [0, 2, 1]) + del concat_21 + + # pd_op.full: (1xi32) <- () + full_19 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split: ([-1x21504x2xf32, -1x21504x3xf32]) <- (-1x21504x5xf32, 2xi64, 1xi32) + split_12 = paddle._C_ops.split(transpose_0, full_int_array_0, full_19) + del full_19, full_int_array_0, transpose_0 + + # builtin.split: (-1x21504x2xf32, -1x21504x3xf32) <- ([-1x21504x2xf32, -1x21504x3xf32]) + ( + split_13, + split_14, + ) = split_12 + del split_12 + + # pd_op.add: (-1x21504x2xf32) <- (-1x21504x2xf32, 1x21504x2xf32) + add_76 = paddle._C_ops.add(split_13, concat_16) + del concat_16, split_13 + + # builtin.combine: ([-1x21504x2xf32, -1x21504x3xf32]) <- (-1x21504x2xf32, -1x21504x3xf32) + combine_33 = [add_76, split_14] + del add_76, split_14 + + # pd_op.concat: (-1x21504x5xf32) <- ([-1x21504x2xf32, -1x21504x3xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_33, full_18) + del combine_33, full_18 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/weight_meta.py new file mode 100644 index 000000000..ff16c67d6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/weight_meta.py @@ -0,0 +1,8453 @@ +class Program_weight_tensor_parameter_0: + 
name = "parameter_0" + shape = [91] + dtype = "float32" + min_val = float("0.877182") + max_val = float("8.40945") + mean = float("0.993277") + std = float("0.782274") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [91, 192, 3, 3] + dtype = "float32" + min_val = float("-0.128916") + max_val = float("0.178208") + mean = float("5.45697e-12") + std = float("0.0059698") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0150886") + max_val = float("0.220627") + mean = float("0.0669744") + std = float("0.0433538") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.946031") + max_val = float("1.03046") + mean = float("1.00162") + std = float("0.00895951") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.00658349") + max_val = float("1.16729") + mean = float("0.223915") + std = float("0.187155") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.363783") + max_val = float("0.328094") + mean = float("-0.0229285") + std = float("0.135406") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.379053") + max_val = float("0.419396") + mean = float("-0.000356726") + std = float("0.0920988") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.0155162") + max_val = float("0.0169016") + mean = float("-4.8625e-06") + std = float("0.00442658") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0386631") + max_val = float("0.0373425") + mean = float("5.72623e-06") + std = float("0.0090976") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.153646") + max_val = float("0.153742") + mean = float("0.0198921") + std = float("0.0369248") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.00216282") + max_val = float("0.255776") + mean = float("0.125974") + std = float("0.0552256") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.982561") + max_val = float("1.0436") + mean = float("1.00646") + std = float("0.0134768") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.00339444") + max_val = float("3.14997") + mean = float("0.278499") + std = float("0.3785") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.506493") + max_val = float("0.494137") + mean = float("-0.0431043") + std = float("0.150596") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + 
dtype = "float32" + min_val = float("-0.387041") + max_val = float("0.377885") + mean = float("-0.00117551") + std = float("0.0925499") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0339666") + max_val = float("0.0590763") + mean = float("-3.76376e-05") + std = float("0.00890678") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0422493") + max_val = float("0.0381497") + mean = float("4.02209e-05") + std = float("0.00914441") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [15] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [15, 192, 3, 3] + dtype = "float32" + min_val = float("-2.90567") + max_val = float("0.369179") + mean = float("-0.209713") + std = float("0.38908") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.484906") + max_val = float("1.13642") + mean = float("0.156505") + std = float("0.283327") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.347111") + max_val = float("1.26039") + mean = float("0.967426") + std = float("0.122821") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.0123304") + max_val = float("113.217") + mean = float("5.25092") + std = float("14.6164") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-1.46575") + max_val = float("2.7441") + mean = float("-0.108308") + std = float("0.595511") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-1.25381") + max_val = float("1.08789") + mean = float("-0.0125534") + std = float("0.114296") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.182331") + max_val = float("0.163191") + mean = float("-0.000126207") + std = float("0.0575178") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.058022") + max_val = float("0.0885056") + mean = float("-3.53841e-05") + std = float("0.0107611") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [91] + dtype = "float32" + min_val = float("0.891881") + max_val = float("8.34487") + mean = float("0.993277") + std = float("0.775188") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [91, 384, 3, 3] + dtype = "float32" + min_val = float("-0.612899") + max_val = float("0.464294") + mean = float("2.18279e-11") + std = float("0.00943922") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.0507016") + max_val = float("0.123661") + mean = float("0.0333182") + std = float("0.03886") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + 
min_val = float("0.933039") + max_val = float("1.02186") + mean = float("0.997379") + std = float("0.0147948") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.0105492") + max_val = float("6.86746") + mean = float("0.3761") + std = float("0.683657") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.910786") + max_val = float("1.16573") + mean = float("0.015651") + std = float("0.37882") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.29345") + max_val = float("0.317778") + mean = float("0.000314139") + std = float("0.0657461") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.0213208") + max_val = float("0.0287533") + mean = float("3.81929e-05") + std = float("0.00524146") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0489233") + max_val = float("0.0531111") + mean = float("-3.35779e-05") + std = float("0.00942571") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.347818") + max_val = float("0.557307") + mean = float("0.0132944") + std = float("0.055601") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0930328") + max_val = float("0.402702") + mean = float("0.0936579") + std = float("0.072547") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("0.938371") + max_val = float("1.05693") + mean = float("0.996096") + std = float("0.0144091") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("0.0299231") + max_val = float("9.95969") + mean = float("0.559694") + std = float("0.778694") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-1.95233") + max_val = float("2.55836") + mean = float("0.000464053") + std = float("0.526444") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.330658") + max_val = float("0.365838") + mean = float("-3.98001e-05") + std = float("0.0666285") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.0303532") + max_val = float("0.0403522") + mean = float("6.50974e-05") + std = float("0.00829692") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0446622") + max_val = float("0.0455009") + mean = float("7.36713e-06") + std = float("0.0092036") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [15] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [15, 384, 3, 3] + dtype = "float32" + min_val = float("-2.26341") + max_val = float("2.21788") + mean = float("-0.045758") + std = float("0.193034") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-1.04832") + max_val = float("1.85397") + mean = float("0.249053") + std = float("0.613197") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.628778") + max_val = float("1.27621") + mean = float("0.960137") + std = float("0.0980127") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.0251774") + max_val = float("67.2734") + mean = float("4.84938") + std = float("7.73527") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-6.16895") + max_val = float("6.10034") + mean = float("-0.629452") + std = float("1.46774") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.726994") + max_val = float("0.663052") + mean = float("-0.00468944") + std = float("0.0751415") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.114512") + max_val = float("0.274733") + mean = float("-2.88235e-05") + std = float("0.038074") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0560383") + max_val = float("0.0631579") + mean = float("1.14647e-05") + std = float("0.00957159") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [1, 91, 1, 1] + dtype = "float32" + max_val = float("1.57103") + mean = float("0.785513") + std = float("0.458528") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [91] + dtype = "float32" + min_val = float("0.9008") + max_val = float("8.83038") + mean = float("0.993277") + std = float("0.826177") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [91, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0306847") + max_val = float("0.0286871") + mean = float("6.36646e-12") + std = float("0.00173076") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("-0.00317967") + max_val = float("0.00845782") + mean = float("0.00206217") + std = float("0.00312036") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("0.999325") + max_val = float("1.00215") + mean = float("1.00054") + std = float("0.00041489") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768] + dtype = "float32" + min_val = float("0.00424119") + max_val = float("2.56222") + mean = float("0.192023") + std = float("0.241115") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [768] + dtype = "float32" + min_val = float("-0.483635") + max_val = float("0.396868") + mean = 
float("0.00195309") + std = float("0.149763") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.22877") + max_val = float("0.211554") + mean = float("4.27815e-06") + std = float("0.0461663") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [768] + dtype = "float32" + min_val = float("-0.000942905") + max_val = float("0.000813291") + mean = float("2.87677e-07") + std = float("0.000213226") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0421295") + max_val = float("0.0447389") + mean = float("-1.34209e-05") + std = float("0.00904798") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-0.575427") + max_val = float("0.327722") + mean = float("0.00157116") + std = float("0.048682") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [768] + dtype = "float32" + min_val = float("-0.0591507") + max_val = float("0.0632899") + mean = float("-0.0052611") + std = float("0.0205046") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [768] + dtype = "float32" + min_val = float("0.907904") + max_val = float("1.12458") + mean = float("0.998695") + std = float("0.0151758") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [768] + dtype = "float32" + min_val = float("0.0040485") + max_val = float("761.942") + mean = float("8.47951") + std = float("41.0635") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [768] + dtype = "float32" + min_val = float("-1.46709") + max_val = float("3.06514") + mean = float("0.0221035") + std = float("0.343846") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.408368") + max_val = float("0.426008") + mean = float("0.0014699") + std = float("0.0486754") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [768] + dtype = "float32" + min_val = float("-0.108961") + max_val = float("0.0565043") + mean = float("-1.3717e-05") + std = float("0.0123372") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0430523") + max_val = float("0.0407983") + mean = float("8.64601e-06") + std = float("0.00911004") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [15] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [15, 768, 3, 3] + dtype = "float32" + min_val = float("-2.91125") + max_val = float("0.555559") + mean = float("-0.0276697") + std = float("0.12425") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [768] + dtype = "float32" + min_val = float("-0.610156") + max_val = float("0.758607") + mean = float("0.0837075") + std = float("0.290929") + data = None + 
+ +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [768] + dtype = "float32" + min_val = float("0.781808") + max_val = float("1.36956") + mean = float("0.992477") + std = float("0.0723641") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [768] + dtype = "float32" + min_val = float("0.00748565") + max_val = float("774.042") + mean = float("27.2262") + std = float("65.5261") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [768] + dtype = "float32" + min_val = float("-7.29225") + max_val = float("5.29933") + mean = float("-0.218485") + std = float("1.23564") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.47751") + max_val = float("0.610723") + mean = float("-0.00272541") + std = float("0.0572338") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [768] + dtype = "float32" + min_val = float("-0.13397") + max_val = float("0.0992914") + mean = float("4.83042e-05") + std = float("0.0242773") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0422658") + max_val = float("0.0488555") + mean = float("4.02433e-06") + std = float("0.00931904") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [768] + dtype = "float32" + min_val = float("-0.696603") + max_val = float("0.806137") + mean = float("0.0817205") + std = float("0.297821") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [768] + dtype = "float32" + min_val = float("0.619233") + max_val = float("1.56676") + mean = float("0.986595") + std = float("0.100654") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [768] + dtype = "float32" + min_val = float("3.47772") + max_val = float("38923.4") + mean = float("1382.77") + std = float("3162.68") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [768] + dtype = "float32" + min_val = float("-32.2776") + max_val = float("29.8367") + mean = float("0.0772672") + std = float("6.22384") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.536894") + max_val = float("0.699611") + mean = float("8.05193e-05") + std = float("0.0616149") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [384] + dtype = "float32" + min_val = float("-0.1146") + max_val = float("0.12233") + mean = float("-0.00723084") + std = float("0.0366562") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("0.811486") + max_val = float("1.26255") + mean = float("1.00823") + std = float("0.0586068") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384] + dtype = "float32" + min_val = float("0.262443") + max_val = float("444.702") + mean = float("25.8179") + std = float("43.9889") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-2.11258") + max_val = float("2.43772") + mean = float("0.0998679") + std = float("0.609864") + data = None 
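# A minimal sketch of how one of the generated Program_weight_tensor_parameter_*
# records above could be consumed. Each record stores only summary statistics
# (shape, dtype, min/max and, for most records, mean/std) with data = None, so a
# placeholder weight can only be synthesized from those statistics.
# `make_placeholder`, the seed argument and the use of NumPy are illustrative
# assumptions for this sketch, not utilities provided by PaddleX or this patch.
import numpy as np


def make_placeholder(meta, seed=0):
    """Draw a dummy tensor matching the recorded shape, dtype and statistics."""
    rng = np.random.default_rng(seed)
    # Assumes the record defines mean/std; a few small records above only
    # carry min_val/max_val and would need a different fallback.
    values = rng.normal(float(meta.mean), float(meta.std), size=meta.shape)
    # Clamp to the recorded range so the placeholder stays within the
    # min/max observed for the real weight.
    values = np.clip(values, float(meta.min_val), float(meta.max_val))
    return values.astype(meta.dtype)


# e.g. make_placeholder(Program_weight_tensor_parameter_90).shape == (384,)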
+ + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.475674") + max_val = float("0.37166") + mean = float("0.00302042") + std = float("0.0730386") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("-0.0789028") + max_val = float("0.0619333") + mean = float("-0.00577501") + std = float("0.0238715") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("0.897984") + max_val = float("1.18145") + mean = float("1.00433") + std = float("0.0434142") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384] + dtype = "float32" + min_val = float("0.197346") + max_val = float("4591.16") + mean = float("214.268") + std = float("478.096") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-7.88997") + max_val = float("9.74397") + mean = float("0.0529707") + std = float("2.05025") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.226377") + max_val = float("0.284484") + mean = float("0.00016763") + std = float("0.026481") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("-0.0907502") + max_val = float("0.0852418") + mean = float("-0.00854405") + std = float("0.0320432") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("0.783884") + max_val = float("1.21458") + mean = float("0.994006") + std = float("0.0570193") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384] + dtype = "float32" + min_val = float("2.15634") + max_val = float("70109.3") + mean = float("516.086") + std = float("3594.03") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-11.3011") + max_val = float("41.4135") + mean = float("0.160822") + std = float("3.37354") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.262635") + max_val = float("0.736791") + mean = float("0.000372577") + std = float("0.0289898") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("-0.0391431") + max_val = float("0.0343779") + mean = float("-0.00435717") + std = float("0.010884") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("0.932092") + max_val = float("1.06085") + mean = float("0.998739") + std = float("0.0197838") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384] + dtype = "float32" + min_val = float("0.115183") + max_val = float("93.317") + mean = float("5.86999") + std = float("9.86346") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-1.19009") + max_val = float("1.02783") + mean = float("0.0460531") + std = 
float("0.269698") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.289539") + max_val = float("0.302107") + mean = float("0.00131288") + std = float("0.0664707") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("-0.0376836") + max_val = float("0.0334493") + mean = float("-0.00430655") + std = float("0.0105386") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("0.928018") + max_val = float("1.08186") + mean = float("0.998044") + std = float("0.0216224") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384] + dtype = "float32" + min_val = float("1.64052") + max_val = float("926.692") + mean = float("70.6442") + std = float("106.512") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-4.2866") + max_val = float("3.6185") + mean = float("0.163523") + std = float("1.07817") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.161027") + max_val = float("0.140722") + mean = float("0.000562854") + std = float("0.0237057") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("-0.0464781") + max_val = float("0.0444253") + mean = float("-0.0070937") + std = float("0.0126462") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("0.902588") + max_val = float("1.09117") + mean = float("0.995299") + std = float("0.02712") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384] + dtype = "float32" + min_val = float("1.20329") + max_val = float("2845.48") + mean = float("187.732") + std = float("266.646") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-7.29734") + max_val = float("7.97309") + mean = float("0.0406299") + std = float("1.65827") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.164235") + max_val = float("0.179667") + mean = float("2.92452e-05") + std = float("0.0245917") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("-0.0216404") + max_val = float("0.0305013") + mean = float("-0.00355329") + std = float("0.00676937") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("0.945684") + max_val = float("1.04694") + mean = float("0.999204") + std = float("0.0129503") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384] + dtype = "float32" + min_val = float("0.15966") + max_val = float("34.2892") + mean = float("3.09401") + std = float("3.66962") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.567879") + max_val = float("0.667796") 
+ mean = float("0.0364117") + std = float("0.187583") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.272359") + max_val = float("0.286916") + mean = float("0.00115292") + std = float("0.0658946") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] + dtype = "float32" + min_val = float("-0.0211202") + max_val = float("0.0296509") + mean = float("-0.00346369") + std = float("0.00650824") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("0.954891") + max_val = float("1.05621") + mean = float("0.999106") + std = float("0.0141641") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384] + dtype = "float32" + min_val = float("0.415837") + max_val = float("181.646") + mean = float("31.1867") + std = float("30.3212") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [384] + dtype = "float32" + min_val = float("-2.15357") + max_val = float("1.75768") + mean = float("0.10672") + std = float("0.637922") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.118324") + max_val = float("0.117122") + mean = float("0.000368882") + std = float("0.0227871") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [384] + dtype = "float32" + min_val = float("-0.0272537") + max_val = float("0.0309027") + mean = float("-0.00481844") + std = float("0.00916649") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [384] + dtype = "float32" + min_val = float("0.908988") + max_val = float("1.07948") + mean = float("0.995765") + std = float("0.0198002") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [384] + dtype = "float32" + min_val = float("0.510545") + max_val = float("230.923") + mean = float("36.744") + std = float("36.2149") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [384] + dtype = "float32" + min_val = float("-2.44497") + max_val = float("2.60491") + mean = float("0.107389") + std = float("0.903497") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.133943") + max_val = float("0.131533") + mean = float("0.000353968") + std = float("0.0236025") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [384] + dtype = "float32" + min_val = float("-0.0239787") + max_val = float("0.0262966") + mean = float("-0.0034731") + std = float("0.00719533") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [384] + dtype = "float32" + min_val = float("0.939076") + max_val = float("1.0633") + mean = float("0.998074") + std = float("0.0171947") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [384] + dtype = "float32" + min_val = float("0.106364") + max_val = float("84.2305") + mean = float("6.59058") + std = float("10.7777") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [384] + dtype = "float32" + min_val = 
float("-3.06921") + max_val = float("1.96613") + mean = float("0.0555672") + std = float("0.634851") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.20559") + max_val = float("0.188911") + mean = float("0.000528236") + std = float("0.0390187") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [384] + dtype = "float32" + min_val = float("-0.175548") + max_val = float("0.0945382") + mean = float("-0.0184112") + std = float("0.0324003") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [384] + dtype = "float32" + min_val = float("0.605111") + max_val = float("1.1453") + mean = float("0.982498") + std = float("0.0548707") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [384] + dtype = "float32" + min_val = float("0.398498") + max_val = float("1296.33") + mean = float("42.691") + std = float("111.656") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-4.82186") + max_val = float("10.6965") + mean = float("0.239932") + std = float("1.62645") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.304295") + max_val = float("0.326482") + mean = float("0.00198698") + std = float("0.0459777") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("-0.0164474") + max_val = float("0.0229604") + mean = float("0.000316569") + std = float("0.00563347") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("0.916764") + max_val = float("1.12311") + mean = float("1.00649") + std = float("0.0278337") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384] + dtype = "float32" + min_val = float("0.973033") + max_val = float("822.927") + mean = float("48.0236") + std = float("89.6557") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [384] + dtype = "float32" + min_val = float("-9.26825") + max_val = float("12.6002") + mean = float("0.630577") + std = float("3.53906") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.12711") + max_val = float("0.150926") + mean = float("0.000452219") + std = float("0.0230884") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [384] + dtype = "float32" + min_val = float("-1.04579") + max_val = float("1.92241") + mean = float("0.230886") + std = float("0.610372") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [384] + dtype = "float32" + min_val = float("0.601836") + max_val = float("1.7122") + mean = float("0.95531") + std = float("0.127606") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [384] + dtype = "float32" + min_val = float("17.3201") + max_val = float("163923.0") + mean = float("1453.52") + std = float("10363.7") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [384] + 
dtype = "float32" + min_val = float("-31.1601") + max_val = float("33.2699") + mean = float("-1.02696") + std = float("3.17863") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-1.6603") + max_val = float("2.97545") + mean = float("-0.008633") + std = float("0.090097") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("-0.194767") + max_val = float("0.294472") + mean = float("-0.0233228") + std = float("0.0587915") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("0.712298") + max_val = float("1.62272") + mean = float("1.03499") + std = float("0.124621") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192] + dtype = "float32" + min_val = float("0.33456") + max_val = float("456.995") + mean = float("11.9797") + std = float("35.0885") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.371407") + max_val = float("1.04339") + mean = float("0.0509573") + std = float("0.183773") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.528498") + max_val = float("0.891468") + mean = float("0.00686413") + std = float("0.105194") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("-0.138649") + max_val = float("0.142665") + mean = float("-0.0137971") + std = float("0.0343011") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("0.827269") + max_val = float("1.38624") + mean = float("0.997735") + std = float("0.0628707") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192] + dtype = "float32" + min_val = float("0.463259") + max_val = float("557.039") + mean = float("39.363") + std = float("67.5477") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-1.16661") + max_val = float("1.42887") + mean = float("0.0418912") + std = float("0.392438") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.220773") + max_val = float("0.325277") + mean = float("0.000366562") + std = float("0.0347667") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("-0.136427") + max_val = float("0.100632") + mean = float("-0.0255659") + std = float("0.0488208") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("0.677168") + max_val = float("1.69017") + mean = float("0.98428") + std = float("0.109758") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192] + dtype = "float32" + min_val = float("1.31333") + max_val = float("4527.47") + mean = float("233.731") + std = float("502.24") + data = None + + +class Program_weight_tensor_parameter_165: + name = 
"parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-3.75035") + max_val = float("6.00459") + mean = float("-0.0327371") + std = float("1.21323") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.409575") + max_val = float("0.618041") + mean = float("9.89239e-05") + std = float("0.0412771") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("-0.0900733") + max_val = float("0.0632743") + mean = float("-0.0149884") + std = float("0.0253332") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("0.898573") + max_val = float("1.17436") + mean = float("1.00027") + std = float("0.0419993") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192] + dtype = "float32" + min_val = float("0.0435796") + max_val = float("62.5783") + mean = float("5.50164") + std = float("9.51597") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.363209") + max_val = float("0.392473") + mean = float("0.0208475") + std = float("0.139701") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.361923") + max_val = float("0.463382") + mean = float("0.00139017") + std = float("0.0966078") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("-0.0964994") + max_val = float("0.0677587") + mean = float("-0.015352") + std = float("0.0268539") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("0.85219") + max_val = float("1.39826") + mean = float("0.990621") + std = float("0.0522755") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192] + dtype = "float32" + min_val = float("0.274308") + max_val = float("889.981") + mean = float("43.4119") + std = float("94.1824") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [192] + dtype = "float32" + min_val = float("-1.65395") + max_val = float("1.55758") + mean = float("0.0433233") + std = float("0.478715") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.207538") + max_val = float("0.2139") + mean = float("0.000219662") + std = float("0.0345904") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("-0.0990062") + max_val = float("0.088517") + mean = float("-0.0159438") + std = float("0.0333741") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("0.765253") + max_val = float("1.17699") + mean = float("0.984961") + std = float("0.0600722") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192] + dtype = "float32" + min_val = float("4.09633") + max_val = float("12211.4") + mean = float("564.601") + std = float("1356.82") + data = None + + +class 
Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-8.97993") + max_val = float("8.13058") + mean = float("-0.247795") + std = float("1.96702") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.283698") + max_val = float("0.269537") + mean = float("-0.00100076") + std = float("0.0368439") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("-0.0583135") + max_val = float("0.0515282") + mean = float("-0.00694267") + std = float("0.0191664") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("0.908051") + max_val = float("1.14271") + mean = float("1.00615") + std = float("0.0334371") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("0.0400766") + max_val = float("122.701") + mean = float("5.19849") + std = float("11.8725") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.533708") + max_val = float("0.807457") + mean = float("0.0404764") + std = float("0.161235") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.432453") + max_val = float("0.408917") + mean = float("0.0039336") + std = float("0.0958764") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("-0.0463518") + max_val = float("0.0359835") + mean = float("-0.00622377") + std = float("0.0149874") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("0.889761") + max_val = float("1.09578") + mean = float("0.989522") + std = float("0.0346185") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192] + dtype = "float32" + min_val = float("0.509445") + max_val = float("678.159") + mean = float("27.9167") + std = float("60.2359") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-2.1565") + max_val = float("1.33522") + mean = float("0.03486") + std = float("0.443388") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.21815") + max_val = float("0.148176") + mean = float("0.000274103") + std = float("0.0330544") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("-0.110089") + max_val = float("0.0616018") + mean = float("-0.00992149") + std = float("0.0289617") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("0.823583") + max_val = float("1.23492") + mean = float("0.987365") + std = float("0.0561288") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192] + dtype = "float32" + min_val = float("1.23603") + max_val = float("494.819") + mean = float("48.1713") + std = 
float("75.5185") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [192] + dtype = "float32" + min_val = float("-1.84393") + max_val = float("2.94947") + mean = float("0.0452953") + std = float("0.812476") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.263582") + max_val = float("0.262209") + mean = float("0.000306907") + std = float("0.0356392") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [192] + dtype = "float32" + min_val = float("-0.0498079") + max_val = float("0.0568802") + mean = float("-0.00630744") + std = float("0.0190522") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [192] + dtype = "float32" + min_val = float("0.872373") + max_val = float("1.22478") + mean = float("0.989828") + std = float("0.0416918") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [192] + dtype = "float32" + min_val = float("0.266218") + max_val = float("128.442") + mean = float("11.113") + std = float("16.5731") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-1.64229") + max_val = float("1.52159") + mean = float("0.0252965") + std = float("0.431795") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.28714") + max_val = float("0.317958") + mean = float("0.000708386") + std = float("0.0581333") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("-0.136058") + max_val = float("0.0585401") + mean = float("-0.0203575") + std = float("0.0351045") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("0.574764") + max_val = float("1.38488") + mean = float("0.954825") + std = float("0.0845629") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192] + dtype = "float32" + min_val = float("0.247479") + max_val = float("2146.49") + mean = float("62.6485") + std = float("217.952") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-7.07974") + max_val = float("4.28925") + mean = float("-0.0331852") + std = float("1.09888") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.746348") + max_val = float("0.869955") + mean = float("-0.000149784") + std = float("0.0710865") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("-0.0594283") + max_val = float("0.0425612") + mean = float("-0.00892828") + std = float("0.0199237") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("0.747908") + max_val = float("1.12917") + mean = float("0.992536") + std = float("0.0545915") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192] + dtype = "float32" + min_val = float("0.803226") + max_val = 
float("2706.74") + mean = float("91.0405") + std = float("329.521") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-13.7659") + max_val = float("16.1078") + mean = float("0.341559") + std = float("2.91453") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.586446") + max_val = float("0.756866") + mean = float("0.00166009") + std = float("0.0382125") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("-0.51222") + max_val = float("1.05992") + mean = float("0.126397") + std = float("0.258412") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("0.199432") + max_val = float("1.52402") + mean = float("0.940871") + std = float("0.173961") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192] + dtype = "float32" + min_val = float("1.51084") + max_val = float("2422.24") + mean = float("110.501") + std = float("242.671") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-1.65387") + max_val = float("2.87343") + mean = float("-0.127357") + std = float("0.702115") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.619559") + max_val = float("1.20148") + mean = float("-0.00786138") + std = float("0.113945") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("-0.258723") + max_val = float("0.0925575") + mean = float("-0.0188967") + std = float("0.0550286") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("0.761539") + max_val = float("1.32246") + mean = float("1.01678") + std = float("0.0798023") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("0.251207") + max_val = float("119.893") + mean = float("7.32287") + std = float("14.9814") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.315096") + max_val = float("0.310932") + mean = float("0.00549788") + std = float("0.130201") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.455177") + max_val = float("0.534389") + mean = float("0.00292207") + std = float("0.139234") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("-0.179448") + max_val = float("0.0641929") + mean = float("-0.014071") + std = float("0.0403469") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("0.816665") + max_val = float("1.21905") + mean = float("0.998645") + std = float("0.0625322") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = 
float("2.44014") + max_val = float("1837.06") + mean = float("83.0239") + std = float("243.258") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-1.43893") + max_val = float("2.38085") + mean = float("0.0182832") + std = float("0.505594") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.279388") + max_val = float("0.353141") + mean = float("0.000671006") + std = float("0.0507518") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("-0.170389") + max_val = float("0.121929") + mean = float("-0.0199139") + std = float("0.0560429") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("0.681239") + max_val = float("1.29549") + mean = float("0.988103") + std = float("0.098565") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("2.36948") + max_val = float("1201.2") + mean = float("109.059") + std = float("182.565") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-2.66997") + max_val = float("3.36824") + mean = float("0.0648373") + std = float("0.94691") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.281493") + max_val = float("0.293921") + mean = float("0.000483919") + std = float("0.0529507") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("-0.115587") + max_val = float("0.0380624") + mean = float("-0.0142143") + std = float("0.028026") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("0.855433") + max_val = float("1.14759") + mean = float("0.998586") + std = float("0.0427177") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("0.0639763") + max_val = float("6.9312") + mean = float("1.09626") + std = float("1.06072") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.26312") + max_val = float("0.392399") + mean = float("0.0135331") + std = float("0.146641") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.495775") + max_val = float("0.556032") + mean = float("0.000502987") + std = float("0.133449") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("-0.140773") + max_val = float("0.0450861") + mean = float("-0.0174376") + std = float("0.034146") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("0.879512") + max_val = float("1.1629") + mean = float("0.995569") + std = float("0.044189") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" 
+ min_val = float("0.24277") + max_val = float("169.4") + mean = float("11.1743") + std = float("18.8006") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-0.578891") + max_val = float("1.45981") + mean = float("0.0772112") + std = float("0.437565") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.341535") + max_val = float("0.217225") + mean = float("0.00124153") + std = float("0.0472001") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("-0.152525") + max_val = float("0.154776") + mean = float("-0.012056") + std = float("0.0644723") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("0.68198") + max_val = float("1.16393") + mean = float("0.979808") + std = float("0.0697055") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("2.1232") + max_val = float("461.16") + mean = float("46.0347") + std = float("71.6091") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96] + dtype = "float32" + min_val = float("-3.28414") + max_val = float("2.96856") + mean = float("-0.188492") + std = float("1.20658") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.30215") + max_val = float("0.230948") + mean = float("-0.00152358") + std = float("0.0507553") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("-0.13592") + max_val = float("0.0532456") + mean = float("-0.0142334") + std = float("0.0301638") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96] + dtype = "float32" + min_val = float("0.90681") + max_val = float("1.11646") + mean = float("1.00126") + std = float("0.0396633") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("0.0752334") + max_val = float("14.977") + mean = float("1.26618") + std = float("1.70785") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96] + dtype = "float32" + min_val = float("-0.349419") + max_val = float("0.463675") + mean = float("0.0233694") + std = float("0.146608") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.522396") + max_val = float("0.518311") + mean = float("0.00347223") + std = float("0.133901") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("-0.166976") + max_val = float("0.0687658") + mean = float("-0.0179712") + std = float("0.0388761") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96] + dtype = "float32" + min_val = float("0.842969") + max_val = float("1.13231") + mean = float("0.992037") + std = float("0.0464498") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = 
"float32" + min_val = float("0.121267") + max_val = float("64.8448") + mean = float("6.91268") + std = float("10.9642") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96] + dtype = "float32" + min_val = float("-0.808938") + max_val = float("1.78066") + mean = float("0.0612192") + std = float("0.524093") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.213637") + max_val = float("0.26121") + mean = float("0.00120516") + std = float("0.0485494") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("-0.187071") + max_val = float("0.104777") + mean = float("-0.0101053") + std = float("0.059951") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96] + dtype = "float32" + min_val = float("0.616066") + max_val = float("1.2073") + mean = float("0.982213") + std = float("0.0866724") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [96] + dtype = "float32" + min_val = float("0.594645") + max_val = float("231.567") + mean = float("18.8141") + std = float("32.7849") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [96] + dtype = "float32" + min_val = float("-1.67772") + max_val = float("2.33029") + mean = float("0.0564106") + std = float("0.664833") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.518288") + max_val = float("0.437914") + mean = float("0.00030005") + std = float("0.0589145") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [96] + dtype = "float32" + min_val = float("-0.153127") + max_val = float("0.125309") + mean = float("-0.0116483") + std = float("0.0515388") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [96] + dtype = "float32" + min_val = float("0.59631") + max_val = float("1.51103") + mean = float("0.982552") + std = float("0.142295") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [96] + dtype = "float32" + min_val = float("1.39238") + max_val = float("781.378") + mean = float("41.4838") + std = float("91.507") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [96] + dtype = "float32" + min_val = float("-1.33067") + max_val = float("4.08105") + mean = float("0.358732") + std = float("0.824744") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-1.41628") + max_val = float("0.739793") + mean = float("-0.0084089") + std = float("0.0813229") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [96] + dtype = "float32" + min_val = float("-0.105492") + max_val = float("0.0828045") + mean = float("-0.00455445") + std = float("0.0434266") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [96] + dtype = "float32" + min_val = float("0.688956") + max_val = float("1.23964") + mean = float("0.984143") + std = float("0.0986745") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] 
+ dtype = "float32" + min_val = float("0.447428") + max_val = float("269.625") + mean = float("14.1089") + std = float("28.3845") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96] + dtype = "float32" + min_val = float("-2.33771") + max_val = float("1.85228") + mean = float("-0.0482937") + std = float("0.637914") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.358608") + max_val = float("0.653689") + mean = float("0.000792925") + std = float("0.0699691") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("-0.0739306") + max_val = float("0.0727139") + mean = float("-0.000145623") + std = float("0.022709") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("0.578968") + max_val = float("1.48448") + mean = float("1.00638") + std = float("0.113897") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192] + dtype = "float32" + min_val = float("0.359775") + max_val = float("864.301") + mean = float("38.1066") + std = float("106.125") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-4.20147") + max_val = float("2.92375") + mean = float("-0.192031") + std = float("0.877454") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-1.32956") + max_val = float("0.749342") + mean = float("-0.00673329") + std = float("0.0850714") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("-0.0740596") + max_val = float("0.107538") + mean = float("-0.00259845") + std = float("0.0249234") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("0.572386") + max_val = float("1.47143") + mean = float("0.996013") + std = float("0.1005") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384] + dtype = "float32" + min_val = float("0.49967") + max_val = float("1281.57") + mean = float("38.9638") + std = float("106.344") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-4.91208") + max_val = float("4.19611") + mean = float("-0.049233") + std = float("1.10985") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.526038") + max_val = float("0.532592") + mean = float("-0.000444086") + std = float("0.0769584") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("-0.0721193") + max_val = float("0.0732841") + mean = float("-0.00115785") + std = float("0.0264436") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("0.713209") + max_val = float("1.48343") + mean = float("1.00318") + std = float("0.0947352") + data = None + + +class Program_weight_tensor_parameter_284: + 
name = "parameter_284" + shape = [192] + dtype = "float32" + min_val = float("0.0635321") + max_val = float("76.5136") + mean = float("6.85208") + std = float("9.78308") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-1.08755") + max_val = float("1.15928") + mean = float("-0.0326197") + std = float("0.303907") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.547425") + max_val = float("0.463725") + mean = float("-0.00175964") + std = float("0.101882") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("-0.0524512") + max_val = float("0.0489817") + mean = float("-0.00176026") + std = float("0.0182054") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("0.69951") + max_val = float("1.18178") + mean = float("0.982897") + std = float("0.066713") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192] + dtype = "float32" + min_val = float("0.442201") + max_val = float("316.954") + mean = float("38.1873") + std = float("60.7814") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-2.14913") + max_val = float("2.39724") + mean = float("-0.0172111") + std = float("0.827171") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.222358") + max_val = float("0.245729") + mean = float("-0.000106222") + std = float("0.034196") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("-0.0894685") + max_val = float("0.0655666") + mean = float("-0.00505891") + std = float("0.0287383") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("0.706643") + max_val = float("1.41354") + mean = float("0.996181") + std = float("0.0903274") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192] + dtype = "float32" + min_val = float("2.37073") + max_val = float("2165.14") + mean = float("277.858") + std = float("359.106") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-4.91035") + max_val = float("6.39829") + mean = float("-0.245971") + std = float("2.20864") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.262721") + max_val = float("0.285556") + mean = float("-0.000990375") + std = float("0.0376111") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("-0.0496921") + max_val = float("0.0548411") + mean = float("0.00115262") + std = float("0.0134173") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("0.837418") + max_val = float("1.1849") + mean = float("1.00162") + std = float("0.0457524") + data = None + + 
+class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192] + dtype = "float32" + min_val = float("0.0377514") + max_val = float("113.995") + mean = float("4.54665") + std = float("9.49234") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.89959") + max_val = float("1.52316") + mean = float("-0.0083991") + std = float("0.293552") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.415965") + max_val = float("0.676929") + mean = float("-0.000744373") + std = float("0.0979339") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("-0.045661") + max_val = float("0.0506461") + mean = float("0.000985666") + std = float("0.0123463") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("0.878419") + max_val = float("1.13374") + mean = float("0.998159") + std = float("0.0397148") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192] + dtype = "float32" + min_val = float("0.620383") + max_val = float("261.612") + mean = float("26.6222") + std = float("32.6681") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-1.67542") + max_val = float("2.16229") + mean = float("-0.0179215") + std = float("0.68865") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.167968") + max_val = float("0.193792") + mean = float("-0.000192665") + std = float("0.0332395") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [192] + dtype = "float32" + min_val = float("-0.0777107") + max_val = float("0.0986988") + mean = float("-0.00274194") + std = float("0.0270615") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("0.752505") + max_val = float("1.3134") + mean = float("0.995861") + std = float("0.0720777") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192] + dtype = "float32" + min_val = float("1.57184") + max_val = float("4068.71") + mean = float("236.298") + std = float("452.551") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [192] + dtype = "float32" + min_val = float("-7.36242") + max_val = float("13.9269") + mean = float("0.494478") + std = float("2.88688") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.389068") + max_val = float("0.600198") + mean = float("0.000965221") + std = float("0.0393839") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [192] + dtype = "float32" + min_val = float("-0.0590307") + max_val = float("0.0656391") + mean = float("-0.000852152") + std = float("0.0208437") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [192] + dtype = "float32" + min_val = float("0.723306") + max_val = float("1.37861") + mean = float("1.00331") 
+ std = float("0.0807734") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [192] + dtype = "float32" + min_val = float("0.114673") + max_val = float("118.28") + mean = float("9.38346") + std = float("17.0872") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [192] + dtype = "float32" + min_val = float("-1.53384") + max_val = float("1.66714") + mean = float("0.0189122") + std = float("0.449842") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.591228") + max_val = float("0.735237") + mean = float("0.000982142") + std = float("0.102908") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [192] + dtype = "float32" + min_val = float("-0.0502755") + max_val = float("0.0559567") + mean = float("-0.00045906") + std = float("0.0178236") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [192] + dtype = "float32" + min_val = float("0.808281") + max_val = float("1.20181") + mean = float("0.992988") + std = float("0.0547234") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [192] + dtype = "float32" + min_val = float("0.578128") + max_val = float("1167.0") + mean = float("54.5883") + std = float("123.498") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [192] + dtype = "float32" + min_val = float("-4.85651") + max_val = float("4.12752") + mean = float("-0.106072") + std = float("1.13567") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.239538") + max_val = float("0.433092") + mean = float("-0.000652541") + std = float("0.0356457") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [192] + dtype = "float32" + min_val = float("-0.0540416") + max_val = float("0.0637231") + mean = float("-0.00300834") + std = float("0.0215551") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [192] + dtype = "float32" + min_val = float("0.684645") + max_val = float("1.35384") + mean = float("0.996179") + std = float("0.0898825") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [192] + dtype = "float32" + min_val = float("1.53622") + max_val = float("5520.21") + mean = float("106.579") + std = float("414.061") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [192] + dtype = "float32" + min_val = float("-5.22382") + max_val = float("15.1322") + mean = float("0.117247") + std = float("1.94684") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.476226") + max_val = float("0.7963") + mean = float("0.000443236") + std = float("0.0425482") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [192] + dtype = "float32" + min_val = float("-0.0445659") + max_val = float("0.0403295") + mean = float("-0.000147552") + std = float("0.0169218") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [192] + dtype = "float32" + min_val = float("0.767909") + max_val = 
float("1.39806") + mean = float("0.997772") + std = float("0.0704353") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [192] + dtype = "float32" + min_val = float("0.115274") + max_val = float("161.267") + mean = float("9.90997") + std = float("18.1927") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [192] + dtype = "float32" + min_val = float("-4.84612") + max_val = float("4.5059") + mean = float("0.273024") + std = float("1.10092") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.464411") + max_val = float("0.360917") + mean = float("-0.00258586") + std = float("0.0512678") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [192] + dtype = "float32" + min_val = float("-0.0431242") + max_val = float("0.0649733") + mean = float("-0.00145664") + std = float("0.0176858") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [192] + dtype = "float32" + min_val = float("0.671148") + max_val = float("1.40867") + mean = float("1.01097") + std = float("0.114101") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [192] + dtype = "float32" + min_val = float("0.104781") + max_val = float("107.432") + mean = float("7.53737") + std = float("12.3489") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [192] + dtype = "float32" + min_val = float("-3.1768") + max_val = float("3.19585") + mean = float("-0.16878") + std = float("0.931493") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.421621") + max_val = float("0.507273") + mean = float("-0.00174951") + std = float("0.0490677") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("-0.0689209") + max_val = float("0.0467315") + mean = float("0.0017889") + std = float("0.0151475") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("0.858678") + max_val = float("1.30337") + mean = float("1.01214") + std = float("0.0484672") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384] + dtype = "float32" + min_val = float("0.128919") + max_val = float("342.898") + mean = float("14.0405") + std = float("33.2551") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-3.81375") + max_val = float("3.75923") + mean = float("-0.136808") + std = float("0.772147") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.378922") + max_val = float("0.26222") + mean = float("-0.00155034") + std = float("0.0505663") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [768] + dtype = "float32" + min_val = float("-0.114198") + max_val = float("0.0530032") + mean = float("-0.00785264") + std = float("0.0193075") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [768] + dtype = "float32" + 
min_val = float("0.854257") + max_val = float("1.14609") + mean = float("0.996132") + std = float("0.0399797") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [768] + dtype = "float32" + min_val = float("2.37579") + max_val = float("6551.21") + mean = float("250.642") + std = float("653.171") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [768] + dtype = "float32" + min_val = float("-19.687") + max_val = float("24.064") + mean = float("-0.620033") + std = float("4.64238") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.318152") + max_val = float("0.348793") + mean = float("-0.000277384") + std = float("0.0506764") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("-0.0607342") + max_val = float("0.0605421") + mean = float("0.00081638") + std = float("0.0175246") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("0.90443") + max_val = float("1.11447") + mean = float("1.00277") + std = float("0.0328078") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384] + dtype = "float32" + min_val = float("0.0940703") + max_val = float("7927.03") + mean = float("29.5057") + std = float("406.318") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-1.86948") + max_val = float("13.8127") + mean = float("0.0622895") + std = float("0.859249") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.340141") + max_val = float("0.841461") + mean = float("0.00104827") + std = float("0.0703278") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("-0.0668361") + max_val = float("0.0352145") + mean = float("-0.0106309") + std = float("0.0184961") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("0.880444") + max_val = float("1.07712") + mean = float("0.98476") + std = float("0.0330571") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384] + dtype = "float32" + min_val = float("0.826809") + max_val = float("1101.08") + mean = float("45.945") + std = float("103.117") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-6.3234") + max_val = float("4.75869") + mean = float("0.219007") + std = float("1.53871") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.113781") + max_val = float("0.115707") + mean = float("0.000405682") + std = float("0.0225404") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-0.0619347") + max_val = float("0.0508997") + mean = float("-0.0028225") + std = float("0.0162703") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + 
shape = [384] + dtype = "float32" + min_val = float("0.831477") + max_val = float("1.18401") + mean = float("1.00106") + std = float("0.0441437") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("1.0502") + max_val = float("1465.25") + mean = float("57.8083") + std = float("121.87") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-6.17757") + max_val = float("6.42098") + mean = float("0.24933") + std = float("1.69282") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.108575") + max_val = float("0.122906") + mean = float("0.000415731") + std = float("0.0228954") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("-0.0272461") + max_val = float("0.0323086") + mean = float("-0.00212997") + std = float("0.00787912") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("0.914261") + max_val = float("1.07928") + mean = float("1.00003") + std = float("0.0182877") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384] + dtype = "float32" + min_val = float("8.2708") + max_val = float("20969.4") + mean = float("1017.41") + std = float("1582.44") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-91.1669") + max_val = float("51.3756") + mean = float("-0.349723") + std = float("18.1432") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.166378") + max_val = float("0.172815") + mean = float("0.000589818") + std = float("0.0336912") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("-0.00342985") + max_val = float("0.00340284") + mean = float("0.000100895") + std = float("0.00127731") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("0.971091") + max_val = float("1.04306") + mean = float("1.00104") + std = float("0.0108205") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384] + dtype = "float32" + min_val = float("0.131069") + max_val = float("18.0855") + mean = float("1.70763") + std = float("1.47022") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.341731") + max_val = float("0.4665") + mean = float("-0.00591625") + std = float("0.146621") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.343178") + max_val = float("0.298537") + mean = float("-0.000155183") + std = float("0.065637") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-0.0033501") + max_val = float("0.00332228") + mean = float("9.22636e-05") + std = float("0.00124989") + data = None + + +class 
Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.975538") + max_val = float("1.03185") + mean = float("0.99934") + std = float("0.00883726") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("0.941216") + max_val = float("51.3406") + mean = float("9.15217") + std = float("6.63408") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-1.03652") + max_val = float("0.835484") + mean = float("-0.0190484") + std = float("0.416734") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.101721") + max_val = float("0.109141") + mean = float("-5.90297e-05") + std = float("0.022196") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-0.0106587") + max_val = float("0.0108129") + mean = float("0.000210264") + std = float("0.00408712") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("0.960378") + max_val = float("1.04577") + mean = float("1.00062") + std = float("0.0135727") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("8.67209") + max_val = float("185.997") + mean = float("52.6139") + std = float("32.0555") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-2.06089") + max_val = float("2.14975") + mean = float("0.0307506") + std = float("1.03235") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.111831") + max_val = float("0.112815") + mean = float("3.78755e-05") + std = float("0.0227548") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-0.00958883") + max_val = float("0.010455") + mean = float("0.000316098") + std = float("0.00334068") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.970238") + max_val = float("1.02715") + mean = float("1.00048") + std = float("0.00843436") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.10354") + max_val = float("7.19702") + mean = float("1.42465") + std = float("1.13005") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.317744") + max_val = float("0.26737") + mean = float("0.00248273") + std = float("0.120801") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.282549") + max_val = float("0.319494") + mean = float("6.89056e-05") + std = float("0.0656039") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-0.00957529") + max_val = float("0.01049") + mean = float("0.000327405") + std 
= float("0.00334983") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.97484") + max_val = float("1.02471") + mean = float("0.999861") + std = float("0.00796976") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("0.593037") + max_val = float("41.8132") + mean = float("7.50701") + std = float("6.20102") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.863724") + max_val = float("1.00091") + mean = float("0.023285") + std = float("0.376417") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.104849") + max_val = float("0.11186") + mean = float("7.72765e-05") + std = float("0.0222984") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-0.0121373") + max_val = float("0.0163125") + mean = float("0.000390836") + std = float("0.0051407") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.964751") + max_val = float("1.05554") + mean = float("1.00027") + std = float("0.0130082") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.882644") + max_val = float("49.9032") + mean = float("10.4933") + std = float("8.33945") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-1.07077") + max_val = float("0.990824") + mean = float("0.00393083") + std = float("0.459185") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.109571") + max_val = float("0.109836") + mean = float("1.91045e-05") + std = float("0.0231211") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-0.0137883") + max_val = float("0.0138815") + mean = float("-0.000422823") + std = float("0.00446412") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.945794") + max_val = float("1.07131") + mean = float("0.999947") + std = float("0.0167394") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0904289") + max_val = float("843.503") + mean = float("21.9554") + std = float("69.7332") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-1.5622") + max_val = float("1.92178") + mean = float("0.175603") + std = float("0.658664") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.282124") + max_val = float("0.350672") + mean = float("-0.000361834") + std = float("0.0416163") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-0.0338144") + max_val = 
float("0.0451766") + mean = float("-0.00243304") + std = float("0.0103412") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.849588") + max_val = float("1.17766") + mean = float("1.01245") + std = float("0.0436496") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("0.14536") + max_val = float("4734.06") + mean = float("72.8271") + std = float("307.472") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-7.96754") + max_val = float("4.34304") + mean = float("0.0353008") + std = float("1.16638") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-2.08438") + max_val = float("2.25581") + mean = float("-0.000173532") + std = float("0.0502131") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [1024] + dtype = "float32" + min_val = float("-3.75685") + max_val = float("-0.733727") + mean = float("-2.1772") + std = float("0.429073") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [1024] + dtype = "float32" + min_val = float("1.47377") + max_val = float("4.45374") + mean = float("3.08911") + std = float("0.257253") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [1024] + dtype = "float32" + min_val = float("0.659145") + max_val = float("5668.03") + mean = float("51.7014") + std = float("284.823") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [1024] + dtype = "float32" + min_val = float("-10.5218") + max_val = float("15.0826") + mean = float("0.600535") + std = float("1.19894") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-2.25504") + max_val = float("1.50503") + mean = float("-0.00340465") + std = float("0.0587546") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [768] + dtype = "float32" + min_val = float("-1.66615") + max_val = float("0.702461") + mean = float("-0.0298825") + std = float("0.329057") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.672851") + max_val = float("1.39983") + mean = float("-0.00293712") + std = float("0.04192") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("-1.77599") + max_val = float("0.186765") + mean = float("-0.349988") + std = float("0.282633") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("0.176236") + max_val = float("1.49971") + mean = float("0.574363") + std = float("0.239746") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384] + dtype = "float32" + min_val = float("0.0088205") + max_val = float("61.4436") + mean = float("1.35834") + std = float("4.13771") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = 
float("-3.52838") + max_val = float("1.94912") + mean = float("0.168807") + std = float("0.545052") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.382595") + max_val = float("0.58743") + mean = float("-0.00222704") + std = float("0.0314634") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("-1.77573") + max_val = float("0.171147") + mean = float("-0.34999") + std = float("0.282129") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("0.250169") + max_val = float("2.37409") + mean = float("0.951232") + std = float("0.269453") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384] + dtype = "float32" + min_val = float("0.0574441") + max_val = float("347.715") + mean = float("14.6074") + std = float("39.5219") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-6.67799") + max_val = float("8.445") + mean = float("0.551903") + std = float("1.74112") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.302029") + max_val = float("0.368959") + mean = float("-0.000751333") + std = float("0.0179471") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("-2.55943") + max_val = float("0.0228944") + mean = float("-1.54527") + std = float("0.416442") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("0.435413") + max_val = float("1.70978") + mean = float("1.14726") + std = float("0.165635") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384] + dtype = "float32" + min_val = float("2.43117") + max_val = float("2350.33") + mean = float("140.333") + std = float("274.404") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-17.4251") + max_val = float("10.3728") + mean = float("-0.130744") + std = float("3.70163") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.1823") + max_val = float("0.331406") + mean = float("-0.00101749") + std = float("0.0210084") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("-1.95172") + max_val = float("0.430872") + mean = float("-0.598014") + std = float("0.342132") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("0.141609") + max_val = float("2.06798") + mean = float("0.549227") + std = float("0.232963") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384] + dtype = "float32" + min_val = float("0.0151657") + max_val = float("87.8278") + mean = float("1.47577") + std = float("5.94834") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = 
"float32" + min_val = float("-1.58243") + max_val = float("3.19358") + mean = float("0.362351") + std = float("0.491451") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.583842") + max_val = float("0.691119") + mean = float("-0.00479886") + std = float("0.0297045") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("-1.95183") + max_val = float("0.421584") + mean = float("-0.598194") + std = float("0.341726") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("0.484618") + max_val = float("2.09273") + mean = float("1.03953") + std = float("0.262543") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384] + dtype = "float32" + min_val = float("0.0984016") + max_val = float("532.444") + mean = float("11.2946") + std = float("32.1487") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-5.20531") + max_val = float("7.12344") + mean = float("0.647736") + std = float("1.48765") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.532799") + max_val = float("0.6992") + mean = float("-0.00098897") + std = float("0.0192363") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("-2.4112") + max_val = float("0.821292") + mean = float("-1.37331") + std = float("0.36105") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("0.317548") + max_val = float("1.87229") + mean = float("1.18063") + std = float("0.167971") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384] + dtype = "float32" + min_val = float("1.28502") + max_val = float("713.458") + mean = float("59.6624") + std = float("99.5875") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [384] + dtype = "float32" + min_val = float("-10.6965") + max_val = float("6.54959") + mean = float("0.285962") + std = float("2.06646") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.184577") + max_val = float("0.665096") + mean = float("-0.00143532") + std = float("0.0219188") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [384] + dtype = "float32" + min_val = float("-1.87326") + max_val = float("0.355556") + mean = float("-0.513615") + std = float("0.359933") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [384] + dtype = "float32" + min_val = float("0.0430065") + max_val = float("2.13559") + mean = float("0.432735") + std = float("0.222723") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [384] + dtype = "float32" + min_val = float("0.011033") + max_val = float("84.8634") + mean = float("1.41424") + std = float("4.85621") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = 
[384] + dtype = "float32" + min_val = float("-1.22806") + max_val = float("2.50911") + mean = float("0.405188") + std = float("0.404834") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.359869") + max_val = float("0.221885") + mean = float("-0.00491368") + std = float("0.0260252") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [384] + dtype = "float32" + min_val = float("-1.87315") + max_val = float("0.349385") + mean = float("-0.51344") + std = float("0.35956") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [384] + dtype = "float32" + min_val = float("0.400932") + max_val = float("2.34596") + mean = float("0.999601") + std = float("0.283924") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [384] + dtype = "float32" + min_val = float("0.523321") + max_val = float("818.546") + mean = float("21.1392") + std = float("56.959") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-4.49472") + max_val = float("10.0959") + mean = float("0.877271") + std = float("1.48286") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.320017") + max_val = float("0.283446") + mean = float("-0.00132138") + std = float("0.0204543") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [384] + dtype = "float32" + min_val = float("-2.14544") + max_val = float("0.38419") + mean = float("-1.33575") + std = float("0.279468") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [384] + dtype = "float32" + min_val = float("0.357814") + max_val = float("1.64831") + mean = float("1.15305") + std = float("0.133234") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [384] + dtype = "float32" + min_val = float("1.09025") + max_val = float("3503.07") + mean = float("91.7642") + std = float("265.422") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [384] + dtype = "float32" + min_val = float("-7.1763") + max_val = float("4.5789") + mean = float("0.403464") + std = float("0.915648") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.478562") + max_val = float("0.69387") + mean = float("-0.00155617") + std = float("0.0246579") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [384] + dtype = "float32" + min_val = float("-2.9134") + max_val = float("1.40052") + mean = float("-0.763909") + std = float("0.616595") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [384] + dtype = "float32" + min_val = float("0.831946") + max_val = float("2.9683") + mean = float("1.8751") + std = float("0.299764") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [384] + dtype = "float32" + min_val = float("0.0885172") + max_val = float("1887.7") + mean = float("48.5145") + std = float("178.738") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + 
shape = [384] + dtype = "float32" + min_val = float("-11.3592") + max_val = float("8.7687") + mean = float("0.801955") + std = float("1.99121") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.504619") + max_val = float("1.10023") + mean = float("-0.00499413") + std = float("0.0634642") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [384] + dtype = "float32" + min_val = float("-2.23368") + max_val = float("0.489748") + mean = float("-0.78342") + std = float("0.463359") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [384] + dtype = "float32" + min_val = float("0.814619") + max_val = float("2.89755") + mean = float("2.0896") + std = float("0.313297") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [384] + dtype = "float32" + min_val = float("0.102848") + max_val = float("1756.01") + mean = float("13.9081") + std = float("95.8671") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [384] + dtype = "float32" + min_val = float("-4.26149") + max_val = float("9.79096") + mean = float("0.575806") + std = float("0.886261") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-1.20124") + max_val = float("0.522659") + mean = float("-0.0035768") + std = float("0.0549484") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [768] + dtype = "float32" + min_val = float("-2.41109") + max_val = float("0.567457") + mean = float("-0.897966") + std = float("0.334904") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [768] + dtype = "float32" + min_val = float("0.0119823") + max_val = float("1.90298") + mean = float("0.923928") + std = float("0.183501") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [768] + dtype = "float32" + min_val = float("0.0652324") + max_val = float("3146.51") + mean = float("76.1555") + std = float("254.337") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [768] + dtype = "float32" + min_val = float("-23.861") + max_val = float("23.0945") + mean = float("1.23532") + std = float("3.78923") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.654471") + max_val = float("1.28219") + mean = float("-0.00113471") + std = float("0.0287547") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [512] + dtype = "float32" + min_val = float("-3.37396") + max_val = float("1.17072") + mean = float("-1.11952") + std = float("0.48423") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [512] + dtype = "float32" + min_val = float("-0.405846") + max_val = float("2.13322") + mean = float("1.07334") + std = float("0.21502") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [512] + dtype = "float32" + min_val = float("1.78352") + max_val = float("36496.4") + mean = float("310.532") + std = float("2130.65") + data = None + + +class Program_weight_tensor_parameter_477: + name = 
"parameter_477" + shape = [512] + dtype = "float32" + min_val = float("-6.728") + max_val = float("3.88714") + mean = float("0.154674") + std = float("0.853746") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-1.0782") + max_val = float("3.02886") + mean = float("-0.00466678") + std = float("0.0847267") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [384] + dtype = "float32" + min_val = float("-0.431336") + max_val = float("1.00537") + mean = float("0.161959") + std = float("0.242457") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-1.21179") + max_val = float("1.07606") + mean = float("-0.000138772") + std = float("0.0579385") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("-2.01724") + max_val = float("0.0718206") + mean = float("-0.423892") + std = float("0.321935") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("0.0428034") + max_val = float("1.92188") + mean = float("0.510445") + std = float("0.35662") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192] + dtype = "float32" + min_val = float("0.000667287") + max_val = float("70.1317") + mean = float("1.27171") + std = float("5.45848") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-0.776173") + max_val = float("1.30126") + mean = float("0.140402") + std = float("0.263462") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.498725") + max_val = float("0.295175") + mean = float("-0.00350298") + std = float("0.0350809") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("-2.01302") + max_val = float("0.064935") + mean = float("-0.423182") + std = float("0.321837") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("0.280099") + max_val = float("2.30206") + mean = float("1.1007") + std = float("0.429049") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192] + dtype = "float32" + min_val = float("0.126362") + max_val = float("182.901") + mean = float("17.8803") + std = float("30.1525") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-4.10686") + max_val = float("3.52747") + mean = float("0.321384") + std = float("1.26875") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.221819") + max_val = float("0.252313") + mean = float("-0.00094495") + std = float("0.0269884") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("-2.8515") + max_val = float("-0.322537") + mean = float("-1.26222") + std = float("0.400818") + data = None + + +class 
Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("0.536893") + max_val = float("2.11896") + mean = float("1.21905") + std = float("0.193579") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192] + dtype = "float32" + min_val = float("7.93103") + max_val = float("5614.43") + mean = float("492.992") + std = float("807.397") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-9.40175") + max_val = float("14.6092") + mean = float("1.12784") + std = float("3.3654") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.442802") + max_val = float("0.367789") + mean = float("-0.00224649") + std = float("0.0304799") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("-1.92356") + max_val = float("0.190305") + mean = float("-0.343814") + std = float("0.278074") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.0332801") + max_val = float("1.79271") + mean = float("0.407485") + std = float("0.312698") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192] + dtype = "float32" + min_val = float("0.00213834") + max_val = float("54.8583") + mean = float("1.52478") + std = float("4.65095") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-1.27044") + max_val = float("0.841473") + mean = float("0.11748") + std = float("0.283272") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.329507") + max_val = float("0.44394") + mean = float("-0.00297809") + std = float("0.0343006") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("-1.92439") + max_val = float("0.191089") + mean = float("-0.342999") + std = float("0.278334") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("0.43576") + max_val = float("2.28299") + mean = float("1.07179") + std = float("0.365403") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192] + dtype = "float32" + min_val = float("0.142339") + max_val = float("393.795") + mean = float("32.873") + std = float("64.5534") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-5.0846") + max_val = float("3.21811") + mean = float("0.391168") + std = float("1.32244") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.258582") + max_val = float("0.385827") + mean = float("-0.00112742") + std = float("0.0289482") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("-2.44838") + max_val = float("-0.12055") + mean = float("-1.21761") + std = float("0.449176") + data = 
None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("0.743828") + max_val = float("1.86172") + mean = float("1.24799") + std = float("0.189118") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192] + dtype = "float32" + min_val = float("3.38171") + max_val = float("6982.98") + mean = float("382.169") + std = float("707.156") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-12.3814") + max_val = float("8.54937") + mean = float("0.284356") + std = float("3.18591") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.366981") + max_val = float("0.413695") + mean = float("-0.00116539") + std = float("0.0323276") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("-1.75204") + max_val = float("0.148384") + mean = float("-0.31488") + std = float("0.292428") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.000193485") + max_val = float("1.73229") + mean = float("0.333772") + std = float("0.264249") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192] + dtype = "float32" + min_val = float("4.31104e-05") + max_val = float("136.033") + mean = float("1.37734") + std = float("9.92725") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.90882") + max_val = float("0.61387") + mean = float("0.11144") + std = float("0.271409") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.326499") + max_val = float("0.738359") + mean = float("-0.00238391") + std = float("0.0330837") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("-1.75084") + max_val = float("0.150921") + mean = float("-0.314417") + std = float("0.292503") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("0.414409") + max_val = float("2.02981") + mean = float("1.02513") + std = float("0.335319") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192] + dtype = "float32" + min_val = float("0.311201") + max_val = float("462.824") + mean = float("27.9648") + std = float("55.8063") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-3.55488") + max_val = float("2.97708") + mean = float("0.437663") + std = float("1.14933") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.309979") + max_val = float("0.302101") + mean = float("-0.00130832") + std = float("0.0304696") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("-2.44374") + max_val = float("0.125063") + mean = float("-1.17079") + std = 
float("0.421874") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("0.629982") + max_val = float("1.85267") + mean = float("1.21472") + std = float("0.190543") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192] + dtype = "float32" + min_val = float("5.10165") + max_val = float("3185.55") + mean = float("217.947") + std = float("421.372") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-8.76845") + max_val = float("4.75125") + mean = float("0.120205") + std = float("1.80007") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.297765") + max_val = float("0.385871") + mean = float("-0.00233613") + std = float("0.0331732") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("-2.0708") + max_val = float("0.237989") + mean = float("-0.320903") + std = float("0.336285") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.105348") + max_val = float("0.75366") + mean = float("0.18913") + std = float("0.147605") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192] + dtype = "float32" + min_val = float("0.00102651") + max_val = float("3.84627") + mean = float("0.363051") + std = float("0.622389") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-0.554819") + max_val = float("0.802086") + mean = float("0.0967655") + std = float("0.177221") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.299275") + max_val = float("0.239252") + mean = float("-0.00227099") + std = float("0.0271605") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("-2.07036") + max_val = float("0.239385") + mean = float("-0.320647") + std = float("0.336358") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("0.3209") + max_val = float("1.98189") + mean = float("0.932811") + std = float("0.310541") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192] + dtype = "float32" + min_val = float("0.426529") + max_val = float("516.92") + mean = float("21.4315") + std = float("46.4704") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-4.45164") + max_val = float("3.97231") + mean = float("0.378246") + std = float("1.29374") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.359009") + max_val = float("0.461203") + mean = float("-0.000954906") + std = float("0.0312542") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("-2.67946") + max_val = float("-0.0859459") + mean = 
float("-1.17086") + std = float("0.432141") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("0.640065") + max_val = float("1.66218") + mean = float("1.1969") + std = float("0.162891") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192] + dtype = "float32" + min_val = float("1.42683") + max_val = float("3118.74") + mean = float("129.891") + std = float("314.577") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-9.93296") + max_val = float("2.81927") + mean = float("-0.365634") + std = float("1.17774") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.267835") + max_val = float("0.327041") + mean = float("-0.00211123") + std = float("0.0334733") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("-1.22832") + max_val = float("0.26444") + mean = float("-0.272456") + std = float("0.298021") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-0.49636") + max_val = float("0.673764") + mean = float("0.183466") + std = float("0.163715") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192] + dtype = "float32" + min_val = float("0.00342555") + max_val = float("35.8824") + mean = float("1.07553") + std = float("3.45142") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-0.615439") + max_val = float("0.945103") + mean = float("0.119416") + std = float("0.219094") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.253603") + max_val = float("0.459184") + mean = float("-0.0030267") + std = float("0.029738") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("-1.22758") + max_val = float("0.264218") + mean = float("-0.272363") + std = float("0.298417") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("0.219365") + max_val = float("1.58241") + mean = float("0.841959") + std = float("0.278616") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192] + dtype = "float32" + min_val = float("0.289301") + max_val = float("512.437") + mean = float("32.0927") + std = float("57.7776") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-5.8443") + max_val = float("4.38688") + mean = float("0.514502") + std = float("1.27497") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.326333") + max_val = float("0.442457") + mean = float("-0.00171487") + std = float("0.0321544") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("-2.4627") + max_val = 
float("-0.120065") + mean = float("-1.18036") + std = float("0.413127") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("0.655899") + max_val = float("1.87317") + mean = float("1.17568") + std = float("0.16221") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192] + dtype = "float32" + min_val = float("2.6823") + max_val = float("1525.38") + mean = float("73.5291") + std = float("163.885") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [192] + dtype = "float32" + min_val = float("-4.20434") + max_val = float("2.61799") + mean = float("-0.431519") + std = float("1.02022") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.416157") + max_val = float("0.431709") + mean = float("-0.00131864") + std = float("0.0350356") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [192] + dtype = "float32" + min_val = float("-1.22994") + max_val = float("0.303213") + mean = float("-0.21823") + std = float("0.258642") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [192] + dtype = "float32" + min_val = float("-0.422019") + max_val = float("1.41747") + mean = float("0.220797") + std = float("0.228871") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [192] + dtype = "float32" + min_val = float("0.0165019") + max_val = float("33.7927") + mean = float("1.13732") + std = float("3.11293") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [192] + dtype = "float32" + min_val = float("-0.963032") + max_val = float("1.31264") + mean = float("0.23276") + std = float("0.267414") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.43918") + max_val = float("0.33995") + mean = float("-0.00616669") + std = float("0.0345367") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [192] + dtype = "float32" + min_val = float("-1.22949") + max_val = float("0.309031") + mean = float("-0.218564") + std = float("0.259709") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [192] + dtype = "float32" + min_val = float("0.14077") + max_val = float("1.42084") + mean = float("0.745138") + std = float("0.240274") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [192] + dtype = "float32" + min_val = float("0.456227") + max_val = float("562.681") + mean = float("40.0527") + std = float("76.3966") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-4.43539") + max_val = float("5.67768") + mean = float("0.649728") + std = float("1.48479") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.343651") + max_val = float("0.2904") + mean = float("-0.00201592") + std = float("0.0335947") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [192] + dtype = "float32" + min_val = float("-1.80453") 
+ max_val = float("-0.162858") + mean = float("-1.09713") + std = float("0.315717") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [192] + dtype = "float32" + min_val = float("0.769545") + max_val = float("1.70833") + mean = float("1.16026") + std = float("0.155045") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [192] + dtype = "float32" + min_val = float("0.805022") + max_val = float("748.072") + mean = float("51.6621") + std = float("87.6371") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [192] + dtype = "float32" + min_val = float("-6.15158") + max_val = float("3.38521") + mean = float("-0.302766") + std = float("1.31562") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.590461") + max_val = float("0.584368") + mean = float("-0.00190877") + std = float("0.0351376") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [192] + dtype = "float32" + min_val = float("-2.83859") + max_val = float("1.44201") + mean = float("-0.0242275") + std = float("0.688732") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [192] + dtype = "float32" + min_val = float("0.44453") + max_val = float("2.27656") + mean = float("1.01019") + std = float("0.281205") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [192] + dtype = "float32" + min_val = float("0.707151") + max_val = float("881.223") + mean = float("28.3452") + std = float("71.9112") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [192] + dtype = "float32" + min_val = float("-0.900372") + max_val = float("1.3303") + mean = float("0.0718146") + std = float("0.41501") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.911939") + max_val = float("0.85712") + mean = float("-0.00409194") + std = float("0.0737914") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [192] + dtype = "float32" + min_val = float("-3.07941") + max_val = float("0.923214") + mean = float("0.0507847") + std = float("0.65589") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [192] + dtype = "float32" + min_val = float("0.830631") + max_val = float("5.26287") + mean = float("1.86701") + std = float("0.838413") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [192] + dtype = "float32" + min_val = float("0.105688") + max_val = float("397.272") + mean = float("13.0574") + std = float("33.7723") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [192] + dtype = "float32" + min_val = float("-0.748885") + max_val = float("1.41428") + mean = float("0.243549") + std = float("0.417015") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.876728") + max_val = float("0.658872") + mean = float("-0.00150673") + std = float("0.0792434") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [384] + dtype = "float32" + min_val = 
float("-2.91636") + max_val = float("1.31059") + mean = float("-0.267473") + std = float("0.547222") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [384] + dtype = "float32" + min_val = float("0.674047") + max_val = float("2.39839") + mean = float("1.18568") + std = float("0.256968") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [384] + dtype = "float32" + min_val = float("2.11319") + max_val = float("4730.93") + mean = float("128.251") + std = float("344.595") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [384] + dtype = "float32" + min_val = float("-8.14084") + max_val = float("8.5369") + mean = float("0.278321") + std = float("1.8371") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.58989") + max_val = float("0.559152") + mean = float("-0.000654299") + std = float("0.0371397") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [256] + dtype = "float32" + min_val = float("-1.97693") + max_val = float("1.28056") + mean = float("-0.884327") + std = float("0.460732") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [256] + dtype = "float32" + min_val = float("0.420192") + max_val = float("1.66725") + mean = float("1.06325") + std = float("0.209393") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [256] + dtype = "float32" + min_val = float("0.419827") + max_val = float("284.629") + mean = float("28.6387") + std = float("31.5573") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [256] + dtype = "float32" + min_val = float("-2.2536") + max_val = float("2.56914") + mean = float("0.0636573") + std = float("0.911947") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-1.39226") + max_val = float("0.767124") + mean = float("0.000585002") + std = float("0.117626") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [192] + dtype = "float32" + min_val = float("-0.495692") + max_val = float("0.840966") + mean = float("0.2112") + std = float("0.286077") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-1.73305") + max_val = float("1.36794") + mean = float("0.00205272") + std = float("0.0863243") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("-2.0489") + max_val = float("0.157801") + mean = float("-0.326535") + std = float("0.403898") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("0.0739081") + max_val = float("2.62834") + mean = float("0.593411") + std = float("0.576866") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96] + dtype = "float32" + min_val = float("0.0100179") + max_val = float("21.0602") + mean = float("0.787071") + std = float("2.3345") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + 
min_val = float("-0.866379") + max_val = float("0.600191") + mean = float("0.090116") + std = float("0.243145") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.620142") + max_val = float("0.645097") + mean = float("-0.0046413") + std = float("0.0578111") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("-2.04879") + max_val = float("0.159057") + mean = float("-0.326665") + std = float("0.403854") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("0.187591") + max_val = float("4.74473") + mean = float("0.96641") + std = float("0.764339") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96] + dtype = "float32" + min_val = float("0.253939") + max_val = float("83.734") + mean = float("9.82653") + std = float("15.134") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-2.26675") + max_val = float("2.16465") + mean = float("0.165962") + std = float("0.822871") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.398881") + max_val = float("0.402724") + mean = float("-0.000938139") + std = float("0.0386576") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("-2.37295") + max_val = float("-0.102665") + mean = float("-1.13883") + std = float("0.443407") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("0.555401") + max_val = float("1.73264") + mean = float("1.03319") + std = float("0.194665") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96] + dtype = "float32" + min_val = float("6.24368") + max_val = float("2915.66") + mean = float("325.921") + std = float("486.218") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-5.92415") + max_val = float("7.2442") + mean = float("0.105117") + std = float("2.22391") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.00528") + max_val = float("1.15887") + mean = float("-0.00140307") + std = float("0.0476841") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("-1.43425") + max_val = float("0.243537") + mean = float("-0.221451") + std = float("0.290778") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.0458792") + max_val = float("1.91389") + mean = float("0.451652") + std = float("0.38683") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96] + dtype = "float32" + min_val = float("0.0117115") + max_val = float("20.7558") + mean = float("1.10912") + std = float("2.40245") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + 
min_val = float("-0.430932") + max_val = float("0.428379") + mean = float("0.0421364") + std = float("0.179957") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.521203") + max_val = float("0.496512") + mean = float("-0.000920286") + std = float("0.0572645") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("-1.43413") + max_val = float("0.242648") + mean = float("-0.221874") + std = float("0.290667") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("0.1859") + max_val = float("2.36951") + mean = float("0.838957") + std = float("0.457927") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96] + dtype = "float32" + min_val = float("0.659278") + max_val = float("286.866") + mean = float("25.6404") + std = float("46.992") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-2.40232") + max_val = float("1.86559") + mean = float("0.0296896") + std = float("0.734277") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.404469") + max_val = float("0.565108") + mean = float("-2.12389e-05") + std = float("0.0414978") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("-3.34532") + max_val = float("0.317392") + mean = float("-1.06949") + std = float("0.557347") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("0.542508") + max_val = float("2.18039") + mean = float("1.13094") + std = float("0.260662") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96] + dtype = "float32" + min_val = float("6.24523") + max_val = float("1074.18") + mean = float("207.588") + std = float("262.618") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-3.89899") + max_val = float("7.8651") + mean = float("0.352327") + std = float("2.12024") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.823878") + max_val = float("0.962464") + mean = float("0.00078916") + std = float("0.0482483") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("-1.27224") + max_val = float("0.348938") + mean = float("-0.176879") + std = float("0.22796") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.0369533") + max_val = float("1.31647") + mean = float("0.318727") + std = float("0.214196") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96] + dtype = "float32" + min_val = float("0.00826663") + max_val = float("9.61473") + mean = float("0.763141") + std = float("1.29069") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = 
"float32" + min_val = float("-0.429425") + max_val = float("0.532455") + mean = float("0.0734063") + std = float("0.207301") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.640784") + max_val = float("0.626448") + mean = float("-0.00459119") + std = float("0.0563952") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("-1.27228") + max_val = float("0.351268") + mean = float("-0.177959") + std = float("0.228957") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("0.279118") + max_val = float("1.68802") + mean = float("0.748112") + std = float("0.283976") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96] + dtype = "float32" + min_val = float("0.495236") + max_val = float("305.476") + mean = float("23.2293") + std = float("41.7657") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-2.4097") + max_val = float("2.88329") + mean = float("0.22083") + std = float("0.90357") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.499947") + max_val = float("0.322677") + mean = float("-0.00179138") + std = float("0.0438781") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("-3.63583") + max_val = float("0.216427") + mean = float("-1.03942") + std = float("0.568947") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("0.536295") + max_val = float("2.13875") + mean = float("1.13948") + std = float("0.251778") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96] + dtype = "float32" + min_val = float("4.23194") + max_val = float("2150.7") + mean = float("240.647") + std = float("382.499") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-5.80433") + max_val = float("5.21501") + mean = float("-0.0774617") + std = float("2.37025") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.699821") + max_val = float("0.878286") + mean = float("-0.000862026") + std = float("0.0505102") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("-0.894234") + max_val = float("0.270373") + mean = float("-0.205966") + std = float("0.225605") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.0471566") + max_val = float("1.43967") + mean = float("0.356894") + std = float("0.244147") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96] + dtype = "float32" + min_val = float("0.00309543") + max_val = float("5.74781") + mean = float("0.909017") + std = float("1.17822") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + 
dtype = "float32" + min_val = float("-0.416857") + max_val = float("0.556843") + mean = float("0.0822026") + std = float("0.199091") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.636734") + max_val = float("0.441051") + mean = float("-0.00533323") + std = float("0.0594385") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("-0.895106") + max_val = float("0.285499") + mean = float("-0.205824") + std = float("0.22571") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("0.102597") + max_val = float("1.82759") + mean = float("0.733685") + std = float("0.319525") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96] + dtype = "float32" + min_val = float("0.222644") + max_val = float("158.216") + mean = float("22.1457") + std = float("30.2752") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-2.05672") + max_val = float("1.86763") + mean = float("0.141492") + std = float("0.784561") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.416337") + max_val = float("0.620195") + mean = float("-0.000740634") + std = float("0.0450492") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("-2.61732") + max_val = float("-0.0206351") + mean = float("-0.990844") + std = float("0.467117") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("0.475645") + max_val = float("1.78157") + mean = float("1.10744") + std = float("0.215003") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96] + dtype = "float32" + min_val = float("3.34611") + max_val = float("1355.62") + mean = float("137.001") + std = float("202.584") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-6.40911") + max_val = float("5.82494") + mean = float("0.15797") + std = float("2.53998") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.460123") + max_val = float("0.614522") + mean = float("-0.000390355") + std = float("0.0518526") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("-0.982194") + max_val = float("0.229789") + mean = float("-0.185322") + std = float("0.219885") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-0.282037") + max_val = float("1.25054") + mean = float("0.316928") + std = float("0.213298") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96] + dtype = "float32" + min_val = float("0.0222483") + max_val = float("18.4911") + mean = float("1.86677") + std = float("3.22647") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape 
= [96] + dtype = "float32" + min_val = float("-0.467087") + max_val = float("0.423376") + mean = float("0.0611457") + std = float("0.208977") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.427189") + max_val = float("0.852143") + mean = float("-0.00153765") + std = float("0.0685056") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("-0.970182") + max_val = float("0.239637") + mean = float("-0.184384") + std = float("0.219045") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("0.253524") + max_val = float("1.70301") + mean = float("0.65924") + std = float("0.231204") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96] + dtype = "float32" + min_val = float("1.15441") + max_val = float("1075.07") + mean = float("57.2359") + std = float("127.646") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-3.29423") + max_val = float("2.0501") + mean = float("-0.00340411") + std = float("1.01608") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.787543") + max_val = float("0.35957") + mean = float("0.00104606") + std = float("0.0479502") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("-3.55844") + max_val = float("0.140159") + mean = float("-0.971782") + std = float("0.538303") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("0.609623") + max_val = float("2.32706") + mean = float("1.16435") + std = float("0.226121") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96] + dtype = "float32" + min_val = float("3.3782") + max_val = float("483.508") + mean = float("80.7722") + std = float("108.303") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [96] + dtype = "float32" + min_val = float("-7.23203") + max_val = float("8.44751") + mean = float("-0.264363") + std = float("3.16954") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.500573") + max_val = float("0.510454") + mean = float("-0.000457783") + std = float("0.0550219") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [96] + dtype = "float32" + min_val = float("-0.740562") + max_val = float("0.275522") + mean = float("-0.137098") + std = float("0.240038") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [96] + dtype = "float32" + min_val = float("-0.0642621") + max_val = float("0.900569") + mean = float("0.317589") + std = float("0.184519") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [96] + dtype = "float32" + min_val = float("0.013838") + max_val = float("73.0926") + mean = float("2.66058") + std = float("7.86075") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + 
shape = [96] + dtype = "float32" + min_val = float("-0.540133") + max_val = float("0.71052") + mean = float("0.051634") + std = float("0.237114") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.58299") + max_val = float("0.513708") + mean = float("-0.0024741") + std = float("0.0809066") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [96] + dtype = "float32" + min_val = float("-0.76242") + max_val = float("0.286095") + mean = float("-0.133455") + std = float("0.241416") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [96] + dtype = "float32" + min_val = float("-0.0769798") + max_val = float("1.90189") + mean = float("0.638415") + std = float("0.302378") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [96] + dtype = "float32" + min_val = float("1.19415") + max_val = float("720.339") + mean = float("47.8804") + std = float("104.915") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-2.17885") + max_val = float("2.29826") + mean = float("0.133152") + std = float("0.804261") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.604363") + max_val = float("0.541052") + mean = float("-0.000537909") + std = float("0.0503947") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("-2.40858") + max_val = float("0.654719") + mean = float("-0.81931") + std = float("0.457021") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("0.733019") + max_val = float("1.99197") + mean = float("1.32325") + std = float("0.2") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96] + dtype = "float32" + min_val = float("1.96847") + max_val = float("7347.78") + mean = float("172.368") + std = float("763.087") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-28.9588") + max_val = float("15.2612") + mean = float("-0.934713") + std = float("5.31562") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.622791") + max_val = float("0.610964") + mean = float("-0.00335927") + std = float("0.066342") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [96] + dtype = "float32" + min_val = float("-3.43012") + max_val = float("1.69945") + mean = float("0.43019") + std = float("0.798988") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [96] + dtype = "float32" + min_val = float("0.0693542") + max_val = float("2.40273") + mean = float("0.791134") + std = float("0.36104") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [96] + dtype = "float32" + min_val = float("0.730734") + max_val = float("239.296") + mean = float("19.6016") + std = float("29.7552") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = 
[96] + dtype = "float32" + min_val = float("-1.65449") + max_val = float("3.46413") + mean = float("0.270252") + std = float("0.882987") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-1.11466") + max_val = float("1.0935") + mean = float("0.00791812") + std = float("0.120039") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [96] + dtype = "float32" + min_val = float("-5.00342") + max_val = float("1.18185") + mean = float("0.273499") + std = float("1.0312") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [96] + dtype = "float32" + min_val = float("0.548102") + max_val = float("6.30616") + mean = float("1.70901") + std = float("1.10806") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [96] + dtype = "float32" + min_val = float("0.401452") + max_val = float("79.4102") + mean = float("5.61265") + std = float("9.14938") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [96] + dtype = "float32" + min_val = float("-1.28944") + max_val = float("2.11406") + mean = float("0.358334") + std = float("0.782604") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.983543") + max_val = float("1.00941") + mean = float("0.00142594") + std = float("0.123025") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [192] + dtype = "float32" + min_val = float("-2.24516") + max_val = float("1.60394") + mean = float("-0.141221") + std = float("0.694769") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [192] + dtype = "float32" + min_val = float("0.492261") + max_val = float("2.54274") + mean = float("1.18118") + std = float("0.30233") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [192] + dtype = "float32" + min_val = float("2.22564") + max_val = float("2930.74") + mean = float("78.117") + std = float("233.601") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [192] + dtype = "float32" + min_val = float("-2.97997") + max_val = float("3.93014") + mean = float("-0.103294") + std = float("1.05165") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.775601") + max_val = float("0.807253") + mean = float("-0.000981649") + std = float("0.0587625") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [128] + dtype = "float32" + min_val = float("-2.76475") + max_val = float("1.75245") + mean = float("-0.639799") + std = float("0.618682") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [128] + dtype = "float32" + min_val = float("0.201337") + max_val = float("2.13478") + mean = float("1.1577") + std = float("0.303696") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [128] + dtype = "float32" + min_val = float("0.330187") + max_val = float("310.77") + mean = float("20.1263") + std = float("40.3503") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [128] + 
dtype = "float32" + min_val = float("-6.76625") + max_val = float("4.04947") + mean = float("0.254725") + std = float("1.90791") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-3.1203") + max_val = float("1.57468") + mean = float("0.00464949") + std = float("0.188413") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [96] + dtype = "float32" + min_val = float("-0.691001") + max_val = float("1.12635") + mean = float("0.284098") + std = float("0.383834") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-2.16618") + max_val = float("1.93596") + mean = float("0.00101948") + std = float("0.130652") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-1.3342") + max_val = float("0.643177") + mean = float("-0.014466") + std = float("0.129446") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.904055") + max_val = float("0.582988") + mean = float("-0.00744752") + std = float("0.0843891") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.983279") 
+ max_val = float("0.728112") + mean = float("-0.00725756") + std = float("0.097145") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-2.42689") + max_val = float("1.36788") + mean = float("-0.00454079") + std = float("0.158896") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.577534") + max_val = float("0.814785") + mean = float("-0.00399821") + std = float("0.0871958") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-1.26447") + max_val = float("0.800345") + mean = float("-0.00266239") + std = float("0.110801") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + 
data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.865236") + max_val = float("0.707685") + mean = float("-0.0149688") + std = float("0.157189") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.911688") + max_val = float("0.559965") + mean = float("-0.00539753") + std = float("0.0989298") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.710898") + max_val = float("0.776086") + mean = float("-0.000182997") + std = float("0.109296") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name = "parameter_751" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_753: + name = "parameter_753" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_754: + name = "parameter_754" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-1.46152") + max_val = float("1.10767") + mean = float("-0.00941781") + std = float("0.208816") + data = None + + +class Program_weight_tensor_parameter_755: + name = "parameter_755" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_756: + name = "parameter_756" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_757: + name = "parameter_757" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = 
None + + +class Program_weight_tensor_parameter_758: + name = "parameter_758" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_759: + name = "parameter_759" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.999123") + max_val = float("0.927899") + mean = float("0.00295053") + std = float("0.176646") + data = None + + +class Program_weight_tensor_parameter_760: + name = "parameter_760" + shape = [96] + dtype = "float32" + min_val = float("-3.73791") + max_val = float("3.22588") + mean = float("0.182662") + std = float("1.15497") + data = None + + +class Program_weight_tensor_parameter_761: + name = "parameter_761" + shape = [96] + dtype = "float32" + min_val = float("0.626809") + max_val = float("4.85717") + mean = float("1.99097") + std = float("0.757279") + data = None + + +class Program_weight_tensor_parameter_762: + name = "parameter_762" + shape = [96] + dtype = "float32" + min_val = float("29.91") + max_val = float("24728.4") + mean = float("2140.07") + std = float("3468.62") + data = None + + +class Program_weight_tensor_parameter_763: + name = "parameter_763" + shape = [96] + dtype = "float32" + min_val = float("-42.3248") + max_val = float("48.192") + mean = float("-3.1277") + std = float("18.4928") + data = None + + +class Program_weight_tensor_parameter_764: + name = "parameter_764" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.931582") + max_val = float("0.766496") + mean = float("-0.00253308") + std = float("0.10077") + data = None + + +class Program_weight_tensor_parameter_765: + name = "parameter_765" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_766: + name = "parameter_766" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_767: + name = "parameter_767" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_768: + name = "parameter_768" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_769: + name = "parameter_769" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-1.3745") + max_val = float("1.24561") + mean = float("0.00185304") + std = float("0.161557") + data = None + + +class Program_weight_tensor_parameter_770: + name = "parameter_770" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_771: + name = "parameter_771" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_772: + name = "parameter_772" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_773: + name = "parameter_773" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_774: + name = "parameter_774" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-2.09102") + max_val = float("1.70396") + mean = float("-0.00633192") + std = float("0.212712") + data = None + + +class Program_weight_tensor_parameter_775: + name = "parameter_775" + shape = [32] + dtype = 
"float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_776: + name = "parameter_776" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_777: + name = "parameter_777" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_778: + name = "parameter_778" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_779: + name = "parameter_779" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-2.56815") + max_val = float("2.51032") + mean = float("-0.0226906") + std = float("0.583319") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..316d9bf6a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +a7d47f7ed75ec9b55a42cf1c60b5fc2030e2639e9672ffbc87bea34164440ec7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/input_meta.py new file mode 100644 index 000000000..bd7febe3b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/input_meta.py @@ -0,0 +1,74 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("0.207679") + mean = float("0.00012728") + std = float("0.00194434") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504, 15] + dtype = "float32" + min_val = float("8.02509e-07") + max_val = float("0.996306") + mean = float("0.0118541") + std = float("0.0209358") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 3, 1] + dtype = "int32" + data = [1, 1, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 21504, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1020.0") + mean = float("512.0") + std = float("295.583") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 3, 5] + dtype = "float32" + data = [ + 248.448, + 614.298, + 137.72, + 133.965, + 1.06359, + 602.824, + 672.606, + 103.848, + 95.0988, + 1.06909, + 498.844, + 472.744, + 142.145, + 125.935, + 1.05772, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 3, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/model.py new file mode 100644 index 000000000..7d1254226 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/model.py @@ -0,0 +1,484 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + 
super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x-1x21504xb) <- (1x-1x21504xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(data_1, full_0) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x-1x21504xf32) <- (1x-1x21504xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + data_1, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (1x-1x21504xf32) <- (1x-1x21504xb, 1x-1x21504xf32, 1x-1x21504xf32) + where_0 = paddle._C_ops.where(greater_than_1, full_like_0, data_1) + del data_1, full_like_0, greater_than_1 + + # pd_op.transpose: (1x15x21504xf32) <- (1x21504x15xf32) + transpose_0 = paddle._C_ops.transpose(data_2, [0, 2, 1]) + del data_2 + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_2, full_3, full_3, dtype="int32") + del full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_4, data_0] + del data_0, full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (1x-1xi32) <- (1x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, stack_0) + del stack_0 + + # pd_op.squeeze: (1x-1xi32) <- (1x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_0) + del data_3 + + # builtin.combine: ([1x-1xi32, 1x-1xi32]) <- (1x-1xi32, 1x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (1x-1x2xi32) <- ([1x-1xi32, 1x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.gather_nd: (1x-1x21504xf32) <- (1x15x21504xf32, 1x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 + + # pd_op.pow: (1x-1x21504xf32) <- (1x-1x21504xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (1x-1x21504xf32) <- (1x-1x21504xf32) + pow_1 = paddle._C_ops.pow(where_0, float("6")) + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32]) <- (1x-1x5xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_5, 5, full_5) + del data_5 + + # builtin.split: (1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32) <- ([1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32, 1x-1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + split_4, + ) = split_with_num_0 + del 
split_with_num_0 + + # pd_op.full: (4xf32) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xf32) <- (4xf32) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.float32, + [float("0.5"), float("0.5"), float("-0.5"), float("-0.5")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.reshape: (1x1x4xf32) <- (4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(assign_value__0, full_int_array_1) + del assign_value__0 + + # pd_op.multiply: (1x-1x4xf32) <- (1x1x4xf32, 1x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(reshape_0, split_2) + del reshape_0, split_2 + + # pd_op.full: (4xf32) <- () + full_7 = paddle._C_ops.full( + [4], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xf32) <- (4xf32) + assign_value__1 = paddle._C_ops.assign_value_( + full_7, + [4], + paddle.float32, + [float("-0.5"), float("0.5"), float("0.5"), float("-0.5")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.reshape: (1x1x4xf32) <- (4xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(assign_value__1, full_int_array_1) + del assign_value__1, full_int_array_1 + + # pd_op.multiply: (1x-1x4xf32) <- (1x1x4xf32, 1x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(reshape_1, split_3) + del reshape_1, split_3 + + # builtin.combine: ([1x-1x4xf32, 1x-1x4xf32]) <- (1x-1x4xf32, 1x-1x4xf32) + combine_2 = [multiply_1, multiply_2] + del multiply_1, multiply_2 + + # pd_op.stack: (1x-1x4x2xf32) <- ([1x-1x4xf32, 1x-1x4xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.sin: (1x-1x1xf32) <- (1x-1x1xf32) + sin_0 = paddle._C_ops.sin(split_4) + + # pd_op.cos: (1x-1x1xf32) <- (1x-1x1xf32) + cos_0 = paddle._C_ops.cos(split_4) + del split_4 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x-1x1xf32, 1x-1x1xf32]) <- (1x-1x1xf32, 1x-1x1xf32) + combine_3 = [cos_0, sin_0] + + # pd_op.concat: (1x-1x2xf32) <- ([1x-1x1xf32, 1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_3, full_8) + del combine_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(sin_0, full_9, float("0"), True) + del full_9, sin_0 + + # builtin.combine: ([1x-1x1xf32, 1x-1x1xf32]) <- (1x-1x1xf32, 1x-1x1xf32) + combine_4 = [scale_0, cos_0] + del cos_0, scale_0 + + # pd_op.concat: (1x-1x2xf32) <- ([1x-1x1xf32, 1x-1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_4, full_8) + del combine_4, full_8 + + # builtin.combine: ([1x-1x2xf32, 1x-1x2xf32]) <- (1x-1x2xf32, 1x-1x2xf32) + combine_5 = [concat_0, concat_1] + del concat_0, concat_1 + + # pd_op.stack: (1x-1x2x2xf32) <- ([1x-1x2xf32, 1x-1x2xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -2) + del combine_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [-1, 4, 2] + + # pd_op.reshape: (-1x4x2xf32) <- (1x-1x4x2xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_2) + del full_int_array_2, stack_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [-1, 2, 2] + + # pd_op.reshape: (-1x2x2xf32) <- (1x-1x2x2xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_3) + del 
full_int_array_3, stack_3 + + # pd_op.bmm: (-1x4x2xf32) <- (-1x4x2xf32, -1x2x2xf32) + bmm_0 = paddle._C_ops.bmm(reshape_2, reshape_3) + del reshape_2, reshape_3 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_4 = [1, -1, 4, 2] + + # pd_op.reshape: (1x-1x4x2xf32) <- (-1x4x2xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(bmm_0, full_int_array_4) + del bmm_0, full_int_array_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1] + + # pd_op.slice: (1x-1x4xf32) <- (1x-1x4x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_4, [3], full_int_array_5, full_int_array_6, [1], [3] + ) + + # pd_op.add: (1x-1x4xf32) <- (1x-1x4xf32, 1x-1x1xf32) + add_0 = paddle._C_ops.add(slice_0, split_0) + del slice_0, split_0 + + # pd_op.set_value_with_tensor_: (1x-1x4x2xf32) <- (1x-1x4x2xf32, 1x-1x4xf32, 1xi64, 1xi64, 1xi64) + set_value_with_tensor__0 = paddle._C_ops.set_value_with_tensor_( + reshape_4, + add_0, + full_int_array_5, + full_int_array_6, + full_int_array_6, + [3], + [3], + [], + ) + del add_0, reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.slice: (1x-1x4xf32) <- (1x-1x4x2xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + set_value_with_tensor__0, [3], full_int_array_6, full_int_array_7, [1], [3] + ) + + # pd_op.add: (1x-1x4xf32) <- (1x-1x4xf32, 1x-1x1xf32) + add_1 = paddle._C_ops.add(slice_1, split_1) + del slice_1, split_1 + + # pd_op.set_value_with_tensor_: (1x-1x4x2xf32) <- (1x-1x4x2xf32, 1x-1x4xf32, 1xi64, 1xi64, 1xi64) + set_value_with_tensor__1 = paddle._C_ops.set_value_with_tensor_( + set_value_with_tensor__0, + add_1, + full_int_array_6, + full_int_array_7, + full_int_array_6, + [3], + [3], + [], + ) + del add_1, set_value_with_tensor__0 + + # pd_op.unsqueeze: (1x1x21504x2xf32) <- (1x21504x2xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.split_with_num: ([1x-1x1x2xf32, 1x-1x1x2xf32, 1x-1x1x2xf32, 1x-1x1x2xf32]) <- (1x-1x4x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num( + set_value_with_tensor__1, 4, full_5 + ) + del full_5 + + # builtin.split: (1x-1x1x2xf32, 1x-1x1x2xf32, 1x-1x1x2xf32, 1x-1x1x2xf32) <- ([1x-1x1x2xf32, 1x-1x1x2xf32, 1x-1x1x2xf32, 1x-1x1x2xf32]) + ( + split_5, + split_6, + split_7, + split_8, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_0 = paddle._C_ops.subtract(split_6, split_5) + del split_6 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(split_8, split_5) + del split_8 + + # pd_op.subtract: (1x-1x21504x2xf32) <- (1x1x21504x2xf32, 1x-1x1x2xf32) + subtract_2 = paddle._C_ops.subtract(unsqueeze_1, split_5) + del split_5, unsqueeze_1 + + # pd_op.multiply: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + multiply_3 = paddle._C_ops.multiply(subtract_0, subtract_0) + + # pd_op.sum: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_3, full_int_array_0, None, False) + del multiply_3 + + # pd_op.multiply: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + multiply_4 = paddle._C_ops.multiply(subtract_1, subtract_1) + + # pd_op.sum: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_0, None, False) + del multiply_4 + + # pd_op.multiply: (1x-1x21504x2xf32) <- (1x-1x21504x2xf32, 1x-1x1x2xf32) + multiply_5 = paddle._C_ops.multiply(subtract_2, 
subtract_0) + del subtract_0 + + # pd_op.sum: (1x-1x21504xf32) <- (1x-1x21504x2xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_0, None, False) + del multiply_5 + + # pd_op.multiply: (1x-1x21504x2xf32) <- (1x-1x21504x2xf32, 1x-1x1x2xf32) + multiply_6 = paddle._C_ops.multiply(subtract_2, subtract_1) + del subtract_1, subtract_2 + + # pd_op.sum: (1x-1x21504xf32) <- (1x-1x21504x2xf32, 1xi64) + sum_3 = paddle._C_ops.sum(multiply_6, full_int_array_0, None, False) + del full_int_array_0, multiply_6 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_equal: (1x-1x21504xb) <- (1x-1x21504xf32, xf32) + greater_equal_0 = paddle._C_ops.greater_equal(sum_2, full_10) + + # pd_op.less_equal: (1x-1x21504xb) <- (1x-1x21504xf32, 1x-1x1xf32) + less_equal_0 = paddle._C_ops.less_equal(sum_2, sum_0) + del sum_0, sum_2 + + # pd_op.bitwise_and: (1x-1x21504xb) <- (1x-1x21504xb, 1x-1x21504xb) + bitwise_and_0 = paddle._C_ops.bitwise_and(greater_equal_0, less_equal_0) + del greater_equal_0, less_equal_0 + + # pd_op.greater_equal: (1x-1x21504xb) <- (1x-1x21504xf32, xf32) + greater_equal_1 = paddle._C_ops.greater_equal(sum_3, full_10) + del full_10 + + # pd_op.bitwise_and: (1x-1x21504xb) <- (1x-1x21504xb, 1x-1x21504xb) + bitwise_and_1 = paddle._C_ops.bitwise_and(bitwise_and_0, greater_equal_1) + del bitwise_and_0, greater_equal_1 + + # pd_op.less_equal: (1x-1x21504xb) <- (1x-1x21504xf32, 1x-1x1xf32) + less_equal_1 = paddle._C_ops.less_equal(sum_3, sum_1) + del sum_1, sum_3 + + # pd_op.bitwise_and: (1x-1x21504xb) <- (1x-1x21504xb, 1x-1x21504xb) + bitwise_and_2 = paddle._C_ops.bitwise_and(bitwise_and_1, less_equal_1) + del bitwise_and_1, less_equal_1 + + # pd_op.cast: (1x-1x21504xf32) <- (1x-1x21504xb) + cast_0 = paddle._C_ops.cast(bitwise_and_2, paddle.float32) + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_7 = paddle._C_ops.multiply(multiply_0, cast_0) + del cast_0 + + # pd_op.shape64: (3xi64) <- (1x-1x21504xf32) + shape64_0 = paddle._C_ops.shape64(multiply_7) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_0 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (1x-1x13xf32, 1x-1x13xi64) <- (1x-1x21504xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_7, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, multiply_7 + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("21504"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x13x21504xf32) <- (1x-1x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_12, topk_1.dtype), full_12 + ) + del full_12, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.sum: (1x-1x21504xf32) <- (1x-1x13x21504xf32, 1xi64) + sum_4 = paddle._C_ops.sum(one_hot_0, full_int_array_8, None, False) + del one_hot_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + multiply_8 = paddle._C_ops.multiply(sum_4, data_6) + del sum_4 + + # pd_op.cast: (1x-1x21504xf32) <- (1x-1x21504xb) + cast_1 = paddle._C_ops.cast(bitwise_and_2, paddle.float32) + del bitwise_and_2 + + # pd_op.multiply: 
(1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_9 = paddle._C_ops.multiply(multiply_8, cast_1) + del cast_1, multiply_8 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + multiply_10 = paddle._C_ops.multiply(multiply_9, data_6) + del data_6, multiply_9 + + # pd_op.sum: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + sum_5 = paddle._C_ops.sum(multiply_10, full_int_array_8, None, False) + del full_int_array_8 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_9 = [] + + # pd_op.max: (xf32) <- (1x21504xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_5, full_int_array_9, False) + del full_int_array_9 + + # pd_op.full: (xf32) <- () + full_13 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_13) + del ( + full_13, + max_0, + multiply_0, + multiply_10, + set_value_with_tensor__1, + sum_5, + unsqueeze_0, + where_0, + ) + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..68e54c8f3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +a1c982d6209f1af82607ac615f8d65901a61675e46badb73a97a8a6613374fbc \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/input_meta.py new file mode 100644 index 000000000..1e1760e64 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [6, 21504, 5] + dtype = "float32" + min_val = float("-63958.4") + max_val = float("98465.1") + mean = float("301.803") + std = float("1547.43") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [6, 2] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/model.py new file mode 100644 index 000000000..ad5093869 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/model.py @@ -0,0 +1,180 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32]) <- (6x21504x5xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 5, full_0) + 
del data_0, full_0 + + # builtin.split: (6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32) <- ([6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + split_4, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.cos: (6x21504x1xf32) <- (6x21504x1xf32) + cos_0 = paddle._C_ops.cos(split_4) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (6x21504x1xf32) <- (6x21504x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cos_0, full_1, float("0"), True) + del cos_0 + + # pd_op.sin: (6x21504x1xf32) <- (6x21504x1xf32) + sin_0 = paddle._C_ops.sin(split_4) + del split_4 + + # pd_op.scale: (6x21504x1xf32) <- (6x21504x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(sin_0, full_1, float("0"), True) + del full_1, sin_0 + + # pd_op.multiply: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, split_2) + + # pd_op.multiply: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + multiply_1 = paddle._C_ops.multiply(scale_1, split_2) + del split_2 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (6x21504x1xf32) <- (6x21504x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(scale_1, full_2, float("0"), True) + del full_2, scale_1 + + # pd_op.multiply: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + multiply_2 = paddle._C_ops.multiply(scale_2, split_3) + del scale_2 + + # pd_op.multiply: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + multiply_3 = paddle._C_ops.multiply(scale_0, split_3) + del scale_0, split_3 + + # pd_op.add: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + add_0 = paddle._C_ops.add(split_0, multiply_0) + + # pd_op.add: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + add_1 = paddle._C_ops.add(add_0, multiply_2) + + # pd_op.add: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + add_2 = paddle._C_ops.add(split_1, multiply_1) + + # pd_op.add: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + add_3 = paddle._C_ops.add(add_2, multiply_3) + + # pd_op.subtract: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + subtract_0 = paddle._C_ops.subtract(split_0, multiply_0) + del multiply_0, split_0 + + # pd_op.add: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + add_4 = paddle._C_ops.add(subtract_0, multiply_2) + + # pd_op.subtract: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + subtract_1 = paddle._C_ops.subtract(split_1, multiply_1) + del multiply_1, split_1 + + # pd_op.add: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + add_5 = paddle._C_ops.add(subtract_1, multiply_3) + + # pd_op.subtract: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + subtract_2 = paddle._C_ops.subtract(subtract_0, multiply_2) + del subtract_0 + + # pd_op.subtract: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + subtract_3 = paddle._C_ops.subtract(subtract_1, multiply_3) + del subtract_1 + + # pd_op.subtract: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + subtract_4 = paddle._C_ops.subtract(add_0, multiply_2) + del add_0, multiply_2 + + # pd_op.subtract: (6x21504x1xf32) <- (6x21504x1xf32, 6x21504x1xf32) + subtract_5 = paddle._C_ops.subtract(add_2, multiply_3) + del add_2, multiply_3 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([6x21504x1xf32, 6x21504x1xf32, 
6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32]) <- (6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32) + combine_0 = [ + add_1, + add_3, + add_4, + add_5, + subtract_2, + subtract_3, + subtract_4, + subtract_5, + ] + del add_1, add_3, add_4, add_5, subtract_2, subtract_3, subtract_4, subtract_5 + + # pd_op.concat: (6x21504x8xf32) <- ([6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32, 6x21504x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_3) + del combine_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([6x1xf32, 6x1xf32]) <- (6x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_1, 2, full_4) + del data_1, full_4 + + # builtin.split: (6x1xf32, 6x1xf32) <- ([6x1xf32, 6x1xf32]) + ( + split_5, + split_6, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32]) <- (6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32) + combine_1 = [ + split_6, + split_5, + split_6, + split_5, + split_6, + split_5, + split_6, + split_5, + ] + del split_5, split_6 + + # pd_op.concat: (6x8xf32) <- ([6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32, 6x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_3) + del combine_1, full_3 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 8] + + # pd_op.reshape: (6x1x8xf32) <- (6x8xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (6x21504x8xf32) <- (6x21504x8xf32, 6x1x8xf32) + divide_0 = paddle._C_ops.divide(concat_0, reshape_0) + del concat_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..5ad04b22b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +fc6c1013a1c537ed9069d967e1ede3864e15bc7a5dd09d50bf07295e0ee1d545 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/input_meta.py new file mode 100644 index 000000000..47a8e449f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/input_meta.py @@ -0,0 +1,63 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 21504, 15] + dtype = "float32" + min_val = float("4.06804e-06") + max_val = float("0.992795") + mean = float("0.0118279") + std = 
float("0.0190866") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 21504, 4] + dtype = "float32" + min_val = float("-5.79205") + max_val = float("3.41272") + mean = float("-0.0335645") + std = float("0.348286") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504, 91] + dtype = "float32" + min_val = float("1.0") + max_val = float("10.0") + mean = float("1.0989") + std = float("0.938258") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 21504, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1020.0") + mean = float("512.0") + std = float("295.583") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 21504, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [91] + dtype = "float32" + max_val = float("1.5708") + mean = float("0.785398") + std = float("0.458461") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/model.py new file mode 100644 index 000000000..805ee21d7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/model.py @@ -0,0 +1,96 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x21504x2xf32, 1x21504x2xf32]) <- (1x21504x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_1, 2, full_0) + del data_1 + + # builtin.split: (1x21504x2xf32, 1x21504x2xf32) <- ([1x21504x2xf32, 1x21504x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.multiply: (1x21504x2xf32) <- (1x21504x2xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(split_0, data_4) + + # pd_op.add: (1x21504x2xf32) <- (1x21504x2xf32, 1x21504x2xf32) + add_0 = paddle._C_ops.add(multiply_0, data_3) + del data_3 + + # pd_op.elu: (1x21504x2xf32) <- (1x21504x2xf32) + elu_0 = paddle._C_ops.elu(split_1, float("1")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x21504x2xf32) <- (1x21504x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(elu_0, full_1, float("1"), True) + + # pd_op.multiply: (1x21504x2xf32) <- (1x21504x2xf32, 1x21504x1xf32) + multiply_1 = paddle._C_ops.multiply(scale_0, data_4) + del data_4 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, 21504, 1, 91] + + # pd_op.reshape: (1x21504x1x91xf32) <- (1x21504x91xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_2, full_int_array_0) + del data_2, full_int_array_0 + + # pd_op.softmax: (1x21504x1x91xf32) <- (1x21504x1x91xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.matmul: (1x21504x1xf32) <- (1x21504x1x91xf32, 91xf32) + matmul_0 = paddle._C_ops.matmul(softmax_0, data_5, False, False) + del data_5 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x21504x2xf32, 1x21504x2xf32, 1x21504x1xf32]) <- (1x21504x2xf32, 1x21504x2xf32, 1x21504x1xf32) + combine_0 = [add_0, multiply_1, matmul_0] + 
+ # pd_op.concat: (1x21504x5xf32) <- ([1x21504x2xf32, 1x21504x2xf32, 1x21504x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.share_data_: (1x21504x15xf32) <- (1x21504x15xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (1x21504x5xf32) <- (1x21504x5xf32) + share_data__1 = concat_0.detach() + del ( + add_0, + concat_0, + elu_0, + full_0, + full_1, + full_2, + matmul_0, + multiply_0, + multiply_1, + scale_0, + softmax_0, + split_0, + split_1, + ) + + return share_data__0, share_data__1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..4d4ae9229 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +abb57645d2484441c98fdd426775c0779eb8f171f1d589f9f8c3a56db2cc1026 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/input_meta.py new file mode 100644 index 000000000..475fca721 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 768, 32, 32] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.8559") + mean = float("0.165505") + std = float("0.617142") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 384, 64, 64] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("14.8137") + mean = float("0.141144") + std = float("0.643106") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 192, 128, 128] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("14.7666") + mean = float("0.151417") + std = float("0.633179") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/model.py new file mode 100644 index 000000000..4c03df97c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/model.py @@ -0,0 +1,1292 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + 
parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("32"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (32xf32) <- (32xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([32xf32, 32xf32]) <- (32xf32, 32xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) + combine_1 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.cast: (32x32x2xf32) <- (32x32x2xf32) + cast_1 = paddle._C_ops.cast(stack_0, paddle.float32) + del stack_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [1, -1, 2] + + # pd_op.reshape: (1x1024x2xf32) <- (32x32x2xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(cast_1, full_int_array_0) + del cast_1 + + # pd_op.full: (1x1024x1xf32) <- () + full_5 = paddle._C_ops.full( + [1, 1024, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("64"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (64xf32) <- (64xi64) + cast_2 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), 
True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_7, float("0"), True) + del full_7, scale_2 + + # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) + combine_2 = [scale_3, scale_3] + del scale_3 + + # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_2) + del combine_2 + + # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) + combine_3 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) + stack_1 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.cast: (64x64x2xf32) <- (64x64x2xf32) + cast_3 = paddle._C_ops.cast(stack_1, paddle.float32) + del stack_1 + + # pd_op.reshape: (1x4096x2xf32) <- (64x64x2xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(cast_3, full_int_array_0) + del cast_3 + + # pd_op.full: (1x4096x1xf32) <- () + full_8 = paddle._C_ops.full( + [1, 4096, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("128"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (128xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (128xf32) <- (128xi64) + cast_4 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("0.5"), True) + del cast_4, full_3 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_5 = paddle._C_ops.scale(scale_4, full_10, float("0"), True) + del full_10, scale_4 + + # builtin.combine: ([128xf32, 128xf32]) <- (128xf32, 128xf32) + combine_4 = [scale_5, scale_5] + del scale_5 + + # pd_op.meshgrid: ([128x128xf32, 128x128xf32]) <- ([128xf32, 128xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (128x128xf32, 128x128xf32) <- ([128x128xf32, 128x128xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # builtin.combine: ([128x128xf32, 128x128xf32]) <- (128x128xf32, 128x128xf32) + combine_5 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (128x128x2xf32) <- ([128x128xf32, 128x128xf32]) + stack_2 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.cast: (128x128x2xf32) <- (128x128x2xf32) + cast_5 = paddle._C_ops.cast(stack_2, paddle.float32) + del stack_2 + + # pd_op.reshape: (1x16384x2xf32) <- (128x128x2xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(cast_5, full_int_array_0) + del cast_5, full_int_array_0 + + # pd_op.full: (1x16384x1xf32) <- () + full_11 = paddle._C_ops.full( + [1, 16384, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_12 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_12 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_12 + + # 
builtin.combine: ([1x1024x2xf32, 1x4096x2xf32, 1x16384x2xf32]) <- (1x1024x2xf32, 1x4096x2xf32, 1x16384x2xf32) + combine_6 = [reshape_0, reshape_1, reshape_2] + del reshape_0, reshape_1, reshape_2 + + # pd_op.concat: (1x21504x2xf32) <- ([1x1024x2xf32, 1x4096x2xf32, 1x16384x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_6, full_12) + del combine_6 + + # builtin.combine: ([1x1024x1xf32, 1x4096x1xf32, 1x16384x1xf32]) <- (1x1024x1xf32, 1x4096x1xf32, 1x16384x1xf32) + combine_7 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (1x21504x1xf32) <- ([1x1024x1xf32, 1x4096x1xf32, 1x16384x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_7, full_12) + del combine_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_1 + + # pd_op.pool2d: (1x768x1x1xf32) <- (1x768x32x32xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_1, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_80, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_80 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_2 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_79, full_int_array_2) + del parameter_79 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_3) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (1x768x32x32xf32) <- (1x768x32x32xf32, 1x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (1x768x32x32xf32) <- (1x768x32x32xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_78 + + # pd_op.batch_norm_: (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_77, + parameter_76, + parameter_75, + parameter_74, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_74, parameter_75, parameter_76, parameter_77 + + # pd_op.swish: (1x768x32x32xf32) <- (1x768x32x32xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (1x768x32x32xf32) <- (1x768x32x32xf32, 1x768x32x32xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (1x15x32x32xf32) <- (1x768x32x32xf32, 15x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_73, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73 + + # pd_op.reshape: (1x15x1x1xf32) <- (15xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_72, full_int_array_2) + del parameter_72 + + # pd_op.add: (1x15x32x32xf32) <- (1x15x32x32xf32, 1x15x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_4) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, 
parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_71 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_70, full_int_array_2) + del parameter_70 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_5) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (1x768x32x32xf32) <- (1x768x32x32xf32, 1x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + + # pd_op.conv2d: (1x768x32x32xf32) <- (1x768x32x32xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (1x768x32x32xf32) <- (1x768x32x32xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x4x32x32xf32) <- (1x768x32x32xf32, 4x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_64, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_63, full_int_array_2) + del parameter_63 + + # pd_op.add: (1x4x32x32xf32) <- (1x4x32x32xf32, 1x4x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_6) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_0, parameter_62, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_62 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_61, full_int_array_2) + del parameter_61 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (1x768x32x32xf32) <- (1x768x32x32xf32, 1x768x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_0, sigmoid_2) + del data_0 + + # pd_op.conv2d: (1x768x32x32xf32) <- (1x768x32x32xf32, 768x768x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_60, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_60 + + # pd_op.batch_norm_: (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_59, + parameter_58, + parameter_57, + parameter_56, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_56, parameter_57, parameter_58, parameter_59 + + # pd_op.swish: 
(1x768x32x32xf32) <- (1x768x32x32xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (1x91x32x32xf32) <- (1x768x32x32xf32, 91x768x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_2, parameter_55, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_55 + + # pd_op.reshape: (1x91x1x1xf32) <- (91xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_54, full_int_array_2) + del parameter_54 + + # pd_op.add: (1x91x32x32xf32) <- (1x91x32x32xf32, 1x91x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_8, reshape_8) + + # pd_op.sigmoid: (1x15x32x32xf32) <- (1x15x32x32xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (1x15x1024xf32) <- (1x15x32x32xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_3, 2, 3) + + # pd_op.transpose: (1x1024x15xf32) <- (1x15x1024xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (1x4x1024xf32) <- (1x4x32x32xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (1x1024x4xf32) <- (1x4x1024xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.flatten: (1x91x1024xf32) <- (1x91x32x32xf32) + flatten_2 = paddle._C_ops.flatten(add_6, 2, 3) + + # pd_op.transpose: (1x1024x91xf32) <- (1x91x1024xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.pool2d: (1x384x1x1xf32) <- (1x384x64x64xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_1, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_52, full_int_array_2) + del parameter_52 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_9, reshape_9) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.multiply: (1x384x64x64xf32) <- (1x384x64x64xf32, 1x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + + # pd_op.conv2d: (1x384x64x64xf32) <- (1x384x64x64xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (1x384x64x64xf32) <- (1x384x64x64xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.add: (1x384x64x64xf32) <- (1x384x64x64xf32, 1x384x64x64xf32) + add_8 = paddle._C_ops.add(swish_3, data_1) + + # pd_op.conv2d: (1x15x64x64xf32) <- (1x384x64x64xf32, 15x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + add_8, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + 
) + del parameter_46 + + # pd_op.reshape: (1x15x1x1xf32) <- (15xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_45, full_int_array_2) + del parameter_45 + + # pd_op.add: (1x15x64x64xf32) <- (1x15x64x64xf32, 1x15x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_10) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_1, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_43, full_int_array_2) + del parameter_43 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_11) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (1x384x64x64xf32) <- (1x384x64x64xf32, 1x384x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_1, sigmoid_5) + + # pd_op.conv2d: (1x384x64x64xf32) <- (1x384x64x64xf32, 384x384x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (1x384x64x64xf32) <- (1x384x64x64xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (1x4x64x64xf32) <- (1x384x64x64xf32, 4x384x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_4, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_36, full_int_array_2) + del parameter_36 + + # pd_op.add: (1x4x64x64xf32) <- (1x4x64x64xf32, 1x4x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_14, reshape_12) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_34, full_int_array_2) + del parameter_34 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_15, reshape_13) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.multiply: (1x384x64x64xf32) <- (1x384x64x64xf32, 1x384x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_1, sigmoid_6) + del data_1 + + # pd_op.conv2d: (1x384x64x64xf32) <- (1x384x64x64xf32, 384x384x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + 
batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (1x384x64x64xf32) <- (1x384x64x64xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x91x64x64xf32) <- (1x384x64x64xf32, 91x384x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x91x1x1xf32) <- (91xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_27, full_int_array_2) + del parameter_27 + + # pd_op.add: (1x91x64x64xf32) <- (1x91x64x64xf32, 1x91x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_17, reshape_14) + + # pd_op.sigmoid: (1x15x64x64xf32) <- (1x15x64x64xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_9) + del add_9 + + # pd_op.flatten: (1x15x4096xf32) <- (1x15x64x64xf32) + flatten_3 = paddle._C_ops.flatten(sigmoid_7, 2, 3) + + # pd_op.transpose: (1x4096x15xf32) <- (1x15x4096xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.flatten: (1x4x4096xf32) <- (1x4x64x64xf32) + flatten_4 = paddle._C_ops.flatten(add_11, 2, 3) + + # pd_op.transpose: (1x4096x4xf32) <- (1x4x4096xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (1x91x4096xf32) <- (1x91x64x64xf32) + flatten_5 = paddle._C_ops.flatten(add_13, 2, 3) + + # pd_op.transpose: (1x4096x91xf32) <- (1x91x4096xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.pool2d: (1x192x1x1xf32) <- (1x192x128x128xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_1, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + pool2d_2, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_25, full_int_array_2) + del parameter_25 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_18, reshape_15) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_14) + del add_14 + + # pd_op.multiply: (1x192x128x128xf32) <- (1x192x128x128xf32, 1x192x1x1xf32) + multiply_6 = paddle._C_ops.multiply(data_2, sigmoid_8) + + # pd_op.conv2d: (1x192x128x128xf32) <- (1x192x128x128xf32, 192x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + multiply_6, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (1x192x128x128xf32) <- (1x192x128x128xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.add: (1x192x128x128xf32) <- (1x192x128x128xf32, 1x192x128x128xf32) + add_15 = paddle._C_ops.add(swish_6, data_2) + + # pd_op.conv2d: (1x15x128x128xf32) <- (1x192x128x128xf32, 15x192x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + add_15, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x15x1x1xf32) <- (15xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_18, full_int_array_2) + del parameter_18 + + # pd_op.add: (1x15x128x128xf32) <- (1x15x128x128xf32, 1x15x1x1xf32) + add_16 = paddle._C_ops.add(conv2d_20, reshape_16) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_16, full_int_array_2) + del parameter_16 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_17 = paddle._C_ops.add(conv2d_21, reshape_17) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_9 = paddle._C_ops.sigmoid(add_17) + del add_17 + + # pd_op.multiply: (1x192x128x128xf32) <- (1x192x128x128xf32, 1x192x1x1xf32) + multiply_7 = paddle._C_ops.multiply(data_2, sigmoid_9) + + # pd_op.conv2d: (1x192x128x128xf32) <- (1x192x128x128xf32, 192x192x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + multiply_7, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (1x192x128x128xf32) <- (1x192x128x128xf32) + swish_7 = paddle._C_ops.swish(batch_norm__42) + + # pd_op.conv2d: (1x4x128x128xf32) <- (1x192x128x128xf32, 4x192x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_7, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_9, full_int_array_2) + del parameter_9 + + # pd_op.add: (1x4x128x128xf32) <- (1x4x128x128xf32, 1x4x1x1xf32) + add_18 = paddle._C_ops.add(conv2d_23, reshape_18) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_7, full_int_array_2) + del parameter_7 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_24, reshape_19) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_10 = paddle._C_ops.sigmoid(add_19) + del add_19 + + # pd_op.multiply: (1x192x128x128xf32) <- (1x192x128x128xf32, 
1x192x1x1xf32) + multiply_8 = paddle._C_ops.multiply(data_2, sigmoid_10) + del data_2 + + # pd_op.conv2d: (1x192x128x128xf32) <- (1x192x128x128xf32, 192x192x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + multiply_8, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (1x192x128x128xf32) <- (1x192x128x128xf32) + swish_8 = paddle._C_ops.swish(batch_norm__48) + + # pd_op.conv2d: (1x91x128x128xf32) <- (1x192x128x128xf32, 91x192x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_8, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x91x1x1xf32) <- (91xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(parameter_0, full_int_array_2) + del full_int_array_2, parameter_0 + + # pd_op.add: (1x91x128x128xf32) <- (1x91x128x128xf32, 1x91x1x1xf32) + add_20 = paddle._C_ops.add(conv2d_26, reshape_20) + + # pd_op.sigmoid: (1x15x128x128xf32) <- (1x15x128x128xf32) + sigmoid_11 = paddle._C_ops.sigmoid(add_16) + del add_16 + + # pd_op.flatten: (1x15x16384xf32) <- (1x15x128x128xf32) + flatten_6 = paddle._C_ops.flatten(sigmoid_11, 2, 3) + + # pd_op.transpose: (1x16384x15xf32) <- (1x15x16384xf32) + transpose_6 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.flatten: (1x4x16384xf32) <- (1x4x128x128xf32) + flatten_7 = paddle._C_ops.flatten(add_18, 2, 3) + + # pd_op.transpose: (1x16384x4xf32) <- (1x4x16384xf32) + transpose_7 = paddle._C_ops.transpose(flatten_7, [0, 2, 1]) + del flatten_7 + + # pd_op.flatten: (1x91x16384xf32) <- (1x91x128x128xf32) + flatten_8 = paddle._C_ops.flatten(add_20, 2, 3) + + # pd_op.transpose: (1x16384x91xf32) <- (1x91x16384xf32) + transpose_8 = paddle._C_ops.transpose(flatten_8, [0, 2, 1]) + del flatten_8 + + # builtin.combine: ([1x1024x15xf32, 1x4096x15xf32, 1x16384x15xf32]) <- (1x1024x15xf32, 1x4096x15xf32, 1x16384x15xf32) + combine_8 = [transpose_0, transpose_3, transpose_6] + + # pd_op.concat: (1x21504x15xf32) <- ([1x1024x15xf32, 1x4096x15xf32, 1x16384x15xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_8, full_12) + del combine_8 + + # builtin.combine: ([1x1024x4xf32, 1x4096x4xf32, 1x16384x4xf32]) <- (1x1024x4xf32, 1x4096x4xf32, 1x16384x4xf32) + combine_9 = [transpose_1, transpose_4, transpose_7] + + # pd_op.concat: (1x21504x4xf32) <- ([1x1024x4xf32, 1x4096x4xf32, 1x16384x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([1x1024x91xf32, 1x4096x91xf32, 1x16384x91xf32]) <- (1x1024x91xf32, 1x4096x91xf32, 1x16384x91xf32) + combine_10 = [transpose_2, transpose_5, transpose_8] + + # pd_op.concat: (1x21504x91xf32) <- ([1x1024x91xf32, 1x4096x91xf32, 1x16384x91xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_10, full_12) + del ( + add_1, + add_11, + add_13, + add_15, + add_18, + add_20, + add_4, + add_6, + add_8, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + 
batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_10, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_12, + full_int_array_1, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_18, + reshape_19, + reshape_20, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_10, + sigmoid_11, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + sigmoid_9, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + swish_6, + swish_7, + swish_8, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/weight_meta.py new file mode 100644 index 000000000..b5c98be8b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/weight_meta.py @@ -0,0 +1,745 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [91] + dtype = "float32" + min_val = float("1.0") + max_val = float("10.0") + mean = float("1.0989") + std = float("0.938258") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [91, 192, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 
1, 1] + dtype = "float32" + min_val = float("-0.420665") + max_val = float("0.457819") + mean = float("0.000411122") + std = float("0.10168") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0403718") + max_val = float("0.0407056") + mean = float("7.11352e-06") + std = float("0.0100255") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.037025") + max_val = float("0.0372395") + mean = float("-0.00015253") + std = float("0.0100632") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.427028") + max_val = float("0.417813") + mean = float("-0.000532781") + std = float("0.101908") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0467618") + max_val = float("0.0421866") + mean = float("5.02445e-05") + std = float("0.0100046") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [15] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [15, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0388388") + max_val = float("0.0362681") + mean = float("1.86818e-05") + std = float("0.0100246") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.423573") + max_val = float("0.415424") + mean = float("-0.000960885") + std = float("0.101777") + data = None + + +class Program_weight_tensor_parameter_25: + name = 
"parameter_25" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0445055") + max_val = float("0.0375154") + mean = float("-1.1306e-05") + std = float("0.0099596") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [91] + dtype = "float32" + min_val = float("1.0") + max_val = float("10.0") + mean = float("1.0989") + std = float("0.938258") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [91, 384, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.321228") + max_val = float("0.311238") + mean = float("0.000141189") + std = float("0.072261") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.049454") + max_val = float("0.0463895") + mean = float("-4.90638e-05") + std = float("0.0100202") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0393483") + max_val = float("0.0357258") + mean = float("-2.82805e-05") + std = float("0.00994175") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.30457") + max_val = float("0.408674") + mean = float("0.000253346") + std = float("0.0724243") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + 
dtype = "float32" + min_val = float("-0.0442846") + max_val = float("0.0451698") + mean = float("-1.03823e-05") + std = float("0.00999299") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [15] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [15, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0403172") + max_val = float("0.0419366") + mean = float("1.60246e-05") + std = float("0.0100121") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.368617") + max_val = float("0.2941") + mean = float("-0.000356655") + std = float("0.0722282") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0445604") + max_val = float("0.0431038") + mean = float("1.08175e-05") + std = float("0.0100079") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [91] + dtype = "float32" + min_val = float("1.0") + max_val = float("10.0") + mean = float("1.0989") + std = float("0.938258") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [91, 768, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.253106") + max_val = float("0.234041") + mean = float("5.22974e-05") + std = float("0.0510744") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0466099") + max_val = float("0.049475") + mean = float("-1.48928e-05") + std = float("0.0100101") + data = None + + +class Program_weight_tensor_parameter_63: + name = 
"parameter_63" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0412352") + max_val = float("0.0395057") + mean = float("-3.96412e-05") + std = float("0.00999212") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.243763") + max_val = float("0.257337") + mean = float("-9.37645e-06") + std = float("0.0510448") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0467227") + max_val = float("0.0455831") + mean = float("1.09615e-05") + std = float("0.00998477") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [15] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [15, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0386572") + max_val = float("0.0508844") + mean = float("1.37647e-05") + std = float("0.0100225") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.246625") + max_val = float("0.248906") + mean = float("-1.3066e-05") + std = float("0.0509498") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0465822") + max_val = float("0.0514786") + mean = float("-3.29765e-07") + std = float("0.0100021") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..0dad282f9 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +9c2ba69b001c30f35f1fff615eb077d3cc490e07ffb0a0a5110d683d166c9e2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py new file mode 100644 index 000000000..a4b29d39d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/input_meta.py @@ -0,0 +1,63 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000336682") + std = float("0.0183458") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 25, 1] + dtype = "int32" + data = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00841704") + std = float("0.0913575") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 25, 5] + dtype = "float32" + min_val = float("0.666691") + max_val = float("891.995") + mean = float("262.9") + std = float("311.619") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("9.76424e-05") + mean = float("5.83661e-10") + std = float("1.62072e-07") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("0.450419") + mean = float("6.86159e-05") + std = float("0.00270931") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py new file mode 100644 index 000000000..f6c3c14ea --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/model.py @@ -0,0 +1,197 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x25x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("25"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (25xi32) <- (1x25x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # 
pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (25xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 5] + + # pd_op.reshape: (25x5xf32) <- (1x25x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (21504x5xf32) <- (25x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (15xi64) <- () + full_6 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x25x1xf32) <- (1x25x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # 
pd_op.max: (1x25x1xf32) <- (1x25x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x25x1xf32) <- (1x25x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x21504xf32) <- (1x25x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..dc95f7eb1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +dfc510274da062c371bd6a71687a1b4af2f590fd054d89e9c1d4f310a3f1a66c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py new file mode 100644 index 000000000..a51e55d48 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/input_meta.py @@ -0,0 +1,104 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [6] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00199963") + std = float("0.053223") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 6, 21504] + dtype = "float32" + max_val = float("0.533057") + mean = float("8.40499e-05") + std = float("0.00395285") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 6, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000333271") + std = float("0.0182527") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + 
shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 6, 1] + dtype = "int32" + data = [2, 2, 3, 10, 3, 10] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 6, 5] + dtype = "float32" + data = [ + 653.0, + 372.0, + 26.2372, + 7.18399, + 0.896055, + 647.0, + 376.0, + 24.577, + 7.20924, + 0.96007, + 1017.5, + 514.0, + 43.1335, + 24.7487, + 0.785398, + 1021.0, + 516.5, + 24.7487, + 16.2635, + 0.785398, + 247.234, + 69.7757, + 27.5441, + 45.4497, + 0.866302, + 247.013, + 68.5477, + 10.5418, + 22.6716, + 0.847817, + ] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 6, 21504] + dtype = "float32" + max_val = float("0.000219842") + mean = float("2.68748e-09") + std = float("6.40427e-07") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py new file mode 100644 index 000000000..a62adcdd8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/model.py @@ -0,0 +1,251 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x21504xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x21504xb) <- (1x1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (1x-1x21504xb) <- (1x1x21504xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x-1x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_3, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (1x21504x-1xf32) <- (1x21504xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_1, argmax_0.dtype), data_1 + ) + del argmax_0, data_1 + + # pd_op.transpose: (1x-1x21504xf32) <- (1x21504x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x-1x21504xf32) <- (1x-1x21504xb, 1x-1x21504xf32, 1x-1x21504xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (1x21504xi64) <- (1x-1x21504xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- 
(1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (-1xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_2) + del full_int_array_2, gather_0 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_4) + del full_4, sum_0 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del full_5 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [-1, 5] + + # pd_op.reshape: (-1x5xf32) <- (1x-1x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_3) + del data_7, full_int_array_3 + + # pd_op.gather: (21504x5xf32) <- (-1x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_4) + del full_int_array_4, gather_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_6, where_1.dtype), full_6 + ) + del full_6 + + # pd_op.full: (15xi64) <- () + full_7 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x-1x21504xf32) <- 
(1x-1x21504xf32, 1x-1x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_8, where_0) + del data_8 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_5, True) + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_3 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..bd368ecea --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +0739535a303e963f75e3d3f35bf46eb7168b11f70d4b9ad32cbdbb7a3b49e344 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py new file mode 100644 index 000000000..81be1c2ff --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/input_meta.py @@ -0,0 +1,82 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000604539") + std = float("0.0245799") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 3, 1] + 
dtype = "int32" + data = [1, 1, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00181362") + std = float("0.0425479") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 3, 5] + dtype = "float32" + data = [ + 248.448, + 614.298, + 137.72, + 133.965, + 1.06359, + 602.824, + 672.606, + 103.848, + 95.0988, + 1.06909, + 498.844, + 472.744, + 142.145, + 125.935, + 1.05772, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("4.25288e-07") + mean = float("7.54294e-12") + std = float("1.67909e-09") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 3, 21504] + dtype = "float32" + max_val = float("0.207679") + mean = float("0.00012728") + std = float("0.00194434") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py new file mode 100644 index 000000000..6f65f2340 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/model.py @@ -0,0 +1,196 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x-1x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_2, cast_0) + del cast_0, data_2 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (-1xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_2 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_2) + del data_4, full_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_3, paddle.int32, paddle.framework._current_expected_place() + ) + del full_3 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_0 = 
paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 5] + + # pd_op.reshape: (-1x5xf32) <- (1x-1x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del data_5, full_int_array_1 + + # pd_op.gather: (21504x5xf32) <- (-1x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_4, where_0.dtype), full_4 + ) + del full_4 + + # pd_op.full: (15xi64) <- () + full_5 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_5, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_1) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x21504xf32) + multiply_3 = paddle._C_ops.multiply(data_7, data_1) + del data_1, data_7 + + # pd_op.max: (1x-1x1xf32) <- (1x-1x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (1x-1x21504xf32) <- (1x-1x21504xf32, 1x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x21504xf32) <- (1x-1x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_4, False) + del full_int_array_4, multiply_4 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 
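Note: the snippet below is an illustrative sketch only and is not part of the generated sample set or of this patch. It shows one way these extracted subgraphs can be exercised end to end, using subgraph_15 above as the example: instantiate its GraphModule and feed tensors whose shapes and dtypes mirror subgraph_15/input_meta.py. The local availability of GraphModule, the random input values, and the per-tensor comments are assumptions for the sketch; the run is only meant as a shape/consistency check against the output annotations in model.py, not as a reproduction of the original PP-YOLOE-R-L pipeline.

import paddle

# Assumes GraphModule is the class defined in subgraph_15/model.py above
# (e.g. copied into a local module). Values are random; only shapes and
# dtypes follow subgraph_15/input_meta.py.
net = GraphModule()
net.eval()

data_0 = paddle.full([], 3, dtype="int64")            # scalar count, per input_meta
data_1 = paddle.rand([1, 3, 21504], dtype="float32")  # score-like map
data_2 = paddle.zeros([1, 1], dtype="int32")          # offset, 0 in the sample
data_3 = paddle.ones([1, 3, 1], dtype="int32")        # per-entry integer ids
data_4 = paddle.rand([1, 21504], dtype="float32")     # mask-like map
data_5 = paddle.rand([1, 3, 5], dtype="float32")      # 5-dim box parameters per entry
data_6 = paddle.rand([1, 3, 21504], dtype="float32")
data_7 = paddle.rand([1, 3, 21504], dtype="float32")

with paddle.no_grad():
    reshape_0, multiply_0 = net(
        data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7
    )

# Per the output annotations in model.py, the expected shapes are
# [1, 21504, 5] and [1, 21504, 15].
print(reshape_0.shape, multiply_0.shape)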
diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..2a28039c8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +76de3a25bbfdb9c5decc06d06c4e92a9c9aaf027b35f42107faadedc97adb77a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/input_meta.py new file mode 100644 index 000000000..a0f4dc57e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/input_meta.py @@ -0,0 +1,5 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/model.py new file mode 100644 index 000000000..59825e860 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(data_0, full_0) + del data_0 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_1, paddle.int64) + del equal_1 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + return equal_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..e36947479 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +933a1da5ea0509dc0825c747220a06198cbaaf34ad1a890eaca225939ed98247 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": 
"PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/input_meta.py new file mode 100644 index 000000000..eef1c0cfc --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/input_meta.py @@ -0,0 +1,240 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_27: + name = "data_27" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_28: + name = "data_28" + shape = [1] + dtype = "float32" + data = [1.0] + 
+ +class Program_weight_tensor_data_29: + name = "data_29" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_30: + name = "data_30" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_31: + name = "data_31" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_32: + name = "data_32" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_33: + name = "data_33" + shape = [1, 3, 1024, 1024] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("-1.26739") + std = float("0.913724") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/model.py new file mode 100644 index 000000000..502d4c722 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/model.py @@ -0,0 +1,7594 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, 
+ parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + 
parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + 
parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + data_27, + data_28, + data_29, + data_30, + data_31, + data_32, + data_33, + ): + # pd_op.conv2d: (1x32x512x512xf32) <- (1x3x1024x1024xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_33, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_33, parameter_697 + + # pd_op.batch_norm_: (1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.swish: (1x32x512x512xf32) <- (1x32x512x512xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (1x32x512x512xf32) <- (1x32x512x512xf32, 32x32x3x3xf32) + 
conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x512x512xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (1x32x512x512xf32) <- (1x32x512x512xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x64x512x512xf32) <- (1x32x512x512xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (1x64x512x512xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x512x512xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.swish: (1x64x512x512xf32) <- (1x64x512x512xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (1x96x256x256xf32) <- (1x64x512x512xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (1x96x256x256xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x256x256xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.swish: (1x96x256x256xf32) <- (1x96x256x256xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x96x256x256xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, 
parameter_675, parameter_676 + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x96x256x256xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_661, + parameter_660, + parameter_659, + parameter_658, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_658, parameter_659, parameter_660, parameter_661 + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_657 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_656, + parameter_655, + parameter_654, + parameter_653, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_653, parameter_654, parameter_655, parameter_656 + + # pd_op.multiply: (1x48x256x256xf32) <- (1xf32, 1x48x256x256xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (1x48x256x256xf32) <- (1x48x256x256xf32, 1x48x256x256xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (1x48x256x256xf32) <- (1x48x256x256xf32, 1x48x256x256xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_652 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_651, + parameter_650, + parameter_649, + parameter_648, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_648, parameter_649, parameter_650, parameter_651 + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_647 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_646, + parameter_645, + parameter_644, + parameter_643, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_643, parameter_644, parameter_645, parameter_646 + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_642 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_641, + parameter_640, + parameter_639, + parameter_638, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_638, parameter_639, parameter_640, parameter_641 + + # pd_op.multiply: (1x48x256x256xf32) <- (1xf32, 1x48x256x256xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (1x48x256x256xf32) <- (1x48x256x256xf32, 1x48x256x256xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: 
(1x48x256x256xf32) <- (1x48x256x256xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (1x48x256x256xf32) <- (1x48x256x256xf32, 1x48x256x256xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_637 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_636, + parameter_635, + parameter_634, + parameter_633, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_633, parameter_634, parameter_635, parameter_636 + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_632 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_631, + parameter_630, + parameter_629, + parameter_628, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_628, parameter_629, parameter_630, parameter_631 + + # pd_op.conv2d: (1x48x256x256xf32) <- (1x48x256x256xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_627 + + # pd_op.batch_norm_: (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x256x256xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_626, + parameter_625, + parameter_624, + parameter_623, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_623, parameter_624, parameter_625, parameter_626 + + # pd_op.multiply: (1x48x256x256xf32) <- (1xf32, 1x48x256x256xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (1x48x256x256xf32) <- (1x48x256x256xf32, 1x48x256x256xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (1x48x256x256xf32) <- (1x48x256x256xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (1x48x256x256xf32) <- (1x48x256x256xf32, 1x48x256x256xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = 
full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([1x48x256x256xf32, 1x48x256x256xf32]) <- (1x48x256x256xf32, 1x48x256x256xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (1x96x256x256xf32) <- ([1x48x256x256xf32, 1x48x256x256xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (1x96x1x1xf32) <- (1x96x256x256xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (1x96x1x1xf32) <- (1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_622 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) + del parameter_621 + + # pd_op.add: (1x96x1x1xf32) <- (1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (1x96x1x1xf32) <- (1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (1x96x256x256xf32) <- (1x96x256x256xf32, 1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (1x128x256x256xf32) <- (1x96x256x256xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (1x128x256x256xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x256x256xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.swish: (1x128x256x256xf32) <- (1x128x256x256xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (1x192x128x128xf32) <- (1x128x256x256xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + 
batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.swish: (1x192x128x128xf32) <- (1x192x128x128xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x192x128x128xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x192x128x128xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, 
parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + 
parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_28 = 
paddle._C_ops.conv2d( + swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_549, + parameter_548, + parameter_547, + parameter_546, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_546, parameter_547, parameter_548, parameter_549 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_545 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_31, + parameter_544, + parameter_543, + parameter_542, + parameter_541, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_541, parameter_542, parameter_543, parameter_544 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_540 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_539, + parameter_538, + parameter_537, + parameter_536, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_536, parameter_537, parameter_538, parameter_539 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_535 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_534, + parameter_533, + parameter_532, + parameter_531, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_531, parameter_532, parameter_533, parameter_534 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_530 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_529, + parameter_528, + parameter_527, + parameter_526, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_526, parameter_527, parameter_528, parameter_529 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 
1x96x128x128xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_525 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + 
add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([1x96x128x128xf32, 1x96x128x128xf32]) <- (1x96x128x128xf32, 1x96x128x128xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (1x192x128x128xf32) <- ([1x96x128x128xf32, 1x96x128x128xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (1x192x1x1xf32) <- (1x192x128x128xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) + del parameter_509 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (1x192x128x128xf32) <- (1x192x128x128xf32, 1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (1x256x128x128xf32) <- (1x192x128x128xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (1x256x128x128xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x128x128xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.swish: (1x256x128x128xf32) <- (1x256x128x128xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (1x384x64x64xf32) <- (1x256x128x128xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.swish: (1x384x64x64xf32) <- (1x384x64x64xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x384x64x64xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + 
batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x384x64x64xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 
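+
+        # NOTE: the (lambda x, f: f(x))(...) wrapper used around every
+        # paddle._C_ops.batch_norm call in this function pads a bare tensor
+        # return into a 6-tuple (out, None, None, None, None, None), so the
+        # six-way unpacking (batch_norm__N, ...) works whether the op returns
+        # a tuple/list or a single tensor; only the first element (the
+        # normalized output) is consumed by the following swish/add ops.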
+ + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # 
pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (1x192x64x64xf32) <- 
(1x192x64x64xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + 
batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_402, + parameter_401, + parameter_400, + parameter_399, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([1x192x64x64xf32, 1x192x64x64xf32]) <- (1x192x64x64xf32, 1x192x64x64xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (1x384x64x64xf32) <- ([1x192x64x64xf32, 1x192x64x64xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (1x384x1x1xf32) <- (1x384x64x64xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_398 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) + del parameter_397 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + 
add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (1x384x64x64xf32) <- (1x384x64x64xf32, 1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (1x512x64x64xf32) <- (1x384x64x64xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (1x512x64x64xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x64x64xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.swish: (1x512x64x64xf32) <- (1x512x64x64xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (1x768x32x32xf32) <- (1x512x64x64xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.swish: (1x768x32x32xf32) <- (1x768x32x32xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x768x32x32xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x768x32x32xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 
384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_370, + parameter_369, + parameter_368, + parameter_367, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_367, parameter_368, parameter_369, parameter_370 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_366 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_365, + parameter_364, + parameter_363, + parameter_362, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_362, parameter_363, parameter_364, parameter_365 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_33 = 
paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_361 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, 
parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_346 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([1x384x32x32xf32, 1x384x32x32xf32]) <- (1x384x32x32xf32, 1x384x32x32xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (1x768x32x32xf32) <- ([1x384x32x32xf32, 1x384x32x32xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (1x768x1x1xf32) <- (1x768x32x32xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + 
mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_330, full_int_array_1) + del full_int_array_1, parameter_330 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (1x768x32x32xf32) <- (1x768x32x32xf32, 1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (1x1024x32x32xf32) <- (1x768x32x32xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (1x1024x32x32xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x32x32xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.swish: (1x1024x32x32xf32) <- (1x1024x32x32xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x1024x32x32xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x1024x32x32xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_57 = 
paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_22 = paddle._C_ops.multiply(data_18, batch_norm__462) + del data_18 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_40 = paddle._C_ops.add(batch_norm__456, multiply_22) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_59 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_23 = paddle._C_ops.multiply(data_19, batch_norm__480) + del data_19 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_41 = paddle._C_ops.add(batch_norm__474, multiply_23) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_61 = paddle._C_ops.swish(add_41) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([1x384x32x32xf32, 1x384x32x32xf32, 1x384x32x32xf32, 1x384x32x32xf32]) <- 
(1x384x32x32xf32, 1x384x32x32xf32, 1x384x32x32xf32, 1x384x32x32xf32) + combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (1x1536x32x32xf32) <- ([1x384x32x32xf32, 1x384x32x32xf32, 1x384x32x32xf32, 1x384x32x32xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x1536x32x32xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + 
) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_24 = paddle._C_ops.multiply(data_20, batch_norm__504) + del data_20 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_42 = paddle._C_ops.add(batch_norm__498, multiply_24) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_64 = paddle._C_ops.swish(add_42) + + # builtin.combine: ([1x384x32x32xf32, 1x384x32x32xf32]) <- (1x384x32x32xf32, 1x384x32x32xf32) + combine_5 = [swish_56, swish_64] + + # pd_op.concat: (1x768x32x32xf32) <- ([1x384x32x32xf32, 1x384x32x32xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (1x768x32x32xf32) <- (1x768x32x32xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (1x768x32x32xf32) <- (1x768x32x32xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x768x32x32xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (1x384x64x64xf32) <- (1x384x32x32xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x384x64x64xf32, 1x512x64x64xf32]) <- (1x384x64x64xf32, 1x512x64x64xf32) + combine_6 = [nearest_interp_0, swish_45] + + # pd_op.concat: (1x896x64x64xf32) <- ([1x384x64x64xf32, 1x512x64x64xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (1x192x64x64xf32) <- 
(1x896x64x64xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x896x64x64xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_25 = paddle._C_ops.multiply(data_21, batch_norm__546) + del data_21 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_43 = paddle._C_ops.add(batch_norm__540, multiply_25) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_70 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + 
batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_26 = paddle._C_ops.multiply(data_22, batch_norm__564) + del data_22 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_44 = paddle._C_ops.add(batch_norm__558, multiply_26) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_72 = paddle._C_ops.swish(add_44) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.multiply: (1x192x64x64xf32) <- 
(1xf32, 1x192x64x64xf32) + multiply_27 = paddle._C_ops.multiply(data_23, batch_norm__582) + del data_23 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_45 = paddle._C_ops.add(batch_norm__576, multiply_27) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_74 = paddle._C_ops.swish(add_45) + + # builtin.combine: ([1x192x64x64xf32, 1x192x64x64xf32]) <- (1x192x64x64xf32, 1x192x64x64xf32) + combine_7 = [swish_67, swish_74] + + # pd_op.concat: (1x384x64x64xf32) <- ([1x192x64x64xf32, 1x192x64x64xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (1x384x64x64xf32) <- (1x384x64x64xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (1x384x64x64xf32) <- (1x384x64x64xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x384x64x64xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (1x192x128x128xf32) <- (1x192x64x64xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x192x128x128xf32, 1x256x128x128xf32]) <- (1x192x128x128xf32, 1x256x128x128xf32) + combine_8 = [nearest_interp_1, swish_29] + + # pd_op.concat: (1x448x128x128xf32) <- ([1x192x128x128xf32, 1x256x128x128xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x448x128x128xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + 
batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x448x128x128xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 
96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_28 = paddle._C_ops.multiply(data_24, batch_norm__624) + del data_24 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_46 = paddle._C_ops.add(batch_norm__618, multiply_28) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_80 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + 
del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_29 = paddle._C_ops.multiply(data_25, batch_norm__642) + del data_25 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_47 = paddle._C_ops.add(batch_norm__636, multiply_29) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_82 = paddle._C_ops.swish(add_47) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (1x96x128x128xf32) <- (1x96x128x128xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.multiply: (1x96x128x128xf32) <- (1xf32, 1x96x128x128xf32) + multiply_30 = paddle._C_ops.multiply(data_26, batch_norm__660) + del data_26 + + # pd_op.add: (1x96x128x128xf32) <- (1x96x128x128xf32, 1x96x128x128xf32) + add_48 = paddle._C_ops.add(batch_norm__654, multiply_30) + + # pd_op.swish: (1x96x128x128xf32) <- (1x96x128x128xf32) + swish_84 = paddle._C_ops.swish(add_48) + + # builtin.combine: ([1x96x128x128xf32, 1x96x128x128xf32]) <- 
(1x96x128x128xf32, 1x96x128x128xf32) + combine_9 = [swish_77, swish_84] + + # pd_op.concat: (1x192x128x128xf32) <- ([1x96x128x128xf32, 1x96x128x128xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (1x192x128x128xf32) <- (1x192x128x128xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x128x128xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (1x192x128x128xf32) <- (1x192x128x128xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x128x128xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([1x192x64x64xf32, 1x384x64x64xf32]) <- (1x192x64x64xf32, 1x384x64x64xf32) + combine_10 = [swish_86, swish_75] + + # pd_op.concat: (1x576x64x64xf32) <- ([1x192x64x64xf32, 1x384x64x64xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x576x64x64xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x576x64x64xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_10, 
parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 
1x192x64x64xf32) + multiply_31 = paddle._C_ops.multiply(data_27, batch_norm__702) + del data_27 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_49 = paddle._C_ops.add(batch_norm__696, multiply_31) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_90 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_32 = paddle._C_ops.multiply(data_28, batch_norm__720) + del data_28 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_50 = paddle._C_ops.add(batch_norm__714, multiply_32) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_92 = paddle._C_ops.swish(add_50) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # 
pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (1x192x64x64xf32) <- (1x192x64x64xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.multiply: (1x192x64x64xf32) <- (1xf32, 1x192x64x64xf32) + multiply_33 = paddle._C_ops.multiply(data_29, batch_norm__738) + del data_29 + + # pd_op.add: (1x192x64x64xf32) <- (1x192x64x64xf32, 1x192x64x64xf32) + add_51 = paddle._C_ops.add(batch_norm__732, multiply_33) + + # pd_op.swish: (1x192x64x64xf32) <- (1x192x64x64xf32) + swish_94 = paddle._C_ops.swish(add_51) + + # builtin.combine: ([1x192x64x64xf32, 1x192x64x64xf32]) <- (1x192x64x64xf32, 1x192x64x64xf32) + combine_11 = [swish_87, swish_94] + + # pd_op.concat: (1x384x64x64xf32) <- ([1x192x64x64xf32, 1x192x64x64xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (1x384x64x64xf32) <- (1x384x64x64xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x64x64xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + 
batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (1x384x64x64xf32) <- (1x384x64x64xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x64x64xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([1x384x32x32xf32, 1x768x32x32xf32]) <- (1x384x32x32xf32, 1x768x32x32xf32) + combine_12 = [swish_96, swish_65] + + # pd_op.concat: (1x1152x32x32xf32) <- ([1x384x32x32xf32, 1x768x32x32xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x1152x32x32xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_12, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x1152x32x32xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # 
pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_34 = paddle._C_ops.multiply(data_30, batch_norm__780) + del data_30 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_52 = paddle._C_ops.add(batch_norm__774, multiply_34) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_100 = paddle._C_ops.swish(add_52) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + 
batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_35 = paddle._C_ops.multiply(data_31, batch_norm__798) + del data_31 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_53 = paddle._C_ops.add(batch_norm__792, multiply_35) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_102 = paddle._C_ops.swish(add_53) + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: 
(1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (1x384x32x32xf32) <- (1x384x32x32xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.multiply: (1x384x32x32xf32) <- (1xf32, 1x384x32x32xf32) + multiply_36 = paddle._C_ops.multiply(data_32, batch_norm__816) + del data_32 + + # pd_op.add: (1x384x32x32xf32) <- (1x384x32x32xf32, 1x384x32x32xf32) + add_54 = paddle._C_ops.add(batch_norm__810, multiply_36) + + # pd_op.swish: (1x384x32x32xf32) <- (1x384x32x32xf32) + swish_104 = paddle._C_ops.swish(add_54) + + # builtin.combine: ([1x384x32x32xf32, 1x384x32x32xf32]) <- (1x384x32x32xf32, 1x384x32x32xf32) + combine_13 = [swish_97, swish_104] + + # pd_op.concat: (1x768x32x32xf32) <- ([1x384x32x32xf32, 1x384x32x32xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (1x768x32x32xf32) <- (1x768x32x32xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x32x32xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (1x768x32x32xf32) <- (1x768x32x32xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + 
add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_53, + add_54, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, 
+ batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + 
batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + 
batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + 
batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + 
+        conv2d_76,
+        conv2d_77,
+        conv2d_78,
+        conv2d_79,
+        conv2d_8,
+        conv2d_80,
+        conv2d_81,
+        conv2d_82,
+        conv2d_83,
+        conv2d_84,
+        conv2d_85,
+        conv2d_86,
+        conv2d_87,
+        conv2d_88,
+        conv2d_89,
+        conv2d_9,
+        conv2d_90,
+        conv2d_91,
+        conv2d_92,
+        conv2d_93,
+        conv2d_94,
+        conv2d_95,
+        conv2d_96,
+        conv2d_97,
+        conv2d_98,
+        conv2d_99,
+        full_0,
+        full_int_array_0,
+        full_int_array_2,
+        full_int_array_3,
+        full_int_array_4,
+        hardsigmoid_0,
+        hardsigmoid_1,
+        hardsigmoid_2,
+        hardsigmoid_3,
+        mean_0,
+        mean_1,
+        mean_2,
+        mean_3,
+        multiply_0,
+        multiply_1,
+        multiply_10,
+        multiply_11,
+        multiply_12,
+        multiply_13,
+        multiply_14,
+        multiply_15,
+        multiply_16,
+        multiply_17,
+        multiply_18,
+        multiply_19,
+        multiply_2,
+        multiply_20,
+        multiply_21,
+        multiply_22,
+        multiply_23,
+        multiply_24,
+        multiply_25,
+        multiply_26,
+        multiply_27,
+        multiply_28,
+        multiply_29,
+        multiply_3,
+        multiply_30,
+        multiply_31,
+        multiply_32,
+        multiply_33,
+        multiply_34,
+        multiply_35,
+        multiply_36,
+        multiply_4,
+        multiply_5,
+        multiply_6,
+        multiply_7,
+        multiply_8,
+        multiply_9,
+        nearest_interp_0,
+        nearest_interp_1,
+        pool2d_0,
+        pool2d_1,
+        pool2d_2,
+        reshape_0,
+        reshape_1,
+        reshape_2,
+        reshape_3,
+        swish_1,
+        swish_10,
+        swish_100,
+        swish_101,
+        swish_102,
+        swish_103,
+        swish_104,
+        swish_11,
+        swish_12,
+        swish_13,
+        swish_14,
+        swish_15,
+        swish_16,
+        swish_17,
+        swish_18,
+        swish_19,
+        swish_2,
+        swish_20,
+        swish_21,
+        swish_22,
+        swish_23,
+        swish_24,
+        swish_25,
+        swish_26,
+        swish_27,
+        swish_28,
+        swish_29,
+        swish_3,
+        swish_30,
+        swish_31,
+        swish_32,
+        swish_33,
+        swish_34,
+        swish_35,
+        swish_36,
+        swish_37,
+        swish_38,
+        swish_39,
+        swish_4,
+        swish_40,
+        swish_41,
+        swish_42,
+        swish_43,
+        swish_44,
+        swish_45,
+        swish_46,
+        swish_47,
+        swish_48,
+        swish_49,
+        swish_5,
+        swish_50,
+        swish_51,
+        swish_52,
+        swish_53,
+        swish_54,
+        swish_55,
+        swish_56,
+        swish_57,
+        swish_58,
+        swish_59,
+        swish_6,
+        swish_60,
+        swish_61,
+        swish_62,
+        swish_63,
+        swish_64,
+        swish_65,
+        swish_66,
+        swish_67,
+        swish_68,
+        swish_69,
+        swish_7,
+        swish_70,
+        swish_71,
+        swish_72,
+        swish_73,
+        swish_74,
+        swish_75,
+        swish_76,
+        swish_77,
+        swish_78,
+        swish_79,
+        swish_8,
+        swish_80,
+        swish_81,
+        swish_82,
+        swish_83,
+        swish_84,
+        swish_85,
+        swish_86,
+        swish_87,
+        swish_88,
+        swish_89,
+        swish_9,
+        swish_90,
+        swish_91,
+        swish_92,
+        swish_93,
+        swish_94,
+        swish_95,
+        swish_96,
+        swish_97,
+        swish_98,
+        swish_99,
+    )
+
+    return swish_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/weight_meta.py
new file mode 100644
index 000000000..a6a19c8c2
--- /dev/null
+++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/weight_meta.py
@@ -0,0 +1,6968 @@
+class Program_weight_tensor_parameter_0:
+    name = "parameter_0"
+    shape = [768]
+    dtype = "float32"
+    data = None
+
+
+class Program_weight_tensor_parameter_1:
+    name = "parameter_1"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("1.0")
+    max_val = float("1.0")
+    mean = float("1.0")
+    data = None
+
+
+class Program_weight_tensor_parameter_2:
+    name = "parameter_2"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("1.0")
+    max_val = float("1.0")
+    mean = float("1.0")
+    data = None
+
+
+class Program_weight_tensor_parameter_3:
+    name = "parameter_3"
+    shape = [768]
+    dtype = "float32"
+    data = None
+
+
+class Program_weight_tensor_parameter_4:
+    name = "parameter_4"
+    shape = [768, 768, 1, 1]
+    dtype = "float32"
+    min_val = float("-0.225002")
+    max_val = float("0.227257")
mean = float("3.57954e-05") + std = float("0.0509807") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.322942") + max_val = float("0.326923") + mean = float("-0.000418788") + std = float("0.0723935") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.12304") + max_val = float("0.124361") + mean = float("-4.58648e-05") + std = float("0.0240385") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.119696") + max_val = float("0.115869") + mean = float("-3.41492e-05") + std = float("0.024043") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + data = None 
+ + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.31449") + max_val = float("0.344925") + mean = float("-0.000205947") + std = float("0.0718123") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.115579") + max_val = float("0.115337") + mean = float("4.42976e-05") + std = float("0.0240468") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.113888") + max_val = float("0.114251") + mean = float("-1.16386e-06") + std = float("0.024036") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.302931") + max_val = float("0.321711") + mean = float("0.000192558") + std = float("0.0720569") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = 
float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.114187") + max_val = float("0.116464") + mean = float("4.70456e-06") + std = float("0.0240593") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.118342") + max_val = float("0.116485") + mean = float("2.0012e-05") + std = float("0.0240428") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.191508") + max_val = float("0.200703") + mean = float("9.88319e-06") + std = float("0.0416645") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.181007") + max_val = float("0.188079") + mean = float("3.5754e-05") + std = float("0.0416366") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = 
float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.129977") + max_val = float("0.124483") + mean = float("-1.74457e-05") + std = float("0.0240556") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.302833") + max_val = float("0.331978") + mean = float("-2.36044e-05") + std = float("0.0721805") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.400722") + max_val = float("0.427238") + mean = float("-0.000784854") + std = float("0.102043") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.154177") + max_val = float("0.143337") + mean = float("2.49918e-05") + std = float("0.034047") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + 
dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.181651") + max_val = float("0.162164") + mean = float("2.34681e-06") + std = float("0.034039") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.384016") + max_val = float("0.427916") + mean = float("0.000628316") + std = float("0.101792") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.158839") + max_val = float("0.149041") + mean = float("6.79425e-05") + std = float("0.0341052") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.152118") + max_val = float("0.154967") + mean = float("4.15251e-05") + std = float("0.0339881") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = 
float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.412261") + max_val = float("0.410866") + mean = float("0.000799833") + std = float("0.101969") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.149017") + max_val = float("0.148443") + mean = float("6.11429e-05") + std = float("0.0339847") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.171461") + max_val = float("0.1505") + mean = float("9.49073e-05") + std = float("0.034") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.255931") + max_val = float("0.281748") + mean = float("0.000129119") + std = float("0.0593246") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = 
float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.286651") + max_val = float("0.244655") + mean = float("0.000256951") + std = float("0.0584998") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.162603") + max_val = float("0.160605") + mean = float("9.68154e-05") + std = float("0.0339947") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.441582") + max_val = float("0.424864") + mean = float("0.000426026") + std = float("0.102098") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.473161") + max_val = float("0.525533") + mean = float("-0.00121177") + std = float("0.142925") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("1.0") 
+ max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.209848") + max_val = float("0.218788") + mean = float("-0.000374423") + std = float("0.0482804") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.201192") + max_val = float("0.215516") + mean = float("1.78053e-05") + std = float("0.048281") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.570801") + max_val = float("0.562312") + mean = float("-0.00288663") + std = float("0.143806") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.209186") + max_val = float("0.210877") + mean = float("0.000248478") + std = float("0.0480063") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + 
mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.211593") + max_val = float("0.210352") + mean = float("-0.000131744") + std = float("0.0480463") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.574778") + max_val = float("0.542257") + mean = float("0.00209471") + std = float("0.143267") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.235471") + max_val = float("0.197988") + mean = float("0.000243513") + std = float("0.0483375") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.231455") + max_val = float("0.187554") + mean = float("0.000129944") + std = float("0.0481659") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data 
= None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.273341") + max_val = float("0.330442") + mean = float("0.000821447") + std = float("0.0668376") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.279151") + max_val = float("0.302572") + mean = float("-1.76963e-05") + std = float("0.0665958") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.299474") + max_val = float("0.321562") + mean = float("2.65801e-05") + std = float("0.0720443") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.322599") + max_val = float("0.313516") + mean = float("-1.47029e-05") + std = float("0.0720052") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("1.0") + 
max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.414858") + max_val = float("0.42724") + mean = float("-0.000267412") + std = float("0.101692") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.187654") + max_val = float("0.15405") + mean = float("2.50637e-05") + std = float("0.0341147") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.159537") + max_val = float("0.173581") + mean = float("5.40353e-05") + std = float("0.0340465") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.409606") + max_val = float("0.46023") + mean = float("-0.000349727") + std = float("0.102309") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("1.0") + 
max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.158137") + max_val = float("0.167091") + mean = float("-2.39766e-05") + std = float("0.0340454") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.163613") + max_val = float("0.166173") + mean = float("3.52039e-05") + std = float("0.0340068") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.39617") + max_val = float("0.430945") + mean = float("-0.000492756") + std = float("0.101597") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.152365") + max_val = float("0.151254") + mean = float("3.36291e-05") + std = float("0.0340444") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("1.0") + 
max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.141975") + max_val = float("0.149465") + mean = float("1.32056e-05") + std = float("0.0340086") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.218845") + max_val = float("0.260585") + mean = float("-7.7441e-06") + std = float("0.0471847") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.236832") + max_val = float("0.206537") + mean = float("-9.01452e-05") + std = float("0.0472838") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.232199") + max_val = float("0.243184") + mean = float("-5.58502e-05") + std = float("0.051212") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + 
shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.248379") + max_val = float("0.256895") + mean = float("2.04505e-05") + std = float("0.0510396") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.30031") + max_val = float("0.331174") + mean = float("6.06994e-05") + std = float("0.0720973") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.123888") + max_val = float("0.126823") + mean = float("-2.84452e-06") + std = float("0.0240717") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.117054") + max_val = float("0.119106") + mean = float("-4.36817e-06") + std = float("0.024088") + data = None + + +class 
Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.171136") + max_val = float("0.165988") + mean = float("-1.43518e-05") + std = float("0.0360497") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.381462") + max_val = float("0.330935") + mean = float("3.46427e-05") + std = float("0.0722746") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.113052") + max_val = float("0.118714") + mean = float("2.2299e-06") + std = float("0.02406") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + data = None + + +class 
Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.122883") + max_val = float("0.116335") + mean = float("4.54892e-05") + std = float("0.02406") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.308828") + max_val = float("0.359127") + mean = float("-0.000108209") + std = float("0.0722834") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.109728") + max_val = float("0.11905") + mean = float("1.94702e-06") + std = float("0.0240484") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.111787") + max_val = float("0.10788") + mean = float("-3.44857e-05") + std = float("0.0240339") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = 
float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.223901") + max_val = float("0.19764") + mean = float("5.67966e-05") + std = float("0.0441371") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("1.0") + max_val = float("1.0") + mean = float("1.0") + std = float("5.96046e-08") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.204276") + max_val = float("0.196037") + mean = float("-2.92633e-05") + std = float("0.044094") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.74704") + max_val = float("-0.732204") + mean = float("-2.17624") + std = float("0.427795") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("1.59825") + max_val = float("4.45289") + mean = float("3.08841") + std = float("0.256755") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("0.350855") + max_val = float("1.34723") + mean = float("0.548647") + std = float("0.0627094") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [1024] + dtype = "float32" + min_val = float("-0.827043") + max_val = float("0.849308") + mean = float("-0.255753") + std = float("0.138965") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.630881") + max_val = float("0.80315") + mean = float("-0.00304364") + std = float("0.0604632") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [768] + dtype = "float32" + min_val = float("-1.84619") + max_val = float("0.776603") + mean = float("-0.0329383") + std = float("0.364898") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.750361") + max_val = float("1.54096") + mean = float("-0.00325312") + std = float("0.045924") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("-1.7711") + max_val = float("0.110421") + mean = float("-0.347179") + std = float("0.279599") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("0.181179") + max_val = float("1.59652") + mean = float("0.574482") + std = 
float("0.238983") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384] + dtype = "float32" + min_val = float("0.00704936") + max_val = float("0.237007") + mean = float("0.0367522") + std = float("0.0225968") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-1.19051") + max_val = float("0.819303") + mean = float("0.125802") + std = float("0.295442") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.584765") + max_val = float("0.355969") + mean = float("-0.00210953") + std = float("0.0281305") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("-1.7711") + max_val = float("0.110421") + mean = float("-0.347179") + std = float("0.279599") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("0.255804") + max_val = float("2.35107") + mean = float("0.949467") + std = float("0.268639") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384] + dtype = "float32" + min_val = float("0.0698564") + max_val = float("0.504136") + mean = float("0.154798") + std = float("0.0500254") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-1.27275") + max_val = float("1.23981") + mean = float("0.384295") + std = float("0.390618") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.304078") + max_val = float("0.409131") + mean = float("-0.000695641") + std = float("0.0166628") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("-2.55839") + max_val = float("0.0163429") + mean = float("-1.54004") + std = float("0.414111") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("0.53528") + max_val = float("1.71079") + mean = float("1.15472") + std = float("0.158429") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384] + dtype = "float32" + min_val = float("3.97934") + max_val = float("26.2457") + mean = float("6.57467") + std = float("2.10136") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-4.97699") + max_val = float("5.7738") + mean = float("-1.89469") + std = float("1.00581") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.196336") + max_val = float("0.365703") + mean = float("-0.000958924") + std = float("0.0209693") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("-1.95623") + max_val = float("0.442957") + mean = float("-0.594685") + std = float("0.341251") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("0.136667") + max_val = float("2.07099") + 
mean = float("0.549739") + std = float("0.23037") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384] + dtype = "float32" + min_val = float("0.0126792") + max_val = float("0.56062") + mean = float("0.0519998") + std = float("0.0457513") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.846751") + max_val = float("0.852409") + mean = float("0.180871") + std = float("0.240219") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.395") + max_val = float("0.77597") + mean = float("-0.00390879") + std = float("0.027433") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("-1.95623") + max_val = float("0.442957") + mean = float("-0.594685") + std = float("0.341251") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("0.476039") + max_val = float("2.08024") + mean = float("1.03971") + std = float("0.262148") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384] + dtype = "float32" + min_val = float("0.0993289") + max_val = float("3.07776") + mean = float("0.231462") + std = float("0.171344") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-2.91041") + max_val = float("1.60815") + mean = float("0.353761") + std = float("0.378925") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.548844") + max_val = float("0.362948") + mean = float("-0.000978024") + std = float("0.0185736") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.4119") + max_val = float("0.865276") + mean = float("-1.36754") + std = float("0.362374") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.617352") + max_val = float("1.87098") + mean = float("1.18714") + std = float("0.154625") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("3.32112") + max_val = float("15.0416") + mean = float("5.04696") + std = float("1.47879") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-6.04079") + max_val = float("3.15991") + mean = float("-1.54845") + std = float("0.754024") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.18178") + max_val = float("0.764448") + mean = float("-0.00161474") + std = float("0.0221807") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("-1.87273") + max_val = float("0.32821") + mean = float("-0.506978") + std = float("0.357104") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("0.0435078") + max_val 
= float("2.14346") + mean = float("0.437331") + std = float("0.224181") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384] + dtype = "float32" + min_val = float("0.00471868") + max_val = float("1.12307") + mean = float("0.0613553") + std = float("0.085815") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-1.03153") + max_val = float("0.75915") + mean = float("0.231845") + std = float("0.233537") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.387213") + max_val = float("0.251696") + mean = float("-0.00435802") + std = float("0.0257492") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("-1.87273") + max_val = float("0.32821") + mean = float("-0.506978") + std = float("0.357104") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("0.455155") + max_val = float("2.22589") + mean = float("1.01484") + std = float("0.274774") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384] + dtype = "float32" + min_val = float("0.120887") + max_val = float("1.82676") + mean = float("0.306026") + std = float("0.144707") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-1.6152") + max_val = float("1.59863") + mean = float("0.399959") + std = float("0.360926") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.353474") + max_val = float("0.341442") + mean = float("-0.000970363") + std = float("0.0204197") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-2.14605") + max_val = float("0.421522") + mean = float("-1.32652") + std = float("0.278806") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.774001") + max_val = float("1.64796") + mean = float("1.16424") + std = float("0.108873") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("2.64922") + max_val = float("13.5521") + mean = float("3.82973") + std = float("0.984553") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-5.25263") + max_val = float("1.46638") + mean = float("-1.38693") + std = float("0.677743") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.206734") + max_val = float("0.386618") + mean = float("-0.00195101") + std = float("0.0230433") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-2.91797") + max_val = float("1.36626") + mean = float("-0.756402") + std = float("0.615925") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = 
float("0.867847") + max_val = float("2.97312") + mean = float("1.88017") + std = float("0.289525") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.262423") + max_val = float("1.63799") + mean = float("0.427128") + std = float("0.0974533") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-2.60546") + max_val = float("1.21468") + mean = float("0.480087") + std = float("0.290265") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.567572") + max_val = float("0.457624") + mean = float("-0.00487478") + std = float("0.0609497") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.23836") + max_val = float("0.461357") + mean = float("-0.783443") + std = float("0.463137") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.651326") + max_val = float("2.90383") + mean = float("2.08609") + std = float("0.315274") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.286499") + max_val = float("0.669731") + mean = float("0.397054") + std = float("0.0529008") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.166745") + max_val = float("1.18842") + mean = float("0.374375") + std = float("0.166678") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-1.33249") + max_val = float("0.583348") + mean = float("-0.00355021") + std = float("0.0586729") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [768] + dtype = "float32" + min_val = float("-2.40044") + max_val = float("0.606756") + mean = float("-0.888147") + std = float("0.333099") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [768] + dtype = "float32" + min_val = float("0.507329") + max_val = float("1.91505") + mean = float("0.930805") + std = float("0.148314") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [768] + dtype = "float32" + min_val = float("0.726932") + max_val = float("6.80459") + mean = float("1.23914") + std = float("0.343665") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [768] + dtype = "float32" + min_val = float("-1.00842") + max_val = float("1.56902") + mean = float("0.338213") + std = float("0.446215") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.539869") + max_val = float("0.767872") + mean = float("-0.000881062") + std = float("0.0275231") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [512] + dtype = "float32" + min_val = float("-3.38221") + max_val = float("1.60624") + mean = float("-1.1291") + std = float("0.475979") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [512] + 
dtype = "float32" + min_val = float("0.311062") + max_val = float("1.64816") + mean = float("1.06252") + std = float("0.170948") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [512] + dtype = "float32" + min_val = float("1.06478") + max_val = float("3.62191") + mean = float("1.88625") + std = float("0.376004") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [512] + dtype = "float32" + min_val = float("-1.49863") + max_val = float("0.849814") + mean = float("-0.527795") + std = float("0.397717") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.760857") + max_val = float("0.88926") + mean = float("-0.00348775") + std = float("0.0819788") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-0.507828") + max_val = float("1.07468") + mean = float("0.176648") + std = float("0.270934") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-1.34497") + max_val = float("1.18137") + mean = float("-0.000231607") + std = float("0.0623732") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [192] + dtype = "float32" + min_val = float("-2.01897") + max_val = float("0.0989438") + mean = float("-0.426764") + std = float("0.320564") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [192] + dtype = "float32" + min_val = float("0.0468001") + max_val = float("1.93472") + mean = float("0.511951") + std = float("0.361628") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [192] + dtype = "float32" + min_val = float("0.00434022") + max_val = float("0.657705") + mean = float("0.0555087") + std = float("0.0698823") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [192] + dtype = "float32" + min_val = float("-0.282835") + max_val = float("0.28453") + mean = float("0.0636442") + std = float("0.109437") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.559419") + max_val = float("0.296305") + mean = float("-0.00366903") + std = float("0.0319186") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [192] + dtype = "float32" + min_val = float("-2.01897") + max_val = float("0.0989438") + mean = float("-0.426764") + std = float("0.320564") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [192] + dtype = "float32" + min_val = float("0.292172") + max_val = float("2.2927") + mean = float("1.09788") + std = float("0.424566") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [192] + dtype = "float32" + min_val = float("0.0869552") + max_val = float("1.10043") + mean = float("0.225629") + std = float("0.0981285") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [192] + dtype = "float32" + min_val = float("-0.311857") + max_val = float("0.530845") + mean = float("0.140823") + std = float("0.169316") + data = None + + +class Program_weight_tensor_parameter_408: + name 
= "parameter_408" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.253119") + max_val = float("0.250075") + mean = float("-0.00118965") + std = float("0.0265546") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [192] + dtype = "float32" + min_val = float("-2.84761") + max_val = float("-0.278888") + mean = float("-1.26374") + std = float("0.397727") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [192] + dtype = "float32" + min_val = float("0.617317") + max_val = float("2.12396") + mean = float("1.2186") + std = float("0.187169") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [192] + dtype = "float32" + min_val = float("4.84834") + max_val = float("30.867") + mean = float("8.66074") + std = float("3.28617") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [192] + dtype = "float32" + min_val = float("-6.62629") + max_val = float("5.48672") + mean = float("-1.54293") + std = float("1.46433") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.485569") + max_val = float("0.398836") + mean = float("-0.00115391") + std = float("0.0303942") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [192] + dtype = "float32" + min_val = float("-1.93093") + max_val = float("0.177263") + mean = float("-0.344331") + std = float("0.277833") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [192] + dtype = "float32" + min_val = float("0.0292327") + max_val = float("1.76124") + mean = float("0.40804") + std = float("0.312806") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [192] + dtype = "float32" + min_val = float("0.00463541") + max_val = float("0.307531") + mean = float("0.055457") + std = float("0.0488782") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [192] + dtype = "float32" + min_val = float("-0.267673") + max_val = float("0.313243") + mean = float("0.0513852") + std = float("0.0991333") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.372544") + max_val = float("0.496768") + mean = float("-0.00286693") + std = float("0.0309081") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [192] + dtype = "float32" + min_val = float("-1.93093") + max_val = float("0.177263") + mean = float("-0.344331") + std = float("0.277833") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [192] + dtype = "float32" + min_val = float("0.4465") + max_val = float("2.2786") + mean = float("1.07712") + std = float("0.367612") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [192] + dtype = "float32" + min_val = float("0.160429") + max_val = float("2.03602") + mean = float("0.323852") + std = float("0.159396") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [192] + dtype = "float32" + min_val = float("-0.62254") + max_val = float("0.71634") + mean = float("0.15556") + std = float("0.191245") + data = None + + +class 
Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.28053") + max_val = float("0.3717") + mean = float("-0.00131036") + std = float("0.0285193") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [192] + dtype = "float32" + min_val = float("-2.45161") + max_val = float("-0.163549") + mean = float("-1.22136") + std = float("0.447861") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [192] + dtype = "float32" + min_val = float("0.665839") + max_val = float("1.86864") + mean = float("1.2456") + std = float("0.184019") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [192] + dtype = "float32" + min_val = float("3.70249") + max_val = float("24.5939") + mean = float("6.04481") + std = float("2.04615") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [192] + dtype = "float32" + min_val = float("-8.16516") + max_val = float("2.47629") + mean = float("-1.34792") + std = float("1.3002") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.418041") + max_val = float("0.47425") + mean = float("-0.00162313") + std = float("0.03167") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [192] + dtype = "float32" + min_val = float("-1.75197") + max_val = float("0.17298") + mean = float("-0.317614") + std = float("0.291786") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [192] + dtype = "float32" + min_val = float("0.00390728") + max_val = float("1.72878") + mean = float("0.334542") + std = float("0.263931") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [192] + dtype = "float32" + min_val = float("0.00212122") + max_val = float("0.760825") + mean = float("0.0670221") + std = float("0.0961478") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [192] + dtype = "float32" + min_val = float("-0.236415") + max_val = float("0.361719") + mean = float("0.058539") + std = float("0.0903572") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.367892") + max_val = float("0.304343") + mean = float("-0.00279801") + std = float("0.0297171") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [192] + dtype = "float32" + min_val = float("-1.75197") + max_val = float("0.172979") + mean = float("-0.317614") + std = float("0.291786") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [192] + dtype = "float32" + min_val = float("0.409451") + max_val = float("1.99619") + mean = float("1.02908") + std = float("0.334997") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [192] + dtype = "float32" + min_val = float("0.140448") + max_val = float("0.915") + mean = float("0.338831") + std = float("0.103163") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [192] + dtype = "float32" + min_val = float("-0.355816") + max_val = float("0.701121") + mean = float("0.183374") + std = float("0.198424") + 
data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.356702") + max_val = float("0.325866") + mean = float("-0.00124644") + std = float("0.0299642") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.43882") + max_val = float("0.118144") + mean = float("-1.17558") + std = float("0.421154") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.704566") + max_val = float("1.87451") + mean = float("1.21191") + std = float("0.182923") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("2.71863") + max_val = float("15.9049") + mean = float("4.35307") + std = float("1.42514") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-7.84718") + max_val = float("1.84736") + mean = float("-1.31385") + std = float("1.06362") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.260597") + max_val = float("0.412342") + mean = float("-0.00207208") + std = float("0.0324988") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-2.0766") + max_val = float("0.208712") + mean = float("-0.324254") + std = float("0.33393") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("-0.0011458") + max_val = float("0.755893") + mean = float("0.185622") + std = float("0.14407") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("0.000773168") + max_val = float("0.0618737") + mean = float("0.0213263") + std = float("0.0130674") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.235196") + max_val = float("0.261614") + mean = float("0.0547004") + std = float("0.0727225") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.231821") + max_val = float("0.276367") + mean = float("-0.00238072") + std = float("0.0269558") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.0766") + max_val = float("0.208713") + mean = float("-0.324254") + std = float("0.333929") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.391129") + max_val = float("1.97874") + mean = float("0.933861") + std = float("0.309194") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.153988") + max_val = float("0.794535") + mean = float("0.314531") + std = float("0.0897818") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-0.425378") + max_val = float("0.750795") + mean = 
float("0.209336") + std = float("0.214874") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.342234") + max_val = float("0.508412") + mean = float("-0.00135816") + std = float("0.0310818") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-2.68478") + max_val = float("-0.0879268") + mean = float("-1.17286") + std = float("0.431287") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.729013") + max_val = float("1.64543") + mean = float("1.19503") + std = float("0.157817") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("1.95681") + max_val = float("6.36205") + mean = float("3.19424") + std = float("0.643544") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-9.70608") + max_val = float("1.85131") + mean = float("-1.28804") + std = float("1.11841") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.298277") + max_val = float("0.38271") + mean = float("-0.00236101") + std = float("0.033313") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.22542") + max_val = float("0.213624") + mean = float("-0.275191") + std = float("0.296188") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("-0.00595314") + max_val = float("0.675359") + mean = float("0.174998") + std = float("0.12874") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.0013768") + max_val = float("0.0973269") + mean = float("0.0234223") + std = float("0.0156887") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.2057") + max_val = float("0.318132") + mean = float("0.0707503") + std = float("0.0869326") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.280158") + max_val = float("0.215769") + mean = float("-0.00291163") + std = float("0.0281267") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-1.22542") + max_val = float("0.213624") + mean = float("-0.275191") + std = float("0.296188") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.400501") + max_val = float("1.59482") + mean = float("0.844685") + std = float("0.270351") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.134699") + max_val = float("0.721497") + mean = float("0.319353") + std = float("0.0890785") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-0.478965") + 
max_val = float("0.839604") + mean = float("0.210004") + std = float("0.204389") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.357121") + max_val = float("0.487324") + mean = float("-0.00118956") + std = float("0.031957") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-2.4647") + max_val = float("-0.139859") + mean = float("-1.18512") + std = float("0.414686") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.696898") + max_val = float("1.59859") + mean = float("1.17212") + std = float("0.145496") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("1.42719") + max_val = float("4.37024") + mean = float("2.44086") + std = float("0.549365") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-4.3222") + max_val = float("2.14184") + mean = float("-1.22353") + std = float("0.883304") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.497696") + max_val = float("0.463389") + mean = float("-0.00259625") + std = float("0.0341798") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.23483") + max_val = float("0.267302") + mean = float("-0.218713") + std = float("0.255287") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.0145354") + max_val = float("1.43917") + mean = float("0.223981") + std = float("0.209561") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.0048331") + max_val = float("0.290934") + mean = float("0.0427425") + std = float("0.0382429") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.530999") + max_val = float("0.577263") + mean = float("0.106982") + std = float("0.121021") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.487726") + max_val = float("0.321109") + mean = float("-0.00510497") + std = float("0.033837") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-1.23483") + max_val = float("0.267302") + mean = float("-0.218713") + std = float("0.255287") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.34022") + max_val = float("1.42072") + mean = float("0.755703") + std = float("0.213444") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.212544") + max_val = float("0.937862") + mean = float("0.418457") + std = float("0.109271") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + 
min_val = float("-0.759339") + max_val = float("0.74774") + mean = float("0.217433") + std = float("0.207777") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.38033") + max_val = float("0.338514") + mean = float("-0.00125319") + std = float("0.0324863") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.83112") + max_val = float("-0.166895") + mean = float("-1.10081") + std = float("0.314891") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.819725") + max_val = float("1.73456") + mean = float("1.15749") + std = float("0.130299") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("0.891588") + max_val = float("3.99136") + mean = float("2.08281") + std = float("0.529117") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-4.72978") + max_val = float("1.02882") + mean = float("-0.668246") + std = float("0.896257") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.65171") + max_val = float("0.640822") + mean = float("-0.0020547") + std = float("0.0343565") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.86404") + max_val = float("1.38911") + mean = float("-0.0370499") + std = float("0.683507") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.500034") + max_val = float("2.2503") + mean = float("0.980003") + std = float("0.25874") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.644561") + max_val = float("7.45927") + mean = float("1.50548") + std = float("0.788261") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-1.2704") + max_val = float("2.31177") + mean = float("-0.279101") + std = float("0.45695") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.982015") + max_val = float("0.869502") + mean = float("-0.00510772") + std = float("0.07352") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-3.0935") + max_val = float("0.910982") + mean = float("0.0494977") + std = float("0.655638") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.875539") + max_val = float("5.21581") + mean = float("1.86321") + std = float("0.839035") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.762109") + max_val = float("5.12713") + mean = float("1.77503") + std = float("0.582029") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + 
dtype = "float32" + min_val = float("-1.00415") + max_val = float("1.11301") + mean = float("0.0328207") + std = float("0.415593") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.985177") + max_val = float("0.73921") + mean = float("-0.00231196") + std = float("0.0848335") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [384] + dtype = "float32" + min_val = float("-2.90784") + max_val = float("1.34582") + mean = float("-0.272276") + std = float("0.547194") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [384] + dtype = "float32" + min_val = float("0.648698") + max_val = float("2.39355") + mean = float("1.18517") + std = float("0.25274") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [384] + dtype = "float32" + min_val = float("0.59614") + max_val = float("10.0643") + mean = float("1.56043") + std = float("0.865042") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [384] + dtype = "float32" + min_val = float("-2.10662") + max_val = float("1.66147") + mean = float("0.0886253") + std = float("0.47705") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.655717") + max_val = float("0.612911") + mean = float("-0.000772766") + std = float("0.0379079") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [256] + dtype = "float32" + min_val = float("-1.98829") + max_val = float("1.25385") + mean = float("-0.884212") + std = float("0.456923") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [256] + dtype = "float32" + min_val = float("0.40595") + max_val = float("1.72871") + mean = float("1.06825") + std = float("0.188221") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [256] + dtype = "float32" + min_val = float("0.758135") + max_val = float("4.31852") + mean = float("2.07119") + std = float("0.616895") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [256] + dtype = "float32" + min_val = float("-1.75746") + max_val = float("2.3472") + mean = float("-0.126613") + std = float("0.611997") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-1.45832") + max_val = float("0.857092") + mean = float("-0.000274916") + std = float("0.120769") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-0.515235") + max_val = float("0.947143") + mean = float("0.234233") + std = float("0.313363") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-1.91635") + max_val = float("1.51123") + mean = float("0.00259021") + std = float("0.0936603") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [96] + dtype = "float32" + min_val = float("-2.05715") + max_val = float("0.115899") + mean = float("-0.327041") + std = float("0.401782") + data = None + + +class Program_weight_tensor_parameter_512: + name = 
"parameter_512" + shape = [96] + dtype = "float32" + min_val = float("0.143618") + max_val = float("2.63298") + mean = float("0.597474") + std = float("0.576575") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [96] + dtype = "float32" + min_val = float("0.00942651") + max_val = float("0.191157") + mean = float("0.0402118") + std = float("0.0330148") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [96] + dtype = "float32" + min_val = float("-0.193494") + max_val = float("0.299148") + mean = float("0.0543616") + std = float("0.0975037") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.68938") + max_val = float("0.718855") + mean = float("-0.00591613") + std = float("0.0549315") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [96] + dtype = "float32" + min_val = float("-2.05715") + max_val = float("0.115899") + mean = float("-0.327041") + std = float("0.401782") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [96] + dtype = "float32" + min_val = float("0.308202") + max_val = float("4.73592") + mean = float("0.982514") + std = float("0.758903") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [96] + dtype = "float32" + min_val = float("0.0612546") + max_val = float("1.29406") + mean = float("0.185109") + std = float("0.196911") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [96] + dtype = "float32" + min_val = float("-0.413394") + max_val = float("0.432952") + mean = float("0.110958") + std = float("0.180747") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.440687") + max_val = float("0.443321") + mean = float("-0.00126252") + std = float("0.0389334") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.3818") + max_val = float("-0.0764691") + mean = float("-1.14179") + std = float("0.445418") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.517878") + max_val = float("1.72317") + mean = float("1.02982") + std = float("0.181972") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("3.14105") + max_val = float("22.6149") + mean = float("5.93893") + std = float("3.31479") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-11.2555") + max_val = float("8.63629") + mean = float("-0.607513") + std = float("2.33309") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.11358") + max_val = float("1.2498") + mean = float("-0.000776569") + std = float("0.048516") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [96] + dtype = "float32" + min_val = float("-1.43264") + max_val = float("0.226847") + mean = float("-0.221555") + std = float("0.287846") + data = None + + +class 
Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [96] + dtype = "float32" + min_val = float("0.0612513") + max_val = float("1.91808") + mean = float("0.457443") + std = float("0.384398") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [96] + dtype = "float32" + min_val = float("0.0120131") + max_val = float("0.942893") + mean = float("0.0788421") + std = float("0.114894") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [96] + dtype = "float32" + min_val = float("-0.1799") + max_val = float("0.223621") + mean = float("0.0181186") + std = float("0.0785986") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.56327") + max_val = float("0.472933") + mean = float("-0.0038017") + std = float("0.0551371") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [96] + dtype = "float32" + min_val = float("-1.43264") + max_val = float("0.226847") + mean = float("-0.221555") + std = float("0.287846") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [96] + dtype = "float32" + min_val = float("0.345133") + max_val = float("2.38181") + mean = float("0.861025") + std = float("0.450705") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [96] + dtype = "float32" + min_val = float("0.143109") + max_val = float("3.90845") + mean = float("0.392039") + std = float("0.474193") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [96] + dtype = "float32" + min_val = float("-0.465589") + max_val = float("0.529685") + mean = float("0.0311307") + std = float("0.205792") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.44419") + max_val = float("0.623048") + mean = float("-0.00131212") + std = float("0.0417043") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [96] + dtype = "float32" + min_val = float("-3.33855") + max_val = float("0.285591") + mean = float("-1.07727") + std = float("0.560807") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [96] + dtype = "float32" + min_val = float("0.53089") + max_val = float("2.00605") + mean = float("1.12546") + std = float("0.241448") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [96] + dtype = "float32" + min_val = float("2.23028") + max_val = float("22.8735") + mean = float("4.20662") + std = float("2.92029") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [96] + dtype = "float32" + min_val = float("-6.68954") + max_val = float("4.20746") + mean = float("-0.707249") + std = float("1.79248") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.895325") + max_val = float("1.03946") + mean = float("-0.00149083") + std = float("0.0488456") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [96] + dtype = "float32" + min_val = float("-1.27494") + max_val = float("0.306387") + mean = float("-0.179164") + std = float("0.226933") + data = None + 
+ +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [96] + dtype = "float32" + min_val = float("0.0455835") + max_val = float("1.3424") + mean = float("0.332632") + std = float("0.21051") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [96] + dtype = "float32" + min_val = float("0.0122159") + max_val = float("0.757072") + mean = float("0.0681865") + std = float("0.0851182") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [96] + dtype = "float32" + min_val = float("-0.184219") + max_val = float("0.219062") + mean = float("0.0290813") + std = float("0.0753948") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.712045") + max_val = float("0.716301") + mean = float("-0.00410221") + std = float("0.0541295") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [96] + dtype = "float32" + min_val = float("-1.27494") + max_val = float("0.306387") + mean = float("-0.179164") + std = float("0.226933") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [96] + dtype = "float32" + min_val = float("0.294566") + max_val = float("1.6937") + mean = float("0.748729") + std = float("0.271039") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [96] + dtype = "float32" + min_val = float("0.165557") + max_val = float("1.69999") + mean = float("0.381198") + std = float("0.235512") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [96] + dtype = "float32" + min_val = float("-0.341849") + max_val = float("0.496007") + mean = float("0.0821328") + std = float("0.15405") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.497612") + max_val = float("0.370914") + mean = float("-0.0017355") + std = float("0.0430277") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-3.61693") + max_val = float("0.205492") + mean = float("-1.04185") + std = float("0.565545") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.560756") + max_val = float("2.16211") + mean = float("1.1375") + std = float("0.244029") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("1.75486") + max_val = float("11.7535") + mean = float("3.10958") + std = float("1.20657") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-5.71005") + max_val = float("2.05147") + mean = float("-0.85203") + std = float("1.45283") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.800917") + max_val = float("0.937442") + mean = float("-0.00209265") + std = float("0.0501332") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-0.883924") + max_val = float("0.231285") + mean = float("-0.207603") + std = float("0.226615") + 
data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.0635187") + max_val = float("1.45514") + mean = float("0.359385") + std = float("0.229845") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("0.00987779") + max_val = float("0.734688") + mean = float("0.0796212") + std = float("0.085873") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.191291") + max_val = float("0.2258") + mean = float("0.0320165") + std = float("0.0759852") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.690611") + max_val = float("0.471862") + mean = float("-0.00613876") + std = float("0.0581474") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-0.883924") + max_val = float("0.231285") + mean = float("-0.207603") + std = float("0.226615") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.196107") + max_val = float("1.81826") + mean = float("0.747497") + std = float("0.297756") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.124698") + max_val = float("6.99878") + mean = float("0.46637") + std = float("0.7477") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-0.536893") + max_val = float("0.616421") + mean = float("0.0956487") + std = float("0.183151") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.460783") + max_val = float("0.692826") + mean = float("-0.0016117") + std = float("0.0448123") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-2.61271") + max_val = float("0.00630139") + mean = float("-0.996315") + std = float("0.466445") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.555948") + max_val = float("1.79166") + mean = float("1.10217") + std = float("0.201266") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("1.34698") + max_val = float("8.75833") + mean = float("2.63463") + std = float("1.02123") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-6.05567") + max_val = float("3.21192") + mean = float("-0.688994") + std = float("1.46249") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.570501") + max_val = float("0.698391") + mean = float("-0.00235646") + std = float("0.0518138") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.02922") + max_val = float("0.225558") + mean = float("-0.189243") + std = 
float("0.220085") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.0878477") + max_val = float("1.33702") + mean = float("0.327596") + std = float("0.19433") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.0219696") + max_val = float("0.757899") + mean = float("0.0849127") + std = float("0.092243") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.183021") + max_val = float("0.260461") + mean = float("0.0454974") + std = float("0.0777081") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.42972") + max_val = float("0.953388") + mean = float("-0.00543217") + std = float("0.0637082") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-1.02923") + max_val = float("0.225559") + mean = float("-0.189243") + std = float("0.220085") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.324905") + max_val = float("1.70274") + mean = float("0.664026") + std = float("0.227891") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.243143") + max_val = float("3.3049") + mean = float("0.531128") + std = float("0.396586") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-0.382214") + max_val = float("0.875943") + mean = float("0.091856") + std = float("0.21977") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.862711") + max_val = float("0.375887") + mean = float("-0.00229437") + std = float("0.0459623") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-3.53042") + max_val = float("0.099046") + mean = float("-0.975022") + std = float("0.531989") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.642452") + max_val = float("2.363") + mean = float("1.16114") + std = float("0.207619") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("1.07356") + max_val = float("4.52197") + mean = float("2.24717") + std = float("0.789203") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-4.34406") + max_val = float("6.3187") + mean = float("-0.778938") + std = float("1.34158") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.503152") + max_val = float("0.553356") + mean = float("-0.00239045") + std = float("0.0535807") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-0.681237") + max_val = float("0.273461") + mean = 
float("-0.142185") + std = float("0.230306") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.0912332") + max_val = float("0.939362") + mean = float("0.338614") + std = float("0.158234") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.0386177") + max_val = float("0.52359") + mean = float("0.192302") + std = float("0.104673") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.274091") + max_val = float("0.176536") + mean = float("-0.00710569") + std = float("0.0872727") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.47366") + max_val = float("0.460749") + mean = float("-0.00636777") + std = float("0.0731692") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-0.681237") + max_val = float("0.273461") + mean = float("-0.142185") + std = float("0.230306") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.275505") + max_val = float("1.62479") + mean = float("0.612677") + std = float("0.265647") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.482727") + max_val = float("3.46696") + mean = float("1.14967") + std = float("0.585049") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-1.52674") + max_val = float("0.528282") + mean = float("-0.0880626") + std = float("0.271565") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.63607") + max_val = float("0.483546") + mean = float("-0.00295896") + std = float("0.0464414") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-2.46451") + max_val = float("0.641417") + mean = float("-0.817452") + std = float("0.449442") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.859783") + max_val = float("1.94063") + mean = float("1.32407") + std = float("0.184198") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("0.795885") + max_val = float("5.05218") + mean = float("2.06466") + std = float("0.746691") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-5.34734") + max_val = float("3.02376") + mean = float("-0.369897") + std = float("1.18482") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.637024") + max_val = float("0.694483") + mean = float("-0.00201516") + std = float("0.0565668") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-3.39272") + max_val = 
float("1.70564") + mean = float("0.435602") + std = float("0.796967") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.395125") + max_val = float("2.42058") + mean = float("0.727615") + std = float("0.286742") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.520837") + max_val = float("5.40336") + mean = float("1.71633") + std = float("0.97006") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-1.68717") + max_val = float("1.90724") + mean = float("-0.0678927") + std = float("0.642659") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-1.22045") + max_val = float("1.2035") + mean = float("-0.00280952") + std = float("0.117665") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-5.00988") + max_val = float("1.19828") + mean = float("0.274645") + std = float("1.0304") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.640985") + max_val = float("6.28885") + mean = float("1.69438") + std = float("1.10998") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.650263") + max_val = float("4.97387") + mean = float("1.60583") + std = float("0.739515") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-1.2689") + max_val = float("2.29969") + mean = float("0.395696") + std = float("0.709586") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-1.09266") + max_val = float("1.09475") + mean = float("0.00157442") + std = float("0.131054") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [192] + dtype = "float32" + min_val = float("-2.25095") + max_val = float("1.6108") + mean = float("-0.13608") + std = float("0.692312") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [192] + dtype = "float32" + min_val = float("0.639639") + max_val = float("2.51496") + mean = float("1.18642") + std = float("0.284883") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [192] + dtype = "float32" + min_val = float("0.882003") + max_val = float("14.5083") + mean = float("3.00899") + std = float("1.98185") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [192] + dtype = "float32" + min_val = float("-2.85994") + max_val = float("2.23918") + mean = float("-0.246529") + std = float("0.817683") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.839395") + max_val = float("0.911987") + mean = float("-0.00089984") + std = float("0.0587142") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [128] + dtype = "float32" + min_val = float("-2.74747") + max_val = 
float("1.7416") + mean = float("-0.614142") + std = float("0.609429") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [128] + dtype = "float32" + min_val = float("0.352065") + max_val = float("2.40159") + mean = float("1.17724") + std = float("0.266749") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [128] + dtype = "float32" + min_val = float("0.407101") + max_val = float("3.78448") + mean = float("1.38393") + std = float("0.581977") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [128] + dtype = "float32" + min_val = float("-1.02626") + max_val = float("2.58908") + mean = float("0.639998") + std = float("0.714048") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-1.79941") + max_val = float("1.63418") + mean = float("0.00848209") + std = float("0.176504") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-0.721414") + max_val = float("1.3229") + mean = float("0.312009") + std = float("0.42377") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-2.39477") + max_val = float("2.15486") + mean = float("-0.000934765") + std = float("0.13967") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.500373") + max_val = float("0.408237") + mean = float("-0.0106499") + std = float("0.0914108") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.391989") + max_val = float("0.443921") + mean = float("-0.00464531") + std = float("0.0692893") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.563675") + max_val = float("0.61353") + mean = float("-0.00213426") + std = float("0.0809505") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.528835") + max_val = float("0.42184") + mean = float("-0.0046131") + std = float("0.0956008") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.399429") + max_val = float("0.459418") + mean = float("-0.00603599") + std = float("0.0738089") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.555547") + max_val = float("0.659281") + mean = float("-0.0032142") + std = float("0.0872176") + data = None + + +class 
Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.851714") + max_val = float("0.589482") + mean = float("-0.0177896") + std = float("0.131722") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.609572") + max_val = float("0.517247") + mean = float("-0.00250047") + std = float("0.0848317") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.796944") + max_val = float("0.841953") + mean = float("-0.00225679") + std = float("0.0959956") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 96, 1, 1] 
+ dtype = "float32" + min_val = float("-1.66802") + max_val = float("1.07059") + mean = float("-0.0107785") + std = float("0.179795") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-1.06553") + max_val = float("1.03272") + mean = float("0.00299396") + std = float("0.184458") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("-3.6652") + max_val = float("3.22366") + mean = float("0.206414") + std = float("1.15229") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("0.972278") + max_val = float("4.7607") + mean = float("1.98846") + std = float("0.7408") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96] + dtype = "float32" + min_val = float("24.1873") + max_val = float("292.403") + mean = float("93.9316") + std = float("48.7402") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-14.6306") + max_val = float("10.8272") + mean = float("-3.34394") + std = float("4.78479") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-1.17881") + max_val = float("0.670355") + mean = float("-0.00405706") + std = float("0.0943891") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-1.49789") + max_val = float("1.24286") + mean = float("-0.00829203") + std = float("0.139659") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-2.24893") + max_val = float("1.82363") + mean = float("-0.00825131") + std = float("0.187155") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-2.75119") + max_val = float("2.47949") + mean = float("-0.00716291") + std = float("0.569193") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..78ddeb23d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +2fc04306fb10cd9cb65a1a6c768f4e1be6ed27375ecebadec001a8bd38306733 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/input_meta.py new file mode 100644 index 000000000..972f8be27 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [0.00731888] diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/model.py new file mode 100644 index 000000000..cb4aa2d35 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, 
float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.05"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1xf32) <- (1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1, full_2 + + # pd_op.add: (1xf32) <- (xf32, 1xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..f2b2e24e8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +f7702ce22ebd08a9d0227ec828a8615448e94e4ee70413df0be086ee761d1ad7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/input_meta.py new file mode 100644 index 000000000..a9aac913b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/input_meta.py @@ -0,0 +1,81 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 25, 21504] + dtype = "float32" + max_val = float("0.450419") + mean = float("6.86159e-05") + std = float("0.00270931") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 21504, 15] + dtype = "float32" + min_val = float("4.06804e-06") + max_val = float("0.992795") + mean = float("0.0118279") + std = float("0.0190866") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 25, 1] + dtype = "int32" + data = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 21504, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1020.0") + mean = float("512.0") + std = float("295.583") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 25, 5] + dtype = "float32" + min_val = float("0.666691") + max_val = float("891.995") + mean = float("262.9") + std = float("311.619") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 25, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/model.py new file mode 100644 index 000000000..2e761adab --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/model.py @@ -0,0 +1,465 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x25x21504xb) <- (1x25x21504xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(data_0, full_0) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x25x21504xf32) <- (1x25x21504xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + data_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (1x25x21504xf32) <- (1x25x21504xb, 1x25x21504xf32, 1x25x21504xf32) + where_0 = paddle._C_ops.where(greater_than_1, full_like_0, data_0) + del data_0, full_like_0, greater_than_1 + + # pd_op.transpose: (1x15x21504xf32) <- (1x21504x15xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_2, full_3, full_3, dtype="int32") + del full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [1, 25] + + # pd_op.tile: (1x25xi32) <- (1x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1 + + # pd_op.squeeze: (1x25xi32) <- (1x25x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_2, full_int_array_0) + del data_2 + + # builtin.combine: ([1x25xi32, 1x25xi32]) <- (1x25xi32, 1x25xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (1x25x2xi32) <- ([1x25xi32, 1x25xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (1x25x21504xf32) <- (1x15x21504xf32, 1x25x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (1x25x21504xf32) <- (1x25x21504xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (1x25x21504xf32) <- (1x25x21504xf32) + pow_1 = paddle._C_ops.pow(where_0, float("6")) + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x25x1xf32, 1x25x1xf32, 1x25x1xf32, 1x25x1xf32, 1x25x1xf32]) <- (1x25x5xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_4, 5, full_4) + del data_4 + + # builtin.split: (1x25x1xf32, 1x25x1xf32, 1x25x1xf32, 1x25x1xf32, 1x25x1xf32) <- ([1x25x1xf32, 1x25x1xf32, 1x25x1xf32, 1x25x1xf32, 1x25x1xf32]) + ( + 
split_0, + split_1, + split_2, + split_3, + split_4, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (4xf32) <- () + full_5 = paddle._C_ops.full( + [4], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xf32) <- (4xf32) + assign_value__0 = paddle._C_ops.assign_value_( + full_5, + [4], + paddle.float32, + [float("0.5"), float("0.5"), float("-0.5"), float("-0.5")], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 1, 4] + + # pd_op.reshape: (1x1x4xf32) <- (4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(assign_value__0, full_int_array_2) + del assign_value__0 + + # pd_op.multiply: (1x25x4xf32) <- (1x1x4xf32, 1x25x1xf32) + multiply_1 = paddle._C_ops.multiply(reshape_0, split_2) + del reshape_0, split_2 + + # pd_op.full: (4xf32) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xf32) <- (4xf32) + assign_value__1 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.float32, + [float("-0.5"), float("0.5"), float("0.5"), float("-0.5")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.reshape: (1x1x4xf32) <- (4xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(assign_value__1, full_int_array_2) + del assign_value__1, full_int_array_2 + + # pd_op.multiply: (1x25x4xf32) <- (1x1x4xf32, 1x25x1xf32) + multiply_2 = paddle._C_ops.multiply(reshape_1, split_3) + del reshape_1, split_3 + + # builtin.combine: ([1x25x4xf32, 1x25x4xf32]) <- (1x25x4xf32, 1x25x4xf32) + combine_1 = [multiply_1, multiply_2] + del multiply_1, multiply_2 + + # pd_op.stack: (1x25x4x2xf32) <- ([1x25x4xf32, 1x25x4xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.sin: (1x25x1xf32) <- (1x25x1xf32) + sin_0 = paddle._C_ops.sin(split_4) + + # pd_op.cos: (1x25x1xf32) <- (1x25x1xf32) + cos_0 = paddle._C_ops.cos(split_4) + del split_4 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x25x1xf32, 1x25x1xf32]) <- (1x25x1xf32, 1x25x1xf32) + combine_2 = [cos_0, sin_0] + + # pd_op.concat: (1x25x2xf32) <- ([1x25x1xf32, 1x25x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_7) + del combine_2 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x25x1xf32) <- (1x25x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(sin_0, full_8, float("0"), True) + del full_8, sin_0 + + # builtin.combine: ([1x25x1xf32, 1x25x1xf32]) <- (1x25x1xf32, 1x25x1xf32) + combine_3 = [scale_0, cos_0] + del cos_0, scale_0 + + # pd_op.concat: (1x25x2xf32) <- ([1x25x1xf32, 1x25x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_3, full_7) + del combine_3, full_7 + + # builtin.combine: ([1x25x2xf32, 1x25x2xf32]) <- (1x25x2xf32, 1x25x2xf32) + combine_4 = [concat_0, concat_1] + del concat_0, concat_1 + + # pd_op.stack: (1x25x2x2xf32) <- ([1x25x2xf32, 1x25x2xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -2) + del combine_4 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [-1, 4, 2] + + # pd_op.reshape: (25x4x2xf32) <- (1x25x4x2xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(stack_1, full_int_array_3) + del full_int_array_3, stack_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [-1, 2, 2] + + # pd_op.reshape: (25x2x2xf32) <- (1x25x2x2xf32, 
3xi64) + reshape_3 = paddle._C_ops.reshape(stack_2, full_int_array_4) + del full_int_array_4, stack_2 + + # pd_op.bmm: (25x4x2xf32) <- (25x4x2xf32, 25x2x2xf32) + bmm_0 = paddle._C_ops.bmm(reshape_2, reshape_3) + del reshape_2, reshape_3 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 4, 2] + + # pd_op.reshape: (1x25x4x2xf32) <- (25x4x2xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(bmm_0, full_int_array_5) + del bmm_0, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [1] + + # pd_op.slice: (1x25x4xf32) <- (1x25x4x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_4, [3], full_int_array_6, full_int_array_7, [1], [3] + ) + + # pd_op.add: (1x25x4xf32) <- (1x25x4xf32, 1x25x1xf32) + add_0 = paddle._C_ops.add(slice_0, split_0) + del slice_0, split_0 + + # pd_op.set_value_with_tensor_: (1x25x4x2xf32) <- (1x25x4x2xf32, 1x25x4xf32, 1xi64, 1xi64, 1xi64) + set_value_with_tensor__0 = paddle._C_ops.set_value_with_tensor_( + reshape_4, + add_0, + full_int_array_6, + full_int_array_7, + full_int_array_7, + [3], + [3], + [], + ) + del add_0, reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [2] + + # pd_op.slice: (1x25x4xf32) <- (1x25x4x2xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + set_value_with_tensor__0, [3], full_int_array_7, full_int_array_8, [1], [3] + ) + + # pd_op.add: (1x25x4xf32) <- (1x25x4xf32, 1x25x1xf32) + add_1 = paddle._C_ops.add(slice_1, split_1) + del slice_1, split_1 + + # pd_op.set_value_with_tensor_: (1x25x4x2xf32) <- (1x25x4x2xf32, 1x25x4xf32, 1xi64, 1xi64, 1xi64) + set_value_with_tensor__1 = paddle._C_ops.set_value_with_tensor_( + set_value_with_tensor__0, + add_1, + full_int_array_7, + full_int_array_8, + full_int_array_7, + [3], + [3], + [], + ) + del add_1, full_int_array_7, full_int_array_8, set_value_with_tensor__0 + + # pd_op.unsqueeze: (1x1x21504x2xf32) <- (1x21504x2xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_6) + del data_3, full_int_array_6 + + # pd_op.split_with_num: ([1x25x1x2xf32, 1x25x1x2xf32, 1x25x1x2xf32, 1x25x1x2xf32]) <- (1x25x4x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num( + set_value_with_tensor__1, 4, full_4 + ) + del full_4 + + # builtin.split: (1x25x1x2xf32, 1x25x1x2xf32, 1x25x1x2xf32, 1x25x1x2xf32) <- ([1x25x1x2xf32, 1x25x1x2xf32, 1x25x1x2xf32, 1x25x1x2xf32]) + ( + split_5, + split_6, + split_7, + split_8, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x25x1x2xf32) <- (1x25x1x2xf32, 1x25x1x2xf32) + subtract_0 = paddle._C_ops.subtract(split_6, split_5) + del split_6 + + # pd_op.subtract: (1x25x1x2xf32) <- (1x25x1x2xf32, 1x25x1x2xf32) + subtract_1 = paddle._C_ops.subtract(split_8, split_5) + del split_8 + + # pd_op.subtract: (1x25x21504x2xf32) <- (1x1x21504x2xf32, 1x25x1x2xf32) + subtract_2 = paddle._C_ops.subtract(unsqueeze_1, split_5) + del split_5, unsqueeze_1 + + # pd_op.multiply: (1x25x1x2xf32) <- (1x25x1x2xf32, 1x25x1x2xf32) + multiply_3 = paddle._C_ops.multiply(subtract_0, subtract_0) + + # pd_op.sum: (1x25x1xf32) <- (1x25x1x2xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_3, full_int_array_0, None, False) + del multiply_3 + + # pd_op.multiply: (1x25x1x2xf32) <- (1x25x1x2xf32, 1x25x1x2xf32) + multiply_4 = paddle._C_ops.multiply(subtract_1, subtract_1) + + # pd_op.sum: (1x25x1xf32) <- (1x25x1x2xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_0, None, False) + del multiply_4 + + # pd_op.multiply: 
(1x25x21504x2xf32) <- (1x25x21504x2xf32, 1x25x1x2xf32) + multiply_5 = paddle._C_ops.multiply(subtract_2, subtract_0) + del subtract_0 + + # pd_op.sum: (1x25x21504xf32) <- (1x25x21504x2xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_0, None, False) + del multiply_5 + + # pd_op.multiply: (1x25x21504x2xf32) <- (1x25x21504x2xf32, 1x25x1x2xf32) + multiply_6 = paddle._C_ops.multiply(subtract_2, subtract_1) + del subtract_1, subtract_2 + + # pd_op.sum: (1x25x21504xf32) <- (1x25x21504x2xf32, 1xi64) + sum_3 = paddle._C_ops.sum(multiply_6, full_int_array_0, None, False) + del full_int_array_0, multiply_6 + + # pd_op.full: (xf32) <- () + full_9 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_equal: (1x25x21504xb) <- (1x25x21504xf32, xf32) + greater_equal_0 = paddle._C_ops.greater_equal(sum_2, full_9) + + # pd_op.less_equal: (1x25x21504xb) <- (1x25x21504xf32, 1x25x1xf32) + less_equal_0 = paddle._C_ops.less_equal(sum_2, sum_0) + del sum_0, sum_2 + + # pd_op.bitwise_and: (1x25x21504xb) <- (1x25x21504xb, 1x25x21504xb) + bitwise_and_0 = paddle._C_ops.bitwise_and(greater_equal_0, less_equal_0) + del greater_equal_0, less_equal_0 + + # pd_op.greater_equal: (1x25x21504xb) <- (1x25x21504xf32, xf32) + greater_equal_1 = paddle._C_ops.greater_equal(sum_3, full_9) + del full_9 + + # pd_op.bitwise_and: (1x25x21504xb) <- (1x25x21504xb, 1x25x21504xb) + bitwise_and_1 = paddle._C_ops.bitwise_and(bitwise_and_0, greater_equal_1) + del bitwise_and_0, greater_equal_1 + + # pd_op.less_equal: (1x25x21504xb) <- (1x25x21504xf32, 1x25x1xf32) + less_equal_1 = paddle._C_ops.less_equal(sum_3, sum_1) + del sum_1, sum_3 + + # pd_op.bitwise_and: (1x25x21504xb) <- (1x25x21504xb, 1x25x21504xb) + bitwise_and_2 = paddle._C_ops.bitwise_and(bitwise_and_1, less_equal_1) + del bitwise_and_1, less_equal_1 + + # pd_op.cast: (1x25x21504xf32) <- (1x25x21504xb) + cast_0 = paddle._C_ops.cast(bitwise_and_2, paddle.float32) + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_7 = paddle._C_ops.multiply(multiply_0, cast_0) + del cast_0 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (1x25x13xf32, 1x25x13xi64) <- (1x25x21504xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_7, full_10, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_10, multiply_7 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("21504"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x25x13x21504xf32) <- (1x25x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_11, topk_1.dtype), full_11 + ) + del full_11, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [-2] + + # pd_op.sum: (1x25x21504xf32) <- (1x25x13x21504xf32, 1xi64) + sum_4 = paddle._C_ops.sum(one_hot_0, full_int_array_9, None, False) + del one_hot_0 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x1xf32) + multiply_8 = paddle._C_ops.multiply(sum_4, data_5) + del sum_4 + + # pd_op.cast: (1x25x21504xf32) <- (1x25x21504xb) + cast_1 = paddle._C_ops.cast(bitwise_and_2, paddle.float32) + del bitwise_and_2 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 1x25x21504xf32) + multiply_9 = paddle._C_ops.multiply(multiply_8, cast_1) + del cast_1, multiply_8 + + # pd_op.multiply: (1x25x21504xf32) <- (1x25x21504xf32, 
1x25x1xf32) + multiply_10 = paddle._C_ops.multiply(multiply_9, data_5) + del data_5, multiply_9 + + # pd_op.sum: (1x21504xf32) <- (1x25x21504xf32, 1xi64) + sum_5 = paddle._C_ops.sum(multiply_10, full_int_array_9, None, False) + del full_int_array_9 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_10 = [] + + # pd_op.max: (xf32) <- (1x21504xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_5, full_int_array_10, False) + del full_int_array_10 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) + del ( + full_12, + max_0, + multiply_0, + multiply_10, + set_value_with_tensor__1, + sum_5, + unsqueeze_0, + where_0, + ) + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..7ef9d4761 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +f57fc80a3f5c6d7b77d51fc6aa07f467eafce95a075d35dcbab196e3c37e5f1e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/input_meta.py new file mode 100644 index 000000000..589f8e2f7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 21504] + dtype = "int32" + min_val = 4 + max_val = 15 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 21504, 15] + dtype = "float32" + max_val = float("0.236708") + mean = float("9.6547e-06") + std = float("0.00116044") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504, 15] + dtype = "float32" + min_val = float("4.06804e-06") + max_val = float("0.992795") + mean = float("0.0118279") + std = float("0.0190866") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/model.py new file mode 100644 index 000000000..12cbb2e7d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_0 % paddle.cast(full_0, data_0.dtype), full_0 + ) + del data_0, full_0 + + # pd_op.full_int_array: 
(1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (1x21504x15xf32) <- (1x21504x16xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (1x21504x15xf32) <- (1x21504x15xf32) + pow_0 = paddle._C_ops.pow(data_2, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x21504x15xf32) <- (1x21504x15xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x21504x15xf32) <- (1x21504x15xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x15xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x15xf32) + multiply_1 = paddle._C_ops.multiply(data_1, slice_0) + del slice_0 + + # pd_op.add: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x15xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x15xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_2, data_1) + del data_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x15xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (1x21504x15xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (1x21504x15xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_1, full_int_array_2, None, False) + del data_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..1a621aabd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +39d4100483aa4e64e442723fc1c71201bbd35f6b269473df8b274bae6e0cde24 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ 
+{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py new file mode 100644 index 000000000..f37a0b7a4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/input_meta.py @@ -0,0 +1,65 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 21504] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0815197") + std = float("0.278349") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 155, 21504] + dtype = "float32" + max_val = float("0.637167") + mean = float("0.000108427") + std = float("0.00408889") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 155, 21504] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000525934") + std = float("0.0229272") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 155, 1] + dtype = "int32" + min_val = 6 + max_val = 12 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 155, 5] + dtype = "float32" + min_val = float("0.231091") + max_val = float("1021.82") + mean = float("266.123") + std = float("330.341") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 155, 21504] + dtype = "float32" + max_val = float("0.00115321") + mean = float("2.20005e-09") + std = float("7.35257e-07") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py new file mode 100644 index 000000000..b64476001 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/model.py @@ -0,0 +1,245 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x21504xf32) <- (1x21504xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x21504xb) <- (1x1x21504xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 155, 1] + + # pd_op.tile: (1x155x21504xb) <- (1x1x21504xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x21504xi64) <- (1x155x21504xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("155"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x155xf32) <- (1x21504xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (1x155x21504xf32) <- (1x21504x155xf32) 
+ transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x155x21504xf32) <- (1x155x21504xb, 1x155x21504xf32, 1x155x21504xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (1x21504xf32) <- (1x155x21504xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (1x21504xi64) <- (1x155x21504xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("155"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x21504xi64) <- (1x21504xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (155xi32) <- (1x155x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (21504xi64) <- (1x21504xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (21504xi32) <- (155xi32, 21504xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [1, 21504] + + # pd_op.reshape: (1x21504xi32) <- (21504xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x21504xb) <- (1x21504xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x21504xi32) <- (1x21504xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x21504xi32) <- (1x21504xb, 1x21504xi32, 1x21504xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 5] + + # pd_op.reshape: (155x5xf32) <- (1x155x5xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (21504x5xf32) <- (155x5xf32, 21504xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [1, 21504, 5] + + # pd_op.reshape: (1x21504x5xf32) <- (21504x5xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x21504x16xf32) <- (1x21504xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # 
pd_op.full: (15xi64) <- () + full_8 = paddle._C_ops.full( + [15], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (15xi64) <- (15xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [15], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + float("10"), + float("11"), + float("12"), + float("13"), + float("14"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x21504x15xf32) <- (1x21504x16xf32, 15xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x21504xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (1x155x1xf32) <- (1x155x21504xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x21504xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (1x155x1xf32) <- (1x155x21504xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x155x1xf32) <- (1x155x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x155x21504xf32) <- (1x155x21504xf32, 1x155x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x21504xf32) <- (1x155x21504xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (1x21504x1xf32) <- (1x21504xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (1x21504x15xf32) <- (1x21504x15xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..e9f7ce787 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +03997de5ec3a4cd5ed5db2d1365661435b2432ebcacd906a6be3edec6dc9f223 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No 
newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/input_meta.py new file mode 100644 index 000000000..b1e263298 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/input_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 21504] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 21504, 5] + dtype = "float32" + min_val = float("-23.2896") + max_val = float("1021.9") + mean = float("208.967") + std = float("310.024") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504, 5] + dtype = "float32" + min_val = float("0.666691") + max_val = float("891.995") + mean = float("284.85") + std = float("333.405") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 21504, 15] + dtype = "float32" + max_val = float("0.236708") + mean = float("9.6547e-06") + std = float("0.00116044") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [3.11422] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 21504, 91] + dtype = "float32" + min_val = float("1.0") + max_val = float("10.0") + mean = float("1.0989") + std = float("0.938258") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.0174533] diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/model.py new file mode 100644 index 000000000..bfdf4c819 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/model.py @@ -0,0 +1,753 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (1x21504x1xb) <- (1x21504xb, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 5] + + # pd_op.tile: (1x21504x5xb) <- (1x21504x1xb, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1 + + # pd_op.masked_select: (-1xf32) <- (1x21504x5xf32, 1x21504x5xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, tile_0) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 5] + + # pd_op.reshape: (-1x5xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (1x21504x5xf32, 1x21504x5xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, tile_0) + del data_2 + + # pd_op.reshape: (-1x5xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del full_int_array_2, masked_select_1 + + # pd_op.sum: (1x21504xf32) <- (1x21504x15xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (1x21504xf32, 1x21504xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del data_0, sum_0 + + # pd_op.reshape: 
(-1xf32) <- (-1xf32, 1xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [2, 2, 1] + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split: ([-1x2xf32, -1x2xf32, -1x1xf32]) <- (-1x5xf32, 3xi64, 1xi32) + split_0 = paddle._C_ops.split(reshape_0, full_int_array_3, full_0) + del reshape_0 + + # builtin.split: (-1x2xf32, -1x2xf32, -1x1xf32) <- ([-1x2xf32, -1x2xf32, -1x1xf32]) + ( + split_1, + split_2, + split_3, + ) = split_0 + del split_0 + + # pd_op.pow: (-1x2xf32) <- (-1x2xf32) + pow_0 = paddle._C_ops.pow(split_2, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.0833333"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x2xf32) <- (-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x1xf32]) <- (-1x2xf32, -1x2xf32, -1x1xf32) + combine_0 = [split_1, scale_0, split_3] + + # pd_op.concat: (-1x5xf32) <- ([-1x2xf32, -1x2xf32, -1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.split: ([-1x2xf32, -1x2xf32, -1x1xf32]) <- (-1x5xf32, 3xi64, 1xi32) + split_4 = paddle._C_ops.split(reshape_1, full_int_array_3, full_0) + del full_int_array_3 + + # builtin.split: (-1x2xf32, -1x2xf32, -1x1xf32) <- ([-1x2xf32, -1x2xf32, -1x1xf32]) + ( + split_5, + split_6, + split_7, + ) = split_4 + del split_4 + + # pd_op.pow: (-1x2xf32) <- (-1x2xf32) + pow_1 = paddle._C_ops.pow(split_6, float("2")) + del split_6 + + # pd_op.scale: (-1x2xf32) <- (-1x2xf32, 1xf32) + scale_1 = paddle._C_ops.scale(pow_1, full_1, float("0"), True) + del pow_1 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x1xf32]) <- (-1x2xf32, -1x2xf32, -1x1xf32) + combine_1 = [split_5, scale_1, split_7] + del scale_1, split_5, split_7 + + # pd_op.concat: (-1x5xf32) <- ([-1x2xf32, -1x2xf32, -1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_5 + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + concat_0, [1], full_int_array_4, full_int_array_5, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_6 + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + concat_0, [1], full_int_array_5, full_int_array_6, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_7 + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + concat_0, [1], full_int_array_6, full_int_array_7, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_8 + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + concat_0, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [5] + + # 
pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + concat_0, [1], full_int_array_8, full_int_array_9, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + concat_1, [1], full_int_array_4, full_int_array_5, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + concat_1, [1], full_int_array_5, full_int_array_6, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + concat_1, [1], full_int_array_6, full_int_array_7, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + concat_1, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + concat_1, [1], full_int_array_8, full_int_array_9, [1], [1] + ) + del concat_1 + + # pd_op.cos: (-1xf32) <- (-1xf32) + cos_0 = paddle._C_ops.cos(slice_4) + + # pd_op.sin: (-1xf32) <- (-1xf32) + sin_0 = paddle._C_ops.sin(slice_4) + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_2 = paddle._C_ops.pow(cos_0, float("2")) + + # pd_op.assign: (-1xf32) <- (-1xf32) + assign_7 = pow_2 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_0 = paddle._C_ops.multiply(slice_2, pow_2) + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_3 = paddle._C_ops.pow(sin_0, float("2")) + + # pd_op.assign: (-1xf32) <- (-1xf32) + assign_8 = pow_3 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_1 = paddle._C_ops.multiply(slice_3, pow_3) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_2 = paddle._C_ops.multiply(slice_2, pow_3) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_3 = paddle._C_ops.multiply(slice_3, pow_2) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_1 = paddle._C_ops.add(multiply_2, multiply_3) + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_0 = paddle._C_ops.subtract(slice_2, slice_3) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_4 = paddle._C_ops.multiply(subtract_0, cos_0) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, sin_0) + + # pd_op.cos: (-1xf32) <- (-1xf32) + cos_1 = paddle._C_ops.cos(slice_9) + + # pd_op.sin: (-1xf32) <- (-1xf32) + sin_1 = paddle._C_ops.sin(slice_9) + del slice_9 + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_4 = paddle._C_ops.pow(cos_1, float("2")) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_6 = paddle._C_ops.multiply(slice_7, pow_4) + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_5 = paddle._C_ops.pow(sin_1, float("2")) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_7 = paddle._C_ops.multiply(slice_8, pow_5) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_2 = paddle._C_ops.add(multiply_6, multiply_7) + del multiply_6, multiply_7 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_8 = paddle._C_ops.multiply(slice_7, pow_5) + del pow_5 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_9 = paddle._C_ops.multiply(slice_8, pow_4) + del pow_4 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_3 = paddle._C_ops.add(multiply_8, multiply_9) + del multiply_8, multiply_9 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_1 = paddle._C_ops.subtract(slice_7, slice_8) + del slice_7, slice_8 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_10 = 
paddle._C_ops.multiply(subtract_1, cos_1) + del cos_1, subtract_1 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_11 = paddle._C_ops.multiply(multiply_10, sin_1) + del multiply_10, sin_1 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_4 = paddle._C_ops.add(add_0, add_2) + + # pd_op.assign: (-1xf32) <- (-1xf32) + assign_9 = add_4 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_2 = paddle._C_ops.subtract(slice_1, slice_6) + + # pd_op.assign: (-1xf32) <- (-1xf32) + assign_10 = subtract_2 + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_6 = paddle._C_ops.pow(subtract_2, float("2")) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_12 = paddle._C_ops.multiply(add_4, pow_6) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_5 = paddle._C_ops.add(add_1, add_3) + + # pd_op.assign: (-1xf32) <- (-1xf32) + assign_11 = add_5 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_3 = paddle._C_ops.subtract(slice_0, slice_5) + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_7 = paddle._C_ops.pow(subtract_3, float("2")) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_13 = paddle._C_ops.multiply(add_5, pow_7) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_6 = paddle._C_ops.add(multiply_12, multiply_13) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0.25"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_6, full_3, float("0"), True) + del add_6 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_7 = paddle._C_ops.add(multiply_5, multiply_11) + + # pd_op.assign: (-1xf32) <- (-1xf32) + assign_12 = add_7 + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_4 = paddle._C_ops.subtract(slice_5, slice_0) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_14 = paddle._C_ops.multiply(add_7, subtract_4) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_15 = paddle._C_ops.multiply(multiply_14, subtract_2) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(multiply_15, full_4, float("0"), True) + del multiply_15 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_8 = paddle._C_ops.add(scale_2, scale_3) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_16 = paddle._C_ops.multiply(add_4, add_5) + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_8 = paddle._C_ops.pow(add_7, float("2")) + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_5 = paddle._C_ops.subtract(multiply_16, pow_8) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_17 = paddle._C_ops.multiply(add_0, add_1) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_18 = paddle._C_ops.multiply(multiply_5, multiply_5) + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_6 = paddle._C_ops.subtract(multiply_17, multiply_18) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_19 = paddle._C_ops.multiply(add_2, add_3) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_20 = paddle._C_ops.multiply(multiply_11, multiply_11) + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_7 = paddle._C_ops.subtract(multiply_19, multiply_20) + del multiply_19, multiply_20 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_21 = paddle._C_ops.multiply(subtract_6, subtract_7) + + # pd_op.relu: 
(-1xf32) <- (-1xf32) + relu_0 = paddle._C_ops.relu(multiply_21) + del multiply_21 + + # pd_op.sqrt: (-1xf32) <- (-1xf32) + sqrt_0 = paddle._C_ops.sqrt(relu_0) + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(sqrt_0, full_5, float("0"), True) + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_6 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(scale_4, full_6, float("0.001"), True) + del scale_4 + + # pd_op.divide: (-1xf32) <- (-1xf32, -1xf32) + divide_1 = paddle._C_ops.divide(subtract_5, scale_5) + + # pd_op.log: (-1xf32) <- (-1xf32) + log_0 = paddle._C_ops.log(divide_1) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(log_0, full_4, float("0"), True) + del log_0 + + # pd_op.divide: (-1xf32) <- (-1xf32, -1xf32) + divide_2 = paddle._C_ops.divide(add_8, subtract_5) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_9 = paddle._C_ops.add(divide_2, scale_6) + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("0.001"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("100"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (-1xf32) <- (-1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(add_9, full_7, full_8) + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_15 = full_9 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(clip_0, full_9, float("0"), True) + del clip_0 + + # pd_op.exp: (-1xf32) <- (-1xf32) + exp_0 = paddle._C_ops.exp(scale_7) + del scale_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(exp_0, full_9, float("1"), True) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_6, float("0.001"), True) + del scale_8 + + # pd_op.sqrt: (-1xf32) <- (-1xf32) + sqrt_1 = paddle._C_ops.sqrt(scale_9) + del scale_9 + + # pd_op.pow: (-1xf32) <- (-1xf32) + pow_9 = paddle._C_ops.pow(sqrt_1, float("2")) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(pow_9, full_9, float("1"), True) + del pow_9 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_6, float("0.001"), True) + del scale_10 + + # pd_op.log: (-1xf32) <- (-1xf32) + log_1 = paddle._C_ops.log(scale_11) + del scale_11 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(log_1, full_9, float("0"), True) + del log_1 + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_22 = paddle._C_ops.multiply(sqrt_1, reshape_2) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_10 = [] + + # pd_op.sum: (xf32) <- (-1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_22, full_int_array_10, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + del data_4 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [1, 1, 91] + + # pd_op.tile: (1x21504x91xb) <- (1x21504x1xb, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_0, full_int_array_11) + del full_int_array_11, unsqueeze_0 + + # pd_op.masked_select: (-1xf32) <- 
(1x21504x91xf32, 1x21504x91xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, tile_1) + del data_5 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [-1, 91] + + # pd_op.reshape: (-1x91xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_3, full_int_array_12) + del full_int_array_12 + + # pd_op.slice: (-1xf32) <- (-1x5xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + reshape_1, [1], full_int_array_8, full_int_array_9, [1], [1] + ) + del reshape_1 + + # pd_op.divide: (-1xf32) <- (-1xf32, 1xf32) + divide_3 = paddle._C_ops.divide(slice_10, data_6) + del data_6, slice_10 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("89.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (-1xf32) <- (-1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(divide_3, full_10, full_11) + del divide_3, full_10, full_11 + + # pd_op.cast: (-1xi64) <- (-1xf32) + cast_0 = paddle._C_ops.cast(clip_1, paddle.int64) + + # pd_op.scale: (-1xi64) <- (-1xi64, 1xf32) + scale_13 = paddle._C_ops.scale(cast_0, full_6, float("1"), True) + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(scale_13, paddle.float32) + + # pd_op.subtract: (-1xf32) <- (-1xf32, -1xf32) + subtract_8 = paddle._C_ops.subtract(cast_1, clip_1) + del cast_1, clip_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(subtract_8, full_9, float("1"), True) + + # pd_op.unsqueeze: (-1x1xi64) <- (-1xi64, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.cross_entropy_with_softmax: (-1x91xf32, -1x1xf32) <- (-1x91xf32, -1x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_3, unsqueeze_1, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1xf32) <- (-1x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_23 = paddle._C_ops.multiply(squeeze_0, subtract_8) + + # pd_op.unsqueeze: (-1x1xi64) <- (-1xi64, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(scale_13, full_int_array_0) + del scale_13 + + # pd_op.cross_entropy_with_softmax: (-1x91xf32, -1x1xf32) <- (-1x91xf32, -1x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_3, unsqueeze_2, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_3 + + # pd_op.squeeze: (-1xf32) <- (-1x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1xf32) <- (-1xf32, -1xf32) + multiply_24 = paddle._C_ops.multiply(squeeze_1, scale_14) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_10 = paddle._C_ops.add(multiply_23, multiply_24) + + # pd_op.mean: (1xf32) <- (-1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_10, full_int_array_0, True) + del ( + add_0, + add_1, + add_10, + add_2, + add_3, + add_4, + add_5, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + 
assign_8, + assign_9, + concat_0, + cos_0, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + divide_1, + divide_2, + exp_0, + full_0, + full_1, + full_2, + full_3, + full_4, + full_5, + full_6, + full_7, + full_8, + full_9, + full_int_array_0, + full_int_array_10, + full_int_array_4, + full_int_array_5, + full_int_array_6, + full_int_array_7, + full_int_array_8, + full_int_array_9, + masked_select_0, + masked_select_3, + multiply_0, + multiply_1, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_16, + multiply_17, + multiply_18, + multiply_2, + multiply_22, + multiply_23, + multiply_24, + multiply_3, + multiply_4, + multiply_5, + pow_2, + pow_3, + pow_6, + pow_7, + pow_8, + relu_0, + reshape_2, + scale_0, + scale_14, + scale_2, + scale_3, + scale_5, + scale_6, + sin_0, + slice_0, + slice_1, + slice_2, + slice_3, + slice_4, + slice_5, + slice_6, + split_1, + split_2, + split_3, + sqrt_0, + sqrt_1, + squeeze_0, + squeeze_1, + subtract_0, + subtract_2, + subtract_3, + subtract_4, + subtract_5, + subtract_6, + subtract_7, + subtract_8, + sum_1, + tile_0, + tile_1, + unsqueeze_1, + unsqueeze_2, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + divide_0, + mean_0, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..7d7ecfd98 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +d3c0cf999304c69bc5a1b32523a7375e4782a894dfbb9b4bc1e31a8a45349cfe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_net.json new file mode 100644 index 000000000..93527f12f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-R-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/input_meta.py new file mode 100644 index 000000000..2be9764d3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/input_meta.py @@ -0,0 +1,69 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 21504, 15] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0132611") + std = float("0.111912") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 21504, 4] + dtype = "float32" + min_val = float("-733.076") + max_val = float("1205.76") + mean = float("3.69083") + std = float("22.0759") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 21504, 91] + dtype = "float32" + min_val = float("-155.033") + max_val = float("176.291") + mean = float("1.05416") + std = float("1.24631") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 21504, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1020.0") + mean = 
float("512.0") + std = float("295.583") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 21504, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [91] + dtype = "float32" + max_val = float("1.5708") + mean = float("0.785398") + std = float("0.458461") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/model.py new file mode 100644 index 000000000..9527d92e2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/model.py @@ -0,0 +1,126 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x21504x2xf32, 1x21504x2xf32]) <- (1x21504x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_2, 2, full_0) + del data_2 + + # builtin.split: (1x21504x2xf32, 1x21504x2xf32) <- ([1x21504x2xf32, 1x21504x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.multiply: (1x21504x2xf32) <- (1x21504x2xf32, 1x21504x1xf32) + multiply_0 = paddle._C_ops.multiply(split_0, data_5) + + # pd_op.add: (1x21504x2xf32) <- (1x21504x2xf32, 1x21504x2xf32) + add_0 = paddle._C_ops.add(multiply_0, data_4) + del data_4 + + # pd_op.elu: (1x21504x2xf32) <- (1x21504x2xf32) + elu_0 = paddle._C_ops.elu(split_1, float("1")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x21504x2xf32) <- (1x21504x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(elu_0, full_1, float("1"), True) + + # pd_op.multiply: (1x21504x2xf32) <- (1x21504x2xf32, 1x21504x1xf32) + multiply_1 = paddle._C_ops.multiply(scale_0, data_5) + del data_5 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, 21504, 1, 91] + + # pd_op.reshape: (1x21504x1x91xf32) <- (1x21504x91xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.softmax: (1x21504x1x91xf32) <- (1x21504x1x91xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.matmul: (1x21504x1xf32) <- (1x21504x1x91xf32, 91xf32) + matmul_0 = paddle._C_ops.matmul(softmax_0, data_6, False, False) + del data_6 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x21504x2xf32, 1x21504x2xf32, 1x21504x1xf32]) <- (1x21504x2xf32, 1x21504x2xf32, 1x21504x1xf32) + combine_0 = [add_0, multiply_1, matmul_0] + + # pd_op.concat: (1x21504x5xf32) <- ([1x21504x2xf32, 1x21504x2xf32, 1x21504x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_3) + del data_0, full_3 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.int64, 
paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_4) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_4) + del cast_1, full_4 + + # pd_op.share_data_: (1x21504x15xf32) <- (1x21504x15xf32) + share_data__0 = data_1.detach() + del data_1 + + # pd_op.share_data_: (1x21504x5xf32) <- (1x21504x5xf32) + share_data__1 = concat_0.detach() + del ( + add_0, + concat_0, + elu_0, + full_0, + full_1, + full_2, + matmul_0, + multiply_0, + multiply_1, + scale_0, + softmax_0, + split_0, + split_1, + ) + + return share_data__0, share_data__1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..4e2142c30 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +933a277bd96b5968d6442985553c5c06a3642ca4a7c70d2a5dbb73274c8a9440 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py new file mode 100644 index 000000000..ebedcc374 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/input_meta.py @@ -0,0 +1,93 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0205543") + std = float("0.179507") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 15, 5376] + dtype = "float32" + max_val = float("0.964353") + mean = float("0.0217275") + std = float("0.0736307") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 15, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00137029") + std = float("0.036992") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 15, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 15, 4] + dtype = "float32" + max_val = float("512.0") + mean = float("126.218") + std = float("182.758") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 15, 5376] + dtype = "float32" + max_val = float("0.455376") + mean = float("7.96332e-05") + std = float("0.0033028") + data = None 
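Each sample added here follows the same layout: graph_net.json records the framework and model name, graph_hash.txt pins the captured program, input_meta.py (and, when non-empty, weight_meta.py) describes the tensors the subgraph expects, and model.py replays the captured ops inside a GraphModule whose forward() takes those tensors in order. The snippet below is a minimal, illustrative sketch of how one of these samples could be exercised by hand; it is not part of the generated files. The helper name make_tensor, the assumption that you run it from inside a subgraph directory (so input_meta and model import directly), and the use of random stand-in values for tensors whose literal data was not recorded are all assumptions of the sketch, not an API provided by this patch.

import numpy as np
import paddle

# Assumption: executed from inside one subgraph directory, e.g.
#   paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/
import input_meta
from model import GraphModule


def make_tensor(meta_cls):
    # Hypothetical helper: rebuild one forward() input from an input_meta entry.
    # Literal data is used when it was recorded; otherwise random values with the
    # recorded shape/dtype stand in (the stored min/max/mean/std are ignored).
    if meta_cls.data is not None:
        arr = np.array(meta_cls.data, dtype=meta_cls.dtype).reshape(meta_cls.shape)
    else:
        arr = np.random.rand(*meta_cls.shape).astype(meta_cls.dtype)
    return paddle.to_tensor(arr)


# Collect Program_weight_tensor_data_* classes in numeric order (data_0, data_1, ...).
meta_names = sorted(
    (n for n in dir(input_meta) if n.startswith("Program_weight_tensor_data_")),
    key=lambda n: int(n.rsplit("_", 1)[1]),
)
inputs = [make_tensor(getattr(input_meta, n)) for n in meta_names]
outputs = GraphModule()(*inputs)

Samples whose weight_meta.py is non-empty (for example subgraph_1 below, which records parameter_0 with shape [1, 17, 1, 1] for the integral convolution) additionally list parameter_* tensors ahead of the data_* tensors in forward(); those would need to be rebuilt from weight_meta.py in the same way before calling the module.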
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py new file mode 100644 index 000000000..d3950802b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/model.py @@ -0,0 +1,223 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x5376xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x5376xb) <- (2x1x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 15, 1] + + # pd_op.tile: (2x15x5376xb) <- (2x1x5376xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x5376xi64) <- (2x15x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("15"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x15xf32) <- (2x5376xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x15x5376xf32) <- (2x5376x15xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x15x5376xf32) <- (2x15x5376xb, 2x15x5376xf32, 2x15x5376xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x5376xf32) <- (2x15x5376xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x5376xi64) <- (2x15x5376xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("15"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (30xi32) <- (2x15x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10752xi32) <- (30xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 5376] + + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 
= paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (30x4xf32) <- (2x15x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (10752x4xf32) <- (30x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 5376, 4] + + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x2xf32) <- (2x5376xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (1xi64) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x5376x1xf32) <- (2x5376x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x15x1xf32) <- (2x15x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x15x1xf32) <- (2x15x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.scale: (2x15x1xf32) <- (2x15x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x5376xf32) <- (2x15x5376xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x5376x1xf32) <- 
(2x5376xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x5376x1xf32) <- (2x5376x1xf32, 2x5376x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..a0a4bdddf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/input_meta.py new file mode 100644 index 000000000..27d631348 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 1] + dtype = "float32" + min_val = float("0.000732164") + max_val = float("0.935386") + mean = float("0.0393097") + std = float("0.0887762") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 8400, 68] + dtype = "float32" + min_val = float("-7.01895") + max_val = float("14.5228") + mean = float("8.74382e-06") + std = float("1.67872") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("636.0") + mean = float("320.0") + std = float("184.719") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/model.py new file mode 100644 index 000000000..98e7d51c4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + 
slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (2x-1x1xf32) <- (2x-1x1xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + 
multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..740832943 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +abe7a59d414d67e8b36ea3baf93dd665033fd681f03833e93a277b4a1096aad3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py new file mode 100644 index 000000000..9b27ac5ab --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/input_meta.py @@ -0,0 +1,108 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 6, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000895613") + std = float("0.0299134") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 6, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00537368") + std = float("0.0731081") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 6, 4] + dtype = "float32" + data = [ + 359.226, + 0.0, + 740.129, + 768.0, + 148.645, + 0.0, + 219.871, + 102.165, + 337.548, + 0.0, + 414.968, + 123.303, + 0.0, + 0.0, + 52.6452, + 112.734, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 296.727, + 100.905, + 506.182, + 605.431, + 645.818, + 117.723, + 750.545, + 369.985, + 546.909, + 128.934, + 610.909, + 280.292, + 436.364, + 123.328, + 523.636, + 330.745, + 209.455, + 140.146, + 354.909, + 493.314, + 157.091, + 112.117, + 238.545, + 308.321, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 6, 12096] + dtype = "float32" + max_val = float("0.42711") + mean = float("7.96203e-05") + std = float("0.00288483") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 6, 12096] + dtype = "float32" + max_val = float("0.933696") + mean = float("0.0107169") + std = float("0.0520399") + data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py new file mode 100644 index 000000000..a58ceab85 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/model.py @@ -0,0 +1,175 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x12096xi64) <- (2x6x12096xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("6"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x12096xi64) <- (2x12096xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (12xi32) <- (2x6x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (24192xi64) <- (2x12096xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (24192xi32) <- (12xi32, 24192xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 12096] + + # pd_op.reshape: (2x12096xi32) <- (24192xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x12096xb) <- (2x12096xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x12096xi32) <- (2x12096xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x12096xi32) <- (2x12096xb, 2x12096xi32, 2x12096xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (12x4xf32) <- (2x6x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (24192x4xf32) <- (12x4xf32, 24192xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 12096, 4] + + # pd_op.reshape: (2x12096x4xf32) <- (24192x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x12096x2xf32) <- (2x12096xi32, 1xi32) + one_hot_0 = 
paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x12096x1xf32) <- (2x12096x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x12096xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x6x1xf32) <- (2x6x12096xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x12096xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x6x1xf32) <- (2x6x12096xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x6x1xf32) <- (2x6x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x6x12096xf32) <- (2x6x12096xf32, 2x6x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x12096xf32) <- (2x6x12096xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x12096x1xf32) <- (2x12096xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x12096x1xf32) <- (2x12096x1xf32, 2x12096x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..ad26781d8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +99bd1d3460daced0b08a14965df1673734764218c3df6e74b7dd522b3b824b95 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py 
new file mode 100644 index 000000000..d01cfc936 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/input_meta.py @@ -0,0 +1,111 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [17] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6804] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 17, 6804] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000989902") + std = float("0.0314471") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 17, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 6804] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0168283") + std = float("0.128628") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 17, 4] + dtype = "float32" + max_val = float("576.0") + mean = float("141.197") + std = float("163.352") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 17, 6804] + dtype = "float32" + max_val = float("0.438622") + mean = float("6.76947e-05") + std = float("0.00266896") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 17, 6804] + dtype = "float32" + max_val = float("0.949961") + mean = float("0.00868228") + std = float("0.0526563") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py new file mode 100644 index 000000000..2c208d4ad --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/model.py @@ -0,0 +1,195 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # 
builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (1xi64) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- 
(2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..8644d9e4f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +ab00f78367b398fd627ea53927a42dac99230f6540cf3006c528aebbab4be04a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/input_meta.py new file mode 100644 index 000000000..b82c62293 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 1] + dtype = "float32" + min_val = float("0.000732164") + max_val = float("0.935386") + mean = float("0.0393097") + std = float("0.0887762") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 8400] + dtype = "int32" + min_val = 0 + max_val = 1 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 8400, 1] + dtype = "float32" + max_val = float("0.911359") + mean = float("0.00967398") + std = float("0.0703979") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/model.py new file mode 100644 index 000000000..747380d08 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, 
full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (2x-1x1xf32) <- (2x-1x1xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (2x-1x1xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (2x-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..4e276d96d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +dd9b6f7fec930c53532803207e780b6bed2e5fa78a9c0c93e36150ac8b0d9463 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": 
"paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py new file mode 100644 index 000000000..73ef358c5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 640, 640] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.521346") + std = float("0.808427") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py new file mode 100644 index 000000000..7a218f1b7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py @@ -0,0 +1,4048 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + 
parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + 
parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + data_0, + ): + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_367, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_367 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, parameter_366 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_357, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_356, + parameter_355, + parameter_354, + parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_352, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, 
+ ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_342, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_7, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + combine_0 = [swish_5, add_1] + + # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_322, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_321, full_int_array_1) + del parameter_321 + + # pd_op.add: (2x48x1x1xf32) <- 
(2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_0, parameter_320, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_320 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_319, + parameter_318, + parameter_317, + parameter_316, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_316, parameter_317, parameter_318, parameter_319 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_315, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_315 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_314, + parameter_313, + parameter_312, + parameter_311, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_311, parameter_312, parameter_313, parameter_314 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_310 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_309, + parameter_308, + parameter_307, + parameter_306, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_306, parameter_307, parameter_308, parameter_309 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__66) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_305 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + 
batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_304, + parameter_303, + parameter_302, + parameter_301, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_301, parameter_302, parameter_303, parameter_304 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_300, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_300 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_299, + parameter_298, + parameter_297, + parameter_296, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_296, parameter_297, parameter_298, parameter_299 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_13, parameter_295, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(add_3) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_12, swish_14) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- 
(2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_285, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, parameter_284 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_280, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_275, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_16) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_1 = [swish_11, add_6] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.reshape: (1x96x1x1xf32) <- 
(96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_269, full_int_array_1) + del parameter_269 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_1, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_263, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_258 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_257, + parameter_256, + parameter_255, + parameter_254, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_254, parameter_255, parameter_256, parameter_257 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_253, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_253 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_252, + parameter_251, + parameter_250, + parameter_249, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_249, parameter_250, parameter_251, parameter_252 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_20, parameter_248, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_248 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_247, + parameter_246, + parameter_245, + parameter_244, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_244, parameter_245, parameter_246, parameter_247 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_21, parameter_243, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_243 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_242, + parameter_241, + parameter_240, + parameter_239, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_239, parameter_240, parameter_241, parameter_242 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_238 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_237, + parameter_236, + parameter_235, + parameter_234, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_234, parameter_235, parameter_236, parameter_237 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) + + # pd_op.swish: (2x96x-1x-1xf32) <- 
(2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_8) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_20, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_233, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_233 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_232, + parameter_231, + parameter_230, + parameter_229, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_229, parameter_230, parameter_231, parameter_232 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_228, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_228 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_227, + parameter_226, + parameter_225, + parameter_224, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_224, parameter_225, parameter_226, parameter_227 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_223, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_223 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_222, + parameter_221, + parameter_220, + parameter_219, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_219, parameter_220, parameter_221, parameter_222 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_10) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_24) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_2 = [swish_19, add_11] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # 
pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_218 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_217, full_int_array_1) + del parameter_217 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_2, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_216 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_215, + parameter_214, + parameter_213, + parameter_212, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_212, parameter_213, parameter_214, parameter_215 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_211 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_210, + parameter_209, + parameter_208, + parameter_207, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_207, parameter_208, parameter_209, parameter_210 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_206 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_205, + parameter_204, + parameter_203, + parameter_202, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_202, parameter_203, parameter_204, parameter_205 + + # pd_op.swish: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_201 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_195, + parameter_194, + parameter_193, + parameter_192, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_192, parameter_193, parameter_194, parameter_195 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__198) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_29, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_191 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_190, + parameter_189, + parameter_188, + parameter_187, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_187, parameter_188, parameter_189, parameter_190 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_186 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_185, + parameter_184, + parameter_183, + parameter_182, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_182, parameter_183, parameter_184, parameter_185 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_28, swish_30) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_3 = [swish_27, add_14] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_181, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_181 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_180, full_int_array_1) + del full_int_array_1, parameter_180 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_3, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__216) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_31, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_31, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_16) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_35, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_35, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_35, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_4 = [swish_35, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_4, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__252) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_5 = [swish_32, swish_36] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_5, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__258) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_38, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_25] + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_6, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__276) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_42 = paddle._C_ops.swish(add_17) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_7 = [swish_39, swish_42] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_7, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: 
(2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_44, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_17] + + # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_8, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__312) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_8, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + 
batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_48 = paddle._C_ops.swish(add_18) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_9 = [swish_45, swish_48] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 
1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_9, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__342) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__348) + + # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) + combine_10 = [swish_50, swish_43] + + # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_10, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_10, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + 
batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_19) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_11 = [swish_51, swish_54] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 
2x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_11, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__384) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__390) + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_12 = [swish_56, swish_37] + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_12, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_12, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 
192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__402) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__408) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_20) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- 
(2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_13 = [swish_57, swish_60] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__426) + del ( + add_0, + add_1, + add_10, + add_11, + add_13, + add_14, + add_16, + add_17, + add_18, + add_19, + add_20, + add_3, + add_4, + add_5, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + 
batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + 
batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + 
conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_7, + swish_8, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py new file mode 100644 index 000000000..d58909a7b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py @@ -0,0 +1,3860 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.65221") + max_val = float("1.18847") + mean = float("0.0280678") + std = float("0.238602") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("0.840777") + max_val = float("1.38166") + mean = float("0.983157") + std = float("0.0682519") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.00367937") + max_val = float("0.0877201") + mean = float("0.0123265") + std = float("0.0078903") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.194012") + max_val = float("0.0551918") + mean = float("-0.02945") + std = float("0.036829") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100636") + max_val = float("0.0644305") + mean = float("-0.000367326") + std = float("0.00632618") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.444539") + max_val = float("0.100308") + mean = float("-0.0845765") + std = float("0.104435") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192] + dtype = "float32" + min_val = float("0.827634") + max_val = float("1.20887") + mean = float("0.926304") + std = float("0.0461583") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("0.0022587") + max_val = float("0.0317087") + mean = float("0.0127237") + std = float("0.00671595") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192] + dtype = "float32" + min_val = 
float("-0.0420164") + max_val = float("0.0489711") + mean = float("7.80708e-05") + std = float("0.023324") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0494705") + max_val = float("0.0588015") + mean = float("-0.000304397") + std = float("0.00403892") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [192] + dtype = "float32" + min_val = float("-0.444539") + max_val = float("0.100308") + mean = float("-0.0845765") + std = float("0.104435") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("0.861989") + max_val = float("1.42115") + mean = float("1.11192") + std = float("0.0818402") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.00625506") + max_val = float("0.0604123") + mean = float("0.0170478") + std = float("0.00762762") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("-0.114496") + max_val = float("0.0773463") + mean = float("-0.0180438") + std = float("0.0309436") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0719425") + max_val = float("0.0798922") + mean = float("-0.000137273") + std = float("0.0037425") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.519307") + max_val = float("0.119213") + mean = float("-0.173655") + std = float("0.128078") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("0.843345") + max_val = float("1.65101") + mean = float("1.06412") + std = float("0.100931") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("0.0129167") + max_val = float("0.102406") + mean = float("0.0387628") + std = float("0.0164023") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("-0.201974") + max_val = float("0.118718") + mean = float("-0.070128") + std = float("0.0516738") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0703397") + max_val = float("0.0838756") + mean = float("-0.000297626") + std = float("0.00409097") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.45538") + max_val = float("0.186694") + mean = float("-0.0819211") + std = float("0.101908") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.842136") + max_val = float("1.25451") + mean = float("1.02689") + std = float("0.0669984") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00609049") + max_val = float("0.0366868") + mean = float("0.0132496") + std = float("0.00446736") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + 
shape = [192] + dtype = "float32" + min_val = float("-0.108315") + max_val = float("0.071515") + mean = float("-0.0205931") + std = float("0.0309136") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.102669") + max_val = float("0.10072") + mean = float("-0.000208583") + std = float("0.00580958") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.217714") + max_val = float("0.0349411") + mean = float("-0.0691139") + std = float("0.0385785") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192] + dtype = "float32" + min_val = float("0.843869") + max_val = float("1.15213") + mean = float("1.01543") + std = float("0.0502928") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("0.00469781") + max_val = float("0.0515763") + mean = float("0.0100063") + std = float("0.00473549") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("-0.100338") + max_val = float("0.103443") + mean = float("-0.028043") + std = float("0.0298177") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0445699") + max_val = float("0.0626305") + mean = float("-0.000342079") + std = float("0.00527897") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.295573") + max_val = float("-0.00727007") + mean = float("-0.0908822") + std = float("0.0602539") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.782898") + max_val = float("1.34847") + mean = float("1.05287") + std = float("0.0658118") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("0.00853608") + max_val = float("0.0834335") + mean = float("0.0220792") + std = float("0.0106956") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192] + dtype = "float32" + min_val = float("-0.281933") + max_val = float("0.32665") + mean = float("-0.0476883") + std = float("0.0906137") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372086") + max_val = float("0.0434341") + mean = float("-9.58531e-05") + std = float("0.00301696") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192] + dtype = "float32" + min_val = float("-0.529319") + max_val = float("1.03253") + mean = float("0.1482") + std = float("0.259312") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [192] + dtype = "float32" + min_val = float("0.733167") + max_val = float("1.57011") + mean = float("1.01433") + std = float("0.106495") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [192] + dtype = "float32" + min_val = float("0.00533333") + max_val = float("0.0628113") + mean = float("0.0202291") + std = float("0.0104903") + data = None + + +class 
Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [192] + dtype = "float32" + min_val = float("-0.274173") + max_val = float("0.190265") + mean = float("-0.0383734") + std = float("0.0518374") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.148329") + max_val = float("0.115949") + mean = float("-0.000786037") + std = float("0.0116676") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.290208") + max_val = float("0.171692") + mean = float("-0.0709438") + std = float("0.105357") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = "float32" + min_val = float("0.730214") + max_val = float("1.20725") + mean = float("0.877815") + std = float("0.0776628") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("0.00244432") + max_val = float("0.0359181") + mean = float("0.0138133") + std = float("0.00616422") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.0390463") + max_val = float("0.0281496") + mean = float("-0.00737283") + std = float("0.0187507") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0574805") + max_val = float("0.0583116") + mean = float("-0.00132585") + std = float("0.00692157") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.290208") + max_val = float("0.171692") + mean = float("-0.0709438") + std = float("0.105357") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("0.969898") + max_val = float("1.3202") + mean = float("1.13218") + std = float("0.0750079") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("0.00695661") + max_val = float("0.0436736") + mean = float("0.0204114") + std = float("0.00835407") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.0615079") + max_val = float("0.0687841") + mean = float("-0.00728561") + std = float("0.0229485") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0888788") + max_val = float("0.0951908") + mean = float("-0.00012594") + std = float("0.00695592") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("-0.672726") + max_val = float("0.111066") + mean = float("-0.258997") + std = float("0.150238") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("0.802266") + max_val = float("1.40896") + mean = float("1.04531") + std = float("0.116924") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("0.0207014") + max_val = float("0.108587") + mean = float("0.0455437") + std = 
float("0.0186345") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.112693") + max_val = float("0.0516466") + mean = float("-0.0468167") + std = float("0.0294565") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0830006") + max_val = float("0.106216") + mean = float("-0.00053104") + std = float("0.00775187") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("-0.642948") + max_val = float("0.150819") + mean = float("-0.155351") + std = float("0.115348") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = "float32" + min_val = float("0.84901") + max_val = float("1.26259") + mean = float("1.03349") + std = float("0.0720544") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("0.00947266") + max_val = float("0.0469677") + mean = float("0.0192275") + std = float("0.00716215") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96] + dtype = "float32" + min_val = float("-0.135148") + max_val = float("0.0288217") + mean = float("-0.0359178") + std = float("0.0292974") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0792001") + max_val = float("0.0950111") + mean = float("-0.00062642") + std = float("0.0105349") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.198091") + max_val = float("0.0826105") + mean = float("-0.0299011") + std = float("0.0459564") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.685979") + max_val = float("1.33637") + mean = float("0.95441") + std = float("0.0883622") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("0.00495099") + max_val = float("0.0554625") + mean = float("0.012309") + std = float("0.00627308") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-0.0965082") + max_val = float("0.0676515") + mean = float("-0.0128269") + std = float("0.0323591") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0773482") + max_val = float("0.0795383") + mean = float("-0.000205562") + std = float("0.00892342") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.3351") + max_val = float("0.0180339") + mean = float("-0.108666") + std = float("0.0839689") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.730829") + max_val = float("1.20386") + mean = float("1.0551") + std = float("0.0746036") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("0.00893169") + max_val = float("0.0609817") + 
mean = float("0.0216752") + std = float("0.0103436") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.43779") + max_val = float("0.530239") + mean = float("-0.00359941") + std = float("0.156358") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0647316") + max_val = float("0.0613698") + mean = float("-1.86625e-05") + std = float("0.00623329") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("-1.07261") + max_val = float("2.35998") + mean = float("0.312216") + std = float("0.587121") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.476972") + max_val = float("1.40751") + mean = float("0.884046") + std = float("0.166927") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("0.00642742") + max_val = float("0.129825") + mean = float("0.0330591") + std = float("0.0219559") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.294202") + max_val = float("0.222749") + mean = float("-0.0136424") + std = float("0.071643") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.188209") + max_val = float("0.120938") + mean = float("-0.000991715") + std = float("0.0220383") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0700832") + max_val = float("0.0649644") + mean = float("-0.00214276") + std = float("0.0125585") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.136078") + max_val = float("0.180716") + mean = float("-0.000282686") + std = 
float("0.0138341") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.114709") + max_val = float("0.160677") + mean = float("-0.000863383") + std = float("0.0150497") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.241983") + max_val = float("0.156755") + mean = float("-0.000908695") + std = float("0.0191741") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.112347") + max_val = float("0.148275") + mean = float("0.00022258") + std = float("0.0143593") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.354063") + max_val = float("0.389439") + mean = float("-0.0077798") + std = float("0.135547") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.581944") + max_val = float("1.61146") + mean = float("0.798361") + std = float("0.141114") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.00926414") + max_val = float("0.0779837") + mean = float("0.0233765") + std = float("0.011586") + data = None + + +class Program_weight_tensor_parameter_103: 
+ name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.205203") + max_val = float("0.065409") + mean = float("-0.0368353") + std = float("0.0408489") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.107548") + max_val = float("0.121609") + mean = float("-0.00106908") + std = float("0.0142625") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.33747") + max_val = float("0.174782") + mean = float("-0.0799299") + std = float("0.089405") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.697042") + max_val = float("1.47363") + mean = float("0.990958") + std = float("0.0990363") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.0106758") + max_val = float("0.0901736") + mean = float("0.0233258") + std = float("0.00975982") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.218067") + max_val = float("0.191552") + mean = float("-0.054689") + std = float("0.0548119") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.11401") + max_val = float("0.147871") + mean = float("-0.00132952") + std = float("0.0140748") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.308172") + max_val = float("0.100695") + mean = float("-0.0814609") + std = float("0.0991118") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.552538") + max_val = float("0.935439") + mean = float("0.809676") + std = float("0.0654097") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.0077781") + max_val = float("0.0360303") + mean = float("0.0172914") + std = float("0.0054044") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.0473472") + max_val = float("0.0334054") + mean = float("-0.0169048") + std = float("0.0189166") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0532353") + max_val = float("0.0545219") + mean = float("-0.0018903") + std = float("0.00933137") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.308172") + max_val = float("0.100695") + mean = float("-0.0814609") + std = float("0.0991118") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.843695") + max_val = float("1.28928") + mean = float("1.0347") + std = float("0.0944676") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.0169339") + max_val = float("0.161601") + mean = float("0.0363965") + std = float("0.0171841") + 
data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0918578") + max_val = float("0.0336943") + mean = float("-0.0247889") + std = float("0.0245036") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.101803") + max_val = float("0.200902") + mean = float("-0.000267266") + std = float("0.0083296") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.728683") + max_val = float("0.317979") + mean = float("-0.275123") + std = float("0.174815") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.764105") + max_val = float("1.3124") + mean = float("1.04343") + std = float("0.115762") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0272856") + max_val = float("0.0961064") + mean = float("0.0504816") + std = float("0.0161463") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.135954") + max_val = float("0.0746318") + mean = float("-0.0577229") + std = float("0.0449165") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.185749") + max_val = float("0.160808") + mean = float("-0.000603971") + std = float("0.00986059") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.646446") + max_val = float("0.382953") + mean = float("-0.253615") + std = float("0.209383") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.737499") + max_val = float("1.37821") + mean = float("1.02572") + std = float("0.122249") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00985305") + max_val = float("0.0428714") + mean = float("0.0193938") + std = float("0.00597774") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.372134") + max_val = float("0.319757") + mean = float("0.0183732") + std = float("0.0780363") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.203733") + max_val = float("0.134243") + mean = float("-0.000669446") + std = float("0.0129484") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.238657") + max_val = float("0.170206") + mean = float("-0.0409469") + std = float("0.0883869") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("0.915723") + max_val = float("1.41222") + mean = float("1.07273") + std = float("0.0922586") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("0.00786671") + max_val = float("0.0588079") + 
mean = float("0.0163456") + std = float("0.0066827") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.11548") + max_val = float("0.0928178") + mean = float("0.00883762") + std = float("0.0363849") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0962928") + max_val = float("0.16056") + mean = float("-0.000559513") + std = float("0.0114298") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.538624") + max_val = float("-0.101332") + mean = float("-0.294138") + std = float("0.0707416") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.651882") + max_val = float("1.08011") + mean = float("0.852092") + std = float("0.0724292") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.0137887") + max_val = float("0.0661338") + mean = float("0.0268937") + std = float("0.00906335") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.120176") + max_val = float("0.0680941") + mean = float("-0.0396069") + std = float("0.0327503") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0639447") + max_val = float("0.0672375") + mean = float("-0.000833786") + std = float("0.0103229") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-0.521228") + max_val = float("0.213986") + mean = float("-0.1682") + std = float("0.0775017") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384] + dtype = "float32" + min_val = float("0.850237") + max_val = float("1.39388") + mean = float("1.0626") + std = float("0.0773034") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("0.0088734") + max_val = float("0.0441515") + mean = float("0.0191098") + std = float("0.00573499") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("-0.129402") + max_val = float("0.0875489") + mean = float("-0.0445092") + std = float("0.0396322") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.121659") + max_val = float("0.133055") + mean = float("-0.000702422") + std = float("0.00934463") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.382852") + max_val = float("0.227352") + mean = float("-0.117886") + std = float("0.101914") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.86878") + max_val = float("1.51462") + mean = float("1.12296") + std = float("0.11892") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = 
"float32" + min_val = float("0.0805715") + max_val = float("0.861815") + mean = float("0.24173") + std = float("0.115689") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-1.76276") + max_val = float("1.36192") + mean = float("-0.20097") + std = float("0.618042") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.143123") + max_val = float("0.101835") + mean = float("-0.000122669") + std = float("0.00812429") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.242929") + max_val = float("0.168527") + mean = float("-0.0174021") + std = float("0.0538902") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.618325") + max_val = float("1.01596") + mean = float("0.837489") + std = float("0.06312") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00769304") + max_val = float("0.0329157") + mean = float("0.0170219") + std = float("0.00463802") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.141686") + max_val = float("0.0792819") + mean = float("-0.0667949") + std = float("0.0484579") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0463403") + max_val = float("0.0758786") + mean = float("-0.00171087") + std = float("0.00771467") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.242929") + max_val = float("0.168527") + mean = float("-0.0174021") + std = float("0.0538902") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.874574") + max_val = float("1.46208") + mean = float("1.1059") + std = float("0.129661") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.0313595") + max_val = float("0.113252") + mean = float("0.0588345") + std = float("0.0156086") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.332132") + max_val = float("0.010893") + mean = float("-0.152239") + std = float("0.0624963") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0503168") + max_val = float("0.0641412") + mean = float("-0.000498968") + std = float("0.00471767") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.311021") + max_val = float("0.0670138") + mean = float("-0.114925") + std = float("0.0801899") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.909746") + max_val = float("1.44623") + mean = float("1.10815") + std = float("0.101996") + data = None + + +class 
Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0399431") + max_val = float("0.140144") + mean = float("0.0747907") + std = float("0.0215638") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.533458") + max_val = float("0.244514") + mean = float("-0.168827") + std = float("0.11435") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566842") + max_val = float("0.0514181") + mean = float("-0.000587536") + std = float("0.00527014") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.444627") + max_val = float("0.412033") + mean = float("-0.137488") + std = float("0.130168") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.95351") + max_val = float("1.37306") + mean = float("1.11002") + std = float("0.0723609") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0551676") + max_val = float("0.229055") + mean = float("0.0906291") + std = float("0.0274345") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.313806") + max_val = float("0.492745") + mean = float("-0.132664") + std = float("0.085465") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0601338") + max_val = float("0.10781") + mean = float("-0.000964638") + std = float("0.00915433") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.163877") + max_val = float("0.00112327") + mean = float("-0.0651513") + std = float("0.0261511") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.819712") + max_val = float("1.06624") + mean = float("0.968869") + std = float("0.0460872") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.0346107") + max_val = float("0.137989") + mean = float("0.054459") + std = float("0.0124478") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.200298") + max_val = float("0.0295223") + mean = float("-0.0891939") + std = float("0.0476986") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0357362") + max_val = float("0.0637335") + mean = float("-0.000787404") + std = float("0.00752266") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [512] + dtype = "float32" + min_val = float("-4.82803") + max_val = float("-0.112013") + mean = float("-2.29502") + std = float("0.775166") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("2.10203") + max_val = float("5.21664") + mean = 
float("3.70064") + std = float("0.482744") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("0.00190724") + max_val = float("0.0148178") + mean = float("0.00510842") + std = float("0.00181044") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [512] + dtype = "float32" + min_val = float("-0.130767") + max_val = float("0.0826418") + mean = float("-0.0410784") + std = float("0.0246182") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.106883") + max_val = float("0.153104") + mean = float("-0.00124309") + std = float("0.00937421") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [384] + dtype = "float32" + min_val = float("-0.0182735") + max_val = float("-0.000488762") + mean = float("-0.0066305") + std = float("0.00403925") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.243656") + max_val = float("0.180358") + mean = float("-0.00257278") + std = float("0.00811913") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("-2.38777") + max_val = float("3.15932") + mean = float("-0.20407") + std = float("0.562338") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("0.123779") + max_val = float("2.40527") + mean = float("0.524516") + std = float("0.334825") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("0.000126989") + max_val = float("0.0045594") + mean = float("0.000819872") + std = float("0.000558532") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.0804045") + max_val = float("0.119949") + mean = float("0.0138328") + std = float("0.0270058") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0707601") + max_val = float("0.0621697") + mean = float("-0.000379924") + std = float("0.00602374") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("-2.38777") + max_val = float("3.15932") + mean = float("-0.20407") + std = float("0.562338") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("0.67694") + max_val = float("3.07362") + mean = float("1.54467") + std = float("0.450817") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192] + dtype = "float32" + min_val = float("0.00215705") + max_val = float("0.0356044") + mean = float("0.00897467") + std = float("0.00458554") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.241985") + max_val = float("0.257354") + mean = float("0.0137577") + std = float("0.0550272") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192, 192, 3, 3] 
+ dtype = "float32" + min_val = float("-0.0901592") + max_val = float("0.0804") + mean = float("-9.38039e-05") + std = float("0.00532701") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("-3.43174") + max_val = float("1.16938") + mean = float("-1.42833") + std = float("0.634694") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("0.389358") + max_val = float("1.7276") + mean = float("1.0897") + std = float("0.190293") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192] + dtype = "float32" + min_val = float("0.0526142") + max_val = float("0.270718") + mean = float("0.108176") + std = float("0.0325536") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [192] + dtype = "float32" + min_val = float("-1.54162") + max_val = float("0.471207") + mean = float("-0.283055") + std = float("0.214286") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0845723") + max_val = float("0.0650064") + mean = float("-0.000462494") + std = float("0.00626612") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [192] + dtype = "float32" + min_val = float("-3.87665") + max_val = float("4.23691") + mean = float("-0.62962") + std = float("0.987882") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [192] + dtype = "float32" + min_val = float("0.580822") + max_val = float("4.17446") + mean = float("1.54478") + std = float("0.398628") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [192] + dtype = "float32" + min_val = float("0.00595442") + max_val = float("0.0273129") + mean = float("0.011053") + std = float("0.00374204") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.22698") + max_val = float("0.189248") + mean = float("0.0593732") + std = float("0.0450403") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.114839") + max_val = float("0.085935") + mean = float("-0.00162257") + std = float("0.011314") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("-2.93751") + max_val = float("1.02421") + mean = float("-0.427093") + std = float("0.681453") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("0.698228") + max_val = float("3.61037") + mean = float("1.48106") + std = float("0.505456") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192] + dtype = "float32" + min_val = float("0.00245686") + max_val = float("0.012822") + mean = float("0.00510958") + std = float("0.00159036") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.100272") + max_val = float("0.105996") + mean = float("0.0195239") + std = float("0.0348524") + data = None + + +class Program_weight_tensor_parameter_206: + 
name = "parameter_206" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0708848") + max_val = float("0.0850015") + mean = float("-0.000706931") + std = float("0.00901731") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [384] + dtype = "float32" + min_val = float("-2.84209") + max_val = float("1.12257") + mean = float("-0.753555") + std = float("0.497094") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [384] + dtype = "float32" + min_val = float("0.417665") + max_val = float("1.80337") + mean = float("0.867666") + std = float("0.218119") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [384] + dtype = "float32" + min_val = float("0.0108198") + max_val = float("0.0878778") + mean = float("0.0200508") + std = float("0.00708447") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [384] + dtype = "float32" + min_val = float("-0.552474") + max_val = float("0.359625") + mean = float("0.0190155") + std = float("0.105451") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.050684") + max_val = float("0.0606495") + mean = float("-0.000221799") + std = float("0.00520848") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256] + dtype = "float32" + min_val = float("-2.81734") + max_val = float("1.46527") + mean = float("-1.07834") + std = float("0.63321") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [256] + dtype = "float32" + min_val = float("0.430261") + max_val = float("1.7692") + mean = float("0.978084") + std = float("0.17059") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [256] + dtype = "float32" + min_val = float("0.00302249") + max_val = float("0.0150254") + mean = float("0.00711712") + std = float("0.0018291") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [256] + dtype = "float32" + min_val = float("-0.242576") + max_val = float("0.242969") + mean = float("-0.0674006") + std = float("0.0835729") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.11703") + max_val = float("0.206335") + mean = float("-0.00133698") + std = float("0.016598") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("-0.0214622") + max_val = float("0.00204459") + mean = float("-0.00682654") + std = float("0.00523474") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.274972") + max_val = float("0.196684") + mean = float("-0.00446271") + std = float("0.0117188") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-2.27808") + max_val = float("0.747421") + mean = float("-0.117479") + std = float("0.506792") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.0587442") + max_val = float("2.30527") + mean = float("0.261103") + std = 
float("0.366411") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("9.94584e-12") + max_val = float("0.00288558") + mean = float("0.000611538") + std = float("0.000479206") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("-0.0613965") + max_val = float("0.0867185") + mean = float("0.0075244") + std = float("0.022618") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0458095") + max_val = float("0.0794823") + mean = float("-0.000380958") + std = float("0.0066521") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-2.27808") + max_val = float("0.747421") + mean = float("-0.117479") + std = float("0.506792") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("0.348389") + max_val = float("3.24093") + mean = float("1.29082") + std = float("0.633395") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.00469999") + max_val = float("0.0420037") + mean = float("0.0162317") + std = float("0.00640931") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("-0.198405") + max_val = float("0.208243") + mean = float("0.0278847") + std = float("0.0747534") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0703987") + max_val = float("0.0847217") + mean = float("-0.000400512") + std = float("0.00880434") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-2.79718") + max_val = float("1.50453") + mean = float("-1.09173") + std = float("0.69636") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("0.319783") + max_val = float("1.80086") + mean = float("1.07317") + std = float("0.21342") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.0391363") + max_val = float("0.160891") + mean = float("0.0831981") + std = float("0.0236604") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("-1.59676") + max_val = float("0.430385") + mean = float("-0.176851") + std = float("0.317024") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0684623") + max_val = float("0.0821956") + mean = float("-0.000727853") + std = float("0.00945016") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-2.53961") + max_val = float("0.660095") + mean = float("-0.0506683") + std = float("0.473015") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.0761853") + max_val = 
float("3.15108") + mean = float("0.280991") + std = float("0.409237") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("2.2112e-10") + max_val = float("0.0182811") + mean = float("0.0018356") + std = float("0.00247258") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("-0.0564698") + max_val = float("0.134674") + mean = float("0.0208637") + std = float("0.0323877") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.149971") + max_val = float("0.0877827") + mean = float("-0.00159757") + std = float("0.00996893") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-2.53961") + max_val = float("0.660096") + mean = float("-0.0506683") + std = float("0.473015") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("0.340415") + max_val = float("2.99317") + mean = float("0.929372") + std = float("0.41222") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.0101736") + max_val = float("0.0521186") + mean = float("0.0255179") + std = float("0.00892784") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("-0.226393") + max_val = float("0.221011") + mean = float("0.0450625") + std = float("0.0782651") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0614525") + max_val = float("0.0642893") + mean = float("-0.000703378") + std = float("0.00896078") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-2.01737") + max_val = float("1.65537") + mean = float("-0.920561") + std = float("0.650231") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96] + dtype = "float32" + min_val = float("0.434872") + max_val = float("1.96317") + mean = float("1.06433") + std = float("0.227725") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("0.0165936") + max_val = float("0.132837") + mean = float("0.0345742") + std = float("0.0150947") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("-2.3732") + max_val = float("0.24267") + mean = float("-0.0505484") + std = float("0.287143") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.127211") + max_val = float("0.160397") + mean = float("-0.000544621") + std = float("0.0101928") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-1.61344") + max_val = float("1.88195") + mean = float("0.00484379") + std = float("0.837537") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96] + dtype = "float32" 
+ min_val = float("0.347101") + max_val = float("1.32016") + mean = float("0.70129") + std = float("0.236267") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("0.0166919") + max_val = float("0.0891169") + mean = float("0.0374108") + std = float("0.0147788") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("-0.381066") + max_val = float("0.566263") + mean = float("-0.0972959") + std = float("0.136521") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.143906") + max_val = float("0.121873") + mean = float("-0.00172717") + std = float("0.0164963") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-2.46841") + max_val = float("1.71052") + mean = float("0.339601") + std = float("0.678564") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96] + dtype = "float32" + min_val = float("0.541707") + max_val = float("4.87976") + mean = float("1.48201") + std = float("0.958387") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("0.0122307") + max_val = float("0.120423") + mean = float("0.0291796") + std = float("0.0157993") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("-0.296455") + max_val = float("0.260254") + mean = float("-0.00596865") + std = float("0.132222") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.111668") + max_val = float("0.214196") + mean = float("-0.000763054") + std = float("0.0168732") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("-4.44408") + max_val = float("2.00967") + mean = float("-0.0987827") + std = float("0.883275") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("0.570935") + max_val = float("4.51306") + mean = float("1.08264") + std = float("0.425298") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.0126533") + max_val = float("0.136437") + mean = float("0.0356273") + std = float("0.0206117") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("-0.392763") + max_val = float("0.345645") + mean = float("0.022741") + std = float("0.127592") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.109003") + max_val = float("0.0868493") + mean = float("-0.00035271") + std = float("0.00845821") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [128] + dtype = "float32" + min_val = float("-2.14726") + max_val = float("1.36561") + mean = float("-0.674451") + std = float("0.681161") + data = None + + +class Program_weight_tensor_parameter_265: + name = 
"parameter_265" + shape = [128] + dtype = "float32" + min_val = float("0.375234") + max_val = float("2.23956") + mean = float("0.877769") + std = float("0.235655") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [128] + dtype = "float32" + min_val = float("0.00172908") + max_val = float("0.0243042") + mean = float("0.00734318") + std = float("0.00288478") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [128] + dtype = "float32" + min_val = float("-0.322838") + max_val = float("0.289878") + mean = float("-0.0726895") + std = float("0.129009") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.258942") + max_val = float("0.222471") + mean = float("-0.00147825") + std = float("0.0265093") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.0261312") + max_val = float("0.00440601") + mean = float("-0.00868593") + std = float("0.00785641") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.323766") + max_val = float("0.314811") + mean = float("-0.00588923") + std = float("0.0203338") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0754874") + max_val = float("0.0918744") + mean = float("-0.00116183") + std = float("0.0137375") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0909754") + max_val = float("0.119188") + mean = float("-0.000469278") + std = float("0.0143324") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.107025") + max_val = float("0.127668") + mean = float("-0.00119973") + std = float("0.0151861") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0825525") + max_val = float("0.0847429") + mean = float("-0.00293553") + std = float("0.0180257") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.121405") + max_val = float("0.106816") + mean = float("-0.00103162") + std = float("0.0139597") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.104387") + max_val = float("0.0933051") + mean = float("-0.000898517") + std = float("0.0163105") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.127218") + max_val = float("0.131789") + mean = float("-0.00305303") + std = float("0.0260694") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.187551") + max_val = float("0.266311") + mean = float("0.000454803") + std = float("0.0279756") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("-3.32387") + max_val = float("3.83534") + mean = float("0.265485") + std = float("1.21084") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("0.503076") + max_val = float("5.39398") + mean = float("1.12896") + std = float("0.545544") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96] + dtype = "float32" + min_val = float("0.0185778") + max_val = float("0.251561") + mean = float("0.0602414") + std = float("0.0415712") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [96] + dtype = "float32" + min_val = float("-0.530586") + max_val = float("0.599692") + mean = float("-0.0329156") + std = float("0.184534") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.129865") + max_val = float("0.140642") + mean = float("-0.000231144") + std = float("0.0135591") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [64] + dtype = 
"float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.217846") + max_val = float("0.197691") + mean = float("-0.00202445") + std = float("0.0390741") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.185619") + max_val = float("0.168747") + mean = float("-0.0126224") + std = float("0.0278031") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.11592") + max_val = float("0.15413") + mean = float("-0.00151763") + std = float("0.0277352") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.106175") + max_val = float("0.114008") + mean = float("-0.000884197") + std = float("0.0238809") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.138643") + max_val = float("0.153767") + mean = float("-0.000248352") + std = float("0.0266901") + data = None + + +class 
Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.220553") + max_val = float("0.201403") + mean = float("-0.00385181") + std = float("0.04011") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.232263") + max_val = float("0.179328") + mean = float("-0.00110555") + std = float("0.0447387") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.152721") + max_val = float("0.151792") + mean = float("-0.000535835") + std = float("0.0230336") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [32, 16, 3, 
3] + dtype = "float32" + min_val = float("-0.279318") + max_val = float("0.276461") + mean = float("-0.00085436") + std = float("0.0389004") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.337845") + max_val = float("0.369192") + mean = float("-0.00069606") + std = float("0.0519452") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.245822") + max_val = float("0.288484") + mean = float("-0.00340321") + std = float("0.0733952") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..a93514ba4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +be2fb57bd448a9ffeb7401288b396cc0d51942b463c2f34662d7485236768468 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py new file mode 100644 index 000000000..1cb0adc74 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 384, 16, 16] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("5.37003") + mean = float("0.223584") + std = float("0.551132") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 192, 32, 32] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("7.76898") + 
mean = float("0.298021") + std = float("0.601257") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 96, 64, 64] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.44375") + mean = float("0.389922") + std = float("0.624856") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py new file mode 100644 index 000000000..fc0f65e76 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("16"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (16xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (16xf32) <- (16xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16xf32) <- (16xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16xf32) <- (16xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([16xf32, 16xf32]) <- (16xf32, 16xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([16x16xf32, 16x16xf32]) <- ([16xf32, 16xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (16x16xf32, 16x16xf32) <- ([16x16xf32, 16x16xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_5 = 
paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (16x16x4xf32) <- ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (16x16x2xf32) <- ([16x16xf32, 16x16xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (256x4xf32) <- (16x16x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (256x2xf32) <- (16x16x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (256x1xf32) <- () + full_5 = paddle._C_ops.full( + [256, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("32"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (32xf32) <- (32xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([32xf32, 32xf32]) <- (32xf32, 32xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (32x32x4xf32) <- ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1024x4xf32) <- (32x32x4xf32, 2xi64) + reshape_2 = 
paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1024x2xf32) <- (32x32x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1024x1xf32) <- () + full_8 = paddle._C_ops.full( + [1024, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("64"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (64xf32) <- (64xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (64x64x4xf32) <- ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (4096x4xf32) <- (64x64x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (4096x2xf32) <- (64x64x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (4096x1xf32) <- () + full_11 = paddle._C_ops.full( + [4096, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([256x4xf32, 1024x4xf32, 4096x4xf32]) <- (256x4xf32, 1024x4xf32, 4096x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (5376x4xf32) <- ([256x4xf32, 1024x4xf32, 4096x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # 
builtin.combine: ([256x2xf32, 1024x2xf32, 4096x2xf32]) <- (256x2xf32, 1024x2xf32, 4096x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (5376x2xf32) <- ([256x2xf32, 1024x2xf32, 4096x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([256x1xf32, 1024x1xf32, 4096x1xf32]) <- (256x1xf32, 1024x1xf32, 4096x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (5376x1xf32) <- ([256x1xf32, 1024x1xf32, 4096x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x1x16x16xf32) <- (2x384x16x16xf32, 1x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x1x16x16xf32) <- (2x1x16x16xf32, 1x1x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x16x16xf32) <- (2x384x16x16xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x68x16x16xf32) <- (2x68x16x16xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x1x16x16xf32) <- (2x1x16x16xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x1x256xf32) <- (2x1x16x16xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x256x1xf32) <- (2x1x256xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x256xf32) <- (2x68x16x16xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x256x68xf32) <- (2x68x256xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x32x32xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_7 = 
paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x1x32x32xf32) <- (2x192x32x32xf32, 1x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x1x32x32xf32) <- (2x1x32x32xf32, 1x1x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x32x32xf32) <- (2x192x32x32xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x68x32x32xf32) <- (2x68x32x32xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + 
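+        # The pool2d -> 1x1 conv -> sigmoid -> multiply sequences above act as
+        # SE-style channel gates on each FPN level before the head convolutions.
+        # The 1-channel output (add_7) appears to be the per-location class score
+        # (this model detects a single "human" class), while the 68-channel output
+        # (add_9) is consistent with a DFL-style box regression of 4 sides x 17
+        # bins (reg_max = 16), matching the 4-corner anchor boxes built earlier.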
+ # pd_op.sigmoid: (2x1x32x32xf32) <- (2x1x32x32xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x1x1024xf32) <- (2x1x32x32xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x1024x1xf32) <- (2x1x1024xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x1024xf32) <- (2x68x32x32xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x1024x68xf32) <- (2x68x1024xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x64x64xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x1x64x64xf32) <- (2x96x64x64xf32, 1x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x1x64x64xf32) <- (2x1x64x64xf32, 1x1x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = 
paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x64x64xf32) <- (2x96x64x64xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x68x64x64xf32) <- (2x68x64x64xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x1x64x64xf32) <- (2x1x64x64xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x1x4096xf32) <- (2x1x64x64xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x4096x1xf32) <- (2x1x4096xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x4096xf32) <- (2x68x64x64xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x4096x68xf32) <- (2x68x4096xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32]) <- (2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x5376x1xf32) <- ([2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32]) <- (2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x5376x68xf32) <- ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + 
batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py new file mode 100644 index 000000000..a80fe5856 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0172485") + max_val = float("0.027465") + mean = float("1.46232e-07") + std = float("0.00758165") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.193407") + max_val = float("0.203896") + mean = float("4.08909e-08") + std = float("0.0115569") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149226") + max_val = float("0.348802") + mean = float("0.0836582") + std = float("0.116186") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.92059") + max_val = float("2.01352") + mean = float("1.39698") + std = float("0.216217") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000218703") + max_val = float("0.00374521") + mean = float("0.000878038") + std = float("0.00058106") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0825809") + max_val = float("0.0417536") + mean = float("-0.00846266") + std = float("0.0198982") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0948878") + max_val = float("0.109061") + mean = float("-0.000797905") + std = float("0.0137305") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.012275") + max_val = float("0.0115959") + mean = float("-0.000356062") + std = float("0.00537855") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0233111") + max_val = float("0.0248623") + mean = float("-0.000113331") 
+ std = float("0.0035026") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0555531") + max_val = float("0.0394738") + mean = float("0.00027914") + std = float("0.0112485") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.661613") + max_val = float("1.11986") + mean = float("0.208505") + std = float("0.335963") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.773318") + max_val = float("1.56281") + mean = float("1.11195") + std = float("0.138849") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000158442") + max_val = float("0.00563483") + mean = float("0.00119669") + std = float("0.0010054") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.217371") + max_val = float("0.0873086") + mean = float("-0.0303735") + std = float("0.050933") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.113703") + max_val = float("0.0885765") + mean = float("-0.00122357") + std = float("0.0140255") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00655335") + max_val = float("0.00788225") + mean = float("-0.000702387") + std = float("0.00315771") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0695131") + max_val = float("0.0989064") + mean = float("-0.000394188") + std = float("0.00429104") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00653538") + max_val = float("0.0248622") + mean = float("1.52999e-07") + std = float("0.00624483") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.155994") + max_val = float("0.17862") + mean = float("-1.00845e-08") + std = float("0.00807346") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.111521") + max_val = float("0.136793") + mean = float("0.050372") + std = float("0.0428473") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.941879") + max_val = float("1.4895") + mean = float("1.20932") + std = float("0.101229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000166872") + max_val = float("0.00497783") + mean = float("0.00083062") + std = float("0.000691699") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0352484") + max_val = float("0.0207761") + mean = float("-0.00483897") + std = 
float("0.00870871") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0620273") + max_val = float("0.101258") + mean = float("-0.000222798") + std = float("0.00681063") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.0102195") + max_val = float("0.0101765") + mean = float("-0.000115416") + std = float("0.00396591") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00892386") + max_val = float("0.0199459") + mean = float("-0.000135272") + std = float("0.00154918") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0670605") + max_val = float("0.0310249") + mean = float("0.000294137") + std = float("0.00757806") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.290544") + max_val = float("0.608277") + mean = float("0.147622") + std = float("0.158959") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913214") + max_val = float("1.4959") + mean = float("1.08724") + std = float("0.0815711") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000173535") + max_val = float("0.00980167") + mean = float("0.00157464") + std = float("0.00163303") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.1722") + max_val = float("0.0293933") + mean = float("-0.0362368") + std = float("0.0320849") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0771266") + max_val = float("0.0600578") + mean = float("-0.00104893") + std = float("0.00708841") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00520655") + max_val = float("0.0122276") + mean = float("-0.000186298") + std = float("0.00206584") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0221196") + max_val = float("0.0288162") + mean = float("-0.000192599") + std = float("0.00153376") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00618645") + max_val = float("0.0126918") + mean = float("1.55094e-07") + std = float("0.0052247") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0898313") + max_val = float("0.115939") + mean = float("1.97397e-08") + std = float("0.00562912") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0751669") + max_val = 
float("0.111426") + mean = float("0.0119129") + std = float("0.0354094") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("0.969448") + max_val = float("1.49376") + mean = float("1.16935") + std = float("0.0775516") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("9.22385e-05") + max_val = float("0.00462362") + mean = float("0.000541906") + std = float("0.000444355") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0402063") + max_val = float("0.0136726") + mean = float("-0.00386345") + std = float("0.00617741") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0664712") + max_val = float("0.0691916") + mean = float("-0.000151613") + std = float("0.00377438") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00514094") + max_val = float("0.00626889") + mean = float("-6.43167e-05") + std = float("0.00278132") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0303347") + max_val = float("0.0123028") + mean = float("-5.00562e-05") + std = float("0.000943714") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0481817") + max_val = float("0.0280031") + mean = float("0.000415518") + std = float("0.0047328") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.369118") + max_val = float("0.494762") + mean = float("0.0350664") + std = float("0.121505") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.883076") + max_val = float("1.55393") + mean = float("1.05789") + std = float("0.0835119") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000182895") + max_val = float("0.00777303") + mean = float("0.001135") + std = float("0.00109732") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.130947") + max_val = float("0.0318047") + mean = float("-0.0288516") + std = float("0.0238519") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0496468") + max_val = float("0.0518443") + mean = float("-0.000562622") + std = float("0.0041021") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0141813") + max_val = float("0.0110761") + mean = float("-0.000147647") + std = float("0.00165558") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = 
float("-0.109289") + max_val = float("0.057067") + mean = float("-4.32392e-05") + std = float("0.00113413") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/input_meta.py new file mode 100644 index 000000000..db22b1c6a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [40] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [40] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [80] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [80] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 384, 20, 20] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("7.04506") + mean = float("0.222573") + std = float("0.551935") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 192, 40, 40] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.254") + mean = float("0.29377") + std = float("0.609551") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 96, 80, 80] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.26657") + mean = float("0.391271") + std = float("0.62744") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py new file mode 100644 index 000000000..56d62b15a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + 
parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del 
stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, 
split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] 
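+        # As on the stride-32 and stride-16 levels above, (arange(n) + 0.5) * 8
+        # gives grid-cell centres in input-image pixels for this stride-8 level;
+        # the +/-20 offsets around each centre appear to define per-cell anchor
+        # boxes with a side of 5 * stride (40 px here), and a per-anchor stride
+        # column (value 8) is filled in below before all three levels are
+        # concatenated.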
+ del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 
384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x384x-1x-1xf32, 1x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, 
reshape_9) + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x192x-1x-1xf32, 1x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) 
+ sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + 
batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x96x-1x-1xf32, 1x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x-1xf32) <- 
(2x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32]) <- (2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x-1x1xf32) <- ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/weight_meta.py new file mode 100644 index 000000000..3f8a077fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0172485") + max_val = float("0.027465") + mean = float("1.46232e-07") + std = float("0.00758165") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.193407") + max_val = float("0.203896") + mean = float("4.08909e-08") + std = float("0.0115569") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = 
float("-0.149226") + max_val = float("0.348802") + mean = float("0.0836582") + std = float("0.116186") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.92059") + max_val = float("2.01352") + mean = float("1.39698") + std = float("0.216217") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000214868") + max_val = float("0.00379309") + mean = float("0.00088") + std = float("0.000584581") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0826215") + max_val = float("0.0417897") + mean = float("-0.00846916") + std = float("0.0199154") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0948878") + max_val = float("0.109061") + mean = float("-0.000797905") + std = float("0.0137305") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.012275") + max_val = float("0.0115959") + mean = float("-0.000356062") + std = float("0.00537855") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0233111") + max_val = float("0.0248623") + mean = float("-0.000113331") + std = float("0.0035026") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0555531") + max_val = float("0.0394738") + mean = float("0.00027914") + std = float("0.0112485") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.661613") + max_val = float("1.11986") + mean = float("0.208505") + std = float("0.335963") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.773318") + max_val = float("1.56281") + mean = float("1.11195") + std = float("0.138849") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000160273") + max_val = float("0.00558751") + mean = float("0.00119567") + std = float("0.00100221") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.217468") + max_val = float("0.0872407") + mean = float("-0.0303984") + std = float("0.0509472") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.113703") + max_val = float("0.0885765") + mean = float("-0.00122357") + std = float("0.0140255") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00655335") + max_val = float("0.00788225") + mean = float("-0.000702387") + std = float("0.00315771") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0695131") 
+ max_val = float("0.0989064") + mean = float("-0.000394188") + std = float("0.00429104") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00653538") + max_val = float("0.0248622") + mean = float("1.52999e-07") + std = float("0.00624483") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.155994") + max_val = float("0.17862") + mean = float("-1.00845e-08") + std = float("0.00807346") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.111521") + max_val = float("0.136793") + mean = float("0.050372") + std = float("0.0428473") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.941879") + max_val = float("1.4895") + mean = float("1.20932") + std = float("0.101229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000166807") + max_val = float("0.00501064") + mean = float("0.000835983") + std = float("0.000696757") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.035292") + max_val = float("0.0208402") + mean = float("-0.00487211") + std = float("0.00872887") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0620273") + max_val = float("0.101258") + mean = float("-0.000222798") + std = float("0.00681063") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.0102195") + max_val = float("0.0101765") + mean = float("-0.000115416") + std = float("0.00396591") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00892386") + max_val = float("0.0199459") + mean = float("-0.000135272") + std = float("0.00154918") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0670605") + max_val = float("0.0310249") + mean = float("0.000294137") + std = float("0.00757806") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.290544") + max_val = float("0.608277") + mean = float("0.147622") + std = float("0.158959") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913214") + max_val = float("1.4959") + mean = float("1.08724") + std = float("0.0815711") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000174132") + max_val = float("0.00975865") + mean = float("0.00156737") + std = float("0.0016239") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = 
float("-0.172393") + max_val = float("0.0294285") + mean = float("-0.0362749") + std = float("0.0321233") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0771266") + max_val = float("0.0600578") + mean = float("-0.00104893") + std = float("0.00708841") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00520655") + max_val = float("0.0122276") + mean = float("-0.000186298") + std = float("0.00206584") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0221196") + max_val = float("0.0288162") + mean = float("-0.000192599") + std = float("0.00153376") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00618645") + max_val = float("0.0126918") + mean = float("1.55094e-07") + std = float("0.0052247") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0898313") + max_val = float("0.115939") + mean = float("1.97397e-08") + std = float("0.00562912") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0751669") + max_val = float("0.111426") + mean = float("0.0119129") + std = float("0.0354094") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("0.969448") + max_val = float("1.49376") + mean = float("1.16935") + std = float("0.0775516") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("9.28235e-05") + max_val = float("0.00458808") + mean = float("0.000545173") + std = float("0.000444824") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0402347") + max_val = float("0.0137294") + mean = float("-0.00387673") + std = float("0.00618745") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0664712") + max_val = float("0.0691916") + mean = float("-0.000151613") + std = float("0.00377438") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00514094") + max_val = float("0.00626889") + mean = float("-6.43167e-05") + std = float("0.00278132") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0303347") + max_val = float("0.0123028") + mean = float("-5.00562e-05") + std = float("0.000943714") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0481817") + max_val = float("0.0280031") + mean = float("0.000415518") + std = float("0.0047328") + data = None + + +class Program_weight_tensor_parameter_47: + name = 
"parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.369118") + max_val = float("0.494762") + mean = float("0.0350664") + std = float("0.121505") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.883076") + max_val = float("1.55393") + mean = float("1.05789") + std = float("0.0835119") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000183695") + max_val = float("0.00768491") + mean = float("0.00113098") + std = float("0.00108608") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.131104") + max_val = float("0.0317986") + mean = float("-0.0288971") + std = float("0.0238826") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0496468") + max_val = float("0.0518443") + mean = float("-0.000562622") + std = float("0.0041021") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0141813") + max_val = float("0.0110761") + mean = float("-0.000147647") + std = float("0.00165558") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109289") + max_val = float("0.057067") + mean = float("-4.32392e-05") + std = float("0.00113413") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..665cc1cb7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +4c194f1b47af22d5dbdc2dc8f63cad5abcfa9a3548b3439131bcdfe6c15f25bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py new file mode 100644 index 000000000..268c9fefb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py @@ -0,0 +1,124 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [8400] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 8400] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0289881") + std = float("0.183368") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 20, 8400] + dtype = "float32" + max_val = float("0.911359") + mean = float("0.0080156") + std = float("0.0534019") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape 
= [2, 20, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0014494") + std = float("0.0380434") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 20, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 20, 4] + dtype = "float32" + max_val = float("640.0") + mean = float("258.282") + std = float("180.734") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 20, 8400] + dtype = "float32" + max_val = float("0.331258") + mean = float("9.44418e-05") + std = float("0.00284092") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py new file mode 100644 index 000000000..0eefa8f8c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py @@ -0,0 +1,258 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 + ) + del argmax_0, data_2 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: 
(xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) + del data_7 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + ) + del full_8 + + # pd_op.full: (1xi64) <- () + full_9 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [1], + 
paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..658f2bbb3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +421938027b4b9ee89be16cfe46e23d1f0bea007eb4a694b280e1afd3bfbe8afb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/input_meta.py new file mode 100644 index 000000000..90255453f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/input_meta.py @@ -0,0 +1,117 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376, 1] + dtype = "float32" + min_val = float("0.000669258") + max_val = float("0.865134") + mean = float("0.0309776") + std = float("0.0553916") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376, 4] + 
dtype = "float32" + min_val = float("-294.891") + max_val = float("766.846") + mean = float("257.703") + std = float("162.051") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [5376, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("508.0") + mean = float("256.0") + std = float("147.76") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 15, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 15, 4] + dtype = "float32" + max_val = float("512.0") + mean = float("126.218") + std = float("182.758") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 15, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/model.py new file mode 100644 index 000000000..bec2cdddb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/model.py @@ -0,0 +1,338 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x15x1x4xf32) <- (2x15x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x5376x4xf32) <- (2x5376x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x15x1x2xf32) <- (2x15x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x15x1x2xf32) <- (2x15x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x5376x2xf32) <- (2x1x5376x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x5376x2xf32) <- (2x1x5376x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x15x5376x2xf32) <- (2x15x1x2xf32, 2x1x5376x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x15x5376x2xf32) <- (2x15x1x2xf32, 2x1x5376x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x15x5376x2xf32) <- (2x15x5376x2xf32, 2x15x5376x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x15x5376x2xf32) <- (2x15x5376x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x15x5376xf32) <- (2x15x5376x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x15x1x2xf32) <- (2x15x1x2xf32, 2x15x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x15x1x2xf32) <- (2x15x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x15x1xf32) <- (2x15x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x5376x2xf32) <- (2x1x5376x2xf32, 2x1x5376x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x5376x2xf32) <- (2x1x5376x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (2x1x5376xf32) <- (2x1x5376x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x15x5376xf32) <- (2x15x1xf32, 2x1x5376xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x15x5376xf32) <- (2x15x5376xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del full_2, subtract_3 + + # pd_op.divide: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x1x5376xf32) <- (2x5376x1xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 15] + + # pd_op.tile: (2x15xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x15xi32) <- (2x15x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) + del data_3 + + # builtin.combine: ([2x15xi32, 2x15xi32]) <- (2x15xi32, 2x15xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x15x2xi32) <- ([2x15xi32, 2x15xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x15x5376xf32) <- (2x1x5376xf32, 2x15x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x15x5376xf32) <- 
(2x15x5376xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x15x5376xf32) <- (2x15x5376xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x5376x2xf32) <- (5376x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2, full_int_array_6 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x5376x1xf32, 1x1x5376x1xf32]) <- (1x1x5376x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # builtin.split: (1x1x5376x1xf32, 1x1x5376x1xf32) <- ([1x1x5376x1xf32, 1x1x5376x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x15x1x1xf32, 2x15x1x1xf32, 2x15x1x1xf32, 2x15x1x1xf32]) <- (2x15x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x15x1x1xf32, 2x15x1x1xf32, 2x15x1x1xf32, 2x15x1x1xf32) <- ([2x15x1x1xf32, 2x15x1x1xf32, 2x15x1x1xf32, 2x15x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x15x5376x1xf32) <- (1x1x5376x1xf32, 2x15x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x15x5376x1xf32) <- (1x1x5376x1xf32, 2x15x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x15x5376x1xf32) <- (2x15x1x1xf32, 1x1x5376x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x15x5376x1xf32) <- (2x15x1x1xf32, 1x1x5376x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x15x5376x1xf32, 2x15x5376x1xf32, 2x15x5376x1xf32, 2x15x5376x1xf32]) <- (2x15x5376x1xf32, 2x15x5376x1xf32, 2x15x5376x1xf32, 2x15x5376x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x15x5376x4xf32) <- ([2x15x5376x1xf32, 2x15x5376x1xf32, 2x15x5376x1xf32, 2x15x5376x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1, full_7 + + # pd_op.min: (2x15x5376xf32) <- (2x15x5376x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x15x5376xb) <- (2x15x5376xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del full_8, min_0 + + # pd_op.cast: (2x15x5376xf32) <- (2x15x5376xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x15x13xf32, 
2x15x13xi64) <- (2x15x5376xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_9, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_9, multiply_1 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("5376"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x15x13x5376xf32) <- (2x15x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 + ) + del full_10, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x15x5376xf32) <- (2x15x13x5376xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_5) + del sum_0 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x5376xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) + del cast_0, multiply_2 + + # pd_op.multiply: (2x15x5376xf32) <- (2x15x5376xf32, 2x15x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) + del data_5, multiply_3 + + # pd_op.sum: (2x5376xf32) <- (2x15x5376xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x5376xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) + del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py new file mode 100644 index 000000000..201ee0397 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 8400, 4] + dtype 
= "float32" + min_val = float("-8.72054") + max_val = float("86.5058") + mean = float("34.8123") + std = float("23.6174") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 8400, 4] + dtype = "float32" + max_val = float("80.0") + mean = float("34.7665") + std = float("25.0051") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 8400, 1] + dtype = "float32" + max_val = float("0.911359") + mean = float("0.00967398") + std = float("0.0703979") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [162.523] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 8400, 68] + dtype = "float32" + min_val = float("-7.01895") + max_val = float("14.5228") + mean = float("8.74382e-06") + std = float("1.67872") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py new file mode 100644 index 000000000..e706a2c08 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py @@ -0,0 +1,509 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (2x-1xi32) <- (2x-1xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (2x-1xf32) <- (2x-1x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- 
(-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + 
assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (2x-1x68xi32) <- (2x-1x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (2x-1x68xb) <- (2x-1x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (2x-1x68xf32, 2x-1x68xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) + 
split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + 
paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..babd6567f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/input_meta.py new file mode 100644 index 000000000..9c4624ce5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/input_meta.py @@ -0,0 +1,49 @@ +class 
Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 4116, 1] + dtype = "float32" + min_val = float("1.5049e-05") + max_val = float("0.499167") + mean = float("0.0312852") + std = float("0.0464118") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4116, 68] + dtype = "float32" + min_val = float("-6.68647") + max_val = float("12.4806") + mean = float("9.84161e-06") + std = float("1.67254") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4116, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("444.0") + mean = float("224.0") + std = float("129.279") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4116, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/model.py new file mode 100644 index 000000000..4bd73f18f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3, data_4): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_3, data_4) + del data_3 + + # pd_op.shape64: (3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_2, stack_0) + del data_2, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # 
pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (2x-1x1xf32) <- (2x-1x1xf32) + share_data__0 = data_1.detach() + del data_1 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_4, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..d7f7b18a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +7bb8a2b2502a471463ad03a6babccb8a2db42f0cbace538187cdc42cf672f3d5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/input_meta.py new file mode 100644 index 000000000..e5e8a7d9f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/input_meta.py @@ -0,0 +1,144 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [20] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 8400, 1] + dtype = "float32" + min_val = float("0.000732164") + max_val = float("0.935386") + mean = float("0.0393097") + std = float("0.0887762") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("-279.057") + max_val = float("909.266") + mean = float("320.456") + std = float("197.243") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8400, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("636.0") + mean = float("320.0") + std = float("184.719") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 20, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 20, 4] + dtype = "float32" + max_val = float("640.0") + mean = float("258.282") + std = float("180.734") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 20, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/model.py new file mode 100644 index 000000000..c8d4c25a1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/model.py @@ -0,0 +1,385 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, 
paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) + del data_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x-1x-1xf32) <- (2x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_1, full_2, subtract_2 + + # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: 
(2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) + del full_3, subtract_3 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x1x-1xf32) <- (2x-1x1xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int32") + del full_4, full_5, full_6 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_7, data_0] + del data_0, full_7 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) + del stack_0 + + # pd_op.squeeze: (2x-1xi32) <- (2x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([2x-1xi32, 2x-1xi32]) <- (2x-1xi32, 2x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x-1x2xi32) <- ([2x-1xi32, 2x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.gather_nd: (2x-1x-1xf32) <- (2x1x-1xf32, 2x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) + del data_3, full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_8) + del unsqueeze_3 + + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) <- 
(2x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_8) + del full_8, unsqueeze_0 + + # builtin.split: (2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32) <- ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) + combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x-1x-1x4xf32) <- ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_9) + del combine_2, full_9 + + # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_10) + del full_10, min_0 + + # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) + cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_2) + + # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] + ) + del full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] + ) + del full_int_array_0, full_int_array_6, shape64_0 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x-1x13xf32, 2x-1x13xi64) <- (2x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, multiply_1 + + # pd_op.one_hot: (2x-1x13x-1xf32) <- (2x-1x13xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 + ) + del slice_5, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x-1x-1xf32) <- (2x-1x13x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # 
pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_6) + del sum_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_2) + del cast_2, multiply_2 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_6) + del data_6, multiply_3 + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) + del divide_0, full_12, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py new file mode 100644 index 000000000..4202f26aa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.0843685") + max_val = float("15.1723") + mean = float("4.87061") + std = float("3.19583") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.20075, 0.802005, 1.74863, 1.16364] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..35c8b5035 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ 
+9b97a8be55a85c114a1defeeda79b51bdacb0da2022b5c09e40b3c3e409a91cc \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/input_meta.py new file mode 100644 index 000000000..0efbf63c7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 640, 640] + dtype = "float32" + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.558125") + std = float("1.3994") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/model.py new file mode 100644 index 000000000..83d1c05ac --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/model.py @@ -0,0 +1,4278 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + 
parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + 
parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + data_0, + ): + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_422, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_422 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_421, + parameter_420, + parameter_419, + parameter_418, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_418, parameter_419, parameter_420, parameter_421 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_417, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_417, swish_0 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_416, + parameter_415, + parameter_414, + parameter_413, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_413, parameter_414, parameter_415, parameter_416 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_412, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" 
+ ) + del parameter_412, swish_1 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_411, + parameter_410, + parameter_409, + parameter_408, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_408, parameter_409, parameter_410, parameter_411 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_407, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_407, swish_2 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_406, + parameter_405, + parameter_404, + parameter_403, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_403, parameter_404, parameter_405, parameter_406 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_402, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_402 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_401, + parameter_400, + parameter_399, + parameter_398, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_398, parameter_399, parameter_400, parameter_401 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397, swish_3 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_393, parameter_394, parameter_395, 
parameter_396 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_392, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_392 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_387, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_386, + parameter_385, + parameter_384, + parameter_383, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_383, parameter_384, parameter_385, parameter_386 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_382, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_382, swish_6 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_381, + parameter_380, + parameter_379, + parameter_378, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_378, parameter_379, parameter_380, parameter_381 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + del batch_norm__42, batch_norm__48 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + combine_0 = [swish_4, add_1] + del add_1, swish_4 + + # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) + concat_2 = 
paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_377, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_377 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_376, full_int_array_1) + del parameter_376 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + del conv2d_9, reshape_0 + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_0, parameter_375, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_375 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_374, + parameter_373, + parameter_372, + parameter_371, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_371, parameter_372, parameter_373, parameter_374 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_370, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_370, swish_8 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_369, + parameter_368, + parameter_367, + parameter_366, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_366, parameter_367, parameter_368, parameter_369 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_9, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_365 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_364, + parameter_363, + parameter_362, + parameter_361, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_361, parameter_362, parameter_363, parameter_364 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_9, parameter_360, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_360, swish_9 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_359, + parameter_358, + parameter_357, + parameter_356, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_356, parameter_357, parameter_358, parameter_359 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_355, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_355 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_354, + parameter_353, + parameter_352, + parameter_351, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_351, parameter_352, parameter_353, parameter_354 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_12, parameter_350, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_350 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_349, + parameter_348, + parameter_347, + parameter_346, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_346, parameter_347, parameter_348, parameter_349 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_12, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_345, swish_12 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 
48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_344, + parameter_343, + parameter_342, + parameter_341, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_341, parameter_342, parameter_343, parameter_344 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) + del batch_norm__84, batch_norm__90 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(add_3) + del add_3 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_11, swish_13) + del swish_11, swish_13 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_340 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_339, + parameter_338, + parameter_337, + parameter_336, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_336, parameter_337, parameter_338, parameter_339 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_335, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_335 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_334, + parameter_333, + parameter_332, + parameter_331, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_331, parameter_332, parameter_333, parameter_334 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_330, swish_14 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_329, + parameter_328, + parameter_327, + parameter_326, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, 
None, None, None, None, None), + ) + del conv2d_19, parameter_326, parameter_327, parameter_328, parameter_329 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + del batch_norm__102, batch_norm__108 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_15) + del add_4, swish_15 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_1 = [swish_10, add_6] + del add_6, swish_10 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_325, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_325 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_324, full_int_array_1) + del parameter_324 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + del conv2d_20, reshape_1 + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_1, parameter_323, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_323 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_318, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + 
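# Note: the (lambda x, f: f(x)) wrapper around paddle._C_ops.batch_norm pads the result with None entries whenever a single tensor (rather than a tuple) is returned, so the 6-way unpacking above always succeeds; the same pattern repeats for every batch_norm in this exported graph. +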
del conv2d_22, parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__120) + del batch_norm__120 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_17, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_17, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308, swish_17 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_303, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_298, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + 
batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_293, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_293, swish_20 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) + del batch_norm__144, batch_norm__150 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_8) + del add_8 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_19, swish_21) + del swish_19, swish_21 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_288, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__156) + del batch_norm__156 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_22, parameter_283, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_283 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.conv2d: 
(2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278, swish_22 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) + del batch_norm__162, batch_norm__168 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_10) + del add_10 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_23) + del add_9, swish_23 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_2 = [swish_18, add_11] + del add_11, swish_18 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_273, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_273 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_272, full_int_array_1) + del parameter_272 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + del conv2d_31, reshape_2 + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_2, parameter_271, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_271 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_270, + parameter_269, + parameter_268, + parameter_267, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_267, parameter_268, parameter_269, parameter_270 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: 
(2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_266, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_266 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_265, + parameter_264, + parameter_263, + parameter_262, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_262, parameter_263, parameter_264, parameter_265 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_261, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_261 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_260, + parameter_259, + parameter_258, + parameter_257, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_257, parameter_258, parameter_259, parameter_260 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_25, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256, swish_25 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_255, + parameter_254, + parameter_253, + parameter_252, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_252, parameter_253, parameter_254, parameter_255 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_251, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_251 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_250, + parameter_249, + parameter_248, + 
parameter_247, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_247, parameter_248, parameter_249, parameter_250 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__198) + del batch_norm__198 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_246, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_246 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_245, + parameter_244, + parameter_243, + parameter_242, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_242, parameter_243, parameter_244, parameter_245 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_241, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_241, swish_28 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_240, + parameter_239, + parameter_238, + parameter_237, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_237, parameter_238, parameter_239, parameter_240 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) + del batch_norm__204, batch_norm__210 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_27, swish_29) + del swish_27, swish_29 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_3 = [swish_26, add_14] + del add_14, swish_26 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_236, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_236 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_235, full_int_array_1) + del parameter_235 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + del conv2d_39, reshape_3 + + # pd_op.hardsigmoid: 
(2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_3, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_234 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_30 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_32 + + # pd_op.batch_norm_: 
(2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + del batch_norm__240, batch_norm__246 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_16) + del add_16 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_34, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_34, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 
= paddle._C_ops.pool2d( + swish_34, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_4 = [swish_34, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_34 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_6, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_204 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__252) + del batch_norm__252 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_5 = [swish_31, swish_35] + del swish_31, swish_35 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_199 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_36, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_37, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_37 + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_24] + del nearest_interp_0, swish_24 + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_184 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_39, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_39 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_40, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_40 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_17) + del add_17 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_7 = [swish_38, swish_41] + del swish_38, swish_41 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_42, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_43, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_43 + + # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_16] + del nearest_interp_1, swish_16 + + # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_10, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__312) + del batch_norm__312 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_10, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_149 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x48x-1x-1xf32) 
<- (2x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_45 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134, swish_46 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + del batch_norm__330, batch_norm__336 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(add_18) + del add_18 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_9 = [swish_44, swish_47] + del swish_44, swish_47 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_11, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
concat_11, parameter_129 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_61, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_124, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__348) + del batch_norm__348 + + # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) + combine_10 = [swish_49, swish_42] + del swish_42, swish_49 + + # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_12, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_12, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_114 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_64, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_51, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109, swish_51 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_52, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_52, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99, swish_52 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_19) + del add_19 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_11 = 
[swish_50, swish_53] + del swish_50, swish_53 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_13, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_94 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_54, parameter_89, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__390) + del batch_norm__390 + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_12 = [swish_55, swish_36] + del swish_36, swish_55 + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_14, parameter_84, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 
= paddle._C_ops.conv2d( + concat_14, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_79 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_57, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_57 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_58, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_58, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_58 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, 
parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_20) + del add_20 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_13 = [swish_56, swish_59] + del swish_56, swish_59 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_1 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) + del slice_0, slice_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [1, 1] + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_60, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_76, reshape_4) + del conv2d_76, reshape_4 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_21) + del add_21 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_5 = paddle._C_ops.multiply(swish_60, sigmoid_0) + del sigmoid_0 + + # 
pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_5, parameter_52 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_22 = paddle._C_ops.add(swish_61, swish_60) + del swish_61 + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x384x-1x-1xf32, 1x384x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + add_22, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_22, parameter_47 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_23 = paddle._C_ops.add(conv2d_78, reshape_5) + del conv2d_78, reshape_5 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_24 = paddle._C_ops.add(conv2d_79, reshape_6) + del conv2d_79, reshape_6 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_24) + del add_24 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_6 = paddle._C_ops.multiply(swish_60, sigmoid_1) + del sigmoid_1, swish_60 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_6, parameter_43 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_62, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 
1], 1, "NCHW" + ) + del parameter_38, swish_62 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_25 = paddle._C_ops.add(conv2d_81, reshape_7) + del conv2d_81, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_4] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_25, stack_0) + del add_25, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_82 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1] + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_82, full_int_array_9) + del conv2d_82 + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_23) + del add_23 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_4, multiply_4] + del multiply_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x1x-1xf32) <- (2x1x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_3 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) + del slice_2, slice_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_54, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_83 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = 
paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_26 = paddle._C_ops.add(conv2d_83, reshape_10) + del conv2d_83, reshape_10 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_26) + del add_26 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_8 = paddle._C_ops.multiply(swish_54, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_8, parameter_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(swish_63, swish_54) + del swish_63 + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x192x-1x-1xf32, 1x192x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + add_27, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_27, parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_85, reshape_11) + del conv2d_85, reshape_11 + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_86 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_29 = paddle._C_ops.add(conv2d_86, reshape_12) + del conv2d_86, reshape_12 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_29) + del add_29 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_9 = paddle._C_ops.multiply(swish_54, sigmoid_4) + del sigmoid_4, swish_54 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", 
+ True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_64, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_64 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_88, reshape_13) + del conv2d_88, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_7] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, stack_2) + del add_30, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_89, full_int_array_9) + del conv2d_89 + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_28) + del add_28 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_4, multiply_7] + del multiply_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x1x-1xf32) <- (2x1x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_4 + + # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_48, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_8 + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = 
paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_31 = paddle._C_ops.add(conv2d_90, reshape_16) + del conv2d_90, reshape_16 + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_31) + del add_31 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_48, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_15 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_32 = paddle._C_ops.add(swish_65, swish_48) + del swish_65 + + # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x96x-1x-1xf32, 1x96x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + add_32, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_32, parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) + add_33 = paddle._C_ops.add(conv2d_92, reshape_17) + del conv2d_92, reshape_17 + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_93 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_34 = paddle._C_ops.add(conv2d_93, reshape_18) + del conv2d_93, reshape_18 + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_34) + del add_34 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_48, sigmoid_7) + del sigmoid_7, swish_48 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_6 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_66, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_66 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_95, reshape_19) + del conv2d_95, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_10] + del full_2, full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_35, stack_4) + del add_35, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_96 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_96, full_int_array_9) + del conv2d_96, full_int_array_9 + + # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_33) + del add_33 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_4, multiply_10] + del full_1, full_4, multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x1x-1xf32) <- (2x1x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x1x-1xf32, -1x1x-1xf32, -1x1x-1xf32]) <- (-1x1x-1xf32, -1x1x-1xf32, -1x1x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x1x-1xf32) <- ([-1x1x-1xf32, -1x1x-1xf32, -1x1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_5) + del combine_20, full_5 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/weight_meta.py new file mode 100644 index 000000000..a4ea9ef0c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/weight_meta.py @@ -0,0 +1,4457 @@ +class 
Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0163307") + max_val = float("0.0275929") + mean = float("1.46567e-07") + std = float("0.00765504") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.192808") + max_val = float("0.205093") + mean = float("4.08909e-08") + std = float("0.0115702") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149252") + max_val = float("0.349001") + mean = float("0.0835852") + std = float("0.116184") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.919771") + max_val = float("2.01389") + mean = float("1.39702") + std = float("0.216297") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000313847") + max_val = float("0.00421753") + mean = float("0.00123865") + std = float("0.000700263") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0793845") + max_val = float("0.044821") + mean = float("-0.00684044") + std = float("0.0212271") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0969632") + max_val = float("0.109979") + mean = float("-0.000823613") + std = float("0.0139602") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.0123995") + max_val = float("0.0116914") + mean = float("-0.000358165") + std = float("0.00541008") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0236713") + max_val = float("0.0246478") + mean = float("-0.000113513") + std = float("0.00351232") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0569998") + max_val = float("0.0321533") + mean = float("-0.00116458") + std = float("0.0120465") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.661843") + max_val = float("1.11699") + mean = float("0.20777") + std = float("0.335629") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.77338") + max_val = float("1.56279") + mean = float("1.11158") + std = float("0.13896") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000334776") + max_val = float("0.00910882") + mean = float("0.00197008") + std = float("0.00133187") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.207032") + max_val = float("0.0863594") + mean = float("-0.0240909") + std = float("0.0496559") + data = None + + +class Program_weight_tensor_parameter_15: 
+ name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.114352") + max_val = float("0.0839186") + mean = float("-0.00100657") + std = float("0.0143567") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.0063111") + max_val = float("0.00917824") + mean = float("-0.000700085") + std = float("0.00313538") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0692012") + max_val = float("0.0984743") + mean = float("-0.000397446") + std = float("0.00426427") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00655906") + max_val = float("0.0249212") + mean = float("1.5297e-07") + std = float("0.0063484") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.156725") + max_val = float("0.1784") + mean = float("-1.00699e-08") + std = float("0.00808214") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.112356") + max_val = float("0.136771") + mean = float("0.050338") + std = float("0.0428929") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.941564") + max_val = float("1.48964") + mean = float("1.20947") + std = float("0.101263") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000245904") + max_val = float("0.00500193") + mean = float("0.00117342") + std = float("0.000804413") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0324674") + max_val = float("0.0216373") + mean = float("-0.00597953") + std = float("0.00917446") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0624337") + max_val = float("0.101627") + mean = float("-0.000272172") + std = float("0.00689876") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.0102569") + max_val = float("0.0101425") + mean = float("-0.000115904") + std = float("0.00398212") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00896481") + max_val = float("0.0198979") + mean = float("-0.000134851") + std = float("0.00155236") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0656306") + max_val = float("0.0315766") + mean = float("0.000603919") + std = float("0.00731087") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.290617") + max_val = float("0.608093") + mean = float("0.147486") + std = float("0.158939") + data = None + + +class 
Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913305") + max_val = float("1.49614") + mean = float("1.08725") + std = float("0.0815601") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000323452") + max_val = float("0.0106743") + mean = float("0.00209889") + std = float("0.0018428") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.160357") + max_val = float("0.0295584") + mean = float("-0.0355943") + std = float("0.0295474") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0778715") + max_val = float("0.0605191") + mean = float("-0.0010786") + std = float("0.00718304") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00529322") + max_val = float("0.0123779") + mean = float("-0.000186711") + std = float("0.00207214") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0221781") + max_val = float("0.028757") + mean = float("-0.000192755") + std = float("0.00153223") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.006167") + max_val = float("0.013496") + mean = float("1.54832e-07") + std = float("0.0052848") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0895466") + max_val = float("0.114612") + mean = float("1.97178e-08") + std = float("0.00563151") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("-0.0751319") + max_val = float("0.111518") + mean = float("0.011909") + std = float("0.0354143") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("0.969588") + max_val = float("1.49393") + mean = float("1.16953") + std = float("0.077571") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.000143777") + max_val = float("0.00429499") + mean = float("0.000762601") + std = float("0.000518202") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-0.0384409") + max_val = float("0.0141075") + mean = float("-0.0039853") + std = float("0.00647297") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0669083") + max_val = float("0.0686928") + mean = float("-0.000153253") + std = float("0.00379707") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.00515159") + max_val = float("0.00627215") + mean = float("-6.45075e-05") + std = 
float("0.0027863") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.030346") + max_val = float("0.0123038") + mean = float("-5.00784e-05") + std = float("0.000944853") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0461587") + max_val = float("0.0285616") + mean = float("0.000467543") + std = float("0.00455667") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.369161") + max_val = float("0.494787") + mean = float("0.0350315") + std = float("0.121512") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.883133") + max_val = float("1.55414") + mean = float("1.058") + std = float("0.0835096") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("0.000226873") + max_val = float("0.00986458") + mean = float("0.00135755") + std = float("0.00135202") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("-0.131605") + max_val = float("0.0328457") + mean = float("-0.029394") + std = float("0.0238602") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0495981") + max_val = float("0.0516572") + mean = float("-0.000565") + std = float("0.00411614") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0143905") + max_val = float("0.0110406") + mean = float("-0.000147783") + std = float("0.00165664") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109273") + max_val = float("0.0569415") + mean = float("-4.33282e-05") + std = float("0.00113346") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.652412") + max_val = float("1.18868") + mean = float("0.028027") + std = float("0.23864") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.840927") + max_val = float("1.38172") + mean = float("0.983279") + std = float("0.0682552") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00493241") + max_val = float("0.13321") + mean = float("0.0165815") + std = float("0.0118007") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.218846") + max_val = float("0.0775906") + mean = float("-0.0337555") + std = float("0.0420991") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100981") + max_val = float("0.0660343") + mean = float("-0.0003663") + 
std = float("0.00635283") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [192] + dtype = "float32" + min_val = float("-0.444364") + max_val = float("0.100385") + mean = float("-0.0846002") + std = float("0.104443") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [192] + dtype = "float32" + min_val = float("0.827627") + max_val = float("1.20934") + mean = float("0.926424") + std = float("0.046184") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [192] + dtype = "float32" + min_val = float("0.00251987") + max_val = float("0.0159434") + mean = float("0.00757274") + std = float("0.00257049") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [192] + dtype = "float32" + min_val = float("-0.048923") + max_val = float("0.0568989") + mean = float("-0.00235178") + std = float("0.0277083") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0501355") + max_val = float("0.0597728") + mean = float("-0.00031338") + std = float("0.00409301") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [192] + dtype = "float32" + min_val = float("-0.444364") + max_val = float("0.100385") + mean = float("-0.0846002") + std = float("0.104443") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [192] + dtype = "float32" + min_val = float("0.862156") + max_val = float("1.42169") + mean = float("1.1121") + std = float("0.0818691") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [192] + dtype = "float32" + min_val = float("0.00710722") + max_val = float("0.118693") + mean = float("0.023709") + std = float("0.0122795") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [192] + dtype = "float32" + min_val = float("-0.110066") + max_val = float("0.0773703") + mean = float("-0.0202408") + std = float("0.0351718") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0715017") + max_val = float("0.0795768") + mean = float("-0.000136613") + std = float("0.00376362") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.519405") + max_val = float("0.119361") + mean = float("-0.173688") + std = float("0.128102") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.843348") + max_val = float("1.65154") + mean = float("1.06427") + std = float("0.100943") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.0154812") + max_val = float("0.101307") + mean = float("0.0367064") + std = float("0.0149866") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.200798") + max_val = float("0.113029") + mean = float("-0.0714866") + std = float("0.0546003") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0700468") + max_val = 
float("0.0844069") + mean = float("-0.00029784") + std = float("0.00411446") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.455534") + max_val = float("0.186854") + mean = float("-0.0819333") + std = float("0.10193") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.842289") + max_val = float("1.25465") + mean = float("1.02705") + std = float("0.0670224") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.0058961") + max_val = float("0.0528038") + mean = float("0.0140426") + std = float("0.00642803") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.109344") + max_val = float("0.0802359") + mean = float("-0.0201977") + std = float("0.0321837") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.102845") + max_val = float("0.102872") + mean = float("-0.000202321") + std = float("0.00584292") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.217732") + max_val = float("0.0349152") + mean = float("-0.0691269") + std = float("0.038583") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.843844") + max_val = float("1.15231") + mean = float("1.01558") + std = float("0.0503168") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00399343") + max_val = float("0.0571346") + mean = float("0.0106075") + std = float("0.00583007") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.10334") + max_val = float("0.101338") + mean = float("-0.0275971") + std = float("0.029018") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0438454") + max_val = float("0.0636846") + mean = float("-0.00033995") + std = float("0.00529052") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.29566") + max_val = float("-0.00736167") + mean = float("-0.0909067") + std = float("0.0602533") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.782649") + max_val = float("1.34864") + mean = float("1.05301") + std = float("0.0658496") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00965167") + max_val = float("0.0952922") + mean = float("0.02547") + std = float("0.0128275") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.272824") + max_val = float("0.325847") + mean = float("-0.0402323") + std = float("0.0921633") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 3, 3] + dtype = 
"float32" + min_val = float("-0.0373015") + max_val = float("0.0428806") + mean = float("-8.89687e-05") + std = float("0.00303968") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.529492") + max_val = float("1.03232") + mean = float("0.148041") + std = float("0.259307") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.733074") + max_val = float("1.57103") + mean = float("1.01438") + std = float("0.106471") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00886385") + max_val = float("0.149483") + mean = float("0.0308811") + std = float("0.0195022") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.285269") + max_val = float("0.190366") + mean = float("-0.0411682") + std = float("0.0555587") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.149885") + max_val = float("0.116546") + mean = float("-0.000797077") + std = float("0.0117709") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("-0.290448") + max_val = float("0.171859") + mean = float("-0.0709887") + std = float("0.105379") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("0.730719") + max_val = float("1.20732") + mean = float("0.877889") + std = float("0.0776592") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96] + dtype = "float32" + min_val = float("0.00269725") + max_val = float("0.0235544") + mean = float("0.00912672") + std = float("0.00399733") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [96] + dtype = "float32" + min_val = float("-0.043472") + max_val = float("0.0317433") + mean = float("-0.00957491") + std = float("0.0219505") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0584486") + max_val = float("0.0584962") + mean = float("-0.00136724") + std = float("0.00706042") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.290448") + max_val = float("0.171859") + mean = float("-0.0709887") + std = float("0.105379") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.970121") + max_val = float("1.32067") + mean = float("1.13235") + std = float("0.0750719") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.00983473") + max_val = float("0.0607255") + mean = float("0.0273053") + std = float("0.0111428") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.0647536") + max_val = float("0.0782784") + mean = float("-0.00781233") + std = float("0.0255747") + data = None + + +class Program_weight_tensor_parameter_104: + name = 
"parameter_104" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0891592") + max_val = float("0.0951206") + mean = float("-0.000149461") + std = float("0.00702326") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.672786") + max_val = float("0.111145") + mean = float("-0.259065") + std = float("0.150325") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("0.803037") + max_val = float("1.40812") + mean = float("1.04549") + std = float("0.116766") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [96] + dtype = "float32" + min_val = float("0.024322") + max_val = float("0.145971") + mean = float("0.0475988") + std = float("0.0190494") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.14253") + max_val = float("0.0572453") + mean = float("-0.0503258") + std = float("0.031919") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0837165") + max_val = float("0.110891") + mean = float("-0.000547585") + std = float("0.00782371") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.643027") + max_val = float("0.150766") + mean = float("-0.155399") + std = float("0.115388") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.848979") + max_val = float("1.26323") + mean = float("1.03366") + std = float("0.0720591") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00805419") + max_val = float("0.0523056") + mean = float("0.0218209") + std = float("0.00804793") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.123211") + max_val = float("0.028595") + mean = float("-0.0348856") + std = float("0.028161") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0807313") + max_val = float("0.105058") + mean = float("-0.000622323") + std = float("0.0106254") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.197879") + max_val = float("0.0829326") + mean = float("-0.0298967") + std = float("0.0459711") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.687122") + max_val = float("1.3367") + mean = float("0.954603") + std = float("0.0883876") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.00516108") + max_val = float("0.0542385") + mean = float("0.0148265") + std = float("0.00775269") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0798188") + max_val = float("0.0689562") + mean = float("-0.0115996") + std = float("0.0305599") + data = None 
+ + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0766702") + max_val = float("0.0794778") + mean = float("-0.00020077") + std = float("0.00898092") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.334792") + max_val = float("0.0180334") + mean = float("-0.108684") + std = float("0.0839829") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.730948") + max_val = float("1.20448") + mean = float("1.05528") + std = float("0.0746083") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0110936") + max_val = float("0.071451") + mean = float("0.0245603") + std = float("0.0108497") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.482531") + max_val = float("0.573528") + mean = float("-0.000560784") + std = float("0.169821") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0661076") + max_val = float("0.0630252") + mean = float("-2.06632e-05") + std = float("0.006297") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-1.07008") + max_val = float("2.35472") + mean = float("0.311187") + std = float("0.586174") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.476967") + max_val = float("1.41049") + mean = float("0.883555") + std = float("0.166807") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00589901") + max_val = float("0.198467") + mean = float("0.0421386") + std = float("0.0296621") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.342217") + max_val = float("0.225623") + mean = float("-0.0164005") + std = float("0.0760282") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.196834") + max_val = float("0.117682") + mean = float("-0.00106869") + std = float("0.0223541") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0712602") + max_val = float("0.0718291") + mean = float("-0.0024387") + std = 
float("0.0127401") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.13621") + max_val = float("0.182583") + mean = float("-0.000499217") + std = float("0.0140042") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.114987") + max_val = float("0.165091") + mean = float("-0.000945569") + std = float("0.0152021") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.242079") + max_val = float("0.162661") + mean = float("-0.0009583") + std = float("0.0193368") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + 
name = "parameter_154" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.112223") + max_val = float("0.15074") + mean = float("0.000104434") + std = float("0.0145183") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.354205") + max_val = float("0.387992") + mean = float("-0.00787041") + std = float("0.13551") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.580833") + max_val = float("1.61278") + mean = float("0.798233") + std = float("0.141253") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00935292") + max_val = float("0.154225") + mean = float("0.0331852") + std = float("0.0238495") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.185005") + max_val = float("0.0639201") + mean = float("-0.0332347") + std = float("0.0409409") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.106488") + max_val = float("0.128147") + mean = float("-0.00102675") + std = float("0.0143571") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.337813") + max_val = float("0.175399") + mean = float("-0.0799928") + std = float("0.0894844") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.697109") + max_val = float("1.47498") + mean = float("0.991096") + std = float("0.0991178") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.00896957") + max_val = float("0.11584") + mean = float("0.02548") + std = float("0.0119719") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.199172") + max_val = float("0.184837") + mean = float("-0.0539991") + std = float("0.0541833") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.113856") + max_val = float("0.159339") + mean = float("-0.00135097") + std = float("0.014143") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.308056") + max_val = float("0.100869") + mean = float("-0.0814319") + std = float("0.0991315") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.552483") + max_val = float("0.935581") + mean = float("0.809788") + std = float("0.065447") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00774417") + max_val = float("0.0309166") + mean = float("0.0171435") + std = float("0.00538118") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0456495") + max_val = float("0.0345007") + mean = float("-0.0168296") + std = float("0.0191065") + data 
= None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0537465") + max_val = float("0.0565006") + mean = float("-0.00190934") + std = float("0.00941741") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.308056") + max_val = float("0.100869") + mean = float("-0.0814319") + std = float("0.0991315") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.843507") + max_val = float("1.28979") + mean = float("1.03489") + std = float("0.0945734") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0137113") + max_val = float("0.130869") + mean = float("0.0455843") + std = float("0.0182419") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.0897292") + max_val = float("0.0381496") + mean = float("-0.0226693") + std = float("0.0267104") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.106428") + max_val = float("0.208719") + mean = float("-0.000274714") + std = float("0.00838485") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.729017") + max_val = float("0.318529") + mean = float("-0.275227") + std = float("0.174833") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764417") + max_val = float("1.31279") + mean = float("1.04363") + std = float("0.115705") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0271559") + max_val = float("0.148551") + mean = float("0.0588704") + std = float("0.0227166") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.137054") + max_val = float("0.0828371") + mean = float("-0.0553352") + std = float("0.0447619") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.188418") + max_val = float("0.164129") + mean = float("-0.000595268") + std = float("0.00991775") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.646384") + max_val = float("0.382978") + mean = float("-0.253686") + std = float("0.209416") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.738014") + max_val = float("1.37809") + mean = float("1.0259") + std = float("0.122238") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.00908363") + max_val = float("0.0603377") + mean = float("0.0210404") + std = float("0.00802409") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.378384") + max_val = float("0.335917") + mean = 
float("0.0211247") + std = float("0.0800373") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.206402") + max_val = float("0.13389") + mean = float("-0.000659265") + std = float("0.0130104") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.238659") + max_val = float("0.170264") + mean = float("-0.0409717") + std = float("0.0884392") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.915303") + max_val = float("1.41228") + mean = float("1.07281") + std = float("0.0923147") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00678826") + max_val = float("0.0710945") + mean = float("0.0195807") + std = float("0.00994888") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.127214") + max_val = float("0.113488") + mean = float("0.0122712") + std = float("0.0393284") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0954995") + max_val = float("0.157952") + mean = float("-0.000585108") + std = float("0.0114624") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.538677") + max_val = float("-0.10111") + mean = float("-0.294294") + std = float("0.0707617") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.652028") + max_val = float("1.08106") + mean = float("0.852103") + std = float("0.0724995") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.0142155") + max_val = float("0.101132") + mean = float("0.0330356") + std = float("0.0148941") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.127378") + max_val = float("0.0574297") + mean = float("-0.0350462") + std = float("0.0346896") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0625454") + max_val = float("0.0695873") + mean = float("-0.000757186") + std = float("0.0103635") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.52133") + max_val = float("0.214144") + mean = float("-0.168244") + std = float("0.077536") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.850791") + max_val = float("1.39429") + mean = float("1.06276") + std = float("0.0773233") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00770505") + max_val = float("0.0768713") + mean = float("0.0215296") + std = float("0.00896033") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + 
min_val = float("-0.141261") + max_val = float("0.0834302") + mean = float("-0.0447718") + std = float("0.0407265") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.121859") + max_val = float("0.13235") + mean = float("-0.000702913") + std = float("0.00937657") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.383137") + max_val = float("0.227205") + mean = float("-0.11791") + std = float("0.101924") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.86892") + max_val = float("1.51506") + mean = float("1.12311") + std = float("0.118984") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.0817753") + max_val = float("0.916578") + mean = float("0.283568") + std = float("0.14005") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-2.60933") + max_val = float("1.84773") + mean = float("-0.285018") + std = float("0.928792") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.14293") + max_val = float("0.101718") + mean = float("-0.000155675") + std = float("0.00816021") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.243086") + max_val = float("0.168519") + mean = float("-0.0174047") + std = float("0.0539075") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.618656") + max_val = float("1.01619") + mean = float("0.837578") + std = float("0.0631344") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.00637876") + max_val = float("0.0339486") + mean = float("0.0151296") + std = float("0.00486236") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.14601") + max_val = float("0.078116") + mean = float("-0.0669569") + std = float("0.0474516") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0461945") + max_val = float("0.0766803") + mean = float("-0.00171556") + std = float("0.00775623") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.243086") + max_val = float("0.168519") + mean = float("-0.0174047") + std = float("0.0539075") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.874593") + max_val = float("1.46238") + mean = float("1.10608") + std = float("0.129724") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0243082") + max_val = float("0.146606") + mean = float("0.065561") + std = float("0.0225463") + data = None + + +class Program_weight_tensor_parameter_213: + 
name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.377685") + max_val = float("0.0211826") + mean = float("-0.152586") + std = float("0.0625447") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0501245") + max_val = float("0.0655244") + mean = float("-0.000498586") + std = float("0.0047433") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.311216") + max_val = float("0.0670604") + mean = float("-0.114926") + std = float("0.0802163") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.909789") + max_val = float("1.44627") + mean = float("1.1083") + std = float("0.102016") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.0423617") + max_val = float("0.201795") + mean = float("0.0849754") + std = float("0.0267366") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.51253") + max_val = float("0.245796") + mean = float("-0.162947") + std = float("0.113762") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0574799") + max_val = float("0.0551901") + mean = float("-0.000582995") + std = float("0.00529742") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.444829") + max_val = float("0.412067") + mean = float("-0.137499") + std = float("0.130213") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953772") + max_val = float("1.37354") + mean = float("1.11019") + std = float("0.0723714") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0514176") + max_val = float("0.250132") + mean = float("0.108812") + std = float("0.0355389") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.378405") + max_val = float("0.571278") + mean = float("-0.138561") + std = float("0.0979553") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.061051") + max_val = float("0.105958") + mean = float("-0.000928176") + std = float("0.00919237") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.163915") + max_val = float("0.00105614") + mean = float("-0.0651606") + std = float("0.0261623") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.819806") + max_val = float("1.06636") + mean = float("0.969017") + std = float("0.0461045") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0273588") + max_val = float("0.192653") + mean = float("0.0655929") + std = 
float("0.0180239") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.237023") + max_val = float("0.0322046") + mean = float("-0.10365") + std = float("0.0569664") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0345776") + max_val = float("0.0640798") + mean = float("-0.000792568") + std = float("0.00754612") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("-4.82875") + max_val = float("-0.111921") + mean = float("-2.29535") + std = float("0.775276") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("2.10244") + max_val = float("5.21744") + mean = float("3.70118") + std = float("0.482816") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512] + dtype = "float32" + min_val = float("0.00157807") + max_val = float("0.0137836") + mean = float("0.00446205") + std = float("0.001576") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [512] + dtype = "float32" + min_val = float("-0.127648") + max_val = float("0.0819897") + mean = float("-0.0385038") + std = float("0.0254054") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.106633") + max_val = float("0.153101") + mean = float("-0.00124312") + std = float("0.00940861") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [384] + dtype = "float32" + min_val = float("-0.0177346") + max_val = float("-0.000449668") + mean = float("-0.00661776") + std = float("0.00402364") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.242919") + max_val = float("0.18048") + mean = float("-0.00256832") + std = float("0.00810944") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("-2.38813") + max_val = float("3.15948") + mean = float("-0.20409") + std = float("0.562397") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("0.123214") + max_val = float("2.40548") + mean = float("0.524593") + std = float("0.334862") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192] + dtype = "float32" + min_val = float("0.000155224") + max_val = float("0.00512515") + mean = float("0.000857816") + std = float("0.000608603") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.0833487") + max_val = float("0.118251") + mean = float("0.0141678") + std = float("0.0272982") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0711458") + max_val = float("0.063938") + mean = float("-0.000378359") + std = float("0.00604257") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + 
min_val = float("-2.38813") + max_val = float("3.15949") + mean = float("-0.20409") + std = float("0.562397") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("0.67679") + max_val = float("3.0739") + mean = float("1.5449") + std = float("0.450885") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192] + dtype = "float32" + min_val = float("0.00209578") + max_val = float("0.0395518") + mean = float("0.00889543") + std = float("0.00490051") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.279066") + max_val = float("0.257507") + mean = float("0.0175742") + std = float("0.0590098") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.089924") + max_val = float("0.0801576") + mean = float("-0.00010217") + std = float("0.00534983") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("-3.43186") + max_val = float("1.16924") + mean = float("-1.42852") + std = float("0.634791") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("0.390187") + max_val = float("1.72859") + mean = float("1.08988") + std = float("0.190312") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192] + dtype = "float32" + min_val = float("0.0524798") + max_val = float("0.235788") + mean = float("0.0978559") + std = float("0.0320899") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-1.53618") + max_val = float("0.347559") + mean = float("-0.263158") + std = float("0.217919") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0837804") + max_val = float("0.0659382") + mean = float("-0.000463374") + std = float("0.00629325") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("-3.8771") + max_val = float("4.23734") + mean = float("-0.629717") + std = float("0.988013") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("0.580515") + max_val = float("4.17488") + mean = float("1.54499") + std = float("0.398695") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192] + dtype = "float32" + min_val = float("0.0049061") + max_val = float("0.0342126") + mean = float("0.0119678") + std = float("0.0050514") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.233705") + max_val = float("0.19222") + mean = float("0.0611703") + std = float("0.0465953") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.114335") + max_val = float("0.0851333") + mean = float("-0.00162313") + std = float("0.0113559") + data = None + + +class Program_weight_tensor_parameter_257: + name = 
"parameter_257" + shape = [192] + dtype = "float32" + min_val = float("-2.93792") + max_val = float("1.02442") + mean = float("-0.427162") + std = float("0.681544") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("0.698407") + max_val = float("3.61091") + mean = float("1.48124") + std = float("0.505556") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("0.00266091") + max_val = float("0.0168383") + mean = float("0.00541367") + std = float("0.00205341") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.0974047") + max_val = float("0.103422") + mean = float("0.0197739") + std = float("0.0351046") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0719896") + max_val = float("0.0846321") + mean = float("-0.000697024") + std = float("0.00905481") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [384] + dtype = "float32" + min_val = float("-2.84257") + max_val = float("1.12262") + mean = float("-0.753662") + std = float("0.497191") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [384] + dtype = "float32" + min_val = float("0.418336") + max_val = float("1.80344") + mean = float("0.867791") + std = float("0.218116") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [384] + dtype = "float32" + min_val = float("0.00942549") + max_val = float("0.0797359") + mean = float("0.0194219") + std = float("0.0078047") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.614057") + max_val = float("0.347293") + mean = float("0.0240462") + std = float("0.112432") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0516956") + max_val = float("0.0616539") + mean = float("-0.000224183") + std = float("0.00523626") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [256] + dtype = "float32" + min_val = float("-2.81742") + max_val = float("1.46565") + mean = float("-1.07843") + std = float("0.633359") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [256] + dtype = "float32" + min_val = float("0.431212") + max_val = float("1.77033") + mean = float("0.978328") + std = float("0.170529") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [256] + dtype = "float32" + min_val = float("0.00259178") + max_val = float("0.0171178") + mean = float("0.00690838") + std = float("0.00206204") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [256] + dtype = "float32" + min_val = float("-0.251608") + max_val = float("0.233115") + mean = float("-0.065386") + std = float("0.0828144") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.119069") + max_val = float("0.205117") + mean = float("-0.00133732") + std = float("0.0166794") + data 
= None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("-0.0231535") + max_val = float("0.0020282") + mean = float("-0.00682996") + std = float("0.00526648") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.27523") + max_val = float("0.197511") + mean = float("-0.00447133") + std = float("0.0117313") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-2.27845") + max_val = float("0.747499") + mean = float("-0.117478") + std = float("0.506892") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96] + dtype = "float32" + min_val = float("-0.0595264") + max_val = float("2.30581") + mean = float("0.261136") + std = float("0.36646") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("8.74855e-12") + max_val = float("0.00326221") + mean = float("0.000651884") + std = float("0.00054916") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("-0.0592549") + max_val = float("0.0904533") + mean = float("0.00809429") + std = float("0.0232161") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0455384") + max_val = float("0.0779468") + mean = float("-0.000396615") + std = float("0.00669089") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-2.27845") + max_val = float("0.747499") + mean = float("-0.117478") + std = float("0.506892") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96] + dtype = "float32" + min_val = float("0.348228") + max_val = float("3.2413") + mean = float("1.29098") + std = float("0.633531") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("0.00453392") + max_val = float("0.0315112") + mean = float("0.0163812") + std = float("0.00628281") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("-0.199779") + max_val = float("0.205548") + mean = float("0.0315405") + std = float("0.0828307") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0720974") + max_val = float("0.0876726") + mean = float("-0.000404154") + std = float("0.00886376") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-2.79674") + max_val = float("1.50545") + mean = float("-1.09186") + std = float("0.696438") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96] + dtype = "float32" + min_val = float("0.320451") + max_val = float("1.80036") + mean = float("1.07335") + std = float("0.213563") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [96] + dtype = "float32" + min_val = float("0.04414") + max_val = float("0.24305") + mean 
= float("0.0904373") + std = float("0.0280397") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [96] + dtype = "float32" + min_val = float("-1.69372") + max_val = float("0.548616") + mean = float("-0.156102") + std = float("0.345438") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0691973") + max_val = float("0.0768993") + mean = float("-0.000721415") + std = float("0.00953274") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [96] + dtype = "float32" + min_val = float("-2.5403") + max_val = float("0.660334") + mean = float("-0.0506846") + std = float("0.473114") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [96] + dtype = "float32" + min_val = float("-0.0773968") + max_val = float("3.15159") + mean = float("0.28098") + std = float("0.409273") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [96] + dtype = "float32" + min_val = float("2.60552e-10") + max_val = float("0.0275112") + mean = float("0.00205075") + std = float("0.00323917") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [96] + dtype = "float32" + min_val = float("-0.0558812") + max_val = float("0.16265") + mean = float("0.0224866") + std = float("0.0340769") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.152419") + max_val = float("0.0876098") + mean = float("-0.00159057") + std = float("0.0100433") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [96] + dtype = "float32" + min_val = float("-2.5403") + max_val = float("0.660335") + mean = float("-0.0506846") + std = float("0.473114") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [96] + dtype = "float32" + min_val = float("0.341096") + max_val = float("2.99377") + mean = float("0.929523") + std = float("0.412162") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("0.0093175") + max_val = float("0.066007") + mean = float("0.0253511") + std = float("0.0103347") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96] + dtype = "float32" + min_val = float("-0.245975") + max_val = float("0.231137") + mean = float("0.0513949") + std = float("0.0826916") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0615515") + max_val = float("0.0640368") + mean = float("-0.000696901") + std = float("0.00903359") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [96] + dtype = "float32" + min_val = float("-2.01753") + max_val = float("1.65546") + mean = float("-0.920704") + std = float("0.650299") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [96] + dtype = "float32" + min_val = float("0.435112") + max_val = float("1.96524") + mean = float("1.06448") + std = float("0.227819") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [96] + dtype = "float32" + min_val = 
float("0.0173697") + max_val = float("0.175391") + mean = float("0.0386346") + std = float("0.0194835") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [96] + dtype = "float32" + min_val = float("-2.4092") + max_val = float("0.31836") + mean = float("-0.045096") + std = float("0.29578") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.129335") + max_val = float("0.160741") + mean = float("-0.000571696") + std = float("0.0102703") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [96] + dtype = "float32" + min_val = float("-1.61322") + max_val = float("1.88206") + mean = float("0.00487736") + std = float("0.837656") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [96] + dtype = "float32" + min_val = float("0.346152") + max_val = float("1.32019") + mean = float("0.701657") + std = float("0.235965") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [96] + dtype = "float32" + min_val = float("0.0173827") + max_val = float("0.0953752") + mean = float("0.0393886") + std = float("0.0142575") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [96] + dtype = "float32" + min_val = float("-0.356234") + max_val = float("0.498266") + mean = float("-0.092226") + std = float("0.131297") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.144182") + max_val = float("0.121205") + mean = float("-0.00166362") + std = float("0.0166493") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [96] + dtype = "float32" + min_val = float("-2.46877") + max_val = float("1.71077") + mean = float("0.339624") + std = float("0.678648") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [96] + dtype = "float32" + min_val = float("0.542507") + max_val = float("4.88036") + mean = float("1.48218") + std = float("0.958568") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("0.0124582") + max_val = float("0.158439") + mean = float("0.0301589") + std = float("0.0178037") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("-0.332188") + max_val = float("0.27597") + mean = float("-0.00036455") + std = float("0.129704") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.110563") + max_val = float("0.21305") + mean = float("-0.000740848") + std = float("0.0169398") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [192] + dtype = "float32" + min_val = float("-4.4449") + max_val = float("2.00993") + mean = float("-0.0987741") + std = float("0.88341") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [192] + dtype = "float32" + min_val = float("0.570946") + max_val = float("4.51323") + mean = float("1.08278") + std = float("0.42537") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [192] + 
dtype = "float32" + min_val = float("0.0127679") + max_val = float("0.141392") + mean = float("0.0381032") + std = float("0.0213252") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [192] + dtype = "float32" + min_val = float("-0.52499") + max_val = float("0.386336") + mean = float("0.0322393") + std = float("0.135226") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.111646") + max_val = float("0.0867232") + mean = float("-0.000368538") + std = float("0.00852941") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [128] + dtype = "float32" + min_val = float("-2.14742") + max_val = float("1.36597") + mean = float("-0.674385") + std = float("0.681344") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [128] + dtype = "float32" + min_val = float("0.375724") + max_val = float("2.2421") + mean = float("0.878112") + std = float("0.235402") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [128] + dtype = "float32" + min_val = float("0.00306557") + max_val = float("0.022173") + mean = float("0.00803682") + std = float("0.00294688") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [128] + dtype = "float32" + min_val = float("-0.307786") + max_val = float("0.262498") + mean = float("-0.0704927") + std = float("0.125184") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.256834") + max_val = float("0.225518") + mean = float("-0.00145085") + std = float("0.0267221") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [96] + dtype = "float32" + min_val = float("-0.0278245") + max_val = float("0.00419023") + mean = float("-0.00867688") + std = float("0.00787205") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.324906") + max_val = float("0.313066") + mean = float("-0.00593829") + std = float("0.0203494") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0747816") + max_val = float("0.0925306") + mean = float("-0.00126573") + std = float("0.0138781") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0931524") + max_val = float("0.118168") + mean = float("-0.000544389") + std = float("0.0144669") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.108693") + max_val = float("0.127411") + mean = float("-0.00117306") + std = float("0.0153537") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0850338") + max_val = float("0.0864447") + mean = float("-0.00287232") + std = float("0.0182123") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.124461") + max_val = float("0.113607") + mean = float("-0.00105662") + std = float("0.0141114") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.104689") + max_val = float("0.0935112") + mean = float("-0.000980916") + std = float("0.0165068") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.127435") + max_val = float("0.134394") + mean = float("-0.00299847") + std = float("0.0264723") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.185866") + max_val = float("0.267945") + mean = float("0.000328969") + std = float("0.0281863") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [96] + dtype = "float32" + min_val = float("-3.32264") + max_val = float("3.83592") + mean = float("0.26557") + std = float("1.21101") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [96] + dtype = "float32" + min_val = float("0.506423") + max_val = float("5.39595") + mean = float("1.12909") + std = float("0.545653") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [96] + dtype = "float32" + min_val = float("0.0247421") + max_val = float("0.271024") + mean = float("0.0700887") + std = float("0.0433429") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [96] + dtype = "float32" + min_val = float("-0.526787") + max_val = float("0.637669") 
+ mean = float("-0.0317124") + std = float("0.186494") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.128108") + max_val = float("0.138471") + mean = float("-0.000338224") + std = float("0.0137454") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.215238") + max_val = float("0.198746") + mean = float("-0.00208971") + std = float("0.0395838") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.183855") + max_val = float("0.169069") + mean = float("-0.012646") + std = float("0.0278793") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.115204") + max_val = float("0.156112") + mean = float("-0.00147512") + std = float("0.0282583") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.10708") + max_val = float("0.116236") + mean = float("-0.000653134") + std = float("0.0242895") + data = None + + +class 
Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.141704") + max_val = float("0.152941") + mean = float("1.08049e-05") + std = float("0.0271325") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.214059") + max_val = float("0.202656") + mean = float("-0.00405349") + std = float("0.0407002") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.233899") + max_val = float("0.179146") + mean = float("-0.0011248") + std = float("0.0453221") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [48, 32, 3, 
3] + dtype = "float32" + min_val = float("-0.155663") + max_val = float("0.153256") + mean = float("-0.000687016") + std = float("0.0233493") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.275898") + max_val = float("0.282368") + mean = float("-0.00118724") + std = float("0.0397104") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.339907") + max_val = float("0.372339") + mean = float("-0.000570874") + std = float("0.0527294") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.244337") + max_val = float("0.297699") + mean = float("-0.00453704") + std = float("0.0747566") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..8a3ea7694 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +a29b29f75d677bfc4d3d22fe15a38544cc611ed4c5426ad85c9ccd866320ff3a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/input_meta.py new file mode 100644 index 000000000..0937a478f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400] + dtype = "int32" + min_val = 0 + max_val = 1 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/model.py new file mode 100644 index 000000000..e57fc793b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (2x-1xb) <- (2x-1xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (2x-1xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_net.json new file mode 100644 index 000000000..399d8354e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_human", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py new file mode 100644 index 000000000..4ea29a61c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.241028] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [1.26015] + + +class Program_weight_tensor_data_2: + name = 
"data_2" + shape = [] + dtype = "float32" + data = [1.07448] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..df6fb86a6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py new file mode 100644 index 000000000..dce0b815d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("9.31323e-10") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 5376, 4] + dtype = "float32" + max_val = float("0.945922") + mean = float("0.00102082") + std = float("0.0260199") + data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py new file mode 100644 index 000000000..7c04ede5b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (2x-1x4xf32) <- (2x-1x5xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (2x-1x4xf32) <- (2x-1x4xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (2x-1x4xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (2x-1x4xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + 
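Note for reviewers: the sketch below is not part of the patch; it is a minimal, illustrative way to exercise one of the extracted subgraphs locally. It loads the generated GraphModule for PP-YOLOE-S_vehicle/subgraph_0 via importlib (the sample directories contain hyphens, so they are not importable as packages) and feeds random tensors shaped according to that subgraph's input_meta.py. The load_graph_module helper and the relative path are assumptions for illustration only.

    # Minimal smoke-test sketch, assuming the repository layout added by this patch.
    import importlib.util
    import paddle

    def load_graph_module(model_py_path):
        # Load model.py as a standalone module (hyphenated directories are not packages).
        spec = importlib.util.spec_from_file_location("sample_model", model_py_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module.GraphModule()

    net = load_graph_module(
        "paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py"
    )

    # Shapes and dtypes follow subgraph_0/input_meta.py; the values are random stand-ins.
    data_0 = paddle.clip(paddle.rand([2, 5376, 4]), 1e-4, 1.0 - 1e-4)  # predicted scores in (0, 1)
    data_1 = paddle.randint(0, 5, [2, 5376], dtype="int32")            # assigned class indices, 0..4
    data_2 = paddle.clip(paddle.rand([2, 5376, 4]), 0.0, 1.0)          # soft targets in [0, 1]

    loss = net(data_0, data_1, data_2)  # scalar loss value produced by the subgraph
    print(float(loss))

The same pattern applies to the other subgraphs in this patch: pick the model.py of interest and build inputs from the shapes and dtypes recorded in its input_meta.py (and, where present, weight_meta.py).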
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..ff5ee4421 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +926ad786388dcdbc5641be69f210f8d6d088d56e6f57512e3ca7ee358a2b968b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py new file mode 100644 index 000000000..12c56f973 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [5376] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000612289") + std = float("0.0247369") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00734747") + std = float("0.0854019") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("326.78") + mean = float("160.052") + std = float("110.95") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("0.00716361") + mean = float("2.14567e-06") + std = float("8.59189e-05") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("0.945922") + mean = float("0.00158376") + std = float("0.0267929") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py new file mode 100644 index 000000000..4ae94d4a6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py @@ -0,0 +1,201 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + 
multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (4xi64) <- () + full_7 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_7 + 
+ # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..babd6567f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/input_meta.py new file mode 100644 index 000000000..722001275 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 12096, 4] + dtype = 
"float32" + min_val = float("0.000468483") + max_val = float("0.715156") + mean = float("0.0165239") + std = float("0.021405") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 12096, 68] + dtype = "float32" + min_val = float("-4.94556") + max_val = float("11.8664") + mean = float("7.84781e-06") + std = float("1.47238") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [12096, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("764.0") + mean = float("384.0") + std = float("221.675") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [12096, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py new file mode 100644 index 000000000..2d525f6d3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3, data_4): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_3, data_4) + del data_3 + + # pd_op.shape64: (3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_2, stack_0) + del data_2, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" 
+ ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) + share_data__0 = data_1.detach() + del data_1 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_4, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..273d2ae5d --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +b8f543de30ca185d01a94256b2c02f15007155c859cd8c5c4308a69806d92a45 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py new file mode 100644 index 000000000..3217a39a7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py @@ -0,0 +1,91 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0250775") + std = float("0.157259") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 14, 3549] + dtype = "float32" + max_val = float("0.973582") + mean = float("0.00986958") + std = float("0.0654711") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 14, 3549] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00179125") + std = float("0.0422852") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 14, 1] + dtype = "int32" + data = [ + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 3, + 0, + 0, + 0, + 0, + 3, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 3, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 14, 4] + dtype = "float32" + max_val = float("384.824") + mean = float("133.114") + std = float("96.9844") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 14, 3549] + dtype = "float32" + max_val = float("0.00888292") + mean = float("1.5455e-05") + std = float("0.000268339") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py new file mode 100644 index 000000000..2432102f1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py @@ -0,0 +1,229 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 14, 1] + + # pd_op.tile: (2x14x3549xb) <- (2x1x3549xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + 
full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("14"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x14xf32) <- (2x3549xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x14x3549xf32) <- (2x3549x14xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x14x3549xf32) <- (2x14x3549xb, 2x14x3549xf32, 2x14x3549xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x3549xf32) <- (2x14x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("14"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (28xi32) <- (2x14x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (7098xi64) <- (2x3549xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (7098xi32) <- (28xi32, 7098xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 3549] + + # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (28x4xf32) <- (2x14x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (7098x4xf32) <- (28x4xf32, 7098xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, 
reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 3549, 4] + + # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x5xf32) <- (2x3549xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x3549x4xf32) <- (2x3549x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x14x1xf32) <- (2x14x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x3549xf32) <- (2x14x3549xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x3549x4xf32) <- (2x3549x4xf32, 2x3549x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..5677bb473 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +90cbaafb4a2694392a3b8cfac44e3931402031cc7dfec40c50c3d7cbd5f0c41b \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py new file mode 100644 index 000000000..5a54b3b62 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py @@ -0,0 +1,83 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [10164] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 10164] + dtype = "float32" + max_val = float("2.0") + mean = float("0.00491932") + std = float("0.0706648") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 10164] + dtype = "float32" + max_val = float("0.964484") + mean = float("0.00145547") + std = float("0.0263451") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 12, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000409944") + std = float("0.0202429") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("658.644") + mean = float("251.921") + std = float("253.674") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 12, 10164] + dtype = "float32" + max_val = float("0.00990145") + mean = float("2.84369e-06") + std = float("0.000124311") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py new file mode 100644 index 000000000..d3b764f89 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py @@ -0,0 +1,264 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- 
(xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 + ) + del argmax_0, data_2 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) + del data_7 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # 
pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_10, float("1e-09"), True) + del full_10, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/weight_meta.py new file mode 
100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/input_meta.py new file mode 100644 index 000000000..ca7ee63ae --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [24] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [24] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 384, 24, 24] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("5.8224") + mean = float("0.222827") + std = float("0.550168") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 192, 48, 48] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.60545") + mean = float("0.294541") + std = float("0.619793") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 96, 96, 96] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.11941") + mean = float("0.398206") + std = float("0.662797") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py new file mode 100644 index 000000000..e6e9cf731 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + 
parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = 
paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, 
-1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, 
-1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, 
sigmoid_0) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (2x68x-1x-1xf32) <- 
(2x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = 
paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + 
batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x96x-1x-1xf32, 4x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) + transpose_4 = 
paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32]) <- (2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x-1x4xf32) <- ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/weight_meta.py new file mode 100644 index 000000000..7e28e029a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.01867") + max_val = float("0.0312348") + mean = float("1.29832e-07") + std = float("0.00771859") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.190366") + max_val = float("0.209062") + mean = float("3.7835e-08") + std = float("0.0111262") + data = None + + +class 
Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149282") + max_val = float("0.349186") + mean = float("0.0834577") + std = float("0.116457") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.919176") + max_val = float("2.00814") + mean = float("1.39768") + std = float("0.216931") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000271733") + max_val = float("0.00590923") + mean = float("0.00157361") + std = float("0.00111281") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0956086") + max_val = float("0.0352103") + mean = float("-0.0141469") + std = float("0.0252637") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0961666") + max_val = float("0.113657") + mean = float("-0.0013113") + std = float("0.0138926") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.0102701") + max_val = float("0.010842") + mean = float("-0.000304252") + std = float("0.00455448") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0201881") + max_val = float("0.0236108") + mean = float("-9.55467e-05") + std = float("0.00307889") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0345001") + max_val = float("0.0452811") + mean = float("0.000588751") + std = float("0.00700596") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.661577") + max_val = float("1.11988") + mean = float("0.207977") + std = float("0.336067") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.77465") + max_val = float("1.56598") + mean = float("1.11194") + std = float("0.140079") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000333886") + max_val = float("0.0117272") + mean = float("0.00165627") + std = float("0.00164869") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.209036") + max_val = float("0.113816") + mean = float("-0.0263839") + std = float("0.0475555") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0900876") + max_val = float("0.0968945") + mean = float("-0.00172322") + std = float("0.0125328") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00596337") + max_val = float("0.00750892") + mean = float("-0.000550842") + std = float("0.00263204") + data = None + + +class 
Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0532268") + max_val = float("0.0784856") + mean = float("-0.000321678") + std = float("0.00341917") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00592061") + max_val = float("0.0247132") + mean = float("1.32481e-07") + std = float("0.00592226") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.147978") + max_val = float("0.187543") + mean = float("-7.52334e-09") + std = float("0.00734942") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.112021") + max_val = float("0.136867") + mean = float("0.0501839") + std = float("0.042905") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.942477") + max_val = float("1.48633") + mean = float("1.20882") + std = float("0.101319") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000288076") + max_val = float("0.00451999") + mean = float("0.00132633") + std = float("0.000892365") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0538666") + max_val = float("0.022064") + mean = float("-0.00861151") + std = float("0.011553") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0604445") + max_val = float("0.104465") + mean = float("-0.00035645") + std = float("0.00636366") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00821074") + max_val = float("0.0088804") + mean = float("-9.74108e-05") + std = float("0.00329883") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00943089") + max_val = float("0.0136782") + mean = float("-9.81002e-05") + std = float("0.00127399") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0210322") + max_val = float("0.0177619") + mean = float("0.000586926") + std = float("0.00393367") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.291016") + max_val = float("0.608033") + mean = float("0.147424") + std = float("0.158974") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913559") + max_val = float("1.49704") + mean = float("1.08711") + std = float("0.081868") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000300777") + max_val = float("0.00617361") + mean = float("0.00150893") + std = float("0.00100147") + 
data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.140336") + max_val = float("0.0238392") + mean = float("-0.035943") + std = float("0.0279579") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0585124") + max_val = float("0.0465296") + mean = float("-0.0011076") + std = float("0.00599635") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.0048463") + max_val = float("0.00933846") + mean = float("-0.000150099") + std = float("0.00178424") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0175125") + max_val = float("0.0225495") + mean = float("-0.000152001") + std = float("0.00123854") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00490643") + max_val = float("0.00925847") + mean = float("1.23866e-07") + std = float("0.00411753") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0794817") + max_val = float("0.102581") + mean = float("1.60253e-08") + std = float("0.00459103") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0756332") + max_val = float("0.111576") + mean = float("0.0118709") + std = float("0.0354367") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("0.969313") + max_val = float("1.49445") + mean = float("1.16933") + std = float("0.0775608") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("7.70018e-05") + max_val = float("0.00657961") + mean = float("0.000860292") + std = float("0.000700233") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0400199") + max_val = float("0.0158384") + mean = float("-0.0040727") + std = float("0.00851873") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.057676") + max_val = float("0.0600769") + mean = float("-0.000136005") + std = float("0.00316255") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00427527") + max_val = float("0.00492272") + mean = float("-5.07143e-05") + std = float("0.00221192") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0242271") + max_val = float("0.00942592") + mean = float("-3.94584e-05") + std = float("0.000747219") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00842134") + max_val = float("0.0140129") + 
mean = float("0.000298813") + std = float("0.00184562") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.369098") + max_val = float("0.49456") + mean = float("0.034985") + std = float("0.121491") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.883329") + max_val = float("1.55445") + mean = float("1.05782") + std = float("0.0835191") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000128458") + max_val = float("0.00767343") + mean = float("0.000900769") + std = float("0.00072817") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.105285") + max_val = float("0.0292789") + mean = float("-0.0262692") + std = float("0.0195137") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0387553") + max_val = float("0.0393238") + mean = float("-0.000523755") + std = float("0.00328344") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0106934") + max_val = float("0.00930153") + mean = float("-0.000115954") + std = float("0.00131161") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.084446") + max_val = float("0.0443376") + mean = float("-3.32786e-05") + std = float("0.000884257") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py new file mode 100644 index 000000000..5e383c184 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.197761] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [1.17599] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [3.1444] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py @@ -0,0 +1,43 @@ 
+import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..6198709b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +fb732d9fddca2e574feb08da62b81b28bc53bcf9dfe5a1522a8ffb71ef252d85 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/input_meta.py new file mode 100644 index 000000000..a6962fdb0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 384, 19, 19] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.12013") + mean = float("0.220692") + std = float("0.554071") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 192, 38, 38] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.89113") + mean = float("0.297275") + std = float("0.613829") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 96, 76, 76] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.154") + mean = float("0.396337") + std = float("0.633813") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py new file mode 100644 index 000000000..3494309b2 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("19"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (19xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (19xf32) <- (19xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (19xf32) <- (19xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (19xf32) <- (19xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([19xf32, 19xf32]) <- (19xf32, 19xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([19x19xf32, 19x19xf32]) <- ([19xf32, 19xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (19x19xf32, 19x19xf32) <- ([19x19xf32, 19x19xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32]) <- (19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (19x19x4xf32) <- ([19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([19x19xf32, 19x19xf32]) <- (19x19xf32, 19x19xf32) + combine_2 = [split_1, split_0] + 
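The arange/meshgrid/stack sequence in this subgraph builds the static anchor grid for one 19x19, stride-32 feature level. A compact sketch of the same layout (a hedged reading of the generated ops: centers at (i + 0.5) * stride, candidate boxes spanning +/- 2.5 * stride, i.e. the +/- 80 offsets) is:

import paddle

# Anchor centers and candidate boxes for a 19x19 feature map with stride 32.
stride, size = 32, 19
coords = (paddle.arange(size, dtype="float32") + 0.5) * stride
yy, xx = paddle.meshgrid(coords, coords)
anchor_points = paddle.stack([xx, yy], axis=-1).reshape([-1, 2])  # (361, 2)
anchors = paddle.concat(
    [anchor_points - 2.5 * stride, anchor_points + 2.5 * stride], axis=-1
)  # (361, 4)
print(anchors.shape, anchor_points.shape)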
del split_0, split_1 + + # pd_op.stack: (19x19x2xf32) <- ([19x19xf32, 19x19xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (361x4xf32) <- (19x19x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (361x2xf32) <- (19x19x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (361x1xf32) <- () + full_5 = paddle._C_ops.full( + [361, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("38"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (38xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (38xf32) <- (38xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (38xf32) <- (38xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (38xf32) <- (38xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([38xf32, 38xf32]) <- (38xf32, 38xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([38x38xf32, 38x38xf32]) <- ([38xf32, 38xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (38x38xf32, 38x38xf32) <- ([38x38xf32, 38x38xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32]) <- (38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (38x38x4xf32) <- ([38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([38x38xf32, 38x38xf32]) <- (38x38xf32, 38x38xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (38x38x2xf32) <- ([38x38xf32, 38x38xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1444x4xf32) <- (38x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1444x2xf32) <- (38x38x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1444x1xf32) <- () + full_8 = paddle._C_ops.full( + [1444, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("76"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (76xi64) <- (1xf64, 
1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (76xf32) <- (76xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (76xf32) <- (76xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (76xf32) <- (76xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([76xf32, 76xf32]) <- (76xf32, 76xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([76x76xf32, 76x76xf32]) <- ([76xf32, 76xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (76x76xf32, 76x76xf32) <- ([76x76xf32, 76x76xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32]) <- (76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (76x76x4xf32) <- ([76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([76x76xf32, 76x76xf32]) <- (76x76xf32, 76x76xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (76x76x2xf32) <- ([76x76xf32, 76x76xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (5776x4xf32) <- (76x76x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (5776x2xf32) <- (76x76x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (5776x1xf32) <- () + full_11 = paddle._C_ops.full( + [5776, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([361x4xf32, 1444x4xf32, 5776x4xf32]) <- (361x4xf32, 1444x4xf32, 5776x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (7581x4xf32) <- ([361x4xf32, 1444x4xf32, 5776x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([361x2xf32, 1444x2xf32, 5776x2xf32]) <- (361x2xf32, 1444x2xf32, 5776x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (7581x2xf32) <- ([361x2xf32, 1444x2xf32, 5776x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([361x1xf32, 1444x1xf32, 5776x1xf32]) <- (361x1xf32, 1444x1xf32, 5776x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (7581x1xf32) <- 
([361x1xf32, 1444x1xf32, 5776x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x19x19xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x19x19xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x4x19x19xf32) <- (2x384x19x19xf32, 4x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x4x19x19xf32) <- (2x4x19x19xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del 
data_0 + + # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x19x19xf32) <- (2x384x19x19xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x68x19x19xf32) <- (2x68x19x19xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x4x19x19xf32) <- (2x4x19x19xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x4x361xf32) <- (2x4x19x19xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x361x4xf32) <- (2x4x361xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x361xf32) <- (2x68x19x19xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x361x68xf32) <- (2x68x361xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x38x38xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x38x38xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x4x38x38xf32) <- (2x192x38x38xf32, 4x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x4x38x38xf32) <- (2x4x38x38xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x38x38xf32) <- (2x192x38x38xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x68x38x38xf32) <- (2x68x38x38xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x4x38x38xf32) <- (2x4x38x38xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x4x1444xf32) <- (2x4x38x38xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x1444x4xf32) <- (2x4x1444xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x1444xf32) <- (2x68x38x38xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x1444x68xf32) <- (2x68x1444xf32) + transpose_3 = 
paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x76x76xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x96x76x76xf32) <- (2x96x76x76xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x76x76xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x4x76x76xf32) <- (2x96x76x76xf32, 4x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x4x76x76xf32) <- (2x4x76x76xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x96x76x76xf32) <- (2x96x76x76xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + 
batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x76x76xf32) <- (2x96x76x76xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x68x76x76xf32) <- (2x68x76x76xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x4x76x76xf32) <- (2x4x76x76xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x4x5776xf32) <- (2x4x76x76xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x5776x4xf32) <- (2x4x5776xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x5776xf32) <- (2x68x76x76xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x5776x68xf32) <- (2x68x5776xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x361x4xf32, 2x1444x4xf32, 2x5776x4xf32]) <- (2x361x4xf32, 2x1444x4xf32, 2x5776x4xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x7581x4xf32) <- ([2x361x4xf32, 2x1444x4xf32, 2x5776x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x361x68xf32, 2x1444x68xf32, 2x5776x68xf32]) <- (2x361x68xf32, 2x1444x68xf32, 2x5776x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x7581x68xf32) <- ([2x361x68xf32, 2x1444x68xf32, 2x5776x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + 
multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/weight_meta.py new file mode 100644 index 000000000..32dff16bc --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/weight_meta.py @@ -0,0 +1,574 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0188682") + max_val = float("0.0309226") + mean = float("1.30196e-07") + std = float("0.00768398") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.190663") + max_val = float("0.207798") + mean = float("3.77768e-08") + std = float("0.0110706") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149254") + max_val = float("0.349168") + mean = float("0.0835276") + std = float("0.116463") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.918143") + max_val = float("2.00825") + mean = float("1.39798") + std = float("0.216882") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000162607") + max_val = float("0.00388893") + mean = float("0.00088138") + std = float("0.000786205") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0830223") + max_val = float("0.0280102") + mean = float("-0.0145027") + std = float("0.0223857") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0933541") + max_val = float("0.110091") + mean = float("-0.00111545") + std = float("0.0134611") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.0100145") + max_val = float("0.0105314") + mean = float("-0.000295481") + std = float("0.00444221") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0198113") + max_val = float("0.0231775") + mean = float("-9.15599e-05") + std = float("0.00302891") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 96, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.661881") + max_val = float("1.12004") + mean = float("0.208231") + std = 
float("0.336128") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.775077") + max_val = float("1.56685") + mean = float("1.11233") + std = float("0.140103") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000182489") + max_val = float("0.00894932") + mean = float("0.000931807") + std = float("0.00131154") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.16193") + max_val = float("0.101819") + mean = float("-0.0215767") + std = float("0.0431567") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0878642") + max_val = float("0.0947074") + mean = float("-0.00114346") + std = float("0.0122243") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00572376") + max_val = float("0.00791657") + mean = float("-0.000547455") + std = float("0.0026065") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0533008") + max_val = float("0.0785588") + mean = float("-0.00032157") + std = float("0.00341333") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00589985") + max_val = float("0.0234623") + mean = float("1.32888e-07") + std = float("0.00591045") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.147936") + max_val = float("0.186712") + mean = float("-7.50879e-09") + std = float("0.00730501") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.112221") + max_val = float("0.136917") + mean = float("0.0501883") + std = float("0.0429377") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.941362") + max_val = float("1.48625") + mean = float("1.20879") + std = float("0.101322") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000121594") + max_val = float("0.00402415") + mean = float("0.000757792") + std = float("0.000756284") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0461994") + max_val = float("0.0128936") + mean = float("-0.00724618") + std = float("0.0100391") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0579905") + max_val = float("0.102794") + mean = float("-0.000307686") + std = float("0.00616114") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00813584") + max_val = float("0.00870546") + mean = float("-9.65097e-05") + std = float("0.00325838") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + 
min_val = float("-0.00935234") + max_val = float("0.013743") + mean = float("-9.82735e-05") + std = float("0.00126391") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.290962") + max_val = float("0.608133") + mean = float("0.147555") + std = float("0.158996") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913507") + max_val = float("1.49828") + mean = float("1.08729") + std = float("0.0819726") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000202706") + max_val = float("0.00673322") + mean = float("0.00112525") + std = float("0.00107757") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.135766") + max_val = float("0.0202504") + mean = float("-0.0332958") + std = float("0.0267069") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0583346") + max_val = float("0.046482") + mean = float("-0.000950949") + std = float("0.00586351") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00471561") + max_val = float("0.00999051") + mean = float("-0.000148549") + std = float("0.00175949") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0174469") + max_val = float("0.022582") + mean = float("-0.000151254") + std = float("0.00123312") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00497465") + max_val = float("0.00940788") + mean = float("1.24041e-07") + std = float("0.00409397") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0794595") + max_val = float("0.101147") + mean = float("1.60398e-08") + std = float("0.00457391") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0755618") + max_val = float("0.111409") + mean = float("0.0118624") + std = float("0.0354419") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("0.969196") + max_val = float("1.49475") + mean = float("1.1693") + std = float("0.0775768") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("6.75531e-05") + max_val = float("0.00282806") + mean = float("0.000353576") + std = float("0.000318727") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0411649") + max_val = float("0.012051") + mean = float("-0.00318295") + std = 
float("0.00529963") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0573293") + max_val = float("0.0588992") + mean = float("-0.000134053") + std = float("0.00308464") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00427222") + max_val = float("0.00491835") + mean = float("-5.0739e-05") + std = float("0.00220072") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0241554") + max_val = float("0.00942843") + mean = float("-3.95892e-05") + std = float("0.00074492") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 384, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.369103") + max_val = float("0.494598") + mean = float("0.0350349") + std = float("0.121492") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.883363") + max_val = float("1.55475") + mean = float("1.05789") + std = float("0.0835507") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000114828") + max_val = float("0.00443528") + mean = float("0.000784919") + std = float("0.000672251") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.102214") + max_val = float("0.0300733") + mean = float("-0.024105") + std = float("0.0192775") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0387616") + max_val = float("0.0395992") + mean = float("-0.000463294") + std = float("0.00326359") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0105386") + max_val = float("0.00939931") + mean = float("-0.000115501") + std = float("0.0013139") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0842626") + max_val = float("0.0443552") + mean = float("-3.33189e-05") + std = float("0.000884768") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..94ae09a2a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +6b4257ba1ea6147f1b8bb7bffb48a1fb61d1b3eb51b1c340874756e4ffc52693 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": 
"PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py new file mode 100644 index 000000000..2525ffce1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py @@ -0,0 +1,52 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 7581, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("2.79397e-09") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 7581, 4] + dtype = "float32" + min_val = float("-231.068") + max_val = float("849.397") + mean = float("303.519") + std = float("185.947") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [7581, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("604.0") + mean = float("304.0") + std = float("175.48") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1, 1] + dtype = "int32" + data = [0, 3] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1, 4] + dtype = "float32" + data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 1, 1] + dtype = "float32" + data = [1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py new file mode 100644 index 000000000..950175fb3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py @@ -0,0 +1,338 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x1x1x4xf32) <- (2x1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0) + del data_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x7581x4xf32) <- (2x7581x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x1x1x2xf32) <- (2x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x1x1x2xf32) <- (2x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x7581x2xf32) <- (2x1x7581x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x7581x2xf32) <- (2x1x7581x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x1x7581x2xf32) <- (2x1x1x2xf32, 2x1x7581x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x1x7581x2xf32) <- (2x1x1x2xf32, 2x1x7581x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x1x7581x2xf32) <- 
(2x1x7581x2xf32, 2x1x7581x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x1x7581xf32) <- (2x1x7581x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x1x1x2xf32) <- (2x1x1x2xf32, 2x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x1x1x2xf32) <- (2x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x1x1xf32) <- (2x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 2x1x7581x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (2x1x7581xf32) <- (2x1x7581x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x1x7581xf32) <- (2x1x1xf32, 2x1x7581xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1x7581xf32) <- (2x1x7581xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del full_2, subtract_3 + + # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x4x7581xf32) <- (2x7581x4xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 1] + + # pd_op.tile: (2x1xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x1xi32) <- (2x1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) + del data_3 + + # builtin.combine: ([2x1xi32, 2x1xi32]) <- (2x1xi32, 2x1xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: 
(2x1x2xi32) <- ([2x1xi32, 2x1xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x1x7581xf32) <- (2x4x7581xf32, 2x1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x1x7581xf32) <- (2x1x7581xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x1x7581xf32) <- (2x1x7581xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x7581x2xf32) <- (7581x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2, full_int_array_6 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x7581x1xf32, 1x1x7581x1xf32]) <- (1x1x7581x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # builtin.split: (1x1x7581x1xf32, 1x1x7581x1xf32) <- ([1x1x7581x1xf32, 1x1x7581x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32]) <- (2x1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32) <- ([2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x1x7581x1xf32) <- (1x1x7581x1xf32, 2x1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x1x7581x1xf32) <- (1x1x7581x1xf32, 2x1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x1x7581x1xf32) <- (2x1x1x1xf32, 1x1x7581x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x1x7581x1xf32) <- (2x1x1x1xf32, 1x1x7581x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32]) <- (2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x1x7581x4xf32) <- ([2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1, full_7 + + # pd_op.min: (2x1x7581xf32) <- (2x1x7581x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x1x7581xb) <- (2x1x7581xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del full_8, min_0 + + # pd_op.cast: (2x1x7581xf32) <- (2x1x7581xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x1x7581xf32) <- 
(2x1x7581xf32, 2x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x1x13xf32, 2x1x13xi64) <- (2x1x7581xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_9, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_9, multiply_1 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("7581"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x1x13x7581xf32) <- (2x1x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 + ) + del full_10, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x1x7581xf32) <- (2x1x13x7581xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_5) + del sum_0 + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) + del cast_0, multiply_2 + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) + del data_5, multiply_3 + + # pd_op.sum: (2x7581xf32) <- (2x1x7581xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x7581xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) + del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..a966d3244 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +de7af2c23556cc5dc0e9a4dcaa2df9ba4a70606752e4671dd5b9d5a4d76e5de7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py new file mode 100644 index 000000000..f58dc071b --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 640, 640] + dtype = "float32" + min_val = float("-2.01516") + max_val = float("2.64") + mean = float("0.187747") + std = float("0.681331") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py new file mode 100644 index 000000000..f20830b9f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py @@ -0,0 +1,4273 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + 
parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + 
parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + data_0, + ): + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_422, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_422 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_421, + parameter_420, + parameter_419, + parameter_418, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_418, parameter_419, parameter_420, parameter_421 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_417, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_417, swish_0 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_416, + parameter_415, + parameter_414, + parameter_413, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_413, parameter_414, parameter_415, parameter_416 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_412, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_412, swish_1 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_411, + parameter_410, + parameter_409, + parameter_408, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_408, parameter_409, parameter_410, parameter_411 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = 
paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_407, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_407, swish_2 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_406, + parameter_405, + parameter_404, + parameter_403, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_403, parameter_404, parameter_405, parameter_406 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_402, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_402 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_401, + parameter_400, + parameter_399, + parameter_398, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_398, parameter_399, parameter_400, parameter_401 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397, swish_3 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_393, parameter_394, parameter_395, parameter_396 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_392, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_392 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_387, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_386, + parameter_385, + parameter_384, + parameter_383, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_383, parameter_384, parameter_385, parameter_386 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_382, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_382, swish_6 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_381, + parameter_380, + parameter_379, + parameter_378, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_378, parameter_379, parameter_380, parameter_381 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + del batch_norm__42, batch_norm__48 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + combine_0 = [swish_4, add_1] + del add_1, swish_4 + + # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_377, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_377 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_376, full_int_array_1) + del parameter_376 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 
1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + del conv2d_9, reshape_0 + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_0, parameter_375, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_375 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_374, + parameter_373, + parameter_372, + parameter_371, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_371, parameter_372, parameter_373, parameter_374 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_370, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_370, swish_8 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_369, + parameter_368, + parameter_367, + parameter_366, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_366, parameter_367, parameter_368, parameter_369 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_9, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_365 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_364, + parameter_363, + parameter_362, + parameter_361, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_361, parameter_362, parameter_363, parameter_364 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_9, parameter_360, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_360, swish_9 + + # 
pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_359, + parameter_358, + parameter_357, + parameter_356, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_356, parameter_357, parameter_358, parameter_359 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_355, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_355 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_354, + parameter_353, + parameter_352, + parameter_351, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_351, parameter_352, parameter_353, parameter_354 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_12, parameter_350, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_350 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_349, + parameter_348, + parameter_347, + parameter_346, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_346, parameter_347, parameter_348, parameter_349 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_12, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_345, swish_12 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_344, + parameter_343, + parameter_342, + parameter_341, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_341, parameter_342, parameter_343, parameter_344 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) + del 
batch_norm__84, batch_norm__90 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(add_3) + del add_3 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_11, swish_13) + del swish_11, swish_13 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_340 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_339, + parameter_338, + parameter_337, + parameter_336, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_336, parameter_337, parameter_338, parameter_339 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_335, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_335 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_334, + parameter_333, + parameter_332, + parameter_331, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_331, parameter_332, parameter_333, parameter_334 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_330, swish_14 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_329, + parameter_328, + parameter_327, + parameter_326, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_326, parameter_327, parameter_328, parameter_329 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + del batch_norm__102, batch_norm__108 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_15) + del add_4, swish_15 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_1 = [swish_10, add_6] + del add_6, swish_10 + + # pd_op.concat: (2x96x-1x-1xf32) <- 
([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_325, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_325 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_324, full_int_array_1) + del parameter_324 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + del conv2d_20, reshape_1 + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_1, parameter_323, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_323 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_318, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__120) + del batch_norm__120 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_17, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_23, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_17, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308, swish_17 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_303, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_298, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_293, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_293, swish_20 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 
96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) + del batch_norm__144, batch_norm__150 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_8) + del add_8 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_19, swish_21) + del swish_19, swish_21 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_288, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__156) + del batch_norm__156 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_22, parameter_283, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_283 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278, swish_22 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) + del batch_norm__162, batch_norm__168 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_10) + del add_10 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_23) + del add_9, swish_23 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_2 = [swish_18, add_11] + del add_11, swish_18 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_273, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_273 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_272, full_int_array_1) + del parameter_272 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + del conv2d_31, reshape_2 + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_2, parameter_271, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_271 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_270, + parameter_269, + parameter_268, + parameter_267, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_267, parameter_268, parameter_269, parameter_270 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_266, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_266 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_265, + parameter_264, + parameter_263, + parameter_262, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + 
else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_262, parameter_263, parameter_264, parameter_265 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_261, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_261 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_260, + parameter_259, + parameter_258, + parameter_257, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_257, parameter_258, parameter_259, parameter_260 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_25, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256, swish_25 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_255, + parameter_254, + parameter_253, + parameter_252, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_252, parameter_253, parameter_254, parameter_255 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_251, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_251 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_250, + parameter_249, + parameter_248, + parameter_247, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_247, parameter_248, parameter_249, parameter_250 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__198) + del batch_norm__198 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_246, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_246 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 
192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_245, + parameter_244, + parameter_243, + parameter_242, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_242, parameter_243, parameter_244, parameter_245 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_241, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_241, swish_28 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_240, + parameter_239, + parameter_238, + parameter_237, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_237, parameter_238, parameter_239, parameter_240 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) + del batch_norm__204, batch_norm__210 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_27, swish_29) + del swish_27, swish_29 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_3 = [swish_26, add_14] + del add_14, swish_26 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_236, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_236 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_235, full_int_array_1) + del parameter_235 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + del conv2d_39, reshape_3 + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_3, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_234 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + 
batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_30 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_32 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = 
paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + del batch_norm__240, batch_norm__246 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_16) + del add_16 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_34, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_34, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_34, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_4 = [swish_34, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_34 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 
192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_6, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_204 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__252) + del batch_norm__252 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_5 = [swish_31, swish_35] + del swish_31, swish_35 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_199 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_36, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_37, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_37 + + # builtin.combine: 
([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_24] + del nearest_interp_0, swish_24 + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_184 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_39, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_39 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 
96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_40, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_40 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_17) + del add_17 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_7 = [swish_38, swish_41] + del swish_38, swish_41 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_42, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_158, + parameter_157, + 
parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_43, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_43 + + # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_16] + del nearest_interp_1, swish_16 + + # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_10, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__312) + del batch_norm__312 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_10, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_149 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_45 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_143, + parameter_142, + 
parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134, swish_46 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + del batch_norm__330, batch_norm__336 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(add_18) + del add_18 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_9 = [swish_44, swish_47] + del swish_44, swish_47 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_11, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_129 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_61, parameter_125, parameter_126, parameter_127, 
parameter_128 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_124, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__348) + del batch_norm__348 + + # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) + combine_10 = [swish_49, swish_42] + del swish_42, swish_49 + + # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_12, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_12, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_114 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_51, parameter_109, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_109, swish_51 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_52, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_52, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99, swish_52 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_19) + del add_19 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_11 = [swish_50, swish_53] + del swish_50, swish_53 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_13, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_94 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + 
batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_54, parameter_89, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__390) + del batch_norm__390 + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_12 = [swish_55, swish_36] + del swish_36, swish_55 + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_14, parameter_84, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_14, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_79 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_57, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_57 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_58, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_58, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_58 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_20) + del add_20 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_13 = [swish_56, swish_59] + del swish_56, swish_59 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del 
combine_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_1 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) + del slice_0, slice_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [1, 1] + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_60, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_76, reshape_4) + del conv2d_76, reshape_4 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_21) + del add_21 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_5 = paddle._C_ops.multiply(swish_60, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_5, parameter_52 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + 
float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_22 = paddle._C_ops.add(swish_61, swish_60) + del swish_61 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + add_22, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_22, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_23 = paddle._C_ops.add(conv2d_78, reshape_5) + del conv2d_78, reshape_5 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_24 = paddle._C_ops.add(conv2d_79, reshape_6) + del conv2d_79, reshape_6 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_24) + del add_24 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_6 = paddle._C_ops.multiply(swish_60, sigmoid_1) + del sigmoid_1, swish_60 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_6, parameter_43 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_62, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_62 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_25 = paddle._C_ops.add(conv2d_81, reshape_7) + del conv2d_81, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = 
paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_4] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_25, stack_0) + del add_25, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_82 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1] + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_82, full_int_array_9) + del conv2d_82 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_23) + del add_23 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_4] + del multiply_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_3 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) + del slice_2, slice_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_54, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_83 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_26 = paddle._C_ops.add(conv2d_83, reshape_10) + del conv2d_83, reshape_10 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_26) + del add_26 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_8 = paddle._C_ops.multiply(swish_54, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_8, parameter_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 
192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(swish_63, swish_54) + del swish_63 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + add_27, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_27, parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_85, reshape_11) + del conv2d_85, reshape_11 + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_86 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_29 = paddle._C_ops.add(conv2d_86, reshape_12) + del conv2d_86, reshape_12 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_29) + del add_29 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_9 = paddle._C_ops.multiply(swish_54, sigmoid_4) + del sigmoid_4, swish_54 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_64, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_64 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + 
add_30 = paddle._C_ops.add(conv2d_88, reshape_13) + del conv2d_88, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_7] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, stack_2) + del add_30, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_89, full_int_array_9) + del conv2d_89 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_28) + del add_28 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_7] + del multiply_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_4 + + # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_48, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_8 + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_31 = paddle._C_ops.add(conv2d_90, reshape_16) + del conv2d_90, reshape_16 + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_31) + del add_31 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_48, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_15 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_32 = paddle._C_ops.add(swish_65, swish_48) + del swish_65 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x96x-1x-1xf32, 4x96x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + add_32, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_32, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_33 = paddle._C_ops.add(conv2d_92, reshape_17) + del conv2d_92, reshape_17 + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_93 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_34 = paddle._C_ops.add(conv2d_93, reshape_18) + del conv2d_93, reshape_18 + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_34) + del add_34 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_48, sigmoid_7) + del sigmoid_7, swish_48 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_6 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_66, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_66 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_95, reshape_19) + del 
conv2d_95, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_10] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_35, stack_4) + del add_35, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_96 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_96, full_int_array_9) + del conv2d_96, full_int_array_9 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_33) + del add_33 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_10] + del full_1, full_2, multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py new file mode 100644 index 000000000..2a1c68dc7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py @@ -0,0 +1,4457 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.018653") + max_val = float("0.0310325") + mean = float("1.30123e-07") + std = float("0.00772894") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.190211") + max_val = float("0.209443") + mean = float("3.7835e-08") + std = float("0.0111182") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149303") + max_val = float("0.349229") + mean = float("0.0834703") + std = float("0.116476") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + 
shape = [96] + dtype = "float32" + min_val = float("0.918914") + max_val = float("2.00844") + mean = float("1.39791") + std = float("0.216949") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000284833") + max_val = float("0.00552148") + mean = float("0.00151597") + std = float("0.00109394") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0946916") + max_val = float("0.0327932") + mean = float("-0.0138573") + std = float("0.0245203") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0951807") + max_val = float("0.113065") + mean = float("-0.00129554") + std = float("0.013839") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.0103042") + max_val = float("0.0108003") + mean = float("-0.000302473") + std = float("0.00453127") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0202563") + max_val = float("0.0235644") + mean = float("-9.45269e-05") + std = float("0.00306802") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0327041") + max_val = float("0.0429711") + mean = float("0.000567976") + std = float("0.00660947") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.66171") + max_val = float("1.12003") + mean = float("0.208007") + std = float("0.336117") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.774741") + max_val = float("1.56622") + mean = float("1.11212") + std = float("0.140094") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000349212") + max_val = float("0.0115798") + mean = float("0.00164328") + std = float("0.00164938") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.201551") + max_val = float("0.114135") + mean = float("-0.0261146") + std = float("0.047028") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0897622") + max_val = float("0.0968557") + mean = float("-0.00169456") + std = float("0.0125057") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.0059556") + max_val = float("0.00753131") + mean = float("-0.000550704") + std = float("0.0026296") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0532542") + max_val = float("0.0784924") + mean = float("-0.000321779") + std = float("0.00341872") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = 
[68] + dtype = "float32" + min_val = float("-0.00592671") + max_val = float("0.0245108") + mean = float("1.32248e-07") + std = float("0.00592063") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.148013") + max_val = float("0.188684") + mean = float("-7.52334e-09") + std = float("0.00734346") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.112013") + max_val = float("0.136873") + mean = float("0.0501926") + std = float("0.042912") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.942516") + max_val = float("1.48651") + mean = float("1.209") + std = float("0.101334") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000274533") + max_val = float("0.00473256") + mean = float("0.00131755") + std = float("0.00090147") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0539903") + max_val = float("0.0221637") + mean = float("-0.00864077") + std = float("0.011364") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0600062") + max_val = float("0.103906") + mean = float("-0.000355432") + std = float("0.00633016") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00820877") + max_val = float("0.00880323") + mean = float("-9.72734e-05") + std = float("0.0032937") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00942992") + max_val = float("0.0136885") + mean = float("-9.81164e-05") + std = float("0.00127282") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0185986") + max_val = float("0.017836") + mean = float("0.00051337") + std = float("0.00358993") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.291073") + max_val = float("0.60814") + mean = float("0.147454") + std = float("0.159") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913676") + max_val = float("1.49743") + mean = float("1.08728") + std = float("0.081887") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000306423") + max_val = float("0.00589856") + mean = float("0.00147365") + std = float("0.000924636") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.139638") + max_val = float("0.023164") + mean = float("-0.0355444") + std = float("0.0277406") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = 
[192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0584223") + max_val = float("0.0463674") + mean = float("-0.00109099") + std = float("0.00597895") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00481239") + max_val = float("0.00933689") + mean = float("-0.000150003") + std = float("0.00178034") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0175333") + max_val = float("0.0225389") + mean = float("-0.000152035") + std = float("0.00123807") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00491466") + max_val = float("0.00925987") + mean = float("1.23691e-07") + std = float("0.00410694") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0794888") + max_val = float("0.102667") + mean = float("1.60253e-08") + std = float("0.00459131") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("-0.0756471") + max_val = float("0.111591") + mean = float("0.0118707") + std = float("0.0354397") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("0.969442") + max_val = float("1.49466") + mean = float("1.1695") + std = float("0.0775726") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("7.22536e-05") + max_val = float("0.0068391") + mean = float("0.000845068") + std = float("0.000703644") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-0.040997") + max_val = float("0.0158692") + mean = float("-0.00441209") + std = float("0.00851866") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0576993") + max_val = float("0.0600385") + mean = float("-0.000138602") + std = float("0.00315958") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.00427117") + max_val = float("0.00491851") + mean = float("-5.07427e-05") + std = float("0.00221218") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0242408") + max_val = float("0.00942797") + mean = float("-3.94686e-05") + std = float("0.000747366") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00841769") + max_val = float("0.0133731") + mean = float("0.000292149") + std = float("0.00177607") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = 
[384] + dtype = "float32" + min_val = float("-0.369152") + max_val = float("0.494632") + mean = float("0.0349918") + std = float("0.121509") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.883459") + max_val = float("1.55467") + mean = float("1.05797") + std = float("0.0835312") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("0.00013119") + max_val = float("0.00702002") + mean = float("0.000884237") + std = float("0.000684954") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("-0.105751") + max_val = float("0.0294571") + mean = float("-0.0262986") + std = float("0.0195655") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0387492") + max_val = float("0.0393312") + mean = float("-0.000523745") + std = float("0.00328341") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0107119") + max_val = float("0.00930954") + mean = float("-0.000115991") + std = float("0.00131259") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0844844") + max_val = float("0.0443428") + mean = float("-3.32923e-05") + std = float("0.000884614") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.652269") + max_val = float("1.18859") + mean = float("0.0279915") + std = float("0.238646") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.840802") + max_val = float("1.38345") + mean = float("0.983163") + std = float("0.068319") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.0028222") + max_val = float("0.120091") + mean = float("0.0150878") + std = float("0.0128566") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.186506") + max_val = float("0.0914117") + mean = float("-0.0370423") + std = float("0.0408906") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0804522") + max_val = float("0.0621238") + mean = float("-0.000348866") + std = float("0.00515003") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [192] + dtype = "float32" + min_val = float("-0.445858") + max_val = float("0.100597") + mean = float("-0.0845864") + std = float("0.104559") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [192] + dtype = "float32" + min_val = float("0.827741") + max_val = float("1.21013") + mean = float("0.92639") + std = float("0.0462105") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [192] + dtype = "float32" + min_val = float("0.00247019") + max_val = float("0.0260277") + mean = float("0.00631141") + std = float("0.00287846") + data = None + + +class 
Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [192] + dtype = "float32" + min_val = float("-0.0379476") + max_val = float("0.0497353") + mean = float("-0.0021489") + std = float("0.0234555") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0401693") + max_val = float("0.0482149") + mean = float("-0.000258359") + std = float("0.003519") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [192] + dtype = "float32" + min_val = float("-0.445858") + max_val = float("0.100597") + mean = float("-0.0845864") + std = float("0.104559") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [192] + dtype = "float32" + min_val = float("0.861621") + max_val = float("1.42119") + mean = float("1.11205") + std = float("0.0818467") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [192] + dtype = "float32" + min_val = float("0.00521346") + max_val = float("0.108237") + mean = float("0.0222325") + std = float("0.0164108") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [192] + dtype = "float32" + min_val = float("-0.112022") + max_val = float("0.0861229") + mean = float("-0.0205872") + std = float("0.039563") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0590664") + max_val = float("0.0651393") + mean = float("-0.000122795") + std = float("0.00309452") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.519576") + max_val = float("0.119175") + mean = float("-0.173677") + std = float("0.128148") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.843577") + max_val = float("1.65244") + mean = float("1.0642") + std = float("0.100969") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.0134686") + max_val = float("0.262111") + mean = float("0.035477") + std = float("0.0257605") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.2601") + max_val = float("0.142926") + mean = float("-0.0593171") + std = float("0.0572285") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566011") + max_val = float("0.0723318") + mean = float("-0.000243394") + std = float("0.00339424") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.455876") + max_val = float("0.187264") + mean = float("-0.0820069") + std = float("0.10205") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.841971") + max_val = float("1.25563") + mean = float("1.02701") + std = float("0.0670478") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00508938") + max_val = float("0.0629504") + mean = float("0.0128185") + std = 
float("0.00617773") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.11049") + max_val = float("0.0547617") + mean = float("-0.0181669") + std = float("0.0279962") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.080574") + max_val = float("0.0848044") + mean = float("-0.000185678") + std = float("0.00481275") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.217908") + max_val = float("0.0355956") + mean = float("-0.0691499") + std = float("0.0386375") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.844091") + max_val = float("1.15255") + mean = float("1.01562") + std = float("0.0503148") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00256848") + max_val = float("0.0204398") + mean = float("0.00702577") + std = float("0.00290168") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.0967604") + max_val = float("0.0866626") + mean = float("-0.0222285") + std = float("0.0273828") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0433461") + max_val = float("0.0508427") + mean = float("-0.000278308") + std = float("0.00423993") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.296093") + max_val = float("-0.00746985") + mean = float("-0.0909473") + std = float("0.0603181") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.78297") + max_val = float("1.34841") + mean = float("1.0531") + std = float("0.0659046") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00866336") + max_val = float("0.0606129") + mean = float("0.0236506") + std = float("0.00920598") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.274437") + max_val = float("0.303481") + mean = float("-0.028094") + std = float("0.0952102") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0320502") + max_val = float("0.0370765") + mean = float("-5.98437e-05") + std = float("0.00253528") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.530717") + max_val = float("1.03145") + mean = float("0.148064") + std = float("0.259361") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.731798") + max_val = float("1.56803") + mean = float("1.014") + std = float("0.106695") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00711023") + max_val = 
float("0.0905165") + mean = float("0.0211361") + std = float("0.0114095") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.279082") + max_val = float("0.153551") + mean = float("-0.0509504") + std = float("0.0522264") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.13468") + max_val = float("0.0903167") + mean = float("-0.000883571") + std = float("0.0100158") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("-0.29043") + max_val = float("0.172569") + mean = float("-0.070874") + std = float("0.105573") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("0.730954") + max_val = float("1.20877") + mean = float("0.877662") + std = float("0.077901") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96] + dtype = "float32" + min_val = float("0.00246171") + max_val = float("0.0129422") + mean = float("0.00671665") + std = float("0.00235517") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [96] + dtype = "float32" + min_val = float("-0.0452652") + max_val = float("0.0355382") + mean = float("-0.0105203") + std = float("0.0215082") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0494") + max_val = float("0.0504503") + mean = float("-0.00128754") + std = float("0.00640071") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.29043") + max_val = float("0.172569") + mean = float("-0.070874") + std = float("0.105573") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.97156") + max_val = float("1.32049") + mean = float("1.13236") + std = float("0.0752191") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.0093333") + max_val = float("0.0602011") + mean = float("0.0268797") + std = float("0.0114369") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.0726866") + max_val = float("0.101348") + mean = float("-0.0120426") + std = float("0.0278587") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0765906") + max_val = float("0.0838438") + mean = float("-0.000172577") + std = float("0.00601682") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.673369") + max_val = float("0.111337") + mean = float("-0.259249") + std = float("0.15059") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("0.801527") + max_val = float("1.41146") + mean = float("1.04521") + std = float("0.116809") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [96] + dtype = "float32" + 
min_val = float("0.0179787") + max_val = float("0.123656") + mean = float("0.0407975") + std = float("0.0163583") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.210235") + max_val = float("0.0371458") + mean = float("-0.0429902") + std = float("0.0344348") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.075314") + max_val = float("0.0818873") + mean = float("-0.000470995") + std = float("0.00667813") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.644536") + max_val = float("0.152401") + mean = float("-0.155624") + std = float("0.115775") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.84937") + max_val = float("1.26645") + mean = float("1.03345") + std = float("0.0722039") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00973348") + max_val = float("0.0487037") + mean = float("0.0192495") + std = float("0.00666947") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.122358") + max_val = float("0.0284554") + mean = float("-0.0357051") + std = float("0.0301403") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0666353") + max_val = float("0.07655") + mean = float("-0.000641477") + std = float("0.00909018") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.198184") + max_val = float("0.0830438") + mean = float("-0.029894") + std = float("0.0460273") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.68536") + max_val = float("1.33613") + mean = float("0.954422") + std = float("0.0884454") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.00439953") + max_val = float("0.0376729") + mean = float("0.0110806") + std = float("0.00518735") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.110611") + max_val = float("0.053016") + mean = float("-0.0161266") + std = float("0.0307335") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0847356") + max_val = float("0.0781943") + mean = float("-0.00030171") + std = float("0.00760969") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.335709") + max_val = float("0.0179744") + mean = float("-0.108667") + std = float("0.0840873") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.731015") + max_val = float("1.20665") + mean = float("1.05574") + std = float("0.0750007") + data = None + + +class Program_weight_tensor_parameter_122: + 
name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0110792") + max_val = float("0.108652") + mean = float("0.0289631") + std = float("0.0151623") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.402624") + max_val = float("0.415541") + mean = float("-0.0212279") + std = float("0.14343") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0566918") + max_val = float("0.0566383") + mean = float("-4.65555e-05") + std = float("0.00561229") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-1.07712") + max_val = float("2.35644") + mean = float("0.310791") + std = float("0.586496") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.468523") + max_val = float("1.40452") + mean = float("0.882442") + std = float("0.167292") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00622077") + max_val = float("0.145427") + mean = float("0.0336655") + std = float("0.0192279") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.252919") + max_val = float("0.171344") + mean = float("-0.0348699") + std = float("0.0800858") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.151113") + max_val = float("0.113524") + mean = float("-0.00159645") + std = float("0.0201408") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0664404") + max_val = float("0.0734275") + mean = float("-0.00237338") + std = float("0.012309") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = 
"parameter_139" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.126099") + max_val = float("0.146893") + mean = float("-0.000602698") + std = float("0.0131643") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.10691") + max_val = float("0.123684") + mean = float("-0.0011924") + std = float("0.0144144") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.188612") + max_val = float("0.131781") + mean = float("-0.00116675") + std = float("0.0182059") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.102996") + max_val = float("0.127082") + mean = float("-0.000100325") + std = float("0.0125415") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.355722") + max_val = float("0.392015") + mean = float("-0.00761515") + std = float("0.135765") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.58176") + max_val = float("1.61796") + mean = float("0.798413") + std = float("0.141773") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = 
"float32" + min_val = float("0.00744695") + max_val = float("0.0777617") + mean = float("0.0234491") + std = float("0.0125216") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.126434") + max_val = float("0.0750849") + mean = float("-0.0331653") + std = float("0.0358366") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0965171") + max_val = float("0.103243") + mean = float("-0.000979696") + std = float("0.0126998") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.337168") + max_val = float("0.172866") + mean = float("-0.0804302") + std = float("0.0895865") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.695369") + max_val = float("1.47856") + mean = float("0.99055") + std = float("0.0996963") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0097712") + max_val = float("0.0987394") + mean = float("0.0251769") + std = float("0.012782") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.203449") + max_val = float("0.19128") + mean = float("-0.0630189") + std = float("0.0562037") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.10035") + max_val = float("0.120314") + mean = float("-0.00133976") + std = float("0.0122628") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.307168") + max_val = float("0.102436") + mean = float("-0.0815792") + std = float("0.0994278") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.550693") + max_val = float("0.936676") + mean = float("0.809285") + std = float("0.0654997") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00340201") + max_val = float("0.01613") + mean = float("0.00847885") + std = float("0.00252636") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0470381") + max_val = float("0.0327168") + mean = float("-0.0154057") + std = float("0.0195276") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.046942") + max_val = float("0.0545283") + mean = float("-0.00159272") + std = float("0.00865431") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.307168") + max_val = float("0.102436") + mean = float("-0.0815792") + std = float("0.0994278") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.843562") + max_val = float("1.28869") + mean = float("1.03487") + std = float("0.0944074") + data = None + + +class 
Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0162234") + max_val = float("0.0750483") + mean = float("0.0356845") + std = float("0.0121583") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.0894171") + max_val = float("0.0639562") + mean = float("-0.0235775") + std = float("0.0292923") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.086771") + max_val = float("0.165089") + mean = float("-0.000248005") + std = float("0.00738139") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.732503") + max_val = float("0.316296") + mean = float("-0.275731") + std = float("0.175263") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764579") + max_val = float("1.31051") + mean = float("1.0436") + std = float("0.115503") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0234519") + max_val = float("0.125482") + mean = float("0.0544499") + std = float("0.0175139") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.166347") + max_val = float("0.0892188") + mean = float("-0.0515841") + std = float("0.0489599") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.144257") + max_val = float("0.132913") + mean = float("-0.000565483") + std = float("0.00873368") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.649417") + max_val = float("0.386836") + mean = float("-0.253654") + std = float("0.210215") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.74298") + max_val = float("1.3799") + mean = float("1.02561") + std = float("0.122082") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.00554249") + max_val = float("0.0349953") + mean = float("0.0169508") + std = float("0.00634658") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.314719") + max_val = float("0.272384") + mean = float("0.0111478") + std = float("0.0678291") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.169447") + max_val = float("0.110463") + mean = float("-0.000526406") + std = float("0.0113187") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.239085") + max_val = float("0.172277") + mean = float("-0.0410098") + std = float("0.088654") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.917909") + max_val = float("1.41228") + mean = float("1.07289") + 
std = float("0.0921621") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00512108") + max_val = float("0.0377565") + mean = float("0.012503") + std = float("0.00556829") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0764749") + max_val = float("0.0829136") + mean = float("0.0112017") + std = float("0.0312016") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.114799") + max_val = float("0.116536") + mean = float("-0.000477416") + std = float("0.00980395") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.539928") + max_val = float("-0.101676") + mean = float("-0.294772") + std = float("0.070854") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.649882") + max_val = float("1.08068") + mean = float("0.851912") + std = float("0.0726184") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00896797") + max_val = float("0.0613196") + mean = float("0.0239899") + std = float("0.00961639") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.110165") + max_val = float("0.0578497") + mean = float("-0.0313571") + std = float("0.0318844") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0569669") + max_val = float("0.0580897") + mean = float("-0.000683949") + std = float("0.00880451") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.522469") + max_val = float("0.214295") + mean = float("-0.168543") + std = float("0.0775358") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.848906") + max_val = float("1.39284") + mean = float("1.06283") + std = float("0.0773502") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00501982") + max_val = float("0.0397325") + mean = float("0.0160268") + std = float("0.00625591") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.114137") + max_val = float("0.0857981") + mean = float("-0.0355965") + std = float("0.0347035") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0936791") + max_val = float("0.106798") + mean = float("-0.000567793") + std = float("0.00789782") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.384521") + max_val = float("0.227763") + mean = float("-0.118185") + std = float("0.102049") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val 
= float("0.869586") + max_val = float("1.51327") + mean = float("1.12323") + std = float("0.119089") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.119222") + max_val = float("1.51213") + mean = float("0.355679") + std = float("0.194075") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-3.46501") + max_val = float("1.69978") + mean = float("-0.19182") + std = float("0.986212") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.113572") + max_val = float("0.0748414") + mean = float("-8.3021e-05") + std = float("0.00688727") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.243321") + max_val = float("0.168978") + mean = float("-0.0173026") + std = float("0.0539895") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.617694") + max_val = float("1.01652") + mean = float("0.837363") + std = float("0.0632031") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.00364927") + max_val = float("0.0302202") + mean = float("0.00897402") + std = float("0.00329128") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.116422") + max_val = float("0.0722651") + mean = float("-0.0506104") + std = float("0.0400639") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0376764") + max_val = float("0.0673975") + mean = float("-0.00139833") + std = float("0.00662605") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.243321") + max_val = float("0.168978") + mean = float("-0.0173026") + std = float("0.0539895") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.875399") + max_val = float("1.46098") + mean = float("1.10627") + std = float("0.129572") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0227077") + max_val = float("0.237351") + mean = float("0.0547969") + std = float("0.0225584") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.293552") + max_val = float("0.0473091") + mean = float("-0.118203") + std = float("0.0657697") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368216") + max_val = float("0.0546366") + mean = float("-0.000404501") + std = float("0.00405002") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.311401") + max_val = float("0.0667354") + mean = float("-0.115178") + std = float("0.0802349") + data = None + + +class Program_weight_tensor_parameter_216: + 
name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.910824") + max_val = float("1.44605") + mean = float("1.10855") + std = float("0.101864") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.0286424") + max_val = float("0.136652") + mean = float("0.0700486") + std = float("0.0228431") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.460812") + max_val = float("0.215499") + mean = float("-0.119224") + std = float("0.0838897") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0498457") + max_val = float("0.0422007") + mean = float("-0.000479539") + std = float("0.00450633") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.444339") + max_val = float("0.412099") + mean = float("-0.137793") + std = float("0.130321") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.955612") + max_val = float("1.37241") + mean = float("1.11033") + std = float("0.0722102") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0599313") + max_val = float("0.406381") + mean = float("0.146198") + std = float("0.0531059") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.3746") + max_val = float("0.457922") + mean = float("-0.113673") + std = float("0.104493") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0511306") + max_val = float("0.0904195") + mean = float("-0.000705076") + std = float("0.00778049") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.164214") + max_val = float("0.00108657") + mean = float("-0.0652814") + std = float("0.0261733") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.819443") + max_val = float("1.06651") + mean = float("0.969047") + std = float("0.046122") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0402565") + max_val = float("0.234889") + mean = float("0.0881219") + std = float("0.0288936") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.264071") + max_val = float("0.148941") + mean = float("-0.104244") + std = float("0.0769829") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0279262") + max_val = float("0.0522477") + mean = float("-0.00062665") + std = float("0.0064125") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("-4.82887") + max_val = float("-0.110974") + mean = float("-2.29538") + std = float("0.775295") + 
data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("2.10191") + max_val = float("5.21727") + mean = float("3.70112") + std = float("0.482785") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512] + dtype = "float32" + min_val = float("0.00205017") + max_val = float("0.0129863") + mean = float("0.00439085") + std = float("0.00153234") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [512] + dtype = "float32" + min_val = float("-0.125558") + max_val = float("0.0844085") + mean = float("-0.0389285") + std = float("0.0304083") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0812518") + max_val = float("0.135076") + mean = float("-0.000961032") + std = float("0.00790225") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [384] + dtype = "float32" + min_val = float("-0.0156508") + max_val = float("-0.000140212") + mean = float("-0.00546153") + std = float("0.0036436") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.186417") + max_val = float("0.14642") + mean = float("-0.00212537") + std = float("0.00664924") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("-2.38809") + max_val = float("3.17072") + mean = float("-0.203472") + std = float("0.563206") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("0.123887") + max_val = float("2.40473") + mean = float("0.524729") + std = float("0.334968") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192] + dtype = "float32" + min_val = float("0.000170815") + max_val = float("0.00268326") + mean = float("0.000688421") + std = float("0.000383392") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.0629836") + max_val = float("0.0878972") + mean = float("0.0115645") + std = float("0.0223406") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0580822") + max_val = float("0.058285") + mean = float("-0.000330827") + std = float("0.00504808") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("-2.38809") + max_val = float("3.17072") + mean = float("-0.203472") + std = float("0.563206") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("0.679292") + max_val = float("3.07313") + mean = float("1.54536") + std = float("0.450864") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192] + dtype = "float32" + min_val = float("0.0033827") + max_val = float("0.0334462") + mean = float("0.00949376") + std = float("0.00462441") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.225625") + 
max_val = float("0.191391") + mean = float("0.012192") + std = float("0.0564082") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.074483") + max_val = float("0.0656173") + mean = float("-7.80967e-05") + std = float("0.00448667") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("-3.43262") + max_val = float("1.16961") + mean = float("-1.42837") + std = float("0.634837") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("0.392163") + max_val = float("1.72692") + mean = float("1.08998") + std = float("0.190011") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192] + dtype = "float32" + min_val = float("0.0270888") + max_val = float("0.44195") + mean = float("0.084525") + std = float("0.0387844") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-1.22131") + max_val = float("0.423185") + mean = float("-0.221144") + std = float("0.173004") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.060306") + max_val = float("0.0575339") + mean = float("-0.000382961") + std = float("0.00524134") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("-3.8777") + max_val = float("4.24427") + mean = float("-0.62919") + std = float("0.988716") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("0.580104") + max_val = float("4.17524") + mean = float("1.54492") + std = float("0.398566") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192] + dtype = "float32" + min_val = float("0.00324018") + max_val = float("0.0287499") + mean = float("0.0091977") + std = float("0.00409842") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.163414") + max_val = float("0.133777") + mean = float("0.0531707") + std = float("0.0370733") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0969858") + max_val = float("0.0643058") + mean = float("-0.00132637") + std = float("0.00937933") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("-2.93792") + max_val = float("1.02599") + mean = float("-0.426756") + std = float("0.68174") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("0.700506") + max_val = float("3.6114") + mean = float("1.48179") + std = float("0.505296") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("0.00157973") + max_val = float("0.00922621") + mean = float("0.00455175") + std = float("0.00146605") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] 
+ dtype = "float32" + min_val = float("-0.062057") + max_val = float("0.0798436") + mean = float("0.0163012") + std = float("0.0297843") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0711863") + max_val = float("0.0659081") + mean = float("-0.000541447") + std = float("0.00758362") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [384] + dtype = "float32" + min_val = float("-2.84279") + max_val = float("1.12229") + mean = float("-0.753376") + std = float("0.497189") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [384] + dtype = "float32" + min_val = float("0.419122") + max_val = float("1.80266") + mean = float("0.867781") + std = float("0.218188") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [384] + dtype = "float32" + min_val = float("0.00581871") + max_val = float("0.137693") + mean = float("0.0206925") + std = float("0.0128053") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.489828") + max_val = float("0.409672") + mean = float("0.0184444") + std = float("0.103856") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0545942") + max_val = float("0.0530515") + mean = float("-0.000171083") + std = float("0.00433153") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [256] + dtype = "float32" + min_val = float("-2.82072") + max_val = float("1.4645") + mean = float("-1.07775") + std = float("0.63367") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [256] + dtype = "float32" + min_val = float("0.424072") + max_val = float("1.76956") + mean = float("0.978554") + std = float("0.170473") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [256] + dtype = "float32" + min_val = float("0.00145624") + max_val = float("0.0198528") + mean = float("0.00622552") + std = float("0.00273816") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [256] + dtype = "float32" + min_val = float("-0.233052") + max_val = float("0.227501") + mean = float("-0.0537504") + std = float("0.0737751") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.129954") + max_val = float("0.174189") + mean = float("-0.00111637") + std = float("0.014005") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("-0.0203441") + max_val = float("0.00142473") + mean = float("-0.00616218") + std = float("0.00515112") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.235185") + max_val = float("0.180564") + mean = float("-0.00409182") + std = float("0.010316") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-2.27838") + max_val = float("0.754522") + mean = float("-0.115757") + std = float("0.508129") + data = None + + 
+class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96] + dtype = "float32" + min_val = float("-0.0583754") + max_val = float("2.30701") + mean = float("0.261422") + std = float("0.366858") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("5.52856e-12") + max_val = float("0.00211136") + mean = float("0.000440278") + std = float("0.00038522") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("-0.0522517") + max_val = float("0.0841538") + mean = float("0.00706718") + std = float("0.0204386") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0379797") + max_val = float("0.0638861") + mean = float("-0.000312308") + std = float("0.00575617") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-2.27838") + max_val = float("0.754522") + mean = float("-0.115757") + std = float("0.508129") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96] + dtype = "float32" + min_val = float("0.349893") + max_val = float("3.24248") + mean = float("1.29137") + std = float("0.633887") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("0.00382734") + max_val = float("0.0305749") + mean = float("0.0139795") + std = float("0.00619915") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("-0.179476") + max_val = float("0.231631") + mean = float("0.0342856") + std = float("0.0751833") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0540098") + max_val = float("0.0647126") + mean = float("-0.000333391") + std = float("0.00757747") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-2.80047") + max_val = float("1.50581") + mean = float("-1.09128") + std = float("0.696783") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96] + dtype = "float32" + min_val = float("0.32196") + max_val = float("1.80506") + mean = float("1.07312") + std = float("0.213196") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [96] + dtype = "float32" + min_val = float("0.0443664") + max_val = float("0.229848") + mean = float("0.0920728") + std = float("0.0345847") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [96] + dtype = "float32" + min_val = float("-1.52006") + max_val = float("0.514198") + mean = float("-0.12686") + std = float("0.297573") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0508838") + max_val = float("0.0716904") + mean = float("-0.000600059") + std = float("0.00819703") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [96] + dtype = "float32" + min_val = float("-2.54073") + max_val = float("0.664611") + mean = 
float("-0.0486788") + std = float("0.474242") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [96] + dtype = "float32" + min_val = float("-0.0782312") + max_val = float("3.15097") + mean = float("0.280453") + std = float("0.408781") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [96] + dtype = "float32" + min_val = float("1.38686e-10") + max_val = float("0.0216024") + mean = float("0.00193235") + std = float("0.00284515") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [96] + dtype = "float32" + min_val = float("-0.051793") + max_val = float("0.122769") + mean = float("0.0178591") + std = float("0.0297203") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.111904") + max_val = float("0.0788536") + mean = float("-0.00121451") + std = float("0.00859042") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [96] + dtype = "float32" + min_val = float("-2.54073") + max_val = float("0.664612") + mean = float("-0.0486788") + std = float("0.474242") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [96] + dtype = "float32" + min_val = float("0.34207") + max_val = float("2.99219") + mean = float("0.929546") + std = float("0.412034") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("0.00578641") + max_val = float("0.0746827") + mean = float("0.0256145") + std = float("0.0116189") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96] + dtype = "float32" + min_val = float("-0.159121") + max_val = float("0.1999") + mean = float("0.0399585") + std = float("0.0761745") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0537042") + max_val = float("0.0504456") + mean = float("-0.000550937") + std = float("0.00775915") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [96] + dtype = "float32" + min_val = float("-2.02001") + max_val = float("1.65623") + mean = float("-0.919923") + std = float("0.650572") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [96] + dtype = "float32" + min_val = float("0.442811") + max_val = float("1.97179") + mean = float("1.06409") + std = float("0.227631") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [96] + dtype = "float32" + min_val = float("0.0126015") + max_val = float("0.116688") + mean = float("0.0335163") + std = float("0.0162914") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [96] + dtype = "float32" + min_val = float("-2.08549") + max_val = float("0.285641") + mean = float("-0.0344373") + std = float("0.253697") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.105598") + max_val = float("0.126106") + mean = float("-0.000400525") + std = float("0.00876452") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [96] + dtype = "float32" + min_val = 
float("-1.61964") + max_val = float("1.88676") + mean = float("0.00618829") + std = float("0.838881") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [96] + dtype = "float32" + min_val = float("0.352578") + max_val = float("1.32187") + mean = float("0.700929") + std = float("0.236185") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [96] + dtype = "float32" + min_val = float("0.00963589") + max_val = float("0.0791035") + mean = float("0.0353916") + std = float("0.0149812") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [96] + dtype = "float32" + min_val = float("-0.28232") + max_val = float("0.404327") + mean = float("-0.0642217") + std = float("0.107473") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.125711") + max_val = float("0.113026") + mean = float("-0.00129075") + std = float("0.0142062") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [96] + dtype = "float32" + min_val = float("-2.46694") + max_val = float("1.71626") + mean = float("0.341079") + std = float("0.679162") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [96] + dtype = "float32" + min_val = float("0.539295") + max_val = float("4.8854") + mean = float("1.48247") + std = float("0.959871") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("0.00783824") + max_val = float("0.0876481") + mean = float("0.0244986") + std = float("0.0120614") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("-0.287482") + max_val = float("0.306711") + mean = float("-0.00499391") + std = float("0.109698") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0888948") + max_val = float("0.165433") + mean = float("-0.000715943") + std = float("0.0141494") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [192] + dtype = "float32" + min_val = float("-4.44162") + max_val = float("2.00951") + mean = float("-0.0984248") + std = float("0.883313") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [192] + dtype = "float32" + min_val = float("0.575367") + max_val = float("4.51851") + mean = float("1.08197") + std = float("0.426247") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [192] + dtype = "float32" + min_val = float("0.00785823") + max_val = float("0.189363") + mean = float("0.0403725") + std = float("0.0275254") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [192] + dtype = "float32" + min_val = float("-0.270304") + max_val = float("0.337865") + mean = float("0.0348807") + std = float("0.105792") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.087766") + max_val = float("0.0743655") + mean = float("-0.000243175") + std = float("0.00724859") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + 
shape = [128] + dtype = "float32" + min_val = float("-2.15213") + max_val = float("1.36751") + mean = float("-0.673313") + std = float("0.682255") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [128] + dtype = "float32" + min_val = float("0.368949") + max_val = float("2.25056") + mean = float("0.876029") + std = float("0.235567") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [128] + dtype = "float32" + min_val = float("0.00227055") + max_val = float("0.0578315") + mean = float("0.00881457") + std = float("0.00556285") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [128] + dtype = "float32" + min_val = float("-0.299004") + max_val = float("0.266637") + mean = float("-0.0648951") + std = float("0.119821") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.205453") + max_val = float("0.19767") + mean = float("-0.00132955") + std = float("0.0233044") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [96] + dtype = "float32" + min_val = float("-0.0287844") + max_val = float("0.00351177") + mean = float("-0.00835282") + std = float("0.00772968") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.293759") + max_val = float("0.281304") + mean = float("-0.00581101") + std = float("0.0185083") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.083645") + max_val = float("0.0802369") + mean = float("-0.00148088") + std = float("0.0125285") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.101441") + max_val = float("0.102416") + mean = float("-0.000544695") + std = float("0.0126239") + data = None + + +class Program_weight_tensor_parameter_336: + name = 
"parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0993692") + max_val = float("0.108811") + mean = float("-0.00104822") + std = float("0.0135148") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0692134") + max_val = float("0.0765493") + mean = float("-0.00277088") + std = float("0.0156891") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0966216") + max_val = float("0.0984974") + mean = float("-0.00101028") + std = float("0.0123098") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = 
float("-0.0948994") + max_val = float("0.0895512") + mean = float("-0.000794946") + std = float("0.0143308") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0970452") + max_val = float("0.119633") + mean = float("-0.00232752") + std = float("0.0233937") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.161532") + max_val = float("0.246027") + mean = float("0.000306195") + std = float("0.0243305") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [96] + dtype = "float32" + min_val = float("-3.31592") + max_val = float("3.83597") + mean = float("0.267111") + std = float("1.21094") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [96] + dtype = "float32" + min_val = float("0.511478") + max_val = float("5.40365") + mean = float("1.12531") + std = float("0.546749") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [96] + dtype = "float32" + min_val = float("0.0162512") + max_val = float("0.320396") + mean = float("0.0760809") + std = float("0.0518261") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [96] + dtype = "float32" + min_val = float("-0.502761") + max_val = float("0.492542") + mean = float("-0.0358312") + std = float("0.181422") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.0955215") + max_val = float("0.123103") + mean = float("-0.000235878") + std = float("0.0120646") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.17433") + max_val = float("0.176451") + mean = float("-0.00222374") + std = float("0.0347099") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.166649") + max_val = float("0.156533") + mean = float("-0.0129927") + std = float("0.0258165") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.0915942") + max_val = float("0.147481") + mean = float("-0.00102278") + std = float("0.0251579") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.110347") + max_val = float("0.0904305") + mean = float("-0.000736999") + std = float("0.0215299") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.125611") + max_val = float("0.174817") + mean = float("-0.00010855") + std = float("0.023961") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.205563") + max_val = float("0.188891") + mean = float("-0.00450245") + std = float("0.0361434") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.192775") + max_val = float("0.166962") + mean = float("-0.00176564") + std = float("0.0388666") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.162459") + max_val = float("0.113525") + mean = float("-0.000259478") + std = float("0.0203147") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.21861") + max_val = float("0.233137") + mean = float("-0.000345629") + std = float("0.0349451") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.301154") + max_val = float("0.330949") + mean = float("-0.00144702") + std = float("0.0464861") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.247963") + max_val = float("0.271619") + mean = float("-0.00315738") + std = float("0.068481") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py new file mode 100644 index 000000000..5c608d9b7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.0430395") + 
max_val = float("12.2027") + mean = float("5.0132") + std = float("2.64575") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.18519, 0.666667, 1.18519, 0.666667] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, 
full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..9b79cc15a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +b7415b84d8b0e7f854eeb429f2ca3c71394b5140d8727a259198ad3e98435ab5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py new file mode 100644 index 000000000..aa6620489 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py @@ -0,0 +1,59 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 1, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00171481") + std = float("0.0413748") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 1, 1] + dtype = "int32" + data = [0, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00171481") + std = float("0.0413748") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1, 4] + dtype = "float32" + data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 1, 7581] + dtype = "float32" + max_val = float("0.00652957") + mean = float("2.09634e-05") + std = float("0.000282651") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 1, 7581] + dtype = "float32" + max_val = float("0.931424") + mean = float("0.0255599") + std = float("0.091854") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py new file mode 100644 index 000000000..80304c15d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py @@ -0,0 +1,176 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x7581xi64) <- (2x1x7581xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, 
full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x7581xi64) <- (2x7581xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (2xi32) <- (2x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (15162xi64) <- (2x7581xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (15162xi32) <- (2xi32, 15162xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 7581] + + # pd_op.reshape: (2x7581xi32) <- (15162xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x7581xb) <- (2x7581xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x7581xi32) <- (2x7581xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x7581xi32) <- (2x7581xb, 2x7581xi32, 2x7581xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (2x4xf32) <- (2x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (15162x4xf32) <- (2x4xf32, 15162xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 7581, 4] + + # pd_op.reshape: (2x7581x4xf32) <- (15162x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x7581x5xf32) <- (2x7581xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x7581x4xf32) <- (2x7581x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x1x7581xf32) <- 
(2x1x7581xf32, 2x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x1x1xf32) <- (2x1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_1, float("1e-09"), True) + del full_1, max_0 + + # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x7581xf32) <- (2x1x7581xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x7581x1xf32) <- (2x7581xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x7581x4xf32) <- (2x7581x4xf32, 2x7581x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..4e276d96d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +dd9b6f7fec930c53532803207e780b6bed2e5fa78a9c0c93e36150ac8b0d9463 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py new file mode 100644 index 000000000..903e9a326 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 608, 608] + dtype = "float32" + min_val = float("-1.85379") + max_val = float("2.55285") + mean = float("0.163596") + std = float("0.495929") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py new file mode 100644 index 000000000..70cc252dc --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py @@ -0,0 +1,4048 @@ 
+import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + 
parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + data_0, + ): + # pd_op.conv2d: (2x16x304x304xf32) <- (2x3x608x608xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_367, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_367 + + # pd_op.batch_norm_: (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, 
parameter_366 + + # pd_op.swish: (2x16x304x304xf32) <- (2x16x304x304xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x16x304x304xf32) <- (2x16x304x304xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (2x16x304x304xf32) <- (2x16x304x304xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x32x304x304xf32) <- (2x16x304x304xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_357, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (2x32x304x304xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x304x304xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_356, + parameter_355, + parameter_354, + parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (2x32x304x304xf32) <- (2x32x304x304xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x48x152x152xf32) <- (2x32x304x304xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_352, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (2x48x152x152xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x152x152xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (2x48x152x152xf32) <- (2x48x152x152xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x24x152x152xf32) <- (2x48x152x152xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x24x152x152xf32) <- (2x48x152x152xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_342, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x24x152x152xf32) <- (2x24x152x152xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x24x152x152xf32) <- (2x24x152x152xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.conv2d: (2x24x152x152xf32) <- (2x24x152x152xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_8, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.add: (2x24x152x152xf32) <- (2x24x152x152xf32, 2x24x152x152xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + + # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x24x152x152xf32) <- (2x24x152x152xf32, 2x24x152x152xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x24x152x152xf32, 2x24x152x152xf32]) <- (2x24x152x152xf32, 2x24x152x152xf32) + combine_0 = [swish_5, add_1] + + # pd_op.concat: (2x48x152x152xf32) <- ([2x24x152x152xf32, 2x24x152x152xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x152x152xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_322, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_321, full_int_array_1) + del parameter_321 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x152x152xf32) <- (2x48x152x152xf32, 2x48x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x64x152x152xf32) <- (2x48x152x152xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_0, parameter_320, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_320 + + # pd_op.batch_norm_: (2x64x152x152xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x152x152xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + 
batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_319, + parameter_318, + parameter_317, + parameter_316, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_316, parameter_317, parameter_318, parameter_319 + + # pd_op.swish: (2x64x152x152xf32) <- (2x64x152x152xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x96x76x76xf32) <- (2x64x152x152xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_315, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_315 + + # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_314, + parameter_313, + parameter_312, + parameter_311, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_311, parameter_312, parameter_313, parameter_314 + + # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x96x76x76xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_310 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_309, + parameter_308, + parameter_307, + parameter_306, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_306, parameter_307, parameter_308, parameter_309 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_11 = paddle._C_ops.swish(batch_norm__66) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x96x76x76xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_305 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_304, + parameter_303, + parameter_302, + parameter_301, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_301, parameter_302, parameter_303, parameter_304 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_12 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_300, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_300 + + # 
pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_299, + parameter_298, + parameter_297, + parameter_296, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_296, parameter_297, parameter_298, parameter_299 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_13 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_13, parameter_295, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) + add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_14 = paddle._C_ops.swish(add_3) + + # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) + add_4 = paddle._C_ops.add(swish_12, swish_14) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_285, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, 
parameter_284 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_280, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_275, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) + add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) + add_6 = paddle._C_ops.add(add_4, swish_16) + + # builtin.combine: ([2x48x76x76xf32, 2x48x76x76xf32]) <- (2x48x76x76xf32, 2x48x76x76xf32) + combine_1 = [swish_11, add_6] + + # pd_op.concat: (2x96x76x76xf32) <- ([2x48x76x76xf32, 2x48x76x76xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x76x76xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_269, full_int_array_1) + del parameter_269 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x128x76x76xf32) <- (2x96x76x76xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_1, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (2x128x76x76xf32, 128xf32, 
128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x76x76xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.swish: (2x128x76x76xf32) <- (2x128x76x76xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x192x38x38xf32) <- (2x128x76x76xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_263, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x192x38x38xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_258 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_257, + parameter_256, + parameter_255, + parameter_254, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_254, parameter_255, parameter_256, parameter_257 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x192x38x38xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_253, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_253 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_252, + parameter_251, + parameter_250, + parameter_249, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_249, parameter_250, parameter_251, parameter_252 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # 
pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_20, parameter_248, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_248 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_247, + parameter_246, + parameter_245, + parameter_244, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_244, parameter_245, parameter_246, parameter_247 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_21, parameter_243, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_243 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_242, + parameter_241, + parameter_240, + parameter_239, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_239, parameter_240, parameter_241, parameter_242 + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_238 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_237, + parameter_236, + parameter_235, + parameter_234, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_234, parameter_235, parameter_236, parameter_237 + + # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) + add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_22 = paddle._C_ops.swish(add_8) + + # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) + add_9 = paddle._C_ops.add(swish_20, swish_22) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_233, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_233 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_232, + parameter_231, + parameter_230, + parameter_229, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_229, parameter_230, parameter_231, parameter_232 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_23 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_228, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_228 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_227, + parameter_226, + parameter_225, + parameter_224, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_224, parameter_225, parameter_226, parameter_227 + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_223, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_223 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_222, + parameter_221, + parameter_220, + parameter_219, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_219, parameter_220, parameter_221, parameter_222 + + # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) + add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_24 = paddle._C_ops.swish(add_10) + + # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) + add_11 = paddle._C_ops.add(add_9, swish_24) + + # builtin.combine: ([2x96x38x38xf32, 2x96x38x38xf32]) <- (2x96x38x38xf32, 2x96x38x38xf32) + combine_2 = [swish_19, add_11] + + # pd_op.concat: (2x192x38x38xf32) <- ([2x96x38x38xf32, 2x96x38x38xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x38x38xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_218 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_217, full_int_array_1) + del parameter_217 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: 
(2x256x38x38xf32) <- (2x192x38x38xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_2, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_216 + + # pd_op.batch_norm_: (2x256x38x38xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x38x38xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_215, + parameter_214, + parameter_213, + parameter_212, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_212, parameter_213, parameter_214, parameter_215 + + # pd_op.swish: (2x256x38x38xf32) <- (2x256x38x38xf32) + swish_25 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (2x384x19x19xf32) <- (2x256x38x38xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_211 + + # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_210, + parameter_209, + parameter_208, + parameter_207, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_207, parameter_208, parameter_209, parameter_210 + + # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) + swish_26 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x384x19x19xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_206 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_205, + parameter_204, + parameter_203, + parameter_202, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_202, parameter_203, parameter_204, parameter_205 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_27 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x384x19x19xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_201 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_28 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_195, + parameter_194, + parameter_193, + parameter_192, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_192, parameter_193, parameter_194, parameter_195 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_29 = paddle._C_ops.swish(batch_norm__198) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_29, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_191 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_190, + parameter_189, + parameter_188, + parameter_187, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_187, parameter_188, parameter_189, parameter_190 + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_186 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_185, + parameter_184, + parameter_183, + parameter_182, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_182, parameter_183, parameter_184, parameter_185 + + # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) + add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_30 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) + add_14 = paddle._C_ops.add(swish_28, swish_30) + + # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32) + combine_3 = [swish_27, add_14] + + # pd_op.concat: (2x384x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del 
combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x19x19xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_181, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_181 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_180, full_int_array_1) + del full_int_array_1, parameter_180 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x512x19x19xf32) <- (2x384x19x19xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_3, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x512x19x19xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x19x19xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x512x19x19xf32) <- (2x512x19x19xf32) + swish_31 = paddle._C_ops.swish(batch_norm__216) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x512x19x19xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_31, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_32 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x512x19x19xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_31, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_33 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_34 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_35 = paddle._C_ops.swish(add_16) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_35, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x192x19x19xf32) <- 
(2x192x19x19xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_35, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_35, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32) + combine_4 = [swish_35, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x768x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x768x19x19xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_4, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_36 = paddle._C_ops.swish(batch_norm__252) + + # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32) + combine_5 = [swish_32, swish_36] + + # pd_op.concat: (2x384x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_5, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) + swish_37 = paddle._C_ops.swish(batch_norm__258) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x384x19x19xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + 
batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_38 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.nearest_interp: (2x192x38x38xf32) <- (2x192x19x19xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_38, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x38x38xf32, 2x256x38x38xf32]) <- (2x192x38x38xf32, 2x256x38x38xf32) + combine_6 = [nearest_interp_0, swish_25] + + # pd_op.concat: (2x448x38x38xf32) <- ([2x192x38x38xf32, 2x256x38x38xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x448x38x38xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_39 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x448x38x38xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_6, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_40 = paddle._C_ops.swish(batch_norm__276) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_41 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_42 = paddle._C_ops.swish(add_17) + + # builtin.combine: ([2x96x38x38xf32, 2x96x38x38xf32]) <- (2x96x38x38xf32, 2x96x38x38xf32) + combine_7 = [swish_39, swish_42] + + # pd_op.concat: (2x192x38x38xf32) <- ([2x96x38x38xf32, 2x96x38x38xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_7, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) + swish_43 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x192x38x38xf32, 96x192x1x1xf32) + 
conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_44 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.nearest_interp: (2x96x76x76xf32) <- (2x96x38x38xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_44, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x96x76x76xf32, 2x128x76x76xf32]) <- (2x96x76x76xf32, 2x128x76x76xf32) + combine_8 = [nearest_interp_1, swish_17] + + # pd_op.concat: (2x224x76x76xf32) <- ([2x96x76x76xf32, 2x128x76x76xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x224x76x76xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_8, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_45 = paddle._C_ops.swish(batch_norm__312) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x224x76x76xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_8, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_46 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 
48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_47 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + + # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) + swish_48 = paddle._C_ops.swish(add_18) + + # builtin.combine: ([2x48x76x76xf32, 2x48x76x76xf32]) <- (2x48x76x76xf32, 2x48x76x76xf32) + combine_9 = [swish_45, swish_48] + + # pd_op.concat: (2x96x76x76xf32) <- ([2x48x76x76xf32, 2x48x76x76xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x96x76x76xf32) <- (2x96x76x76xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_9, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, 
None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) + swish_49 = paddle._C_ops.swish(batch_norm__342) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x76x76xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_50 = paddle._C_ops.swish(batch_norm__348) + + # builtin.combine: ([2x96x38x38xf32, 2x192x38x38xf32]) <- (2x96x38x38xf32, 2x192x38x38xf32) + combine_10 = [swish_50, swish_43] + + # pd_op.concat: (2x288x38x38xf32) <- ([2x96x38x38xf32, 2x192x38x38xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x288x38x38xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_10, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_51 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x288x38x38xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_10, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_52 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 
96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_53 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) + swish_54 = paddle._C_ops.swish(add_19) + + # builtin.combine: ([2x96x38x38xf32, 2x96x38x38xf32]) <- (2x96x38x38xf32, 2x96x38x38xf32) + combine_11 = [swish_51, swish_54] + + # pd_op.concat: (2x192x38x38xf32) <- ([2x96x38x38xf32, 2x96x38x38xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_11, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) + swish_55 = paddle._C_ops.swish(batch_norm__384) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x38x38xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_56 = paddle._C_ops.swish(batch_norm__390) + + # builtin.combine: ([2x192x19x19xf32, 2x384x19x19xf32]) <- (2x192x19x19xf32, 2x384x19x19xf32) + combine_12 = [swish_56, swish_37] + + # pd_op.concat: (2x576x19x19xf32) <- ([2x192x19x19xf32, 2x384x19x19xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x576x19x19xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_12, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_57 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x576x19x19xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_12, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_58 = paddle._C_ops.swish(batch_norm__402) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + 
) + del parameter_19 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_59 = paddle._C_ops.swish(batch_norm__408) + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + + # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) + swish_60 = paddle._C_ops.swish(add_20) + + # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32) + combine_13 = [swish_57, swish_60] + + # pd_op.concat: (2x384x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_3, + parameter_2, + parameter_1, + parameter_0, 
+ False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) + swish_0 = paddle._C_ops.swish(batch_norm__426) + del ( + add_0, + add_1, + add_10, + add_11, + add_13, + add_14, + add_16, + add_17, + add_18, + add_19, + add_20, + add_3, + add_4, + add_5, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + 
batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + 
batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + 
swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_7, + swish_8, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py new file mode 100644 index 000000000..df50680d5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py @@ -0,0 +1,3860 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.652218") + max_val = float("1.18857") + mean = float("0.0280274") + std = float("0.238618") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("0.840697") + max_val = float("1.38324") + mean = float("0.98305") + std = float("0.0683125") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.0026422") + max_val = float("0.0571294") + mean = float("0.00840019") + std = float("0.00480438") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.153452") + max_val = float("0.0567554") + mean = float("-0.0268421") + std = float("0.029361") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0814399") + max_val = float("0.0603684") + mean = float("-0.00033216") + std = float("0.00506095") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.44561") + max_val = float("0.100505") + mean = float("-0.084569") + std = float("0.10453") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192] + dtype = "float32" + min_val = float("0.827482") + max_val = float("1.2095") + mean = float("0.926275") + std = float("0.0461945") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("0.00190456") + max_val = float("0.0205137") + mean = float("0.00863049") + std = float("0.00430541") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192] + dtype = "float32" + min_val = float("-0.0318185") + max_val = float("0.0379224") + mean = float("-0.000110429") + std = float("0.0186888") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0393104") + max_val = float("0.0483612") + mean = float("-0.000255694") + std = float("0.00325694") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [192] + dtype = "float32" + min_val = float("-0.44561") + max_val = float("0.100505") + mean = float("-0.084569") + std = float("0.10453") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("0.860758") + max_val = float("1.4208") + mean = float("1.11187") + std = 
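# The model.py hunks above wrap every paddle._C_ops.batch_norm call in the same
# output-normalization idiom: the raw result is passed through a lambda that pads
# a bare tensor into a fixed 6-tuple, so the six-way unpacking assignment works
# whether the op returns a single tensor or the full (y, mean, variance,
# saved_mean, saved_variance, reserve) sequence. Below is a minimal, framework-free
# sketch of just that idiom; plain Python values stand in for tensors, and
# pad_to_six is a hypothetical name for the anonymous lambda in the generated code.

def pad_to_six(out):
    # Same rule as the generated code: sequences pass through untouched,
    # a bare value is padded with five None placeholders.
    return out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None)

# A bare output gets padded so the 6-way unpack succeeds ...
y, mean, var, saved_mean, saved_var, reserve = (lambda x, f: f(x))(0.5, pad_to_six)
assert (y, reserve) == (0.5, None)

# ... while an already-packed result is left unchanged.
assert pad_to_six((0.5, 0.0, 1.0, 0.0, 1.0, b"")) == (0.5, 0.0, 1.0, 0.0, 1.0, b"")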
float("0.0818408") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.00352246") + max_val = float("0.035092") + mean = float("0.0105274") + std = float("0.00437158") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("-0.0797058") + max_val = float("0.0680474") + mean = float("-0.0157311") + std = float("0.0255872") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0573713") + max_val = float("0.0648518") + mean = float("-0.000118352") + std = float("0.00301846") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.519542") + max_val = float("0.119264") + mean = float("-0.17366") + std = float("0.128125") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("0.843388") + max_val = float("1.65187") + mean = float("1.06405") + std = float("0.100937") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("0.0093173") + max_val = float("0.0586855") + mean = float("0.0231857") + std = float("0.00925497") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("-0.168595") + max_val = float("0.0949792") + mean = float("-0.0552586") + std = float("0.0424072") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0550749") + max_val = float("0.0694004") + mean = float("-0.000238535") + std = float("0.00329047") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.455754") + max_val = float("0.186932") + mean = float("-0.0819852") + std = float("0.102015") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.841905") + max_val = float("1.25543") + mean = float("1.02686") + std = float("0.0670556") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00382876") + max_val = float("0.0168788") + mean = float("0.00831223") + std = float("0.00247857") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0941842") + max_val = float("0.0492551") + mean = float("-0.0169118") + std = float("0.0260979") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0820429") + max_val = float("0.0807641") + mean = float("-0.00017519") + std = float("0.00465979") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.217783") + max_val = float("0.0355625") + mean = float("-0.0691304") + std = float("0.0386326") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192] + dtype = "float32" + min_val = float("0.843901") + max_val 
= float("1.15229") + mean = float("1.01548") + std = float("0.0503056") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("0.00247351") + max_val = float("0.0164084") + mean = float("0.00608305") + std = float("0.00215721") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("-0.076298") + max_val = float("0.0940074") + mean = float("-0.0220292") + std = float("0.025105") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0441749") + max_val = float("0.05036") + mean = float("-0.000272052") + std = float("0.0041864") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.296363") + max_val = float("-0.00731421") + mean = float("-0.0909181") + std = float("0.0603085") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.781699") + max_val = float("1.34829") + mean = float("1.05295") + std = float("0.0659017") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("0.00518533") + max_val = float("0.0436202") + mean = float("0.0137517") + std = float("0.00626765") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192] + dtype = "float32" + min_val = float("-0.24857") + max_val = float("0.288341") + mean = float("-0.0322151") + std = float("0.0764451") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0319297") + max_val = float("0.0371234") + mean = float("-6.61646e-05") + std = float("0.00247009") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192] + dtype = "float32" + min_val = float("-0.530523") + max_val = float("1.03181") + mean = float("0.148142") + std = float("0.25938") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [192] + dtype = "float32" + min_val = float("0.732244") + max_val = float("1.56838") + mean = float("1.01394") + std = float("0.106713") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [192] + dtype = "float32" + min_val = float("0.00474578") + max_val = float("0.0449361") + mean = float("0.0159591") + std = float("0.00817663") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [192] + dtype = "float32" + min_val = float("-0.23459") + max_val = float("0.15968") + mean = float("-0.0416686") + std = float("0.0471789") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.130799") + max_val = float("0.0914109") + mean = float("-0.000795506") + std = float("0.00978873") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.290307") + max_val = float("0.172582") + mean = float("-0.0708609") + std = float("0.105586") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = 
"float32" + min_val = float("0.730168") + max_val = float("1.20841") + mean = float("0.877696") + std = float("0.0778222") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("0.00209252") + max_val = float("0.0172969") + mean = float("0.00895461") + std = float("0.00362651") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.030881") + max_val = float("0.0264932") + mean = float("-0.00664423") + std = float("0.0163084") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0481719") + max_val = float("0.0491173") + mean = float("-0.00114232") + std = float("0.0059524") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.290307") + max_val = float("0.172582") + mean = float("-0.0708609") + std = float("0.105586") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("0.970765") + max_val = float("1.31932") + mean = float("1.13205") + std = float("0.0751806") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("0.00509897") + max_val = float("0.0269461") + mean = float("0.0144103") + std = float("0.00491216") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.0593684") + max_val = float("0.0709636") + mean = float("-0.00964274") + std = float("0.0202695") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0744412") + max_val = float("0.0808028") + mean = float("-0.000134804") + std = float("0.00586885") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("-0.672978") + max_val = float("0.110937") + mean = float("-0.259195") + std = float("0.150512") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("0.800552") + max_val = float("1.41215") + mean = float("1.04504") + std = float("0.11692") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("0.0141801") + max_val = float("0.0721458") + mean = float("0.0300632") + std = float("0.0107204") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.0891538") + max_val = float("0.0552256") + mean = float("-0.0348822") + std = float("0.0246653") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0755165") + max_val = float("0.0788485") + mean = float("-0.00042175") + std = float("0.00651052") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("-0.644167") + max_val = float("0.152209") + mean = float("-0.155579") + std = float("0.115703") + data = None + + +class Program_weight_tensor_parameter_56: + name = 
"parameter_56" + shape = [96] + dtype = "float32" + min_val = float("0.849539") + max_val = float("1.26621") + mean = float("1.03329") + std = float("0.0722571") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("0.00630263") + max_val = float("0.0491987") + mean = float("0.01393") + std = float("0.00582943") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96] + dtype = "float32" + min_val = float("-0.115691") + max_val = float("0.0294622") + mean = float("-0.0334293") + std = float("0.0282444") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0675787") + max_val = float("0.0715923") + mean = float("-0.000602525") + std = float("0.00883627") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.19838") + max_val = float("0.0828483") + mean = float("-0.0298722") + std = float("0.0460234") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.684756") + max_val = float("1.33599") + mean = float("0.954296") + std = float("0.0884899") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("0.00456548") + max_val = float("0.0382428") + mean = float("0.00924608") + std = float("0.00437704") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-0.0819243") + max_val = float("0.0486769") + mean = float("-0.0149872") + std = float("0.0303643") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0851124") + max_val = float("0.0753681") + mean = float("-0.000267914") + std = float("0.00746335") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.335793") + max_val = float("0.0181512") + mean = float("-0.108645") + std = float("0.0840511") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.72996") + max_val = float("1.20589") + mean = float("1.05546") + std = float("0.0750732") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("0.00560508") + max_val = float("0.0383423") + mean = float("0.0151426") + std = float("0.00688387") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.364406") + max_val = float("0.413851") + mean = float("-0.0185548") + std = float("0.134763") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0556938") + max_val = float("0.0565333") + mean = float("-3.0497e-05") + std = float("0.00548975") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("-1.07759") + max_val = float("2.35772") + mean = float("0.310808") + std = float("0.586812") + data = None + + +class 
Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.468543") + max_val = float("1.40561") + mean = float("0.882514") + std = float("0.167348") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("0.00504964") + max_val = float("0.106532") + mean = float("0.0264739") + std = float("0.0164013") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.217561") + max_val = float("0.181267") + mean = float("-0.0162134") + std = float("0.0711045") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.149095") + max_val = float("0.112249") + mean = float("-0.00101846") + std = float("0.0196923") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.064872") + max_val = float("0.067209") + mean = float("-0.00182682") + std = float("0.0118552") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.126562") + max_val = float("0.145589") + mean = float("-0.00030101") + std = float("0.0128623") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [48, 48, 3, 3] + 
dtype = "float32" + min_val = float("-0.105155") + max_val = float("0.121463") + mean = float("-0.000928041") + std = float("0.0141327") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.18791") + max_val = float("0.126171") + mean = float("-0.00105876") + std = float("0.0178172") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.102039") + max_val = float("0.121479") + mean = float("2.99226e-05") + std = float("0.0123136") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.355935") + max_val = float("0.392551") + mean = float("-0.00746683") + std = float("0.135778") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.58214") + max_val = float("1.61741") + mean = float("0.798871") + std = float("0.141455") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.00708678") + max_val = float("0.0623589") + mean = float("0.0174758") + std = float("0.00821209") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.138992") + max_val = float("0.0786226") + mean = float("-0.0313739") + std = float("0.0364738") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0964661") + max_val = float("0.0936981") + mean = float("-0.000909439") + std = float("0.0125162") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.337392") + max_val = float("0.172941") + mean = float("-0.0803987") + std = float("0.0895922") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + 
min_val = float("0.695083") + max_val = float("1.47609") + mean = float("0.990456") + std = float("0.0995214") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00754238") + max_val = float("0.0988877") + mean = float("0.0184313") + std = float("0.00883254") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.204684") + max_val = float("0.16103") + mean = float("-0.0503988") + std = float("0.049554") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.099788") + max_val = float("0.114986") + mean = float("-0.00122472") + std = float("0.0120725") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.307239") + max_val = float("0.101878") + mean = float("-0.0816011") + std = float("0.0993456") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.551598") + max_val = float("0.936081") + mean = float("0.809201") + std = float("0.0653613") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.0035") + max_val = float("0.0258289") + mean = float("0.0122223") + std = float("0.00384374") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.0463445") + max_val = float("0.0326283") + mean = float("-0.0141616") + std = float("0.0176504") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0439526") + max_val = float("0.0551086") + mean = float("-0.00157619") + std = float("0.00827761") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.307239") + max_val = float("0.101878") + mean = float("-0.0816011") + std = float("0.0993456") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.842432") + max_val = float("1.28751") + mean = float("1.03469") + std = float("0.0943599") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.013302") + max_val = float("0.0739954") + mean = float("0.0273068") + std = float("0.00856333") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0788659") + max_val = float("0.0467382") + mean = float("-0.0227617") + std = float("0.0268656") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0842602") + max_val = float("0.155772") + mean = float("-0.00023666") + std = float("0.00725825") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.731847") + max_val = float("0.315882") + mean = float("-0.275694") + std = float("0.175237") + data = None + + +class Program_weight_tensor_parameter_121: + 
name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.765079") + max_val = float("1.30982") + mean = float("1.04342") + std = float("0.115486") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0173859") + max_val = float("0.0675619") + mean = float("0.0355326") + std = float("0.00949136") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.119758") + max_val = float("0.0576312") + mean = float("-0.0514595") + std = float("0.0439535") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.141323") + max_val = float("0.125212") + mean = float("-0.000531571") + std = float("0.00857427") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.649336") + max_val = float("0.386205") + mean = float("-0.253708") + std = float("0.210117") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.743859") + max_val = float("1.37989") + mean = float("1.02545") + std = float("0.12205") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00720624") + max_val = float("0.0314703") + mean = float("0.0139725") + std = float("0.00426494") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.313971") + max_val = float("0.275864") + mean = float("0.0100895") + std = float("0.0634222") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.167863") + max_val = float("0.111638") + mean = float("-0.000518773") + std = float("0.0111108") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.23906") + max_val = float("0.172271") + mean = float("-0.0410503") + std = float("0.0886321") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("0.917932") + max_val = float("1.41199") + mean = float("1.07275") + std = float("0.0920705") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("0.00501564") + max_val = float("0.0446779") + mean = float("0.0113275") + std = float("0.00531961") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.066575") + max_val = float("0.0649515") + mean = float("0.00698475") + std = float("0.0279665") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.112935") + max_val = float("0.117872") + mean = float("-0.000426335") + std = float("0.00968317") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.540293") + max_val = float("-0.102122") + mean = float("-0.294636") + std = float("0.0708506") + data 
= None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.648817") + max_val = float("1.08017") + mean = float("0.851902") + std = float("0.0725407") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.00844823") + max_val = float("0.0608477") + mean = float("0.0196898") + std = float("0.00777921") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.103642") + max_val = float("0.0398287") + mean = float("-0.0337268") + std = float("0.0283827") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0581647") + max_val = float("0.0551684") + mean = float("-0.000715396") + std = float("0.00866401") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-0.522165") + max_val = float("0.213846") + mean = float("-0.168509") + std = float("0.0774913") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384] + dtype = "float32" + min_val = float("0.849121") + max_val = float("1.39276") + mean = float("1.06266") + std = float("0.0773261") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("0.00619419") + max_val = float("0.0326996") + mean = float("0.0131745") + std = float("0.00396857") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("-0.120299") + max_val = float("0.0721486") + mean = float("-0.0363523") + std = float("0.0318809") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0950377") + max_val = float("0.108554") + mean = float("-0.000578436") + std = float("0.00775758") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.384308") + max_val = float("0.227819") + mean = float("-0.118179") + std = float("0.102012") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.868523") + max_val = float("1.51316") + mean = float("1.12307") + std = float("0.119072") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.0671554") + max_val = float("0.590166") + mean = float("0.189132") + std = float("0.0743371") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-1.66989") + max_val = float("0.864913") + mean = float("-0.128574") + std = float("0.49302") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.113072") + max_val = float("0.078636") + mean = float("-9.22477e-05") + std = float("0.00673866") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.24328") + max_val = 
float("0.168953") + mean = float("-0.0173023") + std = float("0.0539756") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.617702") + max_val = float("1.01648") + mean = float("0.837238") + std = float("0.0631802") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00614033") + max_val = float("0.0266385") + mean = float("0.0103423") + std = float("0.0026319") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.127304") + max_val = float("0.0867262") + mean = float("-0.0538262") + std = float("0.0399598") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0383872") + max_val = float("0.0621299") + mean = float("-0.00140552") + std = float("0.00644221") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.24328") + max_val = float("0.168953") + mean = float("-0.0173023") + std = float("0.0539756") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.874918") + max_val = float("1.46078") + mean = float("1.10611") + std = float("0.129545") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.0245048") + max_val = float("0.0980499") + mean = float("0.045066") + std = float("0.0114572") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.272811") + max_val = float("0.0283262") + mean = float("-0.122117") + std = float("0.0543901") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.037555") + max_val = float("0.0532694") + mean = float("-0.000403704") + std = float("0.00395116") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.311171") + max_val = float("0.0670066") + mean = float("-0.115181") + std = float("0.0802375") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.910219") + max_val = float("1.44564") + mean = float("1.10838") + std = float("0.101829") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0298363") + max_val = float("0.10738") + mean = float("0.0514797") + std = float("0.0140253") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.487163") + max_val = float("0.229195") + mean = float("-0.132677") + std = float("0.0943938") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0485576") + max_val = float("0.040365") + mean = float("-0.000462303") + std = float("0.00440033") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape 
= [192] + dtype = "float32" + min_val = float("-0.444127") + max_val = float("0.411817") + mean = float("-0.1378") + std = float("0.130204") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.955474") + max_val = float("1.3718") + mean = float("1.11018") + std = float("0.072198") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0481328") + max_val = float("0.201416") + mean = float("0.0730914") + std = float("0.0211211") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.272425") + max_val = float("0.493843") + mean = float("-0.124677") + std = float("0.0789093") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0520012") + max_val = float("0.0916206") + mean = float("-0.00073398") + std = float("0.0076239") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.16409") + max_val = float("0.00104506") + mean = float("-0.0652767") + std = float("0.0261643") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.819388") + max_val = float("1.06661") + mean = float("0.968901") + std = float("0.0460972") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.0308169") + max_val = float("0.0790712") + mean = float("0.0462185") + std = float("0.00870431") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.182206") + max_val = float("0.0940335") + mean = float("-0.089627") + std = float("0.0463932") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0262639") + max_val = float("0.0504804") + mean = float("-0.000603328") + std = float("0.00628844") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [512] + dtype = "float32" + min_val = float("-4.82816") + max_val = float("-0.111144") + mean = float("-2.29505") + std = float("0.77518") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("2.1017") + max_val = float("5.21657") + mean = float("3.70059") + std = float("0.482718") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("0.00200829") + max_val = float("0.0114641") + mean = float("0.00406053") + std = float("0.00119042") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [512] + dtype = "float32" + min_val = float("-0.123017") + max_val = float("0.0755615") + mean = float("-0.042568") + std = float("0.0275421") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0810492") + max_val = float("0.133509") + mean = float("-0.000971967") + std = float("0.00774565") + data = None + + +class 
Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [384] + dtype = "float32" + min_val = float("-0.0162429") + max_val = float("-0.000278986") + mean = float("-0.0054732") + std = float("0.00365563") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.18691") + max_val = float("0.145937") + mean = float("-0.00212935") + std = float("0.00665719") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("-2.38779") + max_val = float("3.17061") + mean = float("-0.203411") + std = float("0.563155") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("0.123346") + max_val = float("2.40428") + mean = float("0.524679") + std = float("0.3349") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("0.000128668") + max_val = float("0.0027455") + mean = float("0.00061953") + std = float("0.000399688") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.0609001") + max_val = float("0.0904291") + mean = float("0.010232") + std = float("0.0217379") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0576781") + max_val = float("0.0497244") + mean = float("-0.000313562") + std = float("0.00494929") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("-2.38779") + max_val = float("3.17061") + mean = float("-0.203411") + std = float("0.563155") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("0.678982") + max_val = float("3.07273") + mean = float("1.54519") + std = float("0.450797") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192] + dtype = "float32" + min_val = float("0.00238059") + max_val = float("0.0225906") + mean = float("0.00724614") + std = float("0.00305679") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.212221") + max_val = float("0.164978") + mean = float("0.0097172") + std = float("0.0447557") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0724552") + max_val = float("0.0669129") + mean = float("-8.66037e-05") + std = float("0.00438841") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("-3.43225") + max_val = float("1.16814") + mean = float("-1.42818") + std = float("0.634737") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("0.390538") + max_val = float("1.72646") + mean = float("1.08981") + std = float("0.189952") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192] + dtype = "float32" + min_val = float("0.0318757") + max_val = float("0.213938") + mean = 
float("0.0702584") + std = float("0.0222017") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [192] + dtype = "float32" + min_val = float("-1.23721") + max_val = float("0.358903") + mean = float("-0.222679") + std = float("0.169848") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0627818") + max_val = float("0.0522698") + mean = float("-0.000387135") + std = float("0.00513507") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [192] + dtype = "float32" + min_val = float("-3.87733") + max_val = float("4.24375") + mean = float("-0.629121") + std = float("0.988583") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [192] + dtype = "float32" + min_val = float("0.579177") + max_val = float("4.17445") + mean = float("1.54468") + std = float("0.398498") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [192] + dtype = "float32" + min_val = float("0.00376591") + max_val = float("0.0199072") + mean = float("0.00750177") + std = float("0.0024196") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.181981") + max_val = float("0.142706") + mean = float("0.0472299") + std = float("0.0360465") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0974781") + max_val = float("0.0606577") + mean = float("-0.00130154") + std = float("0.00921657") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("-2.93753") + max_val = float("1.02574") + mean = float("-0.426705") + std = float("0.681645") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("0.700073") + max_val = float("3.61084") + mean = float("1.48155") + std = float("0.505235") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192] + dtype = "float32" + min_val = float("0.00182554") + max_val = float("0.00715893") + mean = float("0.00350375") + std = float("0.000839174") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.0595251") + max_val = float("0.0837498") + mean = float("0.0142918") + std = float("0.0271037") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0725004") + max_val = float("0.0651294") + mean = float("-0.000532804") + std = float("0.00742023") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [384] + dtype = "float32" + min_val = float("-2.84234") + max_val = float("1.12211") + mean = float("-0.753291") + std = float("0.497125") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [384] + dtype = "float32" + min_val = float("0.420357") + max_val = float("1.80213") + mean = float("0.867653") + std = float("0.218147") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [384] + dtype = "float32" + 
min_val = float("0.00857563") + max_val = float("0.0680256") + mean = float("0.0166475") + std = float("0.00541149") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [384] + dtype = "float32" + min_val = float("-0.467733") + max_val = float("0.319199") + mean = float("0.00858269") + std = float("0.0842052") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0544237") + max_val = float("0.0537574") + mean = float("-0.000167554") + std = float("0.00423747") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256] + dtype = "float32" + min_val = float("-2.82015") + max_val = float("1.46513") + mean = float("-1.07771") + std = float("0.633538") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [256] + dtype = "float32" + min_val = float("0.419279") + max_val = float("1.76958") + mean = float("0.978268") + std = float("0.170537") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [256] + dtype = "float32" + min_val = float("0.00255461") + max_val = float("0.0103385") + mean = float("0.00516842") + std = float("0.00131104") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [256] + dtype = "float32" + min_val = float("-0.222755") + max_val = float("0.223332") + mean = float("-0.057983") + std = float("0.0714632") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.122474") + max_val = float("0.173112") + mean = float("-0.00112125") + std = float("0.0137328") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("-0.0217534") + max_val = float("0.00201664") + mean = float("-0.00613465") + std = float("0.00506215") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.23422") + max_val = float("0.177901") + mean = float("-0.00406409") + std = float("0.0102684") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-2.2781") + max_val = float("0.7544") + mean = float("-0.115735") + std = float("0.508041") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.0586392") + max_val = float("2.30658") + mean = float("0.261357") + std = float("0.366816") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("5.76256e-12") + max_val = float("0.00191318") + mean = float("0.000441882") + std = float("0.000366175") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("-0.048434") + max_val = float("0.0685867") + mean = float("0.00619257") + std = float("0.0183565") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0376665") + max_val = float("0.0662012") + mean = float("-0.000297498") + std = float("0.00560219") + data = None + + +class 
Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-2.2781") + max_val = float("0.7544") + mean = float("-0.115735") + std = float("0.508041") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("0.35139") + max_val = float("3.24211") + mean = float("1.2913") + std = float("0.633692") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.00288832") + max_val = float("0.0301202") + mean = float("0.0128354") + std = float("0.00565579") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("-0.186493") + max_val = float("0.170245") + mean = float("0.0280013") + std = float("0.0660658") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0540548") + max_val = float("0.0585126") + mean = float("-0.000334364") + std = float("0.00738032") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-2.79991") + max_val = float("1.50593") + mean = float("-1.09119") + std = float("0.696756") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("0.324891") + max_val = float("1.80804") + mean = float("1.07292") + std = float("0.213034") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.0318799") + max_val = float("0.113866") + mean = float("0.0602373") + std = float("0.0156272") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("-1.57323") + max_val = float("0.320517") + mean = float("-0.143123") + std = float("0.265242") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0521981") + max_val = float("0.0688539") + mean = float("-0.000580267") + std = float("0.00795035") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-2.54041") + max_val = float("0.664474") + mean = float("-0.0487346") + std = float("0.474109") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.0773368") + max_val = float("3.15117") + mean = float("0.280349") + std = float("0.408758") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("1.3812e-10") + max_val = float("0.0159383") + mean = float("0.00155641") + std = float("0.00234573") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("-0.0454894") + max_val = float("0.108167") + mean = float("0.0171959") + std = float("0.0277947") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.11378") + max_val = float("0.0697877") + mean = float("-0.00124357") + std = 
float("0.00831874") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-2.54041") + max_val = float("0.664475") + mean = float("-0.0487346") + std = float("0.474109") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("0.343863") + max_val = float("2.99332") + mean = float("0.929472") + std = float("0.412076") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.00889548") + max_val = float("0.0360352") + mean = float("0.0196807") + std = float("0.00631594") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("-0.183188") + max_val = float("0.203949") + mean = float("0.0349526") + std = float("0.0692374") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0529212") + max_val = float("0.050965") + mean = float("-0.000562978") + std = float("0.00752018") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-2.01882") + max_val = float("1.6565") + mean = float("-0.91983") + std = float("0.650475") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96] + dtype = "float32" + min_val = float("0.443451") + max_val = float("1.97486") + mean = float("1.06386") + std = float("0.2277") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("0.00968838") + max_val = float("0.115069") + mean = float("0.0238439") + std = float("0.011445") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("-2.04544") + max_val = float("0.240057") + mean = float("-0.0326069") + std = float("0.235198") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.102806") + max_val = float("0.1255") + mean = float("-0.000419155") + std = float("0.00850691") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-1.61915") + max_val = float("1.88666") + mean = float("0.00600959") + std = float("0.838747") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96] + dtype = "float32" + min_val = float("0.348796") + max_val = float("1.32224") + mean = float("0.700437") + std = float("0.236363") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("0.00889485") + max_val = float("0.0570436") + mean = float("0.0271583") + std = float("0.0110354") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("-0.327651") + max_val = float("0.488071") + mean = float("-0.0740222") + std = float("0.115843") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.124833") + max_val = float("0.114017") 
+ mean = float("-0.001301") + std = float("0.0137993") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-2.46673") + max_val = float("1.7157") + mean = float("0.340991") + std = float("0.679024") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96] + dtype = "float32" + min_val = float("0.539684") + max_val = float("4.88444") + mean = float("1.48216") + std = float("0.959789") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("0.00993858") + max_val = float("0.070056") + mean = float("0.0215824") + std = float("0.0104011") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("-0.283006") + max_val = float("0.290595") + mean = float("-0.00648709") + std = float("0.112547") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0886065") + max_val = float("0.166141") + mean = float("-0.000584531") + std = float("0.0139549") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("-4.44154") + max_val = float("2.00924") + mean = float("-0.0984691") + std = float("0.883168") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("0.574537") + max_val = float("4.5171") + mean = float("1.08183") + std = float("0.426228") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.00770389") + max_val = float("0.101175") + mean = float("0.0267908") + std = float("0.0158745") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("-0.351997") + max_val = float("0.260742") + mean = float("0.0118419") + std = float("0.104813") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0873539") + max_val = float("0.0704455") + mean = float("-0.000213424") + std = float("0.00708121") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [128] + dtype = "float32" + min_val = float("-2.15168") + max_val = float("1.36722") + mean = float("-0.673621") + std = float("0.681958") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [128] + dtype = "float32" + min_val = float("0.364727") + max_val = float("2.2521") + mean = float("0.875301") + std = float("0.236523") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [128] + dtype = "float32" + min_val = float("0.000976585") + max_val = float("0.0184592") + mean = float("0.00559844") + std = float("0.0023354") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [128] + dtype = "float32" + min_val = float("-0.295433") + max_val = float("0.256224") + mean = float("-0.0630695") + std = float("0.112593") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = 
float("-0.211698") + max_val = float("0.196697") + mean = float("-0.00125294") + std = float("0.0226183") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.0253266") + max_val = float("0.00358212") + mean = float("-0.0083497") + std = float("0.00765327") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.293386") + max_val = float("0.279833") + mean = float("-0.00579641") + std = float("0.0184424") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0497361") + max_val = float("0.082633") + mean = float("-0.00112833") + std = float("0.0118656") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0995478") + max_val = float("0.100891") + mean = float("-0.00032949") + std = float("0.012257") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.09303") + max_val = float("0.111166") + mean = float("-0.00105886") + std = float("0.0130745") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0686653") + max_val = float("0.073192") + mean = float("-0.00264945") + std = float("0.015246") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0915852") + max_val = float("0.0964297") + mean = float("-0.000904809") + std = float("0.0119445") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0993379") + max_val = float("0.0790286") + mean = float("-0.00075533") + std = float("0.0139095") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0993822") + max_val = float("0.120858") + mean = float("-0.00232535") + std = 
float("0.0225912") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.158166") + max_val = float("0.239253") + mean = float("0.000385605") + std = float("0.023753") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("-3.31552") + max_val = float("3.83527") + mean = float("0.267103") + std = float("1.21077") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("0.510535") + max_val = float("5.40356") + mean = float("1.12504") + std = float("0.54684") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96] + dtype = "float32" + min_val = float("0.0102515") + max_val = float("0.199232") + mean = float("0.0491335") + std = float("0.034631") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [96] + dtype = "float32" + min_val = float("-0.48286") + max_val = float("0.474603") + mean = float("-0.0301538") + std = float("0.175452") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.0965118") + max_val = float("0.119571") + mean = float("-0.000173883") + std = float("0.0116758") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.1726") + max_val = float("0.173966") + mean = float("-0.00244534") + std = float("0.0334275") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.162368") + max_val = float("0.15788") + mean = float("-0.0128963") + std = float("0.0255366") + data = None + + +class Program_weight_tensor_parameter_323: + name 
= "parameter_323" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.101262") + max_val = float("0.144448") + mean = float("-0.00119167") + std = float("0.0240568") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.0939293") + max_val = float("0.0880602") + mean = float("-0.000667988") + std = float("0.0206646") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.121119") + max_val = float("0.170902") + mean = float("-0.000408415") + std = float("0.0230859") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = 
float("-0.19645") + max_val = float("0.180409") + mean = float("-0.00327746") + std = float("0.0345822") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.189487") + max_val = float("0.153256") + mean = float("-0.00117991") + std = float("0.037909") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.165199") + max_val = float("0.114686") + mean = float("-0.000200965") + std = float("0.0197641") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.233333") + max_val = float("0.23547") + mean = float("-0.000463458") + std = float("0.0335966") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [16] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.296254") + max_val = float("0.317451") + mean = float("-0.000376615") + std = float("0.0448856") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.229598") + max_val = float("0.264831") + mean = float("-0.00222273") + std = float("0.0664701") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..d7f7b18a2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +7bb8a2b2502a471463ad03a6babccb8a2db42f0cbace538187cdc42cf672f3d5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/input_meta.py new file mode 100644 index 000000000..768c21597 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/input_meta.py @@ -0,0 +1,87 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("9.31323e-10") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("-244.623") + max_val = float("811.433") + mean = float("255.675") + std = float("162.392") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [5376, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("508.0") + mean = float("256.0") + std = float("147.76") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("326.78") + mean = float("160.052") + std = float("110.95") + data = None + + +class Program_weight_tensor_data_6: + name = 
"data_6" + shape = [2, 12, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py new file mode 100644 index 000000000..098cbb449 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py @@ -0,0 +1,385 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) + del data_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x-1x-1xf32) <- 
(2x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_1, full_2, subtract_2 + + # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) + del full_3, subtract_3 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x4x-1xf32) <- (2x-1x4xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int32") + del full_4, full_5, full_6 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_7, data_0] + del data_0, full_7 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) + del stack_0 + + # pd_op.squeeze: (2x-1xi32) <- (2x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([2x-1xi32, 2x-1xi32]) <- (2x-1xi32, 2x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x-1x2xi32) <- ([2x-1xi32, 2x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.gather_nd: (2x-1x-1xf32) <- (2x4x-1xf32, 2x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: 
(2x-1x-1xf32) <- (2x-1x-1xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) + del data_3, full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_8) + del unsqueeze_3 + + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) <- (2x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_8) + del full_8, unsqueeze_0 + + # builtin.split: (2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32) <- ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) + combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x-1x-1x4xf32) <- ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_9) + del combine_2, full_9 + + # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_10) + del full_10, min_0 + + # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) + cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_2) + + # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] + ) + del full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # 
pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] + ) + del full_int_array_0, full_int_array_6, shape64_0 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x-1x13xf32, 2x-1x13xi64) <- (2x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, multiply_1 + + # pd_op.one_hot: (2x-1x13x-1xf32) <- (2x-1x13xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 + ) + del slice_5, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x-1x-1xf32) <- (2x-1x13x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_6) + del sum_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_2) + del cast_2, multiply_2 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_6) + del data_6, multiply_3 + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) + del divide_0, full_12, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..a0a4bdddf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/input_meta.py new file mode 100644 index 000000000..cb0882d83 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/input_meta.py @@ -0,0 +1,42 
@@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("9.31323e-10") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376, 68] + dtype = "float32" + min_val = float("-4.51732") + max_val = float("13.1562") + mean = float("7.42927e-06") + std = float("1.41402") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [5376, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("508.0") + mean = float("256.0") + std = float("147.76") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [5376, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py new file mode 100644 index 000000000..4910e2ad1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (2x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + 
transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..f0876b5b3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/input_meta.py new 
file mode 100644 index 000000000..e5a9cfd6e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py new file mode 100644 index 000000000..301a09c22 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("4"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (2x-1xb) <- (2x-1xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (2x-1xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json new file mode 100644 index 000000000..d8719c2c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE-S_vehicle", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/input_meta.py new file mode 100644 index 000000000..43e90e8e7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("-7.64445") + max_val = float("71.5192") + mean = float("27.7741") + std = float("19.0893") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("2.86074") + max_val = float("40.8475") + mean = float("22.0368") + std = float("10.1043") + data = None + + +class 
Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 5376, 4] + dtype = "float32" + max_val = float("0.945922") + mean = float("0.00102082") + std = float("0.0260199") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [43.9036] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 5376, 68] + dtype = "float32" + min_val = float("-4.51732") + max_val = float("13.1562") + mean = float("7.42927e-06") + std = float("1.41402") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [5376, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("63.5") + mean = float("27.8095") + std = float("18.327") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py new file mode 100644 index 000000000..5db76f83d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py @@ -0,0 +1,509 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (2x-1xi32) <- (2x-1xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (2x-1xf32) <- (2x-1x4xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- 
(-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = 
paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (2x-1x68xi32) <- (2x-1x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (2x-1x68xb) <- (2x-1x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (2x-1x68xf32, 2x-1x68xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, 
split_8) + del split_8 + + # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- 
(-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..df6fb86a6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py new file mode 100644 index 000000000..fd9b56629 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py @@ -0,0 +1,27 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 4116, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 4116] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 
4116, 4] + dtype = "float32" + max_val = float("0.947339") + mean = float("0.000280391") + std = float("0.0157366") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py new file mode 100644 index 000000000..a899ea187 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x4116x5xf32) <- (8x4116xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (8x4116x4xf32) <- (8x4116x5xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (8x4116x4xf32) <- (8x4116x4xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x4116x4xf32) <- (8x4116x4xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x4116x4xf32) <- (8x4116x4xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (8x4116x4xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (8x4116x4xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..89a179d30 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +de57ef5ee73523dbca992bd101b3d1d176ccd0302317b6af8565a53d1e458be9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/input_meta.py new file mode 100644 index 000000000..e3a32d9be --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 196] + dtype = "float32" + min_val = float("1.83332") + max_val = float("480.06") + mean = float("206.189") + std = float("95.7397") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 784] + dtype = "float32" + min_val = float("0.727295") + max_val = float("491.367") + mean = float("206.423") + std = float("95.9052") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 3136] + dtype = "float32" + min_val = float("1.38277") + max_val = float("497.02") + mean = float("206.481") + std = float("95.9469") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/model.py new file mode 100644 index 000000000..8ee1235d3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/model.py @@ -0,0 +1,131 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("9"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x196xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_0, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_0 = paddle._C_ops.scale(topk_1, full_1, float("0"), True) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("196"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x196xf32) <- (8x1x9xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_2, topk_1.dtype), full_2 + ) + del full_2, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_0 = [-2] + + # pd_op.sum: (8x1x196xf32) <- (8x1x9x196xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_0, None, False) + del one_hot_0 + + # pd_op.multiply: (8x1x196xf32) <- (8x1x196xf32, 8x1x1xf32) + multiply_0 = paddle._C_ops.multiply(sum_0, data_3) + del sum_0 + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x784xf32, 1xi32) + topk_2, topk_3 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_1, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_1 = paddle._C_ops.scale(topk_3, full_1, float("196"), True) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("784"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x784xf32) <- (8x1x9xi64, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + topk_3 % paddle.cast(full_3, topk_3.dtype), full_3 + ) + del full_3, topk_3 + + # pd_op.sum: (8x1x784xf32) <- (8x1x9x784xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_1, full_int_array_0, None, False) + del one_hot_1 + + # pd_op.multiply: (8x1x784xf32) <- (8x1x784xf32, 8x1x1xf32) + multiply_1 = paddle._C_ops.multiply(sum_1, data_3) + del sum_1 + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x3136xf32, 1xi32) + topk_4, topk_5 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_2, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_2, full_0 + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_2 = paddle._C_ops.scale(topk_5, full_1, float("980"), True) + del full_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("3136"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x3136xf32) <- (8x1x9xi64, 1xi32) + one_hot_2 = paddle._C_ops.one_hot( + topk_5 % paddle.cast(full_4, topk_5.dtype), full_4 + ) + del full_4, topk_5 + + # pd_op.sum: (8x1x3136xf32) <- (8x1x9x3136xf32, 1xi64) + sum_2 = paddle._C_ops.sum(one_hot_2, full_int_array_0, None, False) + del full_int_array_0, one_hot_2 + + # pd_op.multiply: (8x1x3136xf32) <- (8x1x3136xf32, 8x1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_2, data_3) + del data_3, sum_2 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x196xf32, 8x1x784xf32, 8x1x3136xf32]) <- (8x1x196xf32, 8x1x784xf32, 8x1x3136xf32) + combine_0 = [multiply_0, multiply_1, multiply_2] + del multiply_0, multiply_1, multiply_2 + + # pd_op.concat: (8x1x4116xf32) <- ([8x1x196xf32, 8x1x784xf32, 8x1x3136xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_5) + del combine_0 + + # builtin.combine: ([8x1x9xi64, 8x1x9xi64, 8x1x9xi64]) <- (8x1x9xi64, 8x1x9xi64, 8x1x9xi64) + combine_1 = [scale_0, scale_1, scale_2] + del scale_0, scale_1, scale_2 + + # pd_op.concat: (8x1x27xi64) <- ([8x1x9xi64, 8x1x9xi64, 8x1x9xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_5) + del combine_1, full_5 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt new file mode 100644 index 
000000000..4f6800c87 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +2110ebb6abb561e779d61c83f0d5c8580b4c167db46c1ce92f6ce0e75e4a0e62 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py new file mode 100644 index 000000000..7fd3d63e3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 4116] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00127551") + std = float("0.0356915") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 4116] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00127551") + std = float("0.0356915") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 35.3684, + 311.273, + 300.632, + 442.182, + 78.6168, + 241.764, + 110.41, + 266.46, + 195.413, + 193.28, + 236.373, + 224.0, + 130.415, + 227.85, + 158.792, + 248.688, + 360.901, + 338.022, + 374.268, + 347.726, + 304.951, + 76.6956, + 336.0, + 109.565, + 295.171, + 70.9146, + 350.609, + 111.437, + 18.5379, + 0.0, + 293.517, + 337.836, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 4116, 4] + dtype = "float32" + min_val = float("-317.929") + max_val = float("740.992") + mean = float("224.215") + std = float("149.051") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py new file mode 100644 index 000000000..0619d9191 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x4116xi64) <- (8x1x4116xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = 
paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x4116xi64) <- (8x4116xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (32928xi64) <- (8x4116xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (32928xi32) <- (8xi32, 32928xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 4116] + + # pd_op.reshape: (8x4116xi32) <- (32928xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x4116xb) <- (8x4116xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x4116xi32) <- (8x4116xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x4116xi32) <- (8x4116xb, 8x4116xi32, 8x4116xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (32928x4xf32) <- (8x4xf32, 32928xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 4116, 4] + + # pd_op.reshape: (8x4116x4xf32) <- (32928x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x4116x5xf32) <- (8x4116xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x4116x4xf32) <- (8x4116x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: 
(1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x4116x4xf32) <- (8x4116x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x4116xf32) <- (8x1x1xf32, 8x1x4116xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x4116xf32) <- (8x1x4116xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) 
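+        # Hedged reading of the generated ops above: prod_0 appears to be the pairwise GT-vs-anchor intersection area and scale_1 the union area plus a 1e-09 epsilon, so the divide below yields an 8x1x4116 IoU map.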
+ del full_4, subtract_3 + + # pd_op.divide: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x4116xf32) <- (8x1x4116xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x4116x1xf32) <- (8x4116xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..06c2e2c81 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +e4523a4b80f6d91bda08e60737d8d55feb6499fbd5650e10a21e653f89b2a688 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/input_meta.py new file mode 100644 index 000000000..0f55d3bba --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 4116, 68] + dtype = "float32" + min_val = float("-6.52177") + max_val = float("13.8275") + mean = float("2.55256e-05") + std = float("1.49822") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4116, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("444.0") + mean = float("224.0") + std = float("129.279") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4116, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py new file mode 100644 index 000000000..f692c519c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py @@ -0,0 +1,158 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2): + # pd_op.divide: (4116x2xf32) <- (4116x2xf32, 4116x1xf32) + divide_0 = paddle._C_ops.divide(data_1, data_2) + del data_1 + + # 
pd_op.shape64: (3xi64) <- (8x4116x68xf32) + shape64_0 = paddle._C_ops.shape64(data_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (8x4116x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x4116x2xf32) <- (-1x-1x2xf32, 4116x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x4116x2xf32) <- (-1x-1x2xf32, 4116x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4116x2xf32, -1x4116x2xf32]) <- (-1x4116x2xf32, -1x4116x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x4116x4xf32) <- 
([-1x4116x2xf32, -1x4116x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (-1x4116x4xf32) <- (-1x4116x4xf32) + share_data__0 = concat_0.detach() + + # pd_op.multiply: (-1x4116x4xf32) <- (-1x4116x4xf32, 4116x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, data_2) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_2, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__0, + softmax_0, + split_1, + transpose_0, + ) + + return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..921f13953 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +3fc0f07cdf416f4962400bce4989ce1d8133a51df64f85e39eba593d301c104c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py new file mode 100644 index 000000000..3981bf949 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4116, 4] + dtype = "float32" + min_val = float("-64.0") + max_val = float("512.0") + mean = float("224.0") + std = float("132.768") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 35.3684, + 311.273, + 300.632, + 442.182, + 78.6168, + 241.764, + 110.41, + 266.46, + 195.413, + 193.28, + 236.373, + 224.0, + 130.415, + 227.85, + 158.792, + 248.688, + 360.901, + 338.022, + 374.268, + 347.726, + 304.951, + 76.6956, + 336.0, + 109.565, + 295.171, + 70.9146, + 350.609, + 111.437, + 18.5379, + 0.0, + 293.517, + 337.836, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py new file mode 100644 index 000000000..218186811 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py @@ -0,0 +1,263 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (8x1x4xf32) <- (8x4xf32, 
1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.unsqueeze: (1x4116x4xf32) <- (4116x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_0, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] + + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 + + # pd_op.slice: (1x4116x2xf32) <- (1x4116x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x4116x2xf32) <- (1x4116x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 + + # pd_op.maximum: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x4116x2xf32) <- (8x4116x2xf32, 8x4116x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x4116x2xf32) <- (8x4116x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.prod: (8x4116xf32) <- (8x4116x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 + + # pd_op.subtract: (8x1x2xf32) <- (8x1x2xf32, 8x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x2xf32) <- (8x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (8x1xf32) <- (8x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 + + # pd_op.subtract: (1x4116x2xf32) <- (1x4116x2xf32, 1x4116x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x4116x2xf32) <- (1x4116x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (1x4116xf32) <- (1x4116x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 + + # pd_op.add: (8x4116xf32) <- (8x1xf32, 1x4116xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x4116xf32) <- (8x4116xf32, 8x4116xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x4116xf32) <- (8x4116xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-10"), True) + del full_2, subtract_3 + + # pd_op.divide: (8x4116xf32) <- 
(8x4116xf32, 8x4116xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [8, -1, 4116] + + # pd_op.reshape: (8x1x4116xf32) <- (8x4116xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0 + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_3, float("0"), True) + del add_1 + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del reshape_2 + + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_2, full_3, float("0"), True) + del add_2 + + # builtin.combine: ([8xf32, 8xf32]) <- (8xf32, 8xf32) + combine_0 = [scale_1, scale_2] + del scale_1, scale_2 + + # pd_op.stack: (8x2xf32) <- ([8xf32, 8xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.unsqueeze: (8x1x2xf32) <- (8x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) + del stack_0 + + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_0, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (4116xf32) <- (4116xf32, 4116xf32) + add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.scale: (4116xf32) <- (4116xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_3, float("0"), True) + del add_3 + + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + del full_int_array_1, full_int_array_3 + + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_0, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del data_0, full_int_array_7, full_int_array_8 + + # pd_op.add: (4116xf32) <- (4116xf32, 4116xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.scale: (4116xf32) <- (4116xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_3, float("0"), True) + del add_4, full_3 + + # builtin.combine: ([4116xf32, 4116xf32]) <- (4116xf32, 4116xf32) + combine_1 = [scale_3, scale_4] + del scale_3, scale_4 + + # pd_op.stack: (4116x2xf32) <- ([4116xf32, 4116xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.unsqueeze: (1x4116x2xf32) <- 
(4116x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) + del full_int_array_2 + + # pd_op.subtract: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 + + # pd_op.p_norm: (8x4116xf32) <- (8x4116x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False + ) + del subtract_4 + + # pd_op.reshape: (8x1x4116xf32) <- (8x4116xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) + del full_int_array_6, p_norm_0, stack_1 + + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/input_meta.py new file mode 100644 index 000000000..6d134b9a9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [24] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [24] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 768, 24, 24] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.34367") + mean = float("0.270209") + std = float("0.622509") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 384, 48, 48] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.4196") + mean = float("0.372588") + std = float("0.68828") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 192, 96, 96] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("12.9519") + mean = float("0.484099") + std = float("0.760233") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py new file mode 100644 index 
000000000..a98e31988 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) 
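+        # Hedged reading: scale_4..scale_7 appear to offset the stride-32 grid centers by +/-80 px, producing the ltrb anchor template that is stacked below (the 40/20 px offsets later in this forward play the same role for strides 16 and 8).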
+ + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: 
(-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- 
(-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (8x768x1x1xf32) <- (8x768x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: 
(8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 8x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 8x768x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x768x-1x-1xf32, 4x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 8x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + 
parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, 
full_int_array_5) + del parameter_16 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32) + swish_5 = 
paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32]) <- (8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (8x-1x4xf32) <- ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32]) <- (8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x-1x68xf32) <- ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, 
concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/weight_meta.py new file mode 100644 index 000000000..71ab4788d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00994362") + max_val = float("0.0295465") + mean = float("1.85202e-07") + std = float("0.00657191") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.132052") + max_val = float("0.152988") + mean = float("5.82513e-08") + std = float("0.00697616") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0439922") + max_val = float("0.203765") + mean = float("0.0504072") + std = float("0.039567") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.852608") + max_val = float("1.61916") + mean = float("1.21933") + std = float("0.143034") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.0001243") + max_val = float("0.002682") + mean = float("0.000430402") + std = float("0.000330101") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0351355") + max_val = float("0.0297269") + mean = float("-0.0034854") + std = float("0.0106233") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0519345") + max_val = float("0.0751488") + mean = float("-0.000115261") + std = float("0.00540221") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00467515") + max_val = float("0.00851965") + mean = float("3.17589e-05") + std = float("0.00259112") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00531911") + max_val = float("0.00943623") + mean = float("-9.32832e-05") + std = float("0.00138251") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-1.05015e-05") + max_val = float("0.000297828") + mean = float("1.38129e-05") + std = float("2.54719e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.327098") + max_val = float("0.890395") + mean = float("0.357592") + std = float("0.269366") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.01729") + max_val = float("1.7703") + mean = float("1.31569") + std = float("0.141051") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000190713") + max_val = float("0.00419513") + mean = 
float("0.00073314") + std = float("0.000566899") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.171341") + max_val = float("0.038741") + mean = float("-0.0247317") + std = float("0.0309201") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0756384") + max_val = float("0.068676") + mean = float("-0.000504495") + std = float("0.00650538") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00462125") + max_val = float("0.00952984") + mean = float("-0.000108344") + std = float("0.00180966") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0166403") + max_val = float("0.0150159") + mean = float("-1.40574e-05") + std = float("0.00152367") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00413451") + max_val = float("0.0247658") + mean = float("1.70316e-07") + std = float("0.00515376") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0879382") + max_val = float("0.115543") + mean = float("3.09738e-08") + std = float("0.0046703") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00504264") + max_val = float("0.0677623") + mean = float("0.025296") + std = float("0.012925") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.998659") + max_val = float("1.23249") + mean = float("1.10437") + std = float("0.040582") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("6.73466e-05") + max_val = float("0.00290197") + mean = float("0.000311055") + std = float("0.000316507") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0401042") + max_val = float("0.013136") + mean = float("-0.00636438") + std = float("0.00737522") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0493892") + max_val = float("0.0650258") + mean = float("-8.70157e-05") + std = float("0.00262175") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00258805") + max_val = float("0.00556427") + mean = float("9.34648e-05") + std = float("0.00146967") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00175289") + max_val = float("0.00489604") + mean = float("1.0622e-05") + std = float("0.000586047") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = 
float("-2.74996e-06") + max_val = float("6.80481e-05") + mean = float("2.24375e-06") + std = float("4.72166e-06") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.150027") + max_val = float("0.451389") + mean = float("0.229437") + std = float("0.0996485") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00298") + max_val = float("1.39843") + mean = float("1.18623") + std = float("0.059847") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000159452") + max_val = float("0.00362229") + mean = float("0.00070861") + std = float("0.000574984") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.108472") + max_val = float("0.0564258") + mean = float("-0.0264087") + std = float("0.0221104") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0480316") + max_val = float("0.0447625") + mean = float("-0.000359058") + std = float("0.00295423") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00203913") + max_val = float("0.00903738") + mean = float("-3.66196e-06") + std = float("0.000959619") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00522582") + max_val = float("0.0088395") + mean = float("-4.68691e-06") + std = float("0.000619469") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00290223") + max_val = float("0.0101817") + mean = float("1.30633e-07") + std = float("0.00299045") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0411349") + max_val = float("0.0738933") + mean = float("1.41499e-08") + std = float("0.00274114") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0141816") + max_val = float("0.0470838") + mean = float("0.0110249") + std = float("0.0102353") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.00835") + max_val = float("1.19911") + mean = float("1.06458") + std = float("0.0222771") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.6147e-05") + max_val = float("0.00134983") + mean = float("0.000155992") + std = float("0.000111102") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0238165") + max_val = float("0.0081075") + mean = float("-0.00384075") + std = float("0.00341938") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0354012") + max_val = float("0.0311471") + mean = float("-3.50872e-05") + std = float("0.0011905") + data = None + + +class 
Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00350695") + max_val = float("0.00217254") + mean = float("0.000104712") + std = float("0.000668859") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00236527") + max_val = float("0.00288539") + mean = float("2.74167e-05") + std = float("0.000209731") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-1.42393e-05") + max_val = float("0.000154221") + mean = float("5.88424e-06") + std = float("1.23428e-05") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.109319") + max_val = float("0.200294") + mean = float("0.0936331") + std = float("0.0420139") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.00715") + max_val = float("1.25105") + mean = float("1.07838") + std = float("0.0259236") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("9.71354e-05") + max_val = float("0.00323476") + mean = float("0.000618285") + std = float("0.000447346") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0504189") + max_val = float("0.0943639") + mean = float("-0.0192478") + std = float("0.0111401") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0485631") + max_val = float("0.0317378") + mean = float("-0.000183678") + std = float("0.00129677") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00522906") + max_val = float("0.00428608") + mean = float("1.59338e-05") + std = float("0.000442491") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0190742") + max_val = float("0.0352904") + mean = float("5.40338e-06") + std = float("0.000293143") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..22e5f661b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +e99f0bbda5d49a054d0abd2b59c60c1e02a4fbc2ea84bf54ca2ec398c82ba1ca \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/input_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/input_meta.py new file mode 100644 index 000000000..7738acf83 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/input_meta.py @@ -0,0 +1,134 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [8, 3, 448, 448] + dtype = "float32" + max_val = float("1.0") + mean = float("0.527216") + std = float("0.223765") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/model.py new file mode 100644 index 000000000..4f581b7bd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/model.py @@ -0,0 +1,7504 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, 
+ parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + 
parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + 
parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + 
parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + ): + # pd_op.conv2d: (8x32x224x224xf32) <- (8x3x448x448xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_18, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_18, parameter_697 + + # pd_op.batch_norm_: (8x32x224x224xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x224x224xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.swish: (8x32x224x224xf32) <- (8x32x224x224xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (8x32x224x224xf32) <- (8x32x224x224xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (8x32x224x224xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x224x224xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (8x32x224x224xf32) <- (8x32x224x224xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x64x224x224xf32) <- (8x32x224x224xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (8x64x224x224xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x224x224xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.swish: (8x64x224x224xf32) <- (8x64x224x224xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (8x96x112x112xf32) <- (8x64x224x224xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 
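+        # Annotation (hedged, not part of the original graph dump): the three stem
+        # convolutions above (3 -> 32 -> 32 -> 64 channels, first at stride 2) and the
+        # stride-2 conv2d_3 that follows are consistent with a CSPResNet-style backbone
+        # stem; the paired 48-channel branches further below resemble a CSP split with
+        # residual basic blocks, where the scalar input data_0 appears to act as the
+        # learnable residual scaling factor (alpha) of the PP-YOLOE+ backbone.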
+ + # pd_op.batch_norm_: (8x96x112x112xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x112x112xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.swish: (8x96x112x112xf32) <- (8x96x112x112xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x96x112x112xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x96x112x112xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_7 = 
paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_661, + parameter_660, + parameter_659, + parameter_658, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_658, parameter_659, parameter_660, parameter_661 + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_657 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_656, + parameter_655, + parameter_654, + parameter_653, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_653, parameter_654, parameter_655, parameter_656 + + # pd_op.multiply: (8x48x112x112xf32) <- (1xf32, 8x48x112x112xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (8x48x112x112xf32) <- (8x48x112x112xf32, 8x48x112x112xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (8x48x112x112xf32) <- (8x48x112x112xf32, 8x48x112x112xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_652 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_651, + parameter_650, + parameter_649, + parameter_648, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_648, parameter_649, parameter_650, parameter_651 + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_647 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + 
batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_646, + parameter_645, + parameter_644, + parameter_643, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_643, parameter_644, parameter_645, parameter_646 + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_642 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_641, + parameter_640, + parameter_639, + parameter_638, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_638, parameter_639, parameter_640, parameter_641 + + # pd_op.multiply: (8x48x112x112xf32) <- (1xf32, 8x48x112x112xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (8x48x112x112xf32) <- (8x48x112x112xf32, 8x48x112x112xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (8x48x112x112xf32) <- (8x48x112x112xf32, 8x48x112x112xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_637 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_636, + parameter_635, + parameter_634, + parameter_633, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_633, parameter_634, parameter_635, parameter_636 + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_632 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_631, + parameter_630, + parameter_629, + parameter_628, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_628, parameter_629, parameter_630, parameter_631 + + # 
pd_op.conv2d: (8x48x112x112xf32) <- (8x48x112x112xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_627 + + # pd_op.batch_norm_: (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x112x112xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_626, + parameter_625, + parameter_624, + parameter_623, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_623, parameter_624, parameter_625, parameter_626 + + # pd_op.multiply: (8x48x112x112xf32) <- (1xf32, 8x48x112x112xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (8x48x112x112xf32) <- (8x48x112x112xf32, 8x48x112x112xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (8x48x112x112xf32) <- (8x48x112x112xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (8x48x112x112xf32) <- (8x48x112x112xf32, 8x48x112x112xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([8x48x112x112xf32, 8x48x112x112xf32]) <- (8x48x112x112xf32, 8x48x112x112xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (8x96x112x112xf32) <- ([8x48x112x112xf32, 8x48x112x112xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (8x96x1x1xf32) <- (8x96x112x112xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (8x96x1x1xf32) <- (8x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_622 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) + del parameter_621 + + # pd_op.add: (8x96x1x1xf32) <- (8x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (8x96x1x1xf32) <- (8x96x1x1xf32) + hardsigmoid_0 = 
paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (8x96x112x112xf32) <- (8x96x112x112xf32, 8x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (8x128x112x112xf32) <- (8x96x112x112xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (8x128x112x112xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x112x112xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.swish: (8x128x112x112xf32) <- (8x128x112x112xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (8x192x56x56xf32) <- (8x128x112x112xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x192x56x56xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x192x56x56xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.multiply: (8x96x56x56xf32) <- (1xf32, 8x96x56x56xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_8 = 
paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.multiply: (8x96x56x56xf32) <- (1xf32, 8x96x56x56xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + 
batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.multiply: (8x96x56x56xf32) <- (1xf32, 8x96x56x56xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.swish: (8x96x56x56xf32) <- 
(8x96x56x56xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_549, + parameter_548, + parameter_547, + parameter_546, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_546, parameter_547, parameter_548, parameter_549 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_545 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_544, + parameter_543, + parameter_542, + parameter_541, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_541, parameter_542, parameter_543, parameter_544 + + # pd_op.multiply: (8x96x56x56xf32) <- (1xf32, 8x96x56x56xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_540 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_539, + parameter_538, + parameter_537, + parameter_536, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_536, parameter_537, parameter_538, parameter_539 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_535 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + 
batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_534, + parameter_533, + parameter_532, + parameter_531, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_531, parameter_532, parameter_533, parameter_534 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_530 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_529, + parameter_528, + parameter_527, + parameter_526, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_526, parameter_527, parameter_528, parameter_529 + + # pd_op.multiply: (8x96x56x56xf32) <- (1xf32, 8x96x56x56xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_525 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_516, parameter_517, parameter_518, parameter_519 + + # 
pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.multiply: (8x96x56x56xf32) <- (1xf32, 8x96x56x56xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([8x96x56x56xf32, 8x96x56x56xf32]) <- (8x96x56x56xf32, 8x96x56x56xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (8x192x56x56xf32) <- ([8x96x56x56xf32, 8x96x56x56xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (8x192x1x1xf32) <- (8x192x56x56xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) + del parameter_509 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (8x256x56x56xf32) <- (8x192x56x56xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (8x256x56x56xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x56x56xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.swish: (8x256x56x56xf32) <- (8x256x56x56xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (8x384x28x28xf32) <- (8x256x56x56xf32, 384x256x3x3xf32) + conv2d_40 = 
paddle._C_ops.conv2d( + swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x384x28x28xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x384x28x28xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.multiply: (8x192x28x28xf32) <- (1xf32, 8x192x28x28xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + 
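The unit that repeats through these stages (conv2d_6/7/8, ..., conv2d_46/47/48 around this point) is a residual block with a learnably scaled shortcut: t = swish(bn(conv3x3(x))), then y = bn(conv3x3(t)) + alpha * bn(conv1x1(t)), then out = x + swish(y), where alpha is the per-block 1xf32 tensor passed in as data_0 ... data_17. Each stage wraps a stack of these blocks the same way: two 1x1 conv-bn-swish projections split the stage input, the blocks run on one half, the halves are concatenated along channels (full_0 = 1 is the concat axis), a channel-attention step takes the mean over axes [2, 3], applies a biased 1x1 conv and hardsigmoid(x) = clip(x / 6 + 0.5, 0, 1), rescales the concat, and a final 1x1 conv-bn-swish widens the channels before the next stride-2 downsample. The layout resembles a CSP-style stage with effective-SE attention as used in PaddleDetection's CSPResNet; the sketch below is an illustrative reconstruction under that reading, with ConvBN, ScaledResidualBlock, EffectiveSE and CSPStage as hypothetical names and alpha modeled as a learnable parameter rather than a graph input.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class ConvBN(nn.Layer):
    # bias-free conv2d followed by batch_norm, no activation
    def __init__(self, in_ch, out_ch, k, stride=1):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, k, stride=stride,
                              padding=(k - 1) // 2, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch)

    def forward(self, x):
        return self.bn(self.conv(x))


class ScaledResidualBlock(nn.Layer):
    # x + swish(bn(conv3x3(t)) + alpha * bn(conv1x1(t))), with t = swish(bn(conv3x3(x)))
    def __init__(self, ch):
        super().__init__()
        self.conv_a = ConvBN(ch, ch, 3)
        self.conv_b = ConvBN(ch, ch, 3)
        self.conv_short = ConvBN(ch, ch, 1)
        # stands in for the 1xf32 data_* input of the generated subgraph
        self.alpha = self.create_parameter(
            [1], default_initializer=nn.initializer.Constant(1.0))

    def forward(self, x):
        t = F.swish(self.conv_a(x))
        y = self.conv_b(t) + self.alpha * self.conv_short(t)
        return x + F.swish(y)


class EffectiveSE(nn.Layer):
    # global mean -> biased 1x1 conv -> hardsigmoid -> channel-wise rescale
    def __init__(self, ch):
        super().__init__()
        self.fc = nn.Conv2D(ch, ch, 1)

    def forward(self, x):
        w = F.hardsigmoid(self.fc(x.mean(axis=[2, 3], keepdim=True)),
                          slope=0.166667, offset=0.5)
        return x * w


class CSPStage(nn.Layer):
    # split -> n residual blocks on one half -> concat -> effective-SE -> 1x1 transition
    def __init__(self, in_ch, mid_ch, out_ch, n_blocks):
        super().__init__()
        self.short = ConvBN(in_ch, mid_ch, 1)
        self.main = ConvBN(in_ch, mid_ch, 1)
        self.blocks = nn.Sequential(
            *[ScaledResidualBlock(mid_ch) for _ in range(n_blocks)])
        self.attn = EffectiveSE(2 * mid_ch)
        self.trans = ConvBN(2 * mid_ch, out_ch, 1)

    def forward(self, x):
        a = F.swish(self.short(x))
        b = self.blocks(F.swish(self.main(x)))
        y = self.attn(paddle.concat([a, b], axis=1))
        return F.swish(self.trans(y))


stage = CSPStage(96, 48, 128, n_blocks=3)
x = paddle.randn([8, 96, 112, 112])
print(stage(x).shape)  # [8, 128, 112, 112], matching the annotation at conv2d_16

With the same layout, the 56x56 stage above uses 6 blocks at 96 channels and the 28x28 stage being emitted here uses 6 blocks at 192 channels, each followed by a stride-2 conv2d into the next stage.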
+ # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.multiply: (8x192x28x28xf32) <- (1xf32, 8x192x28x28xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.multiply: (8x192x28x28xf32) <- (1xf32, 8x192x28x28xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_433, [1, 1], [0, 0], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.multiply: (8x192x28x28xf32) <- (1xf32, 8x192x28x28xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_417, 
+ parameter_416, + parameter_415, + parameter_414, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.multiply: (8x192x28x28xf32) <- (1xf32, 8x192x28x28xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_402, + parameter_401, + parameter_400, + parameter_399, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.multiply: (8x192x28x28xf32) <- (1xf32, 8x192x28x28xf32) + multiply_16 = 
paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([8x192x28x28xf32, 8x192x28x28xf32]) <- (8x192x28x28xf32, 8x192x28x28xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (8x384x28x28xf32) <- ([8x192x28x28xf32, 8x192x28x28xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (8x384x1x1xf32) <- (8x384x28x28xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_398 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) + del parameter_397 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (8x512x28x28xf32) <- (8x384x28x28xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (8x512x28x28xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x28x28xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.swish: (8x512x28x28xf32) <- (8x512x28x28xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (8x768x14x14xf32) <- (8x512x28x28xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (8x384x14x14xf32) 
<- (8x768x14x14xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x768x14x14xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_370, + parameter_369, + parameter_368, + parameter_367, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_367, parameter_368, parameter_369, parameter_370 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_366 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_365, + parameter_364, + parameter_363, + parameter_362, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_362, parameter_363, parameter_364, parameter_365 + + # pd_op.multiply: (8x384x14x14xf32) <- (1xf32, 8x384x14x14xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_361 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (8x384x14x14xf32, 
384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.multiply: (8x384x14x14xf32) <- (1xf32, 8x384x14x14xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_346 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.multiply: (8x384x14x14xf32) <- (1xf32, 8x384x14x14xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([8x384x14x14xf32, 8x384x14x14xf32]) <- (8x384x14x14xf32, 8x384x14x14xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (8x768x14x14xf32) <- ([8x384x14x14xf32, 8x384x14x14xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (8x768x1x1xf32) <- (8x768x14x14xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_330, full_int_array_1) + del full_int_array_1, parameter_330 + + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (8x1024x14x14xf32) <- (8x768x14x14xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (8x1024x14x14xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (8x1024x14x14xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.swish: (8x1024x14x14xf32) <- (8x1024x14x14xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x1024x14x14xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + 
), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x1024x14x14xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_59 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_61 = paddle._C_ops.swish(add_41) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # 
pd_op.pool2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([8x384x14x14xf32, 8x384x14x14xf32, 8x384x14x14xf32, 8x384x14x14xf32]) <- (8x384x14x14xf32, 8x384x14x14xf32, 8x384x14x14xf32, 8x384x14x14xf32) + combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (8x1536x14x14xf32) <- ([8x384x14x14xf32, 8x384x14x14xf32, 8x384x14x14xf32, 8x384x14x14xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x1536x14x14xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + 
batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_64 = paddle._C_ops.swish(add_42) + + # builtin.combine: ([8x384x14x14xf32, 8x384x14x14xf32]) <- (8x384x14x14xf32, 8x384x14x14xf32) + combine_5 = [swish_56, swish_64] + + # pd_op.concat: (8x768x14x14xf32) <- ([8x384x14x14xf32, 8x384x14x14xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x768x14x14xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (8x384x28x28xf32) <- (8x384x14x14xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([8x384x28x28xf32, 8x512x28x28xf32]) <- (8x384x28x28xf32, 8x512x28x28xf32) + combine_6 = [nearest_interp_0, swish_45] + + # pd_op.concat: (8x896x28x28xf32) <- ([8x384x28x28xf32, 8x512x28x28xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x896x28x28xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x896x28x28xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + 
swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_70 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_72 = paddle._C_ops.swish(add_44) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + 
batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_74 = paddle._C_ops.swish(add_45) + + # builtin.combine: ([8x192x28x28xf32, 8x192x28x28xf32]) <- (8x192x28x28xf32, 8x192x28x28xf32) + combine_7 = [swish_67, swish_74] + + # pd_op.concat: (8x384x28x28xf32) <- ([8x192x28x28xf32, 8x192x28x28xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x384x28x28xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (8x192x56x56xf32) <- (8x192x28x28xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([8x192x56x56xf32, 8x256x56x56xf32]) <- (8x192x56x56xf32, 8x256x56x56xf32) + combine_8 = [nearest_interp_1, swish_29] + + # pd_op.concat: (8x448x56x56xf32) <- ([8x192x56x56xf32, 8x256x56x56xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x448x56x56xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_8, 
parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x448x56x56xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + 
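+        # --- Editorial note (illustrative only, not part of the exported graph) ---
+        # Two idioms recur throughout this generated forward pass:
+        #
+        # 1) Every `paddle._C_ops.batch_norm` call is wrapped in
+        #    `(lambda x, f: f(x))(...)` so that a bare-tensor return value is padded
+        #    out to the 6-tuple the surrounding code unpacks. A minimal equivalent
+        #    of that wrapper, assuming only what the lambda itself does:
+        #
+        #        def _as_bn_tuple(out):
+        #            # pad a single tensor with None placeholders for the extra
+        #            # (mean_out, variance_out, saved_mean, saved_variance, reserve)
+        #            return out if isinstance(out, (list, tuple)) \
+        #                else (out, None, None, None, None, None)
+        #
+        # 2) The `mean -> 1x1 conv -> add bias -> hardsigmoid(1/6, 0.5) -> multiply`
+        #    sequence (see conv2d_61/conv2d_75 above) is a squeeze-and-excitation
+        #    style channel gate. A hedged sketch using public Paddle ops; `weight`
+        #    and `bias` are placeholder names, not tensors from this graph:
+        #
+        #        import paddle
+        #        import paddle.nn.functional as F
+        #
+        #        def se_gate(x, weight, bias):
+        #            s = paddle.mean(x, axis=[2, 3], keepdim=True)      # squeeze
+        #            s = F.conv2d(s, weight) + bias.reshape([1, -1, 1, 1])
+        #            return x * F.hardsigmoid(s)  # slope=1/6, offset=0.5 by default
+        # ---------------------------------------------------------------------------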
# pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_80 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_82 = paddle._C_ops.swish(add_47) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (8x96x56x56xf32) <- (8x96x56x56xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (8x96x56x56xf32) <- (8x96x56x56xf32, 8x96x56x56xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (8x96x56x56xf32) <- (8x96x56x56xf32) + swish_84 = paddle._C_ops.swish(add_48) + + # builtin.combine: ([8x96x56x56xf32, 8x96x56x56xf32]) <- (8x96x56x56xf32, 8x96x56x56xf32) + combine_9 = [swish_77, swish_84] + + # pd_op.concat: (8x192x56x56xf32) <- ([8x96x56x56xf32, 8x96x56x56xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (8x192x56x56xf32) <- 
(8x192x56x56xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x56x56xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([8x192x28x28xf32, 8x384x28x28xf32]) <- (8x192x28x28xf32, 8x384x28x28xf32) + combine_10 = [swish_86, swish_75] + + # pd_op.concat: (8x576x28x28xf32) <- ([8x192x28x28xf32, 8x384x28x28xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x576x28x28xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x576x28x28xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_10, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + 
batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_90 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( 
+ swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_92 = paddle._C_ops.swish(add_50) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (8x192x28x28xf32) <- (8x192x28x28xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (8x192x28x28xf32) <- (8x192x28x28xf32, 8x192x28x28xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (8x192x28x28xf32) <- (8x192x28x28xf32) + swish_94 = paddle._C_ops.swish(add_51) + + # builtin.combine: ([8x192x28x28xf32, 8x192x28x28xf32]) <- (8x192x28x28xf32, 8x192x28x28xf32) + combine_11 = [swish_87, swish_94] + + # pd_op.concat: (8x384x28x28xf32) <- ([8x192x28x28xf32, 8x192x28x28xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x28x28xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # 
pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([8x384x14x14xf32, 8x768x14x14xf32]) <- (8x384x14x14xf32, 8x768x14x14xf32) + combine_12 = [swish_96, swish_65] + + # pd_op.concat: (8x1152x14x14xf32) <- ([8x384x14x14xf32, 8x768x14x14xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x1152x14x14xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_12, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x1152x14x14xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_100 = paddle._C_ops.swish(add_52) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 
384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_102 = paddle._C_ops.swish(add_53) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (8x384x14x14xf32) <- (8x384x14x14xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (8x384x14x14xf32) <- (8x384x14x14xf32, 8x384x14x14xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (8x384x14x14xf32) <- (8x384x14x14xf32) + swish_104 = paddle._C_ops.swish(add_54) + + # builtin.combine: ([8x384x14x14xf32, 8x384x14x14xf32]) <- (8x384x14x14xf32, 8x384x14x14xf32) + combine_13 = [swish_97, swish_104] + + # pd_op.concat: (8x768x14x14xf32) <- ([8x384x14x14xf32, 8x384x14x14xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_53, + add_54, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + 
batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + 
batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + 
batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + 
batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + 
batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, 
+ swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/weight_meta.py new file mode 100644 index 000000000..3e3459082 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/weight_meta.py @@ -0,0 +1,7564 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.174892") + max_val = float("0.211093") + mean = float("0.0845739") + std = float("0.0562164") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.937556") + max_val = float("1.29453") + mean = float("1.06387") + std = float("0.0310557") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.000993105") + max_val = float("0.0333757") + mean = float("0.0034883") + std = float("0.00275358") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.148256") + max_val = float("0.0988512") + mean = float("-0.0232443") + std = float("0.0228301") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0483446") + max_val = float("0.0300933") + mean = float("-0.00011656") + std = float("0.00197846") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.138311") + max_val = float("0.0301208") + mean = float("-0.0180613") + std = float("0.0228147") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.94769") + max_val = float("1.04411") + mean = float("0.986729") + std = float("0.010451") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000385121") + max_val = float("0.00740646") + mean = float("0.00205141") + std = float("0.00109356") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.056999") + max_val = float("0.0488077") + mean = float("0.000832463") + std = float("0.0152666") + data = None + + +class 
Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0299557") + max_val = float("0.0156578") + mean = float("1.50258e-07") + std = float("0.00151464") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.138311") + max_val = float("0.0301207") + mean = float("-0.0180613") + std = float("0.0228147") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.96766") + max_val = float("1.12876") + mean = float("1.01502") + std = float("0.0169862") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00123062") + max_val = float("0.016384") + mean = float("0.00352568") + std = float("0.00161305") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.111547") + max_val = float("0.117325") + mean = float("-0.0243321") + std = float("0.0245689") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0252101") + max_val = float("0.0260363") + mean = float("-4.54914e-05") + std = float("0.00101999") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.166952") + max_val = float("0.0194256") + mean = float("-0.0337306") + std = float("0.027196") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975112") + max_val = float("1.12519") + mean = float("1.01447") + std = float("0.0236601") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00332451") + max_val = float("0.0380223") + mean = float("0.00904499") + std = float("0.00447072") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.183511") + max_val = float("0.20755") + mean = float("-0.0253151") + std = float("0.0335419") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0246938") + max_val = float("0.041621") + mean = float("-4.62599e-05") + std = float("0.00115623") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.101971") + max_val = float("0.0125234") + mean = float("-0.0345069") + std = float("0.0187505") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.947094") + max_val = float("1.04411") + mean = float("0.98866") + std = float("0.00962754") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000608792") + max_val = float("0.00692297") + mean = float("0.0027435") + std = float("0.00129451") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0673723") + max_val = float("0.0326082") + mean = 
float("-0.0032464") + std = float("0.0123981") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.023885") + max_val = float("0.0195613") + mean = float("-6.63058e-05") + std = float("0.00158147") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.101971") + max_val = float("0.0125234") + mean = float("-0.0345069") + std = float("0.0187505") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959612") + max_val = float("1.10373") + mean = float("1.01555") + std = float("0.0174682") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00180756") + max_val = float("0.0233911") + mean = float("0.00513154") + std = float("0.00255676") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.110394") + max_val = float("0.130029") + mean = float("-0.0339939") + std = float("0.0268802") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0305313") + max_val = float("0.0390134") + mean = float("-6.33746e-05") + std = float("0.00104922") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0876863") + max_val = float("0.0186708") + mean = float("-0.0349243") + std = float("0.0190156") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.935469") + max_val = float("1.11267") + mean = float("1.01117") + std = float("0.0260146") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00350475") + max_val = float("0.035784") + mean = float("0.00906684") + std = float("0.00437423") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.111072") + max_val = float("0.065702") + mean = float("-0.0147716") + std = float("0.0296686") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.03558") + max_val = float("0.0368855") + mean = float("-4.08548e-05") + std = float("0.00121263") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.112996") + max_val = float("0.0138999") + mean = float("-0.0359425") + std = float("0.0194224") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.930402") + max_val = float("1.02683") + mean = float("0.987073") + std = float("0.0107267") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.000834798") + max_val = float("0.00843597") + mean = float("0.00358387") + std = float("0.00139869") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val 
= float("-0.0318765") + max_val = float("0.0402997") + mean = float("-0.00716202") + std = float("0.0105811") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246045") + max_val = float("0.0245792") + mean = float("-0.000134425") + std = float("0.00162067") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.112996") + max_val = float("0.0138999") + mean = float("-0.0359425") + std = float("0.0194224") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.981669") + max_val = float("1.10373") + mean = float("1.01772") + std = float("0.0216875") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00248832") + max_val = float("0.0233141") + mean = float("0.00665286") + std = float("0.00295268") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.119239") + max_val = float("0.0711163") + mean = float("-0.0168139") + std = float("0.0258729") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0269935") + max_val = float("0.0461291") + mean = float("-3.54625e-05") + std = float("0.00113348") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.104569") + max_val = float("0.0223233") + mean = float("-0.0363337") + std = float("0.0208304") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.946498") + max_val = float("1.11386") + mean = float("1.01137") + std = float("0.0272341") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00353592") + max_val = float("0.0393769") + mean = float("0.00931501") + std = float("0.00473543") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.105865") + max_val = float("0.126411") + mean = float("-0.0300176") + std = float("0.0363126") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.019792") + max_val = float("0.0308629") + mean = float("-5.53246e-05") + std = float("0.00127613") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.103835") + max_val = float("0.044645") + mean = float("-0.0253276") + std = float("0.0148572") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.974516") + max_val = float("1.08584") + mean = float("1.00846") + std = float("0.0167497") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00205733") + max_val = float("0.0145022") + mean = float("0.00368356") + std = float("0.00126217") + data = None + + +class Program_weight_tensor_parameter_53: + name 
= "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0618369") + max_val = float("0.0440849") + mean = float("-0.0152888") + std = float("0.0176908") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0510992") + max_val = float("0.054144") + mean = float("-7.3901e-05") + std = float("0.00193455") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0409565") + max_val = float("0.0158325") + mean = float("-0.0085231") + std = float("0.00820876") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.962593") + max_val = float("1.05007") + mean = float("1.00729") + std = float("0.0112735") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00152991") + max_val = float("0.0173492") + mean = float("0.00281447") + std = float("0.00127325") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.0602306") + max_val = float("0.044153") + mean = float("-0.0178836") + std = float("0.0172563") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0415169") + max_val = float("0.0339306") + mean = float("-8.93716e-05") + std = float("0.00170999") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0516134") + max_val = float("0.00606255") + mean = float("-0.0160099") + std = float("0.00955839") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988237") + max_val = float("1.10253") + mean = float("1.01881") + std = float("0.0165775") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00262887") + max_val = float("0.0330672") + mean = float("0.00918311") + std = float("0.0043945") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.255923") + max_val = float("0.206816") + mean = float("-0.0284714") + std = float("0.0560007") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0248827") + max_val = float("0.0276378") + mean = float("-1.93879e-05") + std = float("0.00104169") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.222232") + max_val = float("0.491769") + mean = float("0.218792") + std = float("0.123626") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.924739") + max_val = float("1.47435") + mean = float("1.14148") + std = float("0.073515") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00274326") + max_val = float("0.0990496") + mean = float("0.00811996") + std = float("0.0067466") + data 
= None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.156631") + max_val = float("0.121317") + mean = float("-0.0252877") + std = float("0.0298744") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0874231") + max_val = float("0.0601109") + mean = float("-0.000241914") + std = float("0.00452219") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.164047") + max_val = float("0.0461356") + mean = float("-0.0239281") + std = float("0.0388386") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.844262") + max_val = float("1.05127") + mean = float("0.973164") + std = float("0.0236033") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.000590004") + max_val = float("0.0238958") + mean = float("0.00396219") + std = float("0.00271564") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0504661") + max_val = float("0.0756272") + mean = float("-0.0037403") + std = float("0.0153807") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0501829") + max_val = float("0.0286784") + mean = float("-0.000135816") + std = float("0.00335627") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.164047") + max_val = float("0.0461356") + mean = float("-0.0239281") + std = float("0.0388386") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.734277") + max_val = float("1.11982") + mean = float("1.0217") + std = float("0.0365418") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00271339") + max_val = float("0.0219093") + mean = float("0.00670251") + std = float("0.00274959") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.159305") + max_val = float("0.100634") + mean = float("-0.0222208") + std = float("0.0336454") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0443179") + max_val = float("0.0401566") + mean = float("-7.1021e-05") + std = float("0.00226241") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.188321") + max_val = float("0.0420972") + mean = float("-0.0566532") + std = float("0.0480229") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.89851") + max_val = float("1.18003") + mean = float("1.01508") + std = float("0.0478824") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00548443") + max_val = float("0.0752202") + 
mean = float("0.0172845") + std = float("0.0105698") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.233482") + max_val = float("0.28974") + mean = float("-0.025231") + std = float("0.0409422") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0579255") + max_val = float("0.063931") + mean = float("-9.44856e-05") + std = float("0.00257") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.188276") + max_val = float("0.00864405") + mean = float("-0.062358") + std = float("0.0326101") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.923547") + max_val = float("1.0459") + mean = float("0.973796") + std = float("0.0176449") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00150378") + max_val = float("0.0125396") + mean = float("0.00488357") + std = float("0.0021003") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0484967") + max_val = float("0.0343622") + mean = float("-0.00791008") + std = float("0.0126357") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.043172") + max_val = float("0.0265178") + mean = float("-0.000338721") + std = float("0.00336143") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.188276") + max_val = float("0.00864405") + mean = float("-0.062358") + std = float("0.0326101") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.969747") + max_val = float("1.14784") + mean = float("1.02344") + std = float("0.0287259") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00270009") + max_val = float("0.0365071") + mean = float("0.00845342") + std = float("0.00496361") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.104084") + max_val = float("0.11502") + mean = float("-0.0222725") + std = float("0.0283642") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0430966") + max_val = float("0.0566778") + mean = float("-8.59508e-05") + std = float("0.00241031") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.186552") + max_val = float("0.0595296") + mean = float("-0.073781") + std = float("0.0397609") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.882116") + max_val = float("1.21043") + mean = float("1.0143") + std = float("0.0499515") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = 
float("0.00590687") + max_val = float("0.0420289") + mean = float("0.0126886") + std = float("0.0059983") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.0728663") + max_val = float("0.0406784") + mean = float("-0.016258") + std = float("0.0228685") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0453925") + max_val = float("0.0701272") + mean = float("-8.82827e-05") + std = float("0.00273142") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.223283") + max_val = float("-0.0118199") + mean = float("-0.0806518") + std = float("0.0410641") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.903255") + max_val = float("1.02623") + mean = float("0.975461") + std = float("0.0224407") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.0019003") + max_val = float("0.0153178") + mean = float("0.00530644") + std = float("0.00181159") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0416717") + max_val = float("0.0451235") + mean = float("-0.0100494") + std = float("0.0182678") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0389131") + max_val = float("0.0686594") + mean = float("-0.000450541") + std = float("0.00385248") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.223283") + max_val = float("-0.0118199") + mean = float("-0.0806518") + std = float("0.0410641") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947687") + max_val = float("1.10849") + mean = float("1.0205") + std = float("0.0300019") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00463295") + max_val = float("0.0520741") + mean = float("0.0115664") + std = float("0.00673197") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.09989") + max_val = float("0.0749035") + mean = float("-0.0170108") + std = float("0.029879") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0473608") + max_val = float("0.0538585") + mean = float("-7.29276e-05") + std = float("0.00259151") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.228002") + max_val = float("0.0775768") + mean = float("-0.0921916") + std = float("0.0450604") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.888077") + max_val = float("1.19613") + mean = float("1.0162") + std = float("0.0531144") + data = None + + +class 
Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00606638") + max_val = float("0.058893") + mean = float("0.0150242") + std = float("0.00884026") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.145865") + max_val = float("0.113215") + mean = float("-0.0320989") + std = float("0.0329434") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0418221") + max_val = float("0.0858073") + mean = float("-0.000108662") + std = float("0.00299481") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.192804") + max_val = float("0.0142954") + mean = float("-0.0642525") + std = float("0.0302109") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925751") + max_val = float("1.14837") + mean = float("1.0126") + std = float("0.0375163") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00312917") + max_val = float("0.0201934") + mean = float("0.0065128") + std = float("0.00249212") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0749669") + max_val = float("0.0929836") + mean = float("-0.0200869") + std = float("0.0235728") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0569455") + max_val = float("0.0683815") + mean = float("-0.000177842") + std = float("0.0043464") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.0978959") + max_val = float("0.0361387") + mean = float("-0.0134877") + std = float("0.0200082") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.925028") + max_val = float("1.19057") + mean = float("1.00208") + std = float("0.0253154") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00234222") + max_val = float("0.0305314") + mean = float("0.0051251") + std = float("0.00278406") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0571601") + max_val = float("0.0371591") + mean = float("-0.0129105") + std = float("0.0182218") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0968447") + max_val = float("0.0664063") + mean = float("-0.000110508") + std = float("0.00376605") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.155666") + max_val = float("-0.000484626") + mean = float("-0.0379649") + std = float("0.0212116") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921162") + max_val = 
float("1.24441") + mean = float("1.0065") + std = float("0.0297735") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00411353") + max_val = float("0.0623699") + mean = float("0.0134111") + std = float("0.00728295") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.431603") + max_val = float("0.577253") + mean = float("-0.0312247") + std = float("0.102855") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0481316") + max_val = float("0.0394776") + mean = float("-2.79861e-05") + std = float("0.00235401") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.548219") + max_val = float("1.15393") + mean = float("0.360733") + std = float("0.346235") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.546543") + max_val = float("1.57003") + mean = float("1.15376") + std = float("0.183789") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00264418") + max_val = float("0.181722") + mean = float("0.0148293") + std = float("0.015115") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.205861") + max_val = float("0.200967") + mean = float("-0.0217564") + std = float("0.05363") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.122667") + max_val = float("0.0973039") + mean = float("-0.000421208") + std = float("0.00946449") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.454892") + max_val = float("0.224851") + mean = float("-0.00897303") + std = float("0.14211") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.766222") + max_val = float("1.235") + mean = float("0.949644") + std = float("0.0702665") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00171231") + max_val = float("0.0311737") + mean = float("0.00819604") + std = float("0.00561701") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0394539") + max_val = float("0.076554") + mean = float("-0.007295") + std = float("0.017333") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0689192") + max_val = float("0.0478361") + mean = float("-0.000702064") + std = float("0.00707104") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.454892") + max_val = float("0.224851") + mean = float("-0.00897303") + std = float("0.14211") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + 
dtype = "float32" + min_val = float("0.510462") + max_val = float("1.27064") + mean = float("1.02895") + std = float("0.0949206") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00451032") + max_val = float("0.0416103") + mean = float("0.0147237") + std = float("0.00886156") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.300223") + max_val = float("0.115573") + mean = float("-0.0164775") + std = float("0.0540504") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0746669") + max_val = float("0.0809888") + mean = float("-3.77535e-05") + std = float("0.0050147") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.698004") + max_val = float("0.487979") + mean = float("-0.110327") + std = float("0.194759") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.728018") + max_val = float("1.69389") + mean = float("0.996546") + std = float("0.13194") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.00625557") + max_val = float("0.0763945") + mean = float("0.0194982") + std = float("0.015669") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.16835") + max_val = float("0.129387") + mean = float("-0.0150474") + std = float("0.0526258") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0901918") + max_val = float("0.0673388") + mean = float("-0.000269007") + std = float("0.00562194") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.356561") + max_val = float("0.180482") + mean = float("-0.135362") + std = float("0.0936647") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.63226") + max_val = float("1.02086") + mean = float("0.908336") + std = float("0.0545737") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00246361") + max_val = float("0.0123176") + mean = float("0.00684781") + std = float("0.0022603") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0391778") + max_val = float("0.0290665") + mean = float("-0.00619328") + std = float("0.013607") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0529296") + max_val = float("0.0587942") + mean = float("-0.000719364") + std = float("0.00725231") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.356561") + max_val = float("0.180482") + mean = float("-0.135362") + std = float("0.0936647") + data = None + + +class 
Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.816664") + max_val = float("1.1538") + mean = float("1.02227") + std = float("0.0592556") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00554989") + max_val = float("0.0572929") + mean = float("0.0142202") + std = float("0.00873025") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.0892734") + max_val = float("0.0313603") + mean = float("-0.0205233") + std = float("0.0246192") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0686605") + max_val = float("0.0707037") + mean = float("-0.000262325") + std = float("0.00531466") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.479114") + max_val = float("0.160087") + mean = float("-0.163416") + std = float("0.128723") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.779583") + max_val = float("1.2922") + mean = float("0.96437") + std = float("0.0974768") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.00461085") + max_val = float("0.059027") + mean = float("0.0116795") + std = float("0.00804888") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.130604") + max_val = float("0.0420061") + mean = float("0.0023562") + std = float("0.029741") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0836645") + max_val = float("0.0654402") + mean = float("-0.000286786") + std = float("0.00619629") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.481327") + max_val = float("0.06771") + mean = float("-0.163698") + std = float("0.112814") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.72772") + max_val = float("1.00119") + mean = float("0.919543") + std = float("0.0523302") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00471793") + max_val = float("0.0215554") + mean = float("0.0103953") + std = float("0.0033512") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0437622") + max_val = float("0.0317326") + mean = float("-0.0166984") + std = float("0.0169709") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0828667") + max_val = float("0.0608235") + mean = float("-0.00159151") + std = float("0.00855272") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.481327") + max_val = float("0.06771") + mean = 
float("-0.163698") + std = float("0.112814") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.769747") + max_val = float("1.15082") + mean = float("0.983926") + std = float("0.0570166") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.00991252") + max_val = float("0.12168") + mean = float("0.0229116") + std = float("0.0156004") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.116497") + max_val = float("0.0589981") + mean = float("-0.0124062") + std = float("0.0337506") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.098636") + max_val = float("0.0785302") + mean = float("-0.000160047") + std = float("0.00609555") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.554545") + max_val = float("0.345827") + mean = float("-0.174318") + std = float("0.169147") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.755258") + max_val = float("1.33741") + mean = float("0.955165") + std = float("0.11072") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.00741183") + max_val = float("0.0607503") + mean = float("0.0161127") + std = float("0.0109302") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.113194") + max_val = float("0.169167") + mean = float("-0.00745381") + std = float("0.06047") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.117137") + max_val = float("0.105905") + mean = float("-0.000120768") + std = float("0.00707651") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.610344") + max_val = float("0.5775") + mean = float("-0.078999") + std = float("0.24755") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.658569") + max_val = float("1.22712") + mean = float("0.869713") + std = float("0.11224") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.00753543") + max_val = float("0.0439633") + mean = float("0.015098") + std = float("0.00692945") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.0903701") + max_val = float("0.0527284") + mean = float("-0.0148833") + std = float("0.0304859") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.130957") + max_val = float("0.115988") + mean = float("-0.000327661") + std = float("0.00936182") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = 
float("-0.0946454") + max_val = float("0.22398") + mean = float("0.0616498") + std = float("0.0537758") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.711917") + max_val = float("1.12101") + mean = float("0.933085") + std = float("0.0627216") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00157056") + max_val = float("0.0275589") + mean = float("0.00617893") + std = float("0.00279518") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0600397") + max_val = float("0.113757") + mean = float("-0.0203758") + std = float("0.0250297") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0876125") + max_val = float("0.0791409") + mean = float("-0.000117389") + std = float("0.00624421") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.28994") + max_val = float("0.197021") + mean = float("-0.0640566") + std = float("0.0680938") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.675984") + max_val = float("1.44247") + mean = float("0.887379") + std = float("0.0773697") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00621495") + max_val = float("0.0701442") + mean = float("0.0141641") + std = float("0.00635089") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.146238") + max_val = float("0.0447718") + mean = float("-0.0296659") + std = float("0.025314") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.103707") + max_val = float("0.088719") + mean = float("-0.000487102") + std = float("0.0067597") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.196718") + max_val = float("0.235386") + mean = float("-0.0648365") + std = float("0.0406221") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.871884") + max_val = float("1.52666") + mean = float("1.01848") + std = float("0.0619347") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00568249") + max_val = float("0.0557212") + mean = float("0.0100772") + std = float("0.00465598") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.243456") + max_val = float("0.13077") + mean = float("-0.0377891") + std = float("0.03733") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.155294") + max_val = float("0.0952353") + mean = float("-0.000484224") + std = float("0.00631135") + data = None + + +class 
Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.172574") + max_val = float("0.00600923") + mean = float("-0.0634233") + std = float("0.0317358") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.886587") + max_val = float("0.991209") + mean = float("0.950227") + std = float("0.0161195") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00369063") + max_val = float("0.0177636") + mean = float("0.00660926") + std = float("0.00212373") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0644225") + max_val = float("0.0495055") + mean = float("-0.0195959") + std = float("0.0238377") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0502286") + max_val = float("0.0312501") + mean = float("-0.000608958") + std = float("0.00473088") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.172574") + max_val = float("0.00600923") + mean = float("-0.0634233") + std = float("0.0317358") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.945305") + max_val = float("1.02981") + mean = float("0.988006") + std = float("0.0161849") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0098824") + max_val = float("0.0374918") + mean = float("0.0180389") + std = float("0.00587459") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.137861") + max_val = float("0.136049") + mean = float("-0.0236441") + std = float("0.0413456") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0481666") + max_val = float("0.0732784") + mean = float("-7.89008e-05") + std = float("0.00270519") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.210876") + max_val = float("-0.00262478") + mean = float("-0.0713724") + std = float("0.0343171") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939491") + max_val = float("1.14844") + mean = float("1.02838") + std = float("0.0422414") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0216612") + max_val = float("0.143707") + mean = float("0.0364187") + std = float("0.0115818") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.165667") + max_val = float("0.255057") + mean = float("-0.0420635") + std = float("0.0501881") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0665545") + max_val = 
float("0.0591627") + mean = float("-0.000107205") + std = float("0.00323341") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.190391") + max_val = float("-0.00890704") + mean = float("-0.0682439") + std = float("0.0307511") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.944709") + max_val = float("1.04292") + mean = float("0.988002") + std = float("0.0133025") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00209797") + max_val = float("0.00987326") + mean = float("0.00353901") + std = float("0.000905426") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0731497") + max_val = float("0.0378485") + mean = float("-0.0186648") + std = float("0.0166593") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0318287") + max_val = float("0.0416197") + mean = float("-0.000598784") + std = float("0.00486833") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.190391") + max_val = float("-0.00890704") + mean = float("-0.0682439") + std = float("0.0307511") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.954247") + max_val = float("1.11148") + mean = float("1.00434") + std = float("0.0256726") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.00638622") + max_val = float("0.0236275") + mean = float("0.0105406") + std = float("0.00293498") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.13966") + max_val = float("0.0690487") + mean = float("-0.0297218") + std = float("0.0308299") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0503067") + max_val = float("0.0760073") + mean = float("-0.000102199") + std = float("0.00271198") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.224111") + max_val = float("-0.0186564") + mean = float("-0.0909903") + std = float("0.0386362") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.947988") + max_val = float("1.18957") + mean = float("1.02335") + std = float("0.0450781") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0202697") + max_val = float("0.0809053") + mean = float("0.0356586") + std = float("0.0113639") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.221703") + max_val = float("0.0932416") + mean = float("-0.056843") + std = float("0.0617265") + data = None + + +class Program_weight_tensor_parameter_229: + name = 
"parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0627662") + max_val = float("0.0881393") + mean = float("-0.000127279") + std = float("0.0033596") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.149871") + max_val = float("-0.00319106") + mean = float("-0.065912") + std = float("0.0224973") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.933681") + max_val = float("1.07034") + mean = float("0.998331") + std = float("0.0211287") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00185054") + max_val = float("0.00643037") + mean = float("0.00320099") + std = float("0.000870594") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0635071") + max_val = float("0.0871591") + mean = float("-0.00971039") + std = float("0.0177047") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0294449") + max_val = float("0.042994") + mean = float("-0.000328625") + std = float("0.00548288") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.149871") + max_val = float("-0.00319107") + mean = float("-0.065912") + std = float("0.0224973") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.937556") + max_val = float("1.11241") + mean = float("0.992723") + std = float("0.0250658") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.0065578") + max_val = float("0.0277795") + mean = float("0.0119257") + std = float("0.0033028") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.17874") + max_val = float("0.0961657") + mean = float("-0.0321881") + std = float("0.0326858") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0381431") + max_val = float("0.0662821") + mean = float("-0.000127777") + std = float("0.00266586") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.28186") + max_val = float("0.0109399") + mean = float("-0.105907") + std = float("0.0387257") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.943815") + max_val = float("1.2462") + mean = float("1.02556") + std = float("0.0410248") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.00906398") + max_val = float("0.0423329") + mean = float("0.0164563") + std = float("0.00498075") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.218708") + max_val = float("0.107669") + mean = float("-0.0368758") + std = 
float("0.0439908") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.053249") + max_val = float("0.0593346") + mean = float("-0.000147598") + std = float("0.00370186") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.249578") + max_val = float("-0.0169046") + mean = float("-0.117906") + std = float("0.0427768") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.914307") + max_val = float("1.12922") + mean = float("1.02329") + std = float("0.0415191") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00415368") + max_val = float("0.0120239") + mean = float("0.00636188") + std = float("0.00146852") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.11797") + max_val = float("0.0659958") + mean = float("0.0108878") + std = float("0.0234366") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0596061") + max_val = float("0.0852154") + mean = float("-0.000166259") + std = float("0.00510582") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.174062") + max_val = float("0.20652") + mean = float("-0.00687458") + std = float("0.0495588") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.949294") + max_val = float("1.21282") + mean = float("1.05382") + std = float("0.0493175") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00454854") + max_val = float("0.0401108") + mean = float("0.00759753") + std = float("0.00303154") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0931261") + max_val = float("0.0411332") + mean = float("-0.00779903") + std = float("0.0242455") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0577965") + max_val = float("0.207752") + mean = float("-0.000163517") + std = float("0.00518159") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.243157") + max_val = float("-0.0545114") + mean = float("-0.120993") + std = float("0.0328659") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.819523") + max_val = float("1.01252") + mean = float("0.911391") + std = float("0.0254671") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.00653635") + max_val = float("0.0391007") + mean = float("0.0110587") + std = float("0.00365075") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + 
min_val = float("-0.0841299") + max_val = float("0.0837342") + mean = float("-0.0268797") + std = float("0.0226601") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.036508") + max_val = float("0.0461299") + mean = float("-0.000226844") + std = float("0.00407098") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.101546") + max_val = float("0.0664281") + mean = float("-0.0545292") + std = float("0.0146517") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.953949") + max_val = float("1.13556") + mean = float("1.01973") + std = float("0.0205277") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00467604") + max_val = float("0.0298971") + mean = float("0.00760263") + std = float("0.00203123") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.0978559") + max_val = float("0.0937744") + mean = float("-0.0368353") + std = float("0.0221066") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0460146") + max_val = float("0.0867384") + mean = float("-0.000256297") + std = float("0.00341213") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.151408") + max_val = float("0.0694086") + mean = float("-0.0383627") + std = float("0.0200899") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.891922") + max_val = float("1.06984") + mean = float("0.982304") + std = float("0.0127622") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00286638") + max_val = float("0.041628") + mean = float("0.00685663") + std = float("0.00334755") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.064554") + max_val = float("0.0514031") + mean = float("-0.00612239") + std = float("0.0154064") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0308451") + max_val = float("0.0532299") + mean = float("-7.96992e-05") + std = float("0.00291815") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.151408") + max_val = float("0.0694086") + mean = float("-0.0383627") + std = float("0.0200899") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.886572") + max_val = float("1.07296") + mean = float("0.993806") + std = float("0.0118894") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0148773") + max_val = float("0.321349") + mean = float("0.0439823") + std = float("0.0223398") + data = None + + +class 
Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.214635") + max_val = float("0.109773") + mean = float("-0.0660826") + std = float("0.0506459") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0333005") + max_val = float("0.041677") + mean = float("-0.00010749") + std = float("0.00109327") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0780192") + max_val = float("0.112505") + mean = float("-0.0177938") + std = float("0.0153081") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.91913") + max_val = float("1.16334") + mean = float("1.01408") + std = float("0.024304") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0103669") + max_val = float("0.128258") + mean = float("0.0322904") + std = float("0.013408") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.152271") + max_val = float("0.144351") + mean = float("-0.036936") + std = float("0.0458743") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.022605") + max_val = float("0.036334") + mean = float("-6.19771e-05") + std = float("0.00142301") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0692737") + max_val = float("0.020956") + mean = float("-0.022123") + std = float("0.012998") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946121") + max_val = float("1.16334") + mean = float("1.01367") + std = float("0.0267342") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.038754") + max_val = float("0.265584") + mean = float("0.0992723") + std = float("0.0346033") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.36389") + max_val = float("1.31343") + mean = float("-0.0600774") + std = float("0.421027") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0364844") + max_val = float("0.0476714") + mean = float("3.74628e-05") + std = float("0.00243743") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0175083") + max_val = float("0.0242722") + mean = float("-0.00128546") + std = float("0.00651277") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969747") + max_val = float("1.05842") + mean = float("0.993825") + std = float("0.0119916") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00151688") + max_val = float("0.00760405") + mean = 
float("0.00337224") + std = float("0.000929367") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.082494") + max_val = float("0.0589318") + mean = float("-0.0285745") + std = float("0.0175221") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0288604") + max_val = float("0.0277987") + mean = float("-0.000359512") + std = float("0.00261882") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0175083") + max_val = float("0.0242723") + mean = float("-0.00128546") + std = float("0.00651277") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.972131") + max_val = float("1.08107") + mean = float("1.00345") + std = float("0.0176991") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.00691705") + max_val = float("0.0471256") + mean = float("0.01716") + std = float("0.00618384") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.247766") + max_val = float("0.0730777") + mean = float("-0.0791956") + std = float("0.0415174") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0229517") + max_val = float("0.0484596") + mean = float("-0.000124174") + std = float("0.00111198") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0465124") + max_val = float("0.00896559") + mean = float("-0.00777664") + std = float("0.00744783") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.955738") + max_val = float("1.12697") + mean = float("1.01182") + std = float("0.0192506") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0314414") + max_val = float("0.14125") + mean = float("0.0746866") + std = float("0.0188475") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-0.812785") + max_val = float("0.611656") + mean = float("-0.193694") + std = float("0.17956") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0225096") + max_val = float("0.0381849") + mean = float("-0.000121796") + std = float("0.00132888") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.033777") + max_val = float("0.013876") + mean = float("-0.00696848") + std = float("0.00753703") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984352") + max_val = float("1.03279") + mean = float("0.999734") + std = float("0.00691496") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] 
+ dtype = "float32" + min_val = float("0.00111383") + max_val = float("0.00461813") + mean = float("0.00196932") + std = float("0.000512259") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0572523") + max_val = float("0.106779") + mean = float("-0.0161018") + std = float("0.0194414") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0207385") + max_val = float("0.0285771") + mean = float("-0.000218194") + std = float("0.00228236") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.033777") + max_val = float("0.013876") + mean = float("-0.00696848") + std = float("0.00753703") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.982265") + max_val = float("1.06494") + mean = float("1.00417") + std = float("0.0121376") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00461328") + max_val = float("0.0224104") + mean = float("0.0103707") + std = float("0.00337702") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.141128") + max_val = float("0.260715") + mean = float("-0.0555715") + std = float("0.0478601") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0123945") + max_val = float("0.0276121") + mean = float("-9.53489e-05") + std = float("0.000946776") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0511097") + max_val = float("0.00384727") + mean = float("-0.0195031") + std = float("0.00834569") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.976304") + max_val = float("1.08227") + mean = float("1.01125") + std = float("0.0153522") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00669036") + max_val = float("0.0269234") + mean = float("0.0118865") + std = float("0.00284745") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.11881") + max_val = float("0.12982") + mean = float("-0.0253082") + std = float("0.0293866") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0142638") + max_val = float("0.0240848") + mean = float("-4.77966e-05") + std = float("0.00130967") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0675065") + max_val = float("0.0208505") + mean = float("-0.0317977") + std = float("0.0122018") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981929") + max_val = float("1.05544") + mean = float("1.01258") + std = float("0.0106512") + data 
= None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00777962") + max_val = float("0.0354149") + mean = float("0.0127707") + std = float("0.00298295") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.101238") + max_val = float("0.159996") + mean = float("-0.0341247") + std = float("0.0255105") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0171645") + max_val = float("0.0375344") + mean = float("-0.000182758") + std = float("0.00258819") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0229577") + max_val = float("0.0210528") + mean = float("5.72014e-05") + std = float("0.00781106") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.992697") + max_val = float("1.08107") + mean = float("1.03953") + std = float("0.0135059") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0119054") + max_val = float("0.0249") + mean = float("0.0154694") + std = float("0.00189669") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.0851231") + max_val = float("0.0327932") + mean = float("-0.0414702") + std = float("0.0173") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0427528") + max_val = float("0.0452984") + mean = float("-0.000216378") + std = float("0.00310206") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.75857") + max_val = float("-0.733979") + mean = float("-2.18654") + std = float("0.428748") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("1.61878") + max_val = float("4.44113") + mean = float("3.08009") + std = float("0.254208") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("0.00282903") + max_val = float("0.0172792") + mean = float("0.0057324") + std = float("0.0014338") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [1024] + dtype = "float32" + min_val = float("-0.0984391") + max_val = float("0.109499") + mean = float("-0.0431774") + std = float("0.0202153") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0757279") + max_val = float("0.0957757") + mean = float("-0.000302785") + std = float("0.00326755") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [768] + dtype = "float32" + min_val = float("-0.0164166") + max_val = float("0.00135737") + mean = float("-0.000806967") + std = float("0.00233613") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = 
float("-0.085699") + max_val = float("0.147523") + mean = float("-0.000293948") + std = float("0.00174542") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("-1.77377") + max_val = float("0.311103") + mean = float("-0.311573") + std = float("0.290765") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("0.188432") + max_val = float("1.81349") + mean = float("0.608742") + std = float("0.261608") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384] + dtype = "float32" + min_val = float("4.42942e-05") + max_val = float("0.00118799") + mean = float("0.000192041") + std = float("0.00011591") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.0900967") + max_val = float("0.0529868") + mean = float("0.0173526") + std = float("0.0155363") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0238567") + max_val = float("0.0253444") + mean = float("-0.00029369") + std = float("0.00235824") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("-1.77377") + max_val = float("0.311103") + mean = float("-0.311573") + std = float("0.290765") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("0.331669") + max_val = float("2.59613") + mean = float("1.02383") + std = float("0.289312") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384] + dtype = "float32" + min_val = float("0.000339636") + max_val = float("0.00420647") + mean = float("0.00115218") + std = float("0.000522931") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.185857") + max_val = float("0.0902937") + mean = float("0.017054") + std = float("0.0215204") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0184368") + max_val = float("0.0309487") + mean = float("-3.74719e-05") + std = float("0.00148404") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("-2.58063") + max_val = float("0.0348998") + mean = float("-1.56714") + std = float("0.41632") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("0.524143") + max_val = float("1.6456") + mean = float("1.13585") + std = float("0.149435") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384] + dtype = "float32" + min_val = float("0.0283643") + max_val = float("0.118305") + mean = float("0.0577061") + std = float("0.0133887") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.790455") + max_val = float("0.351067") + mean = float("-0.224794") + std = float("0.104268") + data = None + + +class Program_weight_tensor_parameter_346: + name = 
"parameter_346" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0295918") + max_val = float("0.0535949") + mean = float("-0.000158566") + std = float("0.00196587") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("-1.9389") + max_val = float("0.63964") + mean = float("-0.575116") + std = float("0.358224") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("0.164447") + max_val = float("2.06557") + mean = float("0.56162") + std = float("0.227236") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384] + dtype = "float32" + min_val = float("7.09489e-05") + max_val = float("0.00126223") + mean = float("0.000274291") + std = float("0.000141013") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0343246") + max_val = float("0.0610397") + mean = float("0.0169236") + std = float("0.012518") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0210778") + max_val = float("0.0255864") + mean = float("-0.0003161") + std = float("0.00224406") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("-1.9389") + max_val = float("0.63964") + mean = float("-0.575116") + std = float("0.358224") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("0.580775") + max_val = float("2.15618") + mean = float("1.08268") + std = float("0.256056") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384] + dtype = "float32" + min_val = float("0.000843902") + max_val = float("0.00484606") + mean = float("0.00177336") + std = float("0.0005744") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.0632911") + max_val = float("0.0897295") + mean = float("0.0226457") + std = float("0.0198268") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0186829") + max_val = float("0.0311627") + mean = float("-6.31943e-05") + std = float("0.00157415") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.39583") + max_val = float("0.846047") + mean = float("-1.40377") + std = float("0.360818") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.460879") + max_val = float("1.91751") + mean = float("1.16673") + std = float("0.148212") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("0.023841") + max_val = float("0.0718949") + mean = float("0.0364854") + std = float("0.00756489") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.608691") + max_val = float("0.726329") + mean = float("-0.148658") + std = float("0.084412") + 
data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0202641") + max_val = float("0.040492") + mean = float("-0.000153374") + std = float("0.00197749") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("-1.87571") + max_val = float("0.45144") + mean = float("-0.485654") + std = float("0.375942") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("0.0770862") + max_val = float("2.11923") + mean = float("0.441717") + std = float("0.217735") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384] + dtype = "float32" + min_val = float("5.43266e-05") + max_val = float("0.00177058") + mean = float("0.000327251") + std = float("0.000183281") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.0347425") + max_val = float("0.0612964") + mean = float("0.0215239") + std = float("0.0145228") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0144415") + max_val = float("0.0263401") + mean = float("-0.000420787") + std = float("0.00193921") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("-1.87571") + max_val = float("0.45144") + mean = float("-0.485654") + std = float("0.375942") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("0.517586") + max_val = float("2.22534") + mean = float("1.05167") + std = float("0.260686") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384] + dtype = "float32" + min_val = float("0.000996551") + max_val = float("0.0053321") + mean = float("0.00235186") + std = float("0.00069961") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.154082") + max_val = float("0.0977398") + mean = float("0.0285175") + std = float("0.0234466") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0199711") + max_val = float("0.0376546") + mean = float("-6.76069e-05") + std = float("0.00166261") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-2.15499") + max_val = float("0.421036") + mean = float("-1.36535") + std = float("0.277727") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.711873") + max_val = float("1.63726") + mean = float("1.14347") + std = float("0.101775") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("0.0152961") + max_val = float("0.0674481") + mean = float("0.0272479") + std = float("0.00732693") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.599745") + max_val 
= float("0.187363") + mean = float("-0.104299") + std = float("0.0685149") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0258655") + max_val = float("0.0433082") + mean = float("-0.000125913") + std = float("0.00187128") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-2.92168") + max_val = float("1.65848") + mean = float("-0.759508") + std = float("0.64292") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("0.951267") + max_val = float("2.91922") + mean = float("1.86371") + std = float("0.276283") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.00163804") + max_val = float("0.00628804") + mean = float("0.0030995") + std = float("0.000676972") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.203596") + max_val = float("0.107816") + mean = float("0.0468359") + std = float("0.0244462") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.045466") + max_val = float("0.0356856") + mean = float("-0.000552717") + std = float("0.00430629") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.2468") + max_val = float("0.680328") + mean = float("-0.776828") + std = float("0.472723") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.963785") + max_val = float("2.893") + mean = float("2.09663") + std = float("0.305598") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.000409017") + max_val = float("0.00497771") + mean = float("0.00101325") + std = float("0.000291463") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.0495182") + max_val = float("0.0676708") + mean = float("0.0213109") + std = float("0.0122201") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.145268") + max_val = float("0.0573858") + mean = float("-0.000240528") + std = float("0.00299925") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [768] + dtype = "float32" + min_val = float("-2.40179") + max_val = float("0.641282") + mean = float("-0.907379") + std = float("0.339186") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [768] + dtype = "float32" + min_val = float("0.528614") + max_val = float("1.90849") + mean = float("0.919765") + std = float("0.149009") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [768] + dtype = "float32" + min_val = float("0.00512452") + max_val = float("0.0388909") + mean = float("0.00876681") + std = float("0.00254869") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + 
shape = [768] + dtype = "float32" + min_val = float("-0.181132") + max_val = float("0.160598") + mean = float("0.0272393") + std = float("0.0394703") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0589591") + max_val = float("0.0443489") + mean = float("-7.0658e-05") + std = float("0.00198784") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [512] + dtype = "float32" + min_val = float("-3.39016") + max_val = float("1.66528") + mean = float("-1.16035") + std = float("0.513188") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [512] + dtype = "float32" + min_val = float("0.522279") + max_val = float("1.67183") + mean = float("1.10945") + std = float("0.147787") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [512] + dtype = "float32" + min_val = float("0.00103462") + max_val = float("0.00758723") + mean = float("0.00356649") + std = float("0.000818923") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [512] + dtype = "float32" + min_val = float("-0.109764") + max_val = float("0.0736659") + mean = float("-0.0369159") + std = float("0.0292337") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.330093") + max_val = float("0.172505") + mean = float("-0.000458776") + std = float("0.00660531") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-0.0107955") + max_val = float("0.000905586") + mean = float("-0.00310322") + std = float("0.00226709") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.223299") + max_val = float("0.204565") + mean = float("-0.00218247") + std = float("0.00488398") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [192] + dtype = "float32" + min_val = float("-1.97347") + max_val = float("0.401125") + mean = float("-0.35017") + std = float("0.333063") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [192] + dtype = "float32" + min_val = float("0.0526394") + max_val = float("2.15499") + mean = float("0.579605") + std = float("0.41794") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [192] + dtype = "float32" + min_val = float("6.09083e-05") + max_val = float("0.00111335") + mean = float("0.000343106") + std = float("0.000170676") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [192] + dtype = "float32" + min_val = float("-0.0270574") + max_val = float("0.0425926") + mean = float("0.00396761") + std = float("0.011207") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0211288") + max_val = float("0.057486") + mean = float("-0.000295079") + std = float("0.00359086") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [192] + dtype = "float32" + min_val = float("-1.97347") + max_val = float("0.401125") + mean = float("-0.35017") + std = 
float("0.333063") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [192] + dtype = "float32" + min_val = float("0.371516") + max_val = float("2.68793") + mean = float("1.19957") + std = float("0.49139") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [192] + dtype = "float32" + min_val = float("0.000794724") + max_val = float("0.00962854") + mean = float("0.00291015") + std = float("0.00111223") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [192] + dtype = "float32" + min_val = float("-0.0718468") + max_val = float("0.0984716") + mean = float("0.0129269") + std = float("0.0281783") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0267308") + max_val = float("0.0379135") + mean = float("-0.000114666") + std = float("0.00269441") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [192] + dtype = "float32" + min_val = float("-2.88942") + max_val = float("-0.180578") + mean = float("-1.31266") + std = float("0.40108") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [192] + dtype = "float32" + min_val = float("0.693744") + max_val = float("2.09657") + mean = float("1.17992") + std = float("0.170419") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [192] + dtype = "float32" + min_val = float("0.0397823") + max_val = float("0.20648") + mean = float("0.0832138") + std = float("0.0266194") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [192] + dtype = "float32" + min_val = float("-1.78945") + max_val = float("1.40064") + mean = float("-0.16076") + std = float("0.278992") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0375591") + max_val = float("0.0441631") + mean = float("-0.000156238") + std = float("0.003236") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [192] + dtype = "float32" + min_val = float("-1.93949") + max_val = float("0.50554") + mean = float("-0.2803") + std = float("0.320371") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [192] + dtype = "float32" + min_val = float("0.0469211") + max_val = float("1.77019") + mean = float("0.443418") + std = float("0.305724") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [192] + dtype = "float32" + min_val = float("5.97514e-05") + max_val = float("0.00154188") + mean = float("0.000361428") + std = float("0.000233247") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [192] + dtype = "float32" + min_val = float("-0.0317628") + max_val = float("0.0419369") + mean = float("0.00775383") + std = float("0.0105125") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0232224") + max_val = float("0.0339654") + mean = float("-0.000371148") + std = float("0.00342873") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [192] + dtype = "float32" + min_val = 
float("-1.93949") + max_val = float("0.50554") + mean = float("-0.2803") + std = float("0.320371") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [192] + dtype = "float32" + min_val = float("0.485243") + max_val = float("2.27064") + mean = float("1.13745") + std = float("0.375246") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [192] + dtype = "float32" + min_val = float("0.00155942") + max_val = float("0.00633988") + mean = float("0.00340888") + std = float("0.000888715") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [192] + dtype = "float32" + min_val = float("-0.059897") + max_val = float("0.0791163") + mean = float("0.0257864") + std = float("0.0241054") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.021281") + max_val = float("0.031861") + mean = float("-0.000147834") + std = float("0.00288432") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [192] + dtype = "float32" + min_val = float("-2.50671") + max_val = float("-0.125499") + mean = float("-1.2865") + std = float("0.444025") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [192] + dtype = "float32" + min_val = float("0.654125") + max_val = float("1.67481") + mean = float("1.20035") + std = float("0.166495") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [192] + dtype = "float32" + min_val = float("0.0320525") + max_val = float("0.122194") + mean = float("0.0551534") + std = float("0.015108") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [192] + dtype = "float32" + min_val = float("-1.74443") + max_val = float("0.268407") + mean = float("-0.0634487") + std = float("0.184172") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0357247") + max_val = float("0.0446181") + mean = float("-0.000190407") + std = float("0.0033489") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [192] + dtype = "float32" + min_val = float("-1.75648") + max_val = float("0.462185") + mean = float("-0.263008") + std = float("0.334666") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [192] + dtype = "float32" + min_val = float("0.00362431") + max_val = float("1.67958") + mean = float("0.351223") + std = float("0.251845") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [192] + dtype = "float32" + min_val = float("1.06024e-06") + max_val = float("0.00195092") + mean = float("0.000339658") + std = float("0.000257414") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [192] + dtype = "float32" + min_val = float("-0.0316145") + max_val = float("0.0422595") + mean = float("0.0098318") + std = float("0.0102434") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0332344") + max_val = float("0.0281833") + mean = float("-0.00042353") + std = float("0.00328104") + data = None + + +class Program_weight_tensor_parameter_434: + 
name = "parameter_434" + shape = [192] + dtype = "float32" + min_val = float("-1.75648") + max_val = float("0.462186") + mean = float("-0.263008") + std = float("0.334666") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [192] + dtype = "float32" + min_val = float("0.40635") + max_val = float("1.97932") + mean = float("1.06547") + std = float("0.334289") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [192] + dtype = "float32" + min_val = float("0.00153847") + max_val = float("0.00863182") + mean = float("0.00370828") + std = float("0.001042") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [192] + dtype = "float32" + min_val = float("-0.0309003") + max_val = float("0.0776153") + mean = float("0.0263704") + std = float("0.019024") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0284898") + max_val = float("0.0384781") + mean = float("-0.000143667") + std = float("0.00297951") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.49598") + max_val = float("0.137446") + mean = float("-1.24074") + std = float("0.42434") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.655528") + max_val = float("1.81322") + mean = float("1.16819") + std = float("0.165752") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("0.0187082") + max_val = float("0.0742778") + mean = float("0.0367084") + std = float("0.00881254") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-1.21444") + max_val = float("0.264136") + mean = float("-0.0373475") + std = float("0.1279") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372317") + max_val = float("0.0498756") + mean = float("-0.000196987") + std = float("0.00340361") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-2.07869") + max_val = float("0.524922") + mean = float("-0.272756") + std = float("0.374215") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("0.000538036") + max_val = float("0.731893") + mean = float("0.211296") + std = float("0.136047") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("6.11339e-08") + max_val = float("0.000771449") + mean = float("0.000195393") + std = float("0.000113573") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.0245402") + max_val = float("0.0291137") + mean = float("0.00657376") + std = float("0.00853559") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0222475") + max_val = float("0.0284342") + mean = float("-0.000272233") + std = 
float("0.00290082") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.07869") + max_val = float("0.524922") + mean = float("-0.272756") + std = float("0.374215") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.395056") + max_val = float("1.96334") + mean = float("0.958921") + std = float("0.303885") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.00160291") + max_val = float("0.0081802") + mean = float("0.00366326") + std = float("0.00108829") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-0.0313156") + max_val = float("0.0952543") + mean = float("0.0346585") + std = float("0.0239282") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0299429") + max_val = float("0.0330693") + mean = float("-0.000173594") + std = float("0.00307091") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-2.7392") + max_val = float("-0.0809957") + mean = float("-1.23445") + std = float("0.43409") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.761401") + max_val = float("1.62235") + mean = float("1.15196") + std = float("0.143019") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("0.0164018") + max_val = float("0.0482247") + mean = float("0.0267568") + std = float("0.00582182") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.982911") + max_val = float("0.208931") + mean = float("-0.0475525") + std = float("0.111443") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0516537") + max_val = float("0.0532344") + mean = float("-0.000217094") + std = float("0.00339434") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.21222") + max_val = float("0.442158") + mean = float("-0.232609") + std = float("0.338112") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("-0.000141087") + max_val = float("0.675271") + mean = float("0.191502") + std = float("0.12054") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("2.02891e-10") + max_val = float("0.000659419") + mean = float("0.00021025") + std = float("0.000127842") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0400934") + max_val = float("0.0347371") + mean = float("0.00629852") + std = float("0.0100068") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = 
float("-0.0361348") + max_val = float("0.0383275") + mean = float("-0.000250241") + std = float("0.00300167") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-1.21222") + max_val = float("0.442158") + mean = float("-0.232609") + std = float("0.338112") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.38394") + max_val = float("1.56692") + mean = float("0.852564") + std = float("0.260405") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.00141501") + max_val = float("0.00697749") + mean = float("0.00385611") + std = float("0.00104996") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-0.0684229") + max_val = float("0.0967151") + mean = float("0.0289781") + std = float("0.0217573") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0302635") + max_val = float("0.03357") + mean = float("-0.000135225") + std = float("0.00306898") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-2.48525") + max_val = float("-0.133212") + mean = float("-1.24772") + std = float("0.418315") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.687779") + max_val = float("1.52042") + mean = float("1.12604") + std = float("0.135067") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("0.0103392") + max_val = float("0.0338966") + mean = float("0.018704") + std = float("0.00454464") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.598739") + max_val = float("0.204727") + mean = float("-0.0409936") + std = float("0.0907072") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.053814") + max_val = float("0.0540895") + mean = float("-0.000198499") + std = float("0.00340103") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.21759") + max_val = float("0.495005") + mean = float("-0.168448") + std = float("0.29241") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.00874149") + max_val = float("1.52519") + mean = float("0.237194") + std = float("0.211174") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("1.85538e-05") + max_val = float("0.00557322") + mean = float("0.000454281") + std = float("0.000544007") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0629882") + max_val = float("0.0759431") + mean = float("0.00814591") + std = float("0.0145241") + data = None + + +class Program_weight_tensor_parameter_478: + 
name = "parameter_478" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0659294") + max_val = float("0.0276847") + mean = float("-0.000370954") + std = float("0.0036383") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-1.21759") + max_val = float("0.495005") + mean = float("-0.168448") + std = float("0.29241") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.355067") + max_val = float("1.45127") + mean = float("0.757089") + std = float("0.216673") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.00277976") + max_val = float("0.0131242") + mean = float("0.00620411") + std = float("0.00184947") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-0.0724428") + max_val = float("0.0937716") + mean = float("0.0344426") + std = float("0.0309409") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0699304") + max_val = float("0.0563516") + mean = float("-0.000169718") + std = float("0.00300916") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.8763") + max_val = float("-0.211474") + mean = float("-1.14452") + std = float("0.325577") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.790047") + max_val = float("1.60209") + mean = float("1.12225") + std = float("0.129806") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("0.00787833") + max_val = float("0.0428001") + mean = float("0.01604") + std = float("0.00482246") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.452939") + max_val = float("0.210443") + mean = float("-0.0367874") + std = float("0.0808769") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0608784") + max_val = float("0.0702238") + mean = float("-0.000151292") + std = float("0.00330708") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.8608") + max_val = float("1.57705") + mean = float("-0.0284849") + std = float("0.745788") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.485748") + max_val = float("2.08107") + mean = float("0.90061") + std = float("0.232861") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.0067126") + max_val = float("0.0448422") + mean = float("0.0156545") + std = float("0.00613534") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.197878") + max_val = float("0.277046") + mean = float("-0.0352484") + std = 
float("0.0502412") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0901092") + max_val = float("0.077961") + mean = float("-0.00049004") + std = float("0.00702032") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.96811") + max_val = float("1.66143") + mean = float("0.0961059") + std = float("0.663116") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.83173") + max_val = float("5.55469") + mean = float("1.91226") + std = float("0.931019") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0026531") + max_val = float("0.049029") + mean = float("0.0102262") + std = float("0.00443075") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.12448") + max_val = float("0.107055") + mean = float("-0.0161019") + std = float("0.0435851") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0687437") + max_val = float("0.112334") + mean = float("-0.000327953") + std = float("0.00655925") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [384] + dtype = "float32" + min_val = float("-2.9228") + max_val = float("1.32787") + mean = float("-0.300426") + std = float("0.562826") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [384] + dtype = "float32" + min_val = float("0.638427") + max_val = float("2.47213") + mean = float("1.16034") + std = float("0.257402") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [384] + dtype = "float32" + min_val = float("0.00594162") + max_val = float("0.0661259") + mean = float("0.015018") + std = float("0.007417") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [384] + dtype = "float32" + min_val = float("-0.173063") + max_val = float("0.179721") + mean = float("0.0179612") + std = float("0.0526315") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0649605") + max_val = float("0.0630394") + mean = float("-7.20001e-05") + std = float("0.00349767") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [256] + dtype = "float32" + min_val = float("-2.04411") + max_val = float("1.28972") + mean = float("-0.923474") + std = float("0.540605") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [256] + dtype = "float32" + min_val = float("0.528197") + max_val = float("1.69329") + mean = float("1.0537") + std = float("0.176193") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [256] + dtype = "float32" + min_val = float("0.00073195") + max_val = float("0.0101391") + mean = float("0.0028745") + std = float("0.00128946") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [256] + dtype = "float32" + min_val = float("-0.164563") + max_val 
= float("0.104931") + mean = float("-0.034803") + std = float("0.0476285") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.165736") + max_val = float("0.120457") + mean = float("-0.000623197") + std = float("0.011113") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-0.0132348") + max_val = float("0.000998451") + mean = float("-0.00487955") + std = float("0.0031722") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.308917") + max_val = float("0.197456") + mean = float("-0.0038993") + std = float("0.00944006") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [96] + dtype = "float32" + min_val = float("-1.91684") + max_val = float("0.526926") + mean = float("-0.210784") + std = float("0.433617") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [96] + dtype = "float32" + min_val = float("0.140578") + max_val = float("3.21133") + mean = float("0.634555") + std = float("0.665264") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [96] + dtype = "float32" + min_val = float("7.2207e-05") + max_val = float("0.00154974") + mean = float("0.000376223") + std = float("0.000272902") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [96] + dtype = "float32" + min_val = float("-0.0374736") + max_val = float("0.0466451") + mean = float("0.00496612") + std = float("0.0164318") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.041906") + max_val = float("0.0800592") + mean = float("-0.000562998") + std = float("0.00645051") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [96] + dtype = "float32" + min_val = float("-1.91684") + max_val = float("0.526926") + mean = float("-0.210784") + std = float("0.433617") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [96] + dtype = "float32" + min_val = float("0.349141") + max_val = float("5.44739") + mean = float("1.08468") + std = float("0.878192") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [96] + dtype = "float32" + min_val = float("0.000395259") + max_val = float("0.00620783") + mean = float("0.00217174") + std = float("0.00114726") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [96] + dtype = "float32" + min_val = float("-0.0894642") + max_val = float("0.108141") + mean = float("0.0145054") + std = float("0.0375987") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0396274") + max_val = float("0.0583744") + mean = float("-0.000193799") + std = float("0.00473819") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.46379") + max_val = float("-0.0206903") + mean = float("-1.22339") + std = float("0.443773") + data = None + + +class Program_weight_tensor_parameter_522: + name = 
"parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.531857") + max_val = float("1.64262") + mean = float("0.947435") + std = float("0.173066") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("0.025952") + max_val = float("0.125559") + mean = float("0.0552305") + std = float("0.0225117") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-2.5031") + max_val = float("1.21422") + mean = float("-0.175206") + std = float("0.381897") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.17314") + max_val = float("0.0869067") + mean = float("-0.000259912") + std = float("0.00587908") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [96] + dtype = "float32" + min_val = float("-1.38808") + max_val = float("0.55576") + mean = float("-0.133704") + std = float("0.345848") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [96] + dtype = "float32" + min_val = float("0.0455848") + max_val = float("1.86557") + mean = float("0.459828") + std = float("0.366929") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [96] + dtype = "float32" + min_val = float("9.00694e-05") + max_val = float("0.00284189") + mean = float("0.000704594") + std = float("0.000599579") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [96] + dtype = "float32" + min_val = float("-0.0278321") + max_val = float("0.0383854") + mean = float("0.00572555") + std = float("0.0138668") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0497848") + max_val = float("0.0370223") + mean = float("-0.00051534") + std = float("0.00599686") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [96] + dtype = "float32" + min_val = float("-1.38808") + max_val = float("0.55576") + mean = float("-0.133704") + std = float("0.345848") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [96] + dtype = "float32" + min_val = float("0.368559") + max_val = float("2.33025") + mean = float("0.902622") + std = float("0.42661") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [96] + dtype = "float32" + min_val = float("0.00148637") + max_val = float("0.0135387") + mean = float("0.0041443") + std = float("0.00214544") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [96] + dtype = "float32" + min_val = float("-0.0662393") + max_val = float("0.102528") + mean = float("0.0217098") + std = float("0.0293973") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0543383") + max_val = float("0.0431882") + mean = float("-0.000226573") + std = float("0.00480004") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [96] + dtype = "float32" + min_val = float("-3.31982") + max_val = float("0.361874") + mean = float("-1.17483") + std = float("0.556563") + data = None + + 
+class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [96] + dtype = "float32" + min_val = float("0.46978") + max_val = float("1.97943") + mean = float("1.04118") + std = float("0.238858") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [96] + dtype = "float32" + min_val = float("0.0175926") + max_val = float("0.0836044") + mean = float("0.0339409") + std = float("0.0109669") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [96] + dtype = "float32" + min_val = float("-0.65582") + max_val = float("0.42796") + mean = float("-0.0773746") + std = float("0.183965") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.135994") + max_val = float("0.14312") + mean = float("-0.000301808") + std = float("0.00581911") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [96] + dtype = "float32" + min_val = float("-1.24978") + max_val = float("0.578367") + mean = float("-0.109897") + std = float("0.290186") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [96] + dtype = "float32" + min_val = float("0.0246004") + max_val = float("1.27839") + mean = float("0.323197") + std = float("0.193467") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [96] + dtype = "float32" + min_val = float("3.01205e-05") + max_val = float("0.00338782") + mean = float("0.000592576") + std = float("0.000520317") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [96] + dtype = "float32" + min_val = float("-0.0360867") + max_val = float("0.0475566") + mean = float("0.00362787") + std = float("0.013798") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0411901") + max_val = float("0.0430319") + mean = float("-0.000340746") + std = float("0.00614955") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [96] + dtype = "float32" + min_val = float("-1.24978") + max_val = float("0.578367") + mean = float("-0.109897") + std = float("0.290186") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [96] + dtype = "float32" + min_val = float("0.316771") + max_val = float("1.67064") + mean = float("0.748948") + std = float("0.257654") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [96] + dtype = "float32" + min_val = float("0.00151664") + max_val = float("0.0114809") + mean = float("0.00452479") + std = float("0.0018046") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [96] + dtype = "float32" + min_val = float("-0.0689274") + max_val = float("0.101842") + mean = float("0.0145439") + std = float("0.0281882") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0705669") + max_val = float("0.0532138") + mean = float("-0.000195257") + std = float("0.00485332") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-3.58212") + max_val = float("0.290835") + mean = 
float("-1.12477") + std = float("0.572674") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.517852") + max_val = float("2.19076") + mean = float("1.05388") + std = float("0.238427") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("0.0152262") + max_val = float("0.0469304") + mean = float("0.0254356") + std = float("0.00569535") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-0.696373") + max_val = float("0.556499") + mean = float("-0.0268212") + std = float("0.160048") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0861719") + max_val = float("0.122968") + mean = float("-0.000287154") + std = float("0.00589893") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-0.891952") + max_val = float("0.527213") + mean = float("-0.160534") + std = float("0.280112") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.0198505") + max_val = float("1.40596") + mean = float("0.32365") + std = float("0.21384") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("2.65875e-05") + max_val = float("0.0035545") + mean = float("0.000666042") + std = float("0.000557693") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.0225542") + max_val = float("0.0470766") + mean = float("0.00719281") + std = float("0.0127843") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0515445") + max_val = float("0.0402409") + mean = float("-0.000649295") + std = float("0.00623422") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-0.891952") + max_val = float("0.527213") + mean = float("-0.160534") + std = float("0.280112") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.177251") + max_val = float("1.78152") + mean = float("0.711271") + std = float("0.284394") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.000954749") + max_val = float("0.0140023") + mean = float("0.00493077") + std = float("0.00203911") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-0.0437256") + max_val = float("0.0972775") + mean = float("0.0232467") + std = float("0.0274604") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0615728") + max_val = float("0.0543972") + mean = float("-0.000235728") + std = float("0.0048777") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + 
min_val = float("-2.65693") + max_val = float("0.0621813") + mean = float("-1.06121") + std = float("0.488185") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.514903") + max_val = float("1.7386") + mean = float("1.01718") + std = float("0.19379") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("0.00916572") + max_val = float("0.0355501") + mean = float("0.0189882") + std = float("0.0049441") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.578484") + max_val = float("0.392696") + mean = float("-0.0519735") + std = float("0.150499") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0817411") + max_val = float("0.109853") + mean = float("-0.000328862") + std = float("0.00579377") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-0.977794") + max_val = float("0.481905") + mean = float("-0.136288") + std = float("0.276572") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.0466995") + max_val = float("1.14307") + mean = float("0.293555") + std = float("0.17287") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.000139938") + max_val = float("0.00502886") + mean = float("0.000944156") + std = float("0.000701334") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0343031") + max_val = float("0.0488385") + mean = float("0.00443104") + std = float("0.0151837") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0659496") + max_val = float("0.0592768") + mean = float("-0.000596122") + std = float("0.00703548") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-0.977794") + max_val = float("0.481905") + mean = float("-0.136288") + std = float("0.276572") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.245048") + max_val = float("1.70073") + mean = float("0.606904") + std = float("0.228214") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.00285907") + max_val = float("0.0180896") + mean = float("0.00738542") + std = float("0.00273146") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-0.041536") + max_val = float("0.106912") + mean = float("0.0150356") + std = float("0.0301621") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0701934") + max_val = float("0.0473947") + mean = float("-0.000231472") + std = float("0.00491904") + data = None + + +class Program_weight_tensor_parameter_581: + 
name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-3.46647") + max_val = float("0.198407") + mean = float("-1.0028") + std = float("0.54819") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.683607") + max_val = float("2.5079") + mean = float("1.07611") + std = float("0.212277") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("0.00759054") + max_val = float("0.0340864") + mean = float("0.0154622") + std = float("0.00491081") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.403653") + max_val = float("0.238067") + mean = float("-0.0341318") + std = float("0.129941") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070539") + max_val = float("0.0822124") + mean = float("-0.000249488") + std = float("0.00593475") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-0.625482") + max_val = float("0.447775") + mean = float("-0.0818442") + std = float("0.255238") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.0903974") + max_val = float("1.28733") + mean = float("0.30658") + std = float("0.194355") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00029884") + max_val = float("0.0154906") + mean = float("0.00327074") + std = float("0.00250709") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0358192") + max_val = float("0.0176688") + mean = float("-0.000884761") + std = float("0.0092238") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0909244") + max_val = float("0.0638429") + mean = float("-0.000939245") + std = float("0.00810732") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-0.625482") + max_val = float("0.447775") + mean = float("-0.0818442") + std = float("0.255238") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.209018") + max_val = float("1.43577") + mean = float("0.530685") + std = float("0.258463") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.00483437") + max_val = float("0.0579556") + mean = float("0.0205523") + std = float("0.0102812") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.112834") + max_val = float("0.0619045") + mean = float("-0.00905359") + std = float("0.0304477") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0923533") + max_val = float("0.0562995") + mean = float("-0.000298776") + std = float("0.00484448") + 
data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-2.41013") + max_val = float("0.515499") + mean = float("-0.827551") + std = float("0.466858") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.856185") + max_val = float("2.17526") + mean = float("1.27625") + std = float("0.208572") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("0.00590531") + max_val = float("0.0284532") + mean = float("0.0132807") + std = float("0.00484257") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.468657") + max_val = float("0.234248") + mean = float("-0.0469978") + std = float("0.115918") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.139957") + max_val = float("0.143678") + mean = float("-0.000168185") + std = float("0.00618277") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-3.16841") + max_val = float("1.88347") + mean = float("0.499626") + std = float("0.860109") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.217997") + max_val = float("2.63547") + mean = float("0.55599") + std = float("0.320463") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00570217") + max_val = float("0.0797291") + mean = float("0.019333") + std = float("0.013037") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.222949") + max_val = float("0.246596") + mean = float("-0.0255712") + std = float("0.0726957") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.158868") + max_val = float("0.164315") + mean = float("-0.000488541") + std = float("0.0122862") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-4.92519") + max_val = float("1.56853") + mean = float("0.381345") + std = float("1.04759") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.407504") + max_val = float("6.75887") + mean = float("1.69518") + std = float("1.30251") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.00267938") + max_val = float("0.10553") + mean = float("0.0171359") + std = float("0.0149448") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.114101") + max_val = float("0.256348") + mean = float("0.0291075") + std = float("0.0728672") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0863343") + max_val = float("0.131113") + mean = 
float("0.000256062") + std = float("0.011109") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [192] + dtype = "float32" + min_val = float("-2.27303") + max_val = float("1.74575") + mean = float("-0.126138") + std = float("0.738736") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [192] + dtype = "float32" + min_val = float("0.631744") + max_val = float("2.97049") + mean = float("1.08976") + std = float("0.283661") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [192] + dtype = "float32" + min_val = float("0.0066416") + max_val = float("0.127108") + mean = float("0.0245677") + std = float("0.0180396") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [192] + dtype = "float32" + min_val = float("-0.372282") + max_val = float("0.192252") + mean = float("-0.0580008") + std = float("0.0912545") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0730784") + max_val = float("0.0815416") + mean = float("-0.000188759") + std = float("0.005804") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [128] + dtype = "float32" + min_val = float("-2.81073") + max_val = float("1.95499") + mean = float("-0.709629") + std = float("0.64717") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [128] + dtype = "float32" + min_val = float("0.305109") + max_val = float("2.86915") + mean = float("1.02276") + std = float("0.27811") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [128] + dtype = "float32" + min_val = float("0.000253069") + max_val = float("0.0069014") + mean = float("0.00178166") + std = float("0.000956453") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [128] + dtype = "float32" + min_val = float("-0.239333") + max_val = float("0.217496") + mean = float("0.00924142") + std = float("0.0610474") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.147037") + max_val = float("0.149342") + mean = float("-0.00110396") + std = float("0.0173671") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-0.0165634") + max_val = float("-0.00196879") + mean = float("-0.00769962") + std = float("0.00367971") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.243907") + max_val = float("0.114992") + mean = float("-0.00840666") + std = float("0.0162978") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape 
= [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0450246") + max_val = float("0.0514816") + mean = float("-0.00123045") + std = float("0.0107863") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0521573") + max_val = float("0.0611979") + mean = float("-0.000213153") + std = float("0.008606") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0706573") + max_val = float("0.0786005") + mean = float("-0.00045913") + std = float("0.00953234") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0663249") + max_val = float("0.067324") + mean = float("-0.000909569") + std = float("0.01127") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + 
shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0616683") + max_val = float("0.0461804") + mean = float("-0.000493448") + std = float("0.00846644") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0900923") + max_val = float("0.0705786") + mean = float("-0.000340877") + std = float("0.00971893") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.079166") + max_val = float("0.0586207") + mean = float("-0.00142305") + std = float("0.0137654") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0574322") + max_val = float("0.0707387") + mean = float("-0.000248503") + std = float("0.00893624") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = 
"parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.104177") + max_val = float("0.0689732") + mean = float("-0.000258248") + std = float("0.0104282") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.136412") + max_val = float("0.099814") + mean = float("-0.00170858") + std = float("0.0182544") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.101645") + max_val = float("0.142665") + mean = float("-0.000441607") + std = float("0.0175402") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("-3.41513") + max_val = float("3.27332") + mean = float("0.327219") + std = float("1.14459") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("0.872879") + max_val = float("4.91326") + mean = float("1.91771") + std = float("0.753124") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96] + dtype = "float32" + min_val = float("0.36528") + max_val = float("14.1346") + mean = float("1.51672") + std = float("1.67236") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-1.13612") + max_val = float("1.46522") + mean = float("-0.212556") + std = float("0.518482") + data = None + + +class Program_weight_tensor_parameter_682: + name = 
"parameter_682" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.112294") + max_val = float("0.100519") + mean = float("-0.000268971") + std = float("0.00998146") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.126613") + max_val = float("0.131386") + mean = float("-0.000428213") + std = float("0.0153943") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.253111") + max_val = float("0.1558") + mean = float("-0.000161754") + std = float("0.0200489") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.246483") + max_val = float("0.223526") + mean = float("-0.00143837") + std = float("0.0545445") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json new file mode 100644 index 
000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py new file mode 100644 index 000000000..3e8b52976 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.118063] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [0.630255] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [4.07241] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py new file mode 100644 index 000000000..d6af4f831 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 4116] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 4116, 4] + dtype = "float32" + min_val = float("-9.93527") + max_val = float("63.9265") + mean = float("24.3499") + std = float("17.1828") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 4116, 4] + dtype = "float32" + max_val = float("55.2727") + mean = float("24.1964") + std = float("14.7865") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 4116, 4] + dtype = "float32" + max_val = float("0.947339") + mean = float("0.000280391") + std = float("0.0157366") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [36.9308] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 4116, 68] + dtype = "float32" + min_val = float("-6.52177") + max_val = float("13.8275") + mean = float("2.55256e-05") + std = float("1.49822") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4116, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("55.5") + mean = float("24.3333") + std = float("16.0356") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py new file mode 100644 index 000000000..04dca1210 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py @@ -0,0 +1,509 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (8x4116xi32) <- (8x4116xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (8x4116x1xi32) <- (8x4116xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (8x4116x4xi32) <- (8x4116x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (8x4116x4xb) <- (8x4116x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (8x4116x4xf32, 8x4116x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (8x4116x4xf32, 8x4116x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (8x4116xf32) <- (8x4116x4xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, 
full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (8x4116xf32, 8x4116xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, 
subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (8x4116x1xb) <- (8x4116xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (8x4116x1xi32) <- (8x4116x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (8x4116x68xi32) <- (8x4116x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (8x4116x68xb) <- (8x4116x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (8x4116x68xf32, 8x4116x68xb) + 
masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([8x4116x2xf32, 8x4116x2xf32]) <- (8x4116x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (8x4116x2xf32, 8x4116x2xf32) <- ([8x4116x2xf32, 8x4116x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (8x4116x2xf32) <- (4116x2xf32, 8x4116x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (8x4116x2xf32) <- (8x4116x2xf32, 4116x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x4116x2xf32, 8x4116x2xf32]) <- (8x4116x2xf32, 8x4116x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (8x4116x4xf32) <- ([8x4116x2xf32, 8x4116x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x4116x4xf32) <- (8x4116x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # pd_op.masked_select: (-1xf32) <- (8x4116x4xf32, 8x4116x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + 
multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..fc6df8003 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +04a47b3e6a28ad406660a0b13a9bb86de942c8058abd14e7f9ffdf52c75c884d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py new file mode 100644 index 000000000..09671b32f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py @@ -0,0 +1,134 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.00241] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [1.00236] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [2, 3, 640, 640] + dtype = "float32" + max_val = float("1.0") + mean = float("0.471598") + std = float("0.270715") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py new file mode 100644 index 000000000..1eb1d0609 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py @@ -0,0 +1,7279 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + 
parameter_25,
+        parameter_26,
+        parameter_27,
+        parameter_28,
+        parameter_29,
+        parameter_30,
+        parameter_31,
+        parameter_32,
+        parameter_33,
+        parameter_34,
+        parameter_35,
+        parameter_36,
+        parameter_37,
+        parameter_38,
+        parameter_39,
+        parameter_40,
+        parameter_41,
+        parameter_42,
+        parameter_43,
+        parameter_44,
+        parameter_45,
+        parameter_46,
+        parameter_47,
+        parameter_48,
+        parameter_49,
+        parameter_50,
+        parameter_51,
+        parameter_52,
+        parameter_53,
+        parameter_54,
+        parameter_55,
+        parameter_56,
+        parameter_57,
+        parameter_58,
+        parameter_59,
+        parameter_60,
+        parameter_61,
+        parameter_62,
+        parameter_63,
+        parameter_64,
+        parameter_65,
+        parameter_66,
+        parameter_67,
+        parameter_68,
+        parameter_69,
+        parameter_70,
+        parameter_71,
+        parameter_72,
+        parameter_73,
+        parameter_74,
+        parameter_75,
+        parameter_76,
+        parameter_77,
+        parameter_78,
+        parameter_79,
+        parameter_80,
+        parameter_81,
+        parameter_82,
+        parameter_83,
+        parameter_84,
+        parameter_85,
+        parameter_86,
+        parameter_87,
+        parameter_88,
+        parameter_89,
+        parameter_90,
+        parameter_91,
+        parameter_92,
+        parameter_93,
+        parameter_94,
+        parameter_95,
+        parameter_96,
+        parameter_97,
+        parameter_98,
+        parameter_99,
+        parameter_100,
+        parameter_101,
+        parameter_102,
+        parameter_103,
+        parameter_104,
+        parameter_105,
+        parameter_106,
+        parameter_107,
+        parameter_108,
+        parameter_109,
+        parameter_110,
+        parameter_111,
+        parameter_112,
+        parameter_113,
+        parameter_114,
+        parameter_115,
+        parameter_116,
+        parameter_117,
+        parameter_118,
+        parameter_119,
+        parameter_120,
+        parameter_121,
+        parameter_122,
+        parameter_123,
+        parameter_124,
+        parameter_125,
+        parameter_126,
+        parameter_127,
+        parameter_128,
+        parameter_129,
+        parameter_130,
+        parameter_131,
+        parameter_132,
+        parameter_133,
+        parameter_134,
+        parameter_135,
+        parameter_136,
+        parameter_137,
+        parameter_138,
+        parameter_139,
+        parameter_140,
+        parameter_141,
+        parameter_142,
+        parameter_143,
+        parameter_144,
+        parameter_145,
+        parameter_146,
+        parameter_147,
+        parameter_148,
+        parameter_149,
+        parameter_150,
+        parameter_151,
+        parameter_152,
+        parameter_153,
+        parameter_154,
+        parameter_155,
+        parameter_156,
+        parameter_157,
+        parameter_158,
+        parameter_159,
+        parameter_160,
+        parameter_161,
+        parameter_162,
+        parameter_163,
+        parameter_164,
+        parameter_165,
+        parameter_166,
+        parameter_167,
+        parameter_168,
+        parameter_169,
+        parameter_170,
+        parameter_171,
+        parameter_172,
+        parameter_173,
+        parameter_174,
+        parameter_175,
+        parameter_176,
+        parameter_177,
+        parameter_178,
+        parameter_179,
+        parameter_180,
+        parameter_181,
+        parameter_182,
+        parameter_183,
+        parameter_184,
+        parameter_185,
+        parameter_186,
+        parameter_187,
+        parameter_188,
+        parameter_189,
+        parameter_190,
+        parameter_191,
+        parameter_192,
+        parameter_193,
+        parameter_194,
+        parameter_195,
+        parameter_196,
+        parameter_197,
+        parameter_198,
+        parameter_199,
+        parameter_200,
+        parameter_201,
+        parameter_202,
+        parameter_203,
+        parameter_204,
+        parameter_205,
+        parameter_206,
+        parameter_207,
+        parameter_208,
+        parameter_209,
+        parameter_210,
+        parameter_211,
+        parameter_212,
+        parameter_213,
+        parameter_214,
+        parameter_215,
+        parameter_216,
+        parameter_217,
+        parameter_218,
+        parameter_219,
+        parameter_220,
+        parameter_221,
+        parameter_222,
+        parameter_223,
+        parameter_224,
+        parameter_225,
+        parameter_226,
+        parameter_227,
+        parameter_228,
+        parameter_229,
+        parameter_230,
+        parameter_231,
+        parameter_232,
+        parameter_233,
+        parameter_234,
+        parameter_235,
+        parameter_236,
+        parameter_237,
+        parameter_238,
+        parameter_239,
+        parameter_240,
+        parameter_241,
+        parameter_242,
+        parameter_243,
+        parameter_244,
+        parameter_245,
+        parameter_246,
+        parameter_247,
+        parameter_248,
+        parameter_249,
+        parameter_250,
+        parameter_251,
+        parameter_252,
+        parameter_253,
+        parameter_254,
+        parameter_255,
+        parameter_256,
+        parameter_257,
+        parameter_258,
+        parameter_259,
+        parameter_260,
+        parameter_261,
+        parameter_262,
+        parameter_263,
+        parameter_264,
+        parameter_265,
+        parameter_266,
+        parameter_267,
+        parameter_268,
+        parameter_269,
+        parameter_270,
+        parameter_271,
+        parameter_272,
+        parameter_273,
+        parameter_274,
+        parameter_275,
+        parameter_276,
+        parameter_277,
+        parameter_278,
+        parameter_279,
+        parameter_280,
+        parameter_281,
+        parameter_282,
+        parameter_283,
+        parameter_284,
+        parameter_285,
+        parameter_286,
+        parameter_287,
+        parameter_288,
+        parameter_289,
+        parameter_290,
+        parameter_291,
+        parameter_292,
+        parameter_293,
+        parameter_294,
+        parameter_295,
+        parameter_296,
+        parameter_297,
+        parameter_298,
+        parameter_299,
+        parameter_300,
+        parameter_301,
+        parameter_302,
+        parameter_303,
+        parameter_304,
+        parameter_305,
+        parameter_306,
+        parameter_307,
+        parameter_308,
+        parameter_309,
+        parameter_310,
+        parameter_311,
+        parameter_312,
+        parameter_313,
+        parameter_314,
+        parameter_315,
+        parameter_316,
+        parameter_317,
+        parameter_318,
+        parameter_319,
+        parameter_320,
+        parameter_321,
+        parameter_322,
+        parameter_323,
+        parameter_324,
+        parameter_325,
+        parameter_326,
+        parameter_327,
+        parameter_328,
+        parameter_329,
+        parameter_330,
+        parameter_331,
+        parameter_332,
+        parameter_333,
+        parameter_334,
+        parameter_335,
+        parameter_336,
+        parameter_337,
+        parameter_338,
+        parameter_339,
+        parameter_340,
+        parameter_341,
+        parameter_342,
+        parameter_343,
+        parameter_344,
+        parameter_345,
+        parameter_346,
+        parameter_347,
+        parameter_348,
+        parameter_349,
+        parameter_350,
+        parameter_351,
+        parameter_352,
+        parameter_353,
+        parameter_354,
+        parameter_355,
+        parameter_356,
+        parameter_357,
+        parameter_358,
+        parameter_359,
+        parameter_360,
+        parameter_361,
+        parameter_362,
+        parameter_363,
+        parameter_364,
+        parameter_365,
+        parameter_366,
+        parameter_367,
+        parameter_368,
+        parameter_369,
+        parameter_370,
+        parameter_371,
+        parameter_372,
+        parameter_373,
+        parameter_374,
+        parameter_375,
+        parameter_376,
+        parameter_377,
+        parameter_378,
+        parameter_379,
+        parameter_380,
+        parameter_381,
+        parameter_382,
+        parameter_383,
+        parameter_384,
+        parameter_385,
+        parameter_386,
+        parameter_387,
+        parameter_388,
+        parameter_389,
+        parameter_390,
+        parameter_391,
+        parameter_392,
+        parameter_393,
+        parameter_394,
+        parameter_395,
+        parameter_396,
+        parameter_397,
+        parameter_398,
+        parameter_399,
+        parameter_400,
+        parameter_401,
+        parameter_402,
+        parameter_403,
+        parameter_404,
+        parameter_405,
+        parameter_406,
+        parameter_407,
+        parameter_408,
+        parameter_409,
+        parameter_410,
+        parameter_411,
+        parameter_412,
+        parameter_413,
+        parameter_414,
+        parameter_415,
+        parameter_416,
+        parameter_417,
+        parameter_418,
+        parameter_419,
+        parameter_420,
+        parameter_421,
+        parameter_422,
+        parameter_423,
+        parameter_424,
+        parameter_425,
+        parameter_426,
+        parameter_427,
+        parameter_428,
+        parameter_429,
+        parameter_430,
+        parameter_431,
+        parameter_432,
+        parameter_433,
+        parameter_434,
+        parameter_435,
+        parameter_436,
+        parameter_437,
+        parameter_438,
+        parameter_439,
+        parameter_440,
+        parameter_441,
+        parameter_442,
+        parameter_443,
+        parameter_444,
+        parameter_445,
+        parameter_446,
+        parameter_447,
+        parameter_448,
+        parameter_449,
+        parameter_450,
+        parameter_451,
+        parameter_452,
+        parameter_453,
+        parameter_454,
+        parameter_455,
+        parameter_456,
+        parameter_457,
+        parameter_458,
+        parameter_459,
+        parameter_460,
+        parameter_461,
+        parameter_462,
+        parameter_463,
+        parameter_464,
+        parameter_465,
+        parameter_466,
+        parameter_467,
+        parameter_468,
+        parameter_469,
+        parameter_470,
+        parameter_471,
+        parameter_472,
+        parameter_473,
+        parameter_474,
+        parameter_475,
+        parameter_476,
+        parameter_477,
+        parameter_478,
+        parameter_479,
+        parameter_480,
+        parameter_481,
+        parameter_482,
+        parameter_483,
+        parameter_484,
+        parameter_485,
+        parameter_486,
+        parameter_487,
+        parameter_488,
+        parameter_489,
+        parameter_490,
+        parameter_491,
+        parameter_492,
+        parameter_493,
+        parameter_494,
+        parameter_495,
+        parameter_496,
+        parameter_497,
+        parameter_498,
+        parameter_499,
+        parameter_500,
+        parameter_501,
+        parameter_502,
+        parameter_503,
+        parameter_504,
+        parameter_505,
+        parameter_506,
+        parameter_507,
+        parameter_508,
+        parameter_509,
+        parameter_510,
+        parameter_511,
+        parameter_512,
+        parameter_513,
+        parameter_514,
+        parameter_515,
+        parameter_516,
+        parameter_517,
+        parameter_518,
+        parameter_519,
+        parameter_520,
+        parameter_521,
+        parameter_522,
+        parameter_523,
+        parameter_524,
+        parameter_525,
+        parameter_526,
+        parameter_527,
+        parameter_528,
+        parameter_529,
+        parameter_530,
+        parameter_531,
+        parameter_532,
+        parameter_533,
+        parameter_534,
+        parameter_535,
+        parameter_536,
+        parameter_537,
+        parameter_538,
+        parameter_539,
+        parameter_540,
+        parameter_541,
+        parameter_542,
+        parameter_543,
+        parameter_544,
+        parameter_545,
+        parameter_546,
+        parameter_547,
+        parameter_548,
+        parameter_549,
+        parameter_550,
+        parameter_551,
+        parameter_552,
+        parameter_553,
+        parameter_554,
+        parameter_555,
+        parameter_556,
+        parameter_557,
+        parameter_558,
+        parameter_559,
+        parameter_560,
+        parameter_561,
+        parameter_562,
+        parameter_563,
+        parameter_564,
+        parameter_565,
+        parameter_566,
+        parameter_567,
+        parameter_568,
+        parameter_569,
+        parameter_570,
+        parameter_571,
+        parameter_572,
+        parameter_573,
+        parameter_574,
+        parameter_575,
+        parameter_576,
+        parameter_577,
+        parameter_578,
+        parameter_579,
+        parameter_580,
+        parameter_581,
+        parameter_582,
+        parameter_583,
+        parameter_584,
+        parameter_585,
+        parameter_586,
+        parameter_587,
+        parameter_588,
+        parameter_589,
+        parameter_590,
+        parameter_591,
+        parameter_592,
+        parameter_593,
+        parameter_594,
+        parameter_595,
+        parameter_596,
+        parameter_597,
+        parameter_598,
+        parameter_599,
+        parameter_600,
+        parameter_601,
+        parameter_602,
+        parameter_603,
+        parameter_604,
+        parameter_605,
+        parameter_606,
+        parameter_607,
+        parameter_608,
+        parameter_609,
+        parameter_610,
+        parameter_611,
+        parameter_612,
+        parameter_613,
+        parameter_614,
+        parameter_615,
+        parameter_616,
+        parameter_617,
+        parameter_618,
+        parameter_619,
+        parameter_620,
+        parameter_621,
+        parameter_622,
+        parameter_623,
+        parameter_624,
+        parameter_625,
+        parameter_626,
+        parameter_627,
+        parameter_628,
+        parameter_629,
+        parameter_630,
+        parameter_631,
+        parameter_632,
+        parameter_633,
+        parameter_634,
+        parameter_635,
+        parameter_636,
+        parameter_637,
+        parameter_638,
+        parameter_639,
+        parameter_640,
+        parameter_641,
+        parameter_642,
+        parameter_643,
+        parameter_644,
+        parameter_645,
+        parameter_646,
+        parameter_647,
+        parameter_648,
+        parameter_649,
+        parameter_650,
+        parameter_651,
+        parameter_652,
+        parameter_653,
+        parameter_654,
+        parameter_655,
+
parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + ): + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_18, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_18, parameter_752 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_747, swish_0 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_742, swish_1 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_741, + parameter_740, + parameter_739, + parameter_738, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 + + # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_737, swish_2 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727, swish_3 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + 
batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712, swish_6 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # 
pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697, swish_8 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del batch_norm__66, data_1 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + 
conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682, swish_10 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del batch_norm__84, data_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + del batch_norm__78, multiply_2 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_2 = 
paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_677 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) + del parameter_676 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_675 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 + + # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_670, swish_12 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_665 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + 
batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660, swish_13 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_645, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645, swish_16 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del batch_norm__126, data_3 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630, swish_18 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + 
batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del batch_norm__144, data_4 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615, swish_20 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del batch_norm__162, data_5 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600, swish_22 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + 
del batch_norm__180, data_6 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + del batch_norm__174, multiply_7 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585, swish_24 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del batch_norm__198, data_7 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + del batch_norm__192, multiply_8 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_25 = 
paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570, swish_26 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del batch_norm__216, data_8 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + del batch_norm__210, multiply_9 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + 
combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_565 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) + del parameter_564 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_10, parameter_563 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 + + # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_558 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_553 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + 
batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548, swish_29 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- 
(-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533, swish_32 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del batch_norm__258, data_9 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518, swish_34 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 
192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del batch_norm__276, data_10 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + del batch_norm__270, multiply_12 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503, swish_36 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del batch_norm__294, data_11 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + del batch_norm__288, multiply_13 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488, swish_38 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del batch_norm__312, data_12 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + del batch_norm__306, multiply_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473, swish_40 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + 
multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del batch_norm__330, data_13 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + del batch_norm__324, multiply_15 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458, swish_42 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del batch_norm__348, data_14 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) 
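+ # The multiply/add just above merge the two branches of a residual unit that
+ # repeats throughout this subgraph: conv3x3 -> batch_norm -> swish feeds both a
+ # second conv3x3 branch and a conv1x1 branch; the conv1x1 branch is scaled by a
+ # 1xf32 input (data_*), the branches are summed, passed through swish, and the
+ # result is added back to the unit's input a few ops below. The
+ # (lambda x, f: f(x))(...) wrapper only guards against batch_norm returning a
+ # single tensor instead of the usual 6-element tuple. Roughly equivalent, as a
+ # readable sketch rather than the exported graph itself:
+ #   y = swish(bn_a(conv3x3_a(x)))
+ #   y = bn_b(conv3x3_b(y)) + alpha * bn_c(conv1x1(y))   # alpha == data_*
+ #   out = x + swish(y)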
+ del batch_norm__342, multiply_16 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_453 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_452, full_int_array_1) + del parameter_452 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_451 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_45 = 
paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436, swish_45 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421, swish_48 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del batch_norm__390, data_15 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + del batch_norm__384, multiply_18 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406, swish_50 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del batch_norm__408, data_16 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + del batch_norm__402, multiply_19 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 
384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391, swish_52 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del batch_norm__426, data_17 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + del batch_norm__420, multiply_20 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_386 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) + del parameter_385 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_384, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_21, parameter_384 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_383, + parameter_382, + parameter_381, + parameter_380, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, 
parameter_380, parameter_381, parameter_382, parameter_383 + + # pd_op.swish: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, swish_54 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 
384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_359, swish_57 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_40) + del add_40 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # 
pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_41) + del add_41 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (-1x1536x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_61 = 
paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_42) + del add_42 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_5 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, 
[1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x512x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (-1x896x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 
= paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, batch_norm__546 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(add_43) + del add_43 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_44) + del add_44 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + 
swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_45) + del add_45 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_7 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 
96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234, swish_77 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_46 = 
paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_46) + del add_46 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_47) + del add_47 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( 
+ batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_48) + del add_48 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_9 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + 
parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) + combine_10 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_49) + del add_49 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) 
<- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_50) + del add_50 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_91 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, 
parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_51) + del add_51 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_11 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del 
batch_norm__744 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x768x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x768x-1x-1xf32) + combine_12 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_109 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_104, swish_97 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_52) + del add_52 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_100 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_53) + del add_53 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_101 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 
384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_102 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_54) + del add_54 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_13 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_1 = 
paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_104) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_2 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_22 = paddle._C_ops.multiply(slice_1, slice_2) + del slice_1, slice_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [1, 1] + + # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_55 = paddle._C_ops.add(conv2d_142, reshape_4) + del conv2d_142, reshape_4 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_55) + del add_55 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_23 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_23, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_23, parameter_52 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x-1x-1xf32) + add_56 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x768x-1x-1xf32, 4x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_56, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_57 = paddle._C_ops.add(conv2d_144, reshape_5) + del conv2d_144, reshape_5 + + # pd_op.conv2d: (-1x768x1x1xf32) <- 
(-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_58 = paddle._C_ops.add(conv2d_145, reshape_6) + del conv2d_145, reshape_6 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_58) + del add_58 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_24 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_24, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_24, parameter_43 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_146, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_106 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_59 = paddle._C_ops.add(conv2d_147, reshape_7) + del conv2d_147, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_22] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_59, stack_0) + del add_59, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 
1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_6) + del conv2d_148 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_22] + del multiply_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_25 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_60 = paddle._C_ops.add(conv2d_149, reshape_10) + del conv2d_149, reshape_10 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_60) + del add_60 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_26 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_150 = paddle._C_ops.conv2d( + multiply_26, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_26, parameter_33 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_150, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.add: (-1x384x-1x-1xf32) <- 
(-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_61 = paddle._C_ops.add(swish_107, swish_94) + del swish_107 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_151 = paddle._C_ops.conv2d( + add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_61, parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_62 = paddle._C_ops.add(conv2d_151, reshape_11) + del conv2d_151, reshape_11 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_63 = paddle._C_ops.add(conv2d_152, reshape_12) + del conv2d_152, reshape_12 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_63) + del add_63 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_27 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + multiply_27, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_27, parameter_24 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_108 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_64 = paddle._C_ops.add(conv2d_154, reshape_13) + del conv2d_154, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_25] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_64, stack_2) + del add_64, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = 
paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_6) + del conv2d_155 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_25] + del multiply_25 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_6 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del full_int_array_7, shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del full_int_array_8, full_int_array_9, shape64_8 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_28 = paddle._C_ops.multiply(slice_7, slice_8) + del slice_7, slice_8 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_10 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_65 = paddle._C_ops.add(conv2d_156, reshape_16) + del conv2d_156, reshape_16 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_65) + del add_65 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_29 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_157 = paddle._C_ops.conv2d( + multiply_29, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_29, parameter_15 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_157, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_66 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_158 = paddle._C_ops.conv2d( + add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_66, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_67 = paddle._C_ops.add(conv2d_158, reshape_17) + del conv2d_158, reshape_17 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_68 = paddle._C_ops.add(conv2d_159, reshape_18) + del conv2d_159, reshape_18 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_68) + del add_68 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_30 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_160 = paddle._C_ops.conv2d( + multiply_30, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_30, parameter_6 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_160, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_110 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_69 = paddle._C_ops.add(conv2d_161, reshape_19) + del conv2d_161, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_28] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = 
paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_69, stack_4) + del add_69, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_6) + del conv2d_162, full_int_array_6 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_28] + del full_1, full_2, multiply_28 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py new file mode 100644 index 000000000..86b97343c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py @@ -0,0 +1,8161 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00996713") + max_val = float("0.0296165") + mean = float("1.85551e-07") + std = float("0.00658747") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.132365") + max_val = float("0.153351") + mean = float("5.83823e-08") + std = float("0.0069927") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0440964") + max_val = float("0.204248") + mean = float("0.0505266") + std = float("0.0396607") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.854628") + max_val = float("1.623") + mean = float("1.22222") + std = float("0.143373") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + 
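# Illustrative sketch (not generated code): the reshape -> transpose ->
# softmax -> 1x17x1x1 conv -> squeeze chain that ends each branch above
# (e.g. softmax_2 / conv2d_162 / squeeze_2) collapses a 68-channel
# regression map (4 box sides x 17 bins) into 4 expected offsets per
# location, using the fixed projection kernel parameter_36 as the weights of
# a per-bin expectation; the three per-level results are then concatenated
# along the anchor axis (concat_1).  Function and argument names below are
# hypothetical.
import paddle
import paddle.nn.functional as F

def decode_reg_distribution(reg_map, proj_kernel, reg_bins=17):
    # reg_map: [N, 4 * reg_bins, H, W]; proj_kernel: [1, reg_bins, 1, 1]
    n, _, h, w = reg_map.shape
    x = reg_map.reshape([n, 4, reg_bins, h * w])   # [N, 4, 17, H*W]
    x = x.transpose([0, 2, 3, 1])                  # [N, 17, H*W, 4]
    x = F.softmax(x, axis=1)                       # per-side distribution over the 17 bins
    x = F.conv2d(x, proj_kernel)                   # weighted sum over bins -> [N, 1, H*W, 4]
    return x.squeeze(axis=1)                       # [N, H*W, 4], cf. squeeze_0/1/2

# e.g. decode_reg_distribution(paddle.randn([2, 68, 20, 20]),
#                              paddle.ones([1, 17, 1, 1]) / 17).shape == [2, 400, 4]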
shape = [192] + dtype = "float32" + min_val = float("0.000124474") + max_val = float("0.00268664") + mean = float("0.000430455") + std = float("0.00032986") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0352297") + max_val = float("0.0298138") + mean = float("-0.00349452") + std = float("0.0106524") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0520579") + max_val = float("0.075326") + mean = float("-0.00011553") + std = float("0.00541501") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00468623") + max_val = float("0.00853988") + mean = float("3.18341e-05") + std = float("0.00259726") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00533172") + max_val = float("0.00945863") + mean = float("-9.35044e-05") + std = float("0.00138579") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-8.0039e-06") + max_val = float("0.00022716") + mean = float("1.07103e-05") + std = float("1.95264e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.327873") + max_val = float("0.892505") + mean = float("0.35844") + std = float("0.270005") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.0197") + max_val = float("1.7745") + mean = float("1.31881") + std = float("0.141386") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000191765") + max_val = float("0.00418178") + mean = float("0.00073364") + std = float("0.000566711") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.171816") + max_val = float("0.0388429") + mean = float("-0.024799") + std = float("0.0310049") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0758177") + max_val = float("0.0688388") + mean = float("-0.000505692") + std = float("0.0065208") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0046322") + max_val = float("0.00955243") + mean = float("-0.0001086") + std = float("0.00181395") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0166797") + max_val = float("0.0150515") + mean = float("-1.40907e-05") + std = float("0.00152728") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00414429") + max_val = float("0.0248245") + mean = float("1.70752e-07") + std = float("0.00516597") + data = None + + +class 
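# Illustrative sketch (an assumption, not part of the sample format): each
# Program_weight_tensor_parameter_* class here records only summary
# statistics (name, shape, dtype, min/max and usually mean/std) and keeps
# data = None.  One way to obtain stand-in weights for replaying the
# captured subgraph is to sample from the recorded distribution and clip to
# the recorded range, as in this hypothetical helper.
import numpy as np
import paddle

def materialize(meta, seed=0):
    rng = np.random.default_rng(seed)
    if getattr(meta, "data", None) is not None:
        arr = np.asarray(meta.data, dtype=meta.dtype).reshape(meta.shape)
    else:
        mean = float(getattr(meta, "mean", 0.0))
        std = float(getattr(meta, "std", 0.0)) or 1e-6   # a few small tensors omit mean/std
        arr = rng.normal(mean, std, size=meta.shape)
        arr = np.clip(arr, float(meta.min_val), float(meta.max_val)).astype(meta.dtype)
    return paddle.to_tensor(arr)

# e.g. materialize(Program_weight_tensor_parameter_0) -> a [68] float32 Tensor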
Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0881466") + max_val = float("0.115817") + mean = float("3.10683e-08") + std = float("0.00468137") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00505458") + max_val = float("0.0679229") + mean = float("0.025356") + std = float("0.0129557") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("1.00103") + max_val = float("1.23541") + mean = float("1.10699") + std = float("0.0406781") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("6.77016e-05") + max_val = float("0.00289487") + mean = float("0.00031114") + std = float("0.00031604") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0401932") + max_val = float("0.0131579") + mean = float("-0.00637696") + std = float("0.00739159") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0495062") + max_val = float("0.06518") + mean = float("-8.72216e-05") + std = float("0.00262797") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00259419") + max_val = float("0.00557747") + mean = float("9.36863e-05") + std = float("0.00147316") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00175705") + max_val = float("0.00490764") + mean = float("1.06471e-05") + std = float("0.000587436") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-2.18712e-06") + max_val = float("5.28604e-05") + mean = float("1.78963e-06") + std = float("3.72888e-06") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.150383") + max_val = float("0.452458") + mean = float("0.229981") + std = float("0.0998846") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00536") + max_val = float("1.40175") + mean = float("1.18904") + std = float("0.0599888") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000159485") + max_val = float("0.0036339") + mean = float("0.000711029") + std = float("0.000576938") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.108726") + max_val = float("0.0565601") + mean = float("-0.0264711") + std = float("0.0221625") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0481454") + max_val = float("0.0448686") + mean = float("-0.000359909") + std = 
float("0.00296123") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00204396") + max_val = float("0.0090588") + mean = float("-3.67064e-06") + std = float("0.000961893") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00523821") + max_val = float("0.00886045") + mean = float("-4.69801e-06") + std = float("0.000620937") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00290911") + max_val = float("0.0102058") + mean = float("1.30967e-07") + std = float("0.00299753") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0412324") + max_val = float("0.0740684") + mean = float("1.4179e-08") + std = float("0.00274764") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-0.0142152") + max_val = float("0.0471953") + mean = float("0.011051") + std = float("0.0102595") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("1.01074") + max_val = float("1.20195") + mean = float("1.0671") + std = float("0.0223299") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("3.69016e-05") + max_val = float("0.00134973") + mean = float("0.000155962") + std = float("0.00011097") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768] + dtype = "float32" + min_val = float("-0.0238644") + max_val = float("0.00811534") + mean = float("-0.00384896") + std = float("0.00342466") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.035485") + max_val = float("0.0312209") + mean = float("-3.51706e-05") + std = float("0.00119332") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768] + dtype = "float32" + min_val = float("-0.00351526") + max_val = float("0.00217768") + mean = float("0.00010496") + std = float("0.000670444") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00237088") + max_val = float("0.00289222") + mean = float("2.74817e-05") + std = float("0.000210228") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-1.14922e-05") + max_val = float("0.000123784") + mean = float("4.64809e-06") + std = float("9.90396e-06") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("-0.109578") + max_val = float("0.200768") + mean = float("0.093855") + std = 
float("0.0421135") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("1.00954") + max_val = float("1.25402") + mean = float("1.08094") + std = float("0.0259851") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("9.71614e-05") + max_val = float("0.00324689") + mean = float("0.000620876") + std = float("0.000449642") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768] + dtype = "float32" + min_val = float("-0.0505123") + max_val = float("0.0945691") + mean = float("-0.0192848") + std = float("0.0111623") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0486782") + max_val = float("0.031813") + mean = float("-0.000184113") + std = float("0.00129984") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768] + dtype = "float32" + min_val = float("-0.00524145") + max_val = float("0.00429624") + mean = float("1.59716e-05") + std = float("0.00044354") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0191194") + max_val = float("0.0353741") + mean = float("5.41618e-06") + std = float("0.000293837") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [768] + dtype = "float32" + min_val = float("-0.175306") + max_val = float("0.211594") + mean = float("0.0847743") + std = float("0.0563497") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + min_val = float("0.939778") + max_val = float("1.2976") + mean = float("1.06639") + std = float("0.0311293") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("0.00100313") + max_val = float("0.03162") + mean = float("0.00343704") + std = float("0.00260456") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("-0.150191") + max_val = float("0.100073") + mean = float("-0.0236812") + std = float("0.0231298") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.048459") + max_val = float("0.0301639") + mean = float("-0.000116837") + std = float("0.00198315") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.138639") + max_val = float("0.0301922") + mean = float("-0.0181041") + std = float("0.0228687") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.949936") + max_val = float("1.04659") + mean = float("0.989067") + std = float("0.0104758") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.000424965") + max_val = float("0.00769354") + mean = float("0.00208629") + std = float("0.00118364") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.0576822") 
+ max_val = float("0.0495397") + mean = float("0.000801726") + std = float("0.0154888") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.030027") + max_val = float("0.0156948") + mean = float("1.51312e-07") + std = float("0.00151823") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.138639") + max_val = float("0.0301921") + mean = float("-0.0181041") + std = float("0.0228687") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.969953") + max_val = float("1.13144") + mean = float("1.01743") + std = float("0.0170265") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00123184") + max_val = float("0.0151822") + mean = float("0.00352376") + std = float("0.00156689") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.113443") + max_val = float("0.119366") + mean = float("-0.0249755") + std = float("0.0249942") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0252708") + max_val = float("0.0260981") + mean = float("-4.55984e-05") + std = float("0.00102241") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [384] + dtype = "float32" + min_val = float("-0.167348") + max_val = float("0.0194716") + mean = float("-0.0338105") + std = float("0.0272605") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384] + dtype = "float32" + min_val = float("0.977422") + max_val = float("1.12785") + mean = float("1.01688") + std = float("0.0237162") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("0.0030012") + max_val = float("0.0356644") + mean = float("0.00889859") + std = float("0.00464026") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-0.187143") + max_val = float("0.21034") + mean = float("-0.0263546") + std = float("0.034034") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0247522") + max_val = float("0.0417196") + mean = float("-4.63696e-05") + std = float("0.00115897") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384] + dtype = "float32" + min_val = float("-0.102213") + max_val = float("0.0125531") + mean = float("-0.0345887") + std = float("0.0187949") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [384] + dtype = "float32" + min_val = float("0.949338") + max_val = float("1.04659") + mean = float("0.991003") + std = float("0.00965035") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [384] + dtype = "float32" + min_val = float("0.000650704") + max_val = float("0.00768371") + mean = float("0.00277456") + std = float("0.00139097") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + 
shape = [384] + dtype = "float32" + min_val = float("-0.068838") + max_val = float("0.0330406") + mean = float("-0.00344055") + std = float("0.0126807") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0239432") + max_val = float("0.0196078") + mean = float("-6.64687e-05") + std = float("0.00158521") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [384] + dtype = "float32" + min_val = float("-0.102213") + max_val = float("0.0125531") + mean = float("-0.0345887") + std = float("0.0187949") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [384] + dtype = "float32" + min_val = float("0.961887") + max_val = float("1.10634") + mean = float("1.01796") + std = float("0.0175095") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [384] + dtype = "float32" + min_val = float("0.0018589") + max_val = float("0.0216184") + mean = float("0.00498132") + std = float("0.00238917") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [384] + dtype = "float32" + min_val = float("-0.113641") + max_val = float("0.132971") + mean = float("-0.0350247") + std = float("0.0275334") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0306037") + max_val = float("0.039106") + mean = float("-6.35244e-05") + std = float("0.00105171") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [384] + dtype = "float32" + min_val = float("-0.087894") + max_val = float("0.0187151") + mean = float("-0.035007") + std = float("0.0190606") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [384] + dtype = "float32" + min_val = float("0.937686") + max_val = float("1.1153") + mean = float("1.01357") + std = float("0.0260763") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [384] + dtype = "float32" + min_val = float("0.00329362") + max_val = float("0.0370326") + mean = float("0.00917159") + std = float("0.004535") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("-0.116448") + max_val = float("0.0690271") + mean = float("-0.0174781") + std = float("0.0305536") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0356642") + max_val = float("0.0369726") + mean = float("-4.09478e-05") + std = float("0.0012155") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-0.113264") + max_val = float("0.0139328") + mean = float("-0.0360276") + std = float("0.0194684") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384] + dtype = "float32" + min_val = float("0.932607") + max_val = float("1.02926") + mean = float("0.989413") + std = float("0.0107521") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("0.000850876") + max_val = float("0.00768634") + mean = float("0.00328168") + std = float("0.00126507") + data = None + + +class 
Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("-0.0328926") + max_val = float("0.0412224") + mean = float("-0.00745687") + std = float("0.010839") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246626") + max_val = float("0.0246294") + mean = float("-0.000134739") + std = float("0.00162451") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-0.113264") + max_val = float("0.0139328") + mean = float("-0.0360276") + std = float("0.0194684") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384] + dtype = "float32" + min_val = float("0.983995") + max_val = float("1.10634") + mean = float("1.02014") + std = float("0.0217389") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("0.00264665") + max_val = float("0.023121") + mean = float("0.00648167") + std = float("0.00274806") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("-0.123387") + max_val = float("0.0727253") + mean = float("-0.0176321") + std = float("0.0264909") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0270575") + max_val = float("0.0462384") + mean = float("-3.55445e-05") + std = float("0.00113617") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-0.104816") + max_val = float("0.0223762") + mean = float("-0.0364198") + std = float("0.0208798") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384] + dtype = "float32" + min_val = float("0.94874") + max_val = float("1.1165") + mean = float("1.01377") + std = float("0.0272987") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("0.00343716") + max_val = float("0.0376123") + mean = float("0.0090883") + std = float("0.00464666") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("-0.10852") + max_val = float("0.127486") + mean = float("-0.0306965") + std = float("0.0372253") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0198386") + max_val = float("0.030936") + mean = float("-5.54577e-05") + std = float("0.00127916") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-0.104081") + max_val = float("0.0447507") + mean = float("-0.0253876") + std = float("0.0148924") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384] + dtype = "float32" + min_val = float("0.976825") + max_val = float("1.08842") + mean = float("1.01085") + std = float("0.0167894") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("0.00194344") + max_val = float("0.0134659") + mean = 
float("0.00358093") + std = float("0.00122632") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("-0.0628121") + max_val = float("0.0450731") + mean = float("-0.0156591") + std = float("0.0180011") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0512204") + max_val = float("0.054272") + mean = float("-7.40727e-05") + std = float("0.00193913") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-0.0410536") + max_val = float("0.01587") + mean = float("-0.0085433") + std = float("0.00822821") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384] + dtype = "float32" + min_val = float("0.964874") + max_val = float("1.05256") + mean = float("1.00968") + std = float("0.0113002") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("0.00148058") + max_val = float("0.0172515") + mean = float("0.00279498") + std = float("0.00125745") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("-0.0613616") + max_val = float("0.0450121") + mean = float("-0.0183066") + std = float("0.0175678") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0416152") + max_val = float("0.034011") + mean = float("-8.95819e-05") + std = float("0.00171404") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-0.0517357") + max_val = float("0.00607691") + mean = float("-0.0160479") + std = float("0.00958104") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [384] + dtype = "float32" + min_val = float("0.990579") + max_val = float("1.10515") + mean = float("1.02122") + std = float("0.0166168") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("0.00250794") + max_val = float("0.0303554") + mean = float("0.00898847") + std = float("0.00426596") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("-0.256873") + max_val = float("0.206661") + mean = float("-0.0286502") + std = float("0.0560866") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0249441") + max_val = float("0.0277039") + mean = float("-1.94354e-05") + std = float("0.00104416") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.222759") + max_val = float("0.492934") + mean = float("0.21931") + std = float("0.123919") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384] + dtype = "float32" + min_val = float("0.926931") + max_val = float("1.47785") + mean = float("1.14418") + std = float("0.0736893") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] 
+ dtype = "float32" + min_val = float("0.00264483") + max_val = float("0.0974121") + mean = float("0.00798104") + std = float("0.00654548") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("-0.159565") + max_val = float("0.121315") + mean = float("-0.0255922") + std = float("0.03015") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0876303") + max_val = float("0.0602537") + mean = float("-0.000242493") + std = float("0.00453291") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.164436") + max_val = float("0.0462449") + mean = float("-0.0239848") + std = float("0.0389306") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.846263") + max_val = float("1.05376") + mean = float("0.97547") + std = float("0.0236593") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.000579722") + max_val = float("0.0224778") + mean = float("0.00388016") + std = float("0.00265473") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.0510712") + max_val = float("0.0759894") + mean = float("-0.00386027") + std = float("0.0154973") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.050302") + max_val = float("0.0287473") + mean = float("-0.000136137") + std = float("0.00336423") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.164436") + max_val = float("0.0462449") + mean = float("-0.0239848") + std = float("0.0389306") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.736017") + max_val = float("1.12247") + mean = float("1.02412") + std = float("0.0366284") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00276112") + max_val = float("0.0194419") + mean = float("0.0066911") + std = float("0.00265579") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.16004") + max_val = float("0.0998512") + mean = float("-0.0225358") + std = float("0.0339102") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444225") + max_val = float("0.0402511") + mean = float("-7.11892e-05") + std = float("0.00226777") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.188768") + max_val = float("0.0421971") + mean = float("-0.0567874") + std = float("0.0481367") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.900639") + max_val = float("1.18283") + mean = float("1.01749") + std = float("0.0479958") + data = 
None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.00535257") + max_val = float("0.0763801") + mean = float("0.0170376") + std = float("0.0105864") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.240955") + max_val = float("0.297589") + mean = float("-0.0264239") + std = float("0.0412806") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0580625") + max_val = float("0.0640828") + mean = float("-9.47042e-05") + std = float("0.00257609") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("-0.188722") + max_val = float("0.00866455") + mean = float("-0.0625057") + std = float("0.0326874") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.925735") + max_val = float("1.04838") + mean = float("0.976103") + std = float("0.0176867") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("0.00136816") + max_val = float("0.0126086") + mean = float("0.00472105") + std = float("0.00208163") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192] + dtype = "float32" + min_val = float("-0.0491501") + max_val = float("0.0347549") + mean = float("-0.0081364") + std = float("0.0128259") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0432741") + max_val = float("0.0265813") + mean = float("-0.0003395") + std = float("0.0033694") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.188722") + max_val = float("0.00866455") + mean = float("-0.0625057") + std = float("0.0326874") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.972045") + max_val = float("1.15056") + mean = float("1.02587") + std = float("0.028794") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.00273726") + max_val = float("0.035953") + mean = float("0.00833297") + std = float("0.004788") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.106149") + max_val = float("0.117302") + mean = float("-0.0227699") + std = float("0.0288669") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0431991") + max_val = float("0.0568112") + mean = float("-8.61466e-05") + std = float("0.00241602") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.186994") + max_val = float("0.0596707") + mean = float("-0.0739558") + std = float("0.0398551") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.884207") + 
max_val = float("1.2133") + mean = float("1.0167") + std = float("0.0500698") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00600039") + max_val = float("0.0445964") + mean = float("0.0130705") + std = float("0.00628092") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.0739679") + max_val = float("0.0406422") + mean = float("-0.0181695") + std = float("0.022831") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0454997") + max_val = float("0.0702924") + mean = float("-8.84971e-05") + std = float("0.0027379") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.223812") + max_val = float("-0.011848") + mean = float("-0.0808429") + std = float("0.0411614") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.905395") + max_val = float("1.02866") + mean = float("0.977772") + std = float("0.0224939") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.00182472") + max_val = float("0.0140011") + mean = float("0.00488018") + std = float("0.00160896") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.0421578") + max_val = float("0.0455086") + mean = float("-0.0102449") + std = float("0.0184866") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0390054") + max_val = float("0.0688211") + mean = float("-0.000451601") + std = float("0.00386161") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.223812") + max_val = float("-0.011848") + mean = float("-0.0808429") + std = float("0.0411614") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.949933") + max_val = float("1.11112") + mean = float("1.02292") + std = float("0.030073") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.00467925") + max_val = float("0.0479479") + mean = float("0.0111132") + std = float("0.00613812") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.101233") + max_val = float("0.0755529") + mean = float("-0.0174453") + std = float("0.0302045") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0474729") + max_val = float("0.0539855") + mean = float("-7.31038e-05") + std = float("0.00259765") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.228542") + max_val = float("0.0777608") + mean = float("-0.0924101") + std = float("0.0451672") + data = None + + +class Program_weight_tensor_parameter_166: + 
name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.890182") + max_val = float("1.19896") + mean = float("1.01861") + std = float("0.0532403") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.00579067") + max_val = float("0.0572229") + mean = float("0.0147046") + std = float("0.0085496") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.147872") + max_val = float("0.114807") + mean = float("-0.0323967") + std = float("0.0333687") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0419213") + max_val = float("0.0860108") + mean = float("-0.000108934") + std = float("0.0030019") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.19326") + max_val = float("0.0143295") + mean = float("-0.0644048") + std = float("0.0302824") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.927945") + max_val = float("1.15109") + mean = float("1.015") + std = float("0.0376052") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.00306597") + max_val = float("0.0203842") + mean = float("0.00643735") + std = float("0.00249811") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.0752295") + max_val = float("0.0935841") + mean = float("-0.020241") + std = float("0.0237735") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0570792") + max_val = float("0.0685417") + mean = float("-0.000178261") + std = float("0.0043567") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [192] + dtype = "float32" + min_val = float("-0.0981279") + max_val = float("0.0362243") + mean = float("-0.0135197") + std = float("0.0200556") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192] + dtype = "float32" + min_val = float("0.92722") + max_val = float("1.19339") + mean = float("1.00446") + std = float("0.0253754") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("0.00242938") + max_val = float("0.0286347") + mean = float("0.00506947") + std = float("0.00262691") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("-0.0575542") + max_val = float("0.0378027") + mean = float("-0.0131132") + std = float("0.0183883") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0970742") + max_val = float("0.0665634") + mean = float("-0.000110766") + std = float("0.00377498") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-0.156035") + max_val = float("-0.00048574") + mean = float("-0.0380548") + 
std = float("0.0212619") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192] + dtype = "float32" + min_val = float("0.923345") + max_val = float("1.24736") + mean = float("1.00888") + std = float("0.029844") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("0.00440107") + max_val = float("0.0576727") + mean = float("0.013745") + std = float("0.00731603") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("-0.431982") + max_val = float("0.576694") + mean = float("-0.0311932") + std = float("0.102866") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0482483") + max_val = float("0.0395703") + mean = float("-2.80417e-05") + std = float("0.00235958") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.549518") + max_val = float("1.15667") + mean = float("0.361587") + std = float("0.347055") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192] + dtype = "float32" + min_val = float("0.547838") + max_val = float("1.57375") + mean = float("1.15649") + std = float("0.184225") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("0.00263969") + max_val = float("0.178377") + mean = float("0.0146839") + std = float("0.0148158") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("-0.206901") + max_val = float("0.201284") + mean = float("-0.0220776") + std = float("0.0538491") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.122958") + max_val = float("0.0975332") + mean = float("-0.000422197") + std = float("0.00948692") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96] + dtype = "float32" + min_val = float("-0.45597") + max_val = float("0.225384") + mean = float("-0.00899429") + std = float("0.142447") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("0.768038") + max_val = float("1.23793") + mean = float("0.951894") + std = float("0.070433") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.001789") + max_val = float("0.0296245") + mean = float("0.00790415") + std = float("0.00529182") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("-0.0397087") + max_val = float("0.0776004") + mean = float("-0.00745076") + std = float("0.0174977") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0690797") + max_val = float("0.0479575") + mean = float("-0.000703745") + std = float("0.00708779") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96] + dtype = "float32" + min_val = 
float("-0.45597") + max_val = float("0.225384") + mean = float("-0.00899429") + std = float("0.142447") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("0.511673") + max_val = float("1.27365") + mean = float("1.03139") + std = float("0.0951454") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.00447103") + max_val = float("0.0391091") + mean = float("0.0147757") + std = float("0.00849336") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("-0.301622") + max_val = float("0.115808") + mean = float("-0.0166235") + std = float("0.0543423") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.074843") + max_val = float("0.0811793") + mean = float("-3.78438e-05") + std = float("0.00502658") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96] + dtype = "float32" + min_val = float("-0.699659") + max_val = float("0.489136") + mean = float("-0.110588") + std = float("0.19522") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("0.729744") + max_val = float("1.6979") + mean = float("0.998907") + std = float("0.132252") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.00619322") + max_val = float("0.0753382") + mean = float("0.019023") + std = float("0.0147592") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("-0.164466") + max_val = float("0.123973") + mean = float("-0.0167661") + std = float("0.0517777") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0904058") + max_val = float("0.0674916") + mean = float("-0.000269681") + std = float("0.00563526") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96] + dtype = "float32" + min_val = float("-0.357406") + max_val = float("0.180909") + mean = float("-0.135683") + std = float("0.0938867") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("0.633758") + max_val = float("1.02328") + mean = float("0.910489") + std = float("0.054703") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.00226766") + max_val = float("0.0117187") + mean = float("0.00655901") + std = float("0.00211374") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("-0.0398978") + max_val = float("0.0295094") + mean = float("-0.00629756") + std = float("0.0138696") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0530557") + max_val = float("0.0589347") + mean = float("-0.000721103") + std = float("0.00726949") + data = None + + +class Program_weight_tensor_parameter_210: + name = 
"parameter_210" + shape = [96] + dtype = "float32" + min_val = float("-0.357406") + max_val = float("0.180909") + mean = float("-0.135683") + std = float("0.0938867") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("0.8186") + max_val = float("1.15653") + mean = float("1.02469") + std = float("0.059396") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.00540755") + max_val = float("0.0561564") + mean = float("0.0139166") + std = float("0.00841941") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("-0.0899866") + max_val = float("0.0314018") + mean = float("-0.0208995") + std = float("0.025033") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0688236") + max_val = float("0.0708707") + mean = float("-0.000262969") + std = float("0.00532725") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96] + dtype = "float32" + min_val = float("-0.480249") + max_val = float("0.160466") + mean = float("-0.163804") + std = float("0.129028") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("0.78143") + max_val = float("1.29526") + mean = float("0.966656") + std = float("0.0977077") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0048265") + max_val = float("0.0582232") + mean = float("0.0117158") + std = float("0.00796343") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("-0.130681") + max_val = float("0.0398643") + mean = float("0.00105047") + std = float("0.0297261") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0838623") + max_val = float("0.0655946") + mean = float("-0.0002875") + std = float("0.00621097") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.482468") + max_val = float("0.0678707") + mean = float("-0.164086") + std = float("0.113081") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("0.729445") + max_val = float("1.00356") + mean = float("0.921722") + std = float("0.0524543") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.00433347") + max_val = float("0.0204491") + mean = float("0.00997535") + std = float("0.00321772") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("-0.0446457") + max_val = float("0.0320629") + mean = float("-0.0167648") + std = float("0.0172211") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0830632") + max_val = float("0.0609676") + mean = float("-0.00159531") + std = float("0.00857298") + data = 
None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-0.482468") + max_val = float("0.0678707") + mean = float("-0.164086") + std = float("0.113081") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.771572") + max_val = float("1.15355") + mean = float("0.986257") + std = float("0.0571516") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.00988202") + max_val = float("0.116393") + mean = float("0.0221825") + std = float("0.0148292") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("-0.11947") + max_val = float("0.0604604") + mean = float("-0.0128069") + std = float("0.0343308") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0988696") + max_val = float("0.0787178") + mean = float("-0.000160458") + std = float("0.00610999") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-0.55586") + max_val = float("0.346647") + mean = float("-0.174731") + std = float("0.169548") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.757047") + max_val = float("1.34058") + mean = float("0.957428") + std = float("0.110982") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0072924") + max_val = float("0.0565885") + mean = float("0.0158637") + std = float("0.0105653") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("-0.114767") + max_val = float("0.17063") + mean = float("-0.00772215") + std = float("0.0609158") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.117416") + max_val = float("0.106153") + mean = float("-0.000121096") + std = float("0.00709328") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.61179") + max_val = float("0.578869") + mean = float("-0.0791862") + std = float("0.248136") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("0.660131") + max_val = float("1.23003") + mean = float("0.871774") + std = float("0.112507") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.00710491") + max_val = float("0.0445585") + mean = float("0.0147412") + std = float("0.00680549") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("-0.0931176") + max_val = float("0.0541358") + mean = float("-0.0149245") + std = float("0.0312047") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.131264") + max_val = float("0.116267") + mean = 
float("-0.000328445") + std = float("0.009384") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-0.0948697") + max_val = float("0.224511") + mean = float("0.0617959") + std = float("0.0539032") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.713603") + max_val = float("1.12367") + mean = float("0.935296") + std = float("0.0628703") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.00170399") + max_val = float("0.0269514") + mean = float("0.00620616") + std = float("0.00275772") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("-0.062198") + max_val = float("0.114507") + mean = float("-0.0207577") + std = float("0.0255889") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0878207") + max_val = float("0.0793284") + mean = float("-0.000117661") + std = float("0.006259") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.290627") + max_val = float("0.197489") + mean = float("-0.0642084") + std = float("0.0682552") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.677585") + max_val = float("1.44589") + mean = float("0.889482") + std = float("0.0775531") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00615469") + max_val = float("0.0653019") + mean = float("0.013452") + std = float("0.00596706") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.148841") + max_val = float("0.0453696") + mean = float("-0.0305179") + std = float("0.0258069") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.103954") + max_val = float("0.0889274") + mean = float("-0.000488273") + std = float("0.00677572") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-0.197184") + max_val = float("0.235944") + mean = float("-0.0649902") + std = float("0.0407184") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384] + dtype = "float32" + min_val = float("0.87395") + max_val = float("1.53027") + mean = float("1.02089") + std = float("0.0620815") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [384] + dtype = "float32" + min_val = float("0.00549855") + max_val = float("0.0533697") + mean = float("0.00979429") + std = float("0.00454056") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [384] + dtype = "float32" + min_val = float("-0.248796") + max_val = float("0.131533") + mean = float("-0.0383995") + std = float("0.0378875") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [384, 384, 1, 1] + dtype = 
"float32" + min_val = float("-0.155662") + max_val = float("0.0954612") + mean = float("-0.000485357") + std = float("0.00632631") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.172982") + max_val = float("0.00602343") + mean = float("-0.0635736") + std = float("0.031811") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192] + dtype = "float32" + min_val = float("0.888688") + max_val = float("0.993558") + mean = float("0.952478") + std = float("0.0161577") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("0.00348684") + max_val = float("0.0168184") + mean = float("0.00628357") + std = float("0.00200719") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("-0.0659639") + max_val = float("0.0508781") + mean = float("-0.0199445") + std = float("0.0244522") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0503466") + max_val = float("0.0313236") + mean = float("-0.000610377") + std = float("0.0047421") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.172982") + max_val = float("0.00602343") + mean = float("-0.0635736") + std = float("0.031811") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.947546") + max_val = float("1.03225") + mean = float("0.990347") + std = float("0.0162233") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("0.00995711") + max_val = float("0.0401164") + mean = float("0.017634") + std = float("0.00573009") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192] + dtype = "float32" + min_val = float("-0.140772") + max_val = float("0.138155") + mean = float("-0.0239356") + std = float("0.0422014") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0482809") + max_val = float("0.0734527") + mean = float("-7.90712e-05") + std = float("0.0027116") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [192] + dtype = "float32" + min_val = float("-0.211375") + max_val = float("-0.00263112") + mean = float("-0.0715416") + std = float("0.0343984") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [192] + dtype = "float32" + min_val = float("0.941718") + max_val = float("1.15116") + mean = float("1.03082") + std = float("0.0423415") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [192] + dtype = "float32" + min_val = float("0.0211078") + max_val = float("0.140703") + mean = float("0.0359457") + std = float("0.0113705") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [192] + dtype = "float32" + min_val = float("-0.167852") + max_val = float("0.256733") + mean = float("-0.0421418") + std = float("0.0506957") + data = None + + +class 
Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0667117") + max_val = float("0.059304") + mean = float("-0.000107451") + std = float("0.00324108") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [192] + dtype = "float32" + min_val = float("-0.190843") + max_val = float("-0.00892811") + mean = float("-0.0684056") + std = float("0.030824") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [192] + dtype = "float32" + min_val = float("0.946948") + max_val = float("1.04539") + mean = float("0.990344") + std = float("0.013334") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("0.00204439") + max_val = float("0.0101563") + mean = float("0.00350592") + std = float("0.000926001") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("-0.0741431") + max_val = float("0.0383866") + mean = float("-0.0189034") + std = float("0.0168987") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0319038") + max_val = float("0.0417245") + mean = float("-0.000600213") + std = float("0.00487987") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-0.190843") + max_val = float("-0.00892811") + mean = float("-0.0684056") + std = float("0.030824") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192] + dtype = "float32" + min_val = float("0.956509") + max_val = float("1.11411") + mean = float("1.00672") + std = float("0.0257334") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("0.00625705") + max_val = float("0.0226068") + mean = float("0.0103997") + std = float("0.00287058") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + dtype = "float32" + min_val = float("-0.141039") + max_val = float("0.0702292") + mean = float("-0.0298093") + std = float("0.0313043") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0504256") + max_val = float("0.0761871") + mean = float("-0.000102443") + std = float("0.0027184") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-0.224642") + max_val = float("-0.0187006") + mean = float("-0.0912059") + std = float("0.0387278") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192] + dtype = "float32" + min_val = float("0.950234") + max_val = float("1.19239") + mean = float("1.02577") + std = float("0.0451849") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("0.0202728") + max_val = float("0.0768753") + mean = float("0.0352012") + std = float("0.0111367") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("-0.22549") + max_val = 
float("0.0963683") + mean = float("-0.0586879") + std = float("0.0627008") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0629141") + max_val = float("0.0883468") + mean = float("-0.000127578") + std = float("0.00336756") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-0.150226") + max_val = float("-0.00319821") + mean = float("-0.0660682") + std = float("0.0225506") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("0.935894") + max_val = float("1.07288") + mean = float("1.0007") + std = float("0.0211788") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.00178607") + max_val = float("0.00643779") + mean = float("0.00311503") + std = float("0.000840167") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("-0.0642588") + max_val = float("0.0879255") + mean = float("-0.009792") + std = float("0.0179078") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.029514") + max_val = float("0.043096") + mean = float("-0.000329408") + std = float("0.00549588") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-0.150226") + max_val = float("-0.00319822") + mean = float("-0.0660682") + std = float("0.0225506") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192] + dtype = "float32" + min_val = float("0.939778") + max_val = float("1.11505") + mean = float("0.995075") + std = float("0.0251252") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("0.00642307") + max_val = float("0.0266081") + mean = float("0.0114071") + std = float("0.00308351") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("-0.18069") + max_val = float("0.0972039") + mean = float("-0.0325111") + std = float("0.0330231") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0382342") + max_val = float("0.0664391") + mean = float("-0.000128083") + std = float("0.00267217") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-0.282528") + max_val = float("0.0109662") + mean = float("-0.106158") + std = float("0.0388174") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192] + dtype = "float32" + min_val = float("0.946051") + max_val = float("1.24915") + mean = float("1.02799") + std = float("0.0411221") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("0.00896292") + max_val = float("0.0393411") + mean = float("0.0158995") + std = float("0.00471727") + data = None + + +class Program_weight_tensor_parameter_298: + name = 
"parameter_298" + shape = [192] + dtype = "float32" + min_val = float("-0.220858") + max_val = float("0.10809") + mean = float("-0.0372746") + std = float("0.044431") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0533745") + max_val = float("0.0594756") + mean = float("-0.000147957") + std = float("0.00371064") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.250169") + max_val = float("-0.0169445") + mean = float("-0.118185") + std = float("0.0428781") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("0.916471") + max_val = float("1.13189") + mean = float("1.02572") + std = float("0.0416176") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("0.00405995") + max_val = float("0.0116058") + mean = float("0.00629149") + std = float("0.00151752") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("-0.117604") + max_val = float("0.0642248") + mean = float("0.0107736") + std = float("0.0236389") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0597477") + max_val = float("0.0854183") + mean = float("-0.000166658") + std = float("0.00511791") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-0.174475") + max_val = float("0.20701") + mean = float("-0.00689085") + std = float("0.0496763") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192] + dtype = "float32" + min_val = float("0.951544") + max_val = float("1.21569") + mean = float("1.05632") + std = float("0.0494344") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [192] + dtype = "float32" + min_val = float("0.00423262") + max_val = float("0.0403318") + mean = float("0.00759489") + std = float("0.00311219") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("-0.0961858") + max_val = float("0.0406769") + mean = float("-0.00802736") + std = float("0.0245313") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0579338") + max_val = float("0.208244") + mean = float("-0.000163925") + std = float("0.00519386") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.243733") + max_val = float("-0.0546405") + mean = float("-0.121279") + std = float("0.0329438") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.821465") + max_val = float("1.01492") + mean = float("0.913551") + std = float("0.0255274") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00632715") + max_val = float("0.0367869") + mean = float("0.0101591") + std = 
float("0.00340929") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.0871009") + max_val = float("0.0863362") + mean = float("-0.0276529") + std = float("0.0233351") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0365944") + max_val = float("0.0462394") + mean = float("-0.000227368") + std = float("0.00408063") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [768] + dtype = "float32" + min_val = float("-0.101787") + max_val = float("0.0665855") + mean = float("-0.0546584") + std = float("0.0146864") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [768] + dtype = "float32" + min_val = float("0.95621") + max_val = float("1.13825") + mean = float("1.02215") + std = float("0.0205763") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [768] + dtype = "float32" + min_val = float("0.0043328") + max_val = float("0.027335") + mean = float("0.00718712") + std = float("0.00194442") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [768] + dtype = "float32" + min_val = float("-0.0981102") + max_val = float("0.0940955") + mean = float("-0.0370968") + std = float("0.0222538") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0461237") + max_val = float("0.0869437") + mean = float("-0.000256912") + std = float("0.00342022") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.151767") + max_val = float("0.0695731") + mean = float("-0.0384536") + std = float("0.0201375") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.894036") + max_val = float("1.07238") + mean = float("0.984632") + std = float("0.0127924") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.00290649") + max_val = float("0.0408954") + mean = float("0.00666489") + std = float("0.00326729") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.0653166") + max_val = float("0.0519308") + mean = float("-0.00623302") + std = float("0.0155997") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0309181") + max_val = float("0.0533571") + mean = float("-7.98848e-05") + std = float("0.00292507") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.151767") + max_val = float("0.0695731") + mean = float("-0.0384536") + std = float("0.0201375") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.888673") + max_val = float("1.0755") + mean = float("0.996161") + std = float("0.0119175") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + 
min_val = float("0.0142738") + max_val = float("0.311691") + mean = float("0.0422161") + std = float("0.0212825") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-0.214832") + max_val = float("0.109983") + mean = float("-0.0665168") + std = float("0.0509291") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0333794") + max_val = float("0.041775") + mean = float("-0.000107745") + std = float("0.00109587") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.0782042") + max_val = float("0.112772") + mean = float("-0.0178359") + std = float("0.0153444") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.921308") + max_val = float("1.1661") + mean = float("1.01649") + std = float("0.0243616") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("0.0102295") + max_val = float("0.115044") + mean = float("0.0305011") + std = float("0.0124013") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-0.153753") + max_val = float("0.147071") + mean = float("-0.0372654") + std = float("0.0461481") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0226589") + max_val = float("0.036417") + mean = float("-6.21237e-05") + std = float("0.00142638") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.069438") + max_val = float("0.0210057") + mean = float("-0.0221755") + std = float("0.0130288") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("0.948363") + max_val = float("1.1661") + mean = float("1.01607") + std = float("0.0267976") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("0.0303298") + max_val = float("0.235692") + mean = float("0.0873822") + std = float("0.0294964") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-1.41616") + max_val = float("1.37834") + mean = float("-0.0672685") + std = float("0.446602") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0365696") + max_val = float("0.0477869") + mean = float("3.75444e-05") + std = float("0.00244321") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0175498") + max_val = float("0.0243298") + mean = float("-0.00128851") + std = float("0.0065282") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.972045") + max_val = float("1.06093") + mean = float("0.99618") + std = float("0.01202") + data = None + + +class 
Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.0015358") + max_val = float("0.00696368") + mean = float("0.00325261") + std = float("0.000776971") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-0.0833909") + max_val = float("0.0596304") + mean = float("-0.028885") + std = float("0.0177319") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0289284") + max_val = float("0.0278605") + mean = float("-0.000360372") + std = float("0.00262503") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0175498") + max_val = float("0.0243298") + mean = float("-0.00128851") + std = float("0.0065282") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("0.974435") + max_val = float("1.08363") + mean = float("1.00583") + std = float("0.017741") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("0.00723957") + max_val = float("0.0375603") + mean = float("0.0161577") + std = float("0.00499723") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-0.252151") + max_val = float("0.0737095") + mean = float("-0.08036") + std = float("0.0422289") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0230068") + max_val = float("0.0485732") + mean = float("-0.000124469") + std = float("0.00111462") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0466225") + max_val = float("0.00898686") + mean = float("-0.00779507") + std = float("0.00746548") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384] + dtype = "float32" + min_val = float("0.958003") + max_val = float("1.12965") + mean = float("1.01422") + std = float("0.0192962") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("0.0315362") + max_val = float("0.132781") + mean = float("0.069239") + std = float("0.0176957") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-0.817765") + max_val = float("0.618692") + mean = float("-0.19556") + std = float("0.181151") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.022563") + max_val = float("0.038276") + mean = float("-0.000122085") + std = float("0.00133203") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.033857") + max_val = float("0.0139088") + mean = float("-0.006985") + std = float("0.00755489") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.986684") + max_val = 
float("1.03523") + mean = float("1.0021") + std = float("0.00693134") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.0010937") + max_val = float("0.00473797") + mean = float("0.0019152") + std = float("0.000506383") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-0.0582268") + max_val = float("0.108843") + mean = float("-0.0163738") + std = float("0.0197979") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020788") + max_val = float("0.0286466") + mean = float("-0.000218693") + std = float("0.00228777") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.033857") + max_val = float("0.0139088") + mean = float("-0.006985") + std = float("0.00755489") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.984593") + max_val = float("1.06746") + mean = float("1.00655") + std = float("0.0121664") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("0.00450449") + max_val = float("0.0227935") + mean = float("0.00957872") + std = float("0.00312672") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-0.144327") + max_val = float("0.265646") + mean = float("-0.0565659") + std = float("0.048812") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0124244") + max_val = float("0.0276773") + mean = float("-9.55709e-05") + std = float("0.00094902") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.0512307") + max_val = float("0.00385635") + mean = float("-0.0195493") + std = float("0.00836547") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype = "float32" + min_val = float("0.978618") + max_val = float("1.08483") + mean = float("1.01365") + std = float("0.0153886") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("0.0063934") + max_val = float("0.0246998") + mean = float("0.0111198") + std = float("0.00257045") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-0.120871") + max_val = float("0.132915") + mean = float("-0.0260459") + std = float("0.0299784") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0142981") + max_val = float("0.024141") + mean = float("-4.79187e-05") + std = float("0.00131278") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.0676664") + max_val = float("0.0208998") + mean = float("-0.0318731") + std = float("0.0122307") + data = None + + +class Program_weight_tensor_parameter_371: + name = 
"parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.984256") + max_val = float("1.05794") + mean = float("1.01498") + std = float("0.0106765") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("0.00788932") + max_val = float("0.0348657") + mean = float("0.0128533") + std = float("0.00302232") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-0.101719") + max_val = float("0.172283") + mean = float("-0.0367191") + std = float("0.0276733") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0172054") + max_val = float("0.0376208") + mean = float("-0.000183184") + std = float("0.00259433") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0230121") + max_val = float("0.0211028") + mean = float("5.7334e-05") + std = float("0.00782956") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("0.99505") + max_val = float("1.08363") + mean = float("1.04199") + std = float("0.0135379") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.0124186") + max_val = float("0.025482") + mean = float("0.0160088") + std = float("0.00200221") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-0.0886053") + max_val = float("0.033751") + mean = float("-0.0450013") + std = float("0.0179425") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0428553") + max_val = float("0.0454061") + mean = float("-0.000216885") + std = float("0.00310941") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("-3.76748") + max_val = float("-0.735718") + mean = float("-2.19173") + std = float("0.429764") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("1.62261") + max_val = float("4.45166") + mean = float("3.08738") + std = float("0.25481") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("0.00255178") + max_val = float("0.0174347") + mean = float("0.00504102") + std = float("0.00123753") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024] + dtype = "float32" + min_val = float("-0.0981028") + max_val = float("0.109197") + mean = float("-0.0429288") + std = float("0.0200304") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0759075") + max_val = float("0.0960025") + mean = float("-0.000303492") + std = float("0.00327529") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [768] + dtype = "float32" + min_val = float("-0.0164547") + max_val = float("0.00136048") + mean = float("-0.000808882") + 
std = float("0.00234169") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0859031") + max_val = float("0.147876") + mean = float("-0.000294646") + std = float("0.00174956") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.77797") + max_val = float("0.31184") + mean = float("-0.312311") + std = float("0.291454") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.188879") + max_val = float("1.81779") + mean = float("0.610184") + std = float("0.262228") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("4.15295e-05") + max_val = float("0.00124149") + mean = float("0.00018635") + std = float("0.000117554") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0894214") + max_val = float("0.0525955") + mean = float("0.0172684") + std = float("0.0154183") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0239132") + max_val = float("0.0254034") + mean = float("-0.000294387") + std = float("0.00236383") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.77797") + max_val = float("0.31184") + mean = float("-0.312311") + std = float("0.291454") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.332455") + max_val = float("2.60228") + mean = float("1.02626") + std = float("0.289998") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.000319043") + max_val = float("0.00414738") + mean = float("0.00107632") + std = float("0.000507674") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.186705") + max_val = float("0.0905628") + mean = float("0.0170662") + std = float("0.021429") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0184804") + max_val = float("0.0310221") + mean = float("-3.75571e-05") + std = float("0.00148755") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.58674") + max_val = float("0.0349821") + mean = float("-1.57085") + std = float("0.417307") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.525383") + max_val = float("1.6495") + mean = float("1.13854") + std = float("0.149789") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0256519") + max_val = float("0.110068") + mean = float("0.0524846") + std = float("0.0116704") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = 
float("-0.778566") + max_val = float("0.344438") + mean = float("-0.221917") + std = float("0.103427") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0296622") + max_val = float("0.0537224") + mean = float("-0.000158945") + std = float("0.00197052") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.94349") + max_val = float("0.641156") + mean = float("-0.576479") + std = float("0.359073") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.164837") + max_val = float("2.07047") + mean = float("0.56295") + std = float("0.227775") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("6.5426e-05") + max_val = float("0.00116989") + mean = float("0.00025892") + std = float("0.000131517") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.034319") + max_val = float("0.060842") + mean = float("0.0169333") + std = float("0.0124885") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0211277") + max_val = float("0.0256464") + mean = float("-0.000316836") + std = float("0.00224938") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.94349") + max_val = float("0.641156") + mean = float("-0.576479") + std = float("0.359073") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.582152") + max_val = float("2.16129") + mean = float("1.08524") + std = float("0.256663") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00078451") + max_val = float("0.00519898") + mean = float("0.00162415") + std = float("0.000518067") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.0628209") + max_val = float("0.0895942") + mean = float("0.0228312") + std = float("0.0198838") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0187271") + max_val = float("0.0312371") + mean = float("-6.33411e-05") + std = float("0.00157788") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.4015") + max_val = float("0.848053") + mean = float("-1.4071") + std = float("0.361673") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.461972") + max_val = float("1.92206") + mean = float("1.16949") + std = float("0.148563") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0223589") + max_val = float("0.0650232") + mean = float("0.034113") + std = float("0.00657291") + data = None + + +class Program_weight_tensor_parameter_415: + 
name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.609242") + max_val = float("0.728225") + mean = float("-0.148877") + std = float("0.0847971") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0203116") + max_val = float("0.0405905") + mean = float("-0.000153735") + std = float("0.00198218") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-1.88015") + max_val = float("0.45251") + mean = float("-0.486805") + std = float("0.376833") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.0772688") + max_val = float("2.12425") + mean = float("0.442764") + std = float("0.218251") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("5.14732e-05") + max_val = float("0.00175808") + mean = float("0.000313916") + std = float("0.00017621") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.0350628") + max_val = float("0.0623597") + mean = float("0.0217881") + std = float("0.0146826") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0144766") + max_val = float("0.0264016") + mean = float("-0.000421792") + std = float("0.0019438") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-1.88015") + max_val = float("0.45251") + mean = float("-0.486805") + std = float("0.376833") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.518812") + max_val = float("2.23061") + mean = float("1.05416") + std = float("0.261303") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.000907154") + max_val = float("0.00473234") + mean = float("0.00216236") + std = float("0.000621055") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.156848") + max_val = float("0.0998884") + mean = float("0.02917") + std = float("0.023776") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0200188") + max_val = float("0.0377429") + mean = float("-6.77733e-05") + std = float("0.00166655") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-2.1601") + max_val = float("0.422034") + mean = float("-1.36858") + std = float("0.278385") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.71356") + max_val = float("1.64114") + mean = float("1.14618") + std = float("0.102016") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("0.015836") + max_val = float("0.0669088") + mean = float("0.0268458") + std = 
float("0.00709364") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.604472") + max_val = float("0.189662") + mean = float("-0.105803") + std = float("0.069063") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259247") + max_val = float("0.0434116") + mean = float("-0.000126202") + std = float("0.00187571") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-2.9286") + max_val = float("1.66241") + mean = float("-0.761307") + std = float("0.644444") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("0.953521") + max_val = float("2.92614") + mean = float("1.86813") + std = float("0.276938") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00145196") + max_val = float("0.00602303") + mean = float("0.00286041") + std = float("0.000620991") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("-0.206077") + max_val = float("0.108202") + mean = float("0.0475519") + std = float("0.0246364") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0455732") + max_val = float("0.0357705") + mean = float("-0.000554005") + std = float("0.00431649") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.25212") + max_val = float("0.68194") + mean = float("-0.778669") + std = float("0.473843") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.966069") + max_val = float("2.89985") + mean = float("2.1016") + std = float("0.306322") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.000400888") + max_val = float("0.00504261") + mean = float("0.000981843") + std = float("0.000289437") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-0.0495509") + max_val = float("0.0680366") + mean = float("0.0216031") + std = float("0.0123324") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.145612") + max_val = float("0.0575213") + mean = float("-0.00024107") + std = float("0.00300636") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [768] + dtype = "float32" + min_val = float("-2.40748") + max_val = float("0.642802") + mean = float("-0.909529") + std = float("0.33999") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [768] + dtype = "float32" + min_val = float("0.529868") + max_val = float("1.91302") + mean = float("0.921944") + std = float("0.149362") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [768] + dtype = "float32" + min_val = 
float("0.00474618") + max_val = float("0.0390067") + mean = float("0.00819721") + std = float("0.00242367") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [768] + dtype = "float32" + min_val = float("-0.182419") + max_val = float("0.16049") + mean = float("0.0278199") + std = float("0.0398523") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.059099") + max_val = float("0.0444559") + mean = float("-7.08532e-05") + std = float("0.00199255") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.3982") + max_val = float("1.66922") + mean = float("-1.1631") + std = float("0.514405") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.523518") + max_val = float("1.6758") + mean = float("1.11208") + std = float("0.148137") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.000933487") + max_val = float("0.00746954") + mean = float("0.00344664") + std = float("0.000801527") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-0.110012") + max_val = float("0.0714297") + mean = float("-0.0365401") + std = float("0.0293225") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.330875") + max_val = float("0.172913") + mean = float("-0.000459854") + std = float("0.00662096") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-0.0108223") + max_val = float("0.000907801") + mean = float("-0.0031106") + std = float("0.00227253") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.223828") + max_val = float("0.20505") + mean = float("-0.00218766") + std = float("0.00489559") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.97815") + max_val = float("0.402075") + mean = float("-0.350999") + std = float("0.333852") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0527639") + max_val = float("2.1601") + mean = float("0.580979") + std = float("0.418931") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("5.97182e-05") + max_val = float("0.00113282") + mean = float("0.000337135") + std = float("0.00017036") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0272331") + max_val = float("0.0426344") + mean = float("0.00406202") + std = float("0.0112711") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0211821") + max_val = float("0.0576226") + mean = float("-0.000295795") + std = float("0.00359937") + data = None + + +class 
Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.97815") + max_val = float("0.402075") + mean = float("-0.350999") + std = float("0.333852") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.372396") + max_val = float("2.6943") + mean = float("1.20241") + std = float("0.492555") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.000778601") + max_val = float("0.00986122") + mean = float("0.0028243") + std = float("0.0011229") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0718662") + max_val = float("0.098836") + mean = float("0.0132633") + std = float("0.0284276") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0267941") + max_val = float("0.0380043") + mean = float("-0.000114945") + std = float("0.00270079") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.89627") + max_val = float("-0.181006") + mean = float("-1.31578") + std = float("0.40203") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.695389") + max_val = float("2.10154") + mean = float("1.18272") + std = float("0.170823") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0380002") + max_val = float("0.186937") + mean = float("0.0783337") + std = float("0.0246352") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-1.80338") + max_val = float("1.39476") + mean = float("-0.158506") + std = float("0.279166") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0376485") + max_val = float("0.0442722") + mean = float("-0.000156605") + std = float("0.00324367") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.94409") + max_val = float("0.506739") + mean = float("-0.280964") + std = float("0.32113") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.0470323") + max_val = float("1.77439") + mean = float("0.444469") + std = float("0.306448") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("5.53183e-05") + max_val = float("0.00145499") + mean = float("0.000343414") + std = float("0.000218973") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0324414") + max_val = float("0.0424277") + mean = float("0.0078867") + std = float("0.0106231") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0232793") + max_val = float("0.0340463") + mean = 
float("-0.000372018") + std = float("0.00343686") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.94409") + max_val = float("0.506739") + mean = float("-0.280964") + std = float("0.32113") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.486392") + max_val = float("2.27602") + mean = float("1.14015") + std = float("0.376136") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.001572") + max_val = float("0.00624474") + mean = float("0.00321651") + std = float("0.000843905") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0611836") + max_val = float("0.0803488") + mean = float("0.0262682") + std = float("0.0244502") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.021333") + max_val = float("0.031939") + mean = float("-0.000148173") + std = float("0.00289116") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.51265") + max_val = float("-0.125796") + mean = float("-1.28955") + std = float("0.445077") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.655675") + max_val = float("1.67878") + mean = float("1.2032") + std = float("0.16689") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0291112") + max_val = float("0.117157") + mean = float("0.0521028") + std = float("0.0142132") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.75423") + max_val = float("0.269628") + mean = float("-0.0616626") + std = float("0.184866") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0358087") + max_val = float("0.0447245") + mean = float("-0.000190853") + std = float("0.00335683") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.76065") + max_val = float("0.463281") + mean = float("-0.263631") + std = float("0.335459") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.00363281") + max_val = float("1.68356") + mean = float("0.352056") + std = float("0.252441") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("9.9945e-07") + max_val = float("0.00178321") + mean = float("0.000315486") + std = float("0.000233695") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0321653") + max_val = float("0.0434796") + mean = float("0.0100317") + std = float("0.010429") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" 
+ min_val = float("-0.0333151") + max_val = float("0.028244") + mean = float("-0.000424522") + std = float("0.00328881") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-1.76065") + max_val = float("0.463281") + mean = float("-0.263631") + std = float("0.335459") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.407314") + max_val = float("1.98401") + mean = float("1.06799") + std = float("0.335081") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00143898") + max_val = float("0.00771346") + mean = float("0.00340617") + std = float("0.000937101") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.0318019") + max_val = float("0.0790559") + mean = float("0.0270601") + std = float("0.019367") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0285572") + max_val = float("0.0385687") + mean = float("-0.000143999") + std = float("0.00298657") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.50189") + max_val = float("0.137771") + mean = float("-1.24368") + std = float("0.425346") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.657082") + max_val = float("1.81751") + mean = float("1.17096") + std = float("0.166144") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0188017") + max_val = float("0.0700368") + mean = float("0.0347428") + std = float("0.00799167") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.21798") + max_val = float("0.267509") + mean = float("-0.0357914") + std = float("0.12809") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.03732") + max_val = float("0.0499939") + mean = float("-0.000197441") + std = float("0.00341168") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-2.08361") + max_val = float("0.526166") + mean = float("-0.273402") + std = float("0.375102") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("0.000539323") + max_val = float("0.733627") + mean = float("0.211797") + std = float("0.136369") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("5.73491e-08") + max_val = float("0.000709717") + mean = float("0.000185769") + std = float("0.000106476") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.024762") + max_val = float("0.0297538") + mean = float("0.00669963") + std = float("0.00867654") + data = None + + +class 
Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0223003") + max_val = float("0.0285012") + mean = float("-0.00027288") + std = float("0.00290769") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.08361") + max_val = float("0.526166") + mean = float("-0.273402") + std = float("0.375102") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.395992") + max_val = float("1.96799") + mean = float("0.961194") + std = float("0.304605") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00143872") + max_val = float("0.00758554") + mean = float("0.00340905") + std = float("0.000995444") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0308625") + max_val = float("0.0974639") + mean = float("0.0355014") + std = float("0.0243965") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0300099") + max_val = float("0.0331466") + mean = float("-0.000174018") + std = float("0.00307819") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.74569") + max_val = float("-0.0811876") + mean = float("-1.23738") + std = float("0.435119") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.763204") + max_val = float("1.6262") + mean = float("1.15469") + std = float("0.143358") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0159479") + max_val = float("0.0420721") + mean = float("0.0253839") + std = float("0.00548298") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.985135") + max_val = float("0.209935") + mean = float("-0.0465764") + std = float("0.111494") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0517747") + max_val = float("0.0533624") + mean = float("-0.000217609") + std = float("0.00340238") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21509") + max_val = float("0.443206") + mean = float("-0.23316") + std = float("0.338913") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("-0.000141425") + max_val = float("0.676871") + mean = float("0.191956") + std = float("0.120826") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("1.88795e-10") + max_val = float("0.00060921") + mean = float("0.00019844") + std = float("0.000122163") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0415572") + max_val = 
float("0.0356041") + mean = float("0.00645752") + std = float("0.0102765") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0362203") + max_val = float("0.0384156") + mean = float("-0.000250814") + std = float("0.00300878") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21509") + max_val = float("0.443206") + mean = float("-0.23316") + std = float("0.338913") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.38485") + max_val = float("1.57063") + mean = float("0.854584") + std = float("0.261022") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00126636") + max_val = float("0.00625458") + mean = float("0.00353727") + std = float("0.000960266") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.0709334") + max_val = float("0.0992781") + mean = float("0.0299011") + std = float("0.022397") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0303355") + max_val = float("0.033651") + mean = float("-0.000135556") + std = float("0.00307625") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-2.49114") + max_val = float("-0.133527") + mean = float("-1.25068") + std = float("0.419307") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.68941") + max_val = float("1.52402") + mean = float("1.1287") + std = float("0.135387") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0102917") + max_val = float("0.0340277") + mean = float("0.0181781") + std = float("0.00444353") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.598499") + max_val = float("0.205069") + mean = float("-0.0404677") + std = float("0.0908447") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0539394") + max_val = float("0.0542177") + mean = float("-0.000198971") + std = float("0.00340909") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.22047") + max_val = float("0.496178") + mean = float("-0.168848") + std = float("0.293103") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.00876153") + max_val = float("1.5288") + mean = float("0.237756") + std = float("0.211674") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("1.74591e-05") + max_val = float("0.00624593") + mean = float("0.00045268") + std = float("0.000588624") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + 
shape = [192] + dtype = "float32" + min_val = float("-0.065087") + max_val = float("0.0770018") + mean = float("0.00835075") + std = float("0.0148227") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0660848") + max_val = float("0.0277484") + mean = float("-0.000371825") + std = float("0.00364693") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-1.22047") + max_val = float("0.496178") + mean = float("-0.168848") + std = float("0.293103") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.355908") + max_val = float("1.45471") + mean = float("0.758883") + std = float("0.217186") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00268518") + max_val = float("0.0127648") + mean = float("0.0059621") + std = float("0.00184013") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.0754021") + max_val = float("0.0955923") + mean = float("0.0355705") + std = float("0.0315093") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0700958") + max_val = float("0.056488") + mean = float("-0.000170134") + std = float("0.00301629") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-1.88075") + max_val = float("-0.211975") + mean = float("-1.14723") + std = float("0.326349") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("0.79192") + max_val = float("1.60588") + mean = float("1.12491") + std = float("0.130114") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("0.00790317") + max_val = float("0.0430673") + mean = float("0.0158827") + std = float("0.00476539") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-0.455479") + max_val = float("0.211552") + mean = float("-0.0369992") + std = float("0.0813136") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0610284") + max_val = float("0.0703754") + mean = float("-0.000151637") + std = float("0.00331491") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.86758") + max_val = float("1.58079") + mean = float("-0.0285524") + std = float("0.747555") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.4869") + max_val = float("2.086") + mean = float("0.902744") + std = float("0.233413") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.00633974") + max_val = float("0.0416042") + mean = float("0.0149685") + std = float("0.00593254") + data = None + + 
+class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-0.196625") + max_val = float("0.272012") + mean = float("-0.0347194") + std = float("0.049849") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.090327") + max_val = float("0.0781438") + mean = float("-0.00049126") + std = float("0.00703695") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.97514") + max_val = float("1.66537") + mean = float("0.0963337") + std = float("0.664688") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.833701") + max_val = float("5.56786") + mean = float("1.91679") + std = float("0.933226") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.00262273") + max_val = float("0.0469049") + mean = float("0.00988616") + std = float("0.00423222") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-0.122926") + max_val = float("0.108141") + mean = float("-0.0158569") + std = float("0.0435084") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0689058") + max_val = float("0.112599") + mean = float("-0.000328781") + std = float("0.0065748") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [384] + dtype = "float32" + min_val = float("-2.92973") + max_val = float("1.33102") + mean = float("-0.301138") + std = float("0.56416") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [384] + dtype = "float32" + min_val = float("0.639938") + max_val = float("2.47799") + mean = float("1.16309") + std = float("0.258012") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [384] + dtype = "float32" + min_val = float("0.00577942") + max_val = float("0.0604073") + mean = float("0.0143328") + std = float("0.00717715") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [384] + dtype = "float32" + min_val = float("-0.172991") + max_val = float("0.184791") + mean = float("0.019209") + std = float("0.0533994") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0651152") + max_val = float("0.063191") + mean = float("-7.21353e-05") + std = float("0.00350596") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [256] + dtype = "float32" + min_val = float("-2.04896") + max_val = float("1.29277") + mean = float("-0.925662") + std = float("0.541886") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [256] + dtype = "float32" + min_val = float("0.52945") + max_val = float("1.69731") + mean = float("1.05619") + std = float("0.17661") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [256] + dtype = "float32" + min_val = float("0.000674469") + max_val = float("0.0100212") + mean = 
float("0.00272823") + std = float("0.00122462") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [256] + dtype = "float32" + min_val = float("-0.164943") + max_val = float("0.107017") + mean = float("-0.0346008") + std = float("0.0479287") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.166132") + max_val = float("0.120738") + mean = float("-0.000624717") + std = float("0.0111393") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-0.0132669") + max_val = float("0.00100077") + mean = float("-0.00489103") + std = float("0.0031796") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.309648") + max_val = float("0.197925") + mean = float("-0.00390845") + std = float("0.00946233") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.92138") + max_val = float("0.528173") + mean = float("-0.211283") + std = float("0.434644") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.140912") + max_val = float("3.21894") + mean = float("0.636059") + std = float("0.66684") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.25601e-05") + max_val = float("0.00138037") + mean = float("0.00035803") + std = float("0.000250121") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0383932") + max_val = float("0.0476881") + mean = float("0.00524749") + std = float("0.0168741") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.042012") + max_val = float("0.0802529") + mean = float("-0.000564225") + std = float("0.0064658") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.92138") + max_val = float("0.528173") + mean = float("-0.211283") + std = float("0.434644") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.34997") + max_val = float("5.4603") + mean = float("1.08725") + std = float("0.880273") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.000393061") + max_val = float("0.00600073") + mean = float("0.00206613") + std = float("0.00106655") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0914739") + max_val = float("0.110662") + mean = float("0.0155218") + std = float("0.0385942") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0397242") + max_val = float("0.0585133") + mean = float("-0.000194232") + std = float("0.00474942") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + 
dtype = "float32" + min_val = float("-2.46962") + max_val = float("-0.0207386") + mean = float("-1.22629") + std = float("0.444824") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.533115") + max_val = float("1.64651") + mean = float("0.94968") + std = float("0.173477") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0258755") + max_val = float("0.1224") + mean = float("0.0531712") + std = float("0.0215289") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-2.50049") + max_val = float("1.19932") + mean = float("-0.168452") + std = float("0.381676") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.173549") + max_val = float("0.0871168") + mean = float("-0.000260514") + std = float("0.00589301") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.39137") + max_val = float("0.557076") + mean = float("-0.134021") + std = float("0.346667") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.0456926") + max_val = float("1.87") + mean = float("0.460917") + std = float("0.367799") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("7.8985e-05") + max_val = float("0.00257956") + mean = float("0.000633262") + std = float("0.000529372") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0294464") + max_val = float("0.0390531") + mean = float("0.00618468") + std = float("0.0144829") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0499002") + max_val = float("0.03712") + mean = float("-0.000516494") + std = float("0.00601108") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.39137") + max_val = float("0.557076") + mean = float("-0.134021") + std = float("0.346667") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.369434") + max_val = float("2.33578") + mean = float("0.904761") + std = float("0.427621") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.0013064") + max_val = float("0.0114625") + mean = float("0.00366985") + std = float("0.00182431") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0674013") + max_val = float("0.106024") + mean = float("0.0235788") + std = float("0.0301432") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0544699") + max_val = float("0.0432893") + mean = float("-0.000227051") + std = float("0.00481142") + data = None + + +class 
Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.32769") + max_val = float("0.362732") + mean = float("-1.17761") + std = float("0.557882") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.470895") + max_val = float("1.98413") + mean = float("1.04365") + std = float("0.239424") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.0172223") + max_val = float("0.0742288") + mean = float("0.0330012") + std = float("0.0104026") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.649399") + max_val = float("0.431953") + mean = float("-0.0718373") + std = float("0.18395") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.136311") + max_val = float("0.143453") + mean = float("-0.000302539") + std = float("0.0058329") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-1.25274") + max_val = float("0.579736") + mean = float("-0.110157") + std = float("0.290873") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0246593") + max_val = float("1.28142") + mean = float("0.323963") + std = float("0.193926") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("3.03979e-05") + max_val = float("0.0033039") + mean = float("0.000564813") + std = float("0.000505444") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0377552") + max_val = float("0.049578") + mean = float("0.0038562") + std = float("0.0144143") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0412896") + max_val = float("0.0431426") + mean = float("-0.000341595") + std = float("0.00616412") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-1.25274") + max_val = float("0.579736") + mean = float("-0.110157") + std = float("0.290873") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.317522") + max_val = float("1.6746") + mean = float("0.750723") + std = float("0.258265") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00138595") + max_val = float("0.0112807") + mean = float("0.00427388") + std = float("0.00172965") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.0693035") + max_val = float("0.106113") + mean = float("0.0156645") + std = float("0.029297") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0707344") + max_val = float("0.0533381") + mean = float("-0.000195671") 
+ std = float("0.00486483") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-3.5906") + max_val = float("0.291524") + mean = float("-1.12744") + std = float("0.574031") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.519079") + max_val = float("2.19595") + mean = float("1.05638") + std = float("0.238992") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0152112") + max_val = float("0.0436628") + mean = float("0.0246659") + std = float("0.00537498") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.688628") + max_val = float("0.554991") + mean = float("-0.0233944") + std = float("0.159928") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0863777") + max_val = float("0.123252") + mean = float("-0.000287898") + std = float("0.0059129") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.894065") + max_val = float("0.528462") + mean = float("-0.160914") + std = float("0.280775") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0198977") + max_val = float("1.40929") + mean = float("0.324417") + std = float("0.214346") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("2.3179e-05") + max_val = float("0.00304799") + mean = float("0.000599645") + std = float("0.000483623") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0235075") + max_val = float("0.0488833") + mean = float("0.00764636") + std = float("0.0134397") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0516664") + max_val = float("0.0403338") + mean = float("-0.000650817") + std = float("0.006249") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.894065") + max_val = float("0.528462") + mean = float("-0.160914") + std = float("0.280775") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.177671") + max_val = float("1.78574") + mean = float("0.712956") + std = float("0.285068") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.000855722") + max_val = float("0.011962") + mean = float("0.00441699") + std = float("0.00176161") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.046195") + max_val = float("0.101491") + mean = float("0.0248708") + std = float("0.0287099") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0617188") + 
max_val = float("0.0545246") + mean = float("-0.000236251") + std = float("0.00488926") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-2.66323") + max_val = float("0.0623296") + mean = float("-1.06373") + std = float("0.489342") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.516127") + max_val = float("1.74272") + mean = float("1.01959") + std = float("0.194249") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.00966188") + max_val = float("0.0337769") + mean = float("0.0185695") + std = float("0.00483648") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.574086") + max_val = float("0.395306") + mean = float("-0.0490517") + std = float("0.150869") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0819348") + max_val = float("0.110116") + mean = float("-0.000329675") + std = float("0.0058075") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.980112") + max_val = float("0.483047") + mean = float("-0.136611") + std = float("0.277228") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0468125") + max_val = float("1.14578") + mean = float("0.29425") + std = float("0.17328") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000143071") + max_val = float("0.00493424") + mean = float("0.000913103") + std = float("0.000688244") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0363086") + max_val = float("0.0523069") + mean = float("0.00479237") + std = float("0.0159931") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0661086") + max_val = float("0.0594125") + mean = float("-0.00059754") + std = float("0.00705215") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.980112") + max_val = float("0.483046") + mean = float("-0.136611") + std = float("0.277228") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.245629") + max_val = float("1.70476") + mean = float("0.608342") + std = float("0.228754") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.00269671") + max_val = float("0.0179759") + mean = float("0.00695992") + std = float("0.00256464") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.0419227") + max_val = float("0.111328") + mean = float("0.0162502") + std = float("0.0308395") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 
96, 3, 3] + dtype = "float32" + min_val = float("-0.070362") + max_val = float("0.0475107") + mean = float("-0.000231982") + std = float("0.0049307") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-3.47468") + max_val = float("0.198878") + mean = float("-1.00518") + std = float("0.549489") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.68523") + max_val = float("2.51384") + mean = float("1.07866") + std = float("0.21278") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0073715") + max_val = float("0.0337206") + mean = float("0.01499") + std = float("0.00482926") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.403211") + max_val = float("0.240247") + mean = float("-0.0331757") + std = float("0.129637") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0707057") + max_val = float("0.0823979") + mean = float("-0.000250044") + std = float("0.00594881") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-0.626965") + max_val = float("0.448835") + mean = float("-0.0820382") + std = float("0.255843") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.0906111") + max_val = float("1.29038") + mean = float("0.307307") + std = float("0.194816") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.000315305") + max_val = float("0.0148723") + mean = float("0.00305807") + std = float("0.00245914") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.0371389") + max_val = float("0.0183386") + mean = float("0.000249639") + std = float("0.00910407") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0911381") + max_val = float("0.0639953") + mean = float("-0.000941367") + std = float("0.00812654") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-0.626965") + max_val = float("0.448835") + mean = float("-0.0820382") + std = float("0.255843") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.209511") + max_val = float("1.43917") + mean = float("0.531943") + std = float("0.259075") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00462531") + max_val = float("0.0495976") + mean = float("0.0190222") + std = float("0.00948176") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.108167") + max_val = float("0.0628188") + mean = float("-0.00562213") + std = float("0.0290345") + data = None + + +class 
Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0925723") + max_val = float("0.056426") + mean = float("-0.000299437") + std = float("0.00485596") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-2.41584") + max_val = float("0.51672") + mean = float("-0.829512") + std = float("0.467964") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.858214") + max_val = float("2.18042") + mean = float("1.27928") + std = float("0.209066") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.00573305") + max_val = float("0.0283505") + mean = float("0.0129242") + std = float("0.0047369") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-0.468758") + max_val = float("0.237107") + mean = float("-0.0457111") + std = float("0.116263") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.140288") + max_val = float("0.144017") + mean = float("-0.000168599") + std = float("0.00619743") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-3.17591") + max_val = float("1.88794") + mean = float("0.50081") + std = float("0.862147") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("0.218511") + max_val = float("2.64172") + mean = float("0.557308") + std = float("0.321222") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.0059309") + max_val = float("0.0854273") + mean = float("0.0187079") + std = float("0.0133142") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-0.21823") + max_val = float("0.243022") + mean = float("-0.0238119") + std = float("0.0717645") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.159229") + max_val = float("0.164707") + mean = float("-0.000489464") + std = float("0.0123153") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-4.93686") + max_val = float("1.57224") + mean = float("0.382249") + std = float("1.05007") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.408469") + max_val = float("6.77488") + mean = float("1.6992") + std = float("1.3056") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("0.0027471") + max_val = float("0.101892") + mean = float("0.0161575") + std = float("0.0144822") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-0.113828") + max_val = float("0.255763") + mean = float("0.0304151") + std = 
float("0.0726062") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0865442") + max_val = float("0.13143") + mean = float("0.000256689") + std = float("0.0111353") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [192] + dtype = "float32" + min_val = float("-2.27841") + max_val = float("1.74989") + mean = float("-0.126437") + std = float("0.740487") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [192] + dtype = "float32" + min_val = float("0.633239") + max_val = float("2.97753") + mean = float("1.09234") + std = float("0.284333") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [192] + dtype = "float32" + min_val = float("0.00605626") + max_val = float("0.124183") + mean = float("0.0225335") + std = float("0.0173782") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [192] + dtype = "float32" + min_val = float("-0.363149") + max_val = float("0.203569") + mean = float("-0.0509608") + std = float("0.0932024") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0732508") + max_val = float("0.0817381") + mean = float("-0.000189264") + std = float("0.00581775") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [128] + dtype = "float32" + min_val = float("-2.81739") + max_val = float("1.95963") + mean = float("-0.71131") + std = float("0.648704") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [128] + dtype = "float32" + min_val = float("0.305831") + max_val = float("2.87595") + mean = float("1.02519") + std = float("0.278769") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [128] + dtype = "float32" + min_val = float("0.000296253") + max_val = float("0.00655884") + mean = float("0.00182291") + std = float("0.000942289") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [128] + dtype = "float32" + min_val = float("-0.238123") + max_val = float("0.218266") + mean = float("0.0102315") + std = float("0.0613663") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.147384") + max_val = float("0.149703") + mean = float("-0.00110619") + std = float("0.0174083") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-0.0166051") + max_val = float("-0.00197342") + mean = float("-0.00771776") + std = float("0.0036883") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.244486") + max_val = float("0.115264") + mean = float("-0.0084264") + std = float("0.0163364") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0451306") + max_val = float("0.0516016") + mean = float("-0.00123331") + std = float("0.0108119") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0522823") + max_val = float("0.061342") + mean = float("-0.000213753") + std = float("0.00862639") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.070824") + max_val = float("0.0787998") + mean = float("-0.000460596") + std = float("0.00955491") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0664729") + max_val = float("0.0674885") + mean = float("-0.000911561") + std = float("0.0112967") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0618099") + max_val = float("0.0462764") + mean = float("-0.000494121") + std = float("0.00848653") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0903042") + max_val = float("0.0707569") + mean = float("-0.000342113") + std = float("0.00974196") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0793648") + max_val = float("0.0587558") + mean = float("-0.00142765") + std = float("0.0137979") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.057566") + max_val = float("0.0709009") + mean = float("-0.000249497") + std = float("0.00895742") + data = 
None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.10442") + max_val = float("0.0691325") + mean = float("-0.000258872") + std = float("0.0104529") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.136744") + max_val = float("0.10005") + mean = float("-0.00171348") + std = float("0.0182976") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.101881") + max_val = float("0.143004") + mean = float("-0.000443039") + std = float("0.0175817") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [96] + dtype = "float32" + min_val = float("-3.42322") + max_val = float("3.28108") + mean = float("0.327995") + std = float("1.1473") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [96] + dtype = "float32" + min_val = float("0.874948") + max_val = float("4.92491") + mean = float("1.92226") + std = float("0.754909") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [96] + dtype = "float32" + min_val = float("0.349668") + max_val = float("14.7849") + mean = float("1.53931") + std = float("1.73025") + data = None + + +class Program_weight_tensor_parameter_736: + name = 
"parameter_736" + shape = [96] + dtype = "float32" + min_val = float("-1.04846") + max_val = float("1.45518") + mean = float("-0.203391") + std = float("0.490156") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.112555") + max_val = float("0.100752") + mean = float("-0.000270096") + std = float("0.0100051") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.126906") + max_val = float("0.131715") + mean = float("-0.000428282") + std = float("0.0154308") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.2537") + max_val = float("0.156204") + mean = float("-0.000164754") + std = float("0.0200964") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name = "parameter_751" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.247045") + max_val = float("0.224081") + mean = float("-0.00143914") + std = float("0.0546739") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..64e121445 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ 
+0621664d615e3c9d853112c02ae864f2d69542a45086eee97ddcd3e82bebddd6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/input_meta.py new file mode 100644 index 000000000..158913178 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/input_meta.py @@ -0,0 +1,85 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 4116] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00655977") + std = float("0.0807263") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 27] + dtype = "int64" + min_val = 23 + max_val = 3689 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 4116] + dtype = "float32" + max_val = float("0.695231") + mean = float("0.00688709") + std = float("0.0333326") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4116, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("444.0") + mean = float("224.0") + std = float("129.279") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 35.3684, + 311.273, + 300.632, + 442.182, + 78.6168, + 241.764, + 110.41, + 266.46, + 195.413, + 193.28, + 236.373, + 224.0, + 130.415, + 227.85, + 158.792, + 248.688, + 360.901, + 338.022, + 374.268, + 347.726, + 304.951, + 76.6956, + 336.0, + 109.565, + 295.171, + 70.9146, + 350.609, + 111.437, + 18.5379, + 0.0, + 293.517, + 337.836, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 1, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py new file mode 100644 index 000000000..2ae07efe1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py @@ -0,0 +1,247 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + multiply_0 = paddle._C_ops.multiply(data_2, data_0) + del data_2 + + # pd_op.flatten: (8x4116xf32) <- (8x1x4116xf32) + flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) + + # pd_op.flatten: (8x27xi64) <- (8x1x27xi64) + flatten_1 = paddle._C_ops.flatten(data_1, 0, 1) + del data_1 + + # pd_op.index_sample: (8x27xf32) <- (8x4116xf32, 8x27xi64) + index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) + del flatten_0, flatten_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [8, 1, -1] + + # pd_op.reshape: (8x1x27xf32) <- (8x27xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(index_sample_0, full_int_array_0) + del full_int_array_0, index_sample_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (8x1x1xf32) <- (8x1x27xf32, 1xi64) + mean_0 = paddle._C_ops.mean(reshape_0, 
full_int_array_1, True) + + # pd_op.subtract: (8x1x27xf32) <- (8x1x27xf32, 8x1x1xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) + + # pd_op.pow: (8x1x27xf32) <- (8x1x27xf32) + pow_0 = paddle._C_ops.pow(subtract_0, float("2")) + del subtract_0 + + # pd_op.sum: (8x1x1xf32) <- (8x1x27xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_1, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (8x1x27xf32) + numel_0 = paddle._C_ops.numel(reshape_0) + del reshape_0 + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (8x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_0, float("-1"), True) + del divide_0, full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (xf32) <- (xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + scale_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.maximum: (xf32) <- (xf32, xf32) + maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) + del full_like_0, scale_0 + + # pd_op.divide: (8x1x1xf32) <- (8x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, maximum_0) + del maximum_0, sum_0 + + # pd_op.sqrt: (8x1x1xf32) <- (8x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(divide_1) + del divide_1 + + # pd_op.add: (8x1x1xf32) <- (8x1x1xf32, 8x1x1xf32) + add_0 = paddle._C_ops.add(mean_0, sqrt_0) + del mean_0, sqrt_0 + + # pd_op.greater_than: (8x1x4116xb) <- (8x1x4116xf32, 8x1x1xf32) + greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) + del add_0, multiply_0 + + # pd_op.full_like: (8x1x4116xf32) <- (8x1x4116xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + data_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (8x1x4116xf32) <- (8x1x4116xb, 8x1x4116xf32, 8x1x4116xf32) + where_0 = paddle._C_ops.where(greater_than_1, data_0, full_like_1) + del data_0, full_like_1, greater_than_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [0, 1] + + # pd_op.unsqueeze: (1x1x4116x2xf32) <- (4116x2xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) + del data_3, full_int_array_2 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x4116x1xf32, 1x1x4116x1xf32]) <- (1x1x4116x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_2) + del unsqueeze_0 + + # builtin.split: (1x1x4116x1xf32, 1x1x4116x1xf32) <- ([1x1x4116x1xf32, 1x1x4116x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, 
full_int_array_3) + del data_4, full_int_array_3 + + # pd_op.split_with_num: ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) <- (8x1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_2) + del full_2, unsqueeze_1 + + # builtin.split: (8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32) <- ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (8x1x4116x1xf32) <- (1x1x4116x1xf32, 8x1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (8x1x4116x1xf32) <- (1x1x4116x1xf32, 8x1x1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (8x1x4116x1xf32) <- (8x1x1x1xf32, 1x1x4116x1xf32) + subtract_3 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (8x1x4116x1xf32) <- (8x1x1x1xf32, 1x1x4116x1xf32) + subtract_4 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32]) <- (8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32) + combine_0 = [subtract_1, subtract_2, subtract_3, subtract_4] + del subtract_1, subtract_2, subtract_3, subtract_4 + + # pd_op.concat: (8x1x4116x4xf32) <- ([8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_3) + del combine_0, full_3 + + # pd_op.min: (8x1x4116xf32) <- (8x1x4116x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_1, False) + del concat_0, full_int_array_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (8x1x4116xb) <- (8x1x4116xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_0, full_4) + del full_4, min_0 + + # pd_op.cast: (8x1x4116xf32) <- (8x1x4116xb) + cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + multiply_1 = paddle._C_ops.multiply(where_0, cast_4) + del cast_4, where_0 + + # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x1xf32) + multiply_2 = paddle._C_ops.multiply(multiply_1, data_5) + del data_5, multiply_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.sum: (8x4116xf32) <- (8x1x4116xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, False) + del full_int_array_4 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_5 = [] + + # pd_op.max: (xf32) <- (8x4116xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_5, False) + del full_int_array_5 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_5) + del full_5, max_0, multiply_2, sum_1 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py @@ -0,0 +1 
@@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py new file mode 100644 index 000000000..fc3dcee91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.0512187") + max_val = float("14.5679") + mean = float("6.78357") + std = float("2.69712") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.6, 2.397, 2.64463, 1.6] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, 
add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..f0876b5b3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/input_meta.py new file mode 100644 index 000000000..44c9b030c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 4116] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py new file mode 100644 index 000000000..8ca9a2697 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("4"), paddle.int32, paddle.framework._current_expected_place() + ) 
+ + # pd_op.not_equal: (8x4116xb) <- (8x4116xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (8x4116xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..690b123d1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +f74c5f134fbcd6078b4299d3edb74995ec2517b5f16472e409844e6a38edce19 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json new file mode 100644 index 000000000..32219c0fa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py new file mode 100644 index 000000000..e7cef1f03 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 768, 14, 14] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.94092") + mean = float("0.271046") + std = float("0.622828") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 384, 28, 28] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.2275") + mean = float("0.375277") + std = float("0.700531") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 192, 56, 56] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("12.1434") + mean = float("0.487172") + std = float("0.770854") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py new file mode 100644 index 000000000..f2c6c745a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + 
parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("14"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (14xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (14xf32) <- (14xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (14xf32) <- (14xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (14xf32) <- (14xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([14xf32, 14xf32]) <- (14xf32, 14xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([14x14xf32, 14x14xf32]) <- ([14xf32, 14xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (14x14xf32, 14x14xf32) <- ([14x14xf32, 14x14xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32]) <- (14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (14x14x4xf32) <- ([14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([14x14xf32, 14x14xf32]) <- (14x14xf32, 14x14xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (14x14x2xf32) <- ([14x14xf32, 14x14xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (196x4xf32) <- (14x14x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (196x2xf32) <- (14x14x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (196x1xf32) <- () + full_5 
= paddle._C_ops.full( + [196, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("28"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (28xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (28xf32) <- (28xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (28xf32) <- (28xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (28xf32) <- (28xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([28xf32, 28xf32]) <- (28xf32, 28xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([28x28xf32, 28x28xf32]) <- ([28xf32, 28xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (28x28xf32, 28x28xf32) <- ([28x28xf32, 28x28xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32]) <- (28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (28x28x4xf32) <- ([28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([28x28xf32, 28x28xf32]) <- (28x28xf32, 28x28xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (28x28x2xf32) <- ([28x28xf32, 28x28xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (784x4xf32) <- (28x28x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (784x2xf32) <- (28x28x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (784x1xf32) <- () + full_8 = paddle._C_ops.full( + [784, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("56"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (56xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (56xf32) <- (56xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (56xf32) <- (56xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (56xf32) <- (56xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, 
scale_12 + + # builtin.combine: ([56xf32, 56xf32]) <- (56xf32, 56xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([56x56xf32, 56x56xf32]) <- ([56xf32, 56xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (56x56xf32, 56x56xf32) <- ([56x56xf32, 56x56xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32]) <- (56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (56x56x4xf32) <- ([56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([56x56xf32, 56x56xf32]) <- (56x56xf32, 56x56xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (56x56x2xf32) <- ([56x56xf32, 56x56xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (3136x4xf32) <- (56x56x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (3136x2xf32) <- (56x56x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (3136x1xf32) <- () + full_11 = paddle._C_ops.full( + [3136, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([196x4xf32, 784x4xf32, 3136x4xf32]) <- (196x4xf32, 784x4xf32, 3136x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (4116x4xf32) <- ([196x4xf32, 784x4xf32, 3136x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([196x2xf32, 784x2xf32, 3136x2xf32]) <- (196x2xf32, 784x2xf32, 3136x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (4116x2xf32) <- ([196x2xf32, 784x2xf32, 3136x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([196x1xf32, 784x1xf32, 3136x1xf32]) <- (196x1xf32, 784x1xf32, 3136x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (4116x1xf32) <- ([196x1xf32, 784x1xf32, 3136x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (8x768x1x1xf32) <- (8x768x14x14xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 
768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x14x14xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (8x4x14x14xf32) <- (8x768x14x14xf32, 4x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (8x4x14x14xf32) <- (8x4x14x14xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x68x14x14xf32) <- (8x768x14x14xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (8x68x14x14xf32) <- (8x68x14x14xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (8x4x14x14xf32) <- (8x4x14x14xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (8x4x196xf32) <- (8x4x14x14xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (8x196x4xf32) <- (8x4x196xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (8x68x196xf32) <- (8x68x14x14xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (8x196x68xf32) <- (8x68x196xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x28x28xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x28x28xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (8x4x28x28xf32) <- (8x384x28x28xf32, 4x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (8x4x28x28xf32) <- (8x4x28x28xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x68x28x28xf32) <- (8x384x28x28xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (8x68x28x28xf32) <- (8x68x28x28xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (8x4x28x28xf32) <- (8x4x28x28xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (8x4x784xf32) <- (8x4x28x28xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (8x784x4xf32) <- (8x4x784xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (8x68x784xf32) <- (8x68x28x28xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x784x68xf32) <- (8x68x784xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x56x56xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del 
parameter_16 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (8x192x56x56xf32) <- (8x192x56x56xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x56x56xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (8x4x56x56xf32) <- (8x192x56x56xf32, 4x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (8x4x56x56xf32) <- (8x4x56x56xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (8x192x56x56xf32) <- (8x192x56x56xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + 
# pd_op.conv2d: (8x68x56x56xf32) <- (8x192x56x56xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (8x68x56x56xf32) <- (8x68x56x56xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (8x4x56x56xf32) <- (8x4x56x56xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (8x4x3136xf32) <- (8x4x56x56xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (8x3136x4xf32) <- (8x4x3136xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (8x68x3136xf32) <- (8x68x56x56xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x3136x68xf32) <- (8x68x3136xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([8x196x4xf32, 8x784x4xf32, 8x3136x4xf32]) <- (8x196x4xf32, 8x784x4xf32, 8x3136x4xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (8x4116x4xf32) <- ([8x196x4xf32, 8x784x4xf32, 8x3136x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([8x196x68xf32, 8x784x68xf32, 8x3136x68xf32]) <- (8x196x68xf32, 8x784x68xf32, 8x3136x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x4116x68xf32) <- ([8x196x68xf32, 8x784x68xf32, 8x3136x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, 
concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py new file mode 100644 index 000000000..42b452682 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py @@ -0,0 +1,574 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00994345") + max_val = float("0.0295467") + mean = float("1.85188e-07") + std = float("0.00657184") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.132052") + max_val = float("0.152993") + mean = float("5.82659e-08") + std = float("0.00697617") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0439922") + max_val = float("0.203765") + mean = float("0.0504072") + std = float("0.0395669") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.852608") + max_val = float("1.61916") + mean = float("1.21933") + std = float("0.143034") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000117627") + max_val = float("0.0026124") + mean = float("0.000404459") + std = float("0.000312994") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0352285") + max_val = float("0.029767") + mean = float("-0.0034756") + std = float("0.0106549") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0519368") + max_val = float("0.075145") + mean = float("-0.000115247") + std = float("0.00540221") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00467515") + max_val = float("0.00851991") + mean = float("3.17583e-05") + std = float("0.00259113") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00531914") + max_val = float("0.00943649") + mean = float("-9.32844e-05") + std = float("0.00138252") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 192, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.327098") + max_val = float("0.890395") + mean = float("0.357592") + std = float("0.269366") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.01729") + max_val = float("1.7703") + mean = float("1.31569") + std = float("0.141051") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000191938") + max_val = float("0.00371493") + mean = float("0.00070204") + std = float("0.000534277") + data = None + + +class Program_weight_tensor_parameter_14: + name = 
"parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.172109") + max_val = float("0.0388642") + mean = float("-0.0248015") + std = float("0.0310403") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0756384") + max_val = float("0.068676") + mean = float("-0.0005045") + std = float("0.00650538") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00462125") + max_val = float("0.00952984") + mean = float("-0.000108344") + std = float("0.00180966") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0166403") + max_val = float("0.015016") + mean = float("-1.40574e-05") + std = float("0.00152367") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00413441") + max_val = float("0.0247658") + mean = float("1.70403e-07") + std = float("0.00515374") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0879382") + max_val = float("0.115543") + mean = float("3.09883e-08") + std = float("0.0046703") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00504264") + max_val = float("0.0677623") + mean = float("0.025296") + std = float("0.012925") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.998659") + max_val = float("1.23249") + mean = float("1.10437") + std = float("0.040582") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("6.8383e-05") + max_val = float("0.00279866") + mean = float("0.000303046") + std = float("0.000306114") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0400502") + max_val = float("0.0131297") + mean = float("-0.00632822") + std = float("0.00736343") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0493892") + max_val = float("0.0650265") + mean = float("-8.70135e-05") + std = float("0.00262175") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00258806") + max_val = float("0.00556431") + mean = float("9.34648e-05") + std = float("0.00146967") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00175289") + max_val = float("0.00489605") + mean = float("1.06219e-05") + std = float("0.000586047") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = 
float("-0.150027") + max_val = float("0.451389") + mean = float("0.229437") + std = float("0.0996485") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00298") + max_val = float("1.39843") + mean = float("1.18623") + std = float("0.059847") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000149666") + max_val = float("0.00370625") + mean = float("0.000713075") + std = float("0.000590199") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.108532") + max_val = float("0.0565238") + mean = float("-0.0264124") + std = float("0.0221279") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0480316") + max_val = float("0.0447625") + mean = float("-0.000359058") + std = float("0.00295423") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00203913") + max_val = float("0.00903738") + mean = float("-3.66197e-06") + std = float("0.000959619") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00522582") + max_val = float("0.0088395") + mean = float("-4.68691e-06") + std = float("0.000619469") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00290222") + max_val = float("0.0101817") + mean = float("1.30633e-07") + std = float("0.00299044") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0411349") + max_val = float("0.0738933") + mean = float("1.4159e-08") + std = float("0.00274115") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0141815") + max_val = float("0.0470838") + mean = float("0.0110249") + std = float("0.0102353") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.00835") + max_val = float("1.19911") + mean = float("1.06458") + std = float("0.0222771") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.80062e-05") + max_val = float("0.00131862") + mean = float("0.000152402") + std = float("0.000108701") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0236698") + max_val = float("0.00795434") + mean = float("-0.00383744") + std = float("0.00338712") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0354011") + max_val = float("0.0311472") + mean = float("-3.50875e-05") + std = float("0.0011905") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00350695") + max_val = float("0.00217249") + mean = float("0.000104712") + std = float("0.000668859") + data = None + + +class 
Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00236527") + max_val = float("0.00288539") + mean = float("2.74168e-05") + std = float("0.000209731") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 768, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.109319") + max_val = float("0.200294") + mean = float("0.0936331") + std = float("0.0420139") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.00715") + max_val = float("1.25105") + mean = float("1.07838") + std = float("0.0259236") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("9.94571e-05") + max_val = float("0.00338121") + mean = float("0.000633121") + std = float("0.000467813") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0501712") + max_val = float("0.0941121") + mean = float("-0.0191505") + std = float("0.0110933") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0485631") + max_val = float("0.0317378") + mean = float("-0.000183678") + std = float("0.00129677") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00522906") + max_val = float("0.00428608") + mean = float("1.59338e-05") + std = float("0.000442491") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0190742") + max_val = float("0.0352904") + mean = float("5.40338e-06") + std = float("0.000293143") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..e30fb0baf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +dcd5abc9718b27b6f605484f99e90ce1a8d7ade48c343d11c49b03f022454574 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/input_meta.py new file mode 100644 index 000000000..7003cd21c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 576, 22, 22] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.89689") + mean = 
float("0.309719") + std = float("0.661205") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 288, 44, 44] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("15.652") + mean = float("0.423072") + std = float("0.733633") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 144, 88, 88] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("18.2364") + mean = float("0.519748") + std = float("0.784853") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/model.py new file mode 100644 index 000000000..0da4e04d9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("22"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (22xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (22xf32) <- (22xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (22xf32) <- (22xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (22xf32) <- (22xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([22xf32, 22xf32]) <- (22xf32, 22xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([22x22xf32, 22x22xf32]) <- ([22xf32, 22xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (22x22xf32, 22x22xf32) <- ([22x22xf32, 22x22xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (22x22xf32) <- (22x22xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (22x22xf32) <- (22x22xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, 
float("-80"), True) + + # pd_op.scale: (22x22xf32) <- (22x22xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (22x22xf32) <- (22x22xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([22x22xf32, 22x22xf32, 22x22xf32, 22x22xf32]) <- (22x22xf32, 22x22xf32, 22x22xf32, 22x22xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (22x22x4xf32) <- ([22x22xf32, 22x22xf32, 22x22xf32, 22x22xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([22x22xf32, 22x22xf32]) <- (22x22xf32, 22x22xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (22x22x2xf32) <- ([22x22xf32, 22x22xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (484x4xf32) <- (22x22x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (484x2xf32) <- (22x22x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (484x1xf32) <- () + full_5 = paddle._C_ops.full( + [484, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("44"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (44xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (44xf32) <- (44xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (44xf32) <- (44xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (44xf32) <- (44xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([44xf32, 44xf32]) <- (44xf32, 44xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([44x44xf32, 44x44xf32]) <- ([44xf32, 44xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (44x44xf32, 44x44xf32) <- ([44x44xf32, 44x44xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (44x44xf32) <- (44x44xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (44x44xf32) <- (44x44xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (44x44xf32) <- (44x44xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (44x44xf32) <- (44x44xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([44x44xf32, 44x44xf32, 44x44xf32, 44x44xf32]) <- (44x44xf32, 44x44xf32, 44x44xf32, 44x44xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (44x44x4xf32) <- ([44x44xf32, 44x44xf32, 44x44xf32, 44x44xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([44x44xf32, 44x44xf32]) <- (44x44xf32, 44x44xf32) + combine_5 = [split_3, split_2] + del split_2, 
split_3 + + # pd_op.stack: (44x44x2xf32) <- ([44x44xf32, 44x44xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1936x4xf32) <- (44x44x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1936x2xf32) <- (44x44x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1936x1xf32) <- () + full_8 = paddle._C_ops.full( + [1936, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("88"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (88xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (88xf32) <- (88xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (88xf32) <- (88xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (88xf32) <- (88xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([88xf32, 88xf32]) <- (88xf32, 88xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([88x88xf32, 88x88xf32]) <- ([88xf32, 88xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (88x88xf32, 88x88xf32) <- ([88x88xf32, 88x88xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (88x88xf32) <- (88x88xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (88x88xf32) <- (88x88xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (88x88xf32) <- (88x88xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (88x88xf32) <- (88x88xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([88x88xf32, 88x88xf32, 88x88xf32, 88x88xf32]) <- (88x88xf32, 88x88xf32, 88x88xf32, 88x88xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (88x88x4xf32) <- ([88x88xf32, 88x88xf32, 88x88xf32, 88x88xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([88x88xf32, 88x88xf32]) <- (88x88xf32, 88x88xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (88x88x2xf32) <- ([88x88xf32, 88x88xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (7744x4xf32) <- (88x88x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (7744x2xf32) <- (88x88x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (7744x1xf32) <- () + full_11 = paddle._C_ops.full( + [7744, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([484x4xf32, 1936x4xf32, 7744x4xf32]) <- (484x4xf32, 1936x4xf32, 7744x4xf32) + combine_9 = 
[reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (10164x4xf32) <- ([484x4xf32, 1936x4xf32, 7744x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([484x2xf32, 1936x2xf32, 7744x2xf32]) <- (484x2xf32, 1936x2xf32, 7744x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (10164x2xf32) <- ([484x2xf32, 1936x2xf32, 7744x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([484x1xf32, 1936x1xf32, 7744x1xf32]) <- (484x1xf32, 1936x1xf32, 7744x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (10164x1xf32) <- ([484x1xf32, 1936x1xf32, 7744x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (8x576x1x1xf32) <- (8x576x22x22xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x576x1x1xf32) <- (8x576x1x1xf32, 576x576x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (8x576x22x22xf32) <- (8x576x22x22xf32, 8x576x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (8x576x22x22xf32) <- (8x576x22x22xf32, 576x576x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (8x576x22x22xf32) <- (8x576x22x22xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (8x576x22x22xf32) <- (8x576x22x22xf32, 8x576x22x22xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (8x4x22x22xf32) <- (8x576x22x22xf32, 4x576x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (8x4x22x22xf32) <- (8x4x22x22xf32, 1x4x1x1xf32) + add_2 = 
paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (8x576x1x1xf32) <- (8x576x1x1xf32, 576x576x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (8x576x22x22xf32) <- (8x576x22x22xf32, 8x576x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (8x576x22x22xf32) <- (8x576x22x22xf32, 576x576x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (8x576x22x22xf32) <- (8x576x22x22xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x68x22x22xf32) <- (8x576x22x22xf32, 68x576x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (8x68x22x22xf32) <- (8x68x22x22xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (8x4x22x22xf32) <- (8x4x22x22xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (8x4x484xf32) <- (8x4x22x22xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (8x484x4xf32) <- (8x4x484xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (8x68x484xf32) <- (8x68x22x22xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (8x484x68xf32) <- (8x68x484xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (8x288x1x1xf32) <- (8x288x44x44xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # 
pd_op.multiply: (8x288x44x44xf32) <- (8x288x44x44xf32, 8x288x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (8x288x44x44xf32) <- (8x288x44x44xf32, 288x288x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (8x288x44x44xf32) <- (8x288x44x44xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (8x288x44x44xf32) <- (8x288x44x44xf32, 8x288x44x44xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (8x4x44x44xf32) <- (8x288x44x44xf32, 4x288x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (8x4x44x44xf32) <- (8x4x44x44xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (8x288x44x44xf32) <- (8x288x44x44xf32, 8x288x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (8x288x44x44xf32) <- (8x288x44x44xf32, 288x288x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x288x44x44xf32) <- (8x288x44x44xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x68x44x44xf32) <- (8x288x44x44xf32, 68x288x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 
4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (8x68x44x44xf32) <- (8x68x44x44xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (8x4x44x44xf32) <- (8x4x44x44xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (8x4x1936xf32) <- (8x4x44x44xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (8x1936x4xf32) <- (8x4x1936xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (8x68x1936xf32) <- (8x68x44x44xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x1936x68xf32) <- (8x68x1936xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (8x144x1x1xf32) <- (8x144x88x88xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (8x144x88x88xf32) <- (8x144x88x88xf32, 8x144x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (8x144x88x88xf32) <- (8x144x88x88xf32, 144x144x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (8x144x88x88xf32) <- (8x144x88x88xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (8x144x88x88xf32) <- (8x144x88x88xf32, 8x144x88x88xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (8x4x88x88xf32) <- (8x144x88x88xf32, 4x144x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (8x4x88x88xf32) <- (8x4x88x88xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_16 = 
paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (8x144x88x88xf32) <- (8x144x88x88xf32, 8x144x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (8x144x88x88xf32) <- (8x144x88x88xf32, 144x144x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x144x88x88xf32) <- (8x144x88x88xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x68x88x88xf32) <- (8x144x88x88xf32, 68x144x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (8x68x88x88xf32) <- (8x68x88x88xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (8x4x88x88xf32) <- (8x4x88x88xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (8x4x7744xf32) <- (8x4x88x88xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (8x7744x4xf32) <- (8x4x7744xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (8x68x7744xf32) <- (8x68x88x88xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x7744x68xf32) <- (8x68x7744xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([8x484x4xf32, 8x1936x4xf32, 8x7744x4xf32]) <- (8x484x4xf32, 8x1936x4xf32, 8x7744x4xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (8x10164x4xf32) <- ([8x484x4xf32, 8x1936x4xf32, 8x7744x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([8x484x68xf32, 8x1936x68xf32, 8x7744x68xf32]) <- (8x484x68xf32, 8x1936x68xf32, 8x7744x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x10164x68xf32) <- ([8x484x68xf32, 8x1936x68xf32, 8x7744x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, 
+ batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/weight_meta.py new file mode 100644 index 000000000..e74e505b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/weight_meta.py @@ -0,0 +1,574 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0141311") + max_val = float("0.0241404") + mean = float("6.50325e-08") + std = float("0.00670338") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 144, 3, 3] + dtype = "float32" + min_val = float("-0.159403") + max_val = float("0.187672") + mean = float("6.14455e-08") + std = float("0.00826349") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [144] + dtype = "float32" + min_val = float("-0.104117") + max_val = float("0.334805") + mean = float("0.0803525") + std = float("0.0949005") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [144] + dtype = "float32" + min_val = float("0.833704") + max_val = float("2.14069") + mean = float("1.40199") + std = float("0.259198") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [144] + dtype = "float32" + min_val = float("0.000159098") + max_val = float("0.00221604") + mean = float("0.00057246") + std = float("0.000352235") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [144] + dtype = "float32" + min_val = float("-0.0501895") + max_val = float("0.0383549") + mean = float("-0.00743355") + std = float("0.0175877") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0724959") + max_val = float("0.0965262") + mean = float("-0.000264694") + std = float("0.00737951") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [144] + dtype = "float32" + min_val = float("-0.00636106") + max_val = float("0.00678397") 
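+    # Used in subgraph_0/model.py as the bias added to conv2d_15 (weight: parameter_8, 144x144x1x1)
+    # in the channel-attention gate pool2d_2 -> conv2d_15 -> sigmoid_7 -> multiply_5 on data_2 (8x144x88x88).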
+ mean = float("-0.000173716") + std = float("0.00320574") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0117988") + max_val = float("0.0153353") + mean = float("-0.000124252") + std = float("0.00220501") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 144, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [144] + dtype = "float32" + min_val = float("-0.64158") + max_val = float("1.51361") + mean = float("0.436643") + std = float("0.396591") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [144] + dtype = "float32" + min_val = float("0.912817") + max_val = float("2.11091") + mean = float("1.38863") + std = float("0.197354") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [144] + dtype = "float32" + min_val = float("0.000203511") + max_val = float("0.00316607") + mean = float("0.000794668") + std = float("0.000505576") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [144] + dtype = "float32" + min_val = float("-0.24633") + max_val = float("0.0360242") + mean = float("-0.0278702") + std = float("0.0405044") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0649435") + max_val = float("0.0801722") + mean = float("-0.000599471") + std = float("0.0088557") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [144] + dtype = "float32" + min_val = float("-0.00555943") + max_val = float("0.0056399") + mean = float("-0.000279704") + std = float("0.00214688") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0293143") + max_val = float("0.0536777") + mean = float("-5.72244e-05") + std = float("0.00245901") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00495954") + max_val = float("0.0272294") + mean = float("6.12054e-08") + std = float("0.00561274") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 288, 3, 3] + dtype = "float32" + min_val = float("-0.111167") + max_val = float("0.129179") + mean = float("3.28291e-08") + std = float("0.00572785") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [288] + dtype = "float32" + min_val = float("-0.0173528") + max_val = float("0.146767") + mean = float("0.0535169") + std = float("0.0321121") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [288] + dtype = "float32" + min_val = float("1.01431") + max_val = float("1.4495") + mean = float("1.22632") + std = float("0.0811625") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [288] + dtype = "float32" + min_val = float("9.64994e-05") + max_val = float("0.00404225") + mean = float("0.000462447") + std = float("0.000446207") + data = None + + +class 
Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [288] + dtype = "float32" + min_val = float("-0.0516242") + max_val = float("0.0161539") + mean = float("-0.00783458") + std = float("0.00920113") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0594582") + max_val = float("0.0735974") + mean = float("-0.000137226") + std = float("0.00364254") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [288] + dtype = "float32" + min_val = float("-0.00314669") + max_val = float("0.00620902") + mean = float("2.48414e-05") + std = float("0.00194775") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.00388461") + max_val = float("0.00841306") + mean = float("-1.87954e-05") + std = float("0.000932023") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 288, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [288] + dtype = "float32" + min_val = float("-0.27006") + max_val = float("0.770798") + mean = float("0.311538") + std = float("0.171771") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [288] + dtype = "float32" + min_val = float("0.991505") + max_val = float("1.72132") + mean = float("1.2558") + std = float("0.0945899") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [288] + dtype = "float32" + min_val = float("0.000232897") + max_val = float("0.00545131") + mean = float("0.00082235") + std = float("0.000645185") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [288] + dtype = "float32" + min_val = float("-0.130475") + max_val = float("0.0693064") + mean = float("-0.0265702") + std = float("0.0287752") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0480156") + max_val = float("0.0592741") + mean = float("-0.000430968") + std = float("0.00425043") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [288] + dtype = "float32" + min_val = float("-0.00285738") + max_val = float("0.00720467") + mean = float("-7.33536e-05") + std = float("0.0011726") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0122627") + max_val = float("0.0161886") + mean = float("-1.72547e-05") + std = float("0.000995486") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00365418") + max_val = float("0.0135854") + mean = float("3.43716e-08") + std = float("0.00376475") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 576, 3, 3] + dtype = "float32" + min_val = float("-0.0663088") + max_val = float("0.0674867") + mean = float("1.74696e-08") + std = float("0.00359868") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + 
shape = [576] + dtype = "float32" + min_val = float("-0.0421788") + max_val = float("0.113513") + mean = float("0.0222551") + std = float("0.0258343") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [576] + dtype = "float32" + min_val = float("1.05067") + max_val = float("1.39463") + mean = float("1.14799") + std = float("0.042853") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [576] + dtype = "float32" + min_val = float("5.01795e-05") + max_val = float("0.00261285") + mean = float("0.000242975") + std = float("0.000213831") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [576] + dtype = "float32" + min_val = float("-0.0343018") + max_val = float("0.0199605") + mean = float("-0.00573212") + std = float("0.00540425") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0385789") + max_val = float("0.04172") + mean = float("-6.11845e-05") + std = float("0.00176385") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [576] + dtype = "float32" + min_val = float("-0.00434694") + max_val = float("0.00341233") + mean = float("0.000100391") + std = float("0.00100061") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.00355251") + max_val = float("0.00417294") + mean = float("2.83397e-05") + std = float("0.00036474") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 576, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [576] + dtype = "float32" + min_val = float("-0.248224") + max_val = float("0.371203") + mean = float("0.155358") + std = float("0.0834839") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [576] + dtype = "float32" + min_val = float("1.02385") + max_val = float("1.42504") + mean = float("1.13342") + std = float("0.0514955") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [576] + dtype = "float32" + min_val = float("0.000121815") + max_val = float("0.00283888") + mean = float("0.000731042") + std = float("0.000526248") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [576] + dtype = "float32" + min_val = float("-0.0731101") + max_val = float("0.0815961") + mean = float("-0.0215005") + std = float("0.0157613") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0599145") + max_val = float("0.0355292") + mean = float("-0.000237247") + std = float("0.00192816") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [576] + dtype = "float32" + min_val = float("-0.00772796") + max_val = float("0.00635587") + mean = float("-2.65035e-05") + std = float("0.000684865") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0265895") + max_val = 
float("0.0452734") + mean = float("-1.04734e-06") + std = float("0.000523833") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..8c9b2b120 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +905a514146c90ef236c43b3359c0945b67c9f3ddc63042b50bd2aa47acea4ebc \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/input_meta.py new file mode 100644 index 000000000..26795f401 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/input_meta.py @@ -0,0 +1,92 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [8, 3, 704, 704] + dtype = "float32" + max_val = float("1.0") + mean = float("0.530551") + std = float("0.181212") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/model.py new file mode 100644 index 000000000..f4842812c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/model.py @@ -0,0 +1,5794 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + 
parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + 
parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + 
parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + ): + # pd_op.conv2d: (8x24x352x352xf32) <- (8x3x704x704xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_12, parameter_532, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_12, parameter_532 + + # pd_op.batch_norm_: (8x24x352x352xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x352x352xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_531, + parameter_530, + parameter_529, + parameter_528, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_528, parameter_529, parameter_530, parameter_531 + + # pd_op.swish: (8x24x352x352xf32) <- (8x24x352x352xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (8x24x352x352xf32) <- (8x24x352x352xf32, 24x24x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_527, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_527 + + # pd_op.batch_norm_: (8x24x352x352xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x352x352xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_526, + parameter_525, + parameter_524, + parameter_523, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_523, parameter_524, parameter_525, parameter_526 + + # pd_op.swish: (8x24x352x352xf32) <- (8x24x352x352xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x48x352x352xf32) <- (8x24x352x352xf32, 48x24x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, 
parameter_522, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_522 + + # pd_op.batch_norm_: (8x48x352x352xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x352x352xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_521, + parameter_520, + parameter_519, + parameter_518, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_518, parameter_519, parameter_520, parameter_521 + + # pd_op.swish: (8x48x352x352xf32) <- (8x48x352x352xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (8x72x176x176xf32) <- (8x48x352x352xf32, 72x48x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_517, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_517 + + # pd_op.batch_norm_: (8x72x176x176xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x176x176xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_516, + parameter_515, + parameter_514, + parameter_513, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_513, parameter_514, parameter_515, parameter_516 + + # pd_op.swish: (8x72x176x176xf32) <- (8x72x176x176xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x72x176x176xf32, 36x72x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_512, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_512 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_511, + parameter_510, + parameter_509, + parameter_508, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_508, parameter_509, parameter_510, parameter_511 + + # pd_op.swish: (8x36x176x176xf32) <- (8x36x176x176xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x72x176x176xf32, 36x72x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_507, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_507 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_506, + parameter_505, + parameter_504, + parameter_503, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_503, parameter_504, parameter_505, parameter_506 + + # 
pd_op.swish: (8x36x176x176xf32) <- (8x36x176x176xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x36x176x176xf32, 36x36x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_502, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_502 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_501, + parameter_500, + parameter_499, + parameter_498, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_498, parameter_499, parameter_500, parameter_501 + + # pd_op.swish: (8x36x176x176xf32) <- (8x36x176x176xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x36x176x176xf32, 36x36x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_497, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_497 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_496, + parameter_495, + parameter_494, + parameter_493, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_493, parameter_494, parameter_495, parameter_496 + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x36x176x176xf32, 36x36x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_492, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_492 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_491, + parameter_490, + parameter_489, + parameter_488, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_488, parameter_489, parameter_490, parameter_491 + + # pd_op.multiply: (8x36x176x176xf32) <- (1xf32, 8x36x176x176xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (8x36x176x176xf32) <- (8x36x176x176xf32, 8x36x176x176xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (8x36x176x176xf32) <- (8x36x176x176xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (8x36x176x176xf32) <- (8x36x176x176xf32, 8x36x176x176xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x36x176x176xf32, 36x36x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_487, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_487 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 
36xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_486, + parameter_485, + parameter_484, + parameter_483, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_483, parameter_484, parameter_485, parameter_486 + + # pd_op.swish: (8x36x176x176xf32) <- (8x36x176x176xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x36x176x176xf32, 36x36x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_482, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_482 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_481, + parameter_480, + parameter_479, + parameter_478, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_478, parameter_479, parameter_480, parameter_481 + + # pd_op.conv2d: (8x36x176x176xf32) <- (8x36x176x176xf32, 36x36x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_477, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_477 + + # pd_op.batch_norm_: (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (8x36x176x176xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_476, + parameter_475, + parameter_474, + parameter_473, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_473, parameter_474, parameter_475, parameter_476 + + # pd_op.multiply: (8x36x176x176xf32) <- (1xf32, 8x36x176x176xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (8x36x176x176xf32) <- (8x36x176x176xf32, 8x36x176x176xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (8x36x176x176xf32) <- (8x36x176x176xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (8x36x176x176xf32) <- (8x36x176x176xf32, 8x36x176x176xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + 
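+ # Note on two patterns that recur throughout this generated subgraph:
+ # 1) `full_0` is a 1-element int32 tensor holding the value 1; it is used as the
+ #    channel axis for the `concat` ops below, and the `assign_*` copies appear to
+ #    alias that constant for later consumers of the same value.
+ # 2) The `(lambda x, f: f(x))(...)` wrapper around `paddle._C_ops.batch_norm`
+ #    normalizes the op's return value: a list/tuple result is unpacked as-is,
+ #    while a single tensor is padded with `None`s so the six-way tuple
+ #    assignment always succeeds; only the first element (the normalized
+ #    activation) is consumed afterwards in this subgraph.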
# pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([8x36x176x176xf32, 8x36x176x176xf32]) <- (8x36x176x176xf32, 8x36x176x176xf32) + combine_0 = [swish_5, add_3] + + # pd_op.concat: (8x72x176x176xf32) <- ([8x36x176x176xf32, 8x36x176x176xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (8x72x1x1xf32) <- (8x72x176x176xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (8x72x1x1xf32) <- (8x72x1x1xf32, 72x72x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + mean_0, parameter_472, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_472 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x72x1x1xf32) <- (72xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_471, full_int_array_1) + del parameter_471 + + # pd_op.add: (8x72x1x1xf32) <- (8x72x1x1xf32, 1x72x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_12, reshape_0) + + # pd_op.hardsigmoid: (8x72x1x1xf32) <- (8x72x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_4, float("0.166667"), float("0.5") + ) + del add_4 + + # pd_op.multiply: (8x72x176x176xf32) <- (8x72x176x176xf32, 8x72x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (8x96x176x176xf32) <- (8x72x176x176xf32, 96x72x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_2, parameter_470, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_470 + + # pd_op.batch_norm_: (8x96x176x176xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x176x176xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_469, + parameter_468, + parameter_467, + parameter_466, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_466, parameter_467, parameter_468, parameter_469 + + # pd_op.swish: (8x96x176x176xf32) <- (8x96x176x176xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (8x144x88x88xf32) <- (8x96x176x176xf32, 144x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_465, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_465 + + # pd_op.batch_norm_: (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_464, + parameter_463, + parameter_462, + parameter_461, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_461, parameter_462, parameter_463, parameter_464 + + # pd_op.swish: (8x144x88x88xf32) <- (8x144x88x88xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + + # 
pd_op.conv2d: (8x72x88x88xf32) <- (8x144x88x88xf32, 72x144x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_12, parameter_460, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_460 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_459, + parameter_458, + parameter_457, + parameter_456, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_456, parameter_457, parameter_458, parameter_459 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_13 = paddle._C_ops.swish(batch_norm__84) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x144x88x88xf32, 72x144x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_12, parameter_455, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_455 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_454, + parameter_453, + parameter_452, + parameter_451, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_451, parameter_452, parameter_453, parameter_454 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_14 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_14, parameter_450, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_450 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_449, + parameter_448, + parameter_447, + parameter_446, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_446, parameter_447, parameter_448, parameter_449 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_445, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_445 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_444, + parameter_443, + parameter_442, + parameter_441, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del parameter_441, parameter_442, parameter_443, parameter_444 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_440, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_440 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_439, + parameter_438, + parameter_437, + parameter_436, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_436, parameter_437, parameter_438, parameter_439 + + # pd_op.multiply: (8x72x88x88xf32) <- (1xf32, 8x72x88x88xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del data_2 + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_6 = paddle._C_ops.add(swish_14, swish_16) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + add_6, parameter_435, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_435 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_434, + parameter_433, + parameter_432, + parameter_431, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_431, parameter_432, parameter_433, parameter_434 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_430, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_430 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_429, + parameter_428, + parameter_427, + parameter_426, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_426, parameter_427, parameter_428, parameter_429 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_425, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_425 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__126, + 
batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_424, + parameter_423, + parameter_422, + parameter_421, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_421, parameter_422, parameter_423, parameter_424 + + # pd_op.multiply: (8x72x88x88xf32) <- (1xf32, 8x72x88x88xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_8 = paddle._C_ops.add(add_6, swish_18) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_420, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_420 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_419, + parameter_418, + parameter_417, + parameter_416, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_416, parameter_417, parameter_418, parameter_419 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_415, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_415 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_414, + parameter_413, + parameter_412, + parameter_411, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_411, parameter_412, parameter_413, parameter_414 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_410, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_410 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_409, + parameter_408, + parameter_407, + parameter_406, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_406, parameter_407, parameter_408, 
parameter_409 + + # pd_op.multiply: (8x72x88x88xf32) <- (1xf32, 8x72x88x88xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_405, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_405 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_404, + parameter_403, + parameter_402, + parameter_401, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_401, parameter_402, parameter_403, parameter_404 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_400, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_400 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_399, + parameter_398, + parameter_397, + parameter_396, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_396, parameter_397, parameter_398, parameter_399 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_395, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_395 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_394, + parameter_393, + parameter_392, + parameter_391, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_391, parameter_392, parameter_393, parameter_394 + + # pd_op.multiply: (8x72x88x88xf32) <- (1xf32, 8x72x88x88xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_12 = 
paddle._C_ops.add(add_10, swish_22) + + # builtin.combine: ([8x72x88x88xf32, 8x72x88x88xf32]) <- (8x72x88x88xf32, 8x72x88x88xf32) + combine_1 = [swish_13, add_12] + + # pd_op.concat: (8x144x88x88xf32) <- ([8x72x88x88xf32, 8x72x88x88xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (8x144x1x1xf32) <- (8x144x88x88xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) + conv2d_29 = paddle._C_ops.conv2d( + mean_1, parameter_390, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_390 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_389, full_int_array_1) + del parameter_389 + + # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_29, reshape_1) + + # pd_op.hardsigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_13, float("0.166667"), float("0.5") + ) + del add_13 + + # pd_op.multiply: (8x144x88x88xf32) <- (8x144x88x88xf32, 8x144x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (8x192x88x88xf32) <- (8x144x88x88xf32, 192x144x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + multiply_7, parameter_388, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_388 + + # pd_op.batch_norm_: (8x192x88x88xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x88x88xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_387, + parameter_386, + parameter_385, + parameter_384, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_384, parameter_385, parameter_386, parameter_387 + + # pd_op.swish: (8x192x88x88xf32) <- (8x192x88x88xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (8x288x44x44xf32) <- (8x192x88x88xf32, 288x192x3x3xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_383, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_383 + + # pd_op.batch_norm_: (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_382, + parameter_381, + parameter_380, + parameter_379, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_379, parameter_380, parameter_381, parameter_382 + + # pd_op.swish: (8x288x44x44xf32) <- (8x288x44x44xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x288x44x44xf32, 144x288x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + swish_24, parameter_378, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_378 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + 
batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_377, + parameter_376, + parameter_375, + parameter_374, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_374, parameter_375, parameter_376, parameter_377 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_25 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x288x44x44xf32, 144x288x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_373, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_373 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_372, + parameter_371, + parameter_370, + parameter_369, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_369, parameter_370, parameter_371, parameter_372 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_26 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_368, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_368 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_367, + parameter_366, + parameter_365, + parameter_364, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_364, parameter_365, parameter_366, parameter_367 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_27 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_27, parameter_363, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_363 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_362, + parameter_361, + parameter_360, + parameter_359, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_359, parameter_360, parameter_361, parameter_362 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_358, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_358 + + # pd_op.batch_norm_: 
(8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_357, + parameter_356, + parameter_355, + parameter_354, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_354, parameter_355, parameter_356, parameter_357 + + # pd_op.multiply: (8x144x44x44xf32) <- (1xf32, 8x144x44x44xf32) + multiply_8 = paddle._C_ops.multiply(data_6, batch_norm__204) + del data_6 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_14 = paddle._C_ops.add(batch_norm__198, multiply_8) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_28 = paddle._C_ops.swish(add_14) + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_15 = paddle._C_ops.add(swish_26, swish_28) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + add_15, parameter_353, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_353 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_352, + parameter_351, + parameter_350, + parameter_349, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_349, parameter_350, parameter_351, parameter_352 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_29 = paddle._C_ops.swish(batch_norm__210) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_348, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_348 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_347, + parameter_346, + parameter_345, + parameter_344, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_344, parameter_345, parameter_346, parameter_347 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + swish_29, parameter_343, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_343 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_342, + parameter_341, + parameter_340, + parameter_339, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_339, parameter_340, parameter_341, parameter_342 + + # pd_op.multiply: (8x144x44x44xf32) <- (1xf32, 8x144x44x44xf32) + multiply_9 = paddle._C_ops.multiply(data_7, batch_norm__222) + del data_7 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_16 = paddle._C_ops.add(batch_norm__216, multiply_9) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_30 = paddle._C_ops.swish(add_16) + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_17 = paddle._C_ops.add(add_15, swish_30) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + add_17, parameter_338, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_338 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_337, + parameter_336, + parameter_335, + parameter_334, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_334, parameter_335, parameter_336, parameter_337 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_31 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_31, parameter_333, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_333 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_332, + parameter_331, + parameter_330, + parameter_329, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_329, parameter_330, parameter_331, parameter_332 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_31, parameter_328, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_328 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_327, + parameter_326, + parameter_325, + parameter_324, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_324, parameter_325, parameter_326, parameter_327 + + # pd_op.multiply: (8x144x44x44xf32) <- (1xf32, 8x144x44x44xf32) + multiply_10 = paddle._C_ops.multiply(data_8, batch_norm__240) + del data_8 + + # pd_op.add: (8x144x44x44xf32) <- 
(8x144x44x44xf32, 8x144x44x44xf32) + add_18 = paddle._C_ops.add(batch_norm__234, multiply_10) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_32 = paddle._C_ops.swish(add_18) + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_19 = paddle._C_ops.add(add_17, swish_32) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + add_19, parameter_323, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_323 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_318, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.multiply: (8x144x44x44xf32) <- (1xf32, 8x144x44x44xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_21 = paddle._C_ops.add(add_19, swish_34) + + # builtin.combine: ([8x144x44x44xf32, 8x144x44x44xf32]) <- (8x144x44x44xf32, 
8x144x44x44xf32) + combine_2 = [swish_25, add_21] + + # pd_op.concat: (8x288x44x44xf32) <- ([8x144x44x44xf32, 8x144x44x44xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (8x288x1x1xf32) <- (8x288x44x44xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + mean_2, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_307, full_int_array_1) + del parameter_307 + + # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_46, reshape_2) + + # pd_op.hardsigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_22, float("0.166667"), float("0.5") + ) + del add_22 + + # pd_op.multiply: (8x288x44x44xf32) <- (8x288x44x44xf32, 8x288x1x1xf32) + multiply_12 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (8x384x44x44xf32) <- (8x288x44x44xf32, 384x288x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + multiply_12, parameter_306, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_306 + + # pd_op.batch_norm_: (8x384x44x44xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x44x44xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_305, + parameter_304, + parameter_303, + parameter_302, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_302, parameter_303, parameter_304, parameter_305 + + # pd_op.swish: (8x384x44x44xf32) <- (8x384x44x44xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (8x576x22x22xf32) <- (8x384x44x44xf32, 576x384x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_301, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_301 + + # pd_op.batch_norm_: (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_300, + parameter_299, + parameter_298, + parameter_297, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_297, parameter_298, parameter_299, parameter_300 + + # pd_op.swish: (8x576x22x22xf32) <- (8x576x22x22xf32) + swish_36 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x576x22x22xf32, 288x576x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + swish_36, parameter_296, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_296 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_49, + parameter_295, + parameter_294, + parameter_293, + parameter_292, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_292, parameter_293, parameter_294, parameter_295 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_37 = paddle._C_ops.swish(batch_norm__276) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x576x22x22xf32, 288x576x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_291, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_291 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_290, + parameter_289, + parameter_288, + parameter_287, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_287, parameter_288, parameter_289, parameter_290 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_38 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_38, parameter_286, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_286 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_285, + parameter_284, + parameter_283, + parameter_282, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_282, parameter_283, parameter_284, parameter_285 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_39 = paddle._C_ops.swish(batch_norm__288) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_39, parameter_281, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_281 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_280, + parameter_279, + parameter_278, + parameter_277, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_277, parameter_278, parameter_279, parameter_280 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_276, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_276 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + 
batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_275, + parameter_274, + parameter_273, + parameter_272, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_272, parameter_273, parameter_274, parameter_275 + + # pd_op.multiply: (8x288x22x22xf32) <- (1xf32, 8x288x22x22xf32) + multiply_13 = paddle._C_ops.multiply(data_10, batch_norm__300) + del data_10 + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_23 = paddle._C_ops.add(batch_norm__294, multiply_13) + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_40 = paddle._C_ops.swish(add_23) + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_24 = paddle._C_ops.add(swish_38, swish_40) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_54 = paddle._C_ops.conv2d( + add_24, parameter_271, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_271 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_270, + parameter_269, + parameter_268, + parameter_267, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_267, parameter_268, parameter_269, parameter_270 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_41 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_41, parameter_266, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_266 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_265, + parameter_264, + parameter_263, + parameter_262, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_262, parameter_263, parameter_264, parameter_265 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_261, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_261 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_260, + parameter_259, + parameter_258, + parameter_257, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del parameter_257, parameter_258, parameter_259, parameter_260 + + # pd_op.multiply: (8x288x22x22xf32) <- (1xf32, 8x288x22x22xf32) + multiply_14 = paddle._C_ops.multiply(data_11, batch_norm__318) + del data_11 + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_25 = paddle._C_ops.add(batch_norm__312, multiply_14) + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_42 = paddle._C_ops.swish(add_25) + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_26 = paddle._C_ops.add(add_24, swish_42) + + # builtin.combine: ([8x288x22x22xf32, 8x288x22x22xf32]) <- (8x288x22x22xf32, 8x288x22x22xf32) + combine_3 = [swish_37, add_26] + + # pd_op.concat: (8x576x22x22xf32) <- ([8x288x22x22xf32, 8x288x22x22xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (8x576x1x1xf32) <- (8x576x22x22xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (8x576x1x1xf32) <- (8x576x1x1xf32, 576x576x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + mean_3, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_255, full_int_array_1) + del full_int_array_1, parameter_255 + + # pd_op.add: (8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) + add_27 = paddle._C_ops.add(conv2d_57, reshape_3) + + # pd_op.hardsigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_27, float("0.166667"), float("0.5") + ) + del add_27 + + # pd_op.multiply: (8x576x22x22xf32) <- (8x576x22x22xf32, 8x576x1x1xf32) + multiply_15 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (8x768x22x22xf32) <- (8x576x22x22xf32, 768x576x1x1xf32) + conv2d_58 = paddle._C_ops.conv2d( + multiply_15, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (8x768x22x22xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x22x22xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (8x768x22x22xf32) <- (8x768x22x22xf32) + swish_43 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x768x22x22xf32, 288x768x1x1xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_44 = paddle._C_ops.swish(batch_norm__330) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x768x22x22xf32, 288x768x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_45 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + swish_45, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_46 = paddle._C_ops.swish(batch_norm__342) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_46, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_46, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_28 = paddle._C_ops.add(batch_norm__348, batch_norm__354) + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_47 = paddle._C_ops.swish(add_28) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_47, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_47, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_47, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([8x288x22x22xf32, 8x288x22x22xf32, 8x288x22x22xf32, 8x288x22x22xf32]) <- (8x288x22x22xf32, 8x288x22x22xf32, 8x288x22x22xf32, 8x288x22x22xf32) + combine_4 = [swish_47, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (8x1152x22x22xf32) <- ([8x288x22x22xf32, 8x288x22x22xf32, 8x288x22x22xf32, 8x288x22x22xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x1152x22x22xf32, 288x1152x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_4, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_48 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_48, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, 
parameter_217, parameter_218 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_49 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_49, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_29 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_50 = paddle._C_ops.swish(add_29) + + # builtin.combine: ([8x288x22x22xf32, 8x288x22x22xf32]) <- (8x288x22x22xf32, 8x288x22x22xf32) + combine_5 = [swish_44, swish_50] + + # pd_op.concat: (8x576x22x22xf32) <- ([8x288x22x22xf32, 8x288x22x22xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (8x576x22x22xf32) <- (8x576x22x22xf32, 576x576x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_5, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (8x576x22x22xf32) <- (8x576x22x22xf32) + swish_51 = paddle._C_ops.swish(batch_norm__384) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x576x22x22xf32, 288x576x1x1xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_51, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: 
(8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_52 = paddle._C_ops.swish(batch_norm__390) + + # pd_op.nearest_interp: (8x288x44x44xf32) <- (8x288x22x22xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_52, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([8x288x44x44xf32, 8x384x44x44xf32]) <- (8x288x44x44xf32, 8x384x44x44xf32) + combine_6 = [nearest_interp_0, swish_35] + + # pd_op.concat: (8x672x44x44xf32) <- ([8x288x44x44xf32, 8x384x44x44xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x672x44x44xf32, 144x672x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_6, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_53 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x672x44x44xf32, 144x672x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_6, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_54 = paddle._C_ops.swish(batch_norm__402) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_54, parameter_184, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + 
batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_55 = paddle._C_ops.swish(batch_norm__408) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_55, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_55, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_30 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_56 = paddle._C_ops.swish(add_30) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_75 = paddle._C_ops.conv2d( + swish_56, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_57 = paddle._C_ops.swish(batch_norm__426) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 
144x144x3x3xf32) + conv2d_76 = paddle._C_ops.conv2d( + swish_57, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_57, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_31 = paddle._C_ops.add(batch_norm__432, batch_norm__438) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_58 = paddle._C_ops.swish(add_31) + + # builtin.combine: ([8x144x44x44xf32, 8x144x44x44xf32]) <- (8x144x44x44xf32, 8x144x44x44xf32) + combine_7 = [swish_53, swish_58] + + # pd_op.concat: (8x288x44x44xf32) <- ([8x144x44x44xf32, 8x144x44x44xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (8x288x44x44xf32) <- (8x288x44x44xf32, 288x288x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + concat_7, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (8x288x44x44xf32) <- (8x288x44x44xf32) + swish_59 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x288x44x44xf32, 144x288x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_59, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, 
+ batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_60 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.nearest_interp: (8x144x88x88xf32) <- (8x144x44x44xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_60, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([8x144x88x88xf32, 8x192x88x88xf32]) <- (8x144x88x88xf32, 8x192x88x88xf32) + combine_8 = [nearest_interp_1, swish_23] + + # pd_op.concat: (8x336x88x88xf32) <- ([8x144x88x88xf32, 8x192x88x88xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x336x88x88xf32, 72x336x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + concat_8, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_61 = paddle._C_ops.swish(batch_norm__456) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x336x88x88xf32, 72x336x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + concat_8, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_62 = paddle._C_ops.swish(batch_norm__462) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_62, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_133, + parameter_132, + parameter_131, + 
parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_63 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_63, parameter_129, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_63, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_32 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_64 = paddle._C_ops.swish(add_32) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + swish_64, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_65 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_65, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + 
batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.conv2d: (8x72x88x88xf32) <- (8x72x88x88xf32, 72x72x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_65, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (8x72x88x88xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.add: (8x72x88x88xf32) <- (8x72x88x88xf32, 8x72x88x88xf32) + add_33 = paddle._C_ops.add(batch_norm__492, batch_norm__498) + + # pd_op.swish: (8x72x88x88xf32) <- (8x72x88x88xf32) + swish_66 = paddle._C_ops.swish(add_33) + + # builtin.combine: ([8x72x88x88xf32, 8x72x88x88xf32]) <- (8x72x88x88xf32, 8x72x88x88xf32) + combine_9 = [swish_61, swish_66] + + # pd_op.concat: (8x144x88x88xf32) <- ([8x72x88x88xf32, 8x72x88x88xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (8x144x88x88xf32) <- (8x144x88x88xf32, 144x144x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + concat_9, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x88x88xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (8x144x88x88xf32) <- (8x144x88x88xf32) + swish_67 = paddle._C_ops.swish(batch_norm__504) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x88x88xf32, 144x144x3x3xf32) + conv2d_89 = paddle._C_ops.conv2d( + swish_67, parameter_99, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_68 = paddle._C_ops.swish(batch_norm__510) + + # builtin.combine: ([8x144x44x44xf32, 8x288x44x44xf32]) <- (8x144x44x44xf32, 8x288x44x44xf32) + combine_10 = [swish_68, swish_59] + + # pd_op.concat: (8x432x44x44xf32) <- ([8x144x44x44xf32, 8x288x44x44xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x432x44x44xf32, 144x432x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + concat_10, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_69 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x432x44x44xf32, 144x432x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_10, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_70 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + swish_70, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_71 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_71, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # 
pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_71, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_34 = paddle._C_ops.add(batch_norm__534, batch_norm__540) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_72 = paddle._C_ops.swish(add_34) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_72, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_73 = paddle._C_ops.swish(batch_norm__546) + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 144x144x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_73, parameter_64, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.conv2d: (8x144x44x44xf32) <- (8x144x44x44xf32, 
144x144x1x1xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_73, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x44x44xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.add: (8x144x44x44xf32) <- (8x144x44x44xf32, 8x144x44x44xf32) + add_35 = paddle._C_ops.add(batch_norm__552, batch_norm__558) + + # pd_op.swish: (8x144x44x44xf32) <- (8x144x44x44xf32) + swish_74 = paddle._C_ops.swish(add_35) + + # builtin.combine: ([8x144x44x44xf32, 8x144x44x44xf32]) <- (8x144x44x44xf32, 8x144x44x44xf32) + combine_11 = [swish_69, swish_74] + + # pd_op.concat: (8x288x44x44xf32) <- ([8x144x44x44xf32, 8x144x44x44xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (8x288x44x44xf32) <- (8x288x44x44xf32, 288x288x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + concat_11, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x44x44xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (8x288x44x44xf32) <- (8x288x44x44xf32) + swish_75 = paddle._C_ops.swish(batch_norm__564) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x44x44xf32, 288x288x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_75, parameter_49, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_76 = paddle._C_ops.swish(batch_norm__570) + + # builtin.combine: ([8x288x22x22xf32, 8x576x22x22xf32]) <- (8x288x22x22xf32, 8x576x22x22xf32) + combine_12 = [swish_76, swish_51] + + # pd_op.concat: (8x864x22x22xf32) <- ([8x288x22x22xf32, 8x576x22x22xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x864x22x22xf32, 
288x864x1x1xf32) + conv2d_100 = paddle._C_ops.conv2d( + concat_12, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_77 = paddle._C_ops.swish(batch_norm__576) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x864x22x22xf32, 288x864x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + concat_12, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_78 = paddle._C_ops.swish(batch_norm__582) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_102 = paddle._C_ops.conv2d( + swish_78, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_79 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_79, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + swish_79, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_36 = paddle._C_ops.add(batch_norm__594, batch_norm__600) + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_80 = paddle._C_ops.swish(add_36) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_105 = paddle._C_ops.conv2d( + swish_80, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_81 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_81, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (8x288x22x22xf32) <- (8x288x22x22xf32, 288x288x1x1xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_81, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x22x22xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (8x288x22x22xf32) <- (8x288x22x22xf32, 8x288x22x22xf32) + add_37 = paddle._C_ops.add(batch_norm__612, batch_norm__618) + + # pd_op.swish: (8x288x22x22xf32) <- (8x288x22x22xf32) + swish_82 = paddle._C_ops.swish(add_37) + + # builtin.combine: ([8x288x22x22xf32, 8x288x22x22xf32]) <- (8x288x22x22xf32, 8x288x22x22xf32) + combine_13 = [swish_77, swish_82] + + # pd_op.concat: (8x576x22x22xf32) <- ([8x288x22x22xf32, 8x288x22x22xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (8x576x22x22xf32) <- (8x576x22x22xf32, 576x576x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x22x22xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (8x576x22x22xf32) <- (8x576x22x22xf32) + swish_0 = paddle._C_ops.swish(batch_norm__624) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_14, + add_15, + add_16, + add_17, + add_18, + add_19, + add_2, + add_20, + add_21, + add_23, + add_24, + add_25, + add_26, + add_28, + add_29, + add_3, + add_30, + add_31, + add_32, + add_33, + add_34, + add_35, + add_36, + add_37, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, 
+ batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + 
batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + 
batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + 
concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/weight_meta.py new file mode 100644 index 000000000..596382b8f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/weight_meta.py @@ -0,0 +1,5773 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [576] + dtype = "float32" + min_val = float("-0.362288") + max_val = float("0.449358") + mean = float("0.12914") + std = float("0.122983") + data = None + 
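A minimal sketch, assuming a NumPy-based consumer, of how a Program_weight_tensor_parameter_* entry such as the class above could be turned into a synthetic tensor when data is None. Only shape, dtype, min_val, max_val, mean, and std come from the metadata in this patch; the helper name materialize, the random seed, and the clipped-normal sampling strategy are illustrative assumptions, not APIs defined here.

import numpy as np

def materialize(meta, seed=0):
    # Hypothetical helper (not part of this patch): rebuild a stand-in weight
    # tensor from a Program_weight_tensor_parameter_* metadata class.
    rng = np.random.default_rng(seed)
    if meta.data is not None:
        # If raw values were recorded, just cast and reshape them.
        return np.asarray(meta.data, dtype=meta.dtype).reshape(meta.shape)
    # Otherwise draw values matching the recorded mean/std and clip them
    # into the recorded [min_val, max_val] range.
    sample = rng.normal(loc=meta.mean, scale=meta.std, size=meta.shape)
    return np.clip(sample, meta.min_val, meta.max_val).astype(meta.dtype)

# Assumed usage: w0 = materialize(Program_weight_tensor_parameter_0)  # shape (576,), dtype float32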
+ +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [576] + dtype = "float32" + min_val = float("0.959833") + max_val = float("1.62381") + mean = float("1.10358") + std = float("0.059288") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [576] + dtype = "float32" + min_val = float("0.00129316") + max_val = float("0.0483277") + mean = float("0.00458454") + std = float("0.00370082") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [576] + dtype = "float32" + min_val = float("-0.174066") + max_val = float("0.102802") + mean = float("-0.0213059") + std = float("0.0225273") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0610506") + max_val = float("0.0399353") + mean = float("-0.000169931") + std = float("0.00294844") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [288] + dtype = "float32" + min_val = float("-0.277053") + max_val = float("0.0487064") + mean = float("-0.0554469") + std = float("0.0590935") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [288] + dtype = "float32" + min_val = float("0.91312") + max_val = float("1.07154") + mean = float("0.967624") + std = float("0.0227665") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [288] + dtype = "float32" + min_val = float("0.000956441") + max_val = float("0.0136621") + mean = float("0.00301631") + std = float("0.00163072") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [288] + dtype = "float32" + min_val = float("-0.0429144") + max_val = float("0.0539166") + mean = float("0.00715211") + std = float("0.0172047") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0343767") + max_val = float("0.024518") + mean = float("8.67399e-05") + std = float("0.00218595") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [288] + dtype = "float32" + min_val = float("-0.277053") + max_val = float("0.0487064") + mean = float("-0.0554469") + std = float("0.0590935") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [288] + dtype = "float32" + min_val = float("0.978689") + max_val = float("1.21401") + mean = float("1.04689") + std = float("0.0358429") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [288] + dtype = "float32" + min_val = float("0.0017241") + max_val = float("0.0266633") + mean = float("0.00469037") + std = float("0.00218125") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [288] + dtype = "float32" + min_val = float("-0.12266") + max_val = float("0.077494") + mean = float("-0.0287714") + std = float("0.0218897") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0376443") + max_val = float("0.0460261") + mean = float("-8.55224e-05") + std = float("0.00162721") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [288] + dtype = "float32" + min_val = float("-0.38495") + max_val = float("0.0651747") + mean = float("-0.105368") + 
std = float("0.0694781") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [288] + dtype = "float32" + min_val = float("0.898808") + max_val = float("1.31774") + mean = float("1.03947") + std = float("0.055923") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [288] + dtype = "float32" + min_val = float("0.00432333") + max_val = float("0.0389504") + mean = float("0.0106644") + std = float("0.00484943") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [288] + dtype = "float32" + min_val = float("-0.159985") + max_val = float("0.0928115") + mean = float("-0.00964363") + std = float("0.0254645") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0363236") + max_val = float("0.0522597") + mean = float("-8.2268e-05") + std = float("0.00184542") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [288] + dtype = "float32" + min_val = float("-0.298694") + max_val = float("0.0134413") + mean = float("-0.109273") + std = float("0.0595775") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [288] + dtype = "float32" + min_val = float("0.888376") + max_val = float("1.09061") + mean = float("0.966059") + std = float("0.0256589") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [288] + dtype = "float32" + min_val = float("0.000965433") + max_val = float("0.0090447") + mean = float("0.00459926") + std = float("0.0015487") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [288] + dtype = "float32" + min_val = float("-0.0509661") + max_val = float("0.0488733") + mean = float("0.00833798") + std = float("0.0143927") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0321773") + max_val = float("0.0294108") + mean = float("1.22889e-05") + std = float("0.00228888") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [288] + dtype = "float32" + min_val = float("-0.298694") + max_val = float("0.0134413") + mean = float("-0.109273") + std = float("0.0595775") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [288] + dtype = "float32" + min_val = float("0.960586") + max_val = float("1.1997") + mean = float("1.0432") + std = float("0.0390899") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [288] + dtype = "float32" + min_val = float("0.00282242") + max_val = float("0.0237102") + mean = float("0.00738641") + std = float("0.00285128") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [288] + dtype = "float32" + min_val = float("-0.138053") + max_val = float("0.0382676") + mean = float("-0.0345432") + std = float("0.0217736") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0383417") + max_val = float("0.0468039") + mean = float("-0.000103682") + std = float("0.00178619") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [288] + dtype = "float32" + min_val = float("-0.296268") + 
max_val = float("0.139536") + mean = float("-0.0976987") + std = float("0.0598689") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [288] + dtype = "float32" + min_val = float("0.855291") + max_val = float("1.30343") + mean = float("1.03224") + std = float("0.0728338") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [288] + dtype = "float32" + min_val = float("0.00457156") + max_val = float("0.0460309") + mean = float("0.0135629") + std = float("0.00645769") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [288] + dtype = "float32" + min_val = float("-0.131222") + max_val = float("0.119131") + mean = float("-0.0426526") + std = float("0.0357571") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0410139") + max_val = float("0.0537984") + mean = float("-0.000111361") + std = float("0.00198824") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [288] + dtype = "float32" + min_val = float("-0.214792") + max_val = float("0.173212") + mean = float("-0.0674506") + std = float("0.0465222") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [288] + dtype = "float32" + min_val = float("0.913066") + max_val = float("1.1699") + mean = float("1.02378") + std = float("0.0430341") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [288] + dtype = "float32" + min_val = float("0.0025185") + max_val = float("0.0130036") + mean = float("0.00489458") + std = float("0.00151927") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [288] + dtype = "float32" + min_val = float("-0.0677402") + max_val = float("0.0532608") + mean = float("-0.0157193") + std = float("0.020735") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [288, 864, 1, 1] + dtype = "float32" + min_val = float("-0.0743289") + max_val = float("0.0840202") + mean = float("-0.000108799") + std = float("0.00289064") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [288] + dtype = "float32" + min_val = float("-0.0914344") + max_val = float("0.0301427") + mean = float("-0.0277946") + std = float("0.0201338") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [288] + dtype = "float32" + min_val = float("0.897317") + max_val = float("1.09538") + mean = float("1.00988") + std = float("0.0255168") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [288] + dtype = "float32" + min_val = float("0.00229717") + max_val = float("0.0167512") + mean = float("0.00380674") + std = float("0.00142097") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [288] + dtype = "float32" + min_val = float("-0.0669112") + max_val = float("0.0525785") + mean = float("-0.0173") + std = float("0.0186564") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [288, 864, 1, 1] + dtype = "float32" + min_val = float("-0.0499498") + max_val = float("0.0411866") + mean = float("-0.000135864") + std = float("0.00250907") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = 
[288] + dtype = "float32" + min_val = float("-0.140256") + max_val = float("0.00420121") + mean = float("-0.0495368") + std = float("0.0251426") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [288] + dtype = "float32" + min_val = float("0.942623") + max_val = float("1.21461") + mean = float("1.04834") + std = float("0.0345309") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [288] + dtype = "float32" + min_val = float("0.00572061") + max_val = float("0.0562203") + mean = float("0.0142031") + std = float("0.00687571") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [288] + dtype = "float32" + min_val = float("-0.368825") + max_val = float("0.182583") + mean = float("-0.0466586") + std = float("0.0759914") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0289565") + max_val = float("0.0391188") + mean = float("-4.00886e-05") + std = float("0.00156564") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [288] + dtype = "float32" + min_val = float("-0.70149") + max_val = float("0.887479") + mean = float("0.278636") + std = float("0.233831") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [288] + dtype = "float32" + min_val = float("0.664006") + max_val = float("1.51985") + mean = float("1.16301") + std = float("0.119319") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [288] + dtype = "float32" + min_val = float("0.00257059") + max_val = float("0.0664791") + mean = float("0.0100099") + std = float("0.00635828") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [288] + dtype = "float32" + min_val = float("-0.17167") + max_val = float("0.212116") + mean = float("-0.0230739") + std = float("0.0344705") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.105698") + max_val = float("0.0859006") + mean = float("-0.00032157") + std = float("0.0064008") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [144] + dtype = "float32" + min_val = float("-0.347933") + max_val = float("0.143528") + mean = float("-0.0668646") + std = float("0.0926285") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [144] + dtype = "float32" + min_val = float("0.824888") + max_val = float("1.10432") + mean = float("0.930092") + std = float("0.0365765") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [144] + dtype = "float32" + min_val = float("0.00142249") + max_val = float("0.0184272") + mean = float("0.005649") + std = float("0.00306018") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [144] + dtype = "float32" + min_val = float("-0.0522616") + max_val = float("0.0508058") + mean = float("-0.00193035") + std = float("0.0130056") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0579274") + max_val = float("0.025865") + mean = float("-0.000238147") + std = float("0.00444248") + data = None + + +class 
Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [144] + dtype = "float32" + min_val = float("-0.347933") + max_val = float("0.143528") + mean = float("-0.0668646") + std = float("0.0926285") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [144] + dtype = "float32" + min_val = float("0.690773") + max_val = float("1.27243") + mean = float("1.06102") + std = float("0.0788875") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [144] + dtype = "float32" + min_val = float("0.00362904") + max_val = float("0.0248225") + mean = float("0.00887979") + std = float("0.0033185") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [144] + dtype = "float32" + min_val = float("-0.116045") + max_val = float("0.0937839") + mean = float("-0.018649") + std = float("0.0283403") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0532218") + max_val = float("0.0526993") + mean = float("-0.000100581") + std = float("0.00343154") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [144] + dtype = "float32" + min_val = float("-0.438865") + max_val = float("0.120462") + mean = float("-0.165154") + std = float("0.123462") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [144] + dtype = "float32" + min_val = float("0.851416") + max_val = float("1.29568") + mean = float("1.03126") + std = float("0.0907014") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [144] + dtype = "float32" + min_val = float("0.00441354") + max_val = float("0.0518493") + mean = float("0.0138971") + std = float("0.00679398") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [144] + dtype = "float32" + min_val = float("-0.141425") + max_val = float("0.0860137") + mean = float("-0.00137147") + std = float("0.024053") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0543472") + max_val = float("0.0687619") + mean = float("-0.000164431") + std = float("0.00382929") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [144] + dtype = "float32" + min_val = float("-0.440759") + max_val = float("0.028597") + mean = float("-0.192538") + std = float("0.0886186") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [144] + dtype = "float32" + min_val = float("0.702981") + max_val = float("1.062") + mean = float("0.918179") + std = float("0.0515255") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [144] + dtype = "float32" + min_val = float("0.00215835") + max_val = float("0.0134243") + mean = float("0.00671654") + std = float("0.00200577") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [144] + dtype = "float32" + min_val = float("-0.0377384") + max_val = float("0.0318142") + mean = float("0.00818696") + std = float("0.0133059") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0501252") + max_val = float("0.0536087") + mean = 
float("-0.000306018") + std = float("0.00493908") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [144] + dtype = "float32" + min_val = float("-0.440759") + max_val = float("0.028597") + mean = float("-0.192538") + std = float("0.0886186") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [144] + dtype = "float32" + min_val = float("0.76617") + max_val = float("1.24322") + mean = float("1.05286") + std = float("0.0555308") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [144] + dtype = "float32" + min_val = float("0.00535803") + max_val = float("0.0361625") + mean = float("0.012763") + std = float("0.00552772") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [144] + dtype = "float32" + min_val = float("-0.103218") + max_val = float("0.0428863") + mean = float("-0.0169505") + std = float("0.018052") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0536258") + max_val = float("0.0680722") + mean = float("-0.000145796") + std = float("0.00380117") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [144] + dtype = "float32" + min_val = float("-0.508139") + max_val = float("0.251534") + mean = float("-0.219016") + std = float("0.129626") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [144] + dtype = "float32" + min_val = float("0.783159") + max_val = float("1.53055") + mean = float("1.02621") + std = float("0.123448") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [144] + dtype = "float32" + min_val = float("0.00898156") + max_val = float("0.0585441") + mean = float("0.0184196") + std = float("0.00942207") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [144] + dtype = "float32" + min_val = float("-0.112277") + max_val = float("0.0151302") + mean = float("-0.0389433") + std = float("0.020341") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0669898") + max_val = float("0.0822895") + mean = float("-0.000218159") + std = float("0.00430282") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [144] + dtype = "float32" + min_val = float("-0.597168") + max_val = float("0.0569208") + mean = float("-0.154642") + std = float("0.0801667") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [144] + dtype = "float32" + min_val = float("0.871386") + max_val = float("1.41197") + mean = float("1.02579") + std = float("0.071744") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [144] + dtype = "float32" + min_val = float("0.00398803") + max_val = float("0.0235192") + mean = float("0.00800034") + std = float("0.00283519") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [144] + dtype = "float32" + min_val = float("-0.0847656") + max_val = float("0.0635583") + mean = float("-0.0234772") + std = float("0.0233543") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [144, 432, 1, 1] + dtype = "float32" + min_val = 
float("-0.06626") + max_val = float("0.0780548") + mean = float("-0.000295776") + std = float("0.0060698") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [144] + dtype = "float32" + min_val = float("-0.145897") + max_val = float("0.0750571") + mean = float("-0.026613") + std = float("0.0382628") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [144] + dtype = "float32" + min_val = float("0.885991") + max_val = float("1.40596") + mean = float("0.994105") + std = float("0.0556773") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [144] + dtype = "float32" + min_val = float("0.0025042") + max_val = float("0.0281932") + mean = float("0.00618505") + std = float("0.0031663") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [144] + dtype = "float32" + min_val = float("-0.0510581") + max_val = float("0.0420222") + mean = float("-0.0117801") + std = float("0.0193074") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [144, 432, 1, 1] + dtype = "float32" + min_val = float("-0.064081") + max_val = float("0.0681386") + mean = float("-0.00013769") + std = float("0.00523615") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [144] + dtype = "float32" + min_val = float("-0.257452") + max_val = float("0.015821") + mean = float("-0.0905176") + std = float("0.052893") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [144] + dtype = "float32" + min_val = float("0.816109") + max_val = float("1.19195") + mean = float("1.02135") + std = float("0.0564293") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [144] + dtype = "float32" + min_val = float("0.00645178") + max_val = float("0.0626324") + mean = float("0.0183814") + std = float("0.00939741") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [144] + dtype = "float32" + min_val = float("-0.570869") + max_val = float("0.343259") + mean = float("-0.0108601") + std = float("0.126086") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0409236") + max_val = float("0.048868") + mean = float("-1.71686e-05") + std = float("0.00338041") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [144] + dtype = "float32" + min_val = float("-0.792002") + max_val = float("2.01537") + mean = float("0.407788") + std = float("0.536583") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [144] + dtype = "float32" + min_val = float("0.640513") + max_val = float("1.8596") + mean = float("1.10053") + std = float("0.254284") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [144] + dtype = "float32" + min_val = float("0.00326116") + max_val = float("0.0638979") + mean = float("0.0177114") + std = float("0.0122087") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [144] + dtype = "float32" + min_val = float("-0.212345") + max_val = float("0.21537") + mean = float("-0.01514") + std = float("0.0630484") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = 
[144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.183115") + max_val = float("0.101079") + mean = float("-0.000557806") + std = float("0.0130118") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [72] + dtype = "float32" + min_val = float("-0.595082") + max_val = float("0.495455") + mean = float("0.00878126") + std = float("0.279346") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [72] + dtype = "float32" + min_val = float("0.543517") + max_val = float("1.27958") + mean = float("0.826736") + std = float("0.105059") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [72] + dtype = "float32" + min_val = float("0.00122015") + max_val = float("0.0154407") + mean = float("0.00658974") + std = float("0.00404103") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [72] + dtype = "float32" + min_val = float("-0.0446201") + max_val = float("0.064399") + mean = float("-0.00605426") + std = float("0.0159659") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0795237") + max_val = float("0.0571354") + mean = float("-0.000915699") + std = float("0.00851435") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [72] + dtype = "float32" + min_val = float("-0.595082") + max_val = float("0.495455") + mean = float("0.00878126") + std = float("0.279346") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [72] + dtype = "float32" + min_val = float("0.691058") + max_val = float("1.5848") + mean = float("1.06454") + std = float("0.138823") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [72] + dtype = "float32" + min_val = float("0.00330791") + max_val = float("0.034607") + mean = float("0.0126356") + std = float("0.00726009") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [72] + dtype = "float32" + min_val = float("-0.201838") + max_val = float("0.0693475") + mean = float("-0.0139591") + std = float("0.0478809") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.08845") + max_val = float("0.0868661") + mean = float("-0.000129166") + std = float("0.00705182") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [72] + dtype = "float32" + min_val = float("-0.786233") + max_val = float("0.655683") + mean = float("-0.316842") + std = float("0.293749") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [72] + dtype = "float32" + min_val = float("0.318351") + max_val = float("2.16811") + mean = float("0.868578") + std = float("0.238406") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [72] + dtype = "float32" + min_val = float("0.00376185") + max_val = float("0.028148") + mean = float("0.0104991") + std = float("0.00530672") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [72] + dtype = "float32" + min_val = float("-0.0782533") + max_val = float("0.0793084") + mean = float("0.0180777") + std = float("0.0323085") + data = None + + +class 
Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.113851") + max_val = float("0.0908244") + mean = float("-0.000518535") + std = float("0.00805852") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [72] + dtype = "float32" + min_val = float("-0.528435") + max_val = float("0.198908") + mean = float("-0.259137") + std = float("0.171541") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [72] + dtype = "float32" + min_val = float("0.598134") + max_val = float("0.968256") + mean = float("0.79081") + std = float("0.0731823") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [72] + dtype = "float32" + min_val = float("0.00316152") + max_val = float("0.0149694") + mean = float("0.0075567") + std = float("0.00213889") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [72] + dtype = "float32" + min_val = float("-0.0505949") + max_val = float("0.0346683") + mean = float("0.0038429") + std = float("0.0160493") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0718139") + max_val = float("0.0559854") + mean = float("-0.0017447") + std = float("0.0102887") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [72] + dtype = "float32" + min_val = float("-0.528435") + max_val = float("0.198908") + mean = float("-0.259137") + std = float("0.171541") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [72] + dtype = "float32" + min_val = float("0.704769") + max_val = float("1.22593") + mean = float("0.978927") + std = float("0.110584") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [72] + dtype = "float32" + min_val = float("0.00780471") + max_val = float("0.04569") + mean = float("0.0171251") + std = float("0.00772119") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [72] + dtype = "float32" + min_val = float("-0.13032") + max_val = float("0.0724418") + mean = float("0.00295902") + std = float("0.0350437") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0925636") + max_val = float("0.118392") + mean = float("-0.000435324") + std = float("0.00814822") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [72] + dtype = "float32" + min_val = float("-0.976006") + max_val = float("0.908048") + mean = float("-0.372466") + std = float("0.381594") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [72] + dtype = "float32" + min_val = float("0.597147") + max_val = float("1.22176") + mean = float("0.865267") + std = float("0.120154") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [72] + dtype = "float32" + min_val = float("0.0034554") + max_val = float("0.0480018") + mean = float("0.00893197") + std = float("0.00741501") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [72] + dtype = "float32" + min_val = float("-0.240845") + max_val = float("0.240686") + mean = 
float("-0.0275796") + std = float("0.0894982") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0884518") + max_val = float("0.0878903") + mean = float("-0.000276912") + std = float("0.0091165") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [72] + dtype = "float32" + min_val = float("-1.03398") + max_val = float("0.872583") + mean = float("-0.149511") + std = float("0.507671") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [72] + dtype = "float32" + min_val = float("0.312956") + max_val = float("1.10253") + mean = float("0.653485") + std = float("0.173645") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [72] + dtype = "float32" + min_val = float("0.00556569") + max_val = float("0.0440435") + mean = float("0.0134337") + std = float("0.0070351") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [72] + dtype = "float32" + min_val = float("-0.142948") + max_val = float("0.124109") + mean = float("-0.00558489") + std = float("0.0503753") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [72, 336, 1, 1] + dtype = "float32" + min_val = float("-0.146463") + max_val = float("0.12742") + mean = float("-0.000473706") + std = float("0.0119103") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [72] + dtype = "float32" + min_val = float("-0.120063") + max_val = float("0.411402") + mean = float("0.165329") + std = float("0.110423") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [72] + dtype = "float32" + min_val = float("0.658228") + max_val = float("1.34456") + mean = float("0.848023") + std = float("0.109043") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [72] + dtype = "float32" + min_val = float("0.00163079") + max_val = float("0.030433") + mean = float("0.00603291") + std = float("0.00326364") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [72] + dtype = "float32" + min_val = float("-0.0877211") + max_val = float("0.0744303") + mean = float("-0.00821605") + std = float("0.0308951") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [72, 336, 1, 1] + dtype = "float32" + min_val = float("-0.10784") + max_val = float("0.106855") + mean = float("7.85357e-05") + std = float("0.00826513") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [144] + dtype = "float32" + min_val = float("-0.475637") + max_val = float("0.167652") + mean = float("-0.0836531") + std = float("0.124281") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [144] + dtype = "float32" + min_val = float("0.616228") + max_val = float("1.53919") + mean = float("0.783259") + std = float("0.11291") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [144] + dtype = "float32" + min_val = float("0.00614064") + max_val = float("0.0604048") + mean = float("0.0126297") + std = float("0.00637213") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [144] + dtype = "float32" + min_val = 
float("-0.1093") + max_val = float("0.0390007") + mean = float("-0.026206") + std = float("0.0243022") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0817999") + max_val = float("0.0912238") + mean = float("-0.000601268") + std = float("0.00880489") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [288] + dtype = "float32" + min_val = float("-0.432954") + max_val = float("0.194678") + mean = float("-0.113055") + std = float("0.0835479") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [288] + dtype = "float32" + min_val = float("0.799255") + max_val = float("1.51194") + mean = float("1.01004") + std = float("0.0940277") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [288] + dtype = "float32" + min_val = float("0.00747447") + max_val = float("0.0420308") + mean = float("0.0128165") + std = float("0.00521457") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [288] + dtype = "float32" + min_val = float("-0.199748") + max_val = float("0.153784") + mean = float("-0.0405561") + std = float("0.0425423") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.105363") + max_val = float("0.11786") + mean = float("-0.000714379") + std = float("0.00838783") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [144] + dtype = "float32" + min_val = float("-0.412839") + max_val = float("0.0253786") + mean = float("-0.106026") + std = float("0.0667888") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [144] + dtype = "float32" + min_val = float("0.689866") + max_val = float("0.953353") + mean = float("0.875231") + std = float("0.0374441") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [144] + dtype = "float32" + min_val = float("0.00388403") + max_val = float("0.0282957") + mean = float("0.00811244") + std = float("0.00272715") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [144] + dtype = "float32" + min_val = float("-0.044499") + max_val = float("0.0423913") + mean = float("-0.0131713") + std = float("0.0192238") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0417203") + max_val = float("0.0335959") + mean = float("-0.000851064") + std = float("0.00589312") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [144] + dtype = "float32" + min_val = float("-0.412839") + max_val = float("0.0253786") + mean = float("-0.106026") + std = float("0.0667888") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [144] + dtype = "float32" + min_val = float("0.869598") + max_val = float("1.14128") + mean = float("0.98933") + std = float("0.0441688") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [144] + dtype = "float32" + min_val = float("0.0085392") + max_val = float("0.0366966") + mean = float("0.0171334") + std = float("0.00533396") + data = None + + +class 
Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [144] + dtype = "float32" + min_val = float("-0.0865187") + max_val = float("0.0756034") + mean = float("-0.0217435") + std = float("0.0268494") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0706626") + max_val = float("0.126119") + mean = float("-0.000129692") + std = float("0.00417293") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [144] + dtype = "float32" + min_val = float("-0.502008") + max_val = float("-0.00721809") + mean = float("-0.214768") + std = float("0.107079") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [144] + dtype = "float32" + min_val = float("0.835917") + max_val = float("1.4006") + mean = float("1.05598") + std = float("0.0868177") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [144] + dtype = "float32" + min_val = float("0.0207381") + max_val = float("0.118301") + mean = float("0.0345071") + std = float("0.0112799") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [144] + dtype = "float32" + min_val = float("-0.111173") + max_val = float("0.0754279") + mean = float("-0.0292226") + std = float("0.029029") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0637213") + max_val = float("0.0943239") + mean = float("-0.000282505") + std = float("0.00487901") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [144] + dtype = "float32" + min_val = float("-0.442179") + max_val = float("0.0299581") + mean = float("-0.208666") + std = float("0.0834879") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [144] + dtype = "float32" + min_val = float("0.794784") + max_val = float("1.16393") + mean = float("0.941937") + std = float("0.0540021") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [144] + dtype = "float32" + min_val = float("0.00237121") + max_val = float("0.0141578") + mean = float("0.0050522") + std = float("0.00156333") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [144] + dtype = "float32" + min_val = float("-0.0397734") + max_val = float("0.0360309") + mean = float("-0.00888157") + std = float("0.0112361") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0445594") + max_val = float("0.0617702") + mean = float("-0.000723127") + std = float("0.0074341") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [144] + dtype = "float32" + min_val = float("-0.442179") + max_val = float("0.0299581") + mean = float("-0.208666") + std = float("0.0834879") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [144] + dtype = "float32" + min_val = float("0.8535") + max_val = float("1.20209") + mean = float("0.999954") + std = float("0.064928") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [144] + dtype = "float32" + min_val = float("0.00955318") + max_val = 
float("0.0374721") + mean = float("0.0148773") + std = float("0.003795") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [144] + dtype = "float32" + min_val = float("-0.065741") + max_val = float("0.0473994") + mean = float("-0.0151164") + std = float("0.022939") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0567906") + max_val = float("0.067963") + mean = float("-0.000190391") + std = float("0.00428503") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [144] + dtype = "float32" + min_val = float("-0.617877") + max_val = float("-0.0112637") + mean = float("-0.270694") + std = float("0.107149") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [144] + dtype = "float32" + min_val = float("0.885097") + max_val = float("1.60149") + mean = float("1.02764") + std = float("0.083049") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [144] + dtype = "float32" + min_val = float("0.0113595") + max_val = float("0.0461146") + mean = float("0.0192173") + std = float("0.00533867") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [144] + dtype = "float32" + min_val = float("-0.198802") + max_val = float("0.0810298") + mean = float("-0.0382598") + std = float("0.0334142") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0580541") + max_val = float("0.0719548") + mean = float("-0.000275777") + std = float("0.00531357") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [144] + dtype = "float32" + min_val = float("-0.677544") + max_val = float("0.299334") + mean = float("-0.248679") + std = float("0.139757") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [144] + dtype = "float32" + min_val = float("0.818217") + max_val = float("1.25628") + mean = float("1.01861") + std = float("0.0833089") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [144] + dtype = "float32" + min_val = float("0.00518558") + max_val = float("0.0195469") + mean = float("0.00811605") + std = float("0.00209044") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [144] + dtype = "float32" + min_val = float("-0.0686079") + max_val = float("0.11164") + mean = float("0.0120371") + std = float("0.0262681") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [144, 672, 1, 1] + dtype = "float32" + min_val = float("-0.0517024") + max_val = float("0.0799785") + mean = float("-0.000298696") + std = float("0.00704493") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [144] + dtype = "float32" + min_val = float("-0.219011") + max_val = float("0.481675") + mean = float("0.00623938") + std = float("0.099387") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [144] + dtype = "float32" + min_val = float("0.940855") + max_val = float("1.30879") + mean = float("1.06488") + std = float("0.0758104") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + 
shape = [144] + dtype = "float32" + min_val = float("0.004564") + max_val = float("0.0498838") + mean = float("0.00863895") + std = float("0.00418386") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [144] + dtype = "float32" + min_val = float("-0.0737211") + max_val = float("0.0499879") + mean = float("-0.00426891") + std = float("0.0237441") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [144, 672, 1, 1] + dtype = "float32" + min_val = float("-0.282458") + max_val = float("0.125755") + mean = float("-0.000223402") + std = float("0.0067218") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [288] + dtype = "float32" + min_val = float("-0.469894") + max_val = float("-0.0767075") + mean = float("-0.241717") + std = float("0.0584654") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [288] + dtype = "float32" + min_val = float("0.692232") + max_val = float("1.0465") + mean = float("0.8174") + std = float("0.0536684") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [288] + dtype = "float32" + min_val = float("0.00712014") + max_val = float("0.0421997") + mean = float("0.011501") + std = float("0.00355806") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [288] + dtype = "float32" + min_val = float("-0.0881356") + max_val = float("0.0729185") + mean = float("-0.024236") + std = float("0.0206652") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0677108") + max_val = float("0.0562247") + mean = float("-0.000355994") + std = float("0.00559678") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [576] + dtype = "float32" + min_val = float("-0.22339") + max_val = float("0.236177") + mean = float("-0.12625") + std = float("0.0407308") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [576] + dtype = "float32" + min_val = float("0.897616") + max_val = float("1.38115") + mean = float("1.04235") + std = float("0.0444433") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [576] + dtype = "float32" + min_val = float("0.00619308") + max_val = float("0.0249644") + mean = float("0.0103028") + std = float("0.00255343") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [576] + dtype = "float32" + min_val = float("-0.124002") + max_val = float("0.0974204") + mean = float("-0.0399363") + std = float("0.0247942") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0792843") + max_val = float("0.109509") + mean = float("-0.0003879") + std = float("0.00480209") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [288] + dtype = "float32" + min_val = float("-0.238565") + max_val = float("0.268331") + mean = float("-0.0786572") + std = float("0.0529701") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [288] + dtype = "float32" + min_val = float("0.780756") + max_val = float("1.062") + mean = float("0.947003") + std = float("0.0301211") + data = 
None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [288] + dtype = "float32" + min_val = float("0.00354166") + max_val = float("0.0351247") + mean = float("0.00947589") + std = float("0.00396193") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [288] + dtype = "float32" + min_val = float("-0.0619501") + max_val = float("0.0492365") + mean = float("-0.00897979") + std = float("0.0161089") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0359077") + max_val = float("0.0348259") + mean = float("-0.000152082") + std = float("0.00409594") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [288] + dtype = "float32" + min_val = float("-0.238565") + max_val = float("0.268331") + mean = float("-0.0786572") + std = float("0.0529701") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [288] + dtype = "float32" + min_val = float("0.871119") + max_val = float("1.23845") + mean = float("1.0049") + std = float("0.04152") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [288] + dtype = "float32" + min_val = float("0.0153569") + max_val = float("0.266046") + mean = float("0.0463224") + std = float("0.0211782") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [288] + dtype = "float32" + min_val = float("-0.277836") + max_val = float("0.0521646") + mean = float("-0.0775249") + std = float("0.0504685") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0444196") + max_val = float("0.0670252") + mean = float("-0.000174165") + std = float("0.00184631") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [288] + dtype = "float32" + min_val = float("-0.154359") + max_val = float("0.175988") + mean = float("-0.0450327") + std = float("0.0404094") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [288] + dtype = "float32" + min_val = float("0.903947") + max_val = float("1.21818") + mean = float("1.03597") + std = float("0.0582841") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [288] + dtype = "float32" + min_val = float("0.0119644") + max_val = float("0.164862") + mean = float("0.0429402") + std = float("0.0179463") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [288] + dtype = "float32" + min_val = float("-0.189831") + max_val = float("0.116786") + mean = float("-0.0534389") + std = float("0.0533372") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0348364") + max_val = float("0.0582555") + mean = float("-0.000119513") + std = float("0.00228045") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [288] + dtype = "float32" + min_val = float("-0.228589") + max_val = float("0.0549134") + mean = float("-0.0677786") + std = float("0.0398048") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [288] + dtype = "float32" + min_val = float("0.899106") + max_val 
= float("1.30581") + mean = float("1.04054") + std = float("0.0698986") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [288] + dtype = "float32" + min_val = float("0.0391105") + max_val = float("0.393912") + mean = float("0.120364") + std = float("0.0423387") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [288] + dtype = "float32" + min_val = float("-1.59927") + max_val = float("1.69301") + mean = float("-0.0713779") + std = float("0.48722") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [288, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0737986") + max_val = float("0.0688939") + mean = float("4.73174e-05") + std = float("0.00374366") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [288] + dtype = "float32" + min_val = float("-0.103986") + max_val = float("0.0383983") + mean = float("-0.00708182") + std = float("0.0181047") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [288] + dtype = "float32" + min_val = float("0.89851") + max_val = float("1.15678") + mean = float("0.961384") + std = float("0.026265") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [288] + dtype = "float32" + min_val = float("0.00292128") + max_val = float("0.0109473") + mean = float("0.00493734") + std = float("0.00120036") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [288] + dtype = "float32" + min_val = float("-0.0974484") + max_val = float("0.0769811") + mean = float("-0.0477963") + std = float("0.0234812") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.030412") + max_val = float("0.0398927") + mean = float("-0.000811791") + std = float("0.00405681") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [288] + dtype = "float32" + min_val = float("-0.103986") + max_val = float("0.0383983") + mean = float("-0.00708182") + std = float("0.0181047") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [288] + dtype = "float32" + min_val = float("0.923547") + max_val = float("1.3082") + mean = float("1.02502") + std = float("0.0618049") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [288] + dtype = "float32" + min_val = float("0.00839344") + max_val = float("0.0474496") + mean = float("0.0212471") + std = float("0.00636327") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [288] + dtype = "float32" + min_val = float("-0.229459") + max_val = float("0.103812") + mean = float("-0.107552") + std = float("0.0460312") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0336992") + max_val = float("0.0421067") + mean = float("-0.000230375") + std = float("0.00195064") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [288] + dtype = "float32" + min_val = float("-0.223137") + max_val = float("0.0285677") + mean = float("-0.0470978") + std = float("0.0306722") + data = None + + +class Program_weight_tensor_parameter_236: + name = 
"parameter_236" + shape = [288] + dtype = "float32" + min_val = float("0.92772") + max_val = float("1.36008") + mean = float("1.05066") + std = float("0.0555744") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [288] + dtype = "float32" + min_val = float("0.0125726") + max_val = float("0.0596855") + mean = float("0.0240369") + std = float("0.00548803") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [288] + dtype = "float32" + min_val = float("-0.420535") + max_val = float("0.435379") + mean = float("-0.105324") + std = float("0.0745189") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0284336") + max_val = float("0.0366069") + mean = float("-0.000232251") + std = float("0.00238144") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [288] + dtype = "float32" + min_val = float("-0.216098") + max_val = float("0.11883") + mean = float("-0.0630303") + std = float("0.051816") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [288] + dtype = "float32" + min_val = float("0.985544") + max_val = float("1.24322") + mean = float("1.05359") + std = float("0.0334495") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [288] + dtype = "float32" + min_val = float("0.0173404") + max_val = float("0.0690035") + mean = float("0.0265296") + std = float("0.00603239") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [288] + dtype = "float32" + min_val = float("-0.175859") + max_val = float("0.18352") + mean = float("-0.0630552") + std = float("0.0449823") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [288, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0314934") + max_val = float("0.0604993") + mean = float("-0.000352562") + std = float("0.0041576") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [288] + dtype = "float32" + min_val = float("-0.0811029") + max_val = float("0.0354142") + mean = float("-0.00932044") + std = float("0.0172845") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [288] + dtype = "float32" + min_val = float("1.01192") + max_val = float("1.1389") + mean = float("1.07761") + std = float("0.0239546") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [288] + dtype = "float32" + min_val = float("0.0179649") + max_val = float("0.0378785") + mean = float("0.0239671") + std = float("0.00298041") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [288] + dtype = "float32" + min_val = float("-0.136712") + max_val = float("0.0275342") + mean = float("-0.0599637") + std = float("0.0260147") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [288, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0219609") + max_val = float("0.0394181") + mean = float("-0.000345307") + std = float("0.00411772") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [768] + dtype = "float32" + min_val = float("-4.16215") + max_val = float("-0.102987") + mean = float("-2.2303") + std = 
float("0.545125") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [768] + dtype = "float32" + min_val = float("1.67243") + max_val = float("4.69627") + mean = float("3.30597") + std = float("0.316598") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [768] + dtype = "float32" + min_val = float("0.00334325") + max_val = float("0.0150318") + mean = float("0.0057207") + std = float("0.0013795") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [768] + dtype = "float32" + min_val = float("-0.104136") + max_val = float("0.145484") + mean = float("-0.038804") + std = float("0.0225052") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [768, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0577984") + max_val = float("0.0708883") + mean = float("-0.000470629") + std = float("0.00452998") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [576] + dtype = "float32" + min_val = float("-0.0193774") + max_val = float("0.00104216") + mean = float("-0.00108322") + std = float("0.00283016") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.181513") + max_val = float("0.183256") + mean = float("-0.00033298") + std = float("0.00239475") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [288] + dtype = "float32" + min_val = float("-2.05007") + max_val = float("0.975688") + mean = float("-0.271675") + std = float("0.352781") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [288] + dtype = "float32" + min_val = float("0.134762") + max_val = float("2.1261") + mean = float("0.535896") + std = float("0.306173") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [288] + dtype = "float32" + min_val = float("8.2363e-05") + max_val = float("0.00219491") + mean = float("0.000309341") + std = float("0.000233079") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [288] + dtype = "float32" + min_val = float("-0.030111") + max_val = float("0.0566944") + mean = float("0.0182315") + std = float("0.0154086") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0361375") + max_val = float("0.0394624") + mean = float("-0.000415469") + std = float("0.00308512") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [288] + dtype = "float32" + min_val = float("-2.05007") + max_val = float("0.975687") + mean = float("-0.271675") + std = float("0.352781") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [288] + dtype = "float32" + min_val = float("0.484279") + max_val = float("2.77267") + mean = float("1.16085") + std = float("0.365954") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [288] + dtype = "float32" + min_val = float("0.000848494") + max_val = float("0.00923929") + mean = float("0.00210399") + std = float("0.00104444") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [288] + dtype = "float32" + min_val = 
float("-0.219087") + max_val = float("0.0986344") + mean = float("0.0209312") + std = float("0.0258247") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0292029") + max_val = float("0.0357589") + mean = float("-7.15803e-05") + std = float("0.00238931") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [288] + dtype = "float32" + min_val = float("-3.04083") + max_val = float("0.900587") + mean = float("-1.62079") + std = float("0.520798") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [288] + dtype = "float32" + min_val = float("0.411314") + max_val = float("1.85484") + mean = float("1.14027") + std = float("0.183152") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [288] + dtype = "float32" + min_val = float("0.0252489") + max_val = float("0.18253") + mean = float("0.0494213") + std = float("0.0146748") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [288] + dtype = "float32" + min_val = float("-1.07001") + max_val = float("0.355943") + mean = float("-0.158608") + std = float("0.114139") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0309852") + max_val = float("0.0582767") + mean = float("-0.000209758") + std = float("0.00298971") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [288] + dtype = "float32" + min_val = float("-2.13711") + max_val = float("1.60076") + mean = float("-0.414977") + std = float("0.433246") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [288] + dtype = "float32" + min_val = float("0.022627") + max_val = float("2.19672") + mean = float("0.42649") + std = float("0.285355") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [288] + dtype = "float32" + min_val = float("1.88226e-05") + max_val = float("0.00268303") + mean = float("0.000480955") + std = float("0.000340505") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [288] + dtype = "float32" + min_val = float("-0.0299058") + max_val = float("0.0725718") + mean = float("0.0237725") + std = float("0.0152738") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0252463") + max_val = float("0.0315433") + mean = float("-0.000586018") + std = float("0.00265428") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [288] + dtype = "float32" + min_val = float("-2.13711") + max_val = float("1.60076") + mean = float("-0.414977") + std = float("0.433246") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [288] + dtype = "float32" + min_val = float("0.468473") + max_val = float("2.4614") + mean = float("1.14055") + std = float("0.312897") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [288] + dtype = "float32" + min_val = float("0.00165326") + max_val = float("0.0083321") + mean = float("0.00368068") + std = float("0.00118113") + data = None + + +class Program_weight_tensor_parameter_280: + 
name = "parameter_280" + shape = [288] + dtype = "float32" + min_val = float("-0.163598") + max_val = float("0.139773") + mean = float("0.0375055") + std = float("0.027891") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0299762") + max_val = float("0.0565302") + mean = float("-0.00011544") + std = float("0.00253383") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [288] + dtype = "float32" + min_val = float("-2.63547") + max_val = float("0.379304") + mean = float("-1.38439") + std = float("0.384712") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [288] + dtype = "float32" + min_val = float("0.556724") + max_val = float("1.70857") + mean = float("1.12356") + std = float("0.13898") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [288] + dtype = "float32" + min_val = float("0.0161077") + max_val = float("0.0621791") + mean = float("0.0271423") + std = float("0.00800861") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [288] + dtype = "float32" + min_val = float("-0.824244") + max_val = float("0.122359") + mean = float("-0.104615") + std = float("0.0753355") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0415185") + max_val = float("0.0561525") + mean = float("-0.000193528") + std = float("0.00279691") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [288] + dtype = "float32" + min_val = float("-3.61908") + max_val = float("2.96334") + mean = float("-0.709588") + std = float("0.759529") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [288] + dtype = "float32" + min_val = float("0.888572") + max_val = float("2.91565") + mean = float("1.63353") + std = float("0.310984") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [288] + dtype = "float32" + min_val = float("0.00242173") + max_val = float("0.00861982") + mean = float("0.0042646") + std = float("0.00100104") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [288] + dtype = "float32" + min_val = float("-0.179465") + max_val = float("0.114532") + mean = float("0.0476944") + std = float("0.0307821") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0931726") + max_val = float("0.0751882") + mean = float("-0.000785789") + std = float("0.00566247") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [288] + dtype = "float32" + min_val = float("-2.85604") + max_val = float("0.478614") + mean = float("-0.680691") + std = float("0.575726") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [288] + dtype = "float32" + min_val = float("0.973621") + max_val = float("3.51535") + mean = float("1.79775") + std = float("0.367279") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [288] + dtype = "float32" + min_val = float("0.000865287") + max_val = float("0.00316432") + mean = float("0.00149136") + std = 
float("0.0003415") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [288] + dtype = "float32" + min_val = float("-0.0344171") + max_val = float("0.0580723") + mean = float("0.0190986") + std = float("0.0150565") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0372277") + max_val = float("0.0839795") + mean = float("-0.000347443") + std = float("0.00428485") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [576] + dtype = "float32" + min_val = float("-2.51505") + max_val = float("0.855887") + mean = float("-0.840214") + std = float("0.397648") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [576] + dtype = "float32" + min_val = float("0.486873") + max_val = float("1.94844") + mean = float("0.893045") + std = float("0.178935") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [576] + dtype = "float32" + min_val = float("0.00558801") + max_val = float("0.0475219") + mean = float("0.010594") + std = float("0.00320855") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [576] + dtype = "float32" + min_val = float("-0.168528") + max_val = float("0.186722") + mean = float("0.0346847") + std = float("0.047648") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [576, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259966") + max_val = float("0.0531089") + mean = float("-0.000103852") + std = float("0.0026927") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("-2.61759") + max_val = float("1.27481") + mean = float("-1.09476") + std = float("0.545254") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("0.407522") + max_val = float("1.55678") + mean = float("1.07226") + std = float("0.1548") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384] + dtype = "float32" + min_val = float("0.00156468") + max_val = float("0.00879687") + mean = float("0.00373744") + std = float("0.000985444") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.17477") + max_val = float("0.130171") + mean = float("-0.0408968") + std = float("0.0418347") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384, 288, 1, 1] + dtype = "float32" + min_val = float("-0.314334") + max_val = float("0.114485") + mean = float("-0.000692981") + std = float("0.00880167") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [288] + dtype = "float32" + min_val = float("-0.0128831") + max_val = float("0.000944236") + mean = float("-0.00330884") + std = float("0.00268126") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.298788") + max_val = float("0.214538") + mean = float("-0.0023525") + std = float("0.00622357") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [144] + dtype = "float32" + 
min_val = float("-1.88584") + max_val = float("0.680942") + mean = float("-0.25001") + std = float("0.423566") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [144] + dtype = "float32" + min_val = float("-3.38819e-06") + max_val = float("2.26826") + mean = float("0.457992") + std = float("0.464393") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [144] + dtype = "float32" + min_val = float("4.68597e-12") + max_val = float("0.0014742") + mean = float("0.000373153") + std = float("0.000243895") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [144] + dtype = "float32" + min_val = float("-0.0447123") + max_val = float("0.0332369") + mean = float("0.00410791") + std = float("0.0125988") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0347207") + max_val = float("0.0710957") + mean = float("-0.000373771") + std = float("0.00442868") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [144] + dtype = "float32" + min_val = float("-1.88584") + max_val = float("0.680942") + mean = float("-0.25001") + std = float("0.423566") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [144] + dtype = "float32" + min_val = float("0.417726") + max_val = float("3.72519") + mean = float("1.30585") + std = float("0.60647") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [144] + dtype = "float32" + min_val = float("0.00121329") + max_val = float("0.00991923") + mean = float("0.00481243") + std = float("0.00164179") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [144] + dtype = "float32" + min_val = float("-0.125801") + max_val = float("0.0996604") + mean = float("0.0236482") + std = float("0.034768") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0345242") + max_val = float("0.0469052") + mean = float("-0.000215196") + std = float("0.00418358") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [144] + dtype = "float32" + min_val = float("-2.65693") + max_val = float("0.361398") + mean = float("-1.2627") + std = float("0.511504") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [144] + dtype = "float32" + min_val = float("0.575176") + max_val = float("1.97228") + mean = float("1.17744") + std = float("0.186819") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [144] + dtype = "float32" + min_val = float("0.0387542") + max_val = float("0.213018") + mean = float("0.0717756") + std = float("0.0240672") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [144] + dtype = "float32" + min_val = float("-2.47583") + max_val = float("1.76755") + mean = float("-0.159923") + std = float("0.322012") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0496549") + max_val = float("0.0491722") + mean = float("-0.000280937") + std = float("0.00476909") + data = None + + +class 
Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [144] + dtype = "float32" + min_val = float("-1.73323") + max_val = float("0.691279") + mean = float("-0.184951") + std = float("0.412683") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [144] + dtype = "float32" + min_val = float("0.000375565") + max_val = float("2.82265") + mean = float("0.327409") + std = float("0.355995") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [144] + dtype = "float32" + min_val = float("2.41184e-08") + max_val = float("0.00489911") + mean = float("0.000485085") + std = float("0.000558486") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [144] + dtype = "float32" + min_val = float("-0.0299872") + max_val = float("0.0403266") + mean = float("0.00806846") + std = float("0.0122646") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0510575") + max_val = float("0.0574652") + mean = float("-0.00049201") + std = float("0.00426793") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [144] + dtype = "float32" + min_val = float("-1.73323") + max_val = float("0.691279") + mean = float("-0.184951") + std = float("0.412683") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [144] + dtype = "float32" + min_val = float("0.374778") + max_val = float("3.02772") + mean = float("1.09097") + std = float("0.398768") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [144] + dtype = "float32" + min_val = float("0.002096") + max_val = float("0.00999513") + mean = float("0.00547893") + std = float("0.00147556") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [144] + dtype = "float32" + min_val = float("-0.0362089") + max_val = float("0.0739052") + mean = float("0.0248723") + std = float("0.0214283") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0386362") + max_val = float("0.0409861") + mean = float("-0.000193256") + std = float("0.00435136") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [144] + dtype = "float32" + min_val = float("-3.02176") + max_val = float("0.118053") + mean = float("-1.27487") + std = float("0.572477") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [144] + dtype = "float32" + min_val = float("0.674536") + max_val = float("1.94307") + mean = float("1.17468") + std = float("0.203345") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [144] + dtype = "float32" + min_val = float("0.0213033") + max_val = float("0.0959501") + mean = float("0.0407162") + std = float("0.0106175") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [144] + dtype = "float32" + min_val = float("-0.408281") + max_val = float("0.265723") + mean = float("-0.0454912") + std = float("0.131066") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0559826") + max_val = float("0.0749149") + 
mean = float("-0.00029916") + std = float("0.00485738") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [144] + dtype = "float32" + min_val = float("-1.34813") + max_val = float("0.819893") + mean = float("-0.133414") + std = float("0.356979") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [144] + dtype = "float32" + min_val = float("-1.04863e-08") + max_val = float("1.49061") + mean = float("0.184283") + std = float("0.153078") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [144] + dtype = "float32" + min_val = float("4.51899e-17") + max_val = float("0.00236278") + mean = float("0.00025634") + std = float("0.000225224") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [144] + dtype = "float32" + min_val = float("-0.046896") + max_val = float("0.0636725") + mean = float("0.00875622") + std = float("0.013008") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0354967") + max_val = float("0.0306929") + mean = float("-0.000398757") + std = float("0.00393071") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [144] + dtype = "float32" + min_val = float("-1.34813") + max_val = float("0.819893") + mean = float("-0.133414") + std = float("0.356979") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [144] + dtype = "float32" + min_val = float("0.304726") + max_val = float("1.84411") + mean = float("0.891657") + std = float("0.30034") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [144] + dtype = "float32" + min_val = float("0.00215515") + max_val = float("0.0101051") + mean = float("0.00501806") + std = float("0.00141687") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [144] + dtype = "float32" + min_val = float("-0.0276411") + max_val = float("0.111824") + mean = float("0.0395187") + std = float("0.0270464") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0391439") + max_val = float("0.0492761") + mean = float("-0.000231001") + std = float("0.00437446") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [144] + dtype = "float32" + min_val = float("-2.72489") + max_val = float("0.0566489") + mean = float("-1.27061") + std = float("0.509992") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [144] + dtype = "float32" + min_val = float("0.618913") + max_val = float("1.5544") + mean = float("1.10584") + std = float("0.164014") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [144] + dtype = "float32" + min_val = float("0.0116758") + max_val = float("0.0377541") + mean = float("0.0231102") + std = float("0.00536137") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [144] + dtype = "float32" + min_val = float("-0.553039") + max_val = float("0.193092") + mean = float("-0.0463312") + std = float("0.110957") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [144, 144, 3, 3] + 
dtype = "float32" + min_val = float("-0.0544832") + max_val = float("0.0716752") + mean = float("-0.000309922") + std = float("0.00492764") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [144] + dtype = "float32" + min_val = float("-1.91624") + max_val = float("0.64309") + mean = float("-0.124932") + std = float("0.357265") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [144] + dtype = "float32" + min_val = float("6.92956e-11") + max_val = float("1.76213") + mean = float("0.238104") + std = float("0.270566") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [144] + dtype = "float32" + min_val = float("3.35214e-19") + max_val = float("0.00952796") + mean = float("0.000696401") + std = float("0.00116309") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [144] + dtype = "float32" + min_val = float("-0.0405747") + max_val = float("0.129474") + mean = float("0.0111479") + std = float("0.0223075") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0909429") + max_val = float("0.0542187") + mean = float("-0.000544528") + std = float("0.00556865") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [144] + dtype = "float32" + min_val = float("-1.91624") + max_val = float("0.64309") + mean = float("-0.124932") + std = float("0.357265") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [144] + dtype = "float32" + min_val = float("0.306862") + max_val = float("1.60924") + mean = float("0.739189") + std = float("0.255286") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [144] + dtype = "float32" + min_val = float("0.00349496") + max_val = float("0.0164137") + mean = float("0.00848617") + std = float("0.00251853") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [144] + dtype = "float32" + min_val = float("-0.0892949") + max_val = float("0.159054") + mean = float("0.0414926") + std = float("0.0442451") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0863245") + max_val = float("0.0691663") + mean = float("-0.000269533") + std = float("0.00432551") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [144] + dtype = "float32" + min_val = float("-2.46021") + max_val = float("0.310727") + mean = float("-1.10998") + std = float("0.443475") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [144] + dtype = "float32" + min_val = float("0.643666") + max_val = float("1.43636") + mean = float("1.10137") + std = float("0.148707") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [144] + dtype = "float32" + min_val = float("0.00835284") + max_val = float("0.0428432") + mean = float("0.0185872") + std = float("0.00619535") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [144] + dtype = "float32" + min_val = float("-0.401526") + max_val = float("0.17903") + mean = float("-0.0352466") + std = float("0.0899088") + data = None + + +class 
Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.125213") + max_val = float("0.13412") + mean = float("-0.000230254") + std = float("0.00506275") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [144] + dtype = "float32" + min_val = float("-1.63308") + max_val = float("1.61461") + mean = float("0.00911913") + std = float("0.77167") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [144] + dtype = "float32" + min_val = float("0.445442") + max_val = float("1.44232") + mean = float("0.78976") + std = float("0.199016") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [144] + dtype = "float32" + min_val = float("0.00852235") + max_val = float("0.0484453") + mean = float("0.019143") + std = float("0.00781506") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [144] + dtype = "float32" + min_val = float("-0.219843") + max_val = float("0.265749") + mean = float("-0.0338108") + std = float("0.0734836") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.130866") + max_val = float("0.100965") + mean = float("-0.000735686") + std = float("0.00942121") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [144] + dtype = "float32" + min_val = float("-3.93979") + max_val = float("1.48481") + mean = float("0.183797") + std = float("0.810382") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [144] + dtype = "float32" + min_val = float("0.630551") + max_val = float("5.8003") + mean = float("1.61141") + std = float("1.07491") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [144] + dtype = "float32" + min_val = float("0.00425413") + max_val = float("0.0518065") + mean = float("0.0126118") + std = float("0.00639388") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [144] + dtype = "float32" + min_val = float("-0.165898") + max_val = float("0.120333") + mean = float("-0.0102457") + std = float("0.0633671") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0765962") + max_val = float("0.128155") + mean = float("-0.000375066") + std = float("0.00896105") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [288] + dtype = "float32" + min_val = float("-3.31982") + max_val = float("1.67958") + mean = float("-0.217517") + std = float("0.687687") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [288] + dtype = "float32" + min_val = float("0.643335") + max_val = float("3.55469") + mean = float("1.1155") + std = float("0.319082") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [288] + dtype = "float32" + min_val = float("0.0059383") + max_val = float("0.0656923") + mean = float("0.0157728") + std = float("0.0081547") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [288] + dtype = "float32" + min_val = float("-0.325535") + max_val = float("0.216548") + mean = 
float("0.0285558") + std = float("0.0857518") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [288, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0787451") + max_val = float("0.0724321") + mean = float("-0.000113514") + std = float("0.00476341") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [192] + dtype = "float32" + min_val = float("-2.19434") + max_val = float("1.29449") + mean = float("-0.831981") + std = float("0.659789") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [192] + dtype = "float32" + min_val = float("0.375517") + max_val = float("1.58122") + mean = float("0.952566") + std = float("0.217744") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [192] + dtype = "float32" + min_val = float("0.00127096") + max_val = float("0.0109615") + mean = float("0.00338894") + std = float("0.00125674") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [192] + dtype = "float32" + min_val = float("-0.303785") + max_val = float("0.270053") + mean = float("-0.0505746") + std = float("0.0847457") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [192, 144, 1, 1] + dtype = "float32" + min_val = float("-0.14711") + max_val = float("0.125485") + mean = float("-0.00085414") + std = float("0.0147247") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [144] + dtype = "float32" + min_val = float("-0.0128135") + max_val = float("0.00186763") + mean = float("-0.00515405") + std = float("0.00360318") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.230754") + max_val = float("0.221493") + mean = float("-0.00425619") + std = float("0.0116581") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [72] + dtype = "float32" + min_val = float("-1.59076") + max_val = float("0.878855") + mean = float("-0.104846") + std = float("0.483369") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [72] + dtype = "float32" + min_val = float("0.0967285") + max_val = float("2.46975") + mean = float("0.436517") + std = float("0.391048") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [72] + dtype = "float32" + min_val = float("0.00012673") + max_val = float("0.00214979") + mean = float("0.00068778") + std = float("0.000445108") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [72] + dtype = "float32" + min_val = float("-0.0363738") + max_val = float("0.0317259") + mean = float("0.00016375") + std = float("0.0142081") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0557283") + max_val = float("0.0911011") + mean = float("-0.000460661") + std = float("0.00825811") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [72] + dtype = "float32" + min_val = float("-1.59076") + max_val = float("0.878855") + mean = float("-0.104846") + std = float("0.483369") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [72] + 
dtype = "float32" + min_val = float("0.350787") + max_val = float("4.87294") + mean = float("1.06553") + std = float("0.664298") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [72] + dtype = "float32" + min_val = float("0.00164018") + max_val = float("0.0228997") + mean = float("0.00646402") + std = float("0.00332412") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [72] + dtype = "float32" + min_val = float("-0.0790678") + max_val = float("0.114939") + mean = float("0.00645335") + std = float("0.0381131") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0822044") + max_val = float("0.108943") + mean = float("-0.000361009") + std = float("0.00693018") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [72] + dtype = "float32" + min_val = float("-3.97079") + max_val = float("-0.178117") + mean = float("-1.14005") + std = float("0.5619") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [72] + dtype = "float32" + min_val = float("0.759314") + max_val = float("2.01441") + mean = float("1.00457") + std = float("0.201055") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [72] + dtype = "float32" + min_val = float("0.0230974") + max_val = float("0.206515") + mean = float("0.050033") + std = float("0.0283207") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [72] + dtype = "float32" + min_val = float("-3.343") + max_val = float("0.634916") + mean = float("-0.171853") + std = float("0.439984") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0674171") + max_val = float("0.0896064") + mean = float("-0.000477673") + std = float("0.00804976") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [72] + dtype = "float32" + min_val = float("-1.40656") + max_val = float("0.81421") + mean = float("-0.0673487") + std = float("0.399889") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [72] + dtype = "float32" + min_val = float("0.0957317") + max_val = float("1.55261") + mean = float("0.345869") + std = float("0.248719") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [72] + dtype = "float32" + min_val = float("0.00013382") + max_val = float("0.00322969") + mean = float("0.000662323") + std = float("0.000545394") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [72] + dtype = "float32" + min_val = float("-0.0556241") + max_val = float("0.0709114") + mean = float("0.00866558") + std = float("0.0282055") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0592353") + max_val = float("0.0467115") + mean = float("-0.000599282") + std = float("0.00839905") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [72] + dtype = "float32" + min_val = float("-1.40656") + max_val = float("0.81421") + mean = float("-0.0673487") + std = float("0.399889") + data = None + + +class 
Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [72] + dtype = "float32" + min_val = float("0.276959") + max_val = float("1.96811") + mean = float("0.859384") + std = float("0.349389") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [72] + dtype = "float32" + min_val = float("0.00253484") + max_val = float("0.0149816") + mean = float("0.00546146") + std = float("0.00237279") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [72] + dtype = "float32" + min_val = float("-0.213535") + max_val = float("0.178802") + mean = float("0.0213207") + std = float("0.0592638") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0446925") + max_val = float("0.0403311") + mean = float("-0.000398934") + std = float("0.00699352") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [72] + dtype = "float32" + min_val = float("-2.73085") + max_val = float("1.9234") + mean = float("-1.1431") + std = float("0.581087") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [72] + dtype = "float32" + min_val = float("0.269804") + max_val = float("1.83994") + mean = float("0.888989") + std = float("0.213116") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [72] + dtype = "float32" + min_val = float("0.0118931") + max_val = float("0.0545938") + mean = float("0.022515") + std = float("0.00754715") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [72] + dtype = "float32" + min_val = float("-0.576365") + max_val = float("0.510823") + mean = float("-0.0629126") + std = float("0.164233") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0666923") + max_val = float("0.0817573") + mean = float("-0.000500318") + std = float("0.00810628") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [72] + dtype = "float32" + min_val = float("-1.42563") + max_val = float("0.649085") + mean = float("-0.0675341") + std = float("0.354801") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [72] + dtype = "float32" + min_val = float("0.0693956") + max_val = float("1.95854") + mean = float("0.298075") + std = float("0.251204") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [72] + dtype = "float32" + min_val = float("0.000177219") + max_val = float("0.00303454") + mean = float("0.000690537") + std = float("0.000424759") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [72] + dtype = "float32" + min_val = float("-0.0832652") + max_val = float("0.069899") + mean = float("0.0141943") + std = float("0.0279763") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.061438") + max_val = float("0.0553734") + mean = float("-0.00110888") + std = float("0.00912885") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [72] + dtype = "float32" + min_val = float("-1.42563") + max_val = float("0.649085") + mean = 
float("-0.0675341") + std = float("0.354801") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [72] + dtype = "float32" + min_val = float("0.228345") + max_val = float("2.65097") + mean = float("0.653015") + std = float("0.341866") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [72] + dtype = "float32" + min_val = float("0.00230007") + max_val = float("0.0125217") + mean = float("0.00553601") + std = float("0.00211423") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [72] + dtype = "float32" + min_val = float("-0.0686155") + max_val = float("0.131431") + mean = float("0.0190546") + std = float("0.0473793") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0501459") + max_val = float("0.0384498") + mean = float("-0.000380851") + std = float("0.00698063") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [72] + dtype = "float32" + min_val = float("-1.72526") + max_val = float("1.80894") + mean = float("-0.951717") + std = float("0.483412") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [72] + dtype = "float32" + min_val = float("0.29065") + max_val = float("1.66707") + mean = float("0.888159") + std = float("0.154606") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [72] + dtype = "float32" + min_val = float("0.00680219") + max_val = float("0.0331497") + mean = float("0.0152776") + std = float("0.0049061") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [72] + dtype = "float32" + min_val = float("-0.368372") + max_val = float("0.243631") + mean = float("-0.0386086") + std = float("0.125246") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0881553") + max_val = float("0.0852597") + mean = float("-0.000433402") + std = float("0.00815191") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [72] + dtype = "float32" + min_val = float("-0.719076") + max_val = float("0.537556") + mean = float("-0.0115955") + std = float("0.314259") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [72] + dtype = "float32" + min_val = float("0.0628701") + max_val = float("1.13353") + mean = float("0.303389") + std = float("0.184722") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [72] + dtype = "float32" + min_val = float("0.000532147") + max_val = float("0.0104203") + mean = float("0.00239251") + std = float("0.00169115") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [72] + dtype = "float32" + min_val = float("-0.0217242") + max_val = float("0.0767047") + mean = float("0.00748877") + std = float("0.0188698") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.108991") + max_val = float("0.0548162") + mean = float("-0.00156636") + std = float("0.0102978") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [72] + dtype = "float32" + min_val 
= float("-0.719076") + max_val = float("0.537556") + mean = float("-0.0115955") + std = float("0.314259") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [72] + dtype = "float32" + min_val = float("0.194893") + max_val = float("1.27601") + mean = float("0.579013") + std = float("0.249602") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [72] + dtype = "float32" + min_val = float("0.00662561") + max_val = float("0.0356459") + mean = float("0.0155175") + std = float("0.00620865") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [72] + dtype = "float32" + min_val = float("-0.179595") + max_val = float("0.123187") + mean = float("0.0184164") + std = float("0.0480194") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0488563") + max_val = float("0.0581945") + mean = float("-0.000496131") + std = float("0.00708543") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [72] + dtype = "float32" + min_val = float("-2.82742") + max_val = float("0.703279") + mean = float("-0.730947") + std = float("0.559333") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [72] + dtype = "float32" + min_val = float("0.469684") + max_val = float("3.04441") + mean = float("1.01309") + std = float("0.304524") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [72] + dtype = "float32" + min_val = float("0.00354013") + max_val = float("0.0271231") + mean = float("0.010493") + std = float("0.00503339") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [72] + dtype = "float32" + min_val = float("-0.31754") + max_val = float("0.401533") + mean = float("-0.0483198") + std = float("0.110786") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.139962") + max_val = float("0.14872") + mean = float("-0.000234565") + std = float("0.00843117") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [72] + dtype = "float32" + min_val = float("-3.7383") + max_val = float("1.99732") + mean = float("0.293809") + std = float("0.798316") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [72] + dtype = "float32" + min_val = float("0.251004") + max_val = float("2.94545") + mean = float("0.50446") + std = float("0.339681") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [72] + dtype = "float32" + min_val = float("0.00712893") + max_val = float("0.045588") + mean = float("0.0159536") + std = float("0.00756811") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [72] + dtype = "float32" + min_val = float("-0.351478") + max_val = float("0.326335") + mean = float("-0.0335179") + std = float("0.111688") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [72, 144, 1, 1] + dtype = "float32" + min_val = float("-0.139796") + max_val = float("0.0940396") + mean = float("-0.00108851") + std = float("0.0155806") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + 
shape = [72] + dtype = "float32" + min_val = float("-5.39255") + max_val = float("2.18957") + mean = float("0.478962") + std = float("1.18125") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [72] + dtype = "float32" + min_val = float("0.406939") + max_val = float("7.0927") + mean = float("1.67928") + std = float("1.34144") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [72] + dtype = "float32" + min_val = float("0.00300917") + max_val = float("0.0754504") + mean = float("0.0135636") + std = float("0.0119278") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [72] + dtype = "float32" + min_val = float("-0.258965") + max_val = float("0.25035") + mean = float("0.00961389") + std = float("0.122304") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [72, 144, 1, 1] + dtype = "float32" + min_val = float("-0.156371") + max_val = float("0.189106") + mean = float("-0.00016702") + std = float("0.0152465") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [144] + dtype = "float32" + min_val = float("-2.47213") + max_val = float("2.77854") + mean = float("-0.025543") + std = float("0.853958") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [144] + dtype = "float32" + min_val = float("0.47482") + max_val = float("3.78003") + mean = float("0.945869") + std = float("0.381916") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [144] + dtype = "float32" + min_val = float("0.00494874") + max_val = float("0.221043") + mean = float("0.0273727") + std = float("0.02703") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [144] + dtype = "float32" + min_val = float("-0.378532") + max_val = float("0.418616") + mean = float("-0.0428526") + std = float("0.108932") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [144, 96, 3, 3] + dtype = "float32" + min_val = float("-0.107428") + max_val = float("0.0968212") + mean = float("-0.000287487") + std = float("0.00779752") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [96] + dtype = "float32" + min_val = float("-2.31237") + max_val = float("1.28256") + mean = float("-0.534955") + std = float("0.66673") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [96] + dtype = "float32" + min_val = float("0.362518") + max_val = float("2.86915") + mean = float("0.943309") + std = float("0.309066") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [96] + dtype = "float32" + min_val = float("0.000454042") + max_val = float("0.00660616") + mean = float("0.00195527") + std = float("0.00106373") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [96] + dtype = "float32" + min_val = float("-0.275751") + max_val = float("0.292198") + mean = float("0.0319012") + std = float("0.082052") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [96, 72, 1, 1] + dtype = "float32" + min_val = float("-0.219741") + max_val = float("0.154344") + mean = float("-0.000672481") + std = float("0.0231241") + data = None + + +class 
Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [72] + dtype = "float32" + min_val = float("-0.0193754") + max_val = float("-0.00184666") + mean = float("-0.00902615") + std = float("0.00474913") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.364431") + max_val = float("0.215412") + mean = float("-0.0105578") + std = float("0.0212144") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [36, 36, 1, 1] + dtype = "float32" + min_val = float("-0.138814") + max_val = float("0.0708625") + mean = float("-0.00122225") + std = float("0.0154944") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.086202") + max_val = float("0.0713741") + mean = float("-0.000367576") + std = float("0.0121533") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.139611") + max_val = float("0.125855") + mean = float("-0.0003564") + std = float("0.0135025") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [36] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [36, 36, 1, 1] + dtype = "float32" + min_val = float("-0.100885") + max_val = float("0.0787214") + mean = float("-0.00143063") + std = float("0.0190849") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.0979402") + max_val = float("0.0724624") + mean = float("-0.000891189") + std = float("0.0127835") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.138262") + max_val = float("0.123915") + mean = float("-0.000370891") + std = float("0.0148287") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [36, 72, 1, 1] + dtype = "float32" + min_val = float("-0.174102") + max_val = float("0.146045") + mean = float("-0.00228237") + std = float("0.0249029") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [36] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [36, 72, 1, 1] + dtype = "float32" + min_val = float("-0.141395") + max_val = float("0.14269") + mean = float("-0.000660208") + std = float("0.0237923") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [72] + dtype = "float32" + min_val = float("-1.42265") + max_val = float("3.22325") + mean = float("0.693525") + std = float("1.23103") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [72] + dtype = "float32" + min_val = float("1.06495") + max_val = float("4.15261") + mean = float("1.97902") + std = float("0.771541") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [72] + dtype = "float32" + min_val = float("0.510513") + max_val = float("18.6427") + mean = float("2.31655") + std = float("2.26398") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [72] + dtype = "float32" + min_val = float("-1.95175") + max_val = float("3.10183") + mean = float("-0.197797") + std = float("0.875803") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [72, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0878214") + max_val = float("0.13242") + mean = float("-0.000245829") + std = float("0.0130596") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [48, 24, 3, 3] + dtype = "float32" + min_val = float("-0.189958") + max_val = float("0.140864") + mean = float("-0.00037868") + std = float("0.021555") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [24] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.25864") + max_val = float("0.268717") + mean = float("-0.000404368") + std = float("0.0301086") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-0.194937") + max_val = float("0.244468") + mean = float("-0.000195133") + std = float("0.0620297") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py new file mode 100644 index 000000000..472007b37 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.0746284") + max_val = float("15.1421") + mean = float("6.30923") + std = float("2.75071") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.6, 2.397, 2.64463, 1.6] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def 
forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..dd073cdbd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +1579552a399f5b61d19d64b39a0d675865b1c5d0f8673f65021f53d8d7e39e2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json new 
file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py new file mode 100644 index 000000000..880246b79 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000676407") + std = float("0.025999") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000676407") + std = float("0.025999") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 174.715, + 140.8, + 405.956, + 457.6, + 375.985, + 345.193, + 411.639, + 372.906, + 317.49, + 292.0, + 450.008, + 388.0, + 287.439, + 452.211, + 340.211, + 490.947, + 352.0, + 296.267, + 584.17, + 384.267, + 222.933, + 194.723, + 332.444, + 275.609, + 80.8974, + 117.694, + 116.531, + 143.688, + 124.847, + 201.813, + 433.498, + 633.6, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("-271.994") + max_val = float("993.136") + mean = float("352.517") + std = float("213.87") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py new file mode 100644 index 000000000..b0b1964b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x10164xi64) <- (8x1x10164xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = 
paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x10164xi64) <- (8x10164xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (81312xi64) <- (8x10164xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (81312xi32) <- (8xi32, 81312xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 10164] + + # pd_op.reshape: (8x10164xi32) <- (81312xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x10164xb) <- (8x10164xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x10164xi32) <- (8x10164xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x10164xi32) <- (8x10164xb, 8x10164xi32, 8x10164xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (81312x4xf32) <- (8x4xf32, 81312xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 10164, 4] + + # pd_op.reshape: (8x10164x4xf32) <- (81312x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x10164x5xf32) <- (8x10164xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x10164x4xf32) <- (8x10164x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x10164x4xf32) <- (8x10164x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, 
full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x10164xf32) <- (8x1x1xf32, 8x1x10164xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x10164xf32) <- (8x1x10164xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x10164xf32) <- (8x1x10164xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x10164x1xf32) <- (8x10164xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py new file mode 100644 index 000000000..fb1f72140 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.136167] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [0.78639] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [3.99608] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = 
paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..64e121445 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +0621664d615e3c9d853112c02ae864f2d69542a45086eee97ddcd3e82bebddd6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/input_meta.py new file mode 100644 index 000000000..1086ce1dd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/input_meta.py @@ -0,0 +1,85 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00265643") + std = float("0.0514721") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 27] + dtype = "int64" + min_val = 47 + max_val = 7739 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 10164] + dtype = "float32" + max_val = float("0.681174") + mean = float("0.00458845") + std = float("0.0264109") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [10164, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("700.0") + mean = float("352.0") + std = float("203.197") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 174.715, + 140.8, + 405.956, + 457.6, + 375.985, + 345.193, + 411.639, + 372.906, + 317.49, + 292.0, + 450.008, + 388.0, + 287.439, + 452.211, + 340.211, + 490.947, + 352.0, + 296.267, + 584.17, + 384.267, + 222.933, + 194.723, + 332.444, + 275.609, + 80.8974, + 117.694, + 116.531, + 143.688, + 124.847, + 201.813, + 433.498, + 633.6, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 1, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py new file mode 100644 index 000000000..51b356c21 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py @@ -0,0 +1,247 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + multiply_0 = 
paddle._C_ops.multiply(data_2, data_0) + del data_2 + + # pd_op.flatten: (8x10164xf32) <- (8x1x10164xf32) + flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) + + # pd_op.flatten: (8x27xi64) <- (8x1x27xi64) + flatten_1 = paddle._C_ops.flatten(data_1, 0, 1) + del data_1 + + # pd_op.index_sample: (8x27xf32) <- (8x10164xf32, 8x27xi64) + index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) + del flatten_0, flatten_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [8, 1, -1] + + # pd_op.reshape: (8x1x27xf32) <- (8x27xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(index_sample_0, full_int_array_0) + del full_int_array_0, index_sample_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (8x1x1xf32) <- (8x1x27xf32, 1xi64) + mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_1, True) + + # pd_op.subtract: (8x1x27xf32) <- (8x1x27xf32, 8x1x1xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) + + # pd_op.pow: (8x1x27xf32) <- (8x1x27xf32) + pow_0 = paddle._C_ops.pow(subtract_0, float("2")) + del subtract_0 + + # pd_op.sum: (8x1x1xf32) <- (8x1x27xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_1, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (8x1x27xf32) + numel_0 = paddle._C_ops.numel(reshape_0) + del reshape_0 + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (8x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_0, float("-1"), True) + del divide_0, full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (xf32) <- (xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + scale_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.maximum: (xf32) <- (xf32, xf32) + maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) + del full_like_0, scale_0 + + # pd_op.divide: (8x1x1xf32) <- (8x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, maximum_0) + del maximum_0, sum_0 + + # pd_op.sqrt: (8x1x1xf32) <- (8x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(divide_1) + del divide_1 + + # pd_op.add: (8x1x1xf32) <- (8x1x1xf32, 8x1x1xf32) + add_0 = paddle._C_ops.add(mean_0, sqrt_0) + del mean_0, sqrt_0 + + # pd_op.greater_than: (8x1x10164xb) <- (8x1x10164xf32, 8x1x1xf32) + greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) + del add_0, multiply_0 + + # pd_op.full_like: (8x1x10164xf32) <- (8x1x10164xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + data_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (8x1x10164xf32) <- (8x1x10164xb, 8x1x10164xf32, 8x1x10164xf32) + where_0 = paddle._C_ops.where(greater_than_1, data_0, full_like_1) + del data_0, full_like_1, greater_than_1 + + # pd_op.full_int_array: (2xi64) <- 
() + full_int_array_2 = [0, 1] + + # pd_op.unsqueeze: (1x1x10164x2xf32) <- (10164x2xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) + del data_3, full_int_array_2 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x10164x1xf32, 1x1x10164x1xf32]) <- (1x1x10164x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_2) + del unsqueeze_0 + + # builtin.split: (1x1x10164x1xf32, 1x1x10164x1xf32) <- ([1x1x10164x1xf32, 1x1x10164x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_3) + del data_4, full_int_array_3 + + # pd_op.split_with_num: ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) <- (8x1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_2) + del full_2, unsqueeze_1 + + # builtin.split: (8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32) <- ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (8x1x10164x1xf32) <- (1x1x10164x1xf32, 8x1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (8x1x10164x1xf32) <- (1x1x10164x1xf32, 8x1x1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (8x1x10164x1xf32) <- (8x1x1x1xf32, 1x1x10164x1xf32) + subtract_3 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (8x1x10164x1xf32) <- (8x1x1x1xf32, 1x1x10164x1xf32) + subtract_4 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32]) <- (8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32) + combine_0 = [subtract_1, subtract_2, subtract_3, subtract_4] + del subtract_1, subtract_2, subtract_3, subtract_4 + + # pd_op.concat: (8x1x10164x4xf32) <- ([8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_3) + del combine_0, full_3 + + # pd_op.min: (8x1x10164xf32) <- (8x1x10164x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_1, False) + del concat_0, full_int_array_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (8x1x10164xb) <- (8x1x10164xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_0, full_4) + del full_4, min_0 + + # pd_op.cast: (8x1x10164xf32) <- (8x1x10164xb) + cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + multiply_1 = paddle._C_ops.multiply(where_0, cast_4) + del cast_4, where_0 + + # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x1xf32) + multiply_2 = paddle._C_ops.multiply(multiply_1, data_5) + del data_5, multiply_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.sum: (8x10164xf32) <- (8x1x10164xf32, 1xi64) + sum_1 = 
paddle._C_ops.sum(multiply_2, full_int_array_4, None, False) + del full_int_array_4 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_5 = [] + + # pd_op.max: (xf32) <- (8x10164xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_5, False) + del full_int_array_5 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_5) + del full_5, max_0, multiply_2, sum_1 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..c1f079b97 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +5b39a063673d2a842c85a42f33c248189fce8d0384b21578827b861bc8710041 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py new file mode 100644 index 000000000..2ac13c044 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py @@ -0,0 +1,92 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [2, 3, 640, 640] + dtype = "float32" + max_val = float("1.0") + mean = float("0.471598") + std = float("0.270715") 
+ data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py new file mode 100644 index 000000000..530823f43 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py @@ -0,0 +1,5806 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + 
parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + 
parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + ): + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x3x-1x-1xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_12, parameter_587, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_12, 
parameter_587 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_586, + parameter_585, + parameter_584, + parameter_583, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_583, parameter_584, parameter_585, parameter_586 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_582, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_582, swish_0 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_581, + parameter_580, + parameter_579, + parameter_578, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_578, parameter_579, parameter_580, parameter_581 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_577, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_577, swish_1 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_576, + parameter_575, + parameter_574, + parameter_573, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_573, parameter_574, parameter_575, parameter_576 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x48x-1x-1xf32, 72x48x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_572, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_572, swish_2 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_571, + parameter_570, + parameter_569, + parameter_568, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_568, parameter_569, parameter_570, 
parameter_571 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x72x-1x-1xf32, 36x72x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_567, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_567 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_566, + parameter_565, + parameter_564, + parameter_563, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_563, parameter_564, parameter_565, parameter_566 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x72x-1x-1xf32, 36x72x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_562, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_562, swish_3 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_561, + parameter_560, + parameter_559, + parameter_558, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_558, parameter_559, parameter_560, parameter_561 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_557, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_557 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_556, + parameter_555, + parameter_554, + parameter_553, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_553, parameter_554, parameter_555, parameter_556 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_552, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_552 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_7, + parameter_551, + parameter_550, + parameter_549, + parameter_548, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_548, parameter_549, parameter_550, parameter_551 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_547, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_547, swish_6 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_546, + parameter_545, + parameter_544, + parameter_543, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_543, parameter_544, parameter_545, parameter_546 + + # pd_op.multiply: (-1x36x-1x-1xf32) <- (1xf32, -1x36x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_542, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_542 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_541, + parameter_540, + parameter_539, + parameter_538, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_538, parameter_539, parameter_540, parameter_541 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_537, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_537 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_536, + parameter_535, + parameter_534, + parameter_533, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_533, parameter_534, 
parameter_535, parameter_536 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_532, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_532, swish_8 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_531, + parameter_530, + parameter_529, + parameter_528, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_528, parameter_529, parameter_530, parameter_531 + + # pd_op.multiply: (-1x36x-1x-1xf32) <- (1xf32, -1x36x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del batch_norm__66, data_1 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x36x-1x-1xf32, -1x36x-1x-1xf32]) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + combine_0 = [swish_4, add_3] + del add_3, swish_4 + + # pd_op.concat: (-1x72x-1x-1xf32) <- ([-1x36x-1x-1xf32, -1x36x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x72x1x1xf32) <- (-1x72x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x72x1x1xf32) <- (-1x72x1x1xf32, 72x72x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + mean_0, parameter_527, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_527 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x72x1x1xf32) <- (72xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_526, full_int_array_1) + del parameter_526 + + # pd_op.add: (-1x72x1x1xf32) <- (-1x72x1x1xf32, 1x72x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_12, reshape_0) + del conv2d_12, reshape_0 + + # pd_op.hardsigmoid: (-1x72x1x1xf32) <- (-1x72x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_4, float("0.166667"), float("0.5") + ) + del add_4 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x72x-1x-1xf32, 96x72x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_2, parameter_525, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_525 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_524, + parameter_523, + 
parameter_522, + parameter_521, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x96x-1x-1xf32, 144x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_520, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520, swish_10 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x144x-1x-1xf32, 72x144x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_11, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__84) + del batch_norm__84 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x144x-1x-1xf32, 72x144x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_11, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510, swish_11 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_509, + parameter_508, + parameter_507, + parameter_506, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_506, parameter_507, parameter_508, parameter_509 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_505, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
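        # --- Editorial note (not part of the generated patch) ----------------------
        # The recurring `(lambda x, f: f(x))(paddle._C_ops.batch_norm(...), lambda out: ...)`
        # construct above is an inlined result normalizer: it applies the second lambda
        # to the batch_norm result so the left-hand side can always unpack six values,
        # whether the op returns a list/tuple or a single tensor. A named equivalent,
        # given purely for readability (the helper name and call below are illustrative
        # and do not appear in the patch):
        #
        #     def _as_six(out):
        #         return out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None)
        #
        #     bn_out, *_ = _as_six(paddle._C_ops.batch_norm(...))  # only bn_out is used afterwards
        # ----------------------------------------------------------------------------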
del parameter_505 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_504, + parameter_503, + parameter_502, + parameter_501, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_501, parameter_502, parameter_503, parameter_504 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_500, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_500 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_499, + parameter_498, + parameter_497, + parameter_496, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_496, parameter_497, parameter_498, parameter_499 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_495, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_495, swish_14 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_494, + parameter_493, + parameter_492, + parameter_491, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_491, parameter_492, parameter_493, parameter_494 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del batch_norm__108, data_2 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + del batch_norm__102, multiply_3 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_13, swish_15) + del swish_13, swish_15 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + add_6, parameter_490, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_490 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_489, + parameter_488, + parameter_487, + parameter_486, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_486, parameter_487, parameter_488, parameter_489 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_485, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_485 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_484, + parameter_483, + parameter_482, + parameter_481, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_481, parameter_482, parameter_483, parameter_484 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_480, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_480, swish_16 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_479, + parameter_478, + parameter_477, + parameter_476, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_476, parameter_477, parameter_478, parameter_479 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del batch_norm__126, data_3 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_8 = paddle._C_ops.add(add_6, swish_17) + del add_6, swish_17 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_475, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_475 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_474, + parameter_473, + parameter_472, + parameter_471, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + 
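        # --- Editorial note (not part of the generated patch) ----------------------
        # The ops around this point repeat one residual unit: a conv3x3+BN+swish,
        # followed by a two-branch step whose 1x1 conv+BN branch is scaled by one of
        # the 1xf32 `data_*` inputs (read here as a learnable per-block scalar) before
        # being summed with the 3x3 conv+BN branch, a swish on the sum, and an identity
        # shortcut from the unit input. Sketch under that reading (names are
        # illustrative; each conv*/bn_* stands for a conv2d+batch_norm pair above):
        #
        #     def basic_unit(x, alpha):
        #         u = paddle._C_ops.swish(bn_a(conv3x3_a(x)))
        #         y = paddle._C_ops.swish(bn_b(conv3x3_b(u)) + alpha * bn_c(conv1x1_c(u)))
        #         return x + y
        #
        # Each stack of such units is later concatenated with a parallel 1x1 branch
        # (the `combine_*`/`concat_*` pairs) and gated by a squeeze-excitation style
        # step: mean over H,W -> 1x1 conv + bias -> hardsigmoid -> channel-wise multiply.
        # ----------------------------------------------------------------------------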
del conv2d_23, parameter_471, parameter_472, parameter_473, parameter_474 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_470, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_470 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_469, + parameter_468, + parameter_467, + parameter_466, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_466, parameter_467, parameter_468, parameter_469 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_465, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_465, swish_18 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_464, + parameter_463, + parameter_462, + parameter_461, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_461, parameter_462, parameter_463, parameter_464 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del batch_norm__144, data_4 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_460, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_460 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_459, + parameter_458, + parameter_457, + parameter_456, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_456, parameter_457, parameter_458, parameter_459 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_27 = 
paddle._C_ops.conv2d( + swish_20, parameter_455, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_455 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_454, + parameter_453, + parameter_452, + parameter_451, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_451, parameter_452, parameter_453, parameter_454 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_450, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_450, swish_20 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_449, + parameter_448, + parameter_447, + parameter_446, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_446, parameter_447, parameter_448, parameter_449 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del batch_norm__162, data_5 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # builtin.combine: ([-1x72x-1x-1xf32, -1x72x-1x-1xf32]) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + combine_1 = [swish_12, add_12] + del add_12, swish_12 + + # pd_op.concat: (-1x144x-1x-1xf32) <- ([-1x72x-1x-1xf32, -1x72x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x144x1x1xf32) <- (-1x144x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) + conv2d_29 = paddle._C_ops.conv2d( + mean_1, parameter_445, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_445 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_444, full_int_array_1) + del parameter_444 + + # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_29, reshape_1) + del conv2d_29, reshape_1 + + # pd_op.hardsigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_13, float("0.166667"), float("0.5") + ) + del add_13 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x144x-1x-1xf32, 
192x144x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + multiply_7, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_7, parameter_443 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x192x-1x-1xf32, 288x192x3x3xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_438, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + swish_23, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_23, parameter_428, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428, swish_23 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_427, + parameter_426, + parameter_425, + 
parameter_424, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_418, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_413, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413, swish_26 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_6, batch_norm__204) + del batch_norm__204, data_6 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_14 = paddle._C_ops.add(batch_norm__198, multiply_8) + del batch_norm__198, multiply_8 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_27 = 
paddle._C_ops.swish(add_14) + del add_14 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_15 = paddle._C_ops.add(swish_25, swish_27) + del swish_25, swish_27 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + add_15, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__210) + del batch_norm__210 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_403, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_402, + parameter_401, + parameter_400, + parameter_399, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + swish_28, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_398, swish_28 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_397, + parameter_396, + parameter_395, + parameter_394, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_394, parameter_395, parameter_396, parameter_397 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_7, batch_norm__222) + del batch_norm__222, data_7 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__216, multiply_9) + del batch_norm__216, multiply_9 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_16) + del add_16 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_17 = paddle._C_ops.add(add_15, swish_29) + del add_15, swish_29 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- 
(-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + add_17, parameter_393, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_393 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_392, + parameter_391, + parameter_390, + parameter_389, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_389, parameter_390, parameter_391, parameter_392 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_388, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_388 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_387, + parameter_386, + parameter_385, + parameter_384, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_384, parameter_385, parameter_386, parameter_387 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_383, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_383, swish_30 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_382, + parameter_381, + parameter_380, + parameter_379, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_379, parameter_380, parameter_381, parameter_382 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_10 = paddle._C_ops.multiply(data_8, batch_norm__240) + del batch_norm__240, data_8 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__234, multiply_10) + del batch_norm__234, multiply_10 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_31 = paddle._C_ops.swish(add_18) + del add_18 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_19 = paddle._C_ops.add(add_17, swish_31) + del add_17, swish_31 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + add_19, parameter_378, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_378 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 
144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_377, + parameter_376, + parameter_375, + parameter_374, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_374, parameter_375, parameter_376, parameter_377 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_373, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_373 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_372, + parameter_371, + parameter_370, + parameter_369, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_369, parameter_370, parameter_371, parameter_372 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_368, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_368, swish_32 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_367, + parameter_366, + parameter_365, + parameter_364, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_364, parameter_365, parameter_366, parameter_367 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del batch_norm__258, data_9 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_21 = paddle._C_ops.add(add_19, swish_33) + del add_19, swish_33 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + combine_2 = [swish_24, add_21] + del add_21, swish_24 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x288x1x1xf32) <- (-1x288x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 
288x288x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + mean_2, parameter_363, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_363 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_362, full_int_array_1) + del parameter_362 + + # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_46, reshape_2) + del conv2d_46, reshape_2 + + # pd_op.hardsigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_22, float("0.166667"), float("0.5") + ) + del add_22 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) + multiply_12 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x288x-1x-1xf32, 384x288x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + multiply_12, parameter_361, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_361 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x384x-1x-1xf32, 576x384x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_356, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + swish_35, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del conv2d_49, parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_35, parameter_346, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_346, swish_35 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__288) + del batch_norm__288 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_38, parameter_336, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331, swish_38 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + 
batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_330, + parameter_329, + parameter_328, + parameter_327, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_327, parameter_328, parameter_329, parameter_330 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (1xf32, -1x288x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_10, batch_norm__300) + del batch_norm__300, data_10 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_23 = paddle._C_ops.add(batch_norm__294, multiply_13) + del batch_norm__294, multiply_13 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_23) + del add_23 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_24 = paddle._C_ops.add(swish_37, swish_39) + del swish_37, swish_39 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_54 = paddle._C_ops.conv2d( + add_24, parameter_326, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_326 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_325, + parameter_324, + parameter_323, + parameter_322, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_322, parameter_323, parameter_324, parameter_325 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_40, parameter_321, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_321 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_320, + parameter_319, + parameter_318, + parameter_317, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_317, parameter_318, parameter_319, parameter_320 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_316, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_316, swish_40 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_315, + parameter_314, + parameter_313, + parameter_312, + True, + float("0.9"), + float("1e-05"), + "NCHW", + 
True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_312, parameter_313, parameter_314, parameter_315 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (1xf32, -1x288x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_11, batch_norm__318) + del batch_norm__318, data_11 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_25 = paddle._C_ops.add(batch_norm__312, multiply_14) + del batch_norm__312, multiply_14 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_25) + del add_25 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_26 = paddle._C_ops.add(add_24, swish_41) + del add_24, swish_41 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_3 = [swish_36, add_26] + del add_26, swish_36 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x576x1x1xf32) <- (-1x576x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + mean_3, parameter_311, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_311 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_310, full_int_array_1) + del parameter_310 + + # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) + add_27 = paddle._C_ops.add(conv2d_57, reshape_3) + del conv2d_57, reshape_3 + + # pd_op.hardsigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_27, float("0.166667"), float("0.5") + ) + del add_27 + + # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) + multiply_15 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x576x-1x-1xf32, 768x576x1x1xf32) + conv2d_58 = paddle._C_ops.conv2d( + multiply_15, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_15, parameter_309 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x768x-1x-1xf32, 288x768x1x1xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + 
batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__330) + del batch_norm__330 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x768x-1x-1xf32, 288x768x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_299, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_42 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + swish_44, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294, swish_44 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_61, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_45, parameter_289, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, 
parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_45 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__348, batch_norm__354) + del batch_norm__348, batch_norm__354 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_46 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_46, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_46, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_46, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_4 = [swish_46, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_46 + + # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x1152x-1x-1xf32, 288x1152x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_6, parameter_279, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_279 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) 
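+
+ # The three max-pool branches above (kernels 5/9/13, stride 1, paddings
+ # 2/4/6) keep the spatial size unchanged, so concat_6 stacks
+ # [identity, pool5, pool9, pool13] and only grows the channel dim from
+ # 288 to 1152 before conv2d_64 projects it back to 288. This looks like an
+ # SPP-style receptive-field block; the original module name is not
+ # preserved in this captured subgraph. An equivalent high-level call for
+ # one branch (illustrative only, not part of the graph) would be
+ # paddle.nn.functional.max_pool2d(x, kernel_size=5, stride=1, padding=2).
+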
+ conv2d_65 = paddle._C_ops.conv2d( + swish_47, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_47 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264, swish_48 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_29 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_29) + del add_29 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_5 = [swish_43, swish_49] + del swish_43, swish_49 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_7, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_259 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 
576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_50, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__390) + del batch_norm__390 + + # pd_op.nearest_interp: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_51, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_51 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x384x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_34] + del nearest_interp_0, swish_34 + + # pd_op.concat: (-1x672x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x672x-1x-1xf32, 144x672x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_8, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x672x-1x-1xf32, 144x672x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_8, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_244 + 
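+
+ # The block starting at nearest_interp_0 is a top-down fusion step: the
+ # deeper 288-channel map is upsampled 2x (nearest), concatenated with the
+ # 384-channel lateral feature swish_34 to 672 channels, and then split by
+ # the two parallel 1x1 convs conv2d_70 / conv2d_71 into a 144-channel
+ # shortcut branch (swish_52) and a 144-channel branch that passes through
+ # the stacked conv blocks below (each a 3x3 conv followed by parallel 3x3
+ # and 1x1 convs whose outputs are added); the two branches are re-joined by
+ # concat_9 further down. This resembles a CSP-style fusion block in a
+ # PAN/FPN neck, though the exporting network's layer names are not recorded
+ # here. A rough functional equivalent of the first two steps (illustrative
+ # variable names, not part of the captured graph):
+ #   up = paddle.nn.functional.interpolate(deep, scale_factor=2, mode="nearest")
+ #   fused = paddle.concat([up, lateral], axis=1)
+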
+ # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_53, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239, swish_53 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_54, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_54, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229, swish_54 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.add: 
(-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_55 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_75 = paddle._C_ops.conv2d( + swish_55, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_55 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_76 = paddle._C_ops.conv2d( + swish_56, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_56, parameter_214, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214, swish_56 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_31 = paddle._C_ops.add(batch_norm__432, batch_norm__438) + del batch_norm__432, batch_norm__438 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_57 = paddle._C_ops.swish(add_31) + del add_31 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + combine_7 = [swish_52, swish_57] + del swish_52, swish_57 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 
1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + concat_9, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_209 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_58, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.nearest_interp: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_59, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_59 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x192x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_22] + del nearest_interp_1, swish_22 + + # pd_op.concat: (-1x336x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x336x-1x-1xf32, 72x336x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + concat_10, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_195, parameter_196, parameter_197, parameter_198 + + # 
pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x336x-1x-1xf32, 72x336x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + concat_10, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_194 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_61, parameter_189, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189, swish_61 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_62, parameter_184, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_62, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_62 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_178, + parameter_177, + parameter_176, + 
parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_32 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_32) + del add_32 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + swish_63, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174, swish_63 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_64, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_64, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164, swish_64 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__492, batch_norm__498) + del batch_norm__492, batch_norm__498 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_65 = paddle._C_ops.swish(add_33) + del add_33 + + # 
builtin.combine: ([-1x72x-1x-1xf32, -1x72x-1x-1xf32]) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + combine_9 = [swish_60, swish_65] + del swish_60, swish_65 + + # pd_op.concat: (-1x144x-1x-1xf32) <- ([-1x72x-1x-1xf32, -1x72x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + concat_11, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_159 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__504) + del batch_norm__504 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_89 = paddle._C_ops.conv2d( + swish_66, parameter_154, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x288x-1x-1xf32) + combine_10 = [swish_67, swish_58] + del swish_58, swish_67 + + # pd_op.concat: (-1x432x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x432x-1x-1xf32, 144x432x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + concat_12, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (-1x144x-1x-1xf32) <- 
(-1x144x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x432x-1x-1xf32, 144x432x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_12, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_144 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_69 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + swish_69, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_69 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_70, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_70, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_70 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_128, + parameter_127, + 
parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_34 = paddle._C_ops.add(batch_norm__534, batch_norm__540) + del batch_norm__534, batch_norm__540 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_34) + del add_34 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_71, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124, swish_71 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__546) + del batch_norm__546 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_72, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_72, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114, swish_72 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__552, batch_norm__558) + del batch_norm__552, batch_norm__558 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) 
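+
+ # From here the bottom-up half of the neck mirrors the top-down half: a
+ # stride-2 3x3 conv (conv2d_89 above, conv2d_99 below) downsamples the
+ # finer level, the result is concatenated with the coarser lateral feature,
+ # and the same split-into-two-1x1-branches / conv-block / concat pattern is
+ # applied. The fused per-level outputs (swish_74 just below and the
+ # 576-channel swish_82 near the end of this subgraph) are what the
+ # detection-head style convolutions later in this file consume.
+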
+ swish_73 = paddle._C_ops.swish(add_35) + del add_35 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + combine_11 = [swish_68, swish_73] + del swish_68, swish_73 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + concat_13, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_109 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__564) + del batch_norm__564 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_74, parameter_104, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x576x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x576x-1x-1xf32) + combine_12 = [swish_75, swish_50] + del swish_50, swish_75 + + # pd_op.concat: (-1x864x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x576x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x864x-1x-1xf32, 288x864x1x1xf32) + conv2d_100 = paddle._C_ops.conv2d( + concat_14, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_95, parameter_96, parameter_97, 
parameter_98 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__576) + del batch_norm__576 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x864x-1x-1xf32, 288x864x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + concat_14, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_94 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__582) + del batch_norm__582 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_102 = paddle._C_ops.conv2d( + swish_77, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_77 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_78, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + swish_78, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_78 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + 
parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_36 = paddle._C_ops.add(batch_norm__594, batch_norm__600) + del batch_norm__594, batch_norm__600 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_36) + del add_36 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_105 = paddle._C_ops.conv2d( + swish_79, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_79 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_80, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_80, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_80 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__612, batch_norm__618) + del batch_norm__612, batch_norm__618 + + # pd_op.swish: (-1x288x-1x-1xf32) <- 
(-1x288x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_37) + del add_37 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_13 = [swish_76, swish_81] + del swish_76, swish_81 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__624) + del batch_norm__624 + + # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_82) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_82) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_82) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_2 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_16 = paddle._C_ops.multiply(slice_1, slice_2) + del slice_1, slice_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [1, 1] + + # pd_op.pool2d: (-1x576x1x1xf32) <- (-1x576x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_82, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) + conv2d_109 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) + add_38 = paddle._C_ops.add(conv2d_109, reshape_4) + del conv2d_109, reshape_4 + + # pd_op.sigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_38) + del 
add_38 + + # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) + multiply_17 = paddle._C_ops.multiply(swish_82, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_110 = paddle._C_ops.conv2d( + multiply_17, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_52 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_83 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.add: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x-1x-1xf32) + add_39 = paddle._C_ops.add(swish_83, swish_82) + del swish_83 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x576x-1x-1xf32, 4x576x3x3xf32) + conv2d_111 = paddle._C_ops.conv2d( + add_39, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_39, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_40 = paddle._C_ops.add(conv2d_111, reshape_5) + del conv2d_111, reshape_5 + + # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) + conv2d_112 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) + add_41 = paddle._C_ops.add(conv2d_112, reshape_6) + del conv2d_112, reshape_6 + + # pd_op.sigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_41) + del add_41 + + # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) + multiply_18 = paddle._C_ops.multiply(swish_82, sigmoid_1) + del sigmoid_1, swish_82 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_113 = paddle._C_ops.conv2d( + multiply_18, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_18, parameter_43 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_84 = 
paddle._C_ops.swish(batch_norm__636) + del batch_norm__636 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x576x-1x-1xf32, 68x576x3x3xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_84, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_84 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_42 = paddle._C_ops.add(conv2d_114, reshape_7) + del conv2d_114, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_16] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_42, stack_0) + del add_42, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_115, full_int_array_6) + del conv2d_115 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_40) + del add_40 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_16] + del multiply_16 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_74) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_74) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_74) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_19 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (-1x288x1x1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_74, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + 
False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) + conv2d_116 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) + add_43 = paddle._C_ops.add(conv2d_116, reshape_10) + del conv2d_116, reshape_10 + + # pd_op.sigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_43) + del add_43 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) + multiply_20 = paddle._C_ops.multiply(swish_74, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + multiply_20, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_20, parameter_33 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__642) + del batch_norm__642 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_44 = paddle._C_ops.add(swish_85, swish_74) + del swish_85 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x288x-1x-1xf32, 4x288x3x3xf32) + conv2d_118 = paddle._C_ops.conv2d( + add_44, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_44, parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_45 = paddle._C_ops.add(conv2d_118, reshape_11) + del conv2d_118, reshape_11 + + # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) + conv2d_119 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) + add_46 = paddle._C_ops.add(conv2d_119, reshape_12) + del conv2d_119, reshape_12 + + # pd_op.sigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_46) + del add_46 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) + multiply_21 = paddle._C_ops.multiply(swish_74, sigmoid_4) + del sigmoid_4, swish_74 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_120 = paddle._C_ops.conv2d( + multiply_21, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_21, parameter_24 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, 
-1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x288x-1x-1xf32, 68x288x3x3xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_86, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_86 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_47 = paddle._C_ops.add(conv2d_121, reshape_13) + del conv2d_121, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_19] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_47, stack_2) + del add_47, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_122 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_122, full_int_array_6) + del conv2d_122 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_45) + del add_45 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_19] + del multiply_19 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(swish_66) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_6 + + # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_66) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del full_int_array_7, shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_66) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del 
full_int_array_8, full_int_array_9, shape64_8 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_22 = paddle._C_ops.multiply(slice_7, slice_8) + del slice_7, slice_8 + + # pd_op.pool2d: (-1x144x1x1xf32) <- (-1x144x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_66, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_10 + + # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) + conv2d_123 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) + add_48 = paddle._C_ops.add(conv2d_123, reshape_16) + del conv2d_123, reshape_16 + + # pd_op.sigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_48) + del add_48 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) + multiply_23 = paddle._C_ops.multiply(swish_66, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + multiply_23, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_23, parameter_15 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__654) + del batch_norm__654 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_49 = paddle._C_ops.add(swish_87, swish_66) + del swish_87 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x144x-1x-1xf32, 4x144x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + add_49, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_49, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_50 = paddle._C_ops.add(conv2d_125, reshape_17) + del conv2d_125, reshape_17 + + # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) + conv2d_126 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) + add_51 = paddle._C_ops.add(conv2d_126, reshape_18) + del conv2d_126, reshape_18 + + # pd_op.sigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_51) + del add_51 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) + multiply_24 = 
paddle._C_ops.multiply(swish_66, sigmoid_7) + del sigmoid_7, swish_66 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + multiply_24, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_24, parameter_6 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__660) + del batch_norm__660 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x144x-1x-1xf32, 68x144x3x3xf32) + conv2d_128 = paddle._C_ops.conv2d( + swish_88, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_88 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_52 = paddle._C_ops.add(conv2d_128, reshape_19) + del conv2d_128, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_22] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_52, stack_4) + del add_52, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_129 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_129, full_int_array_6) + del conv2d_129, full_int_array_6 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_50) + del add_50 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_22] + del full_1, full_2, multiply_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, 
-1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py new file mode 100644 index 000000000..49282087c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py @@ -0,0 +1,6370 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0141635") + max_val = float("0.0241976") + mean = float("6.52944e-08") + std = float("0.00671931") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 144, 3, 3] + dtype = "float32" + min_val = float("-0.15978") + max_val = float("0.188116") + mean = float("6.16128e-08") + std = float("0.00828307") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [144] + dtype = "float32" + min_val = float("-0.104364") + max_val = float("0.335598") + mean = float("0.0805429") + std = float("0.0951253") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [144] + dtype = "float32" + min_val = float("0.83568") + max_val = float("2.14576") + mean = float("1.40531") + std = float("0.259813") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [144] + dtype = "float32" + min_val = float("0.000157802") + max_val = float("0.00220136") + mean = float("0.000572424") + std = float("0.000355381") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [144] + dtype = "float32" + min_val = float("-0.0503245") + max_val = float("0.0384794") + mean = float("-0.00745528") + std = float("0.0176331") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0726671") + max_val = float("0.0967547") + mean = float("-0.000265342") + std = float("0.00739699") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [144] + dtype = "float32" + min_val = float("-0.00637615") + max_val = float("0.0068") + mean = float("-0.000174127") + std = float("0.00321334") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0118268") + max_val = float("0.0153714") + mean = float("-0.000124547") + std = float("0.00221024") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 144, 3, 3] + dtype = "float32" + min_val = float("-6.03644e-06") + max_val = float("0.000338216") + mean = float("1.1862e-05") + std = float("2.42884e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [144] + dtype = "float32" + min_val = float("-0.6431") + max_val = float("1.5172") + mean = 
float("0.437678") + std = float("0.39753") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [144] + dtype = "float32" + min_val = float("0.91498") + max_val = float("2.11591") + mean = float("1.39192") + std = float("0.197821") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [144] + dtype = "float32" + min_val = float("0.000201827") + max_val = float("0.0033487") + mean = float("0.000776001") + std = float("0.000505157") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [144] + dtype = "float32" + min_val = float("-0.246557") + max_val = float("0.035997") + mean = float("-0.0278908") + std = float("0.0405564") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0650974") + max_val = float("0.0803623") + mean = float("-0.000600887") + std = float("0.00887669") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [144] + dtype = "float32" + min_val = float("-0.0055726") + max_val = float("0.00565327") + mean = float("-0.000280367") + std = float("0.00215197") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0293837") + max_val = float("0.0538049") + mean = float("-5.73599e-05") + std = float("0.00246484") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00497141") + max_val = float("0.0272935") + mean = float("6.13218e-08") + std = float("0.00562603") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 288, 3, 3] + dtype = "float32" + min_val = float("-0.11143") + max_val = float("0.129486") + mean = float("3.29019e-08") + std = float("0.00574142") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [288] + dtype = "float32" + min_val = float("-0.017394") + max_val = float("0.147115") + mean = float("0.0536437") + std = float("0.0321882") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [288] + dtype = "float32" + min_val = float("1.01671") + max_val = float("1.45294") + mean = float("1.22922") + std = float("0.0813549") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [288] + dtype = "float32" + min_val = float("9.58753e-05") + max_val = float("0.0041103") + mean = float("0.000468789") + std = float("0.000456147") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [288] + dtype = "float32" + min_val = float("-0.0518176") + max_val = float("0.0162465") + mean = float("-0.00788198") + std = float("0.00923439") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0595991") + max_val = float("0.0737716") + mean = float("-0.000137555") + std = float("0.00365117") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [288] + dtype = "float32" + min_val = float("-0.00315414") + max_val = float("0.00622371") + mean = float("2.49002e-05") + std = float("0.00195236") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [288, 
288, 1, 1] + dtype = "float32" + min_val = float("-0.00389382") + max_val = float("0.008433") + mean = float("-1.88398e-05") + std = float("0.000934231") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 288, 3, 3] + dtype = "float32" + min_val = float("-5.34524e-06") + max_val = float("0.000115912") + mean = float("5.31803e-06") + std = float("7.70699e-06") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [288] + dtype = "float32" + min_val = float("-0.2707") + max_val = float("0.772625") + mean = float("0.312276") + std = float("0.172178") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [288] + dtype = "float32" + min_val = float("0.993855") + max_val = float("1.7254") + mean = float("1.25878") + std = float("0.0948141") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [288] + dtype = "float32" + min_val = float("0.000222055") + max_val = float("0.00541883") + mean = float("0.000812938") + std = float("0.000640229") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [288] + dtype = "float32" + min_val = float("-0.1308") + max_val = float("0.0697218") + mean = float("-0.0266256") + std = float("0.0288337") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0481293") + max_val = float("0.0594145") + mean = float("-0.000431989") + std = float("0.0042605") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [288] + dtype = "float32" + min_val = float("-0.00286415") + max_val = float("0.00722174") + mean = float("-7.35274e-05") + std = float("0.00117538") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0122917") + max_val = float("0.016227") + mean = float("-1.72956e-05") + std = float("0.000997845") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00366283") + max_val = float("0.0136174") + mean = float("3.45317e-08") + std = float("0.00377369") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 576, 3, 3] + dtype = "float32" + min_val = float("-0.066466") + max_val = float("0.0676466") + mean = float("1.75132e-08") + std = float("0.00360721") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [576] + dtype = "float32" + min_val = float("-0.0422788") + max_val = float("0.113782") + mean = float("0.0223079") + std = float("0.0258955") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [576] + dtype = "float32" + min_val = float("1.05316") + max_val = float("1.39794") + mean = float("1.15071") + std = float("0.0429545") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [576] + dtype = "float32" + min_val = 
float("4.91637e-05") + max_val = float("0.00266511") + mean = float("0.000246075") + std = float("0.000221109") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [576] + dtype = "float32" + min_val = float("-0.0345042") + max_val = float("0.0202052") + mean = float("-0.00576832") + std = float("0.00545338") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0386702") + max_val = float("0.0418189") + mean = float("-6.13284e-05") + std = float("0.00176803") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [576] + dtype = "float32" + min_val = float("-0.00435722") + max_val = float("0.00342043") + mean = float("0.000100629") + std = float("0.00100298") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.00356092") + max_val = float("0.00418283") + mean = float("2.84068e-05") + std = float("0.000365604") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 576, 3, 3] + dtype = "float32" + min_val = float("-6.95821e-06") + max_val = float("0.000100905") + mean = float("3.36451e-06") + std = float("6.30444e-06") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [576] + dtype = "float32" + min_val = float("-0.248813") + max_val = float("0.372082") + mean = float("0.155726") + std = float("0.0836817") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [576] + dtype = "float32" + min_val = float("1.02627") + max_val = float("1.42841") + mean = float("1.1361") + std = float("0.0516175") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [576] + dtype = "float32" + min_val = float("0.000120346") + max_val = float("0.00280838") + mean = float("0.000713572") + std = float("0.000507142") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [576] + dtype = "float32" + min_val = float("-0.0736708") + max_val = float("0.0822341") + mean = float("-0.0216779") + std = float("0.0158763") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0600565") + max_val = float("0.0356134") + mean = float("-0.00023781") + std = float("0.00193273") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [576] + dtype = "float32" + min_val = float("-0.00774628") + max_val = float("0.00637093") + mean = float("-2.65663e-05") + std = float("0.000686488") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0266525") + max_val = float("0.0453807") + mean = float("-1.04982e-06") + std = float("0.000525074") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [576] + dtype = "float32" + min_val = float("-0.363147") + max_val = float("0.450423") + mean = float("0.129446") + std = float("0.123275") + data = None + + +class Program_weight_tensor_parameter_56: + name = 
"parameter_56" + shape = [576] + dtype = "float32" + min_val = float("0.962108") + max_val = float("1.62765") + mean = float("1.1062") + std = float("0.0594285") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [576] + dtype = "float32" + min_val = float("0.00128466") + max_val = float("0.0464107") + mean = float("0.00458397") + std = float("0.00374471") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [576] + dtype = "float32" + min_val = float("-0.177259") + max_val = float("0.103565") + mean = float("-0.0218799") + std = float("0.0230091") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0611952") + max_val = float("0.04003") + mean = float("-0.000170338") + std = float("0.00295543") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [288] + dtype = "float32" + min_val = float("-0.27771") + max_val = float("0.0488218") + mean = float("-0.0555783") + std = float("0.0592335") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [288] + dtype = "float32" + min_val = float("0.915284") + max_val = float("1.07407") + mean = float("0.969917") + std = float("0.0228204") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [288] + dtype = "float32" + min_val = float("0.000953466") + max_val = float("0.0142773") + mean = float("0.00304707") + std = float("0.00176714") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [288] + dtype = "float32" + min_val = float("-0.04362") + max_val = float("0.0544441") + mean = float("0.00710198") + std = float("0.0174711") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0344584") + max_val = float("0.0245762") + mean = float("8.69469e-05") + std = float("0.00219113") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [288] + dtype = "float32" + min_val = float("-0.27771") + max_val = float("0.0488218") + mean = float("-0.0555783") + std = float("0.0592335") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [288] + dtype = "float32" + min_val = float("0.981008") + max_val = float("1.21689") + mean = float("1.04938") + std = float("0.0359279") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [288] + dtype = "float32" + min_val = float("0.00172209") + max_val = float("0.0250926") + mean = float("0.00470188") + std = float("0.00208766") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [288] + dtype = "float32" + min_val = float("-0.12407") + max_val = float("0.0767199") + mean = float("-0.029451") + std = float("0.022194") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0377338") + max_val = float("0.0461348") + mean = float("-8.57263e-05") + std = float("0.00163106") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [288] + dtype = "float32" + min_val = float("-0.385862") + max_val = float("0.0653291") + mean = float("-0.105618") + std = float("0.0696428") + data = None + 
+ +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [288] + dtype = "float32" + min_val = float("0.900938") + max_val = float("1.32086") + mean = float("1.04193") + std = float("0.0560555") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [288] + dtype = "float32" + min_val = float("0.00421839") + max_val = float("0.0367987") + mean = float("0.0103437") + std = float("0.00489499") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [288] + dtype = "float32" + min_val = float("-0.153433") + max_val = float("0.0932305") + mean = float("-0.0112633") + std = float("0.0256948") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0364099") + max_val = float("0.0523836") + mean = float("-8.24632e-05") + std = float("0.00184979") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [288] + dtype = "float32" + min_val = float("-0.299402") + max_val = float("0.0134731") + mean = float("-0.109532") + std = float("0.0597187") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [288] + dtype = "float32" + min_val = float("0.890481") + max_val = float("1.0932") + mean = float("0.968348") + std = float("0.0257197") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [288] + dtype = "float32" + min_val = float("0.00100979") + max_val = float("0.00878796") + mean = float("0.00444441") + std = float("0.00149939") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [288] + dtype = "float32" + min_val = float("-0.0520888") + max_val = float("0.049478") + mean = float("0.00820149") + std = float("0.0145604") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0322537") + max_val = float("0.029478") + mean = float("1.23234e-05") + std = float("0.00229431") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [288] + dtype = "float32" + min_val = float("-0.299402") + max_val = float("0.0134731") + mean = float("-0.109532") + std = float("0.0597187") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [288] + dtype = "float32" + min_val = float("0.962862") + max_val = float("1.20254") + mean = float("1.04567") + std = float("0.0391825") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [288] + dtype = "float32" + min_val = float("0.00284955") + max_val = float("0.0225437") + mean = float("0.007316") + std = float("0.00273474") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [288] + dtype = "float32" + min_val = float("-0.140963") + max_val = float("0.0369856") + mean = float("-0.0354432") + std = float("0.0220809") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0384327") + max_val = float("0.0469147") + mean = float("-0.000103927") + std = float("0.00179042") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [288] + dtype = "float32" + min_val = float("-0.29697") + max_val = float("0.139867") + mean = 
float("-0.0979302") + std = float("0.0600107") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [288] + dtype = "float32" + min_val = float("0.857317") + max_val = float("1.30652") + mean = float("1.03469") + std = float("0.0730064") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [288] + dtype = "float32" + min_val = float("0.00405577") + max_val = float("0.0443345") + mean = float("0.0130506") + std = float("0.00639368") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [288] + dtype = "float32" + min_val = float("-0.133397") + max_val = float("0.122273") + mean = float("-0.0433752") + std = float("0.036443") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0411107") + max_val = float("0.0539263") + mean = float("-0.000111625") + std = float("0.00199295") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [288] + dtype = "float32" + min_val = float("-0.215301") + max_val = float("0.173622") + mean = float("-0.0676104") + std = float("0.0466325") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [288] + dtype = "float32" + min_val = float("0.91523") + max_val = float("1.17267") + mean = float("1.02621") + std = float("0.0431361") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [288] + dtype = "float32" + min_val = float("0.00251899") + max_val = float("0.0117666") + mean = float("0.00474816") + std = float("0.00149593") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [288] + dtype = "float32" + min_val = float("-0.0692205") + max_val = float("0.053793") + mean = float("-0.0160558") + std = float("0.0210432") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [288, 864, 1, 1] + dtype = "float32" + min_val = float("-0.0745047") + max_val = float("0.0842196") + mean = float("-0.00010905") + std = float("0.00289749") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [288] + dtype = "float32" + min_val = float("-0.0916511") + max_val = float("0.0302142") + mean = float("-0.0278605") + std = float("0.0201815") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [288] + dtype = "float32" + min_val = float("0.899444") + max_val = float("1.09798") + mean = float("1.01228") + std = float("0.0255773") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [288] + dtype = "float32" + min_val = float("0.00233029") + max_val = float("0.0161175") + mean = float("0.00376379") + std = float("0.00141808") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [288] + dtype = "float32" + min_val = float("-0.0681009") + max_val = float("0.0535") + mean = float("-0.0176997") + std = float("0.018982") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [288, 864, 1, 1] + dtype = "float32" + min_val = float("-0.0500682") + max_val = float("0.0412844") + mean = float("-0.000136185") + std = float("0.00251502") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [288] + dtype = "float32" + min_val = 
float("-0.140589") + max_val = float("0.00421123") + mean = float("-0.0496541") + std = float("0.0252022") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [288] + dtype = "float32" + min_val = float("0.944857") + max_val = float("1.21748") + mean = float("1.05083") + std = float("0.0346128") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [288] + dtype = "float32" + min_val = float("0.00540006") + max_val = float("0.0523987") + mean = float("0.0137401") + std = float("0.00670132") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [288] + dtype = "float32" + min_val = float("-0.369448") + max_val = float("0.183087") + mean = float("-0.0467212") + std = float("0.0761257") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.029027") + max_val = float("0.0392117") + mean = float("-4.01833e-05") + std = float("0.00156935") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [288] + dtype = "float32" + min_val = float("-0.703153") + max_val = float("0.889582") + mean = float("0.279296") + std = float("0.234385") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [288] + dtype = "float32" + min_val = float("0.665579") + max_val = float("1.52345") + mean = float("1.16577") + std = float("0.119601") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [288] + dtype = "float32" + min_val = float("0.00263439") + max_val = float("0.0673288") + mean = float("0.00986936") + std = float("0.00642903") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [288] + dtype = "float32" + min_val = float("-0.172453") + max_val = float("0.213078") + mean = float("-0.0233247") + std = float("0.0347766") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.10595") + max_val = float("0.0861042") + mean = float("-0.000322342") + std = float("0.00641596") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [144] + dtype = "float32" + min_val = float("-0.348757") + max_val = float("0.143868") + mean = float("-0.0670231") + std = float("0.092848") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [144] + dtype = "float32" + min_val = float("0.826843") + max_val = float("1.10694") + mean = float("0.932296") + std = float("0.0366632") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [144] + dtype = "float32" + min_val = float("0.00141229") + max_val = float("0.0167029") + mean = float("0.00556534") + std = float("0.0030144") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [144] + dtype = "float32" + min_val = float("-0.0528065") + max_val = float("0.0511357") + mean = float("-0.00205834") + std = float("0.0130524") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0580646") + max_val = float("0.0259261") + mean = float("-0.000238707") + std = float("0.004453") + data = None + + +class 
Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [144] + dtype = "float32" + min_val = float("-0.348757") + max_val = float("0.143868") + mean = float("-0.0670231") + std = float("0.092848") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [144] + dtype = "float32" + min_val = float("0.69241") + max_val = float("1.27544") + mean = float("1.06354") + std = float("0.0790744") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [144] + dtype = "float32" + min_val = float("0.00383033") + max_val = float("0.0240594") + mean = float("0.0089051") + std = float("0.00329963") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [144] + dtype = "float32" + min_val = float("-0.116614") + max_val = float("0.0942855") + mean = float("-0.0189062") + std = float("0.0284546") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0533475") + max_val = float("0.0528243") + mean = float("-0.000100816") + std = float("0.00343967") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [144] + dtype = "float32" + min_val = float("-0.439905") + max_val = float("0.120748") + mean = float("-0.165545") + std = float("0.123755") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [144] + dtype = "float32" + min_val = float("0.853433") + max_val = float("1.29875") + mean = float("1.03371") + std = float("0.0909163") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [144] + dtype = "float32" + min_val = float("0.00423297") + max_val = float("0.0438086") + mean = float("0.013521") + std = float("0.00655558") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [144] + dtype = "float32" + min_val = float("-0.139") + max_val = float("0.0849961") + mean = float("-0.00217177") + std = float("0.0237809") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0544763") + max_val = float("0.0689249") + mean = float("-0.000164816") + std = float("0.00383836") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [144] + dtype = "float32" + min_val = float("-0.441804") + max_val = float("0.028665") + mean = float("-0.192994") + std = float("0.0888286") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [144] + dtype = "float32" + min_val = float("0.704647") + max_val = float("1.06451") + mean = float("0.920355") + std = float("0.0516476") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [144] + dtype = "float32" + min_val = float("0.00221389") + max_val = float("0.0126339") + mean = float("0.00634418") + std = float("0.00185267") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [144] + dtype = "float32" + min_val = float("-0.037796") + max_val = float("0.0319353") + mean = float("0.00816013") + std = float("0.0133567") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0502432") + max_val = float("0.053736") + 
mean = float("-0.000306758") + std = float("0.00495078") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [144] + dtype = "float32" + min_val = float("-0.441804") + max_val = float("0.028665") + mean = float("-0.192994") + std = float("0.0888286") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [144] + dtype = "float32" + min_val = float("0.767985") + max_val = float("1.24616") + mean = float("1.05535") + std = float("0.0556624") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [144] + dtype = "float32" + min_val = float("0.00524932") + max_val = float("0.0354973") + mean = float("0.0124491") + std = float("0.00528304") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [144] + dtype = "float32" + min_val = float("-0.104281") + max_val = float("0.0433549") + mean = float("-0.017271") + std = float("0.0183086") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0537528") + max_val = float("0.0682326") + mean = float("-0.000146146") + std = float("0.00381018") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [144] + dtype = "float32" + min_val = float("-0.509343") + max_val = float("0.25213") + mean = float("-0.219535") + std = float("0.129933") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [144] + dtype = "float32" + min_val = float("0.785015") + max_val = float("1.53418") + mean = float("1.02864") + std = float("0.12374") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [144] + dtype = "float32" + min_val = float("0.00862567") + max_val = float("0.0571773") + mean = float("0.0178791") + std = float("0.00917033") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [144] + dtype = "float32" + min_val = float("-0.111979") + max_val = float("0.0136057") + mean = float("-0.0389999") + std = float("0.0203047") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0671493") + max_val = float("0.0824849") + mean = float("-0.000218676") + std = float("0.00431301") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [144] + dtype = "float32" + min_val = float("-0.598583") + max_val = float("0.0570557") + mean = float("-0.155009") + std = float("0.0803566") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [144] + dtype = "float32" + min_val = float("0.873451") + max_val = float("1.41532") + mean = float("1.02822") + std = float("0.071914") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [144] + dtype = "float32" + min_val = float("0.00392816") + max_val = float("0.0241202") + mean = float("0.00791989") + std = float("0.00288505") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [144] + dtype = "float32" + min_val = float("-0.0850175") + max_val = float("0.063316") + mean = float("-0.0234895") + std = float("0.0233601") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [144, 432, 1, 1] + 
dtype = "float32" + min_val = float("-0.0664166") + max_val = float("0.0782398") + mean = float("-0.000296481") + std = float("0.00608418") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [144] + dtype = "float32" + min_val = float("-0.146243") + max_val = float("0.075235") + mean = float("-0.026676") + std = float("0.0383535") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [144] + dtype = "float32" + min_val = float("0.888091") + max_val = float("1.40929") + mean = float("0.996461") + std = float("0.0558093") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [144] + dtype = "float32" + min_val = float("0.00247041") + max_val = float("0.0278519") + mean = float("0.00613474") + std = float("0.00320836") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [144] + dtype = "float32" + min_val = float("-0.0513007") + max_val = float("0.0419876") + mean = float("-0.0117934") + std = float("0.0193503") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [144, 432, 1, 1] + dtype = "float32" + min_val = float("-0.0642335") + max_val = float("0.0683001") + mean = float("-0.00013801") + std = float("0.00524856") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [144] + dtype = "float32" + min_val = float("-0.258062") + max_val = float("0.0158585") + mean = float("-0.0907321") + std = float("0.0530183") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [144] + dtype = "float32" + min_val = float("0.818043") + max_val = float("1.19478") + mean = float("1.02377") + std = float("0.056563") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [144] + dtype = "float32" + min_val = float("0.0060931") + max_val = float("0.0623377") + mean = float("0.0181503") + std = float("0.0094012") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [144] + dtype = "float32" + min_val = float("-0.571436") + max_val = float("0.34374") + mean = float("-0.0108407") + std = float("0.126187") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0410201") + max_val = float("0.048983") + mean = float("-1.72048e-05") + std = float("0.00338842") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [144] + dtype = "float32" + min_val = float("-0.793878") + max_val = float("2.02014") + mean = float("0.408754") + std = float("0.537855") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [144] + dtype = "float32" + min_val = float("0.642031") + max_val = float("1.86401") + mean = float("1.10314") + std = float("0.254887") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [144] + dtype = "float32" + min_val = float("0.00315688") + max_val = float("0.0651796") + mean = float("0.0177255") + std = float("0.0123227") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [144] + dtype = "float32" + min_val = float("-0.211434") + max_val = float("0.214478") + mean = float("-0.0150602") + std = float("0.062693") + data = None + + +class 
Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.18355") + max_val = float("0.101318") + mean = float("-0.000559139") + std = float("0.0130426") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [72] + dtype = "float32" + min_val = float("-0.596492") + max_val = float("0.496629") + mean = float("0.00880207") + std = float("0.280008") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [72] + dtype = "float32" + min_val = float("0.544805") + max_val = float("1.28262") + mean = float("0.828695") + std = float("0.105308") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [72] + dtype = "float32" + min_val = float("0.00121121") + max_val = float("0.0154097") + mean = float("0.00657741") + std = float("0.00405399") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [72] + dtype = "float32" + min_val = float("-0.0445254") + max_val = float("0.0643771") + mean = float("-0.00597889") + std = float("0.0159114") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.079712") + max_val = float("0.0572723") + mean = float("-0.000917905") + std = float("0.00853453") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [72] + dtype = "float32" + min_val = float("-0.596492") + max_val = float("0.496629") + mean = float("0.00880207") + std = float("0.280008") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [72] + dtype = "float32" + min_val = float("0.692696") + max_val = float("1.58855") + mean = float("1.06707") + std = float("0.139152") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [72] + dtype = "float32" + min_val = float("0.00325206") + max_val = float("0.0341689") + mean = float("0.0125694") + std = float("0.00721053") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [72] + dtype = "float32" + min_val = float("-0.202431") + max_val = float("0.069641") + mean = float("-0.0138793") + std = float("0.0479398") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0886597") + max_val = float("0.0870701") + mean = float("-0.000129493") + std = float("0.00706853") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [72] + dtype = "float32" + min_val = float("-0.788096") + max_val = float("0.657236") + mean = float("-0.317593") + std = float("0.294445") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [72] + dtype = "float32" + min_val = float("0.319105") + max_val = float("2.17325") + mean = float("0.870637") + std = float("0.238971") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [72] + dtype = "float32" + min_val = float("0.00358424") + max_val = float("0.0266004") + mean = float("0.0101194") + std = float("0.00504532") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [72] + dtype = "float32" + min_val = float("-0.0783159") + max_val = float("0.0807353") + mean = 
float("0.0185967") + std = float("0.0324074") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.114122") + max_val = float("0.0910359") + mean = float("-0.00051978") + std = float("0.00807761") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [72] + dtype = "float32" + min_val = float("-0.529687") + max_val = float("0.199379") + mean = float("-0.259751") + std = float("0.171947") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [72] + dtype = "float32" + min_val = float("0.599551") + max_val = float("0.970551") + mean = float("0.792683") + std = float("0.0733557") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [72] + dtype = "float32" + min_val = float("0.00301817") + max_val = float("0.0147212") + mean = float("0.00748466") + std = float("0.00212384") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [72] + dtype = "float32" + min_val = float("-0.0505147") + max_val = float("0.0345987") + mean = float("0.00396601") + std = float("0.0159843") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0719846") + max_val = float("0.0561072") + mean = float("-0.00174883") + std = float("0.010313") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [72] + dtype = "float32" + min_val = float("-0.529687") + max_val = float("0.199379") + mean = float("-0.259751") + std = float("0.171947") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [72] + dtype = "float32" + min_val = float("0.706439") + max_val = float("1.22884") + mean = float("0.981247") + std = float("0.110846") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [72] + dtype = "float32" + min_val = float("0.00767675") + max_val = float("0.0447227") + mean = float("0.0168985") + std = float("0.0076087") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [72] + dtype = "float32" + min_val = float("-0.130446") + max_val = float("0.0730463") + mean = float("0.00326599") + std = float("0.0351423") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0927827") + max_val = float("0.118671") + mean = float("-0.000436366") + std = float("0.00816753") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [72] + dtype = "float32" + min_val = float("-0.978319") + max_val = float("0.910199") + mean = float("-0.373348") + std = float("0.382498") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [72] + dtype = "float32" + min_val = float("0.598562") + max_val = float("1.22465") + mean = float("0.867317") + std = float("0.120439") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [72] + dtype = "float32" + min_val = float("0.00336641") + max_val = float("0.0470094") + mean = float("0.00884736") + std = float("0.00728432") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [72] + dtype = "float32" + min_val 
= float("-0.241371") + max_val = float("0.2411") + mean = float("-0.027551") + std = float("0.0896444") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0886619") + max_val = float("0.0880968") + mean = float("-0.00027759") + std = float("0.0091381") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [72] + dtype = "float32" + min_val = float("-1.03643") + max_val = float("0.87465") + mean = float("-0.149865") + std = float("0.508874") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [72] + dtype = "float32" + min_val = float("0.313694") + max_val = float("1.10515") + mean = float("0.655033") + std = float("0.174056") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [72] + dtype = "float32" + min_val = float("0.0053502") + max_val = float("0.0455901") + mean = float("0.0132061") + std = float("0.00705897") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [72] + dtype = "float32" + min_val = float("-0.14339") + max_val = float("0.123691") + mean = float("-0.0055304") + std = float("0.0504267") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [72, 336, 1, 1] + dtype = "float32" + min_val = float("-0.14681") + max_val = float("0.127726") + mean = float("-0.000474756") + std = float("0.0119385") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [72] + dtype = "float32" + min_val = float("-0.120348") + max_val = float("0.412377") + mean = float("0.165721") + std = float("0.110684") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [72] + dtype = "float32" + min_val = float("0.659788") + max_val = float("1.34775") + mean = float("0.850032") + std = float("0.109302") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [72] + dtype = "float32" + min_val = float("0.00159057") + max_val = float("0.0317446") + mean = float("0.00599932") + std = float("0.00340858") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [72] + dtype = "float32" + min_val = float("-0.0880301") + max_val = float("0.074714") + mean = float("-0.00829577") + std = float("0.030976") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [72, 336, 1, 1] + dtype = "float32" + min_val = float("-0.108096") + max_val = float("0.107109") + mean = float("7.87305e-05") + std = float("0.00828471") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [144] + dtype = "float32" + min_val = float("-0.476764") + max_val = float("0.168049") + mean = float("-0.0838514") + std = float("0.124575") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [144] + dtype = "float32" + min_val = float("0.617689") + max_val = float("1.54284") + mean = float("0.785115") + std = float("0.113178") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [144] + dtype = "float32" + min_val = float("0.00602933") + max_val = float("0.0596091") + mean = float("0.0124482") + std = float("0.00635021") + data = None + + +class Program_weight_tensor_parameter_203: + name = 
"parameter_203" + shape = [144] + dtype = "float32" + min_val = float("-0.109409") + max_val = float("0.0390456") + mean = float("-0.0261988") + std = float("0.0242817") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0819936") + max_val = float("0.0914399") + mean = float("-0.000602675") + std = float("0.00882576") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [288] + dtype = "float32" + min_val = float("-0.43398") + max_val = float("0.195139") + mean = float("-0.113323") + std = float("0.0837459") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [288] + dtype = "float32" + min_val = float("0.801149") + max_val = float("1.51552") + mean = float("1.01244") + std = float("0.0942505") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [288] + dtype = "float32" + min_val = float("0.00697899") + max_val = float("0.0426641") + mean = float("0.0125514") + std = float("0.00524946") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [288] + dtype = "float32" + min_val = float("-0.199677") + max_val = float("0.152914") + mean = float("-0.0401824") + std = float("0.042326") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.105612") + max_val = float("0.118139") + mean = float("-0.000716065") + std = float("0.00840771") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [144] + dtype = "float32" + min_val = float("-0.413817") + max_val = float("0.0254389") + mean = float("-0.106277") + std = float("0.066947") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [144] + dtype = "float32" + min_val = float("0.691501") + max_val = float("0.955612") + mean = float("0.877305") + std = float("0.0375328") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [144] + dtype = "float32" + min_val = float("0.00387808") + max_val = float("0.028644") + mean = float("0.00817049") + std = float("0.00276292") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [144] + dtype = "float32" + min_val = float("-0.0443968") + max_val = float("0.042331") + mean = float("-0.0131527") + std = float("0.0192302") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0418187") + max_val = float("0.0336758") + mean = float("-0.000853057") + std = float("0.00590709") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [144] + dtype = "float32" + min_val = float("-0.413817") + max_val = float("0.0254389") + mean = float("-0.106277") + std = float("0.066947") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [144] + dtype = "float32" + min_val = float("0.871658") + max_val = float("1.14399") + mean = float("0.991674") + std = float("0.0442735") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [144] + dtype = "float32" + min_val = float("0.00847572") + max_val = float("0.037261") + mean = float("0.0170862") + std = 
float("0.00540559") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [144] + dtype = "float32" + min_val = float("-0.0858501") + max_val = float("0.0752316") + mean = float("-0.0215803") + std = float("0.0267547") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0708304") + max_val = float("0.126419") + mean = float("-0.000129981") + std = float("0.00418282") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [144] + dtype = "float32" + min_val = float("-0.503198") + max_val = float("-0.00723544") + mean = float("-0.215277") + std = float("0.107333") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [144] + dtype = "float32" + min_val = float("0.837897") + max_val = float("1.40392") + mean = float("1.05848") + std = float("0.0870235") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [144] + dtype = "float32" + min_val = float("0.0204089") + max_val = float("0.114905") + mean = float("0.0339913") + std = float("0.0111585") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [144] + dtype = "float32" + min_val = float("-0.115567") + max_val = float("0.074236") + mean = float("-0.0297392") + std = float("0.0292001") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0638722") + max_val = float("0.094548") + mean = float("-0.00028317") + std = float("0.00489057") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [144] + dtype = "float32" + min_val = float("-0.443227") + max_val = float("0.0300293") + mean = float("-0.20916") + std = float("0.0836857") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [144] + dtype = "float32" + min_val = float("0.796667") + max_val = float("1.16669") + mean = float("0.944169") + std = float("0.0541301") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [144] + dtype = "float32" + min_val = float("0.0023532") + max_val = float("0.0139004") + mean = float("0.00492937") + std = float("0.00150275") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [144] + dtype = "float32" + min_val = float("-0.0396228") + max_val = float("0.0357555") + mean = float("-0.00883661") + std = float("0.0111889") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0446649") + max_val = float("0.0619167") + mean = float("-0.000724838") + std = float("0.00745171") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [144] + dtype = "float32" + min_val = float("-0.443227") + max_val = float("0.0300293") + mean = float("-0.20916") + std = float("0.0836857") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [144] + dtype = "float32" + min_val = float("0.855523") + max_val = float("1.20493") + mean = float("1.00232") + std = float("0.0650819") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [144] + dtype = "float32" + min_val = 
float("0.00909786") + max_val = float("0.0362024") + mean = float("0.0145287") + std = float("0.00368455") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [144] + dtype = "float32" + min_val = float("-0.0654417") + max_val = float("0.0471346") + mean = float("-0.0150238") + std = float("0.0228131") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0569246") + max_val = float("0.0681235") + mean = float("-0.000190838") + std = float("0.00429518") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [144] + dtype = "float32" + min_val = float("-0.619341") + max_val = float("-0.0112904") + mean = float("-0.271335") + std = float("0.107403") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [144] + dtype = "float32" + min_val = float("0.887194") + max_val = float("1.60529") + mean = float("1.03008") + std = float("0.0832458") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [144] + dtype = "float32" + min_val = float("0.0111345") + max_val = float("0.0421569") + mean = float("0.0185696") + std = float("0.00509881") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [144] + dtype = "float32" + min_val = float("-0.196773") + max_val = float("0.0810831") + mean = float("-0.0379885") + std = float("0.0331759") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0581918") + max_val = float("0.0721246") + mean = float("-0.000276431") + std = float("0.00532616") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [144] + dtype = "float32" + min_val = float("-0.67915") + max_val = float("0.300043") + mean = float("-0.249268") + std = float("0.140088") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [144] + dtype = "float32" + min_val = float("0.820156") + max_val = float("1.25926") + mean = float("1.02103") + std = float("0.0835064") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [144] + dtype = "float32" + min_val = float("0.00487426") + max_val = float("0.0195417") + mean = float("0.00798593") + std = float("0.00214001") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [144] + dtype = "float32" + min_val = float("-0.0685193") + max_val = float("0.111677") + mean = float("0.012203") + std = float("0.0263654") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [144, 672, 1, 1] + dtype = "float32" + min_val = float("-0.0518257") + max_val = float("0.0801678") + mean = float("-0.000299396") + std = float("0.00706162") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [144] + dtype = "float32" + min_val = float("-0.219529") + max_val = float("0.482816") + mean = float("0.00625415") + std = float("0.0996225") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [144] + dtype = "float32" + min_val = float("0.943085") + max_val = float("1.31189") + mean = float("1.06741") + std = float("0.0759901") + data = None + + +class 
Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [144] + dtype = "float32" + min_val = float("0.00430579") + max_val = float("0.0498874") + mean = float("0.00849452") + std = float("0.00419529") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [144] + dtype = "float32" + min_val = float("-0.0734484") + max_val = float("0.0501055") + mean = float("-0.00422856") + std = float("0.0238347") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [144, 672, 1, 1] + dtype = "float32" + min_val = float("-0.283127") + max_val = float("0.126053") + mean = float("-0.000223953") + std = float("0.00673773") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [288] + dtype = "float32" + min_val = float("-0.471007") + max_val = float("-0.0768897") + mean = float("-0.24229") + std = float("0.058604") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [288] + dtype = "float32" + min_val = float("0.693872") + max_val = float("1.04898") + mean = float("0.819337") + std = float("0.0537956") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [288] + dtype = "float32" + min_val = float("0.00693365") + max_val = float("0.0444148") + mean = float("0.0112387") + std = float("0.0036165") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [288] + dtype = "float32" + min_val = float("-0.0895123") + max_val = float("0.0744079") + mean = float("-0.0247192") + std = float("0.0210024") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0678711") + max_val = float("0.0563575") + mean = float("-0.000356836") + std = float("0.00561004") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [576] + dtype = "float32" + min_val = float("-0.22392") + max_val = float("0.236737") + mean = float("-0.12655") + std = float("0.0408273") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [576] + dtype = "float32" + min_val = float("0.899743") + max_val = float("1.38442") + mean = float("1.04482") + std = float("0.0445486") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [576] + dtype = "float32" + min_val = float("0.00534322") + max_val = float("0.0242086") + mean = float("0.00935106") + std = float("0.00235956") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [576] + dtype = "float32" + min_val = float("-0.11956") + max_val = float("0.0966195") + mean = float("-0.0385181") + std = float("0.0241664") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0794723") + max_val = float("0.109769") + mean = float("-0.00038882") + std = float("0.00481347") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [288] + dtype = "float32" + min_val = float("-0.23913") + max_val = float("0.268967") + mean = float("-0.0788435") + std = float("0.0530956") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [288] + dtype = "float32" + min_val = float("0.782606") + max_val = 
float("1.06451") + mean = float("0.949247") + std = float("0.0301924") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [288] + dtype = "float32" + min_val = float("0.00297758") + max_val = float("0.0373329") + mean = float("0.00987717") + std = float("0.0043303") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [288] + dtype = "float32" + min_val = float("-0.0628018") + max_val = float("0.0495571") + mean = float("-0.00912493") + std = float("0.0163036") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0359925") + max_val = float("0.0349088") + mean = float("-0.000152439") + std = float("0.00410565") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [288] + dtype = "float32" + min_val = float("-0.23913") + max_val = float("0.268967") + mean = float("-0.0788435") + std = float("0.0530956") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [288] + dtype = "float32" + min_val = float("0.873184") + max_val = float("1.24138") + mean = float("1.00729") + std = float("0.0416184") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [288] + dtype = "float32" + min_val = float("0.0144312") + max_val = float("0.267786") + mean = float("0.0465309") + std = float("0.0213648") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [288] + dtype = "float32" + min_val = float("-0.278993") + max_val = float("0.0513671") + mean = float("-0.0773057") + std = float("0.0501911") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0445249") + max_val = float("0.067184") + mean = float("-0.000174577") + std = float("0.00185068") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [288] + dtype = "float32" + min_val = float("-0.154725") + max_val = float("0.176405") + mean = float("-0.0451395") + std = float("0.0405051") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [288] + dtype = "float32" + min_val = float("0.906089") + max_val = float("1.22107") + mean = float("1.03843") + std = float("0.0584222") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [288] + dtype = "float32" + min_val = float("0.012856") + max_val = float("0.161846") + mean = float("0.0405644") + std = float("0.0172627") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [288] + dtype = "float32" + min_val = float("-0.188737") + max_val = float("0.116279") + mean = float("-0.0533434") + std = float("0.0531372") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0349189") + max_val = float("0.0583932") + mean = float("-0.000119797") + std = float("0.00228585") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [288] + dtype = "float32" + min_val = float("-0.22913") + max_val = float("0.0550435") + mean = float("-0.0679392") + std = float("0.0398991") + data = None + + +class Program_weight_tensor_parameter_276: + name = 
"parameter_276" + shape = [288] + dtype = "float32" + min_val = float("0.901237") + max_val = float("1.30891") + mean = float("1.04301") + std = float("0.0700643") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [288] + dtype = "float32" + min_val = float("0.0398981") + max_val = float("0.306777") + mean = float("0.105215") + std = float("0.0367725") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [288] + dtype = "float32" + min_val = float("-1.6438") + max_val = float("1.76347") + mean = float("-0.0742338") + std = float("0.501869") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [288, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0739741") + max_val = float("0.0690574") + mean = float("4.74459e-05") + std = float("0.00375253") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [288] + dtype = "float32" + min_val = float("-0.104232") + max_val = float("0.0384892") + mean = float("-0.0070986") + std = float("0.0181476") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [288] + dtype = "float32" + min_val = float("0.900639") + max_val = float("1.15952") + mean = float("0.963662") + std = float("0.0263273") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [288] + dtype = "float32" + min_val = float("0.00287171") + max_val = float("0.0106048") + mean = float("0.0049299") + std = float("0.00108551") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [288] + dtype = "float32" + min_val = float("-0.0988844") + max_val = float("0.0789783") + mean = float("-0.0485279") + std = float("0.0238655") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0304843") + max_val = float("0.0399859") + mean = float("-0.000813718") + std = float("0.00406643") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [288] + dtype = "float32" + min_val = float("-0.104232") + max_val = float("0.0384892") + mean = float("-0.0070986") + std = float("0.0181476") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [288] + dtype = "float32" + min_val = float("0.925735") + max_val = float("1.3113") + mean = float("1.02745") + std = float("0.0619514") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [288] + dtype = "float32" + min_val = float("0.00770713") + max_val = float("0.0389126") + mean = float("0.0204063") + std = float("0.00536096") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [288] + dtype = "float32" + min_val = float("-0.234289") + max_val = float("0.10658") + mean = float("-0.109382") + std = float("0.0469286") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0337794") + max_val = float("0.0422064") + mean = float("-0.000230922") + std = float("0.00195527") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [288] + dtype = "float32" + min_val = float("-0.223666") + max_val = float("0.0286354") + mean = float("-0.0472094") + std = 
float("0.0307449") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [288] + dtype = "float32" + min_val = float("0.929918") + max_val = float("1.3633") + mean = float("1.05315") + std = float("0.0557061") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [288] + dtype = "float32" + min_val = float("0.011915") + max_val = float("0.0576644") + mean = float("0.0215698") + std = float("0.00513117") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [288] + dtype = "float32" + min_val = float("-0.424449") + max_val = float("0.4419") + mean = float("-0.106397") + std = float("0.0754344") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.028501") + max_val = float("0.0366931") + mean = float("-0.000232809") + std = float("0.00238709") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [288] + dtype = "float32" + min_val = float("-0.21661") + max_val = float("0.119111") + mean = float("-0.0631797") + std = float("0.0519387") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [288] + dtype = "float32" + min_val = float("0.987879") + max_val = float("1.24617") + mean = float("1.05608") + std = float("0.0335288") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [288] + dtype = "float32" + min_val = float("0.0167741") + max_val = float("0.063874") + mean = float("0.0243677") + std = float("0.00554264") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [288] + dtype = "float32" + min_val = float("-0.171478") + max_val = float("0.180474") + mean = float("-0.0617349") + std = float("0.0437105") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [288, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0315675") + max_val = float("0.0606433") + mean = float("-0.000353396") + std = float("0.00416745") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [288] + dtype = "float32" + min_val = float("-0.0812952") + max_val = float("0.0354981") + mean = float("-0.00934253") + std = float("0.0173255") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [288] + dtype = "float32" + min_val = float("1.01432") + max_val = float("1.1416") + mean = float("1.08017") + std = float("0.0240114") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [288] + dtype = "float32" + min_val = float("0.0168412") + max_val = float("0.0350895") + mean = float("0.0225807") + std = float("0.00281515") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [288] + dtype = "float32" + min_val = float("-0.136888") + max_val = float("0.0259121") + mean = float("-0.0590415") + std = float("0.025695") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [288, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0220127") + max_val = float("0.0395105") + mean = float("-0.000346125") + std = float("0.00412748") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [768] + dtype = "float32" + min_val = 
float("-4.17201") + max_val = float("-0.103231") + mean = float("-2.23559") + std = float("0.546417") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [768] + dtype = "float32" + min_val = float("1.67639") + max_val = float("4.7074") + mean = float("3.3138") + std = float("0.317348") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [768] + dtype = "float32" + min_val = float("0.0031352") + max_val = float("0.0144649") + mean = float("0.00530109") + std = float("0.00129942") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [768] + dtype = "float32" + min_val = float("-0.102364") + max_val = float("0.140331") + mean = float("-0.0380092") + std = float("0.0222492") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [768, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0579352") + max_val = float("0.0710566") + mean = float("-0.00047174") + std = float("0.00454072") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [576] + dtype = "float32" + min_val = float("-0.0194227") + max_val = float("0.00104465") + mean = float("-0.00108579") + std = float("0.00283685") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.181942") + max_val = float("0.183689") + mean = float("-0.000333769") + std = float("0.00240042") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [288] + dtype = "float32" + min_val = float("-2.05493") + max_val = float("0.978") + mean = float("-0.272319") + std = float("0.353617") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [288] + dtype = "float32" + min_val = float("0.135081") + max_val = float("2.13114") + mean = float("0.537166") + std = float("0.306899") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [288] + dtype = "float32" + min_val = float("7.69798e-05") + max_val = float("0.00216471") + mean = float("0.000305136") + std = float("0.000236517") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [288] + dtype = "float32" + min_val = float("-0.0302303") + max_val = float("0.0570347") + mean = float("0.0184598") + std = float("0.0154999") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0362218") + max_val = float("0.039556") + mean = float("-0.000416452") + std = float("0.00309243") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [288] + dtype = "float32" + min_val = float("-2.05493") + max_val = float("0.978") + mean = float("-0.272319") + std = float("0.353617") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [288] + dtype = "float32" + min_val = float("0.485427") + max_val = float("2.77924") + mean = float("1.1636") + std = float("0.366821") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [288] + dtype = "float32" + min_val = float("0.000702807") + max_val = float("0.00951581") + mean = float("0.00190549") + std = float("0.00103207") + data = None + + +class Program_weight_tensor_parameter_320: 
+ name = "parameter_320" + shape = [288] + dtype = "float32" + min_val = float("-0.222082") + max_val = float("0.100345") + mean = float("0.0213653") + std = float("0.0261282") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0292711") + max_val = float("0.035843") + mean = float("-7.17498e-05") + std = float("0.00239497") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [288] + dtype = "float32" + min_val = float("-3.04804") + max_val = float("0.902721") + mean = float("-1.62463") + std = float("0.522032") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [288] + dtype = "float32" + min_val = float("0.412288") + max_val = float("1.85924") + mean = float("1.14297") + std = float("0.183586") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [288] + dtype = "float32" + min_val = float("0.0227719") + max_val = float("0.185075") + mean = float("0.0456612") + std = float("0.0139034") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [288] + dtype = "float32" + min_val = float("-1.05487") + max_val = float("0.350092") + mean = float("-0.153281") + std = float("0.112535") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0310577") + max_val = float("0.0584148") + mean = float("-0.00021026") + std = float("0.00299679") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [288] + dtype = "float32" + min_val = float("-2.14217") + max_val = float("1.60455") + mean = float("-0.41596") + std = float("0.434273") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [288] + dtype = "float32" + min_val = float("0.0226806") + max_val = float("2.20193") + mean = float("0.427501") + std = float("0.286031") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [288] + dtype = "float32" + min_val = float("1.90422e-05") + max_val = float("0.00251891") + mean = float("0.000473033") + std = float("0.00032871") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [288] + dtype = "float32" + min_val = float("-0.0304152") + max_val = float("0.0734532") + mean = float("0.0241604") + std = float("0.0155045") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0253061") + max_val = float("0.0316181") + mean = float("-0.000587413") + std = float("0.00266057") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [288] + dtype = "float32" + min_val = float("-2.14217") + max_val = float("1.60455") + mean = float("-0.41596") + std = float("0.434273") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [288] + dtype = "float32" + min_val = float("0.469583") + max_val = float("2.46723") + mean = float("1.14325") + std = float("0.313639") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [288] + dtype = "float32" + min_val = float("0.00155497") + max_val = float("0.0075944") + mean = float("0.00342237") + std = 
float("0.00109591") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [288] + dtype = "float32" + min_val = float("-0.166374") + max_val = float("0.142145") + mean = float("0.0385124") + std = float("0.0284128") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0300473") + max_val = float("0.0566634") + mean = float("-0.000115715") + std = float("0.00253983") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [288] + dtype = "float32" + min_val = float("-2.64172") + max_val = float("0.380203") + mean = float("-1.38767") + std = float("0.385623") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [288] + dtype = "float32" + min_val = float("0.558043") + max_val = float("1.71262") + mean = float("1.12622") + std = float("0.13931") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [288] + dtype = "float32" + min_val = float("0.0148874") + max_val = float("0.0607992") + mean = float("0.0260852") + std = float("0.00787152") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [288] + dtype = "float32" + min_val = float("-0.824043") + max_val = float("0.12343") + mean = float("-0.10388") + std = float("0.0750312") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0416163") + max_val = float("0.0562858") + mean = float("-0.000193988") + std = float("0.00280354") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [288] + dtype = "float32" + min_val = float("-3.62765") + max_val = float("2.97036") + mean = float("-0.711269") + std = float("0.761329") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [288] + dtype = "float32" + min_val = float("0.890678") + max_val = float("2.92256") + mean = float("1.6374") + std = float("0.311721") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [288] + dtype = "float32" + min_val = float("0.00239225") + max_val = float("0.00831343") + mean = float("0.00407401") + std = float("0.000948454") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [288] + dtype = "float32" + min_val = float("-0.182535") + max_val = float("0.115867") + mean = float("0.0486434") + std = float("0.031021") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0933941") + max_val = float("0.0753658") + mean = float("-0.000787655") + std = float("0.00567589") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [288] + dtype = "float32" + min_val = float("-2.8628") + max_val = float("0.479748") + mean = float("-0.682305") + std = float("0.57709") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [288] + dtype = "float32" + min_val = float("0.975929") + max_val = float("3.52368") + mean = float("1.80201") + std = float("0.36815") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [288] + dtype = "float32" + min_val = float("0.000804451") + 
max_val = float("0.00292711") + mean = float("0.00143956") + std = float("0.000325738") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [288] + dtype = "float32" + min_val = float("-0.0342743") + max_val = float("0.0590277") + mean = float("0.0195141") + std = float("0.0151883") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0373152") + max_val = float("0.0841779") + mean = float("-0.00034827") + std = float("0.004295") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [576] + dtype = "float32" + min_val = float("-2.52101") + max_val = float("0.857915") + mean = float("-0.842205") + std = float("0.39859") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [576] + dtype = "float32" + min_val = float("0.488026") + max_val = float("1.95305") + mean = float("0.895161") + std = float("0.179359") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [576] + dtype = "float32" + min_val = float("0.00558504") + max_val = float("0.0454555") + mean = float("0.0102845") + std = float("0.00312323") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [576] + dtype = "float32" + min_val = float("-0.170477") + max_val = float("0.187667") + mean = float("0.0352537") + std = float("0.0479894") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [576, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0260577") + max_val = float("0.0532348") + mean = float("-0.0001041") + std = float("0.00269908") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.62379") + max_val = float("1.27783") + mean = float("-1.09735") + std = float("0.546546") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.408486") + max_val = float("1.56047") + mean = float("1.0748") + std = float("0.155167") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("0.00140498") + max_val = float("0.00887108") + mean = float("0.00364747") + std = float("0.000973317") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.173561") + max_val = float("0.131263") + mean = float("-0.0405442") + std = float("0.0419344") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 288, 1, 1] + dtype = "float32" + min_val = float("-0.315079") + max_val = float("0.114758") + mean = float("-0.000694619") + std = float("0.00882253") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [288] + dtype = "float32" + min_val = float("-0.0129129") + max_val = float("0.00094621") + mean = float("-0.00331667") + std = float("0.00268758") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.299495") + max_val = float("0.215047") + mean = float("-0.00235806") + std = float("0.0062383") + data = None + + +class Program_weight_tensor_parameter_364: 
+ name = "parameter_364" + shape = [144] + dtype = "float32" + min_val = float("-1.89031") + max_val = float("0.682555") + mean = float("-0.250602") + std = float("0.42457") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [144] + dtype = "float32" + min_val = float("-3.39574e-06") + max_val = float("2.27363") + mean = float("0.459077") + std = float("0.465494") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [144] + dtype = "float32" + min_val = float("4.41383e-12") + max_val = float("0.00139051") + mean = float("0.000354333") + std = float("0.000234562") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [144] + dtype = "float32" + min_val = float("-0.0453964") + max_val = float("0.0337442") + mean = float("0.00432955") + std = float("0.0128423") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0348046") + max_val = float("0.0712641") + mean = float("-0.000374649") + std = float("0.00443917") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [144] + dtype = "float32" + min_val = float("-1.89031") + max_val = float("0.682555") + mean = float("-0.250602") + std = float("0.42457") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [144] + dtype = "float32" + min_val = float("0.418716") + max_val = float("3.73401") + mean = float("1.30895") + std = float("0.607907") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [144] + dtype = "float32" + min_val = float("0.00100253") + max_val = float("0.00898137") + mean = float("0.00451763") + std = float("0.00157334") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [144] + dtype = "float32" + min_val = float("-0.12794") + max_val = float("0.101272") + mean = float("0.0247504") + std = float("0.0355174") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0346061") + max_val = float("0.0470164") + mean = float("-0.000215707") + std = float("0.0041935") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [144] + dtype = "float32" + min_val = float("-2.66323") + max_val = float("0.362254") + mean = float("-1.26569") + std = float("0.512716") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [144] + dtype = "float32" + min_val = float("0.576539") + max_val = float("1.97695") + mean = float("1.18023") + std = float("0.187262") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [144] + dtype = "float32" + min_val = float("0.0377575") + max_val = float("0.21017") + mean = float("0.0696304") + std = float("0.0235545") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [144] + dtype = "float32" + min_val = float("-2.48186") + max_val = float("1.76252") + mean = float("-0.157181") + std = float("0.322131") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0497725") + max_val = float("0.0492881") + mean = float("-0.000281602") + std = 
float("0.00478039") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [144] + dtype = "float32" + min_val = float("-1.73734") + max_val = float("0.692917") + mean = float("-0.18539") + std = float("0.413661") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [144] + dtype = "float32" + min_val = float("0.000376458") + max_val = float("2.82934") + mean = float("0.328185") + std = float("0.356839") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [144] + dtype = "float32" + min_val = float("2.39692e-08") + max_val = float("0.0041016") + mean = float("0.000455802") + std = float("0.000479833") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [144] + dtype = "float32" + min_val = float("-0.030728") + max_val = float("0.0415527") + mean = float("0.00831477") + std = float("0.0125762") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.051179") + max_val = float("0.0576014") + mean = float("-0.000493177") + std = float("0.00427804") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [144] + dtype = "float32" + min_val = float("-1.73734") + max_val = float("0.692917") + mean = float("-0.18539") + std = float("0.413661") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [144] + dtype = "float32" + min_val = float("0.375666") + max_val = float("3.03489") + mean = float("1.09356") + std = float("0.399713") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [144] + dtype = "float32" + min_val = float("0.00208899") + max_val = float("0.00974547") + mean = float("0.00520382") + std = float("0.0013558") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [144] + dtype = "float32" + min_val = float("-0.0355158") + max_val = float("0.0761709") + mean = float("0.0259472") + std = float("0.0220464") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0387272") + max_val = float("0.0410833") + mean = float("-0.000193716") + std = float("0.00436167") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [144] + dtype = "float32" + min_val = float("-3.02892") + max_val = float("0.118332") + mean = float("-1.27789") + std = float("0.573834") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [144] + dtype = "float32" + min_val = float("0.676134") + max_val = float("1.94767") + mean = float("1.17746") + std = float("0.203827") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [144] + dtype = "float32" + min_val = float("0.0206076") + max_val = float("0.0888994") + mean = float("0.0394175") + std = float("0.0100858") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [144] + dtype = "float32" + min_val = float("-0.408893") + max_val = float("0.265643") + mean = float("-0.0431433") + std = float("0.13118") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = 
float("-0.0561149") + max_val = float("0.0750932") + mean = float("-0.00029987") + std = float("0.00486889") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [144] + dtype = "float32" + min_val = float("-1.35133") + max_val = float("0.821835") + mean = float("-0.13373") + std = float("0.357825") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [144] + dtype = "float32" + min_val = float("-1.05123e-08") + max_val = float("1.49414") + mean = float("0.18472") + std = float("0.153441") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [144] + dtype = "float32" + min_val = float("4.16705e-17") + max_val = float("0.00240365") + mean = float("0.000251688") + std = float("0.000227807") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [144] + dtype = "float32" + min_val = float("-0.047473") + max_val = float("0.0646974") + mean = float("0.00888849") + std = float("0.0132203") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0355803") + max_val = float("0.0307658") + mean = float("-0.000399692") + std = float("0.00394002") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [144] + dtype = "float32" + min_val = float("-1.35133") + max_val = float("0.821835") + mean = float("-0.13373") + std = float("0.357825") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [144] + dtype = "float32" + min_val = float("0.305448") + max_val = float("1.84848") + mean = float("0.89377") + std = float("0.301052") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [144] + dtype = "float32" + min_val = float("0.00208198") + max_val = float("0.00965758") + mean = float("0.00489852") + std = float("0.00141206") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [144] + dtype = "float32" + min_val = float("-0.0282734") + max_val = float("0.114052") + mean = float("0.0403195") + std = float("0.0275993") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0392374") + max_val = float("0.0493932") + mean = float("-0.000231543") + std = float("0.00438483") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [144] + dtype = "float32" + min_val = float("-2.73135") + max_val = float("0.0567831") + mean = float("-1.27362") + std = float("0.5112") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [144] + dtype = "float32" + min_val = float("0.62038") + max_val = float("1.55808") + mean = float("1.10846") + std = float("0.164403") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [144] + dtype = "float32" + min_val = float("0.0113622") + max_val = float("0.0371877") + mean = float("0.0225402") + std = float("0.00518884") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [144] + dtype = "float32" + min_val = float("-0.55178") + max_val = float("0.193574") + mean = float("-0.0453446") + std = float("0.110957") + data = None + + +class Program_weight_tensor_parameter_408: + name = 
"parameter_408" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0546125") + max_val = float("0.0718452") + mean = float("-0.000310657") + std = float("0.00493932") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [144] + dtype = "float32" + min_val = float("-1.92079") + max_val = float("0.644614") + mean = float("-0.125228") + std = float("0.358111") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [144] + dtype = "float32" + min_val = float("6.93466e-11") + max_val = float("1.7663") + mean = float("0.238668") + std = float("0.271207") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [144] + dtype = "float32" + min_val = float("3.34966e-19") + max_val = float("0.0106171") + mean = float("0.000715393") + std = float("0.00128468") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [144] + dtype = "float32" + min_val = float("-0.0415258") + max_val = float("0.130452") + mean = float("0.0113087") + std = float("0.0225185") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0911586") + max_val = float("0.0543513") + mean = float("-0.000545815") + std = float("0.00558185") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [144] + dtype = "float32" + min_val = float("-1.92079") + max_val = float("0.644614") + mean = float("-0.125228") + std = float("0.358111") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [144] + dtype = "float32" + min_val = float("0.30759") + max_val = float("1.61305") + mean = float("0.74094") + std = float("0.255891") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [144] + dtype = "float32" + min_val = float("0.00349199") + max_val = float("0.0167988") + mean = float("0.00831531") + std = float("0.00253196") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [144] + dtype = "float32" + min_val = float("-0.0930515") + max_val = float("0.160494") + mean = float("0.0426139") + std = float("0.0448304") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0865275") + max_val = float("0.0693328") + mean = float("-0.000270166") + std = float("0.00433576") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [144] + dtype = "float32" + min_val = float("-2.46604") + max_val = float("0.311463") + mean = float("-1.11261") + std = float("0.444526") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [144] + dtype = "float32" + min_val = float("0.645192") + max_val = float("1.43977") + mean = float("1.10398") + std = float("0.149059") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [144] + dtype = "float32" + min_val = float("0.00792923") + max_val = float("0.0410344") + mean = float("0.017982") + std = float("0.00594959") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [144] + dtype = "float32" + min_val = float("-0.402604") + max_val = float("0.181192") + mean = float("-0.0350284") + std = 
float("0.0901153") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.12551") + max_val = float("0.134437") + mean = float("-0.000230808") + std = float("0.00507475") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [144] + dtype = "float32" + min_val = float("-1.63695") + max_val = float("1.61843") + mean = float("0.00914071") + std = float("0.773499") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [144] + dtype = "float32" + min_val = float("0.446498") + max_val = float("1.44574") + mean = float("0.791631") + std = float("0.199487") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [144] + dtype = "float32" + min_val = float("0.00803846") + max_val = float("0.0483941") + mean = float("0.0185568") + std = float("0.00763975") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [144] + dtype = "float32" + min_val = float("-0.220237") + max_val = float("0.262194") + mean = float("-0.0333374") + std = float("0.0733832") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.131175") + max_val = float("0.101205") + mean = float("-0.0007374") + std = float("0.00944354") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [144] + dtype = "float32" + min_val = float("-3.94913") + max_val = float("1.48833") + mean = float("0.184232") + std = float("0.812302") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [144] + dtype = "float32" + min_val = float("0.632046") + max_val = float("5.81404") + mean = float("1.61523") + std = float("1.07746") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [144] + dtype = "float32" + min_val = float("0.0042963") + max_val = float("0.0490893") + mean = float("0.0122438") + std = float("0.00606181") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [144] + dtype = "float32" + min_val = float("-0.166662") + max_val = float("0.120288") + mean = float("-0.00996138") + std = float("0.0632725") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0767773") + max_val = float("0.128459") + mean = float("-0.000375959") + std = float("0.00898229") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [288] + dtype = "float32" + min_val = float("-3.32769") + max_val = float("1.68356") + mean = float("-0.218033") + std = float("0.689316") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [288] + dtype = "float32" + min_val = float("0.644859") + max_val = float("3.56312") + mean = float("1.11815") + std = float("0.319838") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [288] + dtype = "float32" + min_val = float("0.00576419") + max_val = float("0.0664151") + mean = float("0.0156741") + std = float("0.00811449") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [288] + dtype = "float32" + min_val = float("-0.326028") + 
max_val = float("0.217421") + mean = float("0.0293076") + std = float("0.0862963") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [288, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0789327") + max_val = float("0.0726046") + mean = float("-0.000113775") + std = float("0.00477469") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.19954") + max_val = float("1.29755") + mean = float("-0.833953") + std = float("0.661352") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.376407") + max_val = float("1.58497") + mean = float("0.954824") + std = float("0.21826") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("0.00123879") + max_val = float("0.0100177") + mean = float("0.00334585") + std = float("0.00120554") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-0.305203") + max_val = float("0.270228") + mean = float("-0.0509251") + std = float("0.0850592") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 144, 1, 1] + dtype = "float32" + min_val = float("-0.147457") + max_val = float("0.125776") + mean = float("-0.000856162") + std = float("0.0147595") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [144] + dtype = "float32" + min_val = float("-0.0128446") + max_val = float("0.00187189") + mean = float("-0.00516629") + std = float("0.00361178") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.231297") + max_val = float("0.222024") + mean = float("-0.0042663") + std = float("0.0116858") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [72] + dtype = "float32" + min_val = float("-1.59453") + max_val = float("0.880938") + mean = float("-0.105095") + std = float("0.484514") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [72] + dtype = "float32" + min_val = float("0.0969578") + max_val = float("2.4756") + mean = float("0.437551") + std = float("0.391975") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [72] + dtype = "float32" + min_val = float("0.000120925") + max_val = float("0.00213528") + mean = float("0.000683241") + std = float("0.000440938") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [72] + dtype = "float32" + min_val = float("-0.0372676") + max_val = float("0.0322531") + mean = float("0.000230328") + std = float("0.0144331") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0558609") + max_val = float("0.0913162") + mean = float("-0.000461775") + std = float("0.00827768") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [72] + dtype = "float32" + min_val = float("-1.59453") + max_val = float("0.880938") + mean = float("-0.105095") + std = float("0.484514") + data = None + + +class Program_weight_tensor_parameter_452: + 
name = "parameter_452" + shape = [72] + dtype = "float32" + min_val = float("0.351618") + max_val = float("4.88449") + mean = float("1.06806") + std = float("0.665872") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [72] + dtype = "float32" + min_val = float("0.00145915") + max_val = float("0.02274") + mean = float("0.00638047") + std = float("0.00330097") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [72] + dtype = "float32" + min_val = float("-0.0780665") + max_val = float("0.116729") + mean = float("0.0070792") + std = float("0.0386135") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0823999") + max_val = float("0.109201") + mean = float("-0.000361883") + std = float("0.00694661") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [72] + dtype = "float32" + min_val = float("-3.9802") + max_val = float("-0.178539") + mean = float("-1.14275") + std = float("0.563232") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [72] + dtype = "float32" + min_val = float("0.761114") + max_val = float("2.01918") + mean = float("1.00695") + std = float("0.201531") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [72] + dtype = "float32" + min_val = float("0.0233433") + max_val = float("0.211234") + mean = float("0.0493984") + std = float("0.0278563") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [72] + dtype = "float32" + min_val = float("-3.35838") + max_val = float("0.64805") + mean = float("-0.172059") + std = float("0.442294") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0675785") + max_val = float("0.0898198") + mean = float("-0.000478771") + std = float("0.00806884") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [72] + dtype = "float32" + min_val = float("-1.40989") + max_val = float("0.81614") + mean = float("-0.0675083") + std = float("0.400837") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [72] + dtype = "float32" + min_val = float("0.0959584") + max_val = float("1.55629") + mean = float("0.346688") + std = float("0.249308") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [72] + dtype = "float32" + min_val = float("0.000134639") + max_val = float("0.0029982") + mean = float("0.000643173") + std = float("0.000523054") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [72] + dtype = "float32" + min_val = float("-0.0562932") + max_val = float("0.0718023") + mean = float("0.00880714") + std = float("0.0284848") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.059374") + max_val = float("0.0468202") + mean = float("-0.00060072") + std = float("0.00841895") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [72] + dtype = "float32" + min_val = float("-1.40989") + max_val = float("0.81614") + mean = float("-0.0675083") + std = float("0.400837") + data = None 
+ + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [72] + dtype = "float32" + min_val = float("0.277615") + max_val = float("1.97277") + mean = float("0.861421") + std = float("0.350217") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [72] + dtype = "float32" + min_val = float("0.00246978") + max_val = float("0.0141959") + mean = float("0.00528593") + std = float("0.0022058") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [72] + dtype = "float32" + min_val = float("-0.214044") + max_val = float("0.181833") + mean = float("0.0221895") + std = float("0.0597059") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0447981") + max_val = float("0.040427") + mean = float("-0.000399935") + std = float("0.00701009") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [72] + dtype = "float32" + min_val = float("-2.73732") + max_val = float("1.92796") + mean = float("-1.14581") + std = float("0.582464") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [72] + dtype = "float32" + min_val = float("0.270445") + max_val = float("1.8443") + mean = float("0.891096") + std = float("0.213621") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [72] + dtype = "float32" + min_val = float("0.0120423") + max_val = float("0.0631383") + mean = float("0.0225737") + std = float("0.00801368") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [72] + dtype = "float32" + min_val = float("-0.570576") + max_val = float("0.529122") + mean = float("-0.0639033") + std = float("0.165627") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.066849") + max_val = float("0.0819528") + mean = float("-0.000501507") + std = float("0.00812549") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [72] + dtype = "float32" + min_val = float("-1.42901") + max_val = float("0.650624") + mean = float("-0.067694") + std = float("0.355642") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [72] + dtype = "float32" + min_val = float("0.0695595") + max_val = float("1.96318") + mean = float("0.298781") + std = float("0.2518") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [72] + dtype = "float32" + min_val = float("0.000165661") + max_val = float("0.00254699") + mean = float("0.000671496") + std = float("0.000381215") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [72] + dtype = "float32" + min_val = float("-0.0852135") + max_val = float("0.0739923") + mean = float("0.0145054") + std = float("0.0282282") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.061587") + max_val = float("0.0555039") + mean = float("-0.00111145") + std = float("0.00915049") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [72] + dtype = "float32" + min_val = float("-1.42901") + max_val = float("0.650624") + mean = 
float("-0.067694") + std = float("0.355642") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [72] + dtype = "float32" + min_val = float("0.228885") + max_val = float("2.65725") + mean = float("0.654562") + std = float("0.342676") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [72] + dtype = "float32" + min_val = float("0.00221666") + max_val = float("0.0113837") + mean = float("0.00528644") + std = float("0.00192438") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [72] + dtype = "float32" + min_val = float("-0.0689213") + max_val = float("0.134873") + mean = float("0.0202346") + std = float("0.0478195") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0502643") + max_val = float("0.0385429") + mean = float("-0.000381721") + std = float("0.00699717") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [72] + dtype = "float32" + min_val = float("-1.72935") + max_val = float("1.81323") + mean = float("-0.953972") + std = float("0.484557") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [72] + dtype = "float32" + min_val = float("0.291338") + max_val = float("1.67102") + mean = float("0.890264") + std = float("0.154972") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [72] + dtype = "float32" + min_val = float("0.00650387") + max_val = float("0.0314521") + mean = float("0.0153011") + std = float("0.00489433") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [72] + dtype = "float32" + min_val = float("-0.371426") + max_val = float("0.24566") + mean = float("-0.0388813") + std = float("0.125483") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0883656") + max_val = float("0.0854602") + mean = float("-0.000434419") + std = float("0.00817123") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [72] + dtype = "float32" + min_val = float("-0.72078") + max_val = float("0.53883") + mean = float("-0.0116229") + std = float("0.315004") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [72] + dtype = "float32" + min_val = float("0.0630193") + max_val = float("1.13622") + mean = float("0.304108") + std = float("0.18516") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [72] + dtype = "float32" + min_val = float("0.000596299") + max_val = float("0.0104308") + mean = float("0.00238262") + std = float("0.00179682") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [72] + dtype = "float32" + min_val = float("-0.0220883") + max_val = float("0.0767462") + mean = float("0.00800126") + std = float("0.0185227") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.10925") + max_val = float("0.0549459") + mean = float("-0.00157008") + std = float("0.0103222") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [72] + dtype = "float32" + min_val = 
float("-0.72078") + max_val = float("0.53883") + mean = float("-0.0116229") + std = float("0.315004") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [72] + dtype = "float32" + min_val = float("0.195354") + max_val = float("1.27903") + mean = float("0.580385") + std = float("0.250193") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [72] + dtype = "float32" + min_val = float("0.00628037") + max_val = float("0.0320368") + mean = float("0.0150653") + std = float("0.00543441") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [72] + dtype = "float32" + min_val = float("-0.173879") + max_val = float("0.126666") + mean = float("0.0195401") + std = float("0.0459446") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0489714") + max_val = float("0.0583292") + mean = float("-0.000497285") + std = float("0.00710222") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [72] + dtype = "float32" + min_val = float("-2.83412") + max_val = float("0.704945") + mean = float("-0.732679") + std = float("0.560659") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [72] + dtype = "float32" + min_val = float("0.470798") + max_val = float("3.05163") + mean = float("1.0155") + std = float("0.305246") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [72] + dtype = "float32" + min_val = float("0.00353406") + max_val = float("0.0255573") + mean = float("0.0103341") + std = float("0.00485984") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [72] + dtype = "float32" + min_val = float("-0.318927") + max_val = float("0.403846") + mean = float("-0.0481203") + std = float("0.111077") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.140292") + max_val = float("0.149074") + mean = float("-0.000235178") + std = float("0.00845115") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [72] + dtype = "float32" + min_val = float("-3.74716") + max_val = float("2.00205") + mean = float("0.294505") + std = float("0.800208") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [72] + dtype = "float32" + min_val = float("0.251599") + max_val = float("2.95243") + mean = float("0.505656") + std = float("0.340486") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [72] + dtype = "float32" + min_val = float("0.00662379") + max_val = float("0.0404531") + mean = float("0.0152965") + std = float("0.00696086") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [72] + dtype = "float32" + min_val = float("-0.337189") + max_val = float("0.326696") + mean = float("-0.0315403") + std = float("0.110722") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [72, 144, 1, 1] + dtype = "float32" + min_val = float("-0.140141") + max_val = float("0.0942677") + mean = float("-0.00109115") + std = float("0.0156176") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" 
+ shape = [72] + dtype = "float32" + min_val = float("-5.40533") + max_val = float("2.19476") + mean = float("0.480097") + std = float("1.18405") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [72] + dtype = "float32" + min_val = float("0.407901") + max_val = float("7.10951") + mean = float("1.68326") + std = float("1.34462") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [72] + dtype = "float32" + min_val = float("0.00341379") + max_val = float("0.0737263") + mean = float("0.0131538") + std = float("0.0117571") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [72] + dtype = "float32" + min_val = float("-0.254707") + max_val = float("0.243852") + mean = float("0.0114279") + std = float("0.121176") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [72, 144, 1, 1] + dtype = "float32" + min_val = float("-0.156741") + max_val = float("0.189555") + mean = float("-0.000167284") + std = float("0.0152826") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [144] + dtype = "float32" + min_val = float("-2.47799") + max_val = float("2.78512") + mean = float("-0.0256035") + std = float("0.855982") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [144] + dtype = "float32" + min_val = float("0.475945") + max_val = float("3.78899") + mean = float("0.948111") + std = float("0.382821") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [144] + dtype = "float32" + min_val = float("0.00505316") + max_val = float("0.222987") + mean = float("0.0272953") + std = float("0.0270536") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [144] + dtype = "float32" + min_val = float("-0.374849") + max_val = float("0.421126") + mean = float("-0.040238") + std = float("0.109009") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [144, 96, 3, 3] + dtype = "float32" + min_val = float("-0.107682") + max_val = float("0.0970517") + mean = float("-0.000288181") + std = float("0.00781599") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.31785") + max_val = float("1.2856") + mean = float("-0.536223") + std = float("0.66831") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.363377") + max_val = float("2.87595") + mean = float("0.945544") + std = float("0.309798") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("0.000401939") + max_val = float("0.0066261") + mean = float("0.00193929") + std = float("0.00108543") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-0.276558") + max_val = float("0.293873") + mean = float("0.032138") + std = float("0.0824672") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 72, 1, 1] + dtype = "float32" + min_val = float("-0.220262") + max_val = float("0.154706") + mean = float("-0.000674063") + std = float("0.0231789") + data = None + + +class 
Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [72] + dtype = "float32" + min_val = float("-0.0194254") + max_val = float("-0.00185158") + mean = float("-0.00904749") + std = float("0.00476033") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.365295") + max_val = float("0.215913") + mean = float("-0.0105827") + std = float("0.0212646") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [36, 36, 1, 1] + dtype = "float32" + min_val = float("-0.13914") + max_val = float("0.0710278") + mean = float("-0.00122495") + std = float("0.0155312") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.0864069") + max_val = float("0.0715444") + mean = float("-0.000368309") + std = float("0.0121821") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.139936") + max_val = float("0.126155") + mean = float("-0.000357288") + std = float("0.0135344") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [36] + dtype = "float32" 
+ min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [36, 36, 1, 1] + dtype = "float32" + min_val = float("-0.101119") + max_val = float("0.078908") + mean = float("-0.00143404") + std = float("0.0191301") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.0981736") + max_val = float("0.072635") + mean = float("-0.000893312") + std = float("0.0128138") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.138589") + max_val = float("0.124213") + mean = float("-0.000371635") + std = float("0.0148638") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [36, 72, 1, 1] + dtype = "float32" + min_val = float("-0.174523") + max_val = float("0.146394") + mean = float("-0.0022875") + std = float("0.0249619") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [36] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [36, 72, 1, 1] + dtype = "float32" + min_val = float("-0.141728") + max_val = float("0.14303") + mean = float("-0.000661605") + std = float("0.0238487") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [72] + dtype = "float32" + min_val = float("-1.42602") + max_val = float("3.23089") + mean = float("0.695169") + std = float("1.23395") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [72] + dtype = "float32" + min_val = float("1.06748") + max_val = float("4.16245") + mean = float("1.98371") + std = float("0.773369") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [72] + dtype = "float32" + min_val = float("0.463827") + max_val = float("20.7924") + mean = float("2.40174") + std = float("2.50279") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [72] + dtype = "float32" + min_val = float("-1.94543") + max_val = float("3.15499") + mean = float("-0.190338") + std = float("0.839007") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [72, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0880297") + max_val = float("0.132734") + mean = float("-0.000246572") + std = float("0.0130906") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [48, 24, 3, 3] + dtype = "float32" + min_val = float("-0.190405") + max_val = float("0.141206") + mean = float("-0.000379777") + std = float("0.021606") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [24] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.259253") + max_val = float("0.269373") + mean = float("-0.000405256") + std = float("0.03018") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-0.195438") + max_val = float("0.245061") + mean = float("-0.000197873") + std = float("0.0621767") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..90e73cbfa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +0a0002dc4306801597d8ccf7195c18d5e87a708a3d9df65495993580fffe3755 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/input_meta.py new file mode 100644 index 000000000..875b4336f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 484] + dtype = "float32" + min_val = float("5.90699") + max_val = float("811.08") + mean = float("296.32") + std = float("131.267") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1936] + dtype = "float32" + min_val = float("1.87254") + max_val = float("822.389") + mean = float("296.473") + std = float("131.407") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 7744] + dtype = "float32" + min_val = float("0.279916") + max_val = float("828.044") + mean = float("296.512") + std = float("131.442") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/model.py new file mode 100644 index 000000000..1ec00353c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/model.py @@ -0,0 +1,131 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + 
super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("9"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x484xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_0, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_0 = paddle._C_ops.scale(topk_1, full_1, float("0"), True) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("484"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x484xf32) <- (8x1x9xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_2, topk_1.dtype), full_2 + ) + del full_2, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-2] + + # pd_op.sum: (8x1x484xf32) <- (8x1x9x484xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_0, None, False) + del one_hot_0 + + # pd_op.multiply: (8x1x484xf32) <- (8x1x484xf32, 8x1x1xf32) + multiply_0 = paddle._C_ops.multiply(sum_0, data_3) + del sum_0 + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x1936xf32, 1xi32) + topk_2, topk_3 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_1, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_1 = paddle._C_ops.scale(topk_3, full_1, float("484"), True) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1936"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x1936xf32) <- (8x1x9xi64, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + topk_3 % paddle.cast(full_3, topk_3.dtype), full_3 + ) + del full_3, topk_3 + + # pd_op.sum: (8x1x1936xf32) <- (8x1x9x1936xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_1, full_int_array_0, None, False) + del one_hot_1 + + # pd_op.multiply: (8x1x1936xf32) <- (8x1x1936xf32, 8x1x1xf32) + multiply_1 = paddle._C_ops.multiply(sum_1, data_3) + del sum_1 + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x7744xf32, 1xi32) + topk_4, topk_5 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_2, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_2, full_0 + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_2 = paddle._C_ops.scale(topk_5, full_1, float("2420"), True) + del full_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("7744"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x7744xf32) <- (8x1x9xi64, 1xi32) + one_hot_2 = paddle._C_ops.one_hot( + topk_5 % paddle.cast(full_4, topk_5.dtype), full_4 + ) + del full_4, topk_5 + + # pd_op.sum: (8x1x7744xf32) <- (8x1x9x7744xf32, 1xi64) + sum_2 = paddle._C_ops.sum(one_hot_2, full_int_array_0, None, False) + del full_int_array_0, one_hot_2 + + # pd_op.multiply: (8x1x7744xf32) <- (8x1x7744xf32, 8x1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_2, data_3) + del data_3, sum_2 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x484xf32, 8x1x1936xf32, 8x1x7744xf32]) <- (8x1x484xf32, 8x1x1936xf32, 8x1x7744xf32) + combine_0 = [multiply_0, multiply_1, multiply_2] + 
del multiply_0, multiply_1, multiply_2 + + # pd_op.concat: (8x1x10164xf32) <- ([8x1x484xf32, 8x1x1936xf32, 8x1x7744xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_5) + del combine_0 + + # builtin.combine: ([8x1x9xi64, 8x1x9xi64, 8x1x9xi64]) <- (8x1x9xi64, 8x1x9xi64, 8x1x9xi64) + combine_1 = [scale_0, scale_1, scale_2] + del scale_0, scale_1, scale_2 + + # pd_op.concat: (8x1x27xi64) <- ([8x1x9xi64, 8x1x9xi64, 8x1x9xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_5) + del combine_1, full_5 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt new file mode 100644 index 000000000..06c2e2c81 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt @@ -0,0 +1 @@ +e4523a4b80f6d91bda08e60737d8d55feb6499fbd5650e10a21e653f89b2a688 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/input_meta.py new file mode 100644 index 000000000..b50ace722 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 10164, 68] + dtype = "float32" + min_val = float("-7.06467") + max_val = float("15.2111") + mean = float("2.71498e-05") + std = float("1.60341") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [10164, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("700.0") + mean = float("352.0") + std = float("203.197") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [10164, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py new file mode 100644 index 000000000..bc3dd4f0e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py @@ -0,0 +1,158 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2): + # pd_op.divide: (10164x2xf32) <- (10164x2xf32, 10164x1xf32) + divide_0 = paddle._C_ops.divide(data_1, data_2) + del data_1 + + # pd_op.shape64: (3xi64) <- (8x10164x68xf32) + shape64_0 = paddle._C_ops.shape64(data_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + 
slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (8x10164x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x10164x2xf32) <- (-1x-1x2xf32, 10164x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x10164x2xf32) <- (-1x-1x2xf32, 10164x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x10164x2xf32, -1x10164x2xf32]) <- (-1x10164x2xf32, -1x10164x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x10164x4xf32) <- ([-1x10164x2xf32, -1x10164x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (-1x10164x4xf32) <- (-1x10164x4xf32) + share_data__0 = concat_0.detach() + + # pd_op.multiply: (-1x10164x4xf32) <- (-1x10164x4xf32, 10164x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, data_2) 
+ del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_2, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__0, + softmax_0, + split_1, + transpose_0, + ) + + return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py new file mode 100644 index 000000000..42b68473c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 10164] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("-8.4998") + max_val = float("93.297") + mean = float("38.2803") + std = float("25.8039") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("2.52804") + max_val = float("79.2") + mean = float("34.6196") + std = float("18.1525") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 10164, 4] + dtype = "float32" + max_val = float("0.941962") + mean = float("0.000134148") + std = float("0.0107789") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [43.6314] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 10164, 68] + dtype = "float32" + min_val = float("-7.06467") + max_val = float("15.2111") + mean = float("2.71498e-05") + std = float("1.60341") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [10164, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("87.5") + mean = float("38.2381") + std = float("25.2012") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py new file mode 100644 index 000000000..adbc73554 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py @@ -0,0 +1,509 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, 
data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (8x10164xi32) <- (8x10164xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (8x10164x1xi32) <- (8x10164xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (8x10164x4xi32) <- (8x10164x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (8x10164x4xb) <- (8x10164x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (8x10164x4xf32, 8x10164x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (8x10164x4xf32, 8x10164x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (8x10164xf32) <- (8x10164x4xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (8x10164xf32, 8x10164xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: 
(-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: 
(-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (8x10164x1xb) <- (8x10164xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (8x10164x1xi32) <- (8x10164x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (8x10164x68xi32) <- (8x10164x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (8x10164x68xb) <- (8x10164x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (8x10164x68xf32, 8x10164x68xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([8x10164x2xf32, 8x10164x2xf32]) <- (8x10164x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (8x10164x2xf32, 8x10164x2xf32) <- ([8x10164x2xf32, 8x10164x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (8x10164x2xf32) <- (10164x2xf32, 8x10164x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (8x10164x2xf32) <- (8x10164x2xf32, 10164x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x10164x2xf32, 8x10164x2xf32]) <- (8x10164x2xf32, 8x10164x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (8x10164x4xf32) <- ([8x10164x2xf32, 8x10164x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x10164x4xf32) <- (8x10164x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # pd_op.masked_select: (-1xf32) <- (8x10164x4xf32, 8x10164x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # 
pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + 
masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..504c9bb0f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +ecf678dd2052f1907293d06bf3fd26d9af9924bd94292392430215512db3f6b4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/input_meta.py new file mode 100644 index 000000000..d835b4269 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [10164, 4] + dtype = "float32" + min_val = float("-64.0") + max_val = float("768.0") + mean = float("352.0") + std = float("205.435") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 174.715, + 140.8, + 405.956, + 457.6, + 375.985, + 345.193, + 411.639, + 372.906, + 317.49, + 292.0, + 450.008, + 388.0, + 287.439, + 452.211, + 340.211, + 490.947, + 352.0, + 296.267, + 584.17, + 384.267, + 222.933, + 194.723, + 332.444, + 275.609, + 80.8974, + 117.694, + 116.531, + 143.688, + 124.847, + 201.813, + 433.498, + 633.6, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/model.py new file mode 100644 index 000000000..d9f7c4e5e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/model.py @@ -0,0 +1,263 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (8x1x4xf32) <- (8x4xf32, 
1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.unsqueeze: (1x10164x4xf32) <- (10164x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_0, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] + + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 + + # pd_op.slice: (1x10164x2xf32) <- (1x10164x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x10164x2xf32) <- (1x10164x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 + + # pd_op.maximum: (8x10164x2xf32) <- (8x1x2xf32, 1x10164x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x10164x2xf32) <- (8x1x2xf32, 1x10164x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x10164x2xf32) <- (8x10164x2xf32, 8x10164x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x10164x2xf32) <- (8x10164x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.prod: (8x10164xf32) <- (8x10164x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 + + # pd_op.subtract: (8x1x2xf32) <- (8x1x2xf32, 8x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x2xf32) <- (8x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (8x1xf32) <- (8x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 + + # pd_op.subtract: (1x10164x2xf32) <- (1x10164x2xf32, 1x10164x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x10164x2xf32) <- (1x10164x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (1x10164xf32) <- (1x10164x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 + + # pd_op.add: (8x10164xf32) <- (8x1xf32, 1x10164xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x10164xf32) <- (8x10164xf32, 8x10164xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x10164xf32) <- (8x10164xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-10"), True) + del full_2, subtract_3 + + # 
pd_op.divide: (8x10164xf32) <- (8x10164xf32, 8x10164xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [8, -1, 10164] + + # pd_op.reshape: (8x1x10164xf32) <- (8x10164xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0 + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_3, float("0"), True) + del add_1 + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del reshape_2 + + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_2, full_3, float("0"), True) + del add_2 + + # builtin.combine: ([8xf32, 8xf32]) <- (8xf32, 8xf32) + combine_0 = [scale_1, scale_2] + del scale_1, scale_2 + + # pd_op.stack: (8x2xf32) <- ([8xf32, 8xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.unsqueeze: (8x1x2xf32) <- (8x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) + del stack_0 + + # pd_op.slice: (10164xf32) <- (10164x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (10164xf32) <- (10164x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_0, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (10164xf32) <- (10164xf32, 10164xf32) + add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.scale: (10164xf32) <- (10164xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_3, float("0"), True) + del add_3 + + # pd_op.slice: (10164xf32) <- (10164x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + del full_int_array_1, full_int_array_3 + + # pd_op.slice: (10164xf32) <- (10164x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_0, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del data_0, full_int_array_7, full_int_array_8 + + # pd_op.add: (10164xf32) <- (10164xf32, 10164xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.scale: (10164xf32) <- (10164xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_3, float("0"), True) + del add_4, full_3 + + # builtin.combine: ([10164xf32, 10164xf32]) <- (10164xf32, 10164xf32) + combine_1 = [scale_3, scale_4] + del scale_3, scale_4 + + # pd_op.stack: (10164x2xf32) <- ([10164xf32, 10164xf32]) + stack_1 = paddle._C_ops.stack(combine_1, 
-1) + del combine_1 + + # pd_op.unsqueeze: (1x10164x2xf32) <- (10164x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) + del full_int_array_2 + + # pd_op.subtract: (8x10164x2xf32) <- (8x1x2xf32, 1x10164x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 + + # pd_op.p_norm: (8x10164xf32) <- (8x10164x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False + ) + del subtract_4 + + # pd_op.reshape: (8x1x10164xf32) <- (8x10164xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) + del full_int_array_6, p_norm_0, stack_1 + + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py new file mode 100644 index 000000000..d14444825 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [22] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [22] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [44] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [44] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [88] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [88] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 576, 22, 22] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.3441") + mean = float("0.308556") + std = float("0.659097") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 288, 44, 44] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("12.5041") + mean = float("0.419507") + std = float("0.74104") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 144, 88, 88] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("17.9308") + mean = float("0.521464") + std = float("0.797879") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py new file mode 100644 index 000000000..4f3efca14 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- 
(-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + 
scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = 
paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (8x576x1x1xf32) <- (8x576x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + 
True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x576x1x1xf32) <- (8x576x1x1xf32, 576x576x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 8x576x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 8x576x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x576x-1x-1xf32, 4x576x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (8x576x1x1xf32) <- (8x576x1x1xf32, 576x576x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 8x576x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x576x-1x-1xf32, 68x576x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (8x288x1x1xf32) <- (8x288x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 8x288x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 8x288x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x288x-1x-1xf32, 4x288x3x3xf32) + 
conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 8x288x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x288x-1x-1xf32, 68x288x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (8x144x1x1xf32) <- (8x144x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x144x1x1xf32) 
<- (144xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 8x144x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 8x144x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x144x-1x-1xf32, 4x144x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 8x144x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # 
pd_op.swish: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x144x-1x-1xf32, 68x144x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32]) <- (8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (8x-1x4xf32) <- ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32]) <- (8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x-1x68xf32) <- ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + 
transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py new file mode 100644 index 000000000..0fbfee683 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0141298") + max_val = float("0.0241404") + mean = float("6.51635e-08") + std = float("0.00670344") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 144, 3, 3] + dtype = "float32" + min_val = float("-0.159403") + max_val = float("0.18767") + mean = float("6.14746e-08") + std = float("0.00826349") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [144] + dtype = "float32" + min_val = float("-0.104117") + max_val = float("0.334805") + mean = float("0.0803525") + std = float("0.0949004") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [144] + dtype = "float32" + min_val = float("0.833704") + max_val = float("2.14069") + mean = float("1.40199") + std = float("0.259198") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [144] + dtype = "float32" + min_val = float("0.000157642") + max_val = float("0.00219112") + mean = float("0.000569831") + std = float("0.000354466") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [144] + dtype = "float32" + min_val = float("-0.0501844") + max_val = float("0.038398") + mean = float("-0.00743265") + std = float("0.0175874") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0724953") + max_val = float("0.0965259") + mean = float("-0.000264722") + std = float("0.00737951") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [144] + dtype = "float32" + min_val = float("-0.00636108") + max_val = float("0.00678391") + mean = float("-0.000173716") + std = float("0.00320574") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0117989") + max_val = float("0.015335") + mean = float("-0.000124252") + std = float("0.00220502") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 144, 3, 3] + dtype = "float32" + min_val = float("-7.87108e-06") + max_val = float("0.000443999") + mean = float("1.5363e-05") + std = float("3.18915e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [144] + dtype = "float32" + min_val = float("-0.64158") + max_val = float("1.51361") + mean = float("0.436643") + std = float("0.396591") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [144] + dtype = "float32" + min_val = float("0.912817") + max_val = float("2.11091") + mean = float("1.38863") + std = float("0.197354") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [144] + dtype = "float32" + min_val = 
float("0.000201053") + max_val = float("0.00337522") + mean = float("0.000771889") + std = float("0.000504667") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [144] + dtype = "float32" + min_val = float("-0.2459") + max_val = float("0.0358757") + mean = float("-0.0278203") + std = float("0.0404512") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0649436") + max_val = float("0.0801723") + mean = float("-0.000599465") + std = float("0.0088557") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [144] + dtype = "float32" + min_val = float("-0.00555943") + max_val = float("0.0056399") + mean = float("-0.000279704") + std = float("0.00214688") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0293142") + max_val = float("0.0536777") + mean = float("-5.72243e-05") + std = float("0.00245901") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.0049597") + max_val = float("0.0272289") + mean = float("6.11326e-08") + std = float("0.00561272") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 288, 3, 3] + dtype = "float32" + min_val = float("-0.111167") + max_val = float("0.12918") + mean = float("3.28073e-08") + std = float("0.00572785") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [288] + dtype = "float32" + min_val = float("-0.0173529") + max_val = float("0.146767") + mean = float("0.0535169") + std = float("0.0321121") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [288] + dtype = "float32" + min_val = float("1.01431") + max_val = float("1.4495") + mean = float("1.22632") + std = float("0.0811625") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [288] + dtype = "float32" + min_val = float("9.53404e-05") + max_val = float("0.00409233") + mean = float("0.000467458") + std = float("0.000455701") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [288] + dtype = "float32" + min_val = float("-0.0517031") + max_val = float("0.0162222") + mean = float("-0.00786597") + std = float("0.00921386") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0594582") + max_val = float("0.0735971") + mean = float("-0.00013723") + std = float("0.00364254") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [288] + dtype = "float32" + min_val = float("-0.00314669") + max_val = float("0.00620898") + mean = float("2.48414e-05") + std = float("0.00194774") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.00388461") + max_val = float("0.00841306") + mean = float("-1.87953e-05") + std = float("0.000932022") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + 
shape = [4, 288, 3, 3] + dtype = "float32" + min_val = float("-6.48766e-06") + max_val = float("0.000140856") + mean = float("6.52221e-06") + std = float("9.39411e-06") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [288] + dtype = "float32" + min_val = float("-0.27006") + max_val = float("0.770798") + mean = float("0.311538") + std = float("0.171771") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [288] + dtype = "float32" + min_val = float("0.991505") + max_val = float("1.72132") + mean = float("1.2558") + std = float("0.0945899") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [288] + dtype = "float32" + min_val = float("0.000219746") + max_val = float("0.0054385") + mean = float("0.000809585") + std = float("0.000639851") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [288] + dtype = "float32" + min_val = float("-0.130478") + max_val = float("0.0695712") + mean = float("-0.0265584") + std = float("0.0287591") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0480156") + max_val = float("0.0592741") + mean = float("-0.000430968") + std = float("0.00425043") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [288] + dtype = "float32" + min_val = float("-0.00285738") + max_val = float("0.00720467") + mean = float("-7.33536e-05") + std = float("0.0011726") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0122627") + max_val = float("0.0161886") + mean = float("-1.72547e-05") + std = float("0.000995486") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00365417") + max_val = float("0.0135851") + mean = float("3.44444e-08") + std = float("0.00376477") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 576, 3, 3] + dtype = "float32" + min_val = float("-0.0663088") + max_val = float("0.0674867") + mean = float("1.74732e-08") + std = float("0.00359868") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [576] + dtype = "float32" + min_val = float("-0.0421788") + max_val = float("0.113513") + mean = float("0.0222551") + std = float("0.0258343") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [576] + dtype = "float32" + min_val = float("1.05067") + max_val = float("1.39463") + mean = float("1.14799") + std = float("0.042853") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [576] + dtype = "float32" + min_val = float("4.89493e-05") + max_val = float("0.00264842") + mean = float("0.000245751") + std = float("0.000220246") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [576] + dtype = "float32" + min_val = float("-0.0344271") + max_val = float("0.0201663") + mean = float("-0.00575569") + std = float("0.00544377") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0385787") + max_val = float("0.0417201") + mean = float("-6.1183e-05") + std = 
float("0.00176385") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [576] + dtype = "float32" + min_val = float("-0.00434691") + max_val = float("0.00341236") + mean = float("0.000100391") + std = float("0.00100061") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0035525") + max_val = float("0.00417295") + mean = float("2.83396e-05") + std = float("0.00036474") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 576, 3, 3] + dtype = "float32" + min_val = float("-8.78041e-06") + max_val = float("0.000132008") + mean = float("4.36014e-06") + std = float("8.07886e-06") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [576] + dtype = "float32" + min_val = float("-0.248224") + max_val = float("0.371203") + mean = float("0.155358") + std = float("0.0834839") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [576] + dtype = "float32" + min_val = float("1.02385") + max_val = float("1.42504") + mean = float("1.13342") + std = float("0.0514955") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [576] + dtype = "float32" + min_val = float("0.000119442") + max_val = float("0.00279863") + mean = float("0.000709705") + std = float("0.00050333") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [576] + dtype = "float32" + min_val = float("-0.0735115") + max_val = float("0.0820593") + mean = float("-0.0216337") + std = float("0.0158423") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0599145") + max_val = float("0.0355292") + mean = float("-0.000237247") + std = float("0.00192816") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [576] + dtype = "float32" + min_val = float("-0.00772796") + max_val = float("0.00635587") + mean = float("-2.65035e-05") + std = float("0.000684865") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0265895") + max_val = float("0.0452734") + mean = float("-1.04734e-06") + std = float("0.000523833") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..df6fb86a6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py new file mode 100644 index 000000000..23b4d80c0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("2.79397e-09") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 10164] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 10164, 4] + dtype = "float32" + max_val = float("0.941962") + mean = float("0.000134148") + std = float("0.0107789") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py new file mode 100644 index 000000000..dbaa99912 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x10164x5xf32) <- (8x10164xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (8x10164x4xf32) <- (8x10164x5xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (8x10164x4xf32) <- (8x10164x4xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x10164x4xf32) <- (8x10164x4xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x10164x4xf32) <- (8x10164x4xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (8x10164x4xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (8x10164x4xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () 
+ full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..f48f7fa71 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +8db43af37301e6c5df30b75f04449dc531bb9453471e6360b201d2b66ed93ff3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py new file mode 100644 index 000000000..cca2497dd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 10164, 68] + dtype = "float32" + min_val = float("-7.58872") + max_val = float("15.3673") + mean = float("2.68906e-05") + std = float("1.599") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [10164, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("700.0") + mean = float("352.0") + std = float("203.197") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [10164, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py new file mode 100644 index 000000000..2991b2954 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py @@ -0,0 +1,188 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (8x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (8x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("30"), paddle.int64, 
paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__0 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__0, + softmax_0, + split_1, + transpose_0, + ) + + return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..f0876b5b3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json new file mode 100644 index 000000000..1c6cb32da --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-M", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/input_meta.py new file mode 100644 index 000000000..df3e2838c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 10164] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py new file mode 100644 index 000000000..d07b6549b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("4"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (8x10164xb) <- (8x10164xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + 
del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (8x10164xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..b3b436c8b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +6490cc553bfd8efe25a6bcddb96022c016e6b36651b6c1cd94673f681bb3e5ae \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/input_meta.py new file mode 100644 index 000000000..dd2312193 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/input_meta.py @@ -0,0 +1,33 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6069] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [6069, 4] + dtype = "float32" + min_val = float("-64.0") + max_val = float("608.0") + mean = float("272.0") + std = float("159.886") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 3, 4] + dtype = "float32" + max_val = float("509.089") + mean = float("160.185") + std = float("176.435") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/model.py new file mode 100644 index 000000000..2095b8ba2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/model.py @@ -0,0 +1,303 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + del data_0 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del 
not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (8x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (-1x1x4xf32) <- (-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.unsqueeze: (1x-1x4xf32) <- (-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (-1x1x2xf32) <- (-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] + + # pd_op.slice: (-1x1x2xf32) <- (-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 + + # pd_op.slice: (1x-1x2xf32) <- (1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x-1x2xf32) <- (1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 + + # pd_op.maximum: (-1x-1x2xf32) <- (-1x1x2xf32, 1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (-1x-1x2xf32) <- (-1x1x2xf32, 1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.prod: (-1x-1xf32) <- (-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 + + # pd_op.subtract: (-1x1x2xf32) <- (-1x1x2xf32, -1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (-1x1x2xf32) <- (-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (-1x1xf32) <- (-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 + + # pd_op.subtract: (1x-1x2xf32) <- (1x-1x2xf32, 1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x-1x2xf32) <- (1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_1, full_2, subtract_2 + + # pd_op.prod: (1x-1xf32) <- (1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 + + # pd_op.add: (-1x-1xf32) <- (-1x1xf32, 1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # 
pd_op.subtract: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-10"), True) + del full_3, subtract_3 + + # pd_op.divide: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_4, full_5, data_1] + del data_1, full_4, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1x-1xf32) <- (-1x-1xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, stack_0) + del divide_0 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_6, [1], [1] + ) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_6, float("0"), True) + del add_1 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_6, full_int_array_7, [1], [1] + ) + del reshape_2 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_2, full_6, float("0"), True) + del add_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_1 = [scale_1, scale_2] + del scale_1, scale_2 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_1, full_int_array_1) + del stack_1 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_2, [1], full_int_array_3, full_int_array_6, [1], [1] + ) + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_6, float("0"), True) + del add_3 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_2, [1], 
full_int_array_1, full_int_array_3, [1], [1] + ) + del full_int_array_1, full_int_array_3 + + # pd_op.slice: (-1xf32) <- (-1x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_2, [1], full_int_array_6, full_int_array_7, [1], [1] + ) + del data_2, full_int_array_6, full_int_array_7 + + # pd_op.add: (-1xf32) <- (-1xf32, -1xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_6, float("0"), True) + del add_4, full_6 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_2 = [scale_3, scale_4] + del scale_3, scale_4 + + # pd_op.stack: (-1x2xf32) <- ([-1xf32, -1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.unsqueeze: (1x-1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_2, full_int_array_2) + del full_int_array_2 + + # pd_op.subtract: (-1x-1x2xf32) <- (-1x1x2xf32, 1x-1x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 + + # pd_op.p_norm: (-1x-1xf32) <- (-1x-1x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False + ) + del subtract_4 + + # pd_op.reshape: (8x-1x-1xf32) <- (-1x-1xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, stack_0) + del p_norm_0, stack_0, stack_2 + + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..f0876b5b3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/input_meta.py new file mode 100644 index 000000000..345a6dd31 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 7581] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/model.py new file mode 100644 index 000000000..261f6be22 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("4"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (8x7581xb) 
<- (8x7581xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (8x7581xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..03a7b6b29 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +576e796dddb1c9463be097477eb9b76fc566d49529154ed8afd0bc3681a6d4f1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/input_meta.py new file mode 100644 index 000000000..3975da0b4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/input_meta.py @@ -0,0 +1,98 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [289] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [1445] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [289] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [1156] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [4624] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 3, 289] + dtype = "float32" + min_val = float("0.73041") + max_val = float("746.705") + mean = float("331.525") + std = float("157.979") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 3, 1156] + dtype = "float32" + min_val = float("1.37479") + max_val = float("758.018") + mean = float("331.673") + std = float("158.075") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 3, 4624] + dtype = "float32" + min_val = float("0.511387") + max_val = float("763.675") + mean = float("331.71") + std = float("158.098") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 3, 1] + dtype = "float32" + data = [ + 1.0, + 0.0, + 0.0, + 1.0, + 0.0, + 0.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 1.0, + 1.0, + 0.0, + 1.0, + 0.0, + 0.0, + ] diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/model.py new file mode 100644 index 000000000..6429236b1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/model.py @@ -0,0 +1,120 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("9"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (8x-1x9xf32, 8x-1x9xi64) <- (8x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_5, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_5 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x9xi64) <- (8x-1x9xi64, 1xf32) + scale_0 = paddle._C_ops.scale(topk_1, full_1, float("0"), True) + del full_1 + + # pd_op.one_hot: (8x-1x9x-1xf32) <- (8x-1x9xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(data_2, topk_1.dtype), data_2 + ) + del data_2, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-2] + + # pd_op.sum: (8x-1x-1xf32) <- (8x-1x9x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_0, None, False) + del one_hot_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(sum_0, data_8) + del sum_0 + + # pd_op.topk: (8x-1x9xf32, 8x-1x9xi64) <- (8x-1x-1xf32, 1xi32) + topk_2, topk_3 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_6, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_6 + + # pd_op.add: (8x-1x9xi64) <- (8x-1x9xi64, xi64) + add_0 = paddle._C_ops.add(topk_3, data_0) + del data_0 + + # pd_op.one_hot: (8x-1x9x-1xf32) <- (8x-1x9xi64, xi64) + one_hot_1 = paddle._C_ops.one_hot( + topk_3 % paddle.cast(data_3, topk_3.dtype), data_3 + ) + del data_3, topk_3 + + # pd_op.sum: (8x-1x-1xf32) <- (8x-1x9x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_1, full_int_array_0, None, False) + del one_hot_1 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(sum_1, data_8) + del sum_1 + + # pd_op.topk: (8x-1x9xf32, 8x-1x9xi64) <- (8x-1x-1xf32, 1xi32) + topk_4, topk_5 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_7, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_7, full_0 + + # pd_op.add: (8x-1x9xi64) <- (8x-1x9xi64, xi64) + add_1 = paddle._C_ops.add(topk_5, data_1) + del data_1 + + # pd_op.one_hot: (8x-1x9x-1xf32) <- (8x-1x9xi64, xi64) + one_hot_2 = paddle._C_ops.one_hot( + topk_5 % paddle.cast(data_4, topk_5.dtype), data_4 + ) + del data_4, topk_5 + + # pd_op.sum: (8x-1x-1xf32) <- (8x-1x9x-1xf32, 1xi64) + sum_2 = paddle._C_ops.sum(one_hot_2, full_int_array_0, None, False) + del full_int_array_0, one_hot_2 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_2, data_8) + del data_8, sum_2 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x-1x-1xf32, 8x-1x-1xf32, 8x-1x-1xf32]) <- (8x-1x-1xf32, 8x-1x-1xf32, 8x-1x-1xf32) + combine_0 = [multiply_0, multiply_1, multiply_2] + del 
multiply_0, multiply_1, multiply_2 + + # pd_op.concat: (8x-1x-1xf32) <- ([8x-1x-1xf32, 8x-1x-1xf32, 8x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # builtin.combine: ([8x-1x9xi64, 8x-1x9xi64, 8x-1x9xi64]) <- (8x-1x9xi64, 8x-1x9xi64, 8x-1x9xi64) + combine_1 = [scale_0, add_0, add_1] + del add_0, add_1, scale_0 + + # pd_op.concat: (8x-1x27xi64) <- ([8x-1x9xi64, 8x-1x9xi64, 8x-1x9xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/input_meta.py new file mode 100644 index 000000000..e6422a4f0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.180049] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [0.780114] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [3.79787] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del 
data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..b543568a7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +42e184615f6f96cecc56992123e65e294dfcbd7742d8a37b4b66c28fff78eae8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py new file mode 100644 index 000000000..f404aeedd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/input_meta.py @@ -0,0 +1,76 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000659544") + std = float("0.0256731") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000659544") + std = float("0.0256731") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 293.867, + 183.92, + 385.067, + 252.32, + 128.25, + 149.839, + 389.5, + 314.085, + 271.573, + 249.28, + 368.853, + 322.24, + 146.891, + 220.195, + 206.924, + 270.587, + 354.97, + 368.42, + 411.401, + 400.364, + 319.774, + 182.615, + 359.925, + 211.618, + 115.589, + 133.082, + 201.125, + 227.021, + 402.595, + 243.692, + 451.344, + 319.18, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 7581, 4] + dtype = "float32" + min_val = float("-306.461") + max_val = float("915.943") + mean = float("304.98") + std = float("188.516") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py new file mode 100644 index 000000000..a6512b0d9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/model.py @@ -0,0 +1,287 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x7581xi64) <- (8x1x7581xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 
= paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x7581xi64) <- (8x7581xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 + + # pd_op.flatten: (60648xi64) <- (8x7581xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (60648xi32) <- (8xi32, 60648xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 7581] + + # pd_op.reshape: (8x7581xi32) <- (60648xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x7581xb) <- (8x7581xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x7581xi32) <- (8x7581xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x7581xi32) <- (8x7581xb, 8x7581xi32, 8x7581xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (60648x4xf32) <- (8x4xf32, 60648xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 7581, 4] + + # pd_op.reshape: (8x7581x4xf32) <- (60648x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x7581x5xf32) <- (8x7581xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), 
full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x7581x4xf32) <- (8x7581x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x7581x4xf32) <- (8x7581x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x7581x2xf32) <- (8x1x7581x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x7581x2xf32) <- (8x1x7581x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x7581x2xf32) <- (8x1x1x2xf32, 8x1x7581x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x7581x2xf32) <- (8x1x1x2xf32, 8x1x7581x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 8x1x7581x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x7581xf32) <- (8x1x7581x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 8x1x7581x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x7581x2xf32) <- (8x1x7581x2xf32, 1xf32, 1xf32) + clip_2 = 
paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x7581xf32) <- (8x1x7581x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x7581xf32) <- (8x1x1xf32, 8x1x7581xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x7581xf32) <- (8x1x7581xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] + + # pd_op.max: (8x7581xf32) <- (8x1x7581xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x7581x1xf32) <- (8x7581xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x7581x4xf32) <- (8x7581x4xf32, 8x7581x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..48f9cc2af --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +8c3a658f63f5d96dc089da6403b65de7b000c817cbbdf585fcbddf0b4ead5739 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/input_meta.py new file mode 100644 index 000000000..a227ad9d6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 9261, 68] + dtype = "float32" + min_val = float("-8.42398") + max_val = float("17.0644") + mean = float("2.27673e-05") + std = float("1.68882") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [9261, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("668.0") + mean = float("336.0") + std = float("193.958") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" 
+ shape = [9261, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/model.py new file mode 100644 index 000000000..177500710 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/model.py @@ -0,0 +1,188 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (8x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (8x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = 
paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("100"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__0 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__0, + softmax_0, + split_1, + transpose_0, + ) + + return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..19a6a7486 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +f00b56f762b48c06ba6bd348567cc70cacaf2c88f5db73f5a23278722ad0441d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/input_meta.py new file mode 100644 index 
000000000..6d8ee32fc --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 361] + dtype = "float32" + min_val = float("5.46685") + max_val = float("598.12") + mean = float("252.702") + std = float("108.596") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 1444] + dtype = "float32" + min_val = float("3.29832") + max_val = float("609.43") + mean = float("252.88") + std = float("108.77") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 5776] + dtype = "float32" + min_val = float("1.12676") + max_val = float("615.085") + mean = float("252.925") + std = float("108.813") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/model.py new file mode 100644 index 000000000..aeda8d7e9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/model.py @@ -0,0 +1,131 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("9"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x361xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_0, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_0 = paddle._C_ops.scale(topk_1, full_1, float("0"), True) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("361"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x361xf32) <- (8x1x9xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_2, topk_1.dtype), full_2 + ) + del full_2, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-2] + + # pd_op.sum: (8x1x361xf32) <- (8x1x9x361xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_0, None, False) + del one_hot_0 + + # pd_op.multiply: (8x1x361xf32) <- (8x1x361xf32, 8x1x1xf32) + multiply_0 = paddle._C_ops.multiply(sum_0, data_3) + del sum_0 + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x1444xf32, 1xi32) + topk_2, topk_3 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_1, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_1 + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_1 = paddle._C_ops.scale(topk_3, full_1, float("361"), True) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1444"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x1444xf32) <- (8x1x9xi64, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + topk_3 % paddle.cast(full_3, topk_3.dtype), full_3 + ) + del full_3, topk_3 + + # pd_op.sum: (8x1x1444xf32) <- (8x1x9x1444xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_1, full_int_array_0, None, False) + del one_hot_1 + + # pd_op.multiply: (8x1x1444xf32) <- (8x1x1444xf32, 8x1x1xf32) + multiply_1 = paddle._C_ops.multiply(sum_1, 
data_3) + del sum_1 + + # pd_op.topk: (8x1x9xf32, 8x1x9xi64) <- (8x1x5776xf32, 1xi32) + topk_4, topk_5 = (lambda x, f: f(x))( + paddle._C_ops.topk(data_2, full_0, -1, False, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del data_2, full_0 + + # pd_op.scale: (8x1x9xi64) <- (8x1x9xi64, 1xf32) + scale_2 = paddle._C_ops.scale(topk_5, full_1, float("1805"), True) + del full_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("5776"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x1x9x5776xf32) <- (8x1x9xi64, 1xi32) + one_hot_2 = paddle._C_ops.one_hot( + topk_5 % paddle.cast(full_4, topk_5.dtype), full_4 + ) + del full_4, topk_5 + + # pd_op.sum: (8x1x5776xf32) <- (8x1x9x5776xf32, 1xi64) + sum_2 = paddle._C_ops.sum(one_hot_2, full_int_array_0, None, False) + del full_int_array_0, one_hot_2 + + # pd_op.multiply: (8x1x5776xf32) <- (8x1x5776xf32, 8x1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_2, data_3) + del data_3, sum_2 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x361xf32, 8x1x1444xf32, 8x1x5776xf32]) <- (8x1x361xf32, 8x1x1444xf32, 8x1x5776xf32) + combine_0 = [multiply_0, multiply_1, multiply_2] + del multiply_0, multiply_1, multiply_2 + + # pd_op.concat: (8x1x7581xf32) <- ([8x1x361xf32, 8x1x1444xf32, 8x1x5776xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_5) + del combine_0 + + # builtin.combine: ([8x1x9xi64, 8x1x9xi64, 8x1x9xi64]) <- (8x1x9xi64, 8x1x9xi64, 8x1x9xi64) + combine_1 = [scale_0, scale_1, scale_2] + del scale_0, scale_1, scale_2 + + # pd_op.concat: (8x1x27xi64) <- ([8x1x9xi64, 8x1x9xi64, 8x1x9xi64], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_5) + del combine_1, full_5 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..68a340abd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +ab6d73d0a64827dfc3d86c89e20b92de656589ccd700aa1d955ce4d12d055477 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/input_meta.py new file mode 100644 index 000000000..26db2798e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [7581, 4] + dtype = "float32" + min_val = float("-64.0") + max_val = float("672.0") + mean = float("304.0") + std = float("178.066") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" 
+ shape = [8, 1, 4] + dtype = "float32" + data = [ + 293.867, + 183.92, + 385.067, + 252.32, + 128.25, + 149.839, + 389.5, + 314.085, + 271.573, + 249.28, + 368.853, + 322.24, + 146.891, + 220.195, + 206.924, + 270.587, + 354.97, + 368.42, + 411.401, + 400.364, + 319.774, + 182.615, + 359.925, + 211.618, + 115.589, + 133.082, + 201.125, + 227.021, + 402.595, + 243.692, + 451.344, + 319.18, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/model.py new file mode 100644 index 000000000..bc7c69e4d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/model.py @@ -0,0 +1,263 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_1, full_int_array_0) + del data_1, full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (8x1x4xf32) <- (8x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.unsqueeze: (1x7581x4xf32) <- (7581x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_0, full_int_array_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] + + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 + + # pd_op.slice: (1x7581x2xf32) <- (1x7581x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x7581x2xf32) <- (1x7581x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 + + # pd_op.maximum: (8x7581x2xf32) <- (8x1x2xf32, 1x7581x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x7581x2xf32) <- (8x1x2xf32, 1x7581x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x7581x2xf32) <- (8x7581x2xf32, 8x7581x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x7581x2xf32) <- (8x7581x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.prod: (8x7581xf32) <- (8x7581x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 + + # pd_op.subtract: (8x1x2xf32) <- (8x1x2xf32, 8x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x2xf32) <- (8x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # 
pd_op.prod: (8x1xf32) <- (8x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 + + # pd_op.subtract: (1x7581x2xf32) <- (1x7581x2xf32, 1x7581x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x7581x2xf32) <- (1x7581x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (1x7581xf32) <- (1x7581x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 + + # pd_op.add: (8x7581xf32) <- (8x1xf32, 1x7581xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x7581xf32) <- (8x7581xf32, 8x7581xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x7581xf32) <- (8x7581xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-10"), True) + del full_2, subtract_3 + + # pd_op.divide: (8x7581xf32) <- (8x7581xf32, 8x7581xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [8, -1, 7581] + + # pd_op.reshape: (8x1x7581xf32) <- (8x7581xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0 + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_3, float("0"), True) + del add_1 + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] + + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del reshape_2 + + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 + + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_2, full_3, float("0"), True) + del add_2 + + # builtin.combine: ([8xf32, 8xf32]) <- (8xf32, 8xf32) + combine_0 = [scale_1, scale_2] + del scale_1, scale_2 + + # pd_op.stack: (8x2xf32) <- ([8xf32, 8xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.unsqueeze: (8x1x2xf32) <- (8x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) + del stack_0 + + # pd_op.slice: (7581xf32) <- (7581x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_1, [1], [1] + ) + + # pd_op.slice: (7581xf32) <- (7581x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_0, [1], full_int_array_3, full_int_array_7, [1], [1] + ) + + # pd_op.add: (7581xf32) <- (7581xf32, 7581xf32) + 
add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 + + # pd_op.scale: (7581xf32) <- (7581xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_3, float("0"), True) + del add_3 + + # pd_op.slice: (7581xf32) <- (7581x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_3, [1], [1] + ) + del full_int_array_1, full_int_array_3 + + # pd_op.slice: (7581xf32) <- (7581x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_0, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del data_0, full_int_array_7, full_int_array_8 + + # pd_op.add: (7581xf32) <- (7581xf32, 7581xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 + + # pd_op.scale: (7581xf32) <- (7581xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_3, float("0"), True) + del add_4, full_3 + + # builtin.combine: ([7581xf32, 7581xf32]) <- (7581xf32, 7581xf32) + combine_1 = [scale_3, scale_4] + del scale_3, scale_4 + + # pd_op.stack: (7581x2xf32) <- ([7581xf32, 7581xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.unsqueeze: (1x7581x2xf32) <- (7581x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) + del full_int_array_2 + + # pd_op.subtract: (8x7581x2xf32) <- (8x1x2xf32, 1x7581x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 + + # pd_op.p_norm: (8x7581xf32) <- (8x7581x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False + ) + del subtract_4 + + # pd_op.reshape: (8x1x7581xf32) <- (8x7581xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) + del full_int_array_6, p_norm_0, stack_1 + + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..64e121445 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +0621664d615e3c9d853112c02ae864f2d69542a45086eee97ddcd3e82bebddd6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/input_meta.py new file mode 100644 index 000000000..a3bf9d3c9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/input_meta.py @@ -0,0 +1,85 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 1, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00356154") + std = float("0.0595722") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 1, 27] + dtype = "int64" + min_val = 80 + max_val = 5577 + data = None + + 
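+# Editorial note (assumption, not part of the generated output): in these input_meta.py
+# files, `data = None` means the raw tensor values were not captured; only the shape,
+# dtype, and summary statistics (min/max/mean/std) above are recorded, presumably so the
+# replay tooling can synthesize inputs with a matching distribution for this subgraph.
+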
+class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 1, 7581] + dtype = "float32" + max_val = float("0.763597") + mean = float("0.00535653") + std = float("0.0337602") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [7581, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("604.0") + mean = float("304.0") + std = float("175.48") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 293.867, + 183.92, + 385.067, + 252.32, + 128.25, + 149.839, + 389.5, + 314.085, + 271.573, + 249.28, + 368.853, + 322.24, + 146.891, + 220.195, + 206.924, + 270.587, + 354.97, + 368.42, + 411.401, + 400.364, + 319.774, + 182.615, + 359.925, + 211.618, + 115.589, + 133.082, + 201.125, + 227.021, + 402.595, + 243.692, + 451.344, + 319.18, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 1, 1] + dtype = "float32" + data = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/model.py new file mode 100644 index 000000000..726214153 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/model.py @@ -0,0 +1,247 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.multiply: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + multiply_0 = paddle._C_ops.multiply(data_2, data_0) + del data_2 + + # pd_op.flatten: (8x7581xf32) <- (8x1x7581xf32) + flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) + + # pd_op.flatten: (8x27xi64) <- (8x1x27xi64) + flatten_1 = paddle._C_ops.flatten(data_1, 0, 1) + del data_1 + + # pd_op.index_sample: (8x27xf32) <- (8x7581xf32, 8x27xi64) + index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) + del flatten_0, flatten_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [8, 1, -1] + + # pd_op.reshape: (8x1x27xf32) <- (8x27xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(index_sample_0, full_int_array_0) + del full_int_array_0, index_sample_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (8x1x1xf32) <- (8x1x27xf32, 1xi64) + mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_1, True) + + # pd_op.subtract: (8x1x27xf32) <- (8x1x27xf32, 8x1x1xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) + + # pd_op.pow: (8x1x27xf32) <- (8x1x27xf32) + pow_0 = paddle._C_ops.pow(subtract_0, float("2")) + del subtract_0 + + # pd_op.sum: (8x1x1xf32) <- (8x1x27xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_1, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (8x1x27xf32) + numel_0 = paddle._C_ops.numel(reshape_0) + del reshape_0 + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (8x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], 
float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_0, float("-1"), True) + del divide_0, full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (xf32) <- (xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + scale_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.maximum: (xf32) <- (xf32, xf32) + maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) + del full_like_0, scale_0 + + # pd_op.divide: (8x1x1xf32) <- (8x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, maximum_0) + del maximum_0, sum_0 + + # pd_op.sqrt: (8x1x1xf32) <- (8x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(divide_1) + del divide_1 + + # pd_op.add: (8x1x1xf32) <- (8x1x1xf32, 8x1x1xf32) + add_0 = paddle._C_ops.add(mean_0, sqrt_0) + del mean_0, sqrt_0 + + # pd_op.greater_than: (8x1x7581xb) <- (8x1x7581xf32, 8x1x1xf32) + greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) + del add_0, multiply_0 + + # pd_op.full_like: (8x1x7581xf32) <- (8x1x7581xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + data_0, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.where: (8x1x7581xf32) <- (8x1x7581xb, 8x1x7581xf32, 8x1x7581xf32) + where_0 = paddle._C_ops.where(greater_than_1, data_0, full_like_1) + del data_0, full_like_1, greater_than_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [0, 1] + + # pd_op.unsqueeze: (1x1x7581x2xf32) <- (7581x2xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) + del data_3, full_int_array_2 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x7581x1xf32, 1x1x7581x1xf32]) <- (1x1x7581x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_2) + del unsqueeze_0 + + # builtin.split: (1x1x7581x1xf32, 1x1x7581x1xf32) <- ([1x1x7581x1xf32, 1x1x7581x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_3) + del data_4, full_int_array_3 + + # pd_op.split_with_num: ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) <- (8x1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_2) + del full_2, unsqueeze_1 + + # builtin.split: (8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32) <- ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (8x1x7581x1xf32) <- (1x1x7581x1xf32, 8x1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (8x1x7581x1xf32) <- (1x1x7581x1xf32, 8x1x1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (8x1x7581x1xf32) <- (8x1x1x1xf32, 1x1x7581x1xf32) + subtract_3 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (8x1x7581x1xf32) <- (8x1x1x1xf32, 1x1x7581x1xf32) + subtract_4 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, 
paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x1x7581x1xf32, 8x1x7581x1xf32, 8x1x7581x1xf32, 8x1x7581x1xf32]) <- (8x1x7581x1xf32, 8x1x7581x1xf32, 8x1x7581x1xf32, 8x1x7581x1xf32) + combine_0 = [subtract_1, subtract_2, subtract_3, subtract_4] + del subtract_1, subtract_2, subtract_3, subtract_4 + + # pd_op.concat: (8x1x7581x4xf32) <- ([8x1x7581x1xf32, 8x1x7581x1xf32, 8x1x7581x1xf32, 8x1x7581x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_3) + del combine_0, full_3 + + # pd_op.min: (8x1x7581xf32) <- (8x1x7581x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_1, False) + del concat_0, full_int_array_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (8x1x7581xb) <- (8x1x7581xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_0, full_4) + del full_4, min_0 + + # pd_op.cast: (8x1x7581xf32) <- (8x1x7581xb) + cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(where_0, cast_4) + del cast_4, where_0 + + # pd_op.multiply: (8x1x7581xf32) <- (8x1x7581xf32, 8x1x1xf32) + multiply_2 = paddle._C_ops.multiply(multiply_1, data_5) + del data_5, multiply_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.sum: (8x7581xf32) <- (8x1x7581xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, False) + del full_int_array_4 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_5 = [] + + # pd_op.max: (xf32) <- (8x7581xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_5, False) + del full_int_array_5 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_5) + del full_5, max_0, multiply_2, sum_1 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/input_meta.py new file mode 100644 index 000000000..c72fce767 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/input_meta.py @@ -0,0 +1,38 @@ +class 
Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 8400, 4] + dtype = "float32" + min_val = float("0.046846") + max_val = float("15.2149") + mean = float("6.21812") + std = float("3.00187") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 2] + dtype = "float32" + data = [1.6, 2.397, 2.64463, 1.6] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/model.py new file mode 100644 index 000000000..561c0c35b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 
1, 4] + + # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt new file mode 100644 index 000000000..47096223e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt @@ -0,0 +1 @@ +34be229c6a80fe6038498c3f804db3a9ddb98cc1739d81aa46402cdec43f7140 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/input_meta.py new file mode 100644 index 000000000..3b728bafb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/input_meta.py @@ -0,0 +1,87 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 3, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00240979") + std = float("0.0490304") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 3, 27] + dtype = "int64" + min_val = 0 + max_val = 5765 + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 3, 6069] + dtype = "float32" + max_val = float("0.7235") + mean = float("0.00264871") + std = float("0.0241552") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [6069, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("540.0") + mean = float("272.0") + std = float("157.0") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 3, 4] + dtype = "float32" + max_val = float("509.089") + mean = float("160.185") + std = float("176.435") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 3, 1] + dtype = "float32" + data = [ + 1.0, + 0.0, + 0.0, + 1.0, + 0.0, + 0.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 1.0, + 1.0, + 0.0, + 1.0, + 0.0, + 0.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/model.py new file mode 100644 index 000000000..a6d9e0a3a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/model.py @@ -0,0 +1,262 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # 
pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_3, data_1) + del data_3 + + # pd_op.flatten: (-1x-1xf32) <- (8x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) + + # pd_op.flatten: (-1x27xi64) <- (8x-1x27xi64) + flatten_1 = paddle._C_ops.flatten(data_2, 0, 1) + del data_2 + + # pd_op.index_sample: (-1x27xf32) <- (-1x-1xf32, -1x27xi64) + index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) + del flatten_0, flatten_1 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_0, data_0, full_1] + del data_0, full_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1x-1xf32) <- (-1x27xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(index_sample_0, stack_0) + del index_sample_0, stack_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.mean: (8x-1x1xf32) <- (8x-1x-1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_0, True) + + # pd_op.subtract: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) + + # pd_op.pow: (8x-1x-1xf32) <- (8x-1x-1xf32) + pow_0 = paddle._C_ops.pow(subtract_0, float("2")) + del subtract_0 + + # pd_op.sum: (8x-1x1xf32) <- (8x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (8x-1x-1xf32) + numel_0 = paddle._C_ops.numel(reshape_0) + del reshape_0 + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (8x-1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_0, full_2, float("-1"), True) + del divide_0, full_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (xf32) <- (xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + scale_0, full_3, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.maximum: (xf32) <- (xf32, xf32) + maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) + del full_like_0, scale_0 + + # pd_op.divide: (8x-1x1xf32) <- (8x-1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, maximum_0) + del maximum_0, sum_0 + + # pd_op.sqrt: (8x-1x1xf32) <- (8x-1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(divide_1) + del divide_1 + + # pd_op.add: (8x-1x1xf32) <- (8x-1x1xf32, 8x-1x1xf32) + add_0 = paddle._C_ops.add(mean_0, sqrt_0) + del mean_0, sqrt_0 + + # pd_op.greater_than: (8x-1x-1xb) <- (8x-1x-1xf32, 8x-1x1xf32) + greater_than_1 = 
paddle._C_ops.greater_than(multiply_0, add_0) + del add_0, multiply_0 + + # pd_op.full_like: (8x-1x-1xf32) <- (8x-1x-1xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + data_1, full_3, paddle.float32, paddle.framework._current_expected_place() + ) + del full_3 + + # pd_op.where: (8x-1x-1xf32) <- (8x-1x-1xb, 8x-1x-1xf32, 8x-1x-1xf32) + where_0 = paddle._C_ops.where(greater_than_1, data_1, full_like_1) + del data_1, full_like_1, greater_than_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_4) + del unsqueeze_0 + + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (8x-1x1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5, full_int_array_2 + + # pd_op.split_with_num: ([8x-1x1x1xf32, 8x-1x1x1xf32, 8x-1x1x1xf32, 8x-1x1x1xf32]) <- (8x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_4) + del full_4, unsqueeze_1 + + # builtin.split: (8x-1x1x1xf32, 8x-1x1x1xf32, 8x-1x1x1xf32, 8x-1x1x1xf32) <- ([8x-1x1x1xf32, 8x-1x1x1xf32, 8x-1x1x1xf32, 8x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (8x-1x-1x1xf32) <- (1x1x-1x1xf32, 8x-1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (8x-1x-1x1xf32) <- (1x1x-1x1xf32, 8x-1x1x1xf32) + subtract_2 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (8x-1x-1x1xf32) <- (8x-1x1x1xf32, 1x1x-1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (8x-1x-1x1xf32) <- (8x-1x1x1xf32, 1x1x-1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x-1x-1x1xf32, 8x-1x-1x1xf32, 8x-1x-1x1xf32, 8x-1x-1x1xf32]) <- (8x-1x-1x1xf32, 8x-1x-1x1xf32, 8x-1x-1x1xf32, 8x-1x-1x1xf32) + combine_1 = [subtract_1, subtract_2, subtract_3, subtract_4] + del subtract_1, subtract_2, subtract_3, subtract_4 + + # pd_op.concat: (8x-1x-1x4xf32) <- ([8x-1x-1x1xf32, 8x-1x-1x1xf32, 8x-1x-1x1xf32, 8x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1, full_5 + + # pd_op.min: (8x-1x-1xf32) <- (8x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_0, False) + del concat_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (8x-1x-1xb) <- (8x-1x-1xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_0, full_6) + del full_6, min_0 + + # pd_op.cast: (8x-1x-1xf32) <- (8x-1x-1xb) + cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # 
pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(where_0, cast_4) + del cast_4, where_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(multiply_1, data_6) + del data_6, multiply_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.sum: (8x-1xf32) <- (8x-1x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_3, None, False) + del full_int_array_3 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_4 = [] + + # pd_op.max: (xf32) <- (8x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_4, False) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_7 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_7) + del full_7, max_0, multiply_2, sum_1 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..24bfcda48 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +6118b74c6cea4582fed887d697e8ba8dc4d4b2ee9d3ce0ac46f01fdaefb305ca \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/input_meta.py new file mode 100644 index 000000000..73bf4ff3d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/input_meta.py @@ -0,0 +1,51 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 3, 608, 608] + dtype = "float32" + min_val = float("-0.114997") + max_val = float("1.19114") + mean = float("0.58047") + std = float("0.226042") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/model.py new file mode 100644 index 000000000..9cf2f8e45 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/model.py @@ -0,0 +1,4084 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + 
parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + ): + # pd_op.conv2d: (8x16x304x304xf32) <- (8x3x608x608xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_6, parameter_367, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_6, parameter_367 + + # pd_op.batch_norm_: (8x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (8x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, parameter_366 + + # pd_op.swish: (8x16x304x304xf32) <- (8x16x304x304xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (8x16x304x304xf32) <- (8x16x304x304xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (8x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (8x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (8x16x304x304xf32) <- (8x16x304x304xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x32x304x304xf32) <- (8x16x304x304xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_357, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (8x32x304x304xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (8x32x304x304xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_356, + parameter_355, + parameter_354, + parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (8x32x304x304xf32) <- (8x32x304x304xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (8x48x152x152xf32) <- (8x32x304x304xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_352, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (8x48x152x152xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x152x152xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (8x48x152x152xf32) <- (8x48x152x152xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x24x152x152xf32) <- (8x48x152x152xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (8x24x152x152xf32) <- (8x24x152x152xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (8x24x152x152xf32) <- (8x48x152x152xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_342, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (8x24x152x152xf32) <- (8x24x152x152xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x24x152x152xf32) <- (8x24x152x152xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.swish: (8x24x152x152xf32) <- (8x24x152x152xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (8x24x152x152xf32) <- (8x24x152x152xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.conv2d: (8x24x152x152xf32) <- (8x24x152x152xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (8x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + 
batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.multiply: (8x24x152x152xf32) <- (1xf32, 8x24x152x152xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (8x24x152x152xf32) <- (8x24x152x152xf32, 8x24x152x152xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (8x24x152x152xf32) <- (8x24x152x152xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (8x24x152x152xf32) <- (8x24x152x152xf32, 8x24x152x152xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([8x24x152x152xf32, 8x24x152x152xf32]) <- (8x24x152x152xf32, 8x24x152x152xf32) + combine_0 = [swish_5, add_1] + + # pd_op.concat: (8x48x152x152xf32) <- ([8x24x152x152xf32, 8x24x152x152xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (8x48x1x1xf32) <- (8x48x152x152xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (8x48x1x1xf32) <- (8x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_322, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_321, full_int_array_1) + del parameter_321 + + # pd_op.add: (8x48x1x1xf32) <- (8x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + + # pd_op.hardsigmoid: (8x48x1x1xf32) <- (8x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (8x48x152x152xf32) <- (8x48x152x152xf32, 8x48x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (8x64x152x152xf32) <- (8x48x152x152xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_1, parameter_320, [1, 1], 
[0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_320 + + # pd_op.batch_norm_: (8x64x152x152xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (8x64x152x152xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_319, + parameter_318, + parameter_317, + parameter_316, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_316, parameter_317, parameter_318, parameter_319 + + # pd_op.swish: (8x64x152x152xf32) <- (8x64x152x152xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (8x96x76x76xf32) <- (8x64x152x152xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_315, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_315 + + # pd_op.batch_norm_: (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_314, + parameter_313, + parameter_312, + parameter_311, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_311, parameter_312, parameter_313, parameter_314 + + # pd_op.swish: (8x96x76x76xf32) <- (8x96x76x76xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x96x76x76xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_310 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_309, + parameter_308, + parameter_307, + parameter_306, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_306, parameter_307, parameter_308, parameter_309 + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_11 = paddle._C_ops.swish(batch_norm__66) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x96x76x76xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_305 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_304, + parameter_303, + parameter_302, + parameter_301, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_301, parameter_302, parameter_303, parameter_304 + + # pd_op.swish: (8x48x76x76xf32) <- 
(8x48x76x76xf32) + swish_12 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_300, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_300 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_299, + parameter_298, + parameter_297, + parameter_296, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_296, parameter_297, parameter_298, parameter_299 + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_13 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_13, parameter_295, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.multiply: (8x48x76x76xf32) <- (1xf32, 8x48x76x76xf32) + multiply_2 = paddle._C_ops.multiply(data_1, batch_norm__90) + del data_1 + + # pd_op.add: (8x48x76x76xf32) <- (8x48x76x76xf32, 8x48x76x76xf32) + add_3 = paddle._C_ops.add(batch_norm__84, multiply_2) + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_14 = paddle._C_ops.swish(add_3) + + # pd_op.add: (8x48x76x76xf32) <- (8x48x76x76xf32, 8x48x76x76xf32) + add_4 = paddle._C_ops.add(swish_12, swish_14) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_285, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, 
+ batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, parameter_284 + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_280, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_275, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.multiply: (8x48x76x76xf32) <- (1xf32, 8x48x76x76xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del data_2 + + # pd_op.add: (8x48x76x76xf32) <- (8x48x76x76xf32, 8x48x76x76xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (8x48x76x76xf32) <- (8x48x76x76xf32, 8x48x76x76xf32) + add_6 = paddle._C_ops.add(add_4, swish_16) + + # builtin.combine: ([8x48x76x76xf32, 8x48x76x76xf32]) <- (8x48x76x76xf32, 8x48x76x76xf32) + combine_1 = [swish_11, add_6] + + # pd_op.concat: (8x96x76x76xf32) <- ([8x48x76x76xf32, 8x48x76x76xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (8x96x1x1xf32) <- (8x96x76x76xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (8x96x1x1xf32) <- (8x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_269, full_int_array_1) + del parameter_269 + + # pd_op.add: (8x96x1x1xf32) <- (8x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + + # 
pd_op.hardsigmoid: (8x96x1x1xf32) <- (8x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (8x96x76x76xf32) <- (8x96x76x76xf32, 8x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (8x128x76x76xf32) <- (8x96x76x76xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_4, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (8x128x76x76xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (8x128x76x76xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.swish: (8x128x76x76xf32) <- (8x128x76x76xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (8x192x38x38xf32) <- (8x128x76x76xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_263, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.swish: (8x192x38x38xf32) <- (8x192x38x38xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x192x38x38xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_258 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_257, + parameter_256, + parameter_255, + parameter_254, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_254, parameter_255, parameter_256, parameter_257 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x192x38x38xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_253, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_253 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + 
batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_252, + parameter_251, + parameter_250, + parameter_249, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_249, parameter_250, parameter_251, parameter_252 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_20, parameter_248, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_248 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_247, + parameter_246, + parameter_245, + parameter_244, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_244, parameter_245, parameter_246, parameter_247 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_21, parameter_243, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_243 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_242, + parameter_241, + parameter_240, + parameter_239, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_239, parameter_240, parameter_241, parameter_242 + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_238 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_237, + parameter_236, + parameter_235, + parameter_234, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_234, parameter_235, parameter_236, parameter_237 + + # pd_op.multiply: (8x96x38x38xf32) <- (1xf32, 8x96x38x38xf32) + multiply_5 = paddle._C_ops.multiply(data_3, batch_norm__150) + del data_3 + + # pd_op.add: (8x96x38x38xf32) <- (8x96x38x38xf32, 8x96x38x38xf32) + add_8 = paddle._C_ops.add(batch_norm__144, multiply_5) + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_22 = paddle._C_ops.swish(add_8) + + # pd_op.add: (8x96x38x38xf32) 
<- (8x96x38x38xf32, 8x96x38x38xf32) + add_9 = paddle._C_ops.add(swish_20, swish_22) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_233, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_233 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_232, + parameter_231, + parameter_230, + parameter_229, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_229, parameter_230, parameter_231, parameter_232 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_23 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_228, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_228 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_227, + parameter_226, + parameter_225, + parameter_224, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_224, parameter_225, parameter_226, parameter_227 + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_223, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_223 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_222, + parameter_221, + parameter_220, + parameter_219, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_219, parameter_220, parameter_221, parameter_222 + + # pd_op.multiply: (8x96x38x38xf32) <- (1xf32, 8x96x38x38xf32) + multiply_6 = paddle._C_ops.multiply(data_4, batch_norm__168) + del data_4 + + # pd_op.add: (8x96x38x38xf32) <- (8x96x38x38xf32, 8x96x38x38xf32) + add_10 = paddle._C_ops.add(batch_norm__162, multiply_6) + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_24 = paddle._C_ops.swish(add_10) + + # pd_op.add: (8x96x38x38xf32) <- (8x96x38x38xf32, 8x96x38x38xf32) + add_11 = paddle._C_ops.add(add_9, swish_24) + + # builtin.combine: ([8x96x38x38xf32, 8x96x38x38xf32]) <- (8x96x38x38xf32, 8x96x38x38xf32) + combine_2 = [swish_19, add_11] + + # pd_op.concat: (8x192x38x38xf32) <- ([8x96x38x38xf32, 8x96x38x38xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (8x192x1x1xf32) <- (8x192x38x38xf32, 2xi64) + mean_2 = 
paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_218 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_217, full_int_array_1) + del parameter_217 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + + # pd_op.hardsigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (8x192x38x38xf32) <- (8x192x38x38xf32, 8x192x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (8x256x38x38xf32) <- (8x192x38x38xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_7, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_216 + + # pd_op.batch_norm_: (8x256x38x38xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (8x256x38x38xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_215, + parameter_214, + parameter_213, + parameter_212, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_212, parameter_213, parameter_214, parameter_215 + + # pd_op.swish: (8x256x38x38xf32) <- (8x256x38x38xf32) + swish_25 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (8x384x19x19xf32) <- (8x256x38x38xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_211 + + # pd_op.batch_norm_: (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_210, + parameter_209, + parameter_208, + parameter_207, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_207, parameter_208, parameter_209, parameter_210 + + # pd_op.swish: (8x384x19x19xf32) <- (8x384x19x19xf32) + swish_26 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x384x19x19xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_206 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_205, + parameter_204, + parameter_203, + parameter_202, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_202, 
parameter_203, parameter_204, parameter_205 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_27 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x384x19x19xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_201 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_28 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_195, + parameter_194, + parameter_193, + parameter_192, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_192, parameter_193, parameter_194, parameter_195 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_29 = paddle._C_ops.swish(batch_norm__198) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_29, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_191 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_190, + parameter_189, + parameter_188, + parameter_187, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_187, parameter_188, parameter_189, parameter_190 + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_186 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_185, + parameter_184, + parameter_183, + parameter_182, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_182, parameter_183, parameter_184, parameter_185 + + # pd_op.multiply: (8x192x19x19xf32) <- (1xf32, 8x192x19x19xf32) + multiply_8 = paddle._C_ops.multiply(data_5, batch_norm__210) + del data_5 + + # pd_op.add: (8x192x19x19xf32) <- (8x192x19x19xf32, 8x192x19x19xf32) + add_13 = paddle._C_ops.add(batch_norm__204, multiply_8) + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_30 = paddle._C_ops.swish(add_13) + + # pd_op.add: (8x192x19x19xf32) <- (8x192x19x19xf32, 8x192x19x19xf32) + add_14 = paddle._C_ops.add(swish_28, swish_30) + + # builtin.combine: ([8x192x19x19xf32, 8x192x19x19xf32]) <- (8x192x19x19xf32, 8x192x19x19xf32) + combine_3 = [swish_27, add_14] + + # pd_op.concat: (8x384x19x19xf32) <- ([8x192x19x19xf32, 8x192x19x19xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (8x384x1x1xf32) <- (8x384x19x19xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_181, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_181 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_180, full_int_array_1) + del full_int_array_1, parameter_180 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + + # pd_op.hardsigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (8x384x19x19xf32) <- (8x384x19x19xf32, 8x384x1x1xf32) + multiply_9 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (8x512x19x19xf32) <- (8x384x19x19xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_9, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (8x512x19x19xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (8x512x19x19xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (8x512x19x19xf32) <- (8x512x19x19xf32) + swish_31 = paddle._C_ops.swish(batch_norm__216) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x512x19x19xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_31, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), 
+ lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_32 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x512x19x19xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_31, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_33 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_34 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_45, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (8x192x19x19xf32) <- (8x192x19x19xf32, 8x192x19x19xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_35 = paddle._C_ops.swish(add_16) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_35, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_35, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_35, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([8x192x19x19xf32, 8x192x19x19xf32, 8x192x19x19xf32, 8x192x19x19xf32]) <- (8x192x19x19xf32, 8x192x19x19xf32, 8x192x19x19xf32, 8x192x19x19xf32) + combine_4 = [swish_35, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (8x768x19x19xf32) <- ([8x192x19x19xf32, 8x192x19x19xf32, 8x192x19x19xf32, 8x192x19x19xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x768x19x19xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_4, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_36 = paddle._C_ops.swish(batch_norm__252) + + # builtin.combine: ([8x192x19x19xf32, 8x192x19x19xf32]) <- (8x192x19x19xf32, 8x192x19x19xf32) + combine_5 = [swish_32, swish_36] + + # pd_op.concat: (8x384x19x19xf32) <- ([8x192x19x19xf32, 8x192x19x19xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (8x384x19x19xf32) <- (8x384x19x19xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_5, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + 
batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (8x384x19x19xf32) <- (8x384x19x19xf32) + swish_37 = paddle._C_ops.swish(batch_norm__258) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x384x19x19xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_38 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.nearest_interp: (8x192x38x38xf32) <- (8x192x19x19xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_38, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([8x192x38x38xf32, 8x256x38x38xf32]) <- (8x192x38x38xf32, 8x256x38x38xf32) + combine_6 = [nearest_interp_0, swish_25] + + # pd_op.concat: (8x448x38x38xf32) <- ([8x192x38x38xf32, 8x256x38x38xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x448x38x38xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_39 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x448x38x38xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_6, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_128, + 
parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_40 = paddle._C_ops.swish(batch_norm__276) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_41 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (8x96x38x38xf32) <- (8x96x38x38xf32, 8x96x38x38xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_42 = paddle._C_ops.swish(add_17) + + # builtin.combine: ([8x96x38x38xf32, 8x96x38x38xf32]) <- (8x96x38x38xf32, 8x96x38x38xf32) + combine_7 = [swish_39, swish_42] + + # pd_op.concat: (8x192x38x38xf32) <- ([8x96x38x38xf32, 8x96x38x38xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: 
(8x192x38x38xf32) <- (8x192x38x38xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_7, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (8x192x38x38xf32) <- (8x192x38x38xf32) + swish_43 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x192x38x38xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_44 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.nearest_interp: (8x96x76x76xf32) <- (8x96x38x38xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_44, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([8x96x76x76xf32, 8x128x76x76xf32]) <- (8x96x76x76xf32, 8x128x76x76xf32) + combine_8 = [nearest_interp_1, swish_17] + + # pd_op.concat: (8x224x76x76xf32) <- ([8x96x76x76xf32, 8x128x76x76xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x224x76x76xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_8, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_45 = paddle._C_ops.swish(batch_norm__312) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x224x76x76xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_8, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
del parameter_94 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_46 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_47 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (8x48x76x76xf32) <- (8x48x76x76xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (8x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (8x48x76x76xf32) <- (8x48x76x76xf32, 8x48x76x76xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + + # pd_op.swish: (8x48x76x76xf32) <- (8x48x76x76xf32) + swish_48 = 
paddle._C_ops.swish(add_18) + + # builtin.combine: ([8x48x76x76xf32, 8x48x76x76xf32]) <- (8x48x76x76xf32, 8x48x76x76xf32) + combine_9 = [swish_45, swish_48] + + # pd_op.concat: (8x96x76x76xf32) <- ([8x48x76x76xf32, 8x48x76x76xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (8x96x76x76xf32) <- (8x96x76x76xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_9, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (8x96x76x76xf32) <- (8x96x76x76xf32) + swish_49 = paddle._C_ops.swish(batch_norm__342) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x76x76xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_50 = paddle._C_ops.swish(batch_norm__348) + + # builtin.combine: ([8x96x38x38xf32, 8x192x38x38xf32]) <- (8x96x38x38xf32, 8x192x38x38xf32) + combine_10 = [swish_50, swish_43] + + # pd_op.concat: (8x288x38x38xf32) <- ([8x96x38x38xf32, 8x192x38x38xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x288x38x38xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_10, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_51 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x288x38x38xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_10, parameter_59, [1, 1], [0, 0], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_52 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) + swish_53 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (8x96x38x38xf32) <- (8x96x38x38xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.add: (8x96x38x38xf32) <- (8x96x38x38xf32, 8x96x38x38xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (8x96x38x38xf32) <- (8x96x38x38xf32) 
+ swish_54 = paddle._C_ops.swish(add_19) + + # builtin.combine: ([8x96x38x38xf32, 8x96x38x38xf32]) <- (8x96x38x38xf32, 8x96x38x38xf32) + combine_11 = [swish_51, swish_54] + + # pd_op.concat: (8x192x38x38xf32) <- ([8x96x38x38xf32, 8x96x38x38xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (8x192x38x38xf32) <- (8x192x38x38xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_11, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (8x192x38x38xf32) <- (8x192x38x38xf32) + swish_55 = paddle._C_ops.swish(batch_norm__384) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x38x38xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_56 = paddle._C_ops.swish(batch_norm__390) + + # builtin.combine: ([8x192x19x19xf32, 8x384x19x19xf32]) <- (8x192x19x19xf32, 8x384x19x19xf32) + combine_12 = [swish_56, swish_37] + + # pd_op.concat: (8x576x19x19xf32) <- ([8x192x19x19xf32, 8x384x19x19xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x576x19x19xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_12, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_57 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x576x19x19xf32, 192x576x1x1xf32) + conv2d_71 = 
paddle._C_ops.conv2d( + concat_12, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_58 = paddle._C_ops.swish(batch_norm__402) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_59 = paddle._C_ops.swish(batch_norm__408) + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (8x192x19x19xf32) <- (8x192x19x19xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (8x192x19x19xf32) <- (8x192x19x19xf32, 8x192x19x19xf32) + 
add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + + # pd_op.swish: (8x192x19x19xf32) <- (8x192x19x19xf32) + swish_60 = paddle._C_ops.swish(add_20) + + # builtin.combine: ([8x192x19x19xf32, 8x192x19x19xf32]) <- (8x192x19x19xf32, 8x192x19x19xf32) + combine_13 = [swish_57, swish_60] + + # pd_op.concat: (8x384x19x19xf32) <- ([8x192x19x19xf32, 8x192x19x19xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (8x384x19x19xf32) <- (8x384x19x19xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (8x384x19x19xf32) <- (8x384x19x19xf32) + swish_0 = paddle._C_ops.swish(batch_norm__426) + del ( + add_0, + add_1, + add_10, + add_11, + add_13, + add_14, + add_16, + add_17, + add_18, + add_19, + add_20, + add_3, + add_4, + add_5, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + 
batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + 
batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + 
conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_7, + swish_8, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/weight_meta.py new file mode 100644 index 000000000..967a27eb7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/weight_meta.py @@ -0,0 +1,3860 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.450706") + max_val = float("0.52146") + mean = float("0.182226") + std = float("0.146415") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("0.925537") + max_val = float("1.48805") + mean = float("1.14298") + std = float("0.0747658") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.00224876") + max_val = float("0.0354085") + mean = float("0.00544018") + std = float("0.00335432") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.159796") + max_val = float("0.0873697") + mean = float("-0.0187517") + std = float("0.0243291") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0772811") + max_val = float("0.0557972") + mean = float("-0.000266412") + std = float("0.00468745") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.276133") + max_val = float("0.0743098") + mean = float("-0.0552236") + std = float("0.0672621") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192] + dtype = "float32" + min_val = float("0.877645") + max_val = float("1.05425") + mean = float("0.956633") + std = float("0.0247756") + data = None + + +class 
Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("0.00112957") + max_val = float("0.0134419") + mean = float("0.00480867") + std = float("0.00223458") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192] + dtype = "float32" + min_val = float("-0.0338997") + max_val = float("0.0525469") + mean = float("0.00513458") + std = float("0.0157006") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.044604") + max_val = float("0.0273353") + mean = float("-6.21397e-05") + std = float("0.00331635") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [192] + dtype = "float32" + min_val = float("-0.276133") + max_val = float("0.0743098") + mean = float("-0.0552236") + std = float("0.0672621") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("0.959652") + max_val = float("1.24388") + mean = float("1.06242") + std = float("0.049384") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.00208341") + max_val = float("0.0142051") + mean = float("0.00609325") + std = float("0.00204231") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("-0.127776") + max_val = float("0.0885761") + mean = float("-0.0184653") + std = float("0.0221624") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0607241") + max_val = float("0.0494932") + mean = float("-9.31432e-05") + std = float("0.00280569") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.358121") + max_val = float("0.228948") + mean = float("-0.116357") + std = float("0.0906356") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("0.871088") + max_val = float("1.53592") + mean = float("1.03502") + std = float("0.0822495") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("0.00586839") + max_val = float("0.0434824") + mean = float("0.0160895") + std = float("0.00644341") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("-0.145118") + max_val = float("0.0784797") + mean = float("-0.0378564") + std = float("0.0346139") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0614253") + max_val = float("0.0708489") + mean = float("-0.000170705") + std = float("0.00319013") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.262068") + max_val = float("0.09553") + mean = float("-0.0686182") + std = float("0.0651695") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.907651") + max_val = float("1.18361") + mean = float("1.01459") 
+ std = float("0.0483038") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00343907") + max_val = float("0.0132316") + mean = float("0.00579178") + std = float("0.00148763") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0930804") + max_val = float("0.0472074") + mean = float("-0.0153468") + std = float("0.0212931") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.104555") + max_val = float("0.105497") + mean = float("-0.000158033") + std = float("0.00452851") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.108168") + max_val = float("0.0105602") + mean = float("-0.0387002") + std = float("0.0209386") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192] + dtype = "float32" + min_val = float("0.829759") + max_val = float("1.16334") + mean = float("0.995295") + std = float("0.0339548") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("0.00265792") + max_val = float("0.0141198") + mean = float("0.00435067") + std = float("0.00159212") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("-0.0728807") + max_val = float("0.0392266") + mean = float("-0.0199514") + std = float("0.0198008") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0458267") + max_val = float("0.059734") + mean = float("-0.000231074") + std = float("0.00386824") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.182514") + max_val = float("0.00641605") + mean = float("-0.0674347") + std = float("0.0360599") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.818331") + max_val = float("1.19136") + mean = float("1.03604") + std = float("0.0472366") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("0.00902086") + max_val = float("0.0543467") + mean = float("0.018424") + std = float("0.00784929") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192] + dtype = "float32" + min_val = float("-0.291809") + max_val = float("0.243533") + mean = float("-0.0401521") + std = float("0.0919981") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.039074") + max_val = float("0.0392967") + mean = float("-5.13376e-05") + std = float("0.00249765") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192] + dtype = "float32" + min_val = float("-0.424052") + max_val = float("1.13103") + mean = float("0.358366") + std = float("0.274805") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [192] + dtype = "float32" + min_val = float("0.684119") 
+ max_val = float("1.64638") + mean = float("1.20799") + std = float("0.162533") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [192] + dtype = "float32" + min_val = float("0.003583") + max_val = float("0.0502877") + mean = float("0.0113198") + std = float("0.00614715") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [192] + dtype = "float32" + min_val = float("-0.138562") + max_val = float("0.119869") + mean = float("-0.0221705") + std = float("0.03046") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.12775") + max_val = float("0.0930521") + mean = float("-0.000473718") + std = float("0.00932047") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.284378") + max_val = float("0.174195") + mean = float("-0.0664645") + std = float("0.0970068") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = "float32" + min_val = float("0.804918") + max_val = float("1.19904") + mean = float("0.919478") + std = float("0.0569877") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("0.00191088") + max_val = float("0.0158765") + mean = float("0.0069753") + std = float("0.0033937") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.0277279") + max_val = float("0.0331859") + mean = float("0.0032385") + std = float("0.0131136") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0693136") + max_val = float("0.0316587") + mean = float("-0.000293189") + std = float("0.00597032") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.284378") + max_val = float("0.174195") + mean = float("-0.0664645") + std = float("0.0970068") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("0.920731") + max_val = float("1.34456") + mean = float("1.07084") + std = float("0.0643743") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("0.00338125") + max_val = float("0.0204988") + mean = float("0.00865494") + std = float("0.00325484") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.105409") + max_val = float("0.0895303") + mean = float("-0.0194451") + std = float("0.0251518") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0951475") + max_val = float("0.0756852") + mean = float("-0.000128547") + std = float("0.00534567") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("-0.582433") + max_val = float("0.267976") + mean = float("-0.198315") + std = float("0.13798") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = 
"float32" + min_val = float("0.755139") + max_val = float("1.48346") + mean = float("1.0017") + std = float("0.110224") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("0.0107474") + max_val = float("0.052763") + mean = float("0.0208157") + std = float("0.00822832") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.0722031") + max_val = float("0.0807325") + mean = float("-0.0250319") + std = float("0.025652") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0916632") + max_val = float("0.0809162") + mean = float("-0.000290506") + std = float("0.00617745") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("-0.479078") + max_val = float("0.192269") + mean = float("-0.136833") + std = float("0.101114") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = "float32" + min_val = float("0.849031") + max_val = float("1.22832") + mean = float("0.997207") + std = float("0.0756268") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("0.00409541") + max_val = float("0.0210482") + mean = float("0.00850018") + std = float("0.00283408") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96] + dtype = "float32" + min_val = float("-0.10468") + max_val = float("0.0397063") + mean = float("-0.0216645") + std = float("0.0226585") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0855408") + max_val = float("0.0862786") + mean = float("-0.000339383") + std = float("0.00835961") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.130759") + max_val = float("0.049814") + mean = float("-0.0245787") + std = float("0.0350186") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.81062") + max_val = float("1.39403") + mean = float("0.955302") + std = float("0.0697366") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("0.002652") + max_val = float("0.0210958") + mean = float("0.00687459") + std = float("0.00270533") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-0.0657455") + max_val = float("0.0517673") + mean = float("-0.00634293") + std = float("0.0227447") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0936364") + max_val = float("0.0948791") + mean = float("1.11592e-05") + std = float("0.00729492") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.269002") + max_val = float("0.0719861") + mean = float("-0.0893869") + std = float("0.070801") + data = None + + +class Program_weight_tensor_parameter_66: + name = 
"parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.71414") + max_val = float("1.15797") + mean = float("1.00479") + std = float("0.0742199") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("0.00761574") + max_val = float("0.0637429") + mean = float("0.0185346") + std = float("0.0105084") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.451267") + max_val = float("0.705044") + mean = float("0.00106013") + std = float("0.174648") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0502454") + max_val = float("0.0565142") + mean = float("-3.59798e-05") + std = float("0.00515044") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("-0.71049") + max_val = float("1.7804") + mean = float("0.562331") + std = float("0.570085") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.506139") + max_val = float("1.75258") + mean = float("1.18696") + std = float("0.280418") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("0.0028799") + max_val = float("0.0677115") + mean = float("0.022765") + std = float("0.0143864") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.192297") + max_val = float("0.152433") + mean = float("-0.0251299") + std = float("0.0572357") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.162418") + max_val = float("0.117265") + mean = float("-0.000864719") + std = float("0.018084") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0654612") + max_val = float("0.0646363") + mean = float("-0.00350378") + std = float("0.0113046") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.129902") + max_val = float("0.149226") + mean = float("5.08592e-05") + std = float("0.0106461") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0916908") + max_val = float("0.0950793") + mean = float("-0.000513281") + std = float("0.0119073") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.16761") + max_val = float("0.13684") + mean = float("-0.000461688") + std = float("0.0150787") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.0769409") + max_val = float("0.0978909") + mean = float("0.000344751") + std = float("0.0113345") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.266698") + max_val = float("0.36797") + mean = float("0.0277794") + std = float("0.150291") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.569143") + max_val 
= float("1.53396") + mean = float("0.84154") + std = float("0.134028") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.00586312") + max_val = float("0.0583423") + mean = float("0.0165729") + std = float("0.00785593") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.176735") + max_val = float("0.041626") + mean = float("-0.0302253") + std = float("0.0341462") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0852068") + max_val = float("0.106669") + mean = float("-0.00057725") + std = float("0.0117889") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.397637") + max_val = float("0.224708") + mean = float("-0.0466631") + std = float("0.107683") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.718852") + max_val = float("1.48201") + mean = float("0.968325") + std = float("0.108705") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.0065768") + max_val = float("0.0388136") + mean = float("0.0133664") + std = float("0.00506924") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.142548") + max_val = float("0.16354") + mean = float("-0.0255622") + std = float("0.0469188") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.100007") + max_val = float("0.115004") + mean = float("-0.000677374") + std = float("0.0113962") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.321616") + max_val = float("0.195437") + mean = float("-0.0414815") + std = float("0.0949684") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.6847") + max_val = float("0.973901") + mean = float("0.867706") + std = float("0.0504862") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00334166") + max_val = float("0.0258505") + mean = float("0.0100103") + std = float("0.00362185") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.0465356") + max_val = float("0.0383199") + mean = float("-0.0114904") + std = float("0.0190198") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0383231") + max_val = float("0.0488906") + mean = float("-0.00124563") + std = float("0.00787586") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.321616") + max_val = float("0.195437") + mean = float("-0.0414815") + std = float("0.0949684") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = 
[96] + dtype = "float32" + min_val = float("0.881977") + max_val = float("1.2301") + mean = float("1.01016") + std = float("0.0561357") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.00770206") + max_val = float("0.0342138") + mean = float("0.0160933") + std = float("0.00575289") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0887963") + max_val = float("0.0226557") + mean = float("-0.0176117") + std = float("0.0226057") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.127461") + max_val = float("0.113478") + mean = float("-0.000167953") + std = float("0.00669815") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.672352") + max_val = float("0.151602") + mean = float("-0.204445") + std = float("0.159341") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.641984") + max_val = float("1.31595") + mean = float("1.00011") + std = float("0.144909") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0137156") + max_val = float("0.0382381") + mean = float("0.023566") + std = float("0.00562841") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.0920378") + max_val = float("0.0477898") + mean = float("-0.020158") + std = float("0.0308748") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.160626") + max_val = float("0.139674") + mean = float("-0.000323086") + std = float("0.00770904") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.61895") + max_val = float("0.426186") + mean = float("-0.179624") + std = float("0.22152") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.709539") + max_val = float("1.31654") + mean = float("0.943253") + std = float("0.106295") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00553064") + max_val = float("0.0219517") + mean = float("0.0102789") + std = float("0.00336495") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.190973") + max_val = float("0.128121") + mean = float("0.00361859") + std = float("0.0456614") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.175646") + max_val = float("0.167444") + mean = float("-0.000470258") + std = float("0.00969802") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.182659") + max_val = float("0.360408") + mean = float("0.0343849") + std = float("0.0998371") + data = None + + +class 
Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("0.821013") + max_val = float("1.15844") + mean = float("0.971088") + std = float("0.0759417") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("0.004441") + max_val = float("0.0223555") + mean = float("0.00954333") + std = float("0.00308552") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.103749") + max_val = float("0.0566759") + mean = float("-0.0102728") + std = float("0.0291581") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.236464") + max_val = float("0.105988") + mean = float("-0.00028822") + std = float("0.00871548") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.420244") + max_val = float("-0.00582075") + mean = float("-0.182415") + std = float("0.0745906") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.679742") + max_val = float("1.20118") + mean = float("0.870665") + std = float("0.0787823") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.00887022") + max_val = float("0.0507228") + mean = float("0.0143523") + std = float("0.00477251") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.0859951") + max_val = float("0.0545426") + mean = float("-0.031055") + std = float("0.0236366") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0883702") + max_val = float("0.0663709") + mean = float("-0.000621833") + std = float("0.00803492") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-0.335926") + max_val = float("0.199578") + mean = float("-0.117025") + std = float("0.0609248") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384] + dtype = "float32" + min_val = float("0.863976") + max_val = float("1.36398") + mean = float("1.03143") + std = float("0.0679777") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("0.00476312") + max_val = float("0.0571694") + mean = float("0.00991089") + std = float("0.00336396") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("-0.0984785") + max_val = float("0.0793809") + mean = float("-0.0314827") + std = float("0.0273287") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0983331") + max_val = float("0.10911") + mean = float("-0.000522936") + std = float("0.00708429") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.328356") + max_val = 
float("0.162274") + mean = float("-0.0886748") + std = float("0.0800476") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.852012") + max_val = float("1.4349") + mean = float("1.03935") + std = float("0.110213") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.0255484") + max_val = float("0.153558") + mean = float("0.0683483") + std = float("0.0237636") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.970626") + max_val = float("0.878297") + mean = float("-0.0638026") + std = float("0.284438") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.104303") + max_val = float("0.131967") + mean = float("-6.7284e-05") + std = float("0.00605651") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.0749801") + max_val = float("0.144465") + mean = float("0.0131904") + std = float("0.0377144") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.813562") + max_val = float("0.998659") + mean = float("0.919921") + std = float("0.0313324") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00691915") + max_val = float("0.0320553") + mean = float("0.0192789") + std = float("0.00511946") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.0720374") + max_val = float("0.0433744") + mean = float("-0.0362539") + std = float("0.0278674") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0294137") + max_val = float("0.0294097") + mean = float("-0.000990693") + std = float("0.00447555") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.0749801") + max_val = float("0.144465") + mean = float("0.0131904") + std = float("0.0377144") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.9") + max_val = float("1.29627") + mean = float("1.06358") + std = float("0.075645") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.0172514") + max_val = float("0.0518902") + mean = float("0.0276756") + std = float("0.00661815") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.228306") + max_val = float("0.0666773") + mean = float("-0.121332") + std = float("0.0426081") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0561363") + max_val = float("0.0510965") + mean = float("-0.000425446") + std = float("0.00389525") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + 
shape = [192] + dtype = "float32" + min_val = float("-0.270955") + max_val = float("0.15419") + mean = float("-0.0764769") + std = float("0.0616048") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.866044") + max_val = float("1.41773") + mean = float("1.06959") + std = float("0.101828") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0203242") + max_val = float("0.0576355") + mean = float("0.0318878") + std = float("0.00740082") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.39533") + max_val = float("0.0995992") + mean = float("-0.0946007") + std = float("0.0588711") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0922815") + max_val = float("0.0488793") + mean = float("-0.000364063") + std = float("0.00415924") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.295338") + max_val = float("0.243329") + mean = float("-0.0885643") + std = float("0.0825072") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.914548") + max_val = float("1.33085") + mean = float("1.05785") + std = float("0.064384") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0032043") + max_val = float("0.0103348") + mean = float("0.00472581") + std = float("0.00112905") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.110933") + max_val = float("0.115115") + mean = float("0.0291353") + std = float("0.0248894") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.105731") + max_val = float("0.0883432") + mean = float("-0.000733147") + std = float("0.00692353") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.120666") + max_val = float("0.0362845") + mean = float("-0.0202549") + std = float("0.0232495") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.877685") + max_val = float("1.17308") + mean = float("0.966915") + std = float("0.0422082") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.00222831") + max_val = float("0.00648351") + mean = float("0.00319464") + std = float("0.000552085") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.031455") + max_val = float("0.0619138") + mean = float("0.0241332") + std = float("0.0165234") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0308802") + max_val = float("0.0442232") + mean = float("-0.000546836") + std = float("0.00571441") + 
data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [512] + dtype = "float32" + min_val = float("-4.76781") + max_val = float("-0.174428") + mean = float("-2.2669") + std = float("0.762285") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("2.03338") + max_val = float("5.28168") + mean = float("3.71825") + std = float("0.499998") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("0.010459") + max_val = float("0.085339") + mean = float("0.0334804") + std = float("0.0110447") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [512] + dtype = "float32" + min_val = float("-0.160667") + max_val = float("0.0535426") + mean = float("-0.0744458") + std = float("0.0270276") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.101408") + max_val = float("0.109192") + mean = float("-0.000803899") + std = float("0.00709943") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [384] + dtype = "float32" + min_val = float("-0.027407") + max_val = float("0.014353") + mean = float("-0.0027137") + std = float("0.00518115") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.22661") + max_val = float("0.220699") + mean = float("-0.000792589") + std = float("0.00463333") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("-2.40536") + max_val = float("2.26844") + mean = float("-0.267433") + std = float("0.511374") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("0.146531") + max_val = float("2.07897") + mean = float("0.463298") + std = float("0.320665") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("0.000219101") + max_val = float("0.0114658") + mean = float("0.000852367") + std = float("0.000939902") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.0361954") + max_val = float("0.0547528") + mean = float("0.0063812") + std = float("0.0152427") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0363316") + max_val = float("0.0572526") + mean = float("-0.000266525") + std = float("0.00420768") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("-2.40536") + max_val = float("2.26844") + mean = float("-0.267433") + std = float("0.511374") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("0.645518") + max_val = float("2.8122") + mean = float("1.33468") + std = float("0.427552") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192] + dtype = "float32" + min_val = float("0.00250137") + max_val = 
float("0.0602622") + mean = float("0.00665862") + std = float("0.00492451") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.207353") + max_val = float("0.172126") + mean = float("0.0144385") + std = float("0.0435308") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0742124") + max_val = float("0.0497618") + mean = float("-0.000136963") + std = float("0.00409281") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("-3.28286") + max_val = float("1.0769") + mean = float("-1.32291") + std = float("0.629466") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("0.512509") + max_val = float("1.92399") + mean = float("1.15476") + std = float("0.223688") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192] + dtype = "float32" + min_val = float("0.01008") + max_val = float("0.201711") + mean = float("0.0252622") + std = float("0.0162344") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [192] + dtype = "float32" + min_val = float("-0.855284") + max_val = float("0.11159") + mean = float("-0.102575") + std = float("0.0938577") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0638156") + max_val = float("0.0783146") + mean = float("-0.000334785") + std = float("0.00464713") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [192] + dtype = "float32" + min_val = float("-3.81222") + max_val = float("3.63205") + mean = float("-0.648291") + std = float("0.906652") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [192] + dtype = "float32" + min_val = float("0.699217") + max_val = float("4.2146") + mean = float("1.51918") + std = float("0.440637") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [192] + dtype = "float32" + min_val = float("0.00405284") + max_val = float("0.0366049") + mean = float("0.00960669") + std = float("0.0045059") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.115638") + max_val = float("0.100216") + mean = float("0.0306937") + std = float("0.031391") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.101629") + max_val = float("0.0816241") + mean = float("-0.00122409") + std = float("0.00805339") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("-2.92399") + max_val = float("0.898631") + mean = float("-0.416807") + std = float("0.668269") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("0.777794") + max_val = float("3.23336") + mean = float("1.44686") + std = float("0.409381") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192] + dtype 
= "float32" + min_val = float("0.00152121") + max_val = float("0.00625599") + mean = float("0.00273966") + std = float("0.000786619") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.0557254") + max_val = float("0.0535186") + mean = float("0.0115914") + std = float("0.0195176") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.064095") + max_val = float("0.0977198") + mean = float("-0.000523671") + std = float("0.00652262") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [384] + dtype = "float32" + min_val = float("-2.76185") + max_val = float("1.15618") + mean = float("-0.671709") + std = float("0.494309") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [384] + dtype = "float32" + min_val = float("0.436993") + max_val = float("1.90976") + mean = float("0.85973") + std = float("0.234184") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [384] + dtype = "float32" + min_val = float("0.00444153") + max_val = float("0.0382871") + mean = float("0.0104879") + std = float("0.003922") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [384] + dtype = "float32" + min_val = float("-0.470345") + max_val = float("0.319146") + mean = float("0.0151895") + std = float("0.0709956") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.067804") + max_val = float("0.0610524") + mean = float("-0.000133771") + std = float("0.00383323") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256] + dtype = "float32" + min_val = float("-2.80954") + max_val = float("1.39404") + mean = float("-0.938473") + std = float("0.61454") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [256] + dtype = "float32" + min_val = float("0.389802") + max_val = float("1.70795") + mean = float("0.926893") + std = float("0.16692") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [256] + dtype = "float32" + min_val = float("0.000375443") + max_val = float("0.00331703") + mean = float("0.00142137") + std = float("0.000415875") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [256] + dtype = "float32" + min_val = float("-0.156638") + max_val = float("0.107008") + mean = float("-0.0411981") + std = float("0.0494957") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.272875") + max_val = float("0.156785") + mean = float("-0.000978623") + std = float("0.0121502") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("-0.0155376") + max_val = float("-9.29592e-05") + mean = float("-0.00674759") + std = float("0.00353871") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.184883") + max_val = float("0.236496") + mean = float("-0.00500735") + std = float("0.0105781") + data = None 
+ + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-2.29568") + max_val = float("0.839012") + mean = float("-0.0662728") + std = float("0.522822") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.11529") + max_val = float("2.24322") + mean = float("0.317766") + std = float("0.347186") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("1.17632e-10") + max_val = float("0.00304022") + mean = float("0.000529268") + std = float("0.000402421") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("-0.045674") + max_val = float("0.0600435") + mean = float("0.00390685") + std = float("0.0164981") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0465458") + max_val = float("0.0675376") + mean = float("-0.000292824") + std = float("0.00610372") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-2.29568") + max_val = float("0.839012") + mean = float("-0.0662728") + std = float("0.522822") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("0.472161") + max_val = float("3.2493") + mean = float("1.27864") + std = float("0.621977") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.00195915") + max_val = float("0.0247459") + mean = float("0.00893168") + std = float("0.00387999") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("-0.14497") + max_val = float("0.137582") + mean = float("0.0165178") + std = float("0.0496303") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.143351") + max_val = float("0.112777") + mean = float("-0.000237648") + std = float("0.00679816") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-2.80238") + max_val = float("1.42683") + mean = float("-1.0339") + std = float("0.701161") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("0.367938") + max_val = float("1.99374") + mean = float("1.07012") + std = float("0.229802") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.0152909") + max_val = float("0.104597") + mean = float("0.0380174") + std = float("0.0127577") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("-1.68343") + max_val = float("0.743617") + mean = float("-0.156479") + std = float("0.263721") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0522225") + max_val = float("0.0716628") + mean = 
float("-0.000481845") + std = float("0.00750564") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-2.54128") + max_val = float("0.876212") + mean = float("-0.005007") + std = float("0.506933") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.0971537") + max_val = float("3.23994") + mean = float("0.302983") + std = float("0.397795") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("0.000126891") + max_val = float("0.0146926") + mean = float("0.00151932") + std = float("0.00186117") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("-0.0587587") + max_val = float("0.0734595") + mean = float("0.0120365") + std = float("0.0238263") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.119008") + max_val = float("0.0875981") + mean = float("-0.00096425") + std = float("0.00805062") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-2.54128") + max_val = float("0.876212") + mean = float("-0.005007") + std = float("0.506933") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("0.393347") + max_val = float("2.99076") + mean = float("0.921351") + std = float("0.400768") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.00618077") + max_val = float("0.039653") + mean = float("0.0183527") + std = float("0.00708549") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("-0.142604") + max_val = float("0.161747") + mean = float("0.0294198") + std = float("0.0577067") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0774473") + max_val = float("0.052628") + mean = float("-0.000440045") + std = float("0.00685381") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-2.062") + max_val = float("1.50373") + mean = float("-0.855895") + std = float("0.652403") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96] + dtype = "float32" + min_val = float("0.441068") + max_val = float("1.99468") + mean = float("1.08919") + std = float("0.247491") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("0.0050545") + max_val = float("0.0379817") + mean = float("0.0143521") + std = float("0.00529401") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("-0.613634") + max_val = float("0.127403") + mean = float("-0.0633655") + std = float("0.100532") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.135192") + max_val = float("0.133051") + mean = float("-0.000381622") + std = float("0.00797293") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-1.49598") + max_val = float("1.85759") + mean = float("0.0862224") + std = float("0.863993") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96] + dtype = "float32" + min_val = float("0.273964") + max_val = float("1.34454") + mean = float("0.689485") + std = float("0.270949") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("0.0068113") + max_val = float("0.052969") + mean = float("0.0206432") + std = float("0.0104895") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("-0.380598") + max_val = float("0.286382") + mean = float("-0.0564395") + std = float("0.114373") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.148644") + max_val = float("0.140053") + mean = float("-0.00119548") + std = float("0.0127651") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-2.53532") + max_val = float("1.6563") + mean = float("0.394338") + std = float("0.699666") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96] + dtype = "float32" + min_val = float("0.394136") + max_val = float("4.68197") + mean = float("1.36722") + std = float("0.958113") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("0.00403087") + max_val = float("0.0492702") + mean = float("0.0143867") + std = float("0.00802537") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("-0.190402") + max_val = float("0.230031") + mean = float("-0.00194779") + std = float("0.104444") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0894104") + max_val = float("0.126069") + mean = float("-0.000491") + std = float("0.0127678") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("-4.57943") + max_val = float("2.01073") + mean = float("-0.0578738") + std = float("0.875707") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("0.532264") + max_val = float("4.33383") + mean = float("1.03984") + std = float("0.431407") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.00481604") + max_val = float("0.062718") + mean = float("0.0147859") + std = float("0.00921379") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("-0.274056") + max_val = float("0.256445") + mean = float("0.00184935") + std = float("0.0928256") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192, 
128, 3, 3] + dtype = "float32" + min_val = float("-0.0984626") + max_val = float("0.111095") + mean = float("-0.00020139") + std = float("0.00662037") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [128] + dtype = "float32" + min_val = float("-2.15976") + max_val = float("1.44334") + mean = float("-0.608759") + std = float("0.628359") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [128] + dtype = "float32" + min_val = float("0.31086") + max_val = float("2.21325") + mean = float("0.789284") + std = float("0.218225") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [128] + dtype = "float32" + min_val = float("0.000662309") + max_val = float("0.00794347") + mean = float("0.00245652") + std = float("0.00121097") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [128] + dtype = "float32" + min_val = float("-0.276074") + max_val = float("0.308749") + mean = float("-0.0515459") + std = float("0.092419") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.175655") + max_val = float("0.188679") + mean = float("-0.0012914") + std = float("0.020039") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.0202559") + max_val = float("0.00207627") + mean = float("-0.0082197") + std = float("0.00576011") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.286041") + max_val = float("0.244927") + mean = float("-0.00624295") + std = float("0.0174508") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0596608") + max_val = float("0.0809373") + mean = float("-0.000900288") + std = float("0.0116278") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape 
= [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0891862") + max_val = float("0.0773858") + mean = float("-0.00039301") + std = float("0.0111837") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0785488") + max_val = float("0.100261") + mean = float("-0.00068996") + std = float("0.0120552") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0640984") + max_val = float("0.0758217") + mean = float("-0.00229075") + std = float("0.0140689") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0652072") + max_val = float("0.0530709") + mean = float("-0.000737479") + std = float("0.0108797") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + 
shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.123766") + max_val = float("0.0765891") + mean = float("-0.000650629") + std = float("0.0128752") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.128712") + max_val = float("0.12443") + mean = float("-0.0019459") + std = float("0.0199632") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.135295") + max_val = float("0.181086") + mean = float("0.00033488") + std = float("0.0208889") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("-3.45574") + max_val = float("3.81341") + mean = float("0.315615") + std = float("1.19869") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("0.533118") + max_val = float("5.50224") + mean = float("1.05142") + std = float("0.546053") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96] + dtype = "float32" + min_val = float("0.0102217") + max_val = float("0.144296") + mean = float("0.0269266") + std = float("0.0203922") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [96] + dtype = "float32" + min_val = float("-0.590422") + max_val = float("0.413305") + mean = float("-0.029958") + std = float("0.143642") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.101228") + max_val = float("0.0983101") + mean = float("-0.00027473") + std = float("0.010584") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + 
data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.180249") + max_val = float("0.174075") + mean = float("-0.00103405") + std = float("0.0302083") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.166837") + max_val = float("0.132668") + mean = float("-0.0166174") + std = float("0.0268842") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.0974583") + max_val = float("0.0993063") + mean = float("0.00100131") + std = float("0.0257208") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.131819") + max_val = float("0.0865634") + mean = float("-0.000750737") + std = float("0.0185028") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + 
data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.158151") + max_val = float("0.154311") + mean = float("-0.000380242") + std = float("0.0212492") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.144738") + max_val = float("0.126205") + mean = float("-0.0028876") + std = float("0.0320284") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.158658") + max_val = float("0.143071") + mean = float("-0.000643447") + std = float("0.0340868") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.13255") + max_val = float("0.117655") + mean = float("7.60057e-06") + std = float("0.0177458") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data 
= None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.232932") + max_val = float("0.250176") + mean = float("-0.000217418") + std = float("0.0310294") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.248222") + max_val = float("0.281216") + mean = float("0.000470158") + std = float("0.0424696") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.181947") + max_val = float("0.257835") + mean = float("-0.0013955") + std = float("0.0567986") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/input_meta.py new file mode 100644 index 000000000..84a3911fd --- /dev/null +++ 
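Each weight_meta.py entry above records only summary statistics for a parameter (shape, dtype, min_val, max_val, mean, std) and sets data = None, so no concrete values ship with the sample; the input_meta.py files that follow use the same convention for activation tensors, while scalar inputs carry their value in data. A minimal sketch of how such metadata could be turned into placeholder tensors, e.g. to smoke-test an extracted subgraph, is given below; the synthesize_tensor helper and the normal-then-clip sampling strategy are illustrative assumptions, not part of the generated files.

import numpy as np
import paddle


def synthesize_tensor(meta):
    # Entries that ship concrete values (data is not None) are used directly.
    if getattr(meta, "data", None) is not None:
        values = np.array(meta.data).reshape(meta.shape)
        return paddle.to_tensor(values.astype(meta.dtype))
    # Entries such as parameter_271 record only min/max, so fall back to a
    # uniform draw when mean/std are absent from the metadata class.
    if hasattr(meta, "mean") and hasattr(meta, "std"):
        values = np.random.normal(meta.mean, meta.std, size=meta.shape)
        values = np.clip(values, meta.min_val, meta.max_val)
    else:
        values = np.random.uniform(meta.min_val, meta.max_val, size=meta.shape)
    return paddle.to_tensor(values.astype(meta.dtype))


# Hypothetical usage with one of the classes listed above:
# w = synthesize_tensor(Program_weight_tensor_parameter_104)  # shape [96, 192, 1, 1]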
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [17] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [17] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [34] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [34] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [68] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [68] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 384, 17, 17] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.64449") + mean = float("0.355814") + std = float("0.702778") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 192, 34, 34] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.2424") + mean = float("0.492179") + std = float("0.772278") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 96, 68, 68] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("13.364") + mean = float("0.661066") + std = float("0.821531") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/model.py new file mode 100644 index 000000000..f2bd878fb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( 
+ [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, 
full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # 
pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = 
paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: 
(8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: 
(8x68x-1x-1xf32) <- (8x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (8x96x1x1xf32) <- (8x96x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x96x1x1xf32) <- (8x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (8x96x1x1xf32) <- (8x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (8x96x1x1xf32) <- (8x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32, 8x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (8x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32, 8x96x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x96x-1x-1xf32, 4x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (8x96x1x1xf32) <- (8x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + 
pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (8x96x1x1xf32) <- (8x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (8x96x1x1xf32) <- (8x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32, 8x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x96x-1x-1xf32) <- (8x96x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32]) <- (8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (8x-1x4xf32) <- ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32]) <- (8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x-1x68xf32) <- ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + 
batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/weight_meta.py new file mode 100644 index 000000000..9fec2ec15 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/weight_meta.py @@ -0,0 +1,574 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0229941") + max_val = float("0.0263249") + mean = float("1.68962e-07") + std = float("0.00782107") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.216665") + max_val = float("0.245336") + mean = float("6.82048e-08") + std = float("0.0110405") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.114231") + max_val = float("0.345241") + mean = float("0.110106") + std = float("0.109589") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.951611") + max_val = float("2.25895") + mean = float("1.522") + std = float("0.267379") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000136801") + max_val = float("0.00214021") + mean = float("0.000625404") + std = float("0.000354932") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0922486") + max_val = float("0.0579038") + mean = float("-0.00946644") + std = float("0.0294865") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0846481") + max_val = float("0.10483") + mean = float("-0.000528428") + std = float("0.0106624") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" 
+ min_val = float("-0.0079894") + max_val = float("0.00812595") + mean = float("-0.000271483") + std = float("0.00379003") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0212609") + max_val = float("0.0287945") + mean = float("-0.000187913") + std = float("0.00352347") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 96, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.996572") + max_val = float("1.70332") + mean = float("0.559311") + std = float("0.524005") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.792689") + max_val = float("2.07047") + mean = float("1.47132") + std = float("0.229403") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.00022484") + max_val = float("0.00448412") + mean = float("0.000858744") + std = float("0.000636719") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.176553") + max_val = float("0.0753245") + mean = float("-0.0197509") + std = float("0.0460557") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0743776") + max_val = float("0.0880323") + mean = float("-0.000570647") + std = float("0.012508") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00676565") + max_val = float("0.00492153") + mean = float("-0.000612802") + std = float("0.00255407") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.043525") + max_val = float("0.0582567") + mean = float("-0.00018279") + std = float("0.0042631") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00984357") + max_val = float("0.0293888") + mean = float("1.52388e-07") + std = float("0.00653862") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.137555") + max_val = float("0.18066") + mean = float("3.79951e-08") + std = float("0.00772962") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.0192267") + max_val = float("0.1653") + mean = float("0.0777397") + std = float("0.0384083") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.08048") + max_val = float("1.51042") + mean = float("1.29801") + std = float("0.0860117") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000156743") + max_val = float("0.00374036") + mean = float("0.000656391") + std = 
float("0.000569957") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0536493") + max_val = float("0.0209889") + mean = float("-0.00903805") + std = float("0.0148205") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0641417") + max_val = float("0.0914771") + mean = float("-0.000210773") + std = float("0.00541254") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00648734") + max_val = float("0.00687982") + mean = float("-2.83236e-05") + std = float("0.00265378") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0070265") + max_val = float("0.0152681") + mean = float("-8.24342e-05") + std = float("0.00152885") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.288786") + max_val = float("1.00357") + mean = float("0.405895") + std = float("0.236302") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("1.04173") + max_val = float("1.8382") + mean = float("1.34299") + std = float("0.125899") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.00021029") + max_val = float("0.00328742") + mean = float("0.000871509") + std = float("0.000455449") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.208817") + max_val = float("0.0601885") + mean = float("-0.0220861") + std = float("0.0361983") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0526798") + max_val = float("0.0671481") + mean = float("-0.000453435") + std = float("0.00636127") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00471173") + max_val = float("0.00393583") + mean = float("-0.00017237") + std = float("0.00138911") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0225373") + max_val = float("0.0350491") + mean = float("-8.71826e-05") + std = float("0.0017218") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.0072836") + max_val = float("0.0216282") + mean = float("1.38214e-07") + std = float("0.00555111") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0978405") + max_val = float("0.104037") + mean = float("2.30502e-08") + std = float("0.00527193") + data = None + + +class 
Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0244935") + max_val = float("0.149728") + mean = float("0.0398749") + std = float("0.0313707") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("1.05469") + max_val = float("1.41205") + mean = float("1.21373") + std = float("0.0535024") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("8.06332e-05") + max_val = float("0.00316415") + mean = float("0.000363984") + std = float("0.000297961") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0316022") + max_val = float("0.0111698") + mean = float("-0.00617109") + std = float("0.00790058") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0484511") + max_val = float("0.0528094") + mean = float("-8.64854e-05") + std = float("0.00284584") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00679313") + max_val = float("0.00399402") + mean = float("0.00010105") + std = float("0.00156319") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00722398") + max_val = float("0.0108513") + mean = float("2.60358e-05") + std = float("0.000708852") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 384, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.406643") + max_val = float("0.61006") + mean = float("0.213653") + std = float("0.10936") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("1.062") + max_val = float("1.46115") + mean = float("1.20014") + std = float("0.0646501") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000172993") + max_val = float("0.00614556") + mean = float("0.000655354") + std = float("0.000444518") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.0945672") + max_val = float("0.0673284") + mean = float("-0.0222186") + std = float("0.0202051") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0597933") + max_val = float("0.044957") + mean = float("-0.000332548") + std = float("0.00321912") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0102817") + max_val = float("0.00718453") + mean = float("-8.70751e-05") + std = float("0.00107382") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = 
"float32" + min_val = float("-0.0279581") + max_val = float("0.0411796") + mean = float("-2.84154e-05") + std = float("0.000970307") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..df6fb86a6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/input_meta.py new file mode 100644 index 000000000..d3b17e895 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 6069, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("2.79397e-09") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 6069] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 6069, 4] + dtype = "float32" + max_val = float("0.947428") + mean = float("0.000306383") + std = float("0.0159417") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/model.py new file mode 100644 index 000000000..efa9ddc2e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x-1x5xf32) <- (8x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (8x-1x4xf32) <- (8x-1x5xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (8x-1x4xf32) <- (8x-1x4xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x4xf32) <- (8x-1x4xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x4xf32) <- (8x-1x4xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 
8x-1x4xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x4xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x4xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x4xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x4xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (8x-1x4xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (8x-1x4xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..6198709b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +fb732d9fddca2e574feb08da62b81b28bc53bcf9dfe5a1522a8ffb71ef252d85 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/input_meta.py new file mode 100644 index 000000000..d723c82b2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 384, 19, 19] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.78535") + mean = float("0.35365") + std = float("0.695275") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 192, 38, 38] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.5127") + mean = float("0.490758") + std = float("0.770054") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 96, 76, 76] + dtype = "float32" + min_val = float("-0.278465") + max_val = 
float("14.8687") + mean = float("0.657976") + std = float("0.812163") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/model.py new file mode 100644 index 000000000..5d52bd2e1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("19"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (19xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (19xf32) <- (19xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (19xf32) <- (19xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (19xf32) <- (19xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([19xf32, 19xf32]) <- (19xf32, 19xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([19x19xf32, 19x19xf32]) <- ([19xf32, 19xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (19x19xf32, 19x19xf32) <- ([19x19xf32, 19x19xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32]) <- (19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, 
scale_3, scale_4, scale_5 + + # pd_op.stack: (19x19x4xf32) <- ([19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([19x19xf32, 19x19xf32]) <- (19x19xf32, 19x19xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (19x19x2xf32) <- ([19x19xf32, 19x19xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (361x4xf32) <- (19x19x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (361x2xf32) <- (19x19x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (361x1xf32) <- () + full_5 = paddle._C_ops.full( + [361, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("38"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (38xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (38xf32) <- (38xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (38xf32) <- (38xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (38xf32) <- (38xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([38xf32, 38xf32]) <- (38xf32, 38xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([38x38xf32, 38x38xf32]) <- ([38xf32, 38xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (38x38xf32, 38x38xf32) <- ([38x38xf32, 38x38xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32]) <- (38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (38x38x4xf32) <- ([38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([38x38xf32, 38x38xf32]) <- (38x38xf32, 38x38xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (38x38x2xf32) <- ([38x38xf32, 38x38xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1444x4xf32) <- (38x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1444x2xf32) <- (38x38x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1444x1xf32) <- () + full_8 = 
paddle._C_ops.full( + [1444, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("76"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (76xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (76xf32) <- (76xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (76xf32) <- (76xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (76xf32) <- (76xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([76xf32, 76xf32]) <- (76xf32, 76xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([76x76xf32, 76x76xf32]) <- ([76xf32, 76xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (76x76xf32, 76x76xf32) <- ([76x76xf32, 76x76xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32]) <- (76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (76x76x4xf32) <- ([76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([76x76xf32, 76x76xf32]) <- (76x76xf32, 76x76xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (76x76x2xf32) <- ([76x76xf32, 76x76xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (5776x4xf32) <- (76x76x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (5776x2xf32) <- (76x76x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (5776x1xf32) <- () + full_11 = paddle._C_ops.full( + [5776, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([361x4xf32, 1444x4xf32, 5776x4xf32]) <- (361x4xf32, 1444x4xf32, 5776x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (7581x4xf32) <- ([361x4xf32, 1444x4xf32, 5776x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([361x2xf32, 1444x2xf32, 5776x2xf32]) <- (361x2xf32, 1444x2xf32, 5776x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (7581x2xf32) <- ([361x2xf32, 1444x2xf32, 5776x2xf32], 
1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([361x1xf32, 1444x1xf32, 5776x1xf32]) <- (361x1xf32, 1444x1xf32, 5776x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (7581x1xf32) <- ([361x1xf32, 1444x1xf32, 5776x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x19x19xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (8x384x19x19xf32) <- (8x384x19x19xf32, 8x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (8x384x19x19xf32) <- (8x384x19x19xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (8x384x19x19xf32) <- (8x384x19x19xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (8x384x19x19xf32) <- (8x384x19x19xf32, 8x384x19x19xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (8x4x19x19xf32) <- (8x384x19x19xf32, 4x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (8x4x19x19xf32) <- (8x4x19x19xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_3 = 
paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (8x384x19x19xf32) <- (8x384x19x19xf32, 8x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (8x384x19x19xf32) <- (8x384x19x19xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (8x384x19x19xf32) <- (8x384x19x19xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (8x68x19x19xf32) <- (8x384x19x19xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (8x68x19x19xf32) <- (8x68x19x19xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (8x4x19x19xf32) <- (8x4x19x19xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (8x4x361xf32) <- (8x4x19x19xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (8x361x4xf32) <- (8x4x361xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (8x68x361xf32) <- (8x68x19x19xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (8x361x68xf32) <- (8x68x361xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x38x38xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (8x192x38x38xf32) <- (8x192x38x38xf32, 8x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (8x192x38x38xf32) <- (8x192x38x38xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 
192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (8x192x38x38xf32) <- (8x192x38x38xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (8x192x38x38xf32) <- (8x192x38x38xf32, 8x192x38x38xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (8x4x38x38xf32) <- (8x192x38x38xf32, 4x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (8x4x38x38xf32) <- (8x4x38x38xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (8x192x38x38xf32) <- (8x192x38x38xf32, 8x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (8x192x38x38xf32) <- (8x192x38x38xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (8x192x38x38xf32) <- (8x192x38x38xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (8x68x38x38xf32) <- (8x192x38x38xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (8x68x38x38xf32) <- (8x68x38x38xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (8x4x38x38xf32) <- (8x4x38x38xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (8x4x1444xf32) <- (8x4x38x38xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: 
(8x1444x4xf32) <- (8x4x1444xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (8x68x1444xf32) <- (8x68x38x38xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x1444x68xf32) <- (8x68x1444xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (8x96x1x1xf32) <- (8x96x76x76xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (8x96x1x1xf32) <- (8x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (8x96x1x1xf32) <- (8x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (8x96x1x1xf32) <- (8x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (8x96x76x76xf32) <- (8x96x76x76xf32, 8x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (8x96x76x76xf32) <- (8x96x76x76xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (8x96x76x76xf32) <- (8x96x76x76xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (8x96x76x76xf32) <- (8x96x76x76xf32, 8x96x76x76xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (8x4x76x76xf32) <- (8x96x76x76xf32, 4x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (8x4x76x76xf32) <- (8x4x76x76xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (8x96x1x1xf32) <- (8x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (8x96x1x1xf32) <- (8x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (8x96x1x1xf32) <- (8x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (8x96x76x76xf32) <- (8x96x76x76xf32, 8x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (8x96x76x76xf32) <- (8x96x76x76xf32, 96x96x1x1xf32) + 
conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (8x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (8x96x76x76xf32) <- (8x96x76x76xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (8x68x76x76xf32) <- (8x96x76x76xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (8x68x76x76xf32) <- (8x68x76x76xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (8x4x76x76xf32) <- (8x4x76x76xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (8x4x5776xf32) <- (8x4x76x76xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (8x5776x4xf32) <- (8x4x5776xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (8x68x5776xf32) <- (8x68x76x76xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x5776x68xf32) <- (8x68x5776xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([8x361x4xf32, 8x1444x4xf32, 8x5776x4xf32]) <- (8x361x4xf32, 8x1444x4xf32, 8x5776x4xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (8x7581x4xf32) <- ([8x361x4xf32, 8x1444x4xf32, 8x5776x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([8x361x68xf32, 8x1444x68xf32, 8x5776x68xf32]) <- (8x361x68xf32, 8x1444x68xf32, 8x5776x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x7581x68xf32) <- ([8x361x68xf32, 8x1444x68xf32, 8x5776x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, 
+ conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/weight_meta.py new file mode 100644 index 000000000..1a00f0475 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/weight_meta.py @@ -0,0 +1,574 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0229941") + max_val = float("0.0263249") + mean = float("1.68962e-07") + std = float("0.00782107") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.216665") + max_val = float("0.245336") + mean = float("6.82048e-08") + std = float("0.0110405") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.114231") + max_val = float("0.345241") + mean = float("0.110106") + std = float("0.109589") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.951611") + max_val = float("2.25895") + mean = float("1.522") + std = float("0.267379") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000135322") + max_val = float("0.00213796") + mean = float("0.000625074") + std = float("0.000356282") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0923399") + max_val = float("0.0580043") + mean = float("-0.00945176") + std = float("0.0295038") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0846481") + max_val = float("0.10483") + mean = float("-0.000528428") + std = float("0.0106624") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.0079894") + max_val = float("0.00812595") + mean = float("-0.000271483") + std = float("0.00379003") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0212609") + max_val = float("0.0287945") + mean = float("-0.000187913") + std = float("0.00352347") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" 
+ shape = [4, 96, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.996572") + max_val = float("1.70332") + mean = float("0.559311") + std = float("0.524005") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.792689") + max_val = float("2.07047") + mean = float("1.47132") + std = float("0.229403") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000222326") + max_val = float("0.00442872") + mean = float("0.000863741") + std = float("0.000635312") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.176674") + max_val = float("0.075408") + mean = float("-0.0197606") + std = float("0.0460865") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0743776") + max_val = float("0.0880323") + mean = float("-0.000570647") + std = float("0.012508") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00676565") + max_val = float("0.00492153") + mean = float("-0.000612802") + std = float("0.00255407") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.043525") + max_val = float("0.0582567") + mean = float("-0.00018279") + std = float("0.0042631") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00984357") + max_val = float("0.0293888") + mean = float("1.52388e-07") + std = float("0.00653862") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.137555") + max_val = float("0.18066") + mean = float("3.79951e-08") + std = float("0.00772962") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.0192267") + max_val = float("0.1653") + mean = float("0.0777397") + std = float("0.0384083") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.08048") + max_val = float("1.51042") + mean = float("1.29801") + std = float("0.0860117") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000156138") + max_val = float("0.00377708") + mean = float("0.000654388") + std = float("0.000570834") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0536431") + max_val = float("0.0209922") + mean = float("-0.00903018") + std = float("0.0148113") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0641417") + max_val = float("0.0914771") + mean = float("-0.000210773") + std = float("0.00541254") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + 
min_val = float("-0.00648734") + max_val = float("0.00687982") + mean = float("-2.83236e-05") + std = float("0.00265378") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0070265") + max_val = float("0.0152681") + mean = float("-8.24342e-05") + std = float("0.00152885") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.288786") + max_val = float("1.00357") + mean = float("0.405895") + std = float("0.236302") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("1.04173") + max_val = float("1.8382") + mean = float("1.34299") + std = float("0.125899") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000208768") + max_val = float("0.00327218") + mean = float("0.000875438") + std = float("0.000454644") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.208877") + max_val = float("0.0601016") + mean = float("-0.0220966") + std = float("0.0362007") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0526798") + max_val = float("0.0671481") + mean = float("-0.000453435") + std = float("0.00636127") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00471173") + max_val = float("0.00393583") + mean = float("-0.00017237") + std = float("0.00138911") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0225373") + max_val = float("0.0350491") + mean = float("-8.71826e-05") + std = float("0.0017218") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.0072836") + max_val = float("0.0216282") + mean = float("1.38214e-07") + std = float("0.00555111") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0978405") + max_val = float("0.104037") + mean = float("2.30502e-08") + std = float("0.00527193") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0244935") + max_val = float("0.149728") + mean = float("0.0398749") + std = float("0.0313707") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("1.05469") + max_val = float("1.41205") + mean = float("1.21373") + std = float("0.0535024") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("8.11952e-05") + max_val = float("0.00318438") + mean = float("0.000361911") + 
std = float("0.000299112") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0315599") + max_val = float("0.0111448") + mean = float("-0.00616571") + std = float("0.00788742") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0484511") + max_val = float("0.0528094") + mean = float("-8.64854e-05") + std = float("0.00284584") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00679313") + max_val = float("0.00399402") + mean = float("0.00010105") + std = float("0.00156319") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00722398") + max_val = float("0.0108513") + mean = float("2.60358e-05") + std = float("0.000708852") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 384, 3, 3] + dtype = "float32" + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.406643") + max_val = float("0.61006") + mean = float("0.213653") + std = float("0.10936") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("1.062") + max_val = float("1.46115") + mean = float("1.20014") + std = float("0.0646501") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000174404") + max_val = float("0.00611086") + mean = float("0.000656368") + std = float("0.000443838") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.0944483") + max_val = float("0.0672218") + mean = float("-0.022189") + std = float("0.0201837") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0597933") + max_val = float("0.044957") + mean = float("-0.000332548") + std = float("0.00321912") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0102817") + max_val = float("0.00718453") + mean = float("-8.70751e-05") + std = float("0.00107382") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0279581") + max_val = float("0.0411796") + mean = float("-2.84154e-05") + std = float("0.000970307") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..2f9daab91 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_net.json 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/input_meta.py new file mode 100644 index 000000000..c1011e115 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 6069] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 6069, 4] + dtype = "float32" + min_val = float("-8.529") + max_val = float("76.0107") + mean = float("29.6152") + std = float("20.3617") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 6069, 4] + dtype = "float32" + min_val = float("1.81333") + max_val = float("63.6361") + mean = float("32.5413") + std = float("15.5905") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 6069, 4] + dtype = "float32" + max_val = float("0.947428") + mean = float("0.000306383") + std = float("0.0159417") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [59.5021] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 6069, 68] + dtype = "float32" + min_val = float("-6.64472") + max_val = float("17.9933") + mean = float("2.30072e-05") + std = float("1.72456") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [6069, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("67.5") + mean = float("29.5476") + std = float("19.4728") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/model.py new file mode 100644 index 000000000..f0e2587e3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/model.py @@ -0,0 +1,509 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (8x-1xi32) <- (8x-1xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (8x-1x1xi32) <- (8x-1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (8x-1x4xi32) <- (8x-1x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (8x-1x4xb) <- (8x-1x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (8x-1x4xf32, 8x-1x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + 
reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (8x-1x4xf32, 8x-1x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (8x-1xf32) <- (8x-1x4xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (8x-1xf32, 8x-1xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- 
(-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (8x-1x1xb) <- (8x-1xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (8x-1x1xi32) <- (8x-1x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del 
unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 68] + + # pd_op.tile: (8x-1x68xi32) <- (8x-1x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (8x-1x68xb) <- (8x-1x68xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (8x-1x68xf32, 8x-1x68xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 17] + + # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([8x-1x2xf32, 8x-1x2xf32]) <- (8x-1x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (8x-1x2xf32, 8x-1x2xf32) <- ([8x-1x2xf32, 8x-1x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (8x-1x2xf32) <- (-1x2xf32, 8x-1x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (8x-1x2xf32) <- (8x-1x2xf32, -1x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([8x-1x2xf32, 8x-1x2xf32]) <- (8x-1x2xf32, 8x-1x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (8x-1x4xf32) <- ([8x-1x2xf32, 8x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x-1x4xf32) <- (8x-1x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) + del concat_0, full_7 + + # pd_op.masked_select: (-1xf32) <- (8x-1x4xf32, 8x-1x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + 
paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..1f67a84ff --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +a7e683199f714648457015168b4be16a53e004c283c3d7ca5ebc5418fe27ea75 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/input_meta.py new file mode 100644 index 000000000..6a7998d8b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/input_meta.py @@ -0,0 +1,50 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.00236] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [1.00238] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3, 640, 640] + dtype = "float32" + max_val = float("1.0") + mean = float("0.471598") + std = float("0.270715") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/model.py new file mode 100644 index 000000000..fb40c60b8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/model.py @@ -0,0 +1,4333 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + 
parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, 
+ parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + ): + # pd_op.conv2d: (-1x16x-1x-1xf32) <- (-1x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_6, parameter_422, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_6, parameter_422 + + # pd_op.batch_norm_: (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_421, + parameter_420, + parameter_419, + parameter_418, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_418, parameter_419, parameter_420, parameter_421 + + # pd_op.swish: (-1x16x-1x-1xf32) <- (-1x16x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x16x-1x-1xf32) <- (-1x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_417, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_417, swish_0 + + # pd_op.batch_norm_: (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + 
batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_416, + parameter_415, + parameter_414, + parameter_413, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_413, parameter_414, parameter_415, parameter_416 + + # pd_op.swish: (-1x16x-1x-1xf32) <- (-1x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_412, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_412, swish_1 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_411, + parameter_410, + parameter_409, + parameter_408, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_408, parameter_409, parameter_410, parameter_411 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_407, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_407, swish_2 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_406, + parameter_405, + parameter_404, + parameter_403, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_403, parameter_404, parameter_405, parameter_406 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_402, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_402 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_401, + parameter_400, + parameter_399, + parameter_398, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_398, parameter_399, parameter_400, parameter_401 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: 
(-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397, swish_3 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_393, parameter_394, parameter_395, parameter_396 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_392, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_392 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_387, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_386, + parameter_385, + parameter_384, + parameter_383, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_383, parameter_384, parameter_385, parameter_386 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_382, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_382, swish_6 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_381, + parameter_380, + parameter_379, + parameter_378, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, 
parameter_378, parameter_379, parameter_380, parameter_381 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (1xf32, -1x24x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x24x-1x-1xf32, -1x24x-1x-1xf32]) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + combine_0 = [swish_4, add_1] + del add_1, swish_4 + + # pd_op.concat: (-1x48x-1x-1xf32) <- ([-1x24x-1x-1xf32, -1x24x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x48x1x1xf32) <- (-1x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x48x1x1xf32) <- (-1x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_377, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_377 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_376, full_int_array_1) + del parameter_376 + + # pd_op.add: (-1x48x1x1xf32) <- (-1x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + del conv2d_9, reshape_0 + + # pd_op.hardsigmoid: (-1x48x1x1xf32) <- (-1x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_1, parameter_375, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_375 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_374, + parameter_373, + parameter_372, + parameter_371, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_371, parameter_372, parameter_373, parameter_374 + + # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_370, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_370, swish_8 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + 
batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_369, + parameter_368, + parameter_367, + parameter_366, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_366, parameter_367, parameter_368, parameter_369 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_9, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_365 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_364, + parameter_363, + parameter_362, + parameter_361, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_361, parameter_362, parameter_363, parameter_364 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_9, parameter_360, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_360, swish_9 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_359, + parameter_358, + parameter_357, + parameter_356, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_356, parameter_357, parameter_358, parameter_359 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_355, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_355 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_354, + parameter_353, + parameter_352, + parameter_351, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_351, parameter_352, parameter_353, parameter_354 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 
48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_12, parameter_350, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_350 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_349, + parameter_348, + parameter_347, + parameter_346, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_346, parameter_347, parameter_348, parameter_349 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_12, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_345, swish_12 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_344, + parameter_343, + parameter_342, + parameter_341, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_341, parameter_342, parameter_343, parameter_344 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, batch_norm__90) + del batch_norm__90, data_1 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, multiply_2) + del batch_norm__84, multiply_2 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(add_3) + del add_3 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_11, swish_13) + del swish_11, swish_13 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_340 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_339, + parameter_338, + parameter_337, + parameter_336, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_336, parameter_337, parameter_338, parameter_339 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_335, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_335 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + 
batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_334, + parameter_333, + parameter_332, + parameter_331, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_331, parameter_332, parameter_333, parameter_334 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_330, swish_14 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_329, + parameter_328, + parameter_327, + parameter_326, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_326, parameter_327, parameter_328, parameter_329 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del batch_norm__108, data_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + del batch_norm__102, multiply_3 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_15) + del add_4, swish_15 + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_1 = [swish_10, add_6] + del add_6, swish_10 + + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_325, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_325 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_324, full_int_array_1) + del parameter_324 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + del conv2d_20, reshape_1 + + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_4, parameter_323, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_4, parameter_323 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + 
batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_318, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__120) + del batch_norm__120 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_17, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_17, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308, swish_17 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_19 = 
paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_303, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_298, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_293, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_293, swish_20 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_3, batch_norm__150) + del batch_norm__150, data_3 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, multiply_5) + del batch_norm__144, multiply_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_8) + del add_8 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_19, swish_21) + del swish_19, swish_21 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_288, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # 
pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__156) + del batch_norm__156 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_22, parameter_283, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_283 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278, swish_22 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_4, batch_norm__168) + del batch_norm__168, data_4 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, multiply_6) + del batch_norm__162, multiply_6 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_10) + del add_10 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_23) + del add_9, swish_23 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_2 = [swish_18, add_11] + del add_11, swish_18 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 
192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_273, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_273 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_272, full_int_array_1) + del parameter_272 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + del conv2d_31, reshape_2 + + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_7, parameter_271, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_7, parameter_271 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_270, + parameter_269, + parameter_268, + parameter_267, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_267, parameter_268, parameter_269, parameter_270 + + # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_266, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_266 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_265, + parameter_264, + parameter_263, + parameter_262, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_262, parameter_263, parameter_264, parameter_265 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_261, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_261 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_260, + parameter_259, + parameter_258, + parameter_257, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del conv2d_34, parameter_257, parameter_258, parameter_259, parameter_260 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_25, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256, swish_25 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_255, + parameter_254, + parameter_253, + parameter_252, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_252, parameter_253, parameter_254, parameter_255 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_251, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_251 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_250, + parameter_249, + parameter_248, + parameter_247, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_247, parameter_248, parameter_249, parameter_250 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__198) + del batch_norm__198 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_246, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_246 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_245, + parameter_244, + parameter_243, + parameter_242, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_242, parameter_243, parameter_244, parameter_245 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_241, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_241, swish_28 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + 
batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_240, + parameter_239, + parameter_238, + parameter_237, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_237, parameter_238, parameter_239, parameter_240 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_5, batch_norm__210) + del batch_norm__210, data_5 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, multiply_8) + del batch_norm__204, multiply_8 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_27, swish_29) + del swish_27, swish_29 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_3 = [swish_26, add_14] + del add_14, swish_26 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_236, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_236 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_235, full_int_array_1) + del parameter_235 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + del conv2d_39, reshape_3 + + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_9 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_9, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_234 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # 
pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_30 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_32 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del conv2d_44, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_33 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + del batch_norm__240, batch_norm__246 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_16) + del add_16 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_34, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_34, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_34, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_4 = [swish_34, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_34 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x768x-1x-1xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_6, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_204 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_200, parameter_201, parameter_202, 
parameter_203 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__252) + del batch_norm__252 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_5 = [swish_31, swish_35] + del swish_31, swish_35 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_199 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_36, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_37, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_37 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_24] + del nearest_interp_0, swish_24 + + # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda 
x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_184 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_39, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_39 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_40, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_40 + + 
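# ---------------------------------------------------------------------------
# [Editor's note -- illustrative sketch, not part of the generated dump.]
# The flattened paddle._C_ops calls in this subgraph repeat two motifs:
#   (a) conv2d -> batch_norm (momentum 0.9, eps 1e-05) -> swish, and
#   (b) a channel-attention step: mean over axes [2, 3] -> 1x1 conv (+ bias)
#       -> hardsigmoid(0.166667, 0.5) -> channel-wise multiply.
# Below is a minimal re-expression of those motifs with the public paddle.nn
# API, assuming standard layer defaults; ConvBNSwish / EffectiveSE and all
# variable names are hypothetical and chosen only for readability.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class ConvBNSwish(nn.Layer):
    """Conv2D -> BatchNorm2D -> swish, mirroring the conv2d/batch_norm/swish triplets."""

    def __init__(self, in_ch, out_ch, kernel, stride=1):
        super().__init__()
        self.conv = nn.Conv2D(
            in_ch, out_ch, kernel, stride=stride, padding=kernel // 2, bias_attr=False
        )
        # BatchNorm2D defaults (momentum=0.9, epsilon=1e-05) match the dumped op attributes.
        self.bn = nn.BatchNorm2D(out_ch)

    def forward(self, x):
        return F.swish(self.bn(self.conv(x)))


class EffectiveSE(nn.Layer):
    """Squeeze-excite variant: global mean -> 1x1 conv -> hardsigmoid -> scale."""

    def __init__(self, channels):
        super().__init__()
        self.fc = nn.Conv2D(channels, channels, 1)  # bias enabled, as in the dumped add()

    def forward(self, x):
        w = x.mean(axis=[2, 3], keepdim=True)  # pd_op.mean over [2, 3], keepdim=True
        w = F.hardsigmoid(self.fc(w))          # defaults: slope 1/6, offset 0.5
        return x * w                           # pd_op.multiply against the input feature map


if __name__ == "__main__":
    feat = paddle.rand([1, 48, 32, 32])
    out = EffectiveSE(48)(ConvBNSwish(48, 48, 3)(feat))
    print(out.shape)  # [1, 48, 32, 32]
# ---------------------------------------------------------------------------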
# pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_17) + del add_17 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_7 = [swish_38, swish_41] + del swish_38, swish_41 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_164 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_42, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.nearest_interp: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_43, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_43 + + # builtin.combine: ([-1x96x-1x-1xf32, 
-1x128x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x128x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_16] + del nearest_interp_1, swish_16 + + # pd_op.concat: (-1x224x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x128x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_10, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__312) + del batch_norm__312 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_10, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_149 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_45 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- 
(-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134, swish_46 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + del batch_norm__330, batch_norm__336 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(add_18) + del add_18 + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_9 = [swish_44, swish_47] + del swish_44, swish_47 + + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_11, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_129 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_61, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_124, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_123, + 
parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__348) + del batch_norm__348 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x192x-1x-1xf32) + combine_10 = [swish_49, swish_42] + del swish_42, swish_49 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_12, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_12, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_114 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_51, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109, swish_51 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_105, 
parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_52, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_52, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99, swish_52 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_19) + del add_19 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_11 = [swish_50, swish_53] + del swish_50, swish_53 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_13, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_94 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + 
conv2d_69 = paddle._C_ops.conv2d( + swish_54, parameter_89, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__390) + del batch_norm__390 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) + combine_12 = [swish_55, swish_36] + del swish_36, swish_55 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_14, parameter_84, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_14, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_79 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_57, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_57 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 
192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_58, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_58, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_58 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_20) + del add_20 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_13 = [swish_56, swish_59] + del swish_56, swish_59 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + 
parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_2 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_1, slice_2) + del slice_1, slice_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [1, 1] + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_60, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_76, reshape_4) + del conv2d_76, reshape_4 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_21) + del add_21 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_60, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + multiply_11, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_52 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + 
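+ # NOTE: paddle._C_ops.batch_norm is wrapped in (lambda x, f: f(x)) so that its
+ # result is always unpacked as a 6-tuple -- when the op returns a single tensor
+ # instead of a list/tuple, the remaining five slots are padded with None before
+ # binding batch_norm__432 .. batch_norm__437. The same wrapper pattern is used
+ # for every batch_norm call in this generated subgraph.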
del conv2d_77, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_22 = paddle._C_ops.add(swish_61, swish_60) + del swish_61 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + add_22, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_22, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_23 = paddle._C_ops.add(conv2d_78, reshape_5) + del conv2d_78, reshape_5 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_24 = paddle._C_ops.add(conv2d_79, reshape_6) + del conv2d_79, reshape_6 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_24) + del add_24 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_60, sigmoid_1) + del sigmoid_1, swish_60 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + multiply_12, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_43 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_62, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_62 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_25 = paddle._C_ops.add(conv2d_81, reshape_7) + del conv2d_81, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- 
(xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_10] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_25, stack_0) + del add_25, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_82 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_82, full_int_array_6) + del conv2d_82 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_23) + del add_23 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_10] + del multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_13 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_54, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_83 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_26 = paddle._C_ops.add(conv2d_83, reshape_10) + del conv2d_83, reshape_10 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_26) + del add_26 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_14 = paddle._C_ops.multiply(swish_54, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + multiply_14, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + 
) + del multiply_14, parameter_33 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_27 = paddle._C_ops.add(swish_63, swish_54) + del swish_63 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + add_27, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_27, parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_85, reshape_11) + del conv2d_85, reshape_11 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_86 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_29 = paddle._C_ops.add(conv2d_86, reshape_12) + del conv2d_86, reshape_12 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_29) + del add_29 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_15 = paddle._C_ops.multiply(swish_54, sigmoid_4) + del sigmoid_4, swish_54 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + multiply_15, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_15, parameter_24 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_64, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_64 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = 
paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_88, reshape_13) + del conv2d_88, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_13] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, stack_2) + del add_30, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_89, full_int_array_6) + del conv2d_89 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_28) + del add_28 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_13] + del multiply_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (-1x96x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_6 + + # pd_op.shape64: (4xi64) <- (-1x96x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del full_int_array_7, shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x96x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del full_int_array_8, full_int_array_9, shape64_8 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_16 = paddle._C_ops.multiply(slice_7, slice_8) + del slice_7, slice_8 + + # pd_op.pool2d: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_48, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_10 + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_31 = paddle._C_ops.add(conv2d_90, reshape_16) + del conv2d_90, reshape_16 + + # pd_op.sigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_31) 
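+ # NOTE: the global average pool (pool2d_5) -> 1x1 conv (conv2d_90) + bias ->
+ # sigmoid_6 -> channel-wise multiply sequence here appears to act as an SE-style
+ # channel gate on the 96-channel branch; the 384- and 192-channel branches above
+ # (pool2d_3, pool2d_4) follow the same pattern.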
+ del add_31 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_17 = paddle._C_ops.multiply(swish_48, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + multiply_17, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_15 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_32 = paddle._C_ops.add(swish_65, swish_48) + del swish_65 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x96x-1x-1xf32, 4x96x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + add_32, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_32, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_33 = paddle._C_ops.add(conv2d_92, reshape_17) + del conv2d_92, reshape_17 + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_93 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_34 = paddle._C_ops.add(conv2d_93, reshape_18) + del conv2d_93, reshape_18 + + # pd_op.sigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_34) + del add_34 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_18 = paddle._C_ops.multiply(swish_48, sigmoid_7) + del sigmoid_7, swish_48 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + multiply_18, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_18, parameter_6 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: 
(-1x68x-1x-1xf32) <- (-1x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_66, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_66 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_95, reshape_19) + del conv2d_95, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_16] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_35, stack_4) + del add_35, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_96 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_96, full_int_array_6) + del conv2d_96, full_int_array_6 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_33) + del add_33 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_16] + del full_1, full_2, multiply_16 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/weight_meta.py new file mode 100644 index 000000000..22b944fb1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/weight_meta.py @@ -0,0 +1,4457 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0230492") + max_val = float("0.0263862") + mean = float("1.6921e-07") + std = float("0.00783963") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + 
shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.217178") + max_val = float("0.245921") + mean = float("6.83212e-08") + std = float("0.0110666") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.114502") + max_val = float("0.346059") + mean = float("0.110367") + std = float("0.109849") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.953865") + max_val = float("2.2643") + mean = float("1.52561") + std = float("0.268013") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000140072") + max_val = float("0.00214768") + mean = float("0.000633748") + std = float("0.000356685") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0925875") + max_val = float("0.0581396") + mean = float("-0.00951707") + std = float("0.0295988") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0848489") + max_val = float("0.10508") + mean = float("-0.000529765") + std = float("0.0106877") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.00800827") + max_val = float("0.0081451") + mean = float("-0.000272127") + std = float("0.003799") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0213112") + max_val = float("0.0288629") + mean = float("-0.00018836") + std = float("0.00353181") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 96, 3, 3] + dtype = "float32" + min_val = float("-8.94465e-06") + max_val = float("0.000223992") + mean = float("1.89384e-05") + std = float("3.09239e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.998934") + max_val = float("1.70735") + mean = float("0.560636") + std = float("0.525246") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.794568") + max_val = float("2.07538") + mean = float("1.47481") + std = float("0.229946") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000240262") + max_val = float("0.00433043") + mean = float("0.000863772") + std = float("0.000623405") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.177072") + max_val = float("0.0755824") + mean = float("-0.0197895") + std = float("0.0461985") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0745538") + max_val = float("0.088241") + mean = float("-0.000571988") + std = float("0.0125377") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + 
dtype = "float32" + min_val = float("-0.00678168") + max_val = float("0.00493317") + mean = float("-0.000614254") + std = float("0.00256013") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0436281") + max_val = float("0.0583947") + mean = float("-0.000183223") + std = float("0.0042732") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00986684") + max_val = float("0.0294587") + mean = float("1.5281e-07") + std = float("0.00655415") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.137881") + max_val = float("0.181087") + mean = float("3.80678e-08") + std = float("0.00774793") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.0192722") + max_val = float("0.165692") + mean = float("0.0779239") + std = float("0.0384993") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.08304") + max_val = float("1.514") + mean = float("1.30109") + std = float("0.0862155") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00016077") + max_val = float("0.00375221") + mean = float("0.000671767") + std = float("0.000576734") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0538932") + max_val = float("0.0210107") + mean = float("-0.00908768") + std = float("0.0148884") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0642939") + max_val = float("0.0916942") + mean = float("-0.000211285") + std = float("0.00542537") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00650271") + max_val = float("0.00689612") + mean = float("-2.83907e-05") + std = float("0.00266007") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00704313") + max_val = float("0.0153043") + mean = float("-8.26294e-05") + std = float("0.00153248") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-6.5479e-06") + max_val = float("0.000137505") + mean = float("7.70272e-06") + std = float("1.45118e-05") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.28947") + max_val = float("1.00595") + mean = float("0.406857") + std = float("0.236862") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("1.0442") + max_val = float("1.84255") + mean = float("1.34617") + std = float("0.126198") + data = None + + +class Program_weight_tensor_parameter_31: + name = 
"parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000212463") + max_val = float("0.00312835") + mean = float("0.000877846") + std = float("0.000447036") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.209347") + max_val = float("0.060547") + mean = float("-0.0221397") + std = float("0.0363101") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0528046") + max_val = float("0.0673072") + mean = float("-0.000454508") + std = float("0.00637634") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.0047229") + max_val = float("0.00394516") + mean = float("-0.000172778") + std = float("0.0013924") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0225907") + max_val = float("0.0351321") + mean = float("-8.73892e-05") + std = float("0.00172588") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00730085") + max_val = float("0.0216794") + mean = float("1.38476e-07") + std = float("0.00556427") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0980724") + max_val = float("0.104284") + mean = float("2.31084e-08") + std = float("0.00528442") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("-0.0245516") + max_val = float("0.150083") + mean = float("0.0399693") + std = float("0.0314451") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("1.05719") + max_val = float("1.4154") + mean = float("1.2166") + std = float("0.0536291") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("9.51162e-05") + max_val = float("0.00325844") + mean = float("0.000376625") + std = float("0.000302781") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-0.0319383") + max_val = float("0.0112822") + mean = float("-0.00622045") + std = float("0.00797912") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0485659") + max_val = float("0.0529346") + mean = float("-8.66897e-05") + std = float("0.00285258") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.00680921") + max_val = float("0.00400345") + mean = float("0.000101289") + std = float("0.0015669") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00724109") + max_val = float("0.010877") + mean = float("2.60975e-05") + std = float("0.000710531") + data = None 
+ + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-6.75702e-06") + max_val = float("0.000129978") + mean = float("3.2843e-06") + std = float("8.19373e-06") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.407607") + max_val = float("0.611505") + mean = float("0.214159") + std = float("0.109619") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("1.06451") + max_val = float("1.46461") + mean = float("1.20299") + std = float("0.0648033") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("0.000178596") + max_val = float("0.00605344") + mean = float("0.000652901") + std = float("0.000436307") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("-0.0952803") + max_val = float("0.067952") + mean = float("-0.0223828") + std = float("0.0203432") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.059935") + max_val = float("0.0450635") + mean = float("-0.000333336") + std = float("0.00322675") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0103061") + max_val = float("0.00720155") + mean = float("-8.72814e-05") + std = float("0.00107636") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0280243") + max_val = float("0.0412772") + mean = float("-2.84827e-05") + std = float("0.000972606") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.451774") + max_val = float("0.522696") + mean = float("0.182658") + std = float("0.146762") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.927731") + max_val = float("1.49158") + mean = float("1.14569") + std = float("0.0749429") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00221275") + max_val = float("0.0348489") + mean = float("0.00537283") + std = float("0.00324614") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.162644") + max_val = float("0.0915041") + mean = float("-0.0192399") + std = float("0.0247508") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0774644") + max_val = float("0.0559315") + mean = float("-0.000267047") + std = float("0.00469856") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [192] + dtype = "float32" + min_val = float("-0.276788") + max_val = float("0.0744858") + mean = float("-0.0553545") + std = float("0.0674215") + 
data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [192] + dtype = "float32" + min_val = float("0.879725") + max_val = float("1.05675") + mean = float("0.9589") + std = float("0.0248343") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [192] + dtype = "float32" + min_val = float("0.0011034") + max_val = float("0.0122326") + mean = float("0.00466736") + std = float("0.00201243") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [192] + dtype = "float32" + min_val = float("-0.0344828") + max_val = float("0.0534198") + mean = float("0.00506268") + std = float("0.0159408") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0447105") + max_val = float("0.0274001") + mean = float("-6.22922e-05") + std = float("0.0033242") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [192] + dtype = "float32" + min_val = float("-0.276788") + max_val = float("0.0744858") + mean = float("-0.0553545") + std = float("0.0674215") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [192] + dtype = "float32" + min_val = float("0.961926") + max_val = float("1.24683") + mean = float("1.06494") + std = float("0.049501") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [192] + dtype = "float32" + min_val = float("0.00223975") + max_val = float("0.0149979") + mean = float("0.00641623") + std = float("0.00218922") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [192] + dtype = "float32" + min_val = float("-0.132274") + max_val = float("0.0890586") + mean = float("-0.0193578") + std = float("0.0227857") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0608679") + max_val = float("0.0496112") + mean = float("-9.33668e-05") + std = float("0.00281234") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.358969") + max_val = float("0.22949") + mean = float("-0.116632") + std = float("0.0908503") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.873152") + max_val = float("1.53956") + mean = float("1.03748") + std = float("0.0824444") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00599936") + max_val = float("0.0451748") + mean = float("0.0156687") + std = float("0.00623842") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.149167") + max_val = float("0.0797484") + mean = float("-0.0391818") + std = float("0.0355056") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0615707") + max_val = float("0.0710171") + mean = float("-0.00017111") + std = float("0.00319769") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.262689") + max_val = float("0.0957564") + 
mean = float("-0.0687808") + std = float("0.0653239") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.909802") + max_val = float("1.18641") + mean = float("1.01699") + std = float("0.0484182") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00324199") + max_val = float("0.013483") + mean = float("0.00562048") + std = float("0.00149769") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.0949616") + max_val = float("0.0476639") + mean = float("-0.0156487") + std = float("0.0216088") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.104801") + max_val = float("0.105747") + mean = float("-0.000158392") + std = float("0.00453924") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.108424") + max_val = float("0.0105853") + mean = float("-0.0387919") + std = float("0.0209882") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.831725") + max_val = float("1.1661") + mean = float("0.997653") + std = float("0.0340353") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00246664") + max_val = float("0.0124406") + mean = float("0.00423672") + std = float("0.00147969") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.0741036") + max_val = float("0.0394352") + mean = float("-0.020305") + std = float("0.0200782") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0459353") + max_val = float("0.0598756") + mean = float("-0.000231622") + std = float("0.00387741") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.182946") + max_val = float("0.00643118") + mean = float("-0.0675945") + std = float("0.0361454") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.820271") + max_val = float("1.19418") + mean = float("1.03849") + std = float("0.0473485") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00830286") + max_val = float("0.0523696") + mean = float("0.0181004") + std = float("0.00781457") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.295419") + max_val = float("0.243282") + mean = float("-0.0406729") + std = float("0.0924218") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0391666") + max_val = float("0.03939") + mean = float("-5.14529e-05") + std = float("0.00250357") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + 
min_val = float("-0.425057") + max_val = float("1.13371") + mean = float("0.359215") + std = float("0.275456") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.68574") + max_val = float("1.65028") + mean = float("1.21085") + std = float("0.162918") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00346673") + max_val = float("0.0501321") + mean = float("0.0111589") + std = float("0.00604341") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.140668") + max_val = float("0.122762") + mean = float("-0.0226308") + std = float("0.0310462") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.128052") + max_val = float("0.093273") + mean = float("-0.000474807") + std = float("0.00934256") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("-0.285052") + max_val = float("0.174608") + mean = float("-0.066622") + std = float("0.0972367") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("0.806825") + max_val = float("1.20189") + mean = float("0.921657") + std = float("0.0571227") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96] + dtype = "float32" + min_val = float("0.00190752") + max_val = float("0.0151942") + mean = float("0.0067712") + std = float("0.00323168") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [96] + dtype = "float32" + min_val = float("-0.0280543") + max_val = float("0.0334148") + mean = float("0.00324371") + std = float("0.0131979") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0694781") + max_val = float("0.0317322") + mean = float("-0.000293877") + std = float("0.00598447") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.285052") + max_val = float("0.174608") + mean = float("-0.066622") + std = float("0.0972367") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.922913") + max_val = float("1.34775") + mean = float("1.07338") + std = float("0.0645268") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.00333117") + max_val = float("0.0203563") + mean = float("0.00882651") + std = float("0.00330765") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.106469") + max_val = float("0.0925807") + mean = float("-0.0197629") + std = float("0.0255564") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0953723") + max_val = float("0.0758644") + mean = float("-0.000128828") + std = float("0.00535834") + data = None + + +class Program_weight_tensor_parameter_105: + name = 
"parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.583814") + max_val = float("0.268611") + mean = float("-0.198785") + std = float("0.138307") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("0.756929") + max_val = float("1.48697") + mean = float("1.00407") + std = float("0.110485") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [96] + dtype = "float32" + min_val = float("0.010506") + max_val = float("0.0513283") + mean = float("0.0202061") + std = float("0.00815952") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.0734088") + max_val = float("0.0802522") + mean = float("-0.0253763") + std = float("0.0257196") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0918806") + max_val = float("0.0811105") + mean = float("-0.000291193") + std = float("0.00619209") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.480213") + max_val = float("0.192724") + mean = float("-0.137157") + std = float("0.101354") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.851043") + max_val = float("1.23123") + mean = float("0.99957") + std = float("0.0758061") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00399954") + max_val = float("0.0215343") + mean = float("0.00841287") + std = float("0.00287986") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.10504") + max_val = float("0.0398062") + mean = float("-0.021676") + std = float("0.0226645") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0857424") + max_val = float("0.0864841") + mean = float("-0.000340161") + std = float("0.00837942") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.131069") + max_val = float("0.049932") + mean = float("-0.024637") + std = float("0.0351016") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.812541") + max_val = float("1.39733") + mean = float("0.957566") + std = float("0.0699019") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.00269299") + max_val = float("0.0211662") + mean = float("0.00681022") + std = float("0.00271985") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0659133") + max_val = float("0.0518171") + mean = float("-0.00634417") + std = float("0.0227762") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0938574") + max_val = float("0.0951039") + mean = float("1.11907e-05") + std = float("0.00731221") + 
data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.269639") + max_val = float("0.0721567") + mean = float("-0.0895986") + std = float("0.0709687") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.715833") + max_val = float("1.16072") + mean = float("1.00717") + std = float("0.0743957") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.00763385") + max_val = float("0.0621682") + mean = float("0.0184128") + std = float("0.0105064") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.454732") + max_val = float("0.708394") + mean = float("0.000779481") + std = float("0.175367") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0503643") + max_val = float("0.0566473") + mean = float("-3.60464e-05") + std = float("0.00516265") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.712173") + max_val = float("1.78462") + mean = float("0.563663") + std = float("0.571436") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.507336") + max_val = float("1.75674") + mean = float("1.18977") + std = float("0.281082") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00285949") + max_val = float("0.067077") + mean = float("0.0223457") + std = float("0.0141142") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.19432") + max_val = float("0.152997") + mean = float("-0.0256683") + std = float("0.0579434") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.162807") + max_val = float("0.117542") + mean = float("-0.000866689") + std = float("0.0181268") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0656168") + max_val = float("0.0647894") + mean = float("-0.00351211") + std = float("0.0113314") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.130208") + max_val = float("0.149582") + mean = float("5.09703e-05") + std = float("0.0106713") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0919085") + max_val = float("0.0953069") + mean = float("-0.000514532") + std = float("0.0119355") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.168008") + max_val = float("0.137157") + mean = float("-0.000462758") + std = float("0.0151144") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.0771231") + max_val = float("0.098127") + mean = float("0.00034558") + std = float("0.0113613") + data = None + + 
+class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.26733") + max_val = float("0.368842") + mean = float("0.0278452") + std = float("0.150647") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.570492") + max_val = float("1.53759") + mean = float("0.843534") + std = float("0.134345") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00564964") + max_val = float("0.0590641") + mean = float("0.0163964") + std = float("0.00799543") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.176967") + max_val = float("0.0414518") + mean = float("-0.0301629") + std = float("0.0341383") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0854084") + max_val = float("0.106922") + mean = float("-0.000578601") + std = float("0.0118169") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.39858") + max_val = float("0.22524") + mean = float("-0.0467737") + std = float("0.107938") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.720555") + max_val = float("1.48552") + mean = float("0.97062") + std = float("0.108962") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.00632478") + max_val = float("0.0376313") + mean = float("0.0130181") + std = float("0.00503881") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.143116") + max_val = float("0.162113") + mean = float("-0.0253695") + std = float("0.0469194") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.100244") + max_val = float("0.115277") + mean = float("-0.000678981") + std = float("0.0114232") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.322378") + max_val = float("0.1959") + mean = float("-0.0415798") + std = float("0.0951934") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.686323") + max_val = float("0.976209") + mean = float("0.869762") + std = float("0.0506059") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00354582") + max_val = float("0.0256175") + mean = float("0.00997218") + std = float("0.00369666") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0465464") + max_val = float("0.0382274") + mean = float("-0.01146") + std = float("0.0189564") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0384146") + max_val = float("0.0490063") + mean = 
float("-0.0012486") + std = float("0.00789452") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.322378") + max_val = float("0.1959") + mean = float("-0.0415798") + std = float("0.0951934") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.884067") + max_val = float("1.23302") + mean = float("1.01256") + std = float("0.0562687") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0075868") + max_val = float("0.0331966") + mean = float("0.0160023") + std = float("0.00570454") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.0886841") + max_val = float("0.0227937") + mean = float("-0.0174502") + std = float("0.022592") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.127764") + max_val = float("0.113746") + mean = float("-0.000168342") + std = float("0.00671402") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.673945") + max_val = float("0.151961") + mean = float("-0.204929") + std = float("0.159718") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.643505") + max_val = float("1.31906") + mean = float("1.00248") + std = float("0.145252") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0137231") + max_val = float("0.0387135") + mean = float("0.0231619") + std = float("0.00544315") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.0918689") + max_val = float("0.0482563") + mean = float("-0.0198259") + std = float("0.0309823") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.161004") + max_val = float("0.140004") + mean = float("-0.000323844") + std = float("0.00772731") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.620416") + max_val = float("0.427196") + mean = float("-0.18005") + std = float("0.222045") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.711219") + max_val = float("1.31966") + mean = float("0.945489") + std = float("0.106547") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.00521793") + max_val = float("0.0223373") + mean = float("0.00990973") + std = float("0.00333274") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.192205") + max_val = float("0.130392") + mean = float("0.00392371") + std = float("0.0460678") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = 
float("-0.176062") + max_val = float("0.167841") + mean = float("-0.000471326") + std = float("0.009721") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.183092") + max_val = float("0.361262") + mean = float("0.0344664") + std = float("0.100074") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.822959") + max_val = float("1.16119") + mean = float("0.97339") + std = float("0.0761217") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00431213") + max_val = float("0.0216783") + mean = float("0.00914052") + std = float("0.00294183") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.104841") + max_val = float("0.0578619") + mean = float("-0.0103319") + std = float("0.029502") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.237025") + max_val = float("0.106242") + mean = float("-0.000288879") + std = float("0.00873613") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.42124") + max_val = float("-0.00583464") + mean = float("-0.182847") + std = float("0.0747674") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.681353") + max_val = float("1.20403") + mean = float("0.872728") + std = float("0.078969") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00778368") + max_val = float("0.0490296") + mean = float("0.0135942") + std = float("0.00459161") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.0866722") + max_val = float("0.0550815") + mean = float("-0.0314623") + std = float("0.023929") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0885796") + max_val = float("0.0665274") + mean = float("-0.000623302") + std = float("0.00805396") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.336722") + max_val = float("0.200051") + mean = float("-0.117303") + std = float("0.0610692") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.866024") + max_val = float("1.36721") + mean = float("1.03388") + std = float("0.0681388") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00486193") + max_val = float("0.055935") + mean = float("0.00926043") + std = float("0.00323892") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.100111") + max_val = float("0.0807501") + mean = float("-0.0319328") + std = float("0.0277642") + data = None + + +class Program_weight_tensor_parameter_199: + name = 
"parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0985658") + max_val = float("0.109368") + mean = float("-0.000524172") + std = float("0.00710108") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.329134") + max_val = float("0.162659") + mean = float("-0.0888849") + std = float("0.0802373") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.854031") + max_val = float("1.4383") + mean = float("1.04181") + std = float("0.110475") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.0265882") + max_val = float("0.153482") + mean = float("0.0578785") + std = float("0.0207561") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-1.01163") + max_val = float("0.927788") + mean = float("-0.0675584") + std = float("0.291373") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.10455") + max_val = float("0.132281") + mean = float("-6.74648e-05") + std = float("0.00607087") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.0751579") + max_val = float("0.144808") + mean = float("0.0132216") + std = float("0.0378038") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.81549") + max_val = float("1.00103") + mean = float("0.922101") + std = float("0.0314067") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.00659761") + max_val = float("0.0298718") + mean = float("0.0179919") + std = float("0.00478609") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.0739894") + max_val = float("0.044871") + mean = float("-0.0371711") + std = float("0.0286948") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0294836") + max_val = float("0.029479") + mean = float("-0.000993034") + std = float("0.00448616") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.0751579") + max_val = float("0.144808") + mean = float("0.0132216") + std = float("0.0378038") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.902133") + max_val = float("1.29935") + mean = float("1.0661") + std = float("0.0758243") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.018898") + max_val = float("0.0499427") + mean = float("0.0287098") + std = float("0.00605409") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.231898") + max_val = float("0.0688819") + mean = float("-0.125601") + std = float("0.0439123") 
+ data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0562691") + max_val = float("0.0512178") + mean = float("-0.000426457") + std = float("0.00390448") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.271597") + max_val = float("0.154555") + mean = float("-0.0766582") + std = float("0.0617508") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.868096") + max_val = float("1.42109") + mean = float("1.07213") + std = float("0.102069") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.0171819") + max_val = float("0.0687327") + mean = float("0.0287449") + std = float("0.00770581") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.400371") + max_val = float("0.104118") + mean = float("-0.0957541") + std = float("0.0600117") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0925001") + max_val = float("0.0489937") + mean = float("-0.000364906") + std = float("0.0041691") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.296038") + max_val = float("0.243906") + mean = float("-0.0887742") + std = float("0.0827027") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.916715") + max_val = float("1.334") + mean = float("1.06036") + std = float("0.0645366") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.00325066") + max_val = float("0.0110054") + mean = float("0.00473589") + std = float("0.001239") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.113973") + max_val = float("0.123224") + mean = float("0.0297054") + std = float("0.0258378") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.105981") + max_val = float("0.0885541") + mean = float("-0.000734873") + std = float("0.00693994") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.120952") + max_val = float("0.0363702") + mean = float("-0.0203029") + std = float("0.0233046") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.879765") + max_val = float("1.17586") + mean = float("0.969206") + std = float("0.0423082") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.00211224") + max_val = float("0.0070068") + mean = float("0.00324433") + std = float("0.000567153") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.0339767") + 
max_val = float("0.0643795") + mean = float("0.0238762") + std = float("0.0164626") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0309541") + max_val = float("0.0443272") + mean = float("-0.000548112") + std = float("0.00572795") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("-4.77911") + max_val = float("-0.174842") + mean = float("-2.27228") + std = float("0.764092") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("2.0382") + max_val = float("5.2942") + mean = float("3.72706") + std = float("0.501183") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512] + dtype = "float32" + min_val = float("0.00977152") + max_val = float("0.0833149") + mean = float("0.032131") + std = float("0.0108973") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [512] + dtype = "float32" + min_val = float("-0.160608") + max_val = float("0.0518526") + mean = float("-0.0739815") + std = float("0.0271363") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.101649") + max_val = float("0.10945") + mean = float("-0.000805805") + std = float("0.00711625") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [384] + dtype = "float32" + min_val = float("-0.0274719") + max_val = float("0.0143867") + mean = float("-0.00272012") + std = float("0.00519342") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.227148") + max_val = float("0.221222") + mean = float("-0.000794466") + std = float("0.0046443") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("-2.41106") + max_val = float("2.27381") + mean = float("-0.268067") + std = float("0.512586") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("0.146878") + max_val = float("2.08389") + mean = float("0.464396") + std = float("0.321425") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192] + dtype = "float32" + min_val = float("0.000205663") + max_val = float("0.0111251") + mean = float("0.000819936") + std = float("0.000910607") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.0364641") + max_val = float("0.0557305") + mean = float("0.00655687") + std = float("0.015401") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0364203") + max_val = float("0.0573823") + mean = float("-0.000267145") + std = float("0.00421765") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("-2.41106") + max_val = float("2.27382") + mean = float("-0.268067") + std = float("0.512586") + data = None + + +class Program_weight_tensor_parameter_243: + 
name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("0.647048") + max_val = float("2.81886") + mean = float("1.33784") + std = float("0.428565") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192] + dtype = "float32" + min_val = float("0.00234535") + max_val = float("0.0577714") + mean = float("0.00615694") + std = float("0.0047529") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.211218") + max_val = float("0.175478") + mean = float("0.0152133") + std = float("0.0442249") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0743898") + max_val = float("0.0498769") + mean = float("-0.000137273") + std = float("0.0041025") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("-3.29064") + max_val = float("1.07945") + mean = float("-1.32605") + std = float("0.630957") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("0.513724") + max_val = float("1.92855") + mean = float("1.1575") + std = float("0.224218") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192] + dtype = "float32" + min_val = float("0.00926842") + max_val = float("0.189796") + mean = float("0.0238316") + std = float("0.015369") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.863732") + max_val = float("0.114053") + mean = float("-0.100492") + std = float("0.0943872") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0639678") + max_val = float("0.078498") + mean = float("-0.000335567") + std = float("0.00465814") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("-3.82125") + max_val = float("3.64066") + mean = float("-0.649827") + std = float("0.9088") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("0.700873") + max_val = float("4.22459") + mean = float("1.52278") + std = float("0.441681") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192] + dtype = "float32" + min_val = float("0.00373812") + max_val = float("0.0336142") + mean = float("0.00914042") + std = float("0.00437939") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.116975") + max_val = float("0.100974") + mean = float("0.0318221") + std = float("0.0317647") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.101871") + max_val = float("0.0818271") + mean = float("-0.00122698") + std = float("0.00807248") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("-2.93092") + max_val = float("0.900761") + mean = float("-0.417795") + std = float("0.669852") + data 
= None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("0.779637") + max_val = float("3.24102") + mean = float("1.45029") + std = float("0.410351") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("0.00146687") + max_val = float("0.00578144") + mean = float("0.002633") + std = float("0.000734089") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.0562443") + max_val = float("0.0545669") + mean = float("0.0120637") + std = float("0.0196875") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0642477") + max_val = float("0.0979527") + mean = float("-0.000524908") + std = float("0.00653808") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [384] + dtype = "float32" + min_val = float("-2.76839") + max_val = float("1.15892") + mean = float("-0.673301") + std = float("0.495481") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [384] + dtype = "float32" + min_val = float("0.43803") + max_val = float("1.91428") + mean = float("0.861767") + std = float("0.234739") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [384] + dtype = "float32" + min_val = float("0.00413913") + max_val = float("0.0393713") + mean = float("0.00990608") + std = float("0.00384697") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.478303") + max_val = float("0.321931") + mean = float("0.0163698") + std = float("0.0720831") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0679634") + max_val = float("0.0611975") + mean = float("-0.000134097") + std = float("0.00384231") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [256] + dtype = "float32" + min_val = float("-2.8162") + max_val = float("1.39734") + mean = float("-0.940697") + std = float("0.615996") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [256] + dtype = "float32" + min_val = float("0.390726") + max_val = float("1.712") + mean = float("0.92909") + std = float("0.167316") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [256] + dtype = "float32" + min_val = float("0.000355226") + max_val = float("0.00321611") + mean = float("0.00138761") + std = float("0.000408317") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [256] + dtype = "float32" + min_val = float("-0.156286") + max_val = float("0.107421") + mean = float("-0.040855") + std = float("0.0496864") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.27352") + max_val = float("0.157156") + mean = float("-0.000980957") + std = float("0.012179") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("-0.0155768") + max_val = 
float("-9.36271e-05") + mean = float("-0.00676362") + std = float("0.00354717") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.185322") + max_val = float("0.237056") + mean = float("-0.00501925") + std = float("0.0106032") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-2.30112") + max_val = float("0.841001") + mean = float("-0.0664297") + std = float("0.524061") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96] + dtype = "float32" + min_val = float("-0.115564") + max_val = float("2.24853") + mean = float("0.318519") + std = float("0.348008") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("1.16563e-10") + max_val = float("0.00300976") + mean = float("0.000517046") + std = float("0.00039977") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("-0.0461895") + max_val = float("0.0599696") + mean = float("0.00410402") + std = float("0.0168172") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0466564") + max_val = float("0.0676969") + mean = float("-0.000293502") + std = float("0.00611818") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-2.30112") + max_val = float("0.841001") + mean = float("-0.0664297") + std = float("0.524061") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96] + dtype = "float32" + min_val = float("0.473281") + max_val = float("3.257") + mean = float("1.28167") + std = float("0.623451") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("0.00164849") + max_val = float("0.0263845") + mean = float("0.00875255") + std = float("0.00413355") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("-0.146718") + max_val = float("0.139382") + mean = float("0.0178106") + std = float("0.0508589") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.143685") + max_val = float("0.113047") + mean = float("-0.00023819") + std = float("0.00681427") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-2.80903") + max_val = float("1.43021") + mean = float("-1.03635") + std = float("0.702823") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96] + dtype = "float32" + min_val = float("0.368812") + max_val = float("1.99847") + mean = float("1.07266") + std = float("0.230346") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [96] + dtype = "float32" + min_val = float("0.0155431") + max_val = float("0.0983109") + mean = float("0.0370436") + std = float("0.012129") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [96] + 
dtype = "float32" + min_val = float("-1.68824") + max_val = float("0.737955") + mean = float("-0.153639") + std = float("0.263982") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0523411") + max_val = float("0.0718337") + mean = float("-0.000482966") + std = float("0.00752343") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [96] + dtype = "float32" + min_val = float("-2.5473") + max_val = float("0.878288") + mean = float("-0.00501883") + std = float("0.508135") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [96] + dtype = "float32" + min_val = float("-0.0973851") + max_val = float("3.24762") + mean = float("0.303701") + std = float("0.398738") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [96] + dtype = "float32" + min_val = float("0.00012403") + max_val = float("0.0148205") + mean = float("0.00152153") + std = float("0.00193682") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [96] + dtype = "float32" + min_val = float("-0.0591962") + max_val = float("0.0766134") + mean = float("0.0125442") + std = float("0.0243314") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.11929") + max_val = float("0.0878058") + mean = float("-0.000966536") + std = float("0.0080697") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [96] + dtype = "float32" + min_val = float("-2.5473") + max_val = float("0.878288") + mean = float("-0.00501883") + std = float("0.508135") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [96] + dtype = "float32" + min_val = float("0.394281") + max_val = float("2.99785") + mean = float("0.923535") + std = float("0.401717") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("0.005866") + max_val = float("0.0413556") + mean = float("0.0180644") + std = float("0.00733924") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96] + dtype = "float32" + min_val = float("-0.140991") + max_val = float("0.168817") + mean = float("0.0320032") + std = float("0.0586912") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0776292") + max_val = float("0.0527524") + mean = float("-0.000441065") + std = float("0.00687005") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [96] + dtype = "float32" + min_val = float("-2.06688") + max_val = float("1.50729") + mean = float("-0.857923") + std = float("0.653949") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [96] + dtype = "float32" + min_val = float("0.442113") + max_val = float("1.99941") + mean = float("1.09177") + std = float("0.248078") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [96] + dtype = "float32" + min_val = float("0.00485225") + max_val = float("0.0356651") + mean = float("0.0140594") + std = float("0.00512839") + data = None + + +class 
Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [96] + dtype = "float32" + min_val = float("-0.617608") + max_val = float("0.129806") + mean = float("-0.0631251") + std = float("0.101093") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.135507") + max_val = float("0.133371") + mean = float("-0.000382566") + std = float("0.00799182") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [96] + dtype = "float32" + min_val = float("-1.49952") + max_val = float("1.862") + mean = float("0.0864269") + std = float("0.866041") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [96] + dtype = "float32" + min_val = float("0.274613") + max_val = float("1.34772") + mean = float("0.691119") + std = float("0.271591") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [96] + dtype = "float32" + min_val = float("0.006796") + max_val = float("0.0499783") + mean = float("0.0196862") + std = float("0.00997736") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [96] + dtype = "float32" + min_val = float("-0.381106") + max_val = float("0.278458") + mean = float("-0.0546723") + std = float("0.113558") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.149001") + max_val = float("0.140387") + mean = float("-0.00119829") + std = float("0.0127954") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [96] + dtype = "float32" + min_val = float("-2.54133") + max_val = float("1.66022") + mean = float("0.395272") + std = float("0.701324") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [96] + dtype = "float32" + min_val = float("0.39507") + max_val = float("4.69306") + mean = float("1.37046") + std = float("0.960383") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("0.00404621") + max_val = float("0.0462624") + mean = float("0.0137152") + std = float("0.00745147") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("-0.188786") + max_val = float("0.230307") + mean = float("-0.00047936") + std = float("0.104303") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0896216") + max_val = float("0.126368") + mean = float("-0.000492174") + std = float("0.012798") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [192] + dtype = "float32" + min_val = float("-4.59029") + max_val = float("2.0155") + mean = float("-0.0580109") + std = float("0.877782") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [192] + dtype = "float32" + min_val = float("0.533524") + max_val = float("4.3441") + mean = float("1.0423") + std = float("0.43243") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [192] + dtype = "float32" + min_val = float("0.00470566") + max_val = float("0.0620636") + mean = float("0.014299") + std = 
float("0.00889918") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [192] + dtype = "float32" + min_val = float("-0.272855") + max_val = float("0.258272") + mean = float("0.00418542") + std = float("0.0930486") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0986878") + max_val = float("0.111357") + mean = float("-0.000201864") + std = float("0.00663606") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [128] + dtype = "float32" + min_val = float("-2.16488") + max_val = float("1.44676") + mean = float("-0.610202") + std = float("0.629848") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [128] + dtype = "float32" + min_val = float("0.311593") + max_val = float("2.21849") + mean = float("0.791154") + std = float("0.218742") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [128] + dtype = "float32" + min_val = float("0.000645733") + max_val = float("0.00741024") + mean = float("0.0024279") + std = float("0.00115745") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [128] + dtype = "float32" + min_val = float("-0.276983") + max_val = float("0.309512") + mean = float("-0.0515203") + std = float("0.0926434") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.176073") + max_val = float("0.189129") + mean = float("-0.00129442") + std = float("0.0200864") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [96] + dtype = "float32" + min_val = float("-0.0203083") + max_val = float("0.00208148") + mean = float("-0.00823925") + std = float("0.00577396") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.286719") + max_val = float("0.245506") + mean = float("-0.00625776") + std = float("0.0174922") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0598021") + max_val = float("0.0811294") + mean = float("-0.00090241") + std = float("0.0116553") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_333: + 
name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0893973") + max_val = float("0.077563") + mean = float("-0.000393921") + std = float("0.0112102") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0787366") + max_val = float("0.1005") + mean = float("-0.000691606") + std = float("0.0120837") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0642506") + max_val = float("0.0760078") + mean = float("-0.00229616") + std = float("0.0141023") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0653637") + max_val = float("0.0531955") + mean = float("-0.000739256") + std = float("0.0109055") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: 
+ name = "parameter_352" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.124063") + max_val = float("0.0767722") + mean = float("-0.000652225") + std = float("0.0129057") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.129018") + max_val = float("0.124719") + mean = float("-0.00195061") + std = float("0.0200105") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.135617") + max_val = float("0.181517") + mean = float("0.000335818") + std = float("0.0209384") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [96] + dtype = "float32" + min_val = float("-3.46393") + max_val = float("3.82245") + mean = float("0.316363") + std = float("1.20153") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [96] + dtype = "float32" + min_val = float("0.534383") + max_val = float("5.51527") + mean = float("1.05391") + std = float("0.547347") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [96] + dtype = "float32" + min_val = float("0.00978595") + max_val = float("0.144704") + mean = float("0.0264654") + std = float("0.0202695") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [96] + dtype = "float32" + min_val = float("-0.591327") + max_val = float("0.416823") + mean = float("-0.0293186") + std = float("0.143967") + data = None + + +class 
Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.101468") + max_val = float("0.098544") + mean = float("-0.000275316") + std = float("0.0106091") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.180678") + max_val = float("0.17449") + mean = float("-0.00103651") + std = float("0.0302799") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.167227") + max_val = float("0.132986") + mean = float("-0.0166566") + std = float("0.0269477") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.0976876") + max_val = float("0.0995377") + mean = float("0.00100382") + std = float("0.0257818") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.13213") + max_val = float("0.0867777") + mean = float("-0.000752429") + std = float("0.0185467") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [24] + dtype = 
"float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.158527") + max_val = float("0.154681") + mean = float("-0.000380716") + std = float("0.0212995") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.145078") + max_val = float("0.126502") + mean = float("-0.0028948") + std = float("0.0321043") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.159032") + max_val = float("0.143413") + mean = float("-0.000644695") + std = float("0.0341675") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.132866") + max_val = float("0.117934") + 
mean = float("7.46602e-06") + std = float("0.0177879") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.233486") + max_val = float("0.250765") + mean = float("-0.000217703") + std = float("0.0311029") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.24881") + max_val = float("0.281888") + mean = float("0.000471201") + std = float("0.0425702") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.18237") + max_val = float("0.258446") + mean = float("-0.00139947") + std = float("0.0569332") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..b50f88ce3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +d807d9117d9ce029721550bdd5354682553d85490a71e92888f424c4238f045b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": 
"paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py new file mode 100644 index 000000000..25d19e1c4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/input_meta.py @@ -0,0 +1,60 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6069] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 3, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000508046") + std = float("0.0225342") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 3, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00152414") + std = float("0.0390105") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 3, 4] + dtype = "float32" + max_val = float("509.089") + mean = float("160.185") + std = float("176.435") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 6069, 4] + dtype = "float32" + min_val = float("-272.115") + max_val = float("831.985") + mean = float("272.715") + std = float("171.769") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py new file mode 100644 index 000000000..77efae021 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/model.py @@ -0,0 +1,310 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (8x-1xi64) <- (8x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (8x1xi32) <- (8x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(unsqueeze_0, cast_0) + del cast_0, unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (8x-1xi64) <- (8x-1xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # 
pd_op.flatten: (-1xi32) <- (8x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_3, 0, 2) + del data_3 + + # pd_op.flatten: (-1xi64) <- (8x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("8"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x-1xb) <- (8x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_4, full_6) + del data_4, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x-1xi32) <- (8x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x-1xi32) <- (8x-1xb, 8x-1xi32, 8x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (8x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_1) + del full_int_array_1 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_1, full_8] + del data_1, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (8x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x-1x5xf32) <- (8x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_9, where_0.dtype), full_9 + ) + del full_9 + + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.index_select: (8x-1x4xf32) <- (8x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.unsqueeze: (8x-1x1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_1 = 
paddle._C_ops.unsqueeze(data_5, full_int_array_2) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.unsqueeze: (8x1x-1x4xf32) <- (8x-1x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (8x-1x1x2xf32) <- (8x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_2, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.slice: (8x-1x1x2xf32) <- (8x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x-1x2xf32) <- (8x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_2, [1], [] + ) + del full_int_array_4 + + # pd_op.slice: (8x1x-1x2xf32) <- (8x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_2, full_int_array_5, [1], [] + ) + del full_int_array_2, full_int_array_5, unsqueeze_2 + + # pd_op.maximum: (8x-1x-1x2xf32) <- (8x-1x1x2xf32, 8x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x-1x-1x2xf32) <- (8x-1x1x2xf32, 8x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x-1x-1x2xf32) <- (8x-1x-1x2xf32, 8x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_12 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x-1x-1x2xf32) <- (8x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_11, full_12) + del subtract_0 + + # pd_op.prod: (8x-1x-1xf32) <- (8x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x-1x1x2xf32) <- (8x-1x1x2xf32, 8x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x-1x1x2xf32) <- (8x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_11, full_12) + del subtract_1 + + # pd_op.prod: (8x-1x1xf32) <- (8x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x-1x2xf32) <- (8x1x-1x2xf32, 8x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x-1x2xf32) <- (8x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_11, full_12) + del full_11, full_12, subtract_2 + + # pd_op.prod: (8x1x-1xf32) <- (8x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x-1x-1xf32) <- (8x-1x1xf32, 8x1x-1xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.full: (1xf32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x-1xf32) <- (8x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_13, float("1e-09"), True) + del full_13, 
subtract_3 + + # pd_op.divide: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.multiply: (8x-1x-1xf32) <- (8x-1x-1xf32, 8x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(divide_0, data_2) + del data_2, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-2] + + # pd_op.max: (8x-1xf32) <- (8x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, False) + del full_int_array_6, multiply_2 + + # pd_op.unsqueeze: (8x-1x1xf32) <- (8x-1xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x-1x4xf32) <- (8x-1x4xf32, 8x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..06c2e2c81 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +e4523a4b80f6d91bda08e60737d8d55feb6499fbd5650e10a21e653f89b2a688 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_net.json new file mode 100644 index 000000000..469954d4b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/input_meta.py new file mode 100644 index 000000000..d715a84f5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 7581, 68] + dtype = "float32" + min_val = float("-7.67037") + max_val = float("18.2582") + mean = float("2.29958e-05") + std = float("1.73649") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [7581, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("604.0") + mean = float("304.0") + std = float("175.48") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [7581, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/model.py new file mode 100644 index 000000000..f8afeafae --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/model.py @@ -0,0 +1,158 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2): + # pd_op.divide: (7581x2xf32) <- (7581x2xf32, 7581x1xf32) + divide_0 = paddle._C_ops.divide(data_1, data_2) + del data_1 + + # pd_op.shape64: (3xi64) <- (8x7581x68xf32) + 
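+ # Descriptive note on this subgraph (an interpretation added for readability, not
+ # part of the generated dump): data_0 carries 68 regression channels per anchor,
+ # which the ops below reshape to 4 sides x 17 bins, softmax-normalise over the
+ # bins, and reduce with the fixed 1x17x1x1 conv weight parameter_0 -- the usual
+ # "integral"/DFL-style decode of a per-side distance distribution into a single
+ # expected offset. In the standard DFL formulation that projection holds the bin
+ # indices 0..16; here only summary statistics of parameter_0 are recorded (see
+ # weight_meta.py). A rough standalone sketch of the same reduction, with
+ # illustrative names only (not taken from the graph):
+ #
+ #     import paddle
+ #     import paddle.nn.functional as F
+ #     logits = paddle.randn([8, 7581, 4, 17])        # per-side bin logits
+ #     bins = paddle.arange(17, dtype="float32")      # assumed 0..16 projection
+ #     offsets = (F.softmax(logits, axis=-1) * bins).sum(axis=-1)
+ #
+ # The decoded offsets are then split into left-top / right-bottom halves, combined
+ # with the per-anchor points divide_0 = data_1 / data_2 computed above, and the
+ # result is rescaled elementwise by data_2 (whose values of 8-32 in input_meta.py
+ # are consistent with per-anchor FPN strides) before being returned.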
shape64_0 = paddle._C_ops.shape64(data_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x17xf32) <- (8x7581x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x7581x2xf32) <- (-1x-1x2xf32, 7581x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x7581x2xf32) <- (-1x-1x2xf32, 7581x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x7581x2xf32, -1x7581x2xf32]) <- (-1x7581x2xf32, -1x7581x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x7581x4xf32) <- ([-1x7581x2xf32, -1x7581x2xf32], 1xi32) + concat_0 = 
paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (-1x7581x4xf32) <- (-1x7581x4xf32) + share_data__0 = concat_0.detach() + + # pd_op.multiply: (-1x7581x4xf32) <- (-1x7581x4xf32, 7581x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, data_2) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_2, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__0, + softmax_0, + split_1, + transpose_0, + ) + + return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/weight_meta.py new file mode 100644 index 000000000..28198680e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..44b201d32 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +4e7e6ee2874f6062a0fb0f9f01605da0630c431ba471c345582d30d102a90ff5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/input_meta.py new file mode 100644 index 000000000..ebfe3bf7c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/input_meta.py @@ -0,0 +1,119 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0319338") + max_val = float("0.0317944") + mean = float("-5.17613e-06") + std = float("0.0176378") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3072] + dtype = "float32" + min_val = float("-0.000621416") + max_val = float("0.000743397") + mean = float("-4.93151e-06") + std = float("0.000159931") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0310116") + max_val = float("0.031039") + mean = float("-4.13369e-06") + std = float("0.0176368") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3072] + dtype = "float32" + min_val = float("-0.000442003") + max_val = float("0.000385991") + mean = float("-8.05444e-07") + std = float("0.000101625") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0310191") + max_val = float("0.0309323") + mean = float("-4.0744e-06") + std = float("0.0176363") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [3072] + dtype = "float32" + min_val = float("-0.000274454") + max_val = float("0.000291758") + mean = float("-1.13595e-07") + std = float("6.90172e-05") + data = None + + +class 
Program_weight_tensor_data_6: + name = "data_6" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.030929") + max_val = float("0.0309595") + mean = float("-4.0499e-06") + std = float("0.017636") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [3072] + dtype = "float32" + min_val = float("-0.0002782") + max_val = float("0.000249259") + mean = float("3.53385e-07") + std = float("5.71797e-05") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 256, 64, 64] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.55235") + mean = float("-0.0636016") + std = float("0.381313") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 512, 32, 32] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.7138") + mean = float("-0.0996885") + std = float("0.330529") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [2, 1024, 16, 16] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("20.4558") + mean = float("0.284252") + std = float("1.12508") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/model.py new file mode 100644 index 000000000..501cc0ef5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/model.py @@ -0,0 +1,4857 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + 
parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + 
parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + ): + # pd_op.flatten: (2x1024x256xf32) <- (2x1024x16x16xf32) + flatten_0 = paddle._C_ops.flatten(data_10, 2, 3) + del data_10 + + # pd_op.transpose: (2x256x1024xf32) <- (2x1024x256xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("16"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (16xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="float32") + del full_1 + + # builtin.combine: ([16xf32, 16xf32]) <- (16xf32, 16xf32) + combine_0 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([16x16xf32, 16x16xf32]) <- ([16xf32, 16xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (16x16xf32, 16x16xf32) <- ([16x16xf32, 16x16xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_3, full_2, dtype="float32") + del full_0, full_2, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_4, float("0"), True) + del arange_1, full_4 + + # pd_op.full: (256xf32) <- () + full_5 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_5, scale_0) + del full_5, scale_0 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_6, elementwise_pow_0) + del elementwise_pow_0, full_6 + + # pd_op.flatten: (256xf32) <- (16x16xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (256x1xf32) <- (256xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_0) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: 
(1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_1 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_1) + del divide_0 + + # pd_op.matmul: (256x256xf32) <- (256x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (256xf32) <- (16x16xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (256x1xf32) <- (256xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_0) + del flatten_2, full_int_array_0 + + # pd_op.matmul: (256x256xf32) <- (256x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (256x256xf32) <- (256x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (256x256xf32) <- (256x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (256x256xf32) <- (256x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (256x256xf32) <- (256x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_13 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_14 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_15 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_16 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_17 = full_7 + + # builtin.combine: ([256x256xf32, 256x256xf32, 256x256xf32, 256x256xf32]) <- (256x256xf32, 256x256xf32, 256x256xf32, 256x256xf32) + combine_1 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (256x1024xf32) <- ([256x256xf32, 256x256xf32, 256x256xf32, 256x256xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1 + + # pd_op.unsqueeze: (1x256x1024xf32) <- (256x1024xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_0, full_int_array_1) + del concat_0 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1x256x1024xf32) + add_0 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = 
full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_2 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_0, slice_0, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_1 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [0, 0, 4, 256] + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [0, 2, 1, 3]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_1, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_0, slice_2, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_2 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_1, [0, 2, 1, 3]) + del reshape_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_5 + + # pd_op.assign: (1xi64) <- 
(1xi64) + assign_50 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_5 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_0, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_0 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_1, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_3 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_55 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_56 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_57 = full_8 + + # pd_op.scale: (2x4x256x256xf32) <- (2x4x256x256xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (2x4x256x256xf32) <- (2x4x256x256xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_58 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_59 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_60 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.dropout: (2x4x256x256xf32, 2x4x256x256xui8) <- (2x4x256x256xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (2x256x4x256xf32) <- (2x4x256x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # 
pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, 0, 1024] + + # pd_op.reshape: (2x256x1024xf32) <- (2x256x4x256xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_4, full_int_array_6) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_3, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_4 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_5 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (2x256x2048xf32) <- (2x256x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (2x256x2048xf32) <- (2x256x2048xf32, 2048xf32) + add_6 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (2x256x2048xf32) <- (2x256x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_6, False) + + # pd_op.dropout: (2x256x2048xf32, 2x256x2048xui8) <- (2x256x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_7 = paddle._C_ops.add(matmul_9, parameter_357) + del parameter_357 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_7, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_7 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_8 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_8, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1x256x1024xf32) + add_9 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_2, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_3, [0], full_int_array_1, 
full_int_array_2, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_9, slice_6, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_10 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_10, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_2, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_3, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_9, slice_8, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_11 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_11, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_3, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_3 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_12 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_12, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (2x4x256x256xf32) <- (2x4x256x256xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (2x4x256x256xf32) <- (2x4x256x256xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (2x4x256x256xf32, 2x4x256x256xui8) <- (2x4x256x256xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (2x256x4x256xf32) <- (2x4x256x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (2x256x1024xf32) <- (2x256x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_8, full_int_array_6) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_7, parameter_354, 
False, False) + del parameter_354 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_13 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_13, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_13 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_14 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (2x256x2048xf32) <- (2x256x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (2x256x2048xf32) <- (2x256x2048xf32, 2048xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (2x256x2048xf32) <- (2x256x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_15, False) + + # pd_op.dropout: (2x256x2048xf32, 2x256x2048xui8) <- (2x256x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_16 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_16, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_16 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_17 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_17, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1x256x1024xf32) + add_18 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_4, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_5, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_18, slice_12, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_19 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: 
(2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_19, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_4, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_5, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_18, slice_14, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_20 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_20, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_4, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_4 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_5, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_5 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_21 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_21, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (2x4x256x256xf32) <- (2x4x256x256xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (2x4x256x256xf32) <- (2x4x256x256xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (2x4x256x256xf32, 2x4x256x256xui8) <- (2x4x256x256xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (2x256x4x256xf32) <- (2x4x256x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (2x256x1024xf32) <- (2x256x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_12, full_int_array_6) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_11, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_343) + del parameter_343 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_18, dropout_19 = 
(lambda x, f: f(x))( + paddle._C_ops.dropout( + add_22, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_22 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_23 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (2x256x2048xf32) <- (2x256x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (2x256x2048xf32) <- (2x256x2048xf32, 2048xf32) + add_24 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (2x256x2048xf32) <- (2x256x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_24, False) + + # pd_op.dropout: (2x256x2048xf32, 2x256x2048xui8) <- (2x256x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_25 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_25 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_26 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_26, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1x256x1024xf32) + add_27 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_6, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_7, [0], full_int_array_1, full_int_array_2, [1], [] + ) + del full_int_array_1 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_27, slice_18, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_28 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_28, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # 
pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_6, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_7, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_27, slice_20, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_29 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_29, full_int_array_3) + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_6, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_6 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_7, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_7 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_30 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (2x256x4x256xf32) <- (2x256x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, full_int_array_3) + del full_int_array_3 + + # pd_op.transpose: (2x4x256x256xf32) <- (2x256x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (2x4x256x256xf32) <- (2x4x256x256xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (2x4x256x256xf32) <- (2x4x256x256xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (2x4x256x256xf32, 2x4x256x256xui8) <- (2x4x256x256xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x256x256xf32) <- (2x4x256x256xf32, 2x4x256x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (2x256x4x256xf32) <- (2x4x256x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (2x256x1024xf32) <- (2x256x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_15, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_31 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_31, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_31 + + # 
pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_32 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_32, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (2x256x2048xf32) <- (2x256x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (2x256x2048xf32) <- (2x256x2048xf32, 2048xf32) + add_33 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (2x256x2048xf32) <- (2x256x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_33, False) + + # pd_op.dropout: (2x256x2048xf32, 2x256x2048xui8) <- (2x256x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (2x256x1024xf32) <- (2x256x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) + del parameter_328 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 1024xf32) + add_34 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (2x256x1024xf32, 2x256x1024xui8) <- (2x256x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_34, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_34 + + # pd_op.add: (2x256x1024xf32) <- (2x256x1024xf32, 2x256x1024xf32) + add_35 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (2x256x1024xf32, 2x256xf32, 2x256xf32) <- (2x256x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_35, parameter_326, parameter_325, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (2x1024x256xf32) <- (2x256x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [2, 1024, 16, 16] + + # pd_op.reshape: (2x1024x16x16xf32) <- (2x1024x256xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(transpose_17, full_int_array_7) + del full_int_array_7 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1024x16x16xf32, 384x1024x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + reshape_16, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, 
parameter_322, parameter_323 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1024x16x16xf32, 384x1024x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_16, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_36 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_4 = paddle._C_ops.swish(add_36) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_5, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_37 = paddle._C_ops.add(batch_norm__36, batch_norm__42) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_6 = paddle._C_ops.swish(add_37) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [5, 5] + + # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_6, + full_int_array_8, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # 
pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [9, 9] + + # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_6, + full_int_array_9, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [13, 13] + + # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_6, + full_int_array_10, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32) + combine_2 = [swish_6, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x1536x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1536x16x16xf32, 384x1536x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + concat_1, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_7 = paddle._C_ops.swish(batch_norm__48) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + swish_7, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, 
+ False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_38 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_9 = paddle._C_ops.swish(add_38) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) + combine_3 = [swish_1, swish_9] + + # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_3, full_7) + del combine_3 + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + concat_2, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_11 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.nearest_interp: (2x384x32x32xf32) <- (2x384x16x16xf32, None, None, None) + nearest_interp_0 = 
paddle._C_ops.nearest_interp( + swish_11, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x384x32x32xf32, 2x512x-1x-1xf32]) <- (2x384x32x32xf32, 2x512x-1x-1xf32) + combine_4 = [nearest_interp_0, data_9] + del data_9 + + # pd_op.concat: (2x896x32x32xf32) <- ([2x384x32x32xf32, 2x512x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_4, full_7) + del combine_4 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x896x32x32xf32, 192x896x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + concat_3, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_12 = paddle._C_ops.swish(batch_norm__84) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x896x32x32xf32, 192x896x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + concat_3, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_14, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: 
(2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_39 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_15 = paddle._C_ops.swish(add_39) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (2x192x32x32xf32) <- 
(2x192x32x32xf32, 192x192x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_40 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_17 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + 
if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_41 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_19 = paddle._C_ops.swish(add_41) + + # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) + combine_5 = [swish_12, swish_19] + + # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_7) + del combine_5 + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + concat_4, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_21 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.nearest_interp: (2x192x64x64xf32) <- (2x192x32x32xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_21, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x64x64xf32, 2x256x-1x-1xf32]) <- (2x192x64x64xf32, 2x256x-1x-1xf32) + combine_6 = [nearest_interp_1, data_8] + del data_8 + + # pd_op.concat: (2x448x64x64xf32) <- ([2x192x64x64xf32, 2x256x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_7) + del combine_6 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x448x64x64xf32, 96x448x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + concat_5, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + 
batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_22 = paddle._C_ops.swish(batch_norm__162) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x448x64x64xf32, 96x448x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + concat_5, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_24, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_24, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_42 = paddle._C_ops.add(batch_norm__180, batch_norm__186) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_25 = paddle._C_ops.swish(add_42) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + swish_25, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_26 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_26, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_43 = paddle._C_ops.add(batch_norm__198, batch_norm__204) + + # pd_op.swish: 
(2x96x64x64xf32) <- (2x96x64x64xf32) + swish_27 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_27, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_28 = paddle._C_ops.swish(batch_norm__210) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_44 = paddle._C_ops.add(batch_norm__216, batch_norm__222) + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_29 = paddle._C_ops.swish(add_44) + + # builtin.combine: ([2x96x64x64xf32, 2x96x64x64xf32]) <- (2x96x64x64xf32, 2x96x64x64xf32) + combine_7 = [swish_22, swish_29] + + # pd_op.concat: (2x192x64x64xf32) <- ([2x96x64x64xf32, 2x96x64x64xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_7) + del combine_7 + + # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 
192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x64x64xf32, 192x192x3x3xf32) + conv2d_39 = paddle._C_ops.conv2d( + swish_30, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # builtin.combine: ([2x192x32x32xf32, 2x384x32x32xf32]) <- (2x192x32x32xf32, 2x384x32x32xf32) + combine_8 = [swish_31, swish_20] + + # pd_op.concat: (2x576x32x32xf32) <- ([2x192x32x32xf32, 2x384x32x32xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_7) + del combine_8 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x576x32x32xf32, 192x576x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + concat_7, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x576x32x32xf32, 192x576x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + concat_7, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) 
+ else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_33, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_34 = paddle._C_ops.swish(batch_norm__252) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_34, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_45 = paddle._C_ops.add(batch_norm__258, batch_norm__264) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_35 = paddle._C_ops.swish(add_45) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_35, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + 
batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_36 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + swish_36, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_36, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_46 = paddle._C_ops.add(batch_norm__276, batch_norm__282) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_37 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_38 = paddle._C_ops.swish(batch_norm__288) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + swish_38, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_38, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_47 = paddle._C_ops.add(batch_norm__294, batch_norm__300) + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_39 = paddle._C_ops.swish(add_47) + + # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) + combine_9 = [swish_32, swish_39] + + # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_7) + del combine_9 + + # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + concat_8, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) + swish_40 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x32x32xf32, 384x384x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_63, + parameter_62, + 
parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_41 = paddle._C_ops.swish(batch_norm__312) + + # builtin.combine: ([2x384x16x16xf32, 2x768x16x16xf32]) <- (2x384x16x16xf32, 2x768x16x16xf32) + combine_10 = [swish_41, swish_10] + + # pd_op.concat: (2x1152x16x16xf32) <- ([2x384x16x16xf32, 2x768x16x16xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_7) + del combine_10 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1152x16x16xf32, 384x1152x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + concat_9, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_42 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x1152x16x16xf32, 384x1152x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_43 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_44 = paddle._C_ops.swish(batch_norm__330) + + # pd_op.conv2d: (2x384x16x16xf32) 
<- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_44, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_44, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_48 = paddle._C_ops.add(batch_norm__336, batch_norm__342) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_45 = paddle._C_ops.swish(add_48) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_46 = paddle._C_ops.swish(batch_norm__348) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_49 = paddle._C_ops.add(batch_norm__354, batch_norm__360) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_47 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + swish_47, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_48 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_48, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_8, + parameter_7, + 
parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_50 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_49 = paddle._C_ops.swish(add_50) + + # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) + combine_11 = [swish_42, swish_49] + + # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_7) + del combine_11, full_7 + + # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_10, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) + swish_0 = paddle._C_ops.swish(batch_norm__384) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_14, + add_15, + add_17, + add_18, + add_19, + add_2, + add_20, + add_21, + add_23, + add_24, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_32, + add_33, + add_35, + add_36, + add_37, + add_38, + add_39, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, 
+ batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + 
batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_1, + concat_10, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + 
conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_7, + conv2d_8, + conv2d_9, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_8, + full_9, + full_int_array_10, + full_int_array_2, + full_int_array_4, + full_int_array_5, + full_int_array_8, + full_int_array_9, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_11, + reshape_15, + reshape_16, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_6, + swish_7, + swish_8, + swish_9, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_3, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/weight_meta.py new file mode 100644 index 000000000..d2d4fb565 --- /dev/null 
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/weight_meta.py @@ -0,0 +1,4013 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.17593") + max_val = float("0.21086") + mean = float("0.0834788") + std = float("0.0566121") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.939951") + max_val = float("1.29826") + mean = float("1.064") + std = float("0.031232") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00128034") + max_val = float("0.0459973") + mean = float("0.0061553") + std = float("0.00386394") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.129403") + max_val = float("0.0511612") + mean = float("-0.0268484") + std = float("0.0273184") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0515617") + max_val = float("0.0379045") + mean = float("-0.000141602") + std = float("0.00240107") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.141652") + max_val = float("0.0305712") + mean = float("-0.0187927") + std = float("0.0234487") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.945803") + max_val = float("1.04446") + mean = float("0.986675") + std = float("0.0105807") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000826368") + max_val = float("0.0165504") + mean = float("0.0042159") + std = float("0.00271334") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0550643") + max_val = float("0.0591576") + mean = float("0.00270637") + std = float("0.0215795") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0308378") + max_val = float("0.0199196") + mean = float("2.09116e-05") + std = float("0.00184746") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.141652") + max_val = float("0.0305712") + mean = float("-0.0187927") + std = float("0.0234487") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.968047") + max_val = float("1.13059") + mean = float("1.01542") + std = float("0.0171839") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00227541") + max_val = float("0.0355423") + mean = float("0.00704835") + std = float("0.00406077") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.17319") + max_val = float("0.125086") + mean = float("-0.0400023") + std = float("0.0316758") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = 
float("-0.0283361") + max_val = float("0.0330584") + mean = float("-7.38122e-05") + std = float("0.00125417") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170186") + max_val = float("0.0209453") + mean = float("-0.0348788") + std = float("0.027926") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975256") + max_val = float("1.12591") + mean = float("1.01501") + std = float("0.0240755") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00655882") + max_val = float("0.183112") + mean = float("0.0252295") + std = float("0.0163531") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.246438") + max_val = float("0.418065") + mean = float("-0.0426188") + std = float("0.0522812") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0311732") + max_val = float("0.0503283") + mean = float("-6.05423e-05") + std = float("0.00141653") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.105187") + max_val = float("0.0129825") + mean = float("-0.0357886") + std = float("0.0193112") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945566") + max_val = float("1.0451") + mean = float("0.98866") + std = float("0.0098489") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.00064691") + max_val = float("0.0205807") + mean = float("0.00349649") + std = float("0.00216562") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0783438") + max_val = float("0.0410111") + mean = float("-0.00212154") + std = float("0.0160542") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0260461") + max_val = float("0.024809") + mean = float("-5.13316e-05") + std = float("0.00195167") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105187") + max_val = float("0.0129825") + mean = float("-0.0357886") + std = float("0.0193112") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959553") + max_val = float("1.10507") + mean = float("1.01607") + std = float("0.017749") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00246563") + max_val = float("0.0233788") + mean = float("0.00753233") + std = float("0.0032878") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.160408") + max_val = float("0.247429") + mean = float("-0.0455298") + std = float("0.0367156") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + 
shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0342413") + max_val = float("0.0490689") + mean = float("-8.25369e-05") + std = float("0.00126683") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896544") + max_val = float("0.0192772") + mean = float("-0.036069") + std = float("0.0194634") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933175") + max_val = float("1.1146") + mean = float("1.01167") + std = float("0.0265813") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00508576") + max_val = float("0.0630274") + mean = float("0.0178223") + std = float("0.00924354") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.188576") + max_val = float("0.102106") + mean = float("-0.024539") + std = float("0.0481108") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.03891") + max_val = float("0.0474712") + mean = float("-5.22659e-05") + std = float("0.00144712") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116304") + max_val = float("0.0162109") + mean = float("-0.0373543") + std = float("0.0201432") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929317") + max_val = float("1.02782") + mean = float("0.987068") + std = float("0.0110352") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00121281") + max_val = float("0.00972727") + mean = float("0.00387918") + std = float("0.0014146") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0546195") + max_val = float("0.0373569") + mean = float("-0.00766286") + std = float("0.0127826") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0370487") + max_val = float("0.0266811") + mean = float("-0.000137537") + std = float("0.00195837") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116304") + max_val = float("0.0162109") + mean = float("-0.0373543") + std = float("0.0201432") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.98123") + max_val = float("1.10689") + mean = float("1.01832") + std = float("0.0222072") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00431935") + max_val = float("0.027251") + mean = float("0.0103621") + std = float("0.00427527") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.162702") + max_val = float("0.0987408") + mean = float("-0.022229") + std = float("0.0329557") + data = None + + +class 
Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0342559") + max_val = float("0.0596349") + mean = float("-4.36875e-05") + std = float("0.00131956") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.10708") + max_val = float("0.0239016") + mean = float("-0.0375156") + std = float("0.0214475") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944782") + max_val = float("1.11463") + mean = float("1.01186") + std = float("0.0277861") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00517106") + max_val = float("0.0641467") + mean = float("0.0132815") + std = float("0.00639881") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.147393") + max_val = float("0.123318") + mean = float("-0.0427475") + std = float("0.0467635") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0265762") + max_val = float("0.0412332") + mean = float("-7.49204e-05") + std = float("0.00147166") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.106797") + max_val = float("0.0466791") + mean = float("-0.0263049") + std = float("0.0154084") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.973685") + max_val = float("1.08651") + mean = float("1.00904") + std = float("0.0171201") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00212031") + max_val = float("0.016262") + mean = float("0.00476274") + std = float("0.00178448") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0942358") + max_val = float("0.0732549") + mean = float("-0.0170946") + std = float("0.025708") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0600919") + max_val = float("0.0698524") + mean = float("-7.84512e-05") + std = float("0.00221114") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0425269") + max_val = float("0.0160945") + mean = float("-0.00899787") + std = float("0.0084152") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.959381") + max_val = float("1.05138") + mean = float("1.0079") + std = float("0.0115872") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00163009") + max_val = float("0.0293019") + mean = float("0.00413516") + std = float("0.00189705") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.101541") + max_val = float("0.0982145") + mean = 
float("-0.0218958") + std = float("0.0241032") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0252361") + max_val = float("0.0409913") + mean = float("-0.000104762") + std = float("0.00201566") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0530204") + max_val = float("0.00596346") + mean = float("-0.0166175") + std = float("0.00987677") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988638") + max_val = float("1.10406") + mean = float("1.0196") + std = float("0.0169013") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00406037") + max_val = float("0.0469132") + mean = float("0.0125359") + std = float("0.00694272") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.36556") + max_val = float("0.200382") + mean = float("-0.0416361") + std = float("0.0643306") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0201862") + max_val = float("0.031451") + mean = float("-3.02081e-05") + std = float("0.00114789") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.222757") + max_val = float("0.492149") + mean = float("0.21719") + std = float("0.124238") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919224") + max_val = float("1.48061") + mean = float("1.14128") + std = float("0.0737788") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00376869") + max_val = float("0.0586683") + mean = float("0.0116254") + std = float("0.00583103") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.155941") + max_val = float("0.0846452") + mean = float("-0.0284778") + std = float("0.0330229") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0888912") + max_val = float("0.0965183") + mean = float("-0.000337013") + std = float("0.00511324") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.166128") + max_val = float("0.0467049") + mean = float("-0.0250411") + std = float("0.0394649") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.840981") + max_val = float("1.05113") + mean = float("0.972821") + std = float("0.0237636") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00150294") + max_val = float("0.0248346") + mean = float("0.00510081") + std = float("0.00302799") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = 
float("-0.0627516") + max_val = float("0.0799353") + mean = float("-0.00481074") + std = float("0.0191172") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0448658") + max_val = float("0.0336826") + mean = float("-0.00015715") + std = float("0.00377127") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.166128") + max_val = float("0.0467049") + mean = float("-0.0250411") + std = float("0.0394649") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.729211") + max_val = float("1.12261") + mean = float("1.02194") + std = float("0.0372634") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00576289") + max_val = float("0.0748888") + mean = float("0.0166601") + std = float("0.00854005") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.19336") + max_val = float("0.0877759") + mean = float("-0.0413079") + std = float("0.0432136") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0346467") + max_val = float("0.0452672") + mean = float("-0.000137324") + std = float("0.00244934") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.19142") + max_val = float("0.0441436") + mean = float("-0.058024") + std = float("0.0490546") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897164") + max_val = float("1.18718") + mean = float("1.01553") + std = float("0.0484562") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.0129814") + max_val = float("0.153703") + mean = float("0.0348507") + std = float("0.018234") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.330032") + max_val = float("0.47282") + mean = float("-0.0418917") + std = float("0.0610293") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0436644") + max_val = float("0.075913") + mean = float("-0.00010198") + std = float("0.00273803") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191734") + max_val = float("0.00856311") + mean = float("-0.0642178") + std = float("0.0333761") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922072") + max_val = float("1.04654") + mean = float("0.97362") + std = float("0.0179925") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00120205") + max_val = float("0.010442") + mean = float("0.00383901") + std = float("0.0015349") + data = None + + +class Program_weight_tensor_parameter_88: + name = 
"parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0564217") + max_val = float("0.0355745") + mean = float("-0.0072537") + std = float("0.0145576") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0390012") + max_val = float("0.0317125") + mean = float("-0.000327587") + std = float("0.00370887") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191734") + max_val = float("0.00856312") + mean = float("-0.0642178") + std = float("0.0333761") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.967916") + max_val = float("1.14774") + mean = float("1.02404") + std = float("0.0293681") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00374527") + max_val = float("0.0468002") + mean = float("0.010768") + std = float("0.00616027") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.143258") + max_val = float("0.131648") + mean = float("-0.0335563") + std = float("0.0345961") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0433313") + max_val = float("0.052202") + mean = float("-0.000128388") + std = float("0.00248489") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.188938") + max_val = float("0.0617287") + mean = float("-0.0755865") + std = float("0.0405709") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.882366") + max_val = float("1.21794") + mean = float("1.01474") + std = float("0.0507117") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00742839") + max_val = float("0.0667317") + mean = float("0.0226131") + std = float("0.0119971") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.112149") + max_val = float("0.0451237") + mean = float("-0.024385") + std = float("0.0305071") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0452761") + max_val = float("0.0804255") + mean = float("-0.000100801") + std = float("0.00285943") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.22934") + max_val = float("-0.0102474") + mean = float("-0.0831804") + std = float("0.0422282") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900661") + max_val = float("1.02788") + mean = float("0.975272") + std = float("0.0229725") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00155422") + max_val = float("0.0132741") + mean = float("0.004831") + std = float("0.0016725") + data 
= None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0356059") + max_val = float("0.0443197") + mean = float("-0.00885301") + std = float("0.0162402") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.041394") + max_val = float("0.0722911") + mean = float("-0.000424123") + std = float("0.00418307") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.22934") + max_val = float("-0.0102474") + mean = float("-0.0831804") + std = float("0.0422282") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947225") + max_val = float("1.11076") + mean = float("1.02102") + std = float("0.0305613") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00649826") + max_val = float("0.0541705") + mean = float("0.0147115") + std = float("0.00679964") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.126828") + max_val = float("0.0979872") + mean = float("-0.0173331") + std = float("0.034963") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444401") + max_val = float("0.0493977") + mean = float("-7.54037e-05") + std = float("0.00263669") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234041") + max_val = float("0.0809241") + mean = float("-0.0946904") + std = float("0.0462546") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.88642") + max_val = float("1.20416") + mean = float("1.01671") + std = float("0.0539438") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00749466") + max_val = float("0.0749259") + mean = float("0.0180823") + std = float("0.0094692") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.163972") + max_val = float("0.079249") + mean = float("-0.0385191") + std = float("0.0397991") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0389136") + max_val = float("0.0871182") + mean = float("-0.000134501") + std = float("0.00310309") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.200194") + max_val = float("0.0157496") + mean = float("-0.0662762") + std = float("0.0312062") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925357") + max_val = float("1.15236") + mean = float("1.0133") + std = float("0.0383996") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00361369") + 
max_val = float("0.027304") + mean = float("0.00813006") + std = float("0.0031377") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0928788") + max_val = float("0.136946") + mean = float("-0.0222842") + std = float("0.0313823") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0594216") + max_val = float("0.0592715") + mean = float("-0.000195728") + std = float("0.00449961") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.099842") + max_val = float("0.0381452") + mean = float("-0.0139723") + std = float("0.0205436") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.922504") + max_val = float("1.19789") + mean = float("1.00313") + std = float("0.0257759") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00356623") + max_val = float("0.0452433") + mean = float("0.0108165") + std = float("0.00641041") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.079912") + max_val = float("0.0542728") + mean = float("-0.0186546") + std = float("0.0237587") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0668755") + max_val = float("0.102237") + mean = float("-0.000165968") + std = float("0.0044361") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.159159") + max_val = float("-0.000950146") + mean = float("-0.0390258") + std = float("0.0217828") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.92161") + max_val = float("1.24952") + mean = float("1.00821") + std = float("0.0303995") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00655937") + max_val = float("0.0628452") + mean = float("0.0202715") + std = float("0.00915402") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.332431") + max_val = float("0.237212") + mean = float("-0.0512185") + std = float("0.088857") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0500878") + max_val = float("0.0679367") + mean = float("-4.96188e-05") + std = float("0.00284657") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.554458") + max_val = float("1.14247") + mean = float("0.353056") + std = float("0.345181") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.545476") + max_val = float("1.57363") + mean = float("1.15035") + std = float("0.183509") + data = None + + +class Program_weight_tensor_parameter_132: + name = 
"parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.0103054") + max_val = float("0.220449") + mean = float("0.0404988") + std = float("0.026474") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.375239") + max_val = float("0.257657") + mean = float("-0.0653435") + std = float("0.0721543") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.153439") + max_val = float("0.133673") + mean = float("-0.0012256") + std = float("0.0133282") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457854") + max_val = float("0.237777") + mean = float("-0.00923912") + std = float("0.145025") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.761621") + max_val = float("1.23301") + mean = float("0.94899") + std = float("0.0714356") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00276598") + max_val = float("0.0559674") + mean = float("0.0149648") + std = float("0.0103316") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0835422") + max_val = float("0.102849") + mean = float("-0.0154583") + std = float("0.0290241") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0875722") + max_val = float("0.0849306") + mean = float("-0.00143079") + std = float("0.0111386") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457854") + max_val = float("0.237777") + mean = float("-0.00923912") + std = float("0.145025") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.507613") + max_val = float("1.26896") + mean = float("1.02934") + std = float("0.0965434") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.0131902") + max_val = float("0.171935") + mean = float("0.0470073") + std = float("0.0280841") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.311337") + max_val = float("0.119977") + mean = float("-0.0492666") + std = float("0.0774246") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0902187") + max_val = float("0.0894315") + mean = float("-0.000319421") + std = float("0.00677892") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.702465") + max_val = float("0.490645") + mean = float("-0.113318") + std = float("0.198998") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.718336") + max_val = float("1.71672") + mean = float("0.996065") + std = float("0.134586") + data = None + 
+ +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.0156182") + max_val = float("0.214741") + mean = float("0.0606337") + std = float("0.0439268") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.22081") + max_val = float("0.178116") + mean = float("-0.0507328") + std = float("0.0712817") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.129042") + max_val = float("0.105236") + mean = float("-0.000537157") + std = float("0.0076853") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.365428") + max_val = float("0.189562") + mean = float("-0.138684") + std = float("0.0965022") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.628597") + max_val = float("1.02554") + mean = float("0.906981") + std = float("0.055845") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00380216") + max_val = float("0.0247438") + mean = float("0.0110218") + std = float("0.00430675") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0841579") + max_val = float("0.0461001") + mean = float("-0.0109588") + std = float("0.0213491") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.083138") + max_val = float("0.0832948") + mean = float("-0.00124059") + std = float("0.0111694") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.365428") + max_val = float("0.189562") + mean = float("-0.138684") + std = float("0.0965022") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.807841") + max_val = float("1.15746") + mean = float("1.02167") + std = float("0.0611298") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0108195") + max_val = float("0.148192") + mean = float("0.036773") + std = float("0.0294918") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.207341") + max_val = float("0.0663917") + mean = float("-0.0430707") + std = float("0.0421016") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0789797") + max_val = float("0.0757237") + mean = float("-0.000521731") + std = float("0.00687652") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.488688") + max_val = float("0.168914") + mean = float("-0.167698") + std = float("0.131647") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.774906") + max_val = float("1.29288") + mean = 
float("0.963601") + std = float("0.0984794") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0127489") + max_val = float("0.126099") + mean = float("0.0322453") + std = float("0.0182029") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.193135") + max_val = float("0.0883238") + mean = float("0.00109701") + std = float("0.0471492") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.128344") + max_val = float("0.113257") + mean = float("-0.000448245") + std = float("0.00839856") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.492587") + max_val = float("0.0643527") + mean = float("-0.168974") + std = float("0.115023") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.725101") + max_val = float("1.00372") + mean = float("0.919859") + std = float("0.0526473") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00594957") + max_val = float("0.0371473") + mean = float("0.0162052") + std = float("0.00583046") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0664638") + max_val = float("0.0436441") + mean = float("-0.0231787") + std = float("0.0227166") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.108003") + max_val = float("0.0831197") + mean = float("-0.0024275") + std = float("0.012646") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.492587") + max_val = float("0.0643527") + mean = float("-0.168974") + std = float("0.115023") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.75981") + max_val = float("1.15375") + mean = float("0.981077") + std = float("0.0584923") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.01686") + max_val = float("0.220839") + mean = float("0.0498523") + std = float("0.0353254") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.249547") + max_val = float("0.0988174") + mean = float("-0.0166119") + std = float("0.0490991") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.115412") + max_val = float("0.0894897") + mean = float("-0.000237206") + std = float("0.00784247") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.567385") + max_val = float("0.348517") + mean = float("-0.179707") + std = float("0.173622") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = 
float("0.773713") + max_val = float("1.33702") + mean = float("0.955273") + std = float("0.110933") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0154188") + max_val = float("0.11259") + mean = float("0.0348921") + std = float("0.0191628") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.18856") + max_val = float("0.254903") + mean = float("-0.0274094") + std = float("0.0971786") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.161116") + max_val = float("0.142243") + mean = float("-0.000331302") + std = float("0.00951338") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.627743") + max_val = float("0.598476") + mean = float("-0.0826542") + std = float("0.256341") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.652364") + max_val = float("1.22668") + mean = float("0.866563") + std = float("0.114959") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0123555") + max_val = float("0.0834337") + mean = float("0.0304794") + std = float("0.0137422") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.119118") + max_val = float("0.0902295") + mean = float("-0.0133537") + std = float("0.042899") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.162411") + max_val = float("0.186967") + mean = float("-0.000579591") + std = float("0.0123577") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0984357") + max_val = float("0.230087") + mean = float("0.0612119") + std = float("0.0550457") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.692173") + max_val = float("1.12825") + mean = float("0.931777") + std = float("0.0641341") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00657121") + max_val = float("0.0747688") + mean = float("0.0164236") + std = float("0.0094724") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.13542") + max_val = float("0.161402") + mean = float("-0.0186236") + std = float("0.0392959") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.105077") + max_val = float("0.136428") + mean = float("-0.000334941") + std = float("0.00872905") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.296924") + max_val = float("0.196739") + mean = float("-0.0669185") + std = float("0.0696887") + data = None + + +class Program_weight_tensor_parameter_191: + name = 
"parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.672189") + max_val = float("1.45538") + mean = float("0.8844") + std = float("0.0784394") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.0106377") + max_val = float("0.12621") + mean = float("0.025827") + std = float("0.0135329") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.151993") + max_val = float("0.0466724") + mean = float("-0.0401242") + std = float("0.0395042") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.09011") + max_val = float("0.110601") + mean = float("-0.000636282") + std = float("0.00794971") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.202377") + max_val = float("0.239034") + mean = float("-0.0675247") + std = float("0.0415874") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.872037") + max_val = float("1.54192") + mean = float("1.01901") + std = float("0.0633661") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00690737") + max_val = float("0.102286") + mean = float("0.016741") + std = float("0.00926454") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.30192") + max_val = float("0.151785") + mean = float("-0.0519392") + std = float("0.0455961") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.102527") + max_val = float("0.0968663") + mean = float("-0.000665021") + std = float("0.00717806") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.177082") + max_val = float("0.00550043") + mean = float("-0.0655704") + std = float("0.0324993") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884724") + max_val = float("0.992225") + mean = float("0.949266") + std = float("0.0164193") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00434639") + max_val = float("0.0247681") + mean = float("0.00946304") + std = float("0.00329659") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0824826") + max_val = float("0.0758727") + mean = float("-0.0236236") + std = float("0.0316304") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0496576") + max_val = float("0.0380292") + mean = float("-0.000704511") + std = float("0.00527088") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.177082") + max_val = float("0.00550043") + mean = float("-0.0655704") + std = 
float("0.0324993") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.944794") + max_val = float("1.03167") + mean = float("0.98787") + std = float("0.0166147") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.015971") + max_val = float("0.10218") + mean = float("0.0372047") + std = float("0.0146329") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.221258") + max_val = float("0.187402") + mean = float("-0.0255219") + std = float("0.0665376") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.042883") + max_val = float("0.0579923") + mean = float("-7.45574e-05") + std = float("0.00288029") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.216422") + max_val = float("-0.00156736") + mean = float("-0.0741046") + std = float("0.0353894") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939999") + max_val = float("1.1549") + mean = float("1.02948") + std = float("0.0431468") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.035852") + max_val = float("0.234595") + mean = float("0.0699051") + std = float("0.0247571") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.205727") + max_val = float("0.293756") + mean = float("-0.049762") + std = float("0.0808749") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0535654") + max_val = float("0.0565942") + mean = float("-0.000104197") + std = float("0.00352101") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.19684") + max_val = float("-0.00996682") + mean = float("-0.0711686") + std = float("0.0319146") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.94418") + max_val = float("1.04844") + mean = float("0.987928") + std = float("0.0137845") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00237687") + max_val = float("0.0111027") + mean = float("0.00414842") + std = float("0.00111323") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0903061") + max_val = float("0.0468839") + mean = float("-0.0217104") + std = float("0.0220275") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0308364") + max_val = float("0.0470939") + mean = float("-0.000696513") + std = float("0.00548679") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = 
float("-0.19684") + max_val = float("-0.00996682") + mean = float("-0.0711686") + std = float("0.0319146") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953911") + max_val = float("1.11241") + mean = float("1.00461") + std = float("0.0263991") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.00855705") + max_val = float("0.0591742") + mean = float("0.0171133") + std = float("0.0059456") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.218261") + max_val = float("0.0929411") + mean = float("-0.0423262") + std = float("0.0452567") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0395419") + max_val = float("0.0647335") + mean = float("-0.000147698") + std = float("0.00290824") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.232541") + max_val = float("-0.0186103") + mean = float("-0.0942687") + std = float("0.0399968") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.946583") + max_val = float("1.19103") + mean = float("1.02415") + std = float("0.0459868") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0311423") + max_val = float("0.148231") + mean = float("0.064838") + std = float("0.0208839") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.352284") + max_val = float("0.211228") + mean = float("-0.0951507") + std = float("0.0981732") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0524013") + max_val = float("0.0680883") + mean = float("-0.00017336") + std = float("0.00366809") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154817") + max_val = float("-0.00101251") + mean = float("-0.0685474") + std = float("0.0233228") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932745") + max_val = float("1.07073") + mean = float("0.998751") + std = float("0.0218682") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.0020065") + max_val = float("0.00825559") + mean = float("0.00382878") + std = float("0.00108412") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0781428") + max_val = float("0.0989118") + mean = float("-0.0117154") + std = float("0.0205612") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.034175") + max_val = float("0.0501296") + mean = float("-0.000389987") + std = float("0.0061454") + data = None + + +class 
Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.154817") + max_val = float("-0.00101253") + mean = float("-0.0685474") + std = float("0.0233228") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.935809") + max_val = float("1.11369") + mean = float("0.992462") + std = float("0.0258318") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00876716") + max_val = float("0.0429823") + mean = float("0.0174659") + std = float("0.00530219") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.257697") + max_val = float("0.132166") + mean = float("-0.0435913") + std = float("0.047072") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0292566") + max_val = float("0.0524168") + mean = float("-0.000170851") + std = float("0.00289402") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.288746") + max_val = float("0.0148075") + mean = float("-0.10971") + std = float("0.0400284") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.944098") + max_val = float("1.25857") + mean = float("1.02656") + std = float("0.041926") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.012381") + max_val = float("0.0580644") + mean = float("0.0266098") + std = float("0.00890016") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.35467") + max_val = float("0.123935") + mean = float("-0.048729") + std = float("0.0584562") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0569475") + max_val = float("0.0674861") + mean = float("-0.000194673") + std = float("0.00414306") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257099") + max_val = float("-0.0137211") + mean = float("-0.121798") + std = float("0.0441838") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916438") + max_val = float("1.13707") + mean = float("1.02436") + std = float("0.0422659") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00484966") + max_val = float("0.0244652") + mean = float("0.0104799") + std = float("0.00323732") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.129556") + max_val = float("0.0970983") + mean = float("0.014798") + std = float("0.0298998") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0720097") + max_val = float("0.0969831") + 
mean = float("-0.000189668") + std = float("0.00582763") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.177731") + max_val = float("0.212692") + mean = float("-0.0075501") + std = float("0.0506781") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.954658") + max_val = float("1.21643") + mean = float("1.05592") + std = float("0.0497885") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.0079636") + max_val = float("0.0505556") + mean = float("0.016929") + std = float("0.00688512") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0784235") + max_val = float("0.0907545") + mean = float("-0.00213312") + std = float("0.0311144") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0605225") + max_val = float("0.102851") + mean = float("-0.000209048") + std = float("0.00623752") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249998") + max_val = float("-0.0574475") + mean = float("-0.125166") + std = float("0.0336739") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.816067") + max_val = float("1.01535") + mean = float("0.909294") + std = float("0.0258078") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.00970916") + max_val = float("0.0941424") + mean = float("0.0239885") + std = float("0.0108973") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.16177") + max_val = float("0.0916908") + mean = float("-0.0400885") + std = float("0.0400372") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0319605") + max_val = float("0.0347704") + mean = float("-0.000311508") + std = float("0.00449518") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104735") + max_val = float("0.0725414") + mean = float("-0.05688") + std = float("0.0152722") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.952534") + max_val = float("1.14216") + mean = float("1.02086") + std = float("0.0209588") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00397334") + max_val = float("0.028897") + mean = float("0.00852235") + std = float("0.00315198") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.106239") + max_val = float("0.0867516") + mean = float("-0.0301911") + std = float("0.0256457") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] 
+ dtype = "float32" + min_val = float("-0.0561458") + max_val = float("0.104051") + mean = float("-0.000280565") + std = float("0.0038253") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158374") + max_val = float("0.0744864") + mean = float("-0.0400398") + std = float("0.0206667") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.88852") + max_val = float("1.07531") + mean = float("0.98215") + std = float("0.0131728") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00585062") + max_val = float("0.0927637") + mean = float("0.0229403") + std = float("0.00935616") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0732053") + max_val = float("0.0601313") + mean = float("-0.00486842") + std = float("0.0261273") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0355253") + max_val = float("0.0687467") + mean = float("-5.36077e-05") + std = float("0.00327518") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158374") + max_val = float("0.0744864") + mean = float("-0.0400398") + std = float("0.0206668") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.880903") + max_val = float("1.07755") + mean = float("0.993864") + std = float("0.0122549") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0289945") + max_val = float("0.64482") + mean = float("0.148266") + std = float("0.0608358") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.277106") + max_val = float("0.125814") + mean = float("-0.0743739") + std = float("0.0810322") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0402386") + max_val = float("0.0446868") + mean = float("-0.000120118") + std = float("0.00122843") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0801753") + max_val = float("0.116977") + mean = float("-0.018999") + std = float("0.0160145") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.920419") + max_val = float("1.16701") + mean = float("1.01503") + std = float("0.0247144") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0210253") + max_val = float("0.203687") + mean = float("0.0669733") + std = float("0.0296874") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.230141") + max_val = float("0.205537") + mean = float("-0.0202866") + std = float("0.0737303") + data = None + + +class 
Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0237287") + max_val = float("0.0318855") + mean = float("-3.22147e-05") + std = float("0.00160868") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0734614") + max_val = float("0.0209444") + mean = float("-0.0234929") + std = float("0.0134657") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946") + max_val = float("1.1693") + mean = float("1.01467") + std = float("0.0274099") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0523474") + max_val = float("0.340149") + mean = float("0.149206") + std = float("0.0561371") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.64069") + max_val = float("1.90126") + mean = float("0.0266222") + std = float("0.565403") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0463621") + max_val = float("0.0539784") + mean = float("8.39383e-05") + std = float("0.00279941") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183378") + max_val = float("0.0257996") + mean = float("-0.00146177") + std = float("0.00679518") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969527") + max_val = float("1.06065") + mean = float("0.993845") + std = float("0.0122859") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00272728") + max_val = float("0.0147253") + mean = float("0.00634305") + std = float("0.0023073") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.100717") + max_val = float("0.0509565") + mean = float("-0.0389208") + std = float("0.022756") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0314869") + max_val = float("0.0413918") + mean = float("-0.000483676") + std = float("0.00306659") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183378") + max_val = float("0.0257996") + mean = float("-0.00146177") + std = float("0.00679518") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.97189") + max_val = float("1.08657") + mean = float("1.00365") + std = float("0.018176") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0130017") + max_val = float("0.0989012") + mean = float("0.0359469") + std = float("0.0153852") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.273278") + max_val = float("0.10845") + 
mean = float("-0.116688") + std = float("0.0514342") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.028964") + max_val = float("0.0665864") + mean = float("-0.000178433") + std = float("0.00128551") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0494566") + max_val = float("0.00858871") + mean = float("-0.00839612") + std = float("0.00776322") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.954264") + max_val = float("1.13763") + mean = float("1.01254") + std = float("0.0201669") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0690257") + max_val = float("0.342444") + mean = float("0.152005") + std = float("0.0453564") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.18126") + max_val = float("0.812549") + mean = float("-0.221221") + std = float("0.25406") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0230725") + max_val = float("0.0511013") + mean = float("-0.000132069") + std = float("0.00152735") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.03582") + max_val = float("0.013894") + mean = float("-0.00764659") + std = float("0.00787963") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.98416") + max_val = float("1.03457") + mean = float("0.999921") + std = float("0.00712975") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00189091") + max_val = float("0.0107445") + mean = float("0.00361452") + std = float("0.00117918") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0808105") + max_val = float("0.126535") + mean = float("-0.020492") + std = float("0.0225772") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0193719") + max_val = float("0.0329754") + mean = float("-0.000270687") + std = float("0.00265595") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.03582") + max_val = float("0.013894") + mean = float("-0.00764659") + std = float("0.00787963") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.98196") + max_val = float("1.0674") + mean = float("1.00455") + std = float("0.012659") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00898203") + max_val = float("0.0485558") + mean = float("0.0213143") + std = float("0.00745083") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + 
dtype = "float32" + min_val = float("-0.216625") + max_val = float("0.318883") + mean = float("-0.0712176") + std = float("0.0617026") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0112868") + max_val = float("0.0329742") + mean = float("-0.000116809") + std = float("0.00107866") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0529998") + max_val = float("0.00370845") + mean = float("-0.0206399") + std = float("0.00869029") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.975688") + max_val = float("1.08475") + mean = float("1.01197") + std = float("0.0159581") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0109077") + max_val = float("0.0725685") + mean = float("0.0279482") + std = float("0.00887735") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.177563") + max_val = float("0.207379") + mean = float("-0.0370171") + std = float("0.0487798") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0145819") + max_val = float("0.0244233") + mean = float("-6.37425e-05") + std = float("0.00148773") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699624") + max_val = float("0.0213861") + mean = float("-0.0333955") + std = float("0.0126425") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981936") + max_val = float("1.05597") + mean = float("1.01336") + std = float("0.0107862") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00653592") + max_val = float("0.0256607") + mean = float("0.012104") + std = float("0.00302117") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.156458") + max_val = float("0.0708725") + mean = float("-0.0161311") + std = float("0.0370947") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0178611") + max_val = float("0.0467557") + mean = float("-0.000196971") + std = float("0.00306959") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0243531") + max_val = float("0.0209122") + mean = float("-0.00040372") + std = float("0.00795315") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994157") + max_val = float("1.08381") + mean = float("1.04111") + std = float("0.013655") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0103671") + max_val = float("0.0377362") + mean = float("0.0175769") + std = float("0.00395923") + data = 
None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.162936") + max_val = float("0.133847") + mean = float("-0.00518069") + std = float("0.0495544") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0386115") + max_val = float("0.0272944") + mean = float("-0.000230863") + std = float("0.00376315") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-2.88823e-10") + max_val = float("3.63778e-10") + mean = float("3.24102e-12") + std = float("8.4459e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.797327") + max_val = float("0.801973") + mean = float("0.798407") + std = float("0.000349556") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0176963") + max_val = float("0.0176828") + mean = float("0.000103088") + std = float("0.0103724") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0179954") + max_val = float("0.0179553") + mean = float("-3.37946e-07") + std = float("0.0101899") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.024964") + max_val = float("0.0249375") + mean = float("-0.000264493") + std = float("0.0140501") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.025213") + max_val = float("0.0251935") + mean = float("-1.07218e-06") + std = float("0.0144034") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000557004") + max_val = float("0.000245059") + mean = float("2.64757e-07") + std = float("9.20968e-05") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.796704") + max_val = float("0.802199") + mean = float("0.79841") + std = float("0.000404307") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.000420681") + max_val = float("0.000306216") + mean = float("3.46383e-06") + std = float("9.74894e-05") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0435607") + max_val = float("0.0434689") + mean = float("8.92965e-06") + std = float("0.024934") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.000379084") + max_val = float("0.000252131") + mean = float("1.80921e-05") + std = float("9.16523e-05") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.796854") + max_val = float("0.80208") + mean = float("0.798424") + std = float("0.00039248") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype 
= "float32" + min_val = float("-0.0176884") + max_val = float("0.0176084") + mean = float("0.00010221") + std = float("0.0103695") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0179903") + max_val = float("0.0178857") + mean = float("-4.51313e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249602") + max_val = float("0.0249311") + mean = float("-0.000262739") + std = float("0.0140492") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251983") + max_val = float("0.0251516") + mean = float("-1.07229e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000353508") + max_val = float("0.000242511") + mean = float("-4.0388e-07") + std = float("8.7987e-05") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.797092") + max_val = float("0.801797") + mean = float("0.798409") + std = float("0.000360448") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000374712") + max_val = float("0.000305062") + mean = float("1.49159e-06") + std = float("9.94299e-05") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0434393") + max_val = float("0.0434331") + mean = float("8.89655e-06") + std = float("0.0249341") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000426724") + max_val = float("0.000459694") + mean = float("2.53446e-05") + std = float("0.000129157") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.797253") + max_val = float("0.801781") + mean = float("0.798438") + std = float("0.00035901") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0177134") + max_val = float("0.0176221") + mean = float("0.000102172") + std = float("0.01036") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.017946") + max_val = float("0.0179338") + mean = float("-5.06726e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249899") + max_val = float("0.0249092") + mean = float("-0.000261854") + std = float("0.0140484") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251343") + max_val = float("0.0251634") + mean = float("-1.07238e-06") + std = float("0.0144034") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000531457") + max_val = float("0.000448132") + mean = 
float("-3.47331e-07") + std = float("0.000134888") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.797309") + max_val = float("0.801477") + mean = float("0.798407") + std = float("0.000340343") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000502346") + max_val = float("0.000480712") + mean = float("-1.72751e-06") + std = float("0.000140261") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.043449") + max_val = float("0.043476") + mean = float("8.89798e-06") + std = float("0.0249342") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000656123") + max_val = float("0.000741278") + mean = float("3.05207e-05") + std = float("0.000218397") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" + min_val = float("0.797423") + max_val = float("0.80151") + mean = float("0.798455") + std = float("0.00035798") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.017719") + max_val = float("0.0177638") + mean = float("0.00010197") + std = float("0.010356") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0180143") + max_val = float("0.0179167") + mean = float("-4.95239e-07") + std = float("0.0101901") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249285") + max_val = float("0.0249061") + mean = float("-0.000259738") + std = float("0.0140479") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251573") + max_val = float("0.0252251") + mean = float("-1.07239e-06") + std = float("0.0144034") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.00072914") + max_val = float("0.000772766") + mean = float("-6.95543e-07") + std = float("0.000228393") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.797344") + max_val = float("0.801222") + mean = float("0.798406") + std = float("0.000357014") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + min_val = float("-0.000786932") + max_val = float("0.000885928") + mean = float("-2.95098e-07") + std = float("0.000237011") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.043733") + max_val = float("0.0438102") + mean = float("8.92999e-06") + std = float("0.0249347") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py new file mode 100644 index 000000000..36ef80eba --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [21] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [21] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [42] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [42] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [84] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [84] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 768, 21, 21] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("16.1843") + mean = float("0.147783") + std = float("0.640828") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 384, 42, 42] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("27.7299") + mean = float("0.204054") + std = float("0.900832") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 192, 84, 84] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("499.504") + mean = float("12.5639") + std = float("18.7273") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py new file mode 100644 index 000000000..84b06b995 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + 
data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # 
pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, 
stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + 
batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x768x-1x-1xf32, 10x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x768x-1x-1xf32, 40x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) + 
transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x384x-1x-1xf32, 10x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 
384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x384x-1x-1xf32, 40x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x192x-1x-1xf32, 10x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x192x-1x-1xf32, 40x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) 
+ del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32]) <- (2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x-1x10xf32) <- ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32]) <- (2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x-1x40xf32) <- ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py new file mode 100644 index 000000000..7ebfc8bcf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py @@ -0,0 +1,580 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.200034") + max_val = float("0.204555") + mean = float("1.49957e-08") + std = float("0.0116846") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.049655") + max_val = float("0.233417") + mean = float("0.0551577") + std = float("0.0442585") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = 
float("0.836511") + max_val = float("1.62756") + mean = float("1.22136") + std = float("0.145365") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.00534518") + max_val = float("5.22517") + mean = float("0.466305") + std = float("0.726268") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-8.55821") + max_val = float("10.0832") + mean = float("0.104165") + std = float("2.83033") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.101586") + max_val = float("0.138874") + mean = float("-0.000767801") + std = float("0.0120829") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00860419") + max_val = float("0.0156884") + mean = float("-0.000140933") + std = float("0.00406783") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0102778") + max_val = float("0.0182908") + mean = float("-0.000259715") + std = float("0.00201379") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-1151.38") + max_val = float("133.782") + mean = float("-21.9826") + std = float("92.2655") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-83.505") + max_val = float("85.9246") + mean = float("2.58206") + std = float("26.6954") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("-14.1722") + max_val = float("24.9711") + mean = float("-0.553052") + std = float("5.93472") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("2.32866") + max_val = float("16618600.0") + mean = float("525947.0") + std = float("1853320.0") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-12504.7") + max_val = float("7911.06") + mean = float("-492.832") + std = float("2441.54") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-138.688") + max_val = float("100.601") + mean = float("-0.0990701") + std = float("4.16445") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-10.9738") + max_val = float("7.09472") + mean = float("-0.172259") + std = float("1.7704") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-21.7725") + max_val = float("14.0694") + mean = float("-0.0616887") + std = float("0.957234") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = float("-0.124724") + max_val = float("0.129603") + mean = float("3.40515e-09") + std = float("0.00677564") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00274099") + max_val = float("0.100798") + mean = float("0.03273") + std = float("0.0175032") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.999002") + max_val = float("1.24047") + mean = float("1.10664") + std = float("0.0410217") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.00338575") + max_val = float("0.497954") + mean = float("0.0471292") + std = float("0.05553") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.202557") + max_val = float("0.162712") + mean = float("-0.0211337") + std = float("0.0479677") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0592913") + max_val = float("0.0683296") + mean = float("-0.000516701") + std = float("0.00403998") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00282189") + max_val = float("0.00800835") + mean = float("4.07712e-05") + std = float("0.00164155") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00191414") + max_val = float("0.00571394") + mean = float("-3.68985e-05") + std = float("0.00061666") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-4.35504") + max_val = float("0.450249") + mean = float("-0.170375") + std = float("0.297706") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.129636") + max_val = float("0.537228") + mean = float("0.252657") + std = float("0.116675") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.994039") + max_val = float("1.41192") + mean = float("1.16982") + std = float("0.0589143") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.189816") + max_val = float("625.421") + mean = float("13.2176") + std = float("39.326") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-6.46967") + max_val = float("2.53183") + mean = float("-0.259474") + std = float("0.862378") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.372426") + max_val = float("0.933267") + mean = float("-0.00500754") + std = float("0.034042") + data = None + + +class 
Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.168065") + max_val = float("0.0324648") + mean = float("0.000330264") + std = float("0.0172051") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.137078") + max_val = float("0.0270712") + mean = float("-0.000467459") + std = float("0.00704374") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0663824") + max_val = float("0.0420303") + mean = float("4.07454e-10") + std = float("0.00430679") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0211256") + max_val = float("0.0557602") + mean = float("0.00994946") + std = float("0.0117441") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.0067") + max_val = float("1.19979") + mean = float("1.06442") + std = float("0.0224675") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("0.00543727") + max_val = float("4.44191") + mean = float("0.147009") + std = float("0.218322") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.239306") + max_val = float("0.308633") + mean = float("-0.0116159") + std = float("0.0711555") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0315101") + max_val = float("0.0300214") + mean = float("-0.00021905") + std = float("0.00273367") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00399512") + max_val = float("0.00318684") + mean = float("6.30584e-05") + std = float("0.00082816") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00263886") + max_val = float("0.00246544") + mean = float("4.12622e-06") + std = float("0.000248146") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-3.66048") + max_val = float("0.4728") + mean = float("-0.108391") + std = float("0.234778") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.093079") + max_val = float("0.260714") + mean = float("0.112569") + std = float("0.0586991") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("0.977893") + max_val = float("1.22745") + mean = float("1.06571") + std = float("0.0280202") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + 
shape = [768] + dtype = "float32" + min_val = float("0.0580549") + max_val = float("178.552") + mean = float("7.98546") + std = float("16.2805") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-3.133") + max_val = float("2.19298") + mean = float("-0.123927") + std = float("0.472723") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.352391") + max_val = float("0.641069") + mean = float("-0.00191347") + std = float("0.0277056") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.0409679") + max_val = float("0.024778") + mean = float("-2.53984e-05") + std = float("0.00726461") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0172154") + max_val = float("0.0204702") + mean = float("-6.95149e-05") + std = float("0.00176879") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py new file mode 100644 index 000000000..3c170c64e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 8400, 4] + dtype = "float32" + min_val = float("0.00768409") + max_val = float("2.99242") + mean = float("1.93544") + std = float("0.747544") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 2] + dtype = "float32" + data = [0.836601, 0.470588] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py new file mode 100644 index 000000000..5d4c5e86c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: 
(1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (1x8400x2xf32, 1x8400x2xf32) <- ([1x8400x2xf32, 1x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x8400x2xf32) <- (1x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x2xf32, 1x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (1x8400x4xf32) <- ([1x8400x2xf32, 1x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (1x8400x4xf32) <- (1x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1xf32, 1x1xf32]) <- (1x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (1x1xf32, 1x1xf32) <- ([1x1xf32, 1x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32]) <- (1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (1x4xf32) <- ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (1x1x4xf32) <- (1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (1x8400x4xf32) <- (1x8400x4xf32, 1x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..c22f27fef --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +7dca5f9adbbd5ed82b1a226d0df398f3eb50454e9abf47936b65d6ecf998ef9f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json new file mode 100644 index 
000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py new file mode 100644 index 000000000..f4ab533fd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549] + dtype = "float32" + max_val = float("26.0") + mean = float("0.0874894") + std = float("0.880638") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 49, 3549] + dtype = "float32" + max_val = float("0.980323") + mean = float("0.000678688") + std = float("0.0200074") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 49, 3549] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0017855") + std = float("0.0422174") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 49, 1] + dtype = "int32" + min_val = 0 + max_val = 8 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 49, 4] + dtype = "float32" + max_val = float("408.482") + mean = float("110.196") + std = float("133.414") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 49, 3549] + dtype = "float32" + max_val = float("0.795764") + mean = float("4.47944e-05") + std = float("0.00452556") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py new file mode 100644 index 000000000..2cc272861 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py @@ -0,0 +1,244 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 49, 1] + + # pd_op.tile: (2x49x3549xb) <- (2x1x3549xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("49"), 
paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x49xf32) <- (2x3549xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (2x49x3549xf32) <- (2x3549x49xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("49"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (98xi32) <- (2x49x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (7098xi64) <- (2x3549xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (7098xi32) <- (98xi32, 7098xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 3549] + + # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (98x4xf32) <- (2x49x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (7098x4xf32) <- (98x4xf32, 7098xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 3549, 4] + + # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], 
float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x11xf32) <- (2x3549xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x3549x10xf32) <- (2x3549x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x49x1xf32) <- (2x49x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..a28abb868 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +f60d5bbaab8b228442ebb3c058e4d6372677794bde1ea09b0e24c99fbe4ce211 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py new file mode 100644 index 000000000..38a68a034 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py @@ -0,0 +1,222 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.699884] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.667963] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.675792] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.676071] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.658719] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.620637] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.637685] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.619238] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.773168] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.635316] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.623672] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.620323] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.621219] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.624329] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.733117] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.557224] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.579909] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.70327] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0319247") + max_val = float("0.0317725") + mean = float("-5.10487e-06") + std = float("0.0176379") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [3072] + dtype = "float32" + min_val = float("-0.000610453") + max_val = float("0.000772214") + mean = float("-4.76504e-06") + std = float("0.000159152") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0310115") + max_val = float("0.0310403") + mean = float("-4.13224e-06") + std = float("0.0176369") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [3072] + dtype = "float32" + min_val = float("-0.000442001") + max_val = float("0.0003857") + mean = float("-8.27006e-07") + std = float("0.000101626") + data = None + + 
+class Program_weight_tensor_data_22: + name = "data_22" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0310211") + max_val = float("0.0309302") + mean = float("-4.07149e-06") + std = float("0.0176364") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [3072] + dtype = "float32" + min_val = float("-0.00027446") + max_val = float("0.000292125") + mean = float("-1.05896e-07") + std = float("6.90506e-05") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.030934") + max_val = float("0.0309738") + mean = float("-4.04736e-06") + std = float("0.017636") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [3072] + dtype = "float32" + min_val = float("-0.000277695") + max_val = float("0.000255924") + mean = float("3.36681e-07") + std = float("5.71105e-05") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [2, 3, 416, 416] + dtype = "float32" + max_val = float("1.0") + mean = float("0.333385") + std = float("0.180644") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py new file mode 100644 index 000000000..0e4271fc6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py @@ -0,0 +1,8874 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + 
parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + 
parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + 
parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + 
data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + ): + # pd_op.conv2d: (2x32x208x208xf32) <- (2x3x416x416xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_26, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_26, parameter_737 + + # pd_op.batch_norm_: (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (2x32x208x208xf32) <- (2x32x208x208xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x208x208xf32) <- (2x32x208x208xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (2x32x208x208xf32) <- (2x32x208x208xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x208x208xf32) <- (2x32x208x208xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727 + + # pd_op.batch_norm_: (2x64x208x208xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x208x208xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (2x64x208x208xf32) <- (2x64x208x208xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x104x104xf32) <- (2x64x208x208xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_722, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (2x96x104x104xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x104x104xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (2x96x104x104xf32) <- (2x96x104x104xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x96x104x104xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x96x104x104xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + 
batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # 
pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_672, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_667, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + 
batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x48x104x104xf32, 2x48x104x104xf32]) <- (2x48x104x104xf32, 2x48x104x104xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x104x104xf32) <- ([2x48x104x104xf32, 2x48x104x104xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x104x104xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_662, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_661, full_int_array_1) + del parameter_661 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x104x104xf32) <- (2x96x104x104xf32, 2x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x104x104xf32) <- (2x96x104x104xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660 + + # pd_op.batch_norm_: 
(2x128x104x104xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x104x104xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (2x128x104x104xf32) <- (2x128x104x104xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x52x52xf32) <- (2x128x104x104xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_655, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x192x52x52xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_650, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x192x52x52xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_16 = 
paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + 
batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x52x52xf32) <- 
(2x96x52x52xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + 
batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # 
pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_560, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_555, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x52x52xf32, 2x96x52x52xf32]) <- (2x96x52x52xf32, 2x96x52x52xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x52x52xf32) <- ([2x96x52x52xf32, 2x96x52x52xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x52x52xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_550, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_549, full_int_array_1) + del parameter_549 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x52x52xf32) <- (2x192x52x52xf32, 2x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x256x52x52xf32) <- (2x192x52x52xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548 + + # pd_op.batch_norm_: (2x256x52x52xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x52x52xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (2x256x52x52xf32) <- (2x256x52x52xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x384x26x26xf32) <- (2x256x52x52xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_543, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_542, + 
parameter_541, + parameter_540, + parameter_539, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_538, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 
192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + 
), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # 
pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, 
+ float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: 
(2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_448, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- 
(2x192x26x26xf32, 2x192x26x26xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x26x26xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_438, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_437, full_int_array_1) + del parameter_437 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x26x26xf32) <- (2x384x26x26xf32, 2x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x26x26xf32) <- (2x384x26x26xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (2x512x26x26xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x26x26xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (2x512x26x26xf32) <- (2x512x26x26xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x512x26x26xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_431, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_64, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 
384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_386, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_376, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_37 = paddle._C_ops.add(batch_norm__420, 
multiply_20) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x13x13xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_370, full_int_array_1) + del full_int_array_1, parameter_370 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x13x13xf32) <- (2x768x13x13xf32, 2x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x13x13xf32) <- (2x768x13x13xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_369, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369 + + # pd_op.batch_norm_: (2x1024x13x13xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x13x13xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (2x1024x13x13xf32) <- (2x1024x13x13xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.flatten: (2x1024x169xf32) <- (2x1024x13x13xf32) + flatten_0 = paddle._C_ops.flatten(swish_55, 2, 3) + + # pd_op.transpose: (2x169x1024xf32) <- (2x1024x169xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("13"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (13xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32") + del full_2 + + # builtin.combine: ([13xf32, 13xf32]) <- (13xf32, 13xf32) + combine_4 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([13x13xf32, 13x13xf32]) <- ([13xf32, 13xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (13x13xf32, 13x13xf32) <- ([13x13xf32, 13x13xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # 
pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32") + del full_1, full_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True) + del arange_1, full_5 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0) + del full_6, scale_0 + + # pd_op.full: (256xf32) <- () + full_7 = paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0) + del elementwise_pow_0, full_7 + + # pd_op.flatten: (169xf32) <- (13x13xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (169x1xf32) <- (169xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3) + del divide_0 + + # pd_op.matmul: (169x256xf32) <- (169x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (169xf32) <- (13x13xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (169x1xf32) <- (169xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2) + del flatten_2, full_int_array_2 + + # pd_op.matmul: (169x256xf32) <- (169x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (169x256xf32) <- (169x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (169x256xf32) <- (169x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (169x256xf32) <- (169x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (169x256xf32) <- (169x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # builtin.combine: ([169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32]) <- (169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32) + combine_5 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (169x1024xf32) <- ([169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32], 1xi32) + concat_4 = 
paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.unsqueeze: (1x169x1024xf32) <- (169x1024xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_4, full_int_array_3) + del concat_4 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_40 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_18, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_19, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_40, slice_0, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_41 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [0, 0, 4, 256] + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_6 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) 
+ slice_2 = paddle._C_ops.slice( + data_18, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_19, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_40, slice_2, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_55 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_56 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_57 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_58 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_59 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_60 = full_int_array_7 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_18, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_18 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_19, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_19 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_43 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_8 + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # 
pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_73 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_74 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_75 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_76 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_77 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_78 = full_9 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, 0, 1024] + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_44 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_44, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_44 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_45 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_46 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_46, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_47 = paddle._C_ops.add(matmul_9, parameter_357) + del parameter_357 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_9, False, 
"upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_49 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_20, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_21, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_49, slice_6, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_50 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_20, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_21, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_49, slice_8, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_51 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_20, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_20 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_21, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_21 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_52 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_2 = 
paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_354, False, False) + del parameter_354 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_53, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_53 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_54, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_55 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_55, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_56 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_56, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_56 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (2x169x1024xf32, 
2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_57, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_58 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_22, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_23, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_58, slice_12, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_59 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_22, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_23, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_58, slice_14, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_60 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_22, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_22 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_23, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_23 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 
1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_62 = paddle._C_ops.add(matmul_23, parameter_343) + del parameter_343 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_62, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_62 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_63, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_64 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_64, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_65 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_65, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_65 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_66, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, 
tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_67 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_24, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_25, [0], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_3 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_67, slice_18, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_68 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_24, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_25, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_67, slice_20, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_69 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_24, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_24 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_25, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_25 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_70 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), 
+ ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) + del full_int_array_8 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_71 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_71, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_71 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_73 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_73, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) + del parameter_328 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_74 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_74, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_74 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_75, parameter_326, parameter_325, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (2x1024x169xf32) <- (2x169x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + 
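# NOTE: layer_norm_21 closes the last of the four attention/MLP encoder blocks above; the transpose above and the reshape to [2, 1024, 13, 13] below appear to turn the 2x169x1024 token sequence back into a 2x1024x13x13 feature map (169 = 13*13) for the convolutional neck that follows. +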
del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [2, 1024, 13, 13] + + # pd_op.reshape: (2x1024x13x13xf32) <- (2x1024x169xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) + del full_int_array_9 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1024x13x13xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + reshape_20, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1024x13x13xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + reshape_20, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, 
+ batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_59 = paddle._C_ops.swish(add_76) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # 
pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_61 = paddle._C_ops.swish(add_77) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [5, 5] + + # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_10, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [9, 9] + + # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_11, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [13, 13] + + # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_12, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32) + combine_6 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x1536x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1536x13x13xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_5, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + 
batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_64 = paddle._C_ops.swish(add_78) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) + combine_7 = [swish_56, swish_64] + + # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_6, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (2x384x26x26xf32) <- (2x384x13x13xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x384x26x26xf32, 2x512x26x26xf32]) <- (2x384x26x26xf32, 2x512x26x26xf32) + combine_8 = [nearest_interp_0, swish_45] + + # pd_op.concat: (2x896x26x26xf32) <- ([2x384x26x26xf32, 2x512x26x26xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x896x26x26xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_7, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x896x26x26xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_7, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + 
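# NOTE: nearest_interp_0 and concat_7 above appear to form a top-down fusion step: the 13x13 features are upsampled 2x and joined with the 26x26 backbone features (swish_45, 512 channels) into an 896-channel map, which conv2d_91/conv2d_92 then split into two 192-channel branches in a CSP-style layout; the residual conv/BN/swish units below refine one branch before the two are concatenated again. +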
swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_70 = paddle._C_ops.swish(add_79) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_72 = paddle._C_ops.swish(add_80) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_74 = paddle._C_ops.swish(add_81) + + # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) + combine_9 = [swish_67, swish_74] + + # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_8, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (2x192x52x52xf32) <- (2x192x26x26xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x52x52xf32, 2x256x52x52xf32]) <- (2x192x52x52xf32, 2x256x52x52xf32) + combine_10 = [nearest_interp_1, swish_29] + + # pd_op.concat: (2x448x52x52xf32) <- ([2x192x52x52xf32, 2x256x52x52xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x448x52x52xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_9, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x448x52x52xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_9, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, 
parameter_177, parameter_178 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_80 = paddle._C_ops.swish(add_82) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + 
parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_82 = paddle._C_ops.swish(add_83) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + 
batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_84 = paddle._C_ops.swish(add_84) + + # builtin.combine: ([2x96x52x52xf32, 2x96x52x52xf32]) <- (2x96x52x52xf32, 2x96x52x52xf32) + combine_11 = [swish_77, swish_84] + + # pd_op.concat: (2x192x52x52xf32) <- ([2x96x52x52xf32, 2x96x52x52xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x52x52xf32) <- (2x192x52x52xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_10, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x52x52xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([2x192x26x26xf32, 2x384x26x26xf32]) <- (2x192x26x26xf32, 2x384x26x26xf32) + combine_12 = [swish_86, swish_75] + + # pd_op.concat: (2x576x26x26xf32) <- ([2x192x26x26xf32, 2x384x26x26xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x576x26x26xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_11, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + 
batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x576x26x26xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_11, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # 
pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_90 = paddle._C_ops.swish(add_85) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (2x192x26x26xf32) <- 
(2x192x26x26xf32, 2x192x26x26xf32) + add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_92 = paddle._C_ops.swish(add_86) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_94 = paddle._C_ops.swish(add_87) + + # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) + combine_13 = [swish_87, swish_94] + + # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_12, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_69 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x26x26xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([2x384x13x13xf32, 2x768x13x13xf32]) <- (2x384x13x13xf32, 2x768x13x13xf32) + combine_14 = [swish_96, swish_65] + + # pd_op.concat: (2x1152x13x13xf32) <- ([2x384x13x13xf32, 2x768x13x13xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1152x13x13xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_13, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1152x13x13xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_13, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + 
parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_100 = paddle._C_ops.swish(add_88) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 
384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_102 = paddle._C_ops.swish(add_89) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (2x384x13x13xf32) <- 
(2x384x13x13xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_104 = paddle._C_ops.swish(add_90) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) + combine_15 = [swish_97, swish_104] + + # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_15, full_0) + del combine_15 + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_45, + add_46, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_54, 
+ add_55, + add_57, + add_58, + add_59, + add_60, + add_61, + add_63, + add_64, + add_66, + add_67, + add_68, + add_69, + add_7, + add_70, + add_72, + add_73, + add_75, + add_76, + add_77, + add_78, + add_79, + add_8, + add_80, + add_81, + add_82, + add_83, + add_84, + add_85, + add_86, + add_87, + add_88, + add_89, + add_9, + add_90, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_73, + assign_74, + assign_75, + assign_76, + assign_77, + assign_78, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + 
batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + 
batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + 
batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + 
batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_14, + concat_2, + concat_3, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + 
conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_8, + full_9, + full_int_array_0, + full_int_array_10, + full_int_array_11, + full_int_array_12, + full_int_array_4, + full_int_array_6, + full_int_array_7, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_11, + reshape_15, + reshape_19, + reshape_2, + reshape_20, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + 
swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_3, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py new file mode 100644 index 000000000..5dcac1a4a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py @@ -0,0 +1,8004 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.175929") + max_val = float("0.21086") + mean = float("0.0834788") + std = float("0.0566121") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.939955") + max_val = float("1.29826") + mean = float("1.064") + std = float("0.031232") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00130768") + max_val = float("0.0463603") + mean = float("0.00625725") + std = float("0.00393263") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.127586") + max_val = float("0.0517413") + mean = float("-0.02654") + std = float("0.0270736") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0515626") + max_val = float("0.0379063") + mean = float("-0.00014164") + std = float("0.00240107") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.141651") + max_val = float("0.0305715") + mean = float("-0.0187927") + std = float("0.0234486") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.945806") + max_val = float("1.04446") + mean = float("0.986675") + std = float("0.0105808") + data = None + + +class 
Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000827797") + max_val = float("0.0161073") + mean = float("0.00424179") + std = float("0.00259626") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0553707") + max_val = float("0.0596015") + mean = float("0.00271009") + std = float("0.0216885") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0308331") + max_val = float("0.0199146") + mean = float("2.09539e-05") + std = float("0.00184746") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.141651") + max_val = float("0.0305715") + mean = float("-0.0187927") + std = float("0.0234486") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.968047") + max_val = float("1.13059") + mean = float("1.01542") + std = float("0.0171839") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00219862") + max_val = float("0.0391499") + mean = float("0.00725225") + std = float("0.00432626") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.173733") + max_val = float("0.12517") + mean = float("-0.0400312") + std = float("0.0316991") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0283367") + max_val = float("0.0330588") + mean = float("-7.3825e-05") + std = float("0.00125417") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170186") + max_val = float("0.0209452") + mean = float("-0.0348788") + std = float("0.0279259") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975256") + max_val = float("1.12591") + mean = float("1.01501") + std = float("0.0240755") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00658639") + max_val = float("0.20726") + mean = float("0.0257203") + std = float("0.0173571") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.245253") + max_val = float("0.416056") + mean = float("-0.0421834") + std = float("0.0522217") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0311751") + max_val = float("0.0503276") + mean = float("-6.05092e-05") + std = float("0.00141653") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.105187") + max_val = float("0.0129827") + mean = float("-0.0357886") + std = float("0.0193112") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945568") + max_val = float("1.0451") + mean = float("0.98866") + 
std = float("0.00984887") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000652651") + max_val = float("0.0220653") + mean = float("0.00339318") + std = float("0.00205907") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.077936") + max_val = float("0.0409903") + mean = float("-0.0020853") + std = float("0.01599") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0260426") + max_val = float("0.0248082") + mean = float("-5.14195e-05") + std = float("0.00195167") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105187") + max_val = float("0.0129827") + mean = float("-0.0357886") + std = float("0.0193112") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959552") + max_val = float("1.10507") + mean = float("1.01607") + std = float("0.0177491") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.0024964") + max_val = float("0.0250836") + mean = float("0.00780127") + std = float("0.00352904") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.160103") + max_val = float("0.245929") + mean = float("-0.0452978") + std = float("0.0365583") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.034241") + max_val = float("0.0490696") + mean = float("-8.25271e-05") + std = float("0.00126683") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896542") + max_val = float("0.0192769") + mean = float("-0.036069") + std = float("0.0194634") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933175") + max_val = float("1.1146") + mean = float("1.01167") + std = float("0.0265813") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00448167") + max_val = float("0.0632357") + mean = float("0.0163368") + std = float("0.00906915") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.168694") + max_val = float("0.0937087") + mean = float("-0.01934") + std = float("0.0451176") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0389109") + max_val = float("0.0474721") + mean = float("-5.22495e-05") + std = float("0.00144713") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116304") + max_val = float("0.016211") + mean = float("-0.0373544") + std = float("0.0201432") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929317") + 
max_val = float("1.02782") + mean = float("0.987068") + std = float("0.0110352") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00119145") + max_val = float("0.0108677") + mean = float("0.00443524") + std = float("0.00164967") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0535005") + max_val = float("0.0365936") + mean = float("-0.0073878") + std = float("0.0125324") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.037048") + max_val = float("0.0266802") + mean = float("-0.000137547") + std = float("0.00195837") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116304") + max_val = float("0.016211") + mean = float("-0.0373544") + std = float("0.0201432") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.98123") + max_val = float("1.10689") + mean = float("1.01832") + std = float("0.0222072") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00438825") + max_val = float("0.0317931") + mean = float("0.0111305") + std = float("0.00497164") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.158148") + max_val = float("0.0963003") + mean = float("-0.0215295") + std = float("0.032226") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0342558") + max_val = float("0.0596347") + mean = float("-4.36682e-05") + std = float("0.00131956") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.10708") + max_val = float("0.0239013") + mean = float("-0.0375156") + std = float("0.0214475") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944782") + max_val = float("1.11463") + mean = float("1.01186") + std = float("0.0277861") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00510026") + max_val = float("0.073565") + mean = float("0.0136809") + std = float("0.00702878") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.145833") + max_val = float("0.12243") + mean = float("-0.0419585") + std = float("0.0460457") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0265761") + max_val = float("0.0412318") + mean = float("-7.48799e-05") + std = float("0.00147167") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.106796") + max_val = float("0.0466792") + mean = float("-0.0263049") + std = float("0.0154085") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = 
[384] + dtype = "float32" + min_val = float("0.973685") + max_val = float("1.08651") + mean = float("1.00904") + std = float("0.0171201") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00211342") + max_val = float("0.0171623") + mean = float("0.00491392") + std = float("0.00189255") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0938724") + max_val = float("0.0726207") + mean = float("-0.0169301") + std = float("0.0255402") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0600923") + max_val = float("0.0698518") + mean = float("-7.83959e-05") + std = float("0.00221115") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0425267") + max_val = float("0.0160945") + mean = float("-0.00899786") + std = float("0.00841522") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.959381") + max_val = float("1.05138") + mean = float("1.0079") + std = float("0.0115872") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00177107") + max_val = float("0.0283498") + mean = float("0.00429961") + std = float("0.00192508") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.101028") + max_val = float("0.097759") + mean = float("-0.0218202") + std = float("0.0240243") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0252393") + max_val = float("0.0409905") + mean = float("-0.000104687") + std = float("0.00201567") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0530202") + max_val = float("0.00596341") + mean = float("-0.0166175") + std = float("0.00987673") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988638") + max_val = float("1.10406") + mean = float("1.0196") + std = float("0.0169012") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00403058") + max_val = float("0.0513783") + mean = float("0.0126843") + std = float("0.00717747") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.365522") + max_val = float("0.198026") + mean = float("-0.0414412") + std = float("0.0639995") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0201867") + max_val = float("0.0314514") + mean = float("-3.01993e-05") + std = float("0.00114789") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.22273") + max_val = float("0.49215") + mean = float("0.217192") + std = float("0.124233") + data = None + + +class 
Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919294") + max_val = float("1.48063") + mean = float("1.14128") + std = float("0.0737757") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00389442") + max_val = float("0.0590123") + mean = float("0.011774") + std = float("0.00590078") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.153266") + max_val = float("0.0818695") + mean = float("-0.0278176") + std = float("0.0324133") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0888812") + max_val = float("0.0966329") + mean = float("-0.000338059") + std = float("0.00511266") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.166124") + max_val = float("0.0467039") + mean = float("-0.0250411") + std = float("0.0394646") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.84107") + max_val = float("1.05105") + mean = float("0.97282") + std = float("0.0237598") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00162489") + max_val = float("0.0270337") + mean = float("0.00519677") + std = float("0.00308321") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0629501") + max_val = float("0.0796146") + mean = float("-0.00468793") + std = float("0.0191205") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0448536") + max_val = float("0.0336517") + mean = float("-0.000157415") + std = float("0.00377073") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.166124") + max_val = float("0.0467039") + mean = float("-0.0250411") + std = float("0.0394646") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.729345") + max_val = float("1.12261") + mean = float("1.02194") + std = float("0.0372571") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00562341") + max_val = float("0.0781491") + mean = float("0.0170834") + std = float("0.0090384") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.191415") + max_val = float("0.0874893") + mean = float("-0.0410796") + std = float("0.042934") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0346474") + max_val = float("0.0452792") + mean = float("-0.00013752") + std = float("0.00244921") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.191424") + max_val = float("0.0441491") + mean = 
float("-0.0580252") + std = float("0.0490538") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897189") + max_val = float("1.18714") + mean = float("1.01553") + std = float("0.048456") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.0130131") + max_val = float("0.155912") + mean = float("0.0355598") + std = float("0.0183742") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.322411") + max_val = float("0.461446") + mean = float("-0.0402579") + std = float("0.0600098") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0436326") + max_val = float("0.0759463") + mean = float("-0.00010151") + std = float("0.00273793") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191731") + max_val = float("0.00856722") + mean = float("-0.0642182") + std = float("0.033376") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922072") + max_val = float("1.04657") + mean = float("0.97362") + std = float("0.017993") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00120452") + max_val = float("0.0109813") + mean = float("0.00399435") + std = float("0.00161666") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0561207") + max_val = float("0.0353681") + mean = float("-0.00713616") + std = float("0.0144372") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0389677") + max_val = float("0.0317083") + mean = float("-0.000327359") + std = float("0.00370864") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191731") + max_val = float("0.00856722") + mean = float("-0.0642182") + std = float("0.033376") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.967917") + max_val = float("1.14773") + mean = float("1.02404") + std = float("0.0293683") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00374803") + max_val = float("0.0495216") + mean = float("0.010846") + std = float("0.00639043") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.141797") + max_val = float("0.12988") + mean = float("-0.0331827") + std = float("0.0341888") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0433361") + max_val = float("0.0521915") + mean = float("-0.000128374") + std = float("0.00248482") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = 
float("-0.188937") + max_val = float("0.0617064") + mean = float("-0.0755865") + std = float("0.0405704") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.88236") + max_val = float("1.21791") + mean = float("1.01474") + std = float("0.050711") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00731951") + max_val = float("0.0706393") + mean = float("0.0229053") + std = float("0.0124686") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.105024") + max_val = float("0.045449") + mean = float("-0.0227613") + std = float("0.0300699") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0453344") + max_val = float("0.0804613") + mean = float("-0.000102284") + std = float("0.00285929") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.229338") + max_val = float("-0.0102477") + mean = float("-0.0831807") + std = float("0.0422279") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900655") + max_val = float("1.0279") + mean = float("0.975271") + std = float("0.0229719") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00158349") + max_val = float("0.0138258") + mean = float("0.00506183") + std = float("0.00178933") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0350209") + max_val = float("0.0440672") + mean = float("-0.00863895") + std = float("0.0159988") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0414024") + max_val = float("0.072312") + mean = float("-0.000424235") + std = float("0.00418287") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.229338") + max_val = float("-0.0102477") + mean = float("-0.0831807") + std = float("0.0422279") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947228") + max_val = float("1.11076") + mean = float("1.02102") + std = float("0.0305612") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00636548") + max_val = float("0.0561151") + mean = float("0.0150209") + std = float("0.00712578") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.12433") + max_val = float("0.09672") + mean = float("-0.0168594") + std = float("0.0346152") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444124") + max_val = float("0.0493834") + mean = float("-7.49687e-05") + std = float("0.00263663") + data = None + + +class 
Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234043") + max_val = float("0.0809248") + mean = float("-0.0946909") + std = float("0.0462546") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.886425") + max_val = float("1.20415") + mean = float("1.01671") + std = float("0.0539421") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00720534") + max_val = float("0.0816429") + mean = float("0.0188148") + std = float("0.0102621") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.162095") + max_val = float("0.0789193") + mean = float("-0.0380274") + std = float("0.039478") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0388947") + max_val = float("0.087211") + mean = float("-0.000134346") + std = float("0.00310298") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.200195") + max_val = float("0.0157584") + mean = float("-0.0662765") + std = float("0.0312062") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925347") + max_val = float("1.15235") + mean = float("1.0133") + std = float("0.0383988") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00390106") + max_val = float("0.0290639") + mean = float("0.00837401") + std = float("0.00327435") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0919967") + max_val = float("0.135726") + mean = float("-0.022066") + std = float("0.0311243") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0594804") + max_val = float("0.0592757") + mean = float("-0.000196378") + std = float("0.00449942") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.0998406") + max_val = float("0.0381397") + mean = float("-0.0139719") + std = float("0.0205426") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.92253") + max_val = float("1.19791") + mean = float("1.00313") + std = float("0.0257744") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00364414") + max_val = float("0.0462398") + mean = float("0.0110928") + std = float("0.00672902") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0792517") + max_val = float("0.0537729") + mean = float("-0.0185521") + std = float("0.0235437") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0668912") + max_val = 
float("0.102316") + mean = float("-0.000166523") + std = float("0.00443571") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.159157") + max_val = float("-0.000957455") + mean = float("-0.0390269") + std = float("0.0217825") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921604") + max_val = float("1.24953") + mean = float("1.00821") + std = float("0.0303984") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00642947") + max_val = float("0.0670086") + mean = float("0.0210833") + std = float("0.00953439") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.330766") + max_val = float("0.234299") + mean = float("-0.0509451") + std = float("0.0885345") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0500862") + max_val = float("0.0679449") + mean = float("-5.08306e-05") + std = float("0.0028463") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.554317") + max_val = float("1.14248") + mean = float("0.353082") + std = float("0.345155") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.546484") + max_val = float("1.57412") + mean = float("1.15038") + std = float("0.183458") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.0100657") + max_val = float("0.221031") + mean = float("0.0407891") + std = float("0.0263878") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.377989") + max_val = float("0.257639") + mean = float("-0.0649794") + std = float("0.0721023") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.153545") + max_val = float("0.134074") + mean = float("-0.00121304") + std = float("0.0133081") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457832") + max_val = float("0.237746") + mean = float("-0.00924267") + std = float("0.145032") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.761673") + max_val = float("1.23302") + mean = float("0.949003") + std = float("0.0714195") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00289003") + max_val = float("0.0548184") + mean = float("0.0147543") + std = float("0.0100438") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0823141") + max_val = float("0.101545") + mean = float("-0.015142") + std = float("0.0287704") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 
1, 1] + dtype = "float32" + min_val = float("-0.0870691") + max_val = float("0.0850803") + mean = float("-0.00142159") + std = float("0.01113") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457832") + max_val = float("0.237746") + mean = float("-0.00924267") + std = float("0.145032") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.507912") + max_val = float("1.26895") + mean = float("1.02934") + std = float("0.096504") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.0126569") + max_val = float("0.170178") + mean = float("0.046809") + std = float("0.0277218") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.311825") + max_val = float("0.120203") + mean = float("-0.0489369") + std = float("0.077116") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0902007") + max_val = float("0.0894371") + mean = float("-0.000317607") + std = float("0.0067737") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.702428") + max_val = float("0.490659") + mean = float("-0.113325") + std = float("0.198985") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.718272") + max_val = float("1.71659") + mean = float("0.996064") + std = float("0.134561") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.0158852") + max_val = float("0.220156") + mean = float("0.0611441") + std = float("0.0432503") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.218411") + max_val = float("0.176567") + mean = float("-0.04823") + std = float("0.0699582") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.128433") + max_val = float("0.105681") + mean = float("-0.00053974") + std = float("0.00768105") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.365465") + max_val = float("0.189556") + mean = float("-0.138691") + std = float("0.0965007") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.628614") + max_val = float("1.02635") + mean = float("0.906997") + std = float("0.055833") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00386648") + max_val = float("0.0250898") + mean = float("0.0113332") + std = float("0.00452204") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0829535") + max_val = float("0.0455066") + mean = float("-0.0107845") + std = float("0.0210116") + data = None + + +class 
Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0826433") + max_val = float("0.0836331") + mean = float("-0.00123565") + std = float("0.0111633") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.365465") + max_val = float("0.189556") + mean = float("-0.138691") + std = float("0.0965007") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.808147") + max_val = float("1.15745") + mean = float("1.02165") + std = float("0.061107") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0105243") + max_val = float("0.146319") + mean = float("0.0374655") + std = float("0.0298762") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.202738") + max_val = float("0.0648004") + mean = float("-0.042437") + std = float("0.0413335") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0790843") + max_val = float("0.0756688") + mean = float("-0.000522856") + std = float("0.00687379") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.488696") + max_val = float("0.1689") + mean = float("-0.167703") + std = float("0.131642") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.775323") + max_val = float("1.29288") + mean = float("0.963604") + std = float("0.0984425") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0130451") + max_val = float("0.138622") + mean = float("0.0336631") + std = float("0.0190887") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.186613") + max_val = float("0.0841258") + mean = float("-5.39021e-05") + std = float("0.0459065") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.128553") + max_val = float("0.113025") + mean = float("-0.000437448") + std = float("0.00839485") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.492552") + max_val = float("0.0643671") + mean = float("-0.168979") + std = float("0.115008") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.725079") + max_val = float("1.00348") + mean = float("0.919861") + std = float("0.0526643") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00612442") + max_val = float("0.0365536") + mean = float("0.016419") + std = float("0.00586326") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0660382") + max_val = float("0.043413") + mean = 
float("-0.0231643") + std = float("0.0225905") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.107707") + max_val = float("0.0826476") + mean = float("-0.00240553") + std = float("0.0126403") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.492552") + max_val = float("0.0643671") + mean = float("-0.168979") + std = float("0.115008") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.759592") + max_val = float("1.15371") + mean = float("0.981072") + std = float("0.058465") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0172157") + max_val = float("0.224833") + mean = float("0.0507609") + std = float("0.0362346") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.248811") + max_val = float("0.0998924") + mean = float("-0.0166411") + std = float("0.0490542") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.116242") + max_val = float("0.0898586") + mean = float("-0.000231126") + std = float("0.00783901") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.567319") + max_val = float("0.348494") + mean = float("-0.179712") + std = float("0.173626") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.772527") + max_val = float("1.33704") + mean = float("0.955269") + std = float("0.110943") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0160524") + max_val = float("0.114434") + mean = float("0.036106") + std = float("0.0195034") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.189984") + max_val = float("0.255512") + mean = float("-0.0275046") + std = float("0.0974112") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.161713") + max_val = float("0.142438") + mean = float("-0.000316146") + std = float("0.00950798") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.627686") + max_val = float("0.598483") + mean = float("-0.082655") + std = float("0.256323") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.653052") + max_val = float("1.22671") + mean = float("0.866558") + std = float("0.114981") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0123443") + max_val = float("0.0857623") + mean = float("0.0307528") + std = float("0.01414") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = 
float("-0.117179") + max_val = float("0.0889702") + mean = float("-0.0137918") + std = float("0.041907") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.162211") + max_val = float("0.186586") + mean = float("-0.00057998") + std = float("0.0123504") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0984774") + max_val = float("0.230057") + mean = float("0.0612113") + std = float("0.0550468") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.692561") + max_val = float("1.12833") + mean = float("0.931782") + std = float("0.0640907") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00681771") + max_val = float("0.0772474") + mean = float("0.0165796") + std = float("0.00961681") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.133097") + max_val = float("0.161018") + mean = float("-0.0184376") + std = float("0.0386089") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.104838") + max_val = float("0.136041") + mean = float("-0.000352218") + std = float("0.00870856") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.296963") + max_val = float("0.196688") + mean = float("-0.0669209") + std = float("0.0696946") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.672164") + max_val = float("1.45538") + mean = float("0.884399") + std = float("0.0784254") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.0110872") + max_val = float("0.127506") + mean = float("0.0262649") + std = float("0.0134352") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.151007") + max_val = float("0.0461615") + mean = float("-0.0397354") + std = float("0.0392124") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0904828") + max_val = float("0.110909") + mean = float("-0.000644138") + std = float("0.00794554") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.202322") + max_val = float("0.238987") + mean = float("-0.0675229") + std = float("0.0415855") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.872032") + max_val = float("1.54191") + mean = float("1.019") + std = float("0.063367") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00721296") + max_val = float("0.101353") + mean = float("0.0168635") + std = float("0.00915068") + data = None + + +class Program_weight_tensor_parameter_198: + 
name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.297588") + max_val = float("0.152976") + mean = float("-0.051365") + std = float("0.0447816") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.102299") + max_val = float("0.0969442") + mean = float("-0.000664132") + std = float("0.00717478") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.177096") + max_val = float("0.00551918") + mean = float("-0.0655662") + std = float("0.0325016") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884728") + max_val = float("0.992155") + mean = float("0.94926") + std = float("0.0164178") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00441221") + max_val = float("0.0261083") + mean = float("0.01005") + std = float("0.00351877") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0812598") + max_val = float("0.0748845") + mean = float("-0.0233222") + std = float("0.0310809") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0495063") + max_val = float("0.0379562") + mean = float("-0.000707927") + std = float("0.00526785") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.177096") + max_val = float("0.00551918") + mean = float("-0.0655662") + std = float("0.0325016") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.944815") + max_val = float("1.03167") + mean = float("0.987873") + std = float("0.016613") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.016531") + max_val = float("0.109259") + mean = float("0.0394289") + std = float("0.0158791") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.21924") + max_val = float("0.183769") + mean = float("-0.0255868") + std = float("0.0652744") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0428771") + max_val = float("0.0579551") + mean = float("-7.64353e-05") + std = float("0.00287948") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.216413") + max_val = float("-0.00156605") + mean = float("-0.0741052") + std = float("0.0353871") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939878") + max_val = float("1.15492") + mean = float("1.02948") + std = float("0.0431484") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0381921") + max_val = float("0.244841") + mean = float("0.0704194") + std = 
float("0.0260547") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.194913") + max_val = float("0.284776") + mean = float("-0.0487736") + std = float("0.0787153") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0537036") + max_val = float("0.0569212") + mean = float("-0.000101544") + std = float("0.00352016") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.196865") + max_val = float("-0.00996621") + mean = float("-0.0711692") + std = float("0.0319161") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.944171") + max_val = float("1.04842") + mean = float("0.987927") + std = float("0.0137867") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00232538") + max_val = float("0.0114743") + mean = float("0.00426258") + std = float("0.0011511") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0893703") + max_val = float("0.0463746") + mean = float("-0.0214534") + std = float("0.0217776") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0308295") + max_val = float("0.0472366") + mean = float("-0.000698195") + std = float("0.00548431") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.196865") + max_val = float("-0.00996621") + mean = float("-0.0711692") + std = float("0.0319161") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953905") + max_val = float("1.11243") + mean = float("1.00461") + std = float("0.0264007") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.00885181") + max_val = float("0.0602316") + mean = float("0.0174647") + std = float("0.00611072") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.214638") + max_val = float("0.0916921") + mean = float("-0.042109") + std = float("0.0445113") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0395263") + max_val = float("0.0647994") + mean = float("-0.000147334") + std = float("0.00290748") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.23254") + max_val = float("-0.0186192") + mean = float("-0.094269") + std = float("0.0399954") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.94661") + max_val = float("1.1911") + mean = float("1.02415") + std = float("0.0459878") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + 
min_val = float("0.0325254") + max_val = float("0.154956") + mean = float("0.066483") + std = float("0.0222298") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.348988") + max_val = float("0.20982") + mean = float("-0.0943644") + std = float("0.0968837") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0523599") + max_val = float("0.0680231") + mean = float("-0.000170588") + std = float("0.00366692") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154829") + max_val = float("-0.00101215") + mean = float("-0.0685481") + std = float("0.0233247") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932711") + max_val = float("1.07089") + mean = float("0.998751") + std = float("0.0218702") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00202058") + max_val = float("0.0085046") + mean = float("0.00390999") + std = float("0.00110762") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0777916") + max_val = float("0.098492") + mean = float("-0.0116677") + std = float("0.020466") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0340527") + max_val = float("0.0501646") + mean = float("-0.00038914") + std = float("0.00614264") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.15483") + max_val = float("-0.00101216") + mean = float("-0.0685481") + std = float("0.0233247") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.935817") + max_val = float("1.11381") + mean = float("0.992462") + std = float("0.0258361") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00891321") + max_val = float("0.0478413") + mean = float("0.0181253") + std = float("0.00570544") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.258263") + max_val = float("0.130443") + mean = float("-0.0435931") + std = float("0.0468497") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0292394") + max_val = float("0.0522577") + mean = float("-0.00017048") + std = float("0.00289304") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.288771") + max_val = float("0.0148396") + mean = float("-0.109714") + std = float("0.0400291") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.944044") + max_val = float("1.25876") + mean = float("1.02656") + std = float("0.0419352") + data = None + + +class 
Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0137713") + max_val = float("0.0682612") + mean = float("0.0280618") + std = float("0.00984345") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.354673") + max_val = float("0.124192") + mean = float("-0.0487223") + std = float("0.0583851") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0564756") + max_val = float("0.0674707") + mean = float("-0.000194109") + std = float("0.00414155") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257095") + max_val = float("-0.01369") + mean = float("-0.121797") + std = float("0.0441852") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916459") + max_val = float("1.13702") + mean = float("1.02436") + std = float("0.0422629") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00516469") + max_val = float("0.0229026") + mean = float("0.0106646") + std = float("0.00317044") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.127284") + max_val = float("0.0963655") + mean = float("0.0144008") + std = float("0.029143") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0721174") + max_val = float("0.0971018") + mean = float("-0.000190287") + std = float("0.00582491") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.177705") + max_val = float("0.21267") + mean = float("-0.00755062") + std = float("0.0506748") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.954707") + max_val = float("1.21638") + mean = float("1.05592") + std = float("0.0497891") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00824461") + max_val = float("0.0577322") + mean = float("0.0175512") + std = float("0.00716064") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0769287") + max_val = float("0.0893626") + mean = float("-0.00207579") + std = float("0.0304575") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0604261") + max_val = float("0.102952") + mean = float("-0.000212686") + std = float("0.00623353") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249989") + max_val = float("-0.0574309") + mean = float("-0.125167") + std = float("0.0336736") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.816049") + max_val = 
float("1.01536") + mean = float("0.909295") + std = float("0.0258085") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.0103681") + max_val = float("0.0948348") + mean = float("0.0266136") + std = float("0.0121593") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.16079") + max_val = float("0.0920586") + mean = float("-0.0398222") + std = float("0.0395335") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.03192") + max_val = float("0.0346747") + mean = float("-0.000309907") + std = float("0.00449398") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104731") + max_val = float("0.0725498") + mean = float("-0.0568804") + std = float("0.0152729") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.952515") + max_val = float("1.14217") + mean = float("1.02086") + std = float("0.0209603") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00400082") + max_val = float("0.0306736") + mean = float("0.00912551") + std = float("0.00348196") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.108131") + max_val = float("0.0840492") + mean = float("-0.0308524") + std = float("0.0256791") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0561444") + max_val = float("0.104026") + mean = float("-0.00028012") + std = float("0.00382445") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158352") + max_val = float("0.074486") + mean = float("-0.0400406") + std = float("0.0206674") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.888537") + max_val = float("1.07535") + mean = float("0.982149") + std = float("0.0131757") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00629694") + max_val = float("0.0917591") + mean = float("0.0233191") + std = float("0.00930269") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0725783") + max_val = float("0.0597257") + mean = float("-0.0049494") + std = float("0.0256969") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0355484") + max_val = float("0.0687831") + mean = float("-5.36416e-05") + std = float("0.00327465") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158353") + max_val = float("0.074486") + mean = float("-0.0400406") + std = float("0.0206674") + data = None + + +class Program_weight_tensor_parameter_271: + name = 
"parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.880933") + max_val = float("1.0776") + mean = float("0.993865") + std = float("0.0122579") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0302136") + max_val = float("0.658467") + mean = float("0.153161") + std = float("0.0615601") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.280458") + max_val = float("0.127177") + mean = float("-0.0754248") + std = float("0.0821892") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0402524") + max_val = float("0.0446942") + mean = float("-0.000120035") + std = float("0.00122819") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.080174") + max_val = float("0.116977") + mean = float("-0.0189992") + std = float("0.0160148") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.920426") + max_val = float("1.16701") + mean = float("1.01503") + std = float("0.0247134") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0258173") + max_val = float("0.191023") + mean = float("0.0694403") + std = float("0.0298835") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.231545") + max_val = float("0.20914") + mean = float("-0.0203256") + std = float("0.0747077") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.023736") + max_val = float("0.03185") + mean = float("-3.10455e-05") + std = float("0.00160831") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0734011") + max_val = float("0.0209518") + mean = float("-0.0234929") + std = float("0.0134643") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946001") + max_val = float("1.1693") + mean = float("1.01467") + std = float("0.0274094") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0574212") + max_val = float("0.36113") + mean = float("0.167754") + std = float("0.0624421") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.55759") + max_val = float("1.75104") + mean = float("0.0310697") + std = float("0.530417") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0464008") + max_val = float("0.0539612") + mean = float("8.40176e-05") + std = float("0.00279856") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183405") + max_val = float("0.0258023") + mean = float("-0.00146113") + std = float("0.00679536") + 
data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969528") + max_val = float("1.06063") + mean = float("0.993845") + std = float("0.0122858") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00270127") + max_val = float("0.0149795") + mean = float("0.00630368") + std = float("0.00229884") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.100294") + max_val = float("0.0506795") + mean = float("-0.0387404") + std = float("0.0226508") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0312313") + max_val = float("0.0414999") + mean = float("-0.000484357") + std = float("0.00306542") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183405") + max_val = float("0.0258023") + mean = float("-0.00146113") + std = float("0.00679536") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.971893") + max_val = float("1.08657") + mean = float("1.00365") + std = float("0.0181767") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0120607") + max_val = float("0.104138") + mean = float("0.0369785") + std = float("0.0161136") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.26934") + max_val = float("0.110834") + mean = float("-0.115324") + std = float("0.0508614") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0290225") + max_val = float("0.0663413") + mean = float("-0.000179167") + std = float("0.00128493") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0494678") + max_val = float("0.00858064") + mean = float("-0.00839597") + std = float("0.00776335") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.95427") + max_val = float("1.13764") + mean = float("1.01254") + std = float("0.0201656") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0686687") + max_val = float("0.339855") + mean = float("0.163513") + std = float("0.0492742") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.18586") + max_val = float("0.819546") + mean = float("-0.222925") + std = float("0.255668") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0230308") + max_val = float("0.0510876") + mean = float("-0.000132883") + std = float("0.00152671") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0358263") + 
max_val = float("0.0138961") + mean = float("-0.00764663") + std = float("0.00787851") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984161") + max_val = float("1.03457") + mean = float("0.999922") + std = float("0.00712994") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00175415") + max_val = float("0.0108139") + mean = float("0.00364709") + std = float("0.0011719") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0809601") + max_val = float("0.126515") + mean = float("-0.0205001") + std = float("0.0225741") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0193951") + max_val = float("0.033031") + mean = float("-0.00027113") + std = float("0.00265482") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0358263") + max_val = float("0.0138961") + mean = float("-0.00764663") + std = float("0.00787851") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.981952") + max_val = float("1.06739") + mean = float("1.00455") + std = float("0.0126595") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00890186") + max_val = float("0.0527898") + mean = float("0.0229235") + std = float("0.00801757") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.216942") + max_val = float("0.320178") + mean = float("-0.0713609") + std = float("0.0619825") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.011273") + max_val = float("0.0330076") + mean = float("-0.000116379") + std = float("0.00107823") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0530152") + max_val = float("0.00371186") + mean = float("-0.02064") + std = float("0.00869095") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.975684") + max_val = float("1.08474") + mean = float("1.01197") + std = float("0.0159573") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0111248") + max_val = float("0.075422") + mean = float("0.0304294") + std = float("0.0099954") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.17639") + max_val = float("0.208749") + mean = float("-0.0371536") + std = float("0.0489761") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0145313") + max_val = float("0.0244098") + mean = float("-6.42304e-05") + std = float("0.00148685") + data = None + + +class Program_weight_tensor_parameter_315: + 
name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699692") + max_val = float("0.0213726") + mean = float("-0.0333959") + std = float("0.0126416") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981916") + max_val = float("1.05598") + mean = float("1.01336") + std = float("0.0107863") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.0069754") + max_val = float("0.0280088") + mean = float("0.0131181") + std = float("0.00318771") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.145777") + max_val = float("0.0727488") + mean = float("-0.0135891") + std = float("0.034538") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.017823") + max_val = float("0.0467678") + mean = float("-0.000196972") + std = float("0.00306745") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0243502") + max_val = float("0.0209146") + mean = float("-0.000403346") + std = float("0.00795216") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994149") + max_val = float("1.08382") + mean = float("1.04111") + std = float("0.0136566") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0116675") + max_val = float("0.0524889") + mean = float("0.0209671") + std = float("0.00553338") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.150832") + max_val = float("0.146748") + mean = float("-0.00513308") + std = float("0.045991") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0386333") + max_val = float("0.0273306") + mean = float("-0.000230864") + std = float("0.00376096") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-2.92289e-10") + max_val = float("3.60219e-10") + mean = float("3.09639e-12") + std = float("8.43535e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.797367") + max_val = float("0.801926") + mean = float("0.79841") + std = float("0.000347528") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0176922") + max_val = float("0.0176774") + mean = float("0.000103427") + std = float("0.0103725") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0180007") + max_val = float("0.0179557") + mean = float("-3.21152e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249658") + max_val = float("0.0249348") + mean = 
float("-0.000264432") + std = float("0.0140502") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0252063") + max_val = float("0.025197") + mean = float("-1.07218e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000555217") + max_val = float("0.000243272") + mean = float("1.9929e-07") + std = float("9.19554e-05") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.7967") + max_val = float("0.802162") + mean = float("0.798413") + std = float("0.000401989") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.000419711") + max_val = float("0.000305357") + mean = float("3.42389e-06") + std = float("9.72847e-05") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0435604") + max_val = float("0.0434697") + mean = float("8.92987e-06") + std = float("0.0249341") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.0003762") + max_val = float("0.000251332") + mean = float("1.8105e-05") + std = float("9.15252e-05") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.796851") + max_val = float("0.802025") + mean = float("0.798428") + std = float("0.00039013") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype = "float32" + min_val = float("-0.0176874") + max_val = float("0.0176099") + mean = float("0.000102328") + std = float("0.0103695") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0179828") + max_val = float("0.0178747") + mean = float("-4.45837e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.024962") + max_val = float("0.0249286") + mean = float("-0.000262687") + std = float("0.0140493") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251902") + max_val = float("0.0251499") + mean = float("-1.07229e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000350131") + max_val = float("0.000241698") + mean = float("-4.47589e-07") + std = float("8.78869e-05") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.797093") + max_val = float("0.80173") + mean = float("0.798412") + std = float("0.000358076") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000363068") + max_val = float("0.000309618") + mean = float("1.5817e-06") + std = float("9.93162e-05") + data = None + + +class 
Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0434436") + max_val = float("0.0434242") + mean = float("8.89642e-06") + std = float("0.0249342") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000420154") + max_val = float("0.00045328") + mean = float("2.54749e-05") + std = float("0.000129154") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.797252") + max_val = float("0.801713") + mean = float("0.798442") + std = float("0.000356685") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0177165") + max_val = float("0.0176232") + mean = float("0.000101918") + std = float("0.01036") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0179359") + max_val = float("0.0179271") + mean = float("-5.19214e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249926") + max_val = float("0.024906") + mean = float("-0.000261807") + std = float("0.0140485") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251321") + max_val = float("0.0251645") + mean = float("-1.07239e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000524045") + max_val = float("0.000440655") + mean = float("-3.94412e-07") + std = float("0.000134893") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.797309") + max_val = float("0.801401") + mean = float("0.798411") + std = float("0.000338184") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000493111") + max_val = float("0.000473844") + mean = float("-1.84034e-06") + std = float("0.000140404") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.043449") + max_val = float("0.0434786") + mean = float("8.89802e-06") + std = float("0.0249343") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000657995") + max_val = float("0.000731658") + mean = float("3.06102e-05") + std = float("0.000218356") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" + min_val = float("0.797424") + max_val = float("0.801436") + mean = float("0.798458") + std = float("0.000356085") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.0177131") + max_val = float("0.0177688") + mean = float("0.000101971") + std = float("0.0103559") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" 
+ min_val = float("-0.0180121") + max_val = float("0.0179253") + mean = float("-4.95311e-07") + std = float("0.0101901") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.024929") + max_val = float("0.0249049") + mean = float("-0.000259673") + std = float("0.0140481") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251524") + max_val = float("0.0252285") + mean = float("-1.0724e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.000733503") + max_val = float("0.00075958") + mean = float("-7.75532e-07") + std = float("0.000228197") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.797348") + max_val = float("0.801141") + mean = float("0.79841") + std = float("0.000355182") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + min_val = float("-0.000783512") + max_val = float("0.000888767") + mean = float("-2.37833e-07") + std = float("0.000237219") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0437281") + max_val = float("0.0438114") + mean = float("8.92961e-06") + std = float("0.0249348") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [1024] + dtype = "float32" + min_val = float("-3.7594") + max_val = float("-0.734446") + mean = float("-2.18722") + std = float("0.428724") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [1024] + dtype = "float32" + min_val = float("1.61913") + max_val = float("4.44136") + mean = float("3.08039") + std = float("0.25425") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [1024] + dtype = "float32" + min_val = float("0.00445214") + max_val = float("0.0229447") + mean = float("0.00885079") + std = float("0.00174582") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [1024] + dtype = "float32" + min_val = float("-0.140092") + max_val = float("0.122679") + mean = float("-0.0555511") + std = float("0.0303054") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0427729") + max_val = float("0.0695573") + mean = float("-0.000391863") + std = float("0.00403905") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [768] + dtype = "float32" + min_val = float("-0.014467") + max_val = float("0.00131875") + mean = float("-0.000761015") + std = float("0.00204153") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0787519") + max_val = float("0.135878") + mean = float("-0.000282851") + std = float("0.0016268") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318654") + mean = float("-0.310798") + std = float("0.291236") + 
data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.188523") + max_val = float("1.82125") + mean = float("0.609641") + std = float("0.262607") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("5.24774e-05") + max_val = float("0.00104929") + mean = float("0.000233242") + std = float("0.000113289") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0915885") + max_val = float("0.074148") + mean = float("0.0209669") + std = float("0.0171605") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020214") + max_val = float("0.0255239") + mean = float("-0.000361046") + std = float("0.00271802") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318949") + mean = float("-0.310739") + std = float("0.291254") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("0.334653") + max_val = float("2.60511") + mean = float("1.02603") + std = float("0.290253") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.000614651") + max_val = float("0.00608489") + mean = float("0.00209955") + std = float("0.000752612") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.228713") + max_val = float("0.112503") + mean = float("0.0217264") + std = float("0.036913") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0190584") + max_val = float("0.0259183") + mean = float("-4.76047e-05") + std = float("0.0017617") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.58225") + max_val = float("0.0329867") + mean = float("-1.56843") + std = float("0.415962") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.52002") + max_val = float("1.64429") + mean = float("1.13566") + std = float("0.149475") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.0409059") + max_val = float("0.240151") + mean = float("0.08781") + std = float("0.0241685") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.904392") + max_val = float("0.384792") + mean = float("-0.257009") + std = float("0.123376") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0213207") + max_val = float("0.0602371") + mean = float("-0.000201951") + std = float("0.00231308") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.93927") + max_val = 
float("0.644474") + mean = float("-0.574884") + std = float("0.358671") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.163873") + max_val = float("2.06585") + mean = float("0.562027") + std = float("0.227242") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("7.74518e-05") + max_val = float("0.00147627") + mean = float("0.000262123") + std = float("0.000127782") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0472137") + max_val = float("0.0687191") + mean = float("0.0210509") + std = float("0.0147693") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246209") + max_val = float("0.0323191") + mean = float("-0.00038074") + std = float("0.00249603") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.645257") + mean = float("-0.574812") + std = float("0.358742") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.58315") + max_val = float("2.15642") + mean = float("1.08405") + std = float("0.255745") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.00138889") + max_val = float("0.00913889") + mean = float("0.00296031") + std = float("0.000878211") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.082843") + max_val = float("0.147398") + mean = float("0.0337157") + std = float("0.0397786") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017236") + max_val = float("0.0310435") + mean = float("-8.47071e-05") + std = float("0.00189556") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.39591") + max_val = float("0.845752") + mean = float("-1.40539") + std = float("0.360596") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.453112") + max_val = float("1.91948") + mean = float("1.16636") + std = float("0.14802") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0310517") + max_val = float("0.140233") + mean = float("0.0619149") + std = float("0.0160952") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-0.74615") + max_val = float("0.831533") + mean = float("-0.183365") + std = float("0.11049") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259567") + max_val = float("0.0450409") + mean = float("-0.000200361") + std = float("0.00234146") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape 
= [384] + dtype = "float32" + min_val = float("-1.8762") + max_val = float("0.453243") + mean = float("-0.485339") + std = float("0.376467") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.0773354") + max_val = float("2.11925") + mean = float("0.441956") + std = float("0.217663") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("6.06445e-05") + max_val = float("0.00132946") + mean = float("0.000308842") + std = float("0.000151133") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0476186") + max_val = float("0.0717926") + mean = float("0.0252544") + std = float("0.0165171") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0207296") + max_val = float("0.0301957") + mean = float("-0.000479918") + std = float("0.0021441") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.87654") + max_val = float("0.453653") + mean = float("-0.485263") + std = float("0.376563") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.521871") + max_val = float("2.22439") + mean = float("1.05289") + std = float("0.260102") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00183356") + max_val = float("0.00905176") + mean = float("0.00403889") + std = float("0.0012136") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.210488") + max_val = float("0.180984") + mean = float("0.039756") + std = float("0.0449489") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0177497") + max_val = float("0.036737") + mean = float("-9.16795e-05") + std = float("0.00200706") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.15635") + max_val = float("0.418177") + mean = float("-1.36712") + std = float("0.277468") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.706134") + max_val = float("1.6357") + mean = float("1.14301") + std = float("0.101583") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0221089") + max_val = float("0.144688") + mean = float("0.0472828") + std = float("0.013291") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.690683") + max_val = float("0.206204") + mean = float("-0.128898") + std = float("0.0935638") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274071") + max_val = float("0.0448565") + mean = float("-0.000158418") + std = float("0.00223888") + data = None + + +class 
Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-2.9232") + max_val = float("1.66463") + mean = float("-0.760372") + std = float("0.643546") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.953224") + max_val = float("2.91794") + mean = float("1.86322") + std = float("0.27618") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.00282756") + max_val = float("0.0125667") + mean = float("0.00523085") + std = float("0.0013343") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.250212") + max_val = float("0.146125") + mean = float("0.0636405") + std = float("0.0327087") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0371909") + max_val = float("0.0509187") + mean = float("-0.000727671") + std = float("0.00522845") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-2.2471") + max_val = float("0.681977") + mean = float("-0.777142") + std = float("0.472903") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.965853") + max_val = float("2.89359") + mean = float("2.09705") + std = float("0.305433") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.000815531") + max_val = float("0.00405601") + mean = float("0.00200318") + std = float("0.000443934") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.0161045") + max_val = float("0.0799797") + mean = float("0.0350115") + std = float("0.0164865") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0815437") + max_val = float("0.0646253") + mean = float("-0.000388202") + std = float("0.00359255") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [768] + dtype = "float32" + min_val = float("-2.40199") + max_val = float("0.642394") + mean = float("-0.908374") + std = float("0.339302") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [768] + dtype = "float32" + min_val = float("0.530297") + max_val = float("1.90727") + mean = float("0.919687") + std = float("0.149179") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [768] + dtype = "float32" + min_val = float("0.00640934") + max_val = float("0.0572679") + mean = float("0.0157251") + std = float("0.0047052") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [768] + dtype = "float32" + min_val = float("-0.235794") + max_val = float("0.254524") + mean = float("0.0393271") + std = float("0.0563154") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0378314") + max_val = float("0.0543419") + mean = 
float("-9.75912e-05") + std = float("0.00233888") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [512] + dtype = "float32" + min_val = float("-3.38998") + max_val = float("1.66652") + mean = float("-1.16179") + std = float("0.513719") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [512] + dtype = "float32" + min_val = float("0.523767") + max_val = float("1.67712") + mean = float("1.11122") + std = float("0.148184") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [512] + dtype = "float32" + min_val = float("0.00248322") + max_val = float("0.0169425") + mean = float("0.00762328") + std = float("0.00205743") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [512] + dtype = "float32" + min_val = float("-0.172258") + max_val = float("0.0979883") + mean = float("-0.0487286") + std = float("0.0396462") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202262") + max_val = float("0.184296") + mean = float("-0.000573477") + std = float("0.00792306") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-0.0100703") + max_val = float("0.00138871") + mean = float("-0.00295173") + std = float("0.00227127") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202729") + max_val = float("0.140205") + mean = float("-0.002055") + std = float("0.00490701") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-1.97045") + max_val = float("0.409864") + mean = float("-0.348766") + std = float("0.333488") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.0528864") + max_val = float("2.15987") + mean = float("0.581255") + std = float("0.419833") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("9.0619e-05") + max_val = float("0.0013381") + mean = float("0.000452295") + std = float("0.000216487") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-0.0346181") + max_val = float("0.054258") + mean = float("0.00535595") + std = float("0.0149315") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.023487") + max_val = float("0.0581182") + mean = float("-0.000339748") + std = float("0.0040934") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-1.97037") + max_val = float("0.410702") + mean = float("-0.34863") + std = float("0.333546") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("0.372338") + max_val = float("2.70216") + mean = float("1.20181") + std = float("0.493699") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = 
"float32" + min_val = float("0.00127295") + max_val = float("0.0154499") + mean = float("0.00513167") + std = float("0.00187691") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.0977349") + max_val = float("0.146963") + mean = float("0.0204027") + std = float("0.0429259") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0289902") + max_val = float("0.0378296") + mean = float("-0.000154473") + std = float("0.00313532") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.89065") + max_val = float("-0.176734") + mean = float("-1.31453") + std = float("0.40113") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.696524") + max_val = float("2.09454") + mean = float("1.17918") + std = float("0.169868") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.0632461") + max_val = float("0.338318") + mean = float("0.131968") + std = float("0.0437735") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-2.50976") + max_val = float("1.70367") + mean = float("-0.20284") + std = float("0.378719") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0331927") + max_val = float("0.0456383") + mean = float("-0.000188198") + std = float("0.00374306") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.9404") + max_val = float("0.513024") + mean = float("-0.279434") + std = float("0.321452") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0454025") + max_val = float("1.77027") + mean = float("0.444331") + std = float("0.305722") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("7.46909e-05") + max_val = float("0.00134485") + mean = float("0.000400773") + std = float("0.000214016") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0293086") + max_val = float("0.0470179") + mean = float("0.00801703") + std = float("0.0116545") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0234926") + max_val = float("0.036738") + mean = float("-0.000377237") + std = float("0.00377417") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.94044") + max_val = float("0.51462") + mean = float("-0.279235") + std = float("0.321666") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.483074") + max_val = float("2.27001") + mean = float("1.13833") + std = float("0.37563") + data = None + + +class 
Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.00274472") + max_val = float("0.0142561") + mean = float("0.00601192") + std = float("0.0018096") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0926083") + max_val = float("0.111934") + mean = float("0.0327612") + std = float("0.0355469") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0231072") + max_val = float("0.038718") + mean = float("-0.000192078") + std = float("0.00338604") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.50828") + max_val = float("-0.123237") + mean = float("-1.28886") + std = float("0.44374") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.65494") + max_val = float("1.66968") + mean = float("1.19938") + std = float("0.166128") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0467958") + max_val = float("0.20027") + mean = float("0.0945377") + std = float("0.0272574") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-2.14487") + max_val = float("0.410589") + mean = float("-0.110743") + std = float("0.24642") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0362254") + max_val = float("0.0508084") + mean = float("-0.000238085") + std = float("0.00389331") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.468575") + mean = float("-0.262432") + std = float("0.335818") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.00295124") + max_val = float("1.67875") + mean = float("0.351961") + std = float("0.251699") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("9.28523e-07") + max_val = float("0.00191867") + mean = float("0.000359659") + std = float("0.00024946") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0373738") + max_val = float("0.0528657") + mean = float("0.0101716") + std = float("0.0121908") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0303466") + max_val = float("0.0356195") + mean = float("-0.000425557") + std = float("0.0036432") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.470016") + mean = float("-0.262262") + std = float("0.336041") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.406102") + max_val = float("1.97794") + mean = 
float("1.06588") + std = float("0.334156") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.0026697") + max_val = float("0.0132838") + mean = float("0.00612262") + std = float("0.00179786") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0636079") + max_val = float("0.115567") + mean = float("0.035464") + std = float("0.0321331") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0321474") + max_val = float("0.0388371") + mean = float("-0.000190596") + std = float("0.00354187") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.49735") + max_val = float("0.137985") + mean = float("-1.24334") + std = float("0.424316") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.652126") + max_val = float("1.80991") + mean = float("1.16717") + std = float("0.165409") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0309664") + max_val = float("0.139012") + mean = float("0.0677931") + std = float("0.0174696") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.51706") + max_val = float("0.284541") + mean = float("-0.0982665") + std = float("0.179401") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.05013") + max_val = float("0.0656662") + mean = float("-0.000261502") + std = float("0.00399974") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-2.07916") + max_val = float("0.533363") + mean = float("-0.272351") + std = float("0.375289") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.000510371") + max_val = float("0.732354") + mean = float("0.211968") + std = float("0.136272") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("6.2328e-08") + max_val = float("0.00079658") + mean = float("0.00024174") + std = float("0.00013494") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0197338") + max_val = float("0.031677") + mean = float("0.00620505") + std = float("0.0092414") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0202783") + max_val = float("0.036136") + mean = float("-0.000265605") + std = float("0.00319736") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.07922") + max_val = float("0.535166") + mean = float("-0.272236") + std = float("0.375502") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = 
"float32" + min_val = float("0.396505") + max_val = float("1.96272") + mean = float("0.958924") + std = float("0.303858") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00305567") + max_val = float("0.014764") + mean = float("0.00641083") + std = float("0.00196591") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.0910836") + max_val = float("0.162129") + mean = float("0.0386063") + std = float("0.0345053") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0299549") + max_val = float("0.0371106") + mean = float("-0.000205046") + std = float("0.00364104") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0810353") + mean = float("-1.23693") + std = float("0.434057") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.761623") + max_val = float("1.62105") + mean = float("1.15096") + std = float("0.142541") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0272966") + max_val = float("0.103735") + mean = float("0.0487407") + std = float("0.0115761") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.23827") + max_val = float("0.28535") + mean = float("-0.0748347") + std = float("0.164085") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0531238") + max_val = float("0.0579085") + mean = float("-0.000268921") + std = float("0.00396934") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-1.21219") + max_val = float("0.446681") + mean = float("-0.232278") + std = float("0.339349") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("-9.82711e-05") + max_val = float("0.677789") + mean = float("0.192032") + std = float("0.120727") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("2.25949e-10") + max_val = float("0.000865962") + mean = float("0.000239023") + std = float("0.000143426") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0494718") + max_val = float("0.0374457") + mean = float("0.00677273") + std = float("0.0117019") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0342199") + max_val = float("0.0396943") + mean = float("-0.000272099") + std = float("0.00329482") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-1.21223") + max_val = float("0.447751") + mean = float("-0.232181") + std = float("0.33961") + data = None + + +class 
Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.382831") + max_val = float("1.56386") + mean = float("0.852099") + std = float("0.259991") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00222243") + max_val = float("0.013094") + mean = float("0.00622857") + std = float("0.00178192") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0846332") + max_val = float("0.142415") + mean = float("0.0388704") + std = float("0.03792") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0323048") + max_val = float("0.0364338") + mean = float("-0.000186547") + std = float("0.00363857") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.48701") + max_val = float("-0.131293") + mean = float("-1.25014") + std = float("0.418255") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.689678") + max_val = float("1.5199") + mean = float("1.12491") + std = float("0.13482") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0185507") + max_val = float("0.061415") + mean = float("0.0351899") + std = float("0.00879212") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.717377") + max_val = float("0.320847") + mean = float("-0.0746543") + std = float("0.131126") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0610342") + max_val = float("0.0592016") + mean = float("-0.000277763") + std = float("0.00397261") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21753") + max_val = float("0.499396") + mean = float("-0.167678") + std = float("0.2936") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("0.00836385") + max_val = float("1.53625") + mean = float("0.238111") + std = float("0.211728") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("1.9816e-05") + max_val = float("0.00693944") + mean = float("0.000506424") + std = float("0.00066743") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0656722") + max_val = float("0.0862214") + mean = float("0.00951742") + std = float("0.0164341") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0600528") + max_val = float("0.0312537") + mean = float("-0.000425532") + std = float("0.00397123") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21747") + max_val = float("0.500448") + mean 
= float("-0.167516") + std = float("0.293818") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.354999") + max_val = float("1.44989") + mean = float("0.756941") + std = float("0.21662") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00437457") + max_val = float("0.0169983") + mean = float("0.00911743") + std = float("0.0026827") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.159743") + max_val = float("0.154142") + mean = float("0.0493949") + std = float("0.0451797") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062497") + max_val = float("0.0530577") + mean = float("-0.000241352") + std = float("0.00357809") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-1.87905") + max_val = float("-0.211382") + mean = float("-1.14643") + std = float("0.325653") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.788784") + max_val = float("1.59753") + mean = float("1.12152") + std = float("0.12987") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0159247") + max_val = float("0.0763614") + mean = float("0.0315734") + std = float("0.00929052") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.690131") + max_val = float("0.284936") + mean = float("-0.0667142") + std = float("0.130814") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062874") + max_val = float("0.076648") + mean = float("-0.000213471") + std = float("0.00383126") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-2.86217") + max_val = float("1.58057") + mean = float("-0.0275412") + std = float("0.747651") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.487672") + max_val = float("2.0776") + mean = float("0.90163") + std = float("0.232007") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("0.00962562") + max_val = float("0.0593409") + mean = float("0.0232174") + std = float("0.00900384") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.230196") + max_val = float("0.297365") + mean = float("-0.0377198") + std = float("0.0596344") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.108831") + max_val = float("0.0931739") + mean = float("-0.000512323") + std = float("0.00842399") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + 
min_val = float("-2.96764") + max_val = float("1.66844") + mean = float("0.0968476") + std = float("0.663233") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.830791") + max_val = float("5.55835") + mean = float("1.91342") + std = float("0.933379") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00601536") + max_val = float("0.0460481") + mean = float("0.0175059") + std = float("0.00564168") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.133093") + max_val = float("0.157686") + mean = float("-0.0238439") + std = float("0.0565348") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0985625") + max_val = float("0.0941202") + mean = float("-0.000511784") + std = float("0.00783691") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [384] + dtype = "float32" + min_val = float("-2.92359") + max_val = float("1.32666") + mean = float("-0.301116") + std = float("0.563662") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [384] + dtype = "float32" + min_val = float("0.631853") + max_val = float("2.47541") + mean = float("1.15998") + std = float("0.257348") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [384] + dtype = "float32" + min_val = float("0.0103628") + max_val = float("0.113663") + mean = float("0.0263639") + std = float("0.0126689") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [384] + dtype = "float32" + min_val = float("-0.269684") + max_val = float("0.245058") + mean = float("0.022821") + std = float("0.0693499") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0753194") + max_val = float("0.0720032") + mean = float("-0.000103466") + std = float("0.00421781") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [256] + dtype = "float32" + min_val = float("-2.04502") + max_val = float("1.28816") + mean = float("-0.924614") + std = float("0.543015") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [256] + dtype = "float32" + min_val = float("0.517239") + max_val = float("1.68961") + mean = float("1.05432") + std = float("0.176149") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [256] + dtype = "float32" + min_val = float("0.00196874") + max_val = float("0.02692") + mean = float("0.00629128") + std = float("0.00300317") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [256] + dtype = "float32" + min_val = float("-0.230499") + max_val = float("0.154945") + mean = float("-0.0516552") + std = float("0.0688298") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.206154") + max_val = float("0.170783") + mean = float("-0.000884197") + std = float("0.0145162") + data = None + + +class Program_weight_tensor_parameter_549: + 
name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-0.0139357") + max_val = float("0.00388361") + mean = float("-0.00495662") + std = float("0.00371291") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.347135") + max_val = float("0.228777") + mean = float("-0.00389388") + std = float("0.0106293") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-1.91355") + max_val = float("0.53303") + mean = float("-0.208939") + std = float("0.434311") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.142427") + max_val = float("3.22988") + mean = float("0.635833") + std = float("0.668487") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("7.75639e-05") + max_val = float("0.00243254") + mean = float("0.000585507") + std = float("0.000428968") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-0.0546921") + max_val = float("0.0598506") + mean = float("0.0051419") + std = float("0.0215625") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0500852") + max_val = float("0.0932317") + mean = float("-0.000561284") + std = float("0.00794853") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-1.91314") + max_val = float("0.534306") + mean = float("-0.208596") + std = float("0.434435") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.343774") + max_val = float("5.47118") + mean = float("1.08565") + std = float("0.88383") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("0.000976934") + max_val = float("0.0156948") + mean = float("0.0053425") + std = float("0.00272306") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.137727") + max_val = float("0.212796") + mean = float("0.0123751") + std = float("0.0612578") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0398886") + max_val = float("0.0746673") + mean = float("-0.000229692") + std = float("0.00588155") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-2.46605") + max_val = float("-0.0202143") + mean = float("-1.22676") + std = float("0.443304") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.542082") + max_val = float("1.6433") + mean = float("0.945634") + std = float("0.172529") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.0406212") + max_val = float("0.236841") + mean = float("0.0868745") + std = float("0.0368113") + 
data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-2.80804") + max_val = float("1.61985") + mean = float("-0.194669") + std = float("0.469655") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.150203") + max_val = float("0.114223") + mean = float("-0.000376735") + std = float("0.00724688") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.38826") + max_val = float("0.562406") + mean = float("-0.132909") + std = float("0.347394") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.0453402") + max_val = float("1.86504") + mean = float("0.460875") + std = float("0.366369") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.68974e-05") + max_val = float("0.00276882") + mean = float("0.000760156") + std = float("0.000616821") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0484682") + max_val = float("0.0463877") + mean = float("0.00677392") + std = float("0.017635") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0483138") + max_val = float("0.0415922") + mean = float("-0.000498568") + std = float("0.00710731") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.38834") + max_val = float("0.5648") + mean = float("-0.13256") + std = float("0.347894") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.370504") + max_val = float("2.32822") + mean = float("0.901933") + std = float("0.426522") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.00320483") + max_val = float("0.0242439") + mean = float("0.00920914") + std = float("0.00476152") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0963095") + max_val = float("0.121293") + mean = float("0.0354751") + std = float("0.0431439") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.058655") + max_val = float("0.0591114") + mean = float("-0.000356621") + std = float("0.0059174") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-3.31955") + max_val = float("0.36603") + mean = float("-1.17895") + std = float("0.556023") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.473098") + max_val = float("1.98183") + mean = float("1.03911") + std = float("0.238708") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0285476") + max_val = float("0.145477") + mean = 
float("0.0548077") + std = float("0.0170506") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-1.25068") + max_val = float("0.505193") + mean = float("-0.0605176") + std = float("0.268142") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.147666") + max_val = float("0.152112") + mean = float("-0.000410438") + std = float("0.00711818") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.24956") + max_val = float("0.58267") + mean = float("-0.109749") + std = float("0.291966") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.0243293") + max_val = float("1.27785") + mean = float("0.324816") + std = float("0.192866") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("6.31792e-05") + max_val = float("0.00359895") + mean = float("0.000713188") + std = float("0.000575582") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0383061") + max_val = float("0.050179") + mean = float("0.00405305") + std = float("0.016189") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0448708") + max_val = float("0.0573038") + mean = float("-0.000336044") + std = float("0.00726838") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.24942") + max_val = float("0.584539") + mean = float("-0.109552") + std = float("0.292478") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.315495") + max_val = float("1.67063") + mean = float("0.747087") + std = float("0.257847") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00339766") + max_val = float("0.0255152") + mean = float("0.0102502") + std = float("0.00411405") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0545808") + max_val = float("0.144753") + mean = float("0.0274454") + std = float("0.0383073") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.065253") + max_val = float("0.0583777") + mean = float("-0.000331097") + std = float("0.00602268") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.58296") + max_val = float("0.290726") + mean = float("-1.12856") + std = float("0.572409") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.511106") + max_val = float("2.19165") + mean = float("1.05198") + std = float("0.238255") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = 
float("0.0202502") + max_val = float("0.0763383") + mean = float("0.0399884") + std = float("0.00963234") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.823777") + max_val = float("0.397341") + mean = float("-0.0477408") + std = float("0.195386") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0973524") + max_val = float("0.130681") + mean = float("-0.000422376") + std = float("0.00719502") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-0.892064") + max_val = float("0.529384") + mean = float("-0.160709") + std = float("0.281574") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0191223") + max_val = float("1.40524") + mean = float("0.32501") + std = float("0.213327") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("4.82579e-05") + max_val = float("0.00368813") + mean = float("0.000731321") + std = float("0.0005637") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0327526") + max_val = float("0.0463647") + mean = float("0.00722598") + std = float("0.0145649") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0499906") + max_val = float("0.0448114") + mean = float("-0.000606145") + std = float("0.00724394") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-0.891955") + max_val = float("0.530721") + mean = float("-0.160571") + std = float("0.281998") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.17446") + max_val = float("1.78047") + mean = float("0.708571") + std = float("0.284378") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00236192") + max_val = float("0.0258909") + mean = float("0.0102105") + std = float("0.00395084") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.0582992") + max_val = float("0.137218") + mean = float("0.0409603") + std = float("0.0377027") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.057305") + max_val = float("0.0650381") + mean = float("-0.000417143") + std = float("0.00601776") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-2.65777") + max_val = float("0.065358") + mean = float("-1.06432") + std = float("0.48826") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.512951") + max_val = float("1.73806") + mean = float("1.01547") + std = float("0.193357") + data = None + + +class Program_weight_tensor_parameter_608: + name = 
"parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0176905") + max_val = float("0.0567785") + mean = float("0.0307593") + std = float("0.00710222") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.762613") + max_val = float("0.609475") + mean = float("-0.0648606") + std = float("0.194567") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0738037") + max_val = float("0.125248") + mean = float("-0.000426247") + std = float("0.0069708") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.978262") + max_val = float("0.489992") + mean = float("-0.136691") + std = float("0.278636") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0498074") + max_val = float("1.1462") + mean = float("0.296075") + std = float("0.172323") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("0.000185263") + max_val = float("0.00518845") + mean = float("0.00108541") + std = float("0.000730374") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.041706") + max_val = float("0.0562903") + mean = float("0.00553756") + std = float("0.017998") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0731207") + max_val = float("0.0763792") + mean = float("-0.000594618") + std = float("0.00825765") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.978083") + max_val = float("0.492448") + mean = float("-0.136655") + std = float("0.279122") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.236133") + max_val = float("1.69671") + mean = float("0.603953") + std = float("0.228164") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00623834") + max_val = float("0.0304043") + mean = float("0.0139144") + std = float("0.00496797") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.0709982") + max_val = float("0.13525") + mean = float("0.0270257") + std = float("0.0460507") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0654835") + max_val = float("0.0522648") + mean = float("-0.00036204") + std = float("0.0060426") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-3.46434") + max_val = float("0.199609") + mean = float("-1.00527") + std = float("0.548081") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.686506") + max_val = float("2.51291") + mean = float("1.07427") + std = float("0.212412") + data = None + 
+ +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.0132607") + max_val = float("0.0547669") + mean = float("0.0263345") + std = float("0.00858541") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.483153") + max_val = float("0.528087") + mean = float("-0.0517666") + std = float("0.193156") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0824841") + max_val = float("0.0934753") + mean = float("-0.000357672") + std = float("0.00712731") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.625302") + max_val = float("0.449836") + mean = float("-0.0825559") + std = float("0.256738") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0910018") + max_val = float("1.30085") + mean = float("0.309049") + std = float("0.196412") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000380277") + max_val = float("0.0176497") + mean = float("0.00357657") + std = float("0.00282864") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.035932") + max_val = float("0.0300925") + mean = float("-5.114e-05") + std = float("0.0106361") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0925016") + max_val = float("0.0753255") + mean = float("-0.00105853") + std = float("0.00936655") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.625183") + max_val = float("0.450937") + mean = float("-0.082575") + std = float("0.257081") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.210658") + max_val = float("1.42703") + mean = float("0.527208") + std = float("0.258269") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.0103464") + max_val = float("0.0958287") + mean = float("0.0339795") + std = float("0.0172322") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.108648") + max_val = float("0.0906186") + mean = float("-0.00832054") + std = float("0.0379588") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0885375") + max_val = float("0.0525934") + mean = float("-0.000466484") + std = float("0.00584459") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-2.40893") + max_val = float("0.508421") + mean = float("-0.828862") + std = float("0.467337") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.853968") + max_val = float("2.18309") + mean 
= float("1.27545") + std = float("0.208741") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0115478") + max_val = float("0.0463068") + mean = float("0.0216413") + std = float("0.00767502") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.571223") + max_val = float("0.473029") + mean = float("-0.053671") + std = float("0.173924") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.15411") + max_val = float("0.150524") + mean = float("-0.000241604") + std = float("0.00722176") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-3.16609") + max_val = float("1.88989") + mean = float("0.501666") + std = float("0.861493") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.214988") + max_val = float("2.6299") + mean = float("0.562885") + std = float("0.31708") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.00741665") + max_val = float("0.158098") + mean = float("0.0323603") + std = float("0.0239189") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.27197") + max_val = float("0.329568") + mean = float("-0.0148612") + std = float("0.0939931") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.186901") + max_val = float("0.225419") + mean = float("-0.000291508") + std = float("0.0156297") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-4.92284") + max_val = float("1.57998") + mean = float("0.384603") + std = float("1.04888") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.414126") + max_val = float("6.78093") + mean = float("1.69449") + std = float("1.30795") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00527536") + max_val = float("0.274604") + mean = float("0.0382764") + std = float("0.0355398") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.171845") + max_val = float("0.443762") + mean = float("0.0466766") + std = float("0.0965758") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.116975") + max_val = float("0.156029") + mean = float("0.000440768") + std = float("0.0149691") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [192] + dtype = "float32" + min_val = float("-2.27475") + max_val = float("1.75104") + mean = float("-0.126037") + std = float("0.740702") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [192] + dtype = "float32" + min_val = float("0.632268") + 
max_val = float("2.97322") + mean = float("1.08733") + std = float("0.283408") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [192] + dtype = "float32" + min_val = float("0.0110312") + max_val = float("0.234931") + mean = float("0.0439587") + std = float("0.0319644") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [192] + dtype = "float32" + min_val = float("-0.578422") + max_val = float("0.269069") + mean = float("-0.0941015") + std = float("0.118583") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0856428") + max_val = float("0.123627") + mean = float("-0.000225745") + std = float("0.00765725") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [128] + dtype = "float32" + min_val = float("-2.81597") + max_val = float("1.9636") + mean = float("-0.71259") + std = float("0.647835") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [128] + dtype = "float32" + min_val = float("0.311227") + max_val = float("2.8783") + mean = float("1.01845") + std = float("0.278722") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [128] + dtype = "float32" + min_val = float("0.000859604") + max_val = float("0.015639") + mean = float("0.00453842") + std = float("0.00230768") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [128] + dtype = "float32" + min_val = float("-0.237838") + max_val = float("0.261934") + mean = float("0.00314431") + std = float("0.0867318") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.171773") + max_val = float("0.211127") + mean = float("-0.00142636") + std = float("0.0224525") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-0.0180386") + max_val = float("3.78007e-05") + mean = float("-0.00735479") + std = float("0.00450801") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.30281") + max_val = float("0.123007") + mean = float("-0.00790532") + std = float("0.0180213") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0501789") + max_val = float("0.0563261") + mean = float("-0.00170388") + std = float("0.0129798") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = 
"float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0578676") + max_val = float("0.0799749") + mean = float("-0.000509865") + std = float("0.0110281") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0925274") + max_val = float("0.0949158") + mean = float("-0.00064859") + std = float("0.0123667") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0727088") + max_val = float("0.0782992") + mean = float("-0.00102365") + std = float("0.0139349") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0621898") + max_val = 
float("0.0692526") + mean = float("-0.000822014") + std = float("0.0111057") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.11162") + max_val = float("0.0943574") + mean = float("-0.000368661") + std = float("0.0125785") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0944494") + max_val = float("0.0702451") + mean = float("-0.00185301") + std = float("0.0172184") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0691644") + max_val = float("0.0974384") + mean = float("-0.000506655") + std = float("0.011691") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + 
data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.133213") + max_val = float("0.0905212") + mean = float("-0.000334254") + std = float("0.0134452") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.17806") + max_val = float("0.14305") + mean = float("-0.00229242") + std = float("0.0246641") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.135012") + max_val = float("0.178483") + mean = float("-0.0004429") + std = float("0.0226955") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [96] + dtype = "float32" + min_val = float("-3.40701") + max_val = float("3.27538") + mean = float("0.329531") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [96] + dtype = "float32" + min_val = float("0.865919") + max_val = float("4.91404") + mean = float("1.91603") + std = float("0.752783") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [96] + dtype = "float32" + min_val = float("0.704881") + max_val = float("31.7293") + mean = float("2.73326") + std = float("3.48853") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [96] + dtype = "float32" + min_val = float("-1.47461") + max_val = float("2.59735") + mean = float("-0.288555") + std = float("0.730674") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.110689") + max_val = float("0.13859") + mean = float("-0.000360127") + std = float("0.0133189") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape 
= [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.179264") + max_val = float("0.162144") + mean = float("-0.000679023") + std = float("0.020536") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.347786") + max_val = float("0.218964") + mean = float("-0.000199571") + std = float("0.0261033") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.317155") + max_val = float("0.280865") + mean = float("-0.00214957") + std = float("0.0702742") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..f7e774060 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +b3c06a5ff7d63f2fddf054f6dc5423b30986582d3ae6be2b7b5c8465e31f6fac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/input_meta.py 
new file mode 100644 index 000000000..707a3ce59 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549, 10] + dtype = "float32" + min_val = float("1.49258e-10") + max_val = float("0.896854") + mean = float("0.00647097") + std = float("0.0257209") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3549] + dtype = "int32" + min_val = 0 + max_val = 10 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3549, 10] + dtype = "float32" + max_val = float("0.980323") + mean = float("0.000730272") + std = float("0.020635") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py new file mode 100644 index 000000000..c085c8396 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x11xf32) <- (2x3549xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (2x3549x10xf32) <- (2x3549x11xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (2x3549x10xf32) <- (2x3549x10xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x3549x10xf32) <- (2x3549x10xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x3549x10xf32) <- (2x3549x10xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (2x3549x10xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (2x3549x10xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + 
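+        # Note: the ops above build a varifocal-style weight,
+        # 0.75 * data_0^2 * (1 - onehot) + data_2 * onehot, and multiply it into the
+        # element-wise BCE term. full_3 (1.0) and full_4 (defined next, ~3.4e38) are
+        # the clip bounds that keep the normalizer sum(data_2) at least 1, so the
+        # final divide never divides by zero when there are no positive targets.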
# pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..a0378c0af --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +232d82e8296e7401dfecdd4d02b443354860807e4c42c73d06bee142cc219425 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/input_meta.py new file mode 100644 index 000000000..e3dbd3196 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 768, 13, 13] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.95786") + mean = float("0.267869") + std = float("0.616575") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 384, 26, 26] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.64291") + mean = float("0.362652") + std = float("0.73159") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 192, 52, 52] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("15.0864") + mean = float("0.450223") + std = float("0.759773") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/model.py new file mode 100644 index 000000000..f79f7a54d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + 
parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("13"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (13xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (13xf32) <- (13xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (13xf32) <- (13xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (13xf32) <- (13xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([13xf32, 13xf32]) <- (13xf32, 13xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([13x13xf32, 13x13xf32]) <- ([13xf32, 13xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (13x13xf32, 13x13xf32) <- ([13x13xf32, 13x13xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (13x13xf32) <- (13x13xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (13x13xf32) <- (13x13xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (13x13xf32) <- (13x13xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (13x13xf32) <- (13x13xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([13x13xf32, 13x13xf32, 13x13xf32, 13x13xf32]) <- (13x13xf32, 13x13xf32, 13x13xf32, 13x13xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (13x13x4xf32) <- ([13x13xf32, 13x13xf32, 13x13xf32, 13x13xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([13x13xf32, 13x13xf32]) <- (13x13xf32, 13x13xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (13x13x2xf32) <- ([13x13xf32, 13x13xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (169x4xf32) <- (13x13x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (169x2xf32) <- (13x13x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (169x1xf32) <- () + full_5 = paddle._C_ops.full( + [169, 1], + 
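+            # 169 = 13 * 13 grid points for the coarsest level; the value 32
+            # filled here is that level's stride (matching 676x1 tensors of 16
+            # and 2704x1 tensors of 8 are built below).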
float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("26"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (26xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (26xf32) <- (26xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (26xf32) <- (26xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (26xf32) <- (26xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([26xf32, 26xf32]) <- (26xf32, 26xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([26x26xf32, 26x26xf32]) <- ([26xf32, 26xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (26x26xf32, 26x26xf32) <- ([26x26xf32, 26x26xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (26x26xf32) <- (26x26xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (26x26xf32) <- (26x26xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (26x26xf32) <- (26x26xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (26x26xf32) <- (26x26xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([26x26xf32, 26x26xf32, 26x26xf32, 26x26xf32]) <- (26x26xf32, 26x26xf32, 26x26xf32, 26x26xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (26x26x4xf32) <- ([26x26xf32, 26x26xf32, 26x26xf32, 26x26xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([26x26xf32, 26x26xf32]) <- (26x26xf32, 26x26xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (26x26x2xf32) <- ([26x26xf32, 26x26xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (676x4xf32) <- (26x26x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (676x2xf32) <- (26x26x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (676x1xf32) <- () + full_8 = paddle._C_ops.full( + [676, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("52"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (52xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (52xf32) <- (52xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (52xf32) <- (52xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (52xf32) <- (52xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: 
([52xf32, 52xf32]) <- (52xf32, 52xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([52x52xf32, 52x52xf32]) <- ([52xf32, 52xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (52x52xf32, 52x52xf32) <- ([52x52xf32, 52x52xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (52x52xf32) <- (52x52xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (52x52xf32) <- (52x52xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (52x52xf32) <- (52x52xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (52x52xf32) <- (52x52xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([52x52xf32, 52x52xf32, 52x52xf32, 52x52xf32]) <- (52x52xf32, 52x52xf32, 52x52xf32, 52x52xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (52x52x4xf32) <- ([52x52xf32, 52x52xf32, 52x52xf32, 52x52xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([52x52xf32, 52x52xf32]) <- (52x52xf32, 52x52xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (52x52x2xf32) <- ([52x52xf32, 52x52xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (2704x4xf32) <- (52x52x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (2704x2xf32) <- (52x52x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (2704x1xf32) <- () + full_11 = paddle._C_ops.full( + [2704, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([169x4xf32, 676x4xf32, 2704x4xf32]) <- (169x4xf32, 676x4xf32, 2704x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (3549x4xf32) <- ([169x4xf32, 676x4xf32, 2704x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([169x2xf32, 676x2xf32, 2704x2xf32]) <- (169x2xf32, 676x2xf32, 2704x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (3549x2xf32) <- ([169x2xf32, 676x2xf32, 2704x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([169x1xf32, 676x1xf32, 2704x1xf32]) <- (169x1xf32, 676x1xf32, 2704x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (3549x1xf32) <- ([169x1xf32, 676x1xf32, 2704x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x13x13xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = 
paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x768x13x13xf32) <- (2x768x13x13xf32, 2x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x768x13x13xf32) <- (2x768x13x13xf32, 2x768x13x13xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x10x13x13xf32) <- (2x768x13x13xf32, 10x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x10x13x13xf32) <- (2x10x13x13xf32, 1x10x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x768x13x13xf32) <- (2x768x13x13xf32, 2x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + 
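+                # 0.9 and 1e-05 are the batch-norm momentum and epsilon; the
+                # surrounding pool2d -> 1x1 conv -> sigmoid -> multiply chain
+                # is an SE-style gate applied once per branch at each level.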
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x40x13x13xf32) <- (2x768x13x13xf32, 40x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x40x13x13xf32) <- (2x40x13x13xf32, 1x40x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x10x13x13xf32) <- (2x10x13x13xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x10x169xf32) <- (2x10x13x13xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x169x10xf32) <- (2x10x169xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x40x169xf32) <- (2x40x13x13xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x169x40xf32) <- (2x40x169xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x26x26xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x384x26x26xf32) <- (2x384x26x26xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x384x26x26xf32) <- (2x384x26x26xf32, 2x384x26x26xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x10x26x26xf32) <- (2x384x26x26xf32, 10x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x10x26x26xf32) <- (2x10x26x26xf32, 1x10x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x384x26x26xf32) <- (2x384x26x26xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x40x26x26xf32) <- (2x384x26x26xf32, 40x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x40x26x26xf32) <- (2x40x26x26xf32, 1x40x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x10x26x26xf32) <- (2x10x26x26xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x10x676xf32) <- (2x10x26x26xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x676x10xf32) <- (2x10x676xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x40x676xf32) <- (2x40x26x26xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x676x40xf32) <- (2x40x676xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x52x52xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del 
parameter_16 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x192x52x52xf32) <- (2x192x52x52xf32, 2x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x192x52x52xf32) <- (2x192x52x52xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x192x52x52xf32) <- (2x192x52x52xf32, 2x192x52x52xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x10x52x52xf32) <- (2x192x52x52xf32, 10x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x10x52x52xf32) <- (2x10x52x52xf32, 1x10x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x192x52x52xf32) <- (2x192x52x52xf32, 2x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x192x52x52xf32) <- (2x192x52x52xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) + swish_5 = 
paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x40x52x52xf32) <- (2x192x52x52xf32, 40x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x40x52x52xf32) <- (2x40x52x52xf32, 1x40x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x10x52x52xf32) <- (2x10x52x52xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x10x2704xf32) <- (2x10x52x52xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x2704x10xf32) <- (2x10x2704xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x40x2704xf32) <- (2x40x52x52xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x2704x40xf32) <- (2x40x2704xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x169x10xf32, 2x676x10xf32, 2x2704x10xf32]) <- (2x169x10xf32, 2x676x10xf32, 2x2704x10xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x3549x10xf32) <- ([2x169x10xf32, 2x676x10xf32, 2x2704x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x169x40xf32, 2x676x40xf32, 2x2704x40xf32]) <- (2x169x40xf32, 2x676x40xf32, 2x2704x40xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x3549x40xf32) <- ([2x169x40xf32, 2x676x40xf32, 2x2704x40xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + 
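+            # (the del only frees intermediates; the outputs returned below are
+            # the concatenated class scores concat_0, the box-distribution
+            # logits concat_1, and the anchor boxes, centers and strides in
+            # concat_2, concat_3 and concat_4)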
transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/weight_meta.py new file mode 100644 index 000000000..f22fa6bed --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/weight_meta.py @@ -0,0 +1,580 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.215457") + max_val = float("0.207607") + mean = float("1.58034e-08") + std = float("0.0115184") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0451387") + max_val = float("0.203431") + mean = float("0.0497256") + std = float("0.040343") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.83467") + max_val = float("1.62755") + mean = float("1.22081") + std = float("0.14523") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000391431") + max_val = float("0.0359133") + mean = float("0.0049604") + std = float("0.0056841") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.142213") + max_val = float("0.0737932") + mean = float("-0.0146741") + std = float("0.0326218") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.104082") + max_val = float("0.138351") + mean = float("-0.000622137") + std = float("0.00964682") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00943066") + max_val = float("0.0118578") + mean = float("-0.000140995") + std = float("0.00403013") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.010438") + max_val = float("0.0143864") + mean = float("-0.000238138") + std = float("0.00199836") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-0.155328") + max_val = float("0.0821954") + mean = float("-0.00108265") + std = float("0.010028") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.331288") + max_val = float("0.890505") + mean = float("0.355807") + std = float("0.271411") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.01341") + max_val = float("1.77578") + mean = float("1.31543") + std = float("0.143697") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000391762") + max_val = float("0.0198018") + mean = 
float("0.00265108") + std = float("0.00261202") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.22062") + max_val = float("0.103545") + mean = float("0.00150594") + std = float("0.0408963") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0694736") + max_val = float("0.0687723") + mean = float("-0.000579734") + std = float("0.00775465") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00470782") + max_val = float("0.0149258") + mean = float("-0.000145767") + std = float("0.00231494") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0102954") + max_val = float("0.0167369") + mean = float("-0.000107913") + std = float("0.00147629") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = float("-0.130644") + max_val = float("0.124065") + mean = float("3.53248e-09") + std = float("0.00555108") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00565629") + max_val = float("0.0694938") + mean = float("0.0255991") + std = float("0.013234") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.995084") + max_val = float("1.23807") + mean = float("1.10602") + std = float("0.0411488") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000152943") + max_val = float("0.0235401") + mean = float("0.00230126") + std = float("0.00317939") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.109279") + max_val = float("0.0248954") + mean = float("-0.0146434") + std = float("0.0174376") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0597621") + max_val = float("0.0678606") + mean = float("-0.000218408") + std = float("0.00349073") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00279589") + max_val = float("0.00705934") + mean = float("4.34391e-05") + std = float("0.00164054") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00188505") + max_val = float("0.00562992") + mean = float("-1.61818e-05") + std = float("0.000623837") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-0.061127") + max_val = float("0.0504336") + mean = float("-0.000654017") 
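+    # parameter_28 is the 10x384x3x3 classification conv for the 26x26 level
+    # (conv2d_8 in model.py); data = None means only summary statistics are
+    # stored here, not the raw values.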
+ std = float("0.00386427") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.153059") + max_val = float("0.452839") + mean = float("0.229253") + std = float("0.100281") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00375") + max_val = float("1.40227") + mean = float("1.18657") + std = float("0.0603744") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000127294") + max_val = float("0.0092646") + mean = float("0.000925538") + std = float("0.00119457") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.101711") + max_val = float("0.0546088") + mean = float("-0.0151804") + std = float("0.021309") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0387199") + max_val = float("0.0394125") + mean = float("-0.000238713") + std = float("0.00285694") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.0018716") + max_val = float("0.00897811") + mean = float("-1.61196e-05") + std = float("0.000950585") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00524054") + max_val = float("0.0067083") + mean = float("-1.34954e-05") + std = float("0.000498936") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40, 768, 3, 3] + dtype = "float32" + min_val = float("-0.026125") + max_val = float("0.0225343") + mean = float("4.24734e-10") + std = float("0.00120831") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0143455") + max_val = float("0.0479343") + mean = float("0.011343") + std = float("0.0104744") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.00871") + max_val = float("1.20125") + mean = float("1.06606") + std = float("0.0224878") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("2.90668e-05") + max_val = float("0.00222375") + mean = float("0.000232789") + std = float("0.00023903") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0193291") + max_val = float("0.00665622") + mean = float("-0.00490728") + std = float("0.00361794") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0330898") + max_val = float("0.0328938") + mean = float("-4.54468e-05") + std = float("0.00135348") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00379849") + max_val = float("0.00239524") + mean 
= float("6.6179e-05") + std = float("0.000794849") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00252116") + max_val = float("0.00212404") + mean = float("1.63958e-05") + std = float("0.000240311") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0130168") + max_val = float("0.0109645") + mean = float("-0.000356094") + std = float("0.00119879") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.110945") + max_val = float("0.199926") + mean = float("0.0933965") + std = float("0.0422407") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.00782") + max_val = float("1.25528") + mean = float("1.0788") + std = float("0.0262008") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("7.62735e-05") + max_val = float("0.0023225") + mean = float("0.000461701") + std = float("0.000294913") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0691247") + max_val = float("0.0463259") + mean = float("-0.0156783") + std = float("0.0145452") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0514247") + max_val = float("0.0234939") + mean = float("-0.000146637") + std = float("0.00147456") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00117735") + max_val = float("0.00378489") + mean = float("3.28085e-06") + std = float("0.000453985") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0118892") + max_val = float("0.0217941") + mean = float("2.14802e-06") + std = float("0.000238365") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py new file mode 100644 index 000000000..812c05090 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.311244] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [0.695293] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [0.894052] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..d7a44c4ca --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +5866f5c88d45db322dda97cc81b9a2145ea211417cab73af1890d9d8753ae5cb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py new file mode 100644 index 000000000..8361f35e8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py @@ -0,0 +1,84 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00124008") + std = float("0.0351929") + data = None + + +class Program_weight_tensor_data_1: 
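+    # data_1 holds per-image offsets (0 and 1); model.py scales them by 3 and
+    # adds them to a per-anchor argmax to index into the flattened
+    # ground-truth tensors data_2 and data_4.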
+ name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3, 1] + dtype = "int32" + data = [4, 3, 3, 3, 1, 0] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00372024") + std = float("0.0608802") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 3, 4] + dtype = "float32" + data = [ + 270.791, + 234.887, + 332.231, + 356.289, + 38.6844, + 240.165, + 99.3659, + 448.66, + 476.35, + 311.423, + 512.0, + 504.082, + 2.03175, + 161.292, + 9.34603, + 181.971, + 39.619, + 61.622, + 40.8381, + 69.0662, + 0.0, + 0.0, + 0.0, + 0.0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("0.00886879") + mean = float("8.72339e-07") + std = float("6.13309e-05") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("0.712006") + mean = float("0.00169782") + std = float("0.0189379") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py new file mode 100644 index 000000000..c7c749882 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x5376xi64) <- (2x3x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (6xi32) <- (2x3x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (10752xi32) <- (6xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 5376] + + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 
1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (6x4xf32) <- (2x3x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (10752x4xf32) <- (6x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 5376, 4] + + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x5376x11xf32) <- (2x5376xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x5376x10xf32) <- (2x5376x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x3x1xf32) <- (2x3x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x5376xf32) <- (2x3x5376xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x5376x10xf32) <- (2x5376x10xf32, 2x5376x1xf32) + multiply_0 = 
paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..3b4c91958 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +2c26a537c16306bff9e45eef0a8e89a4bfc4ef2474aa9287527ef7c6aae5a086 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py new file mode 100644 index 000000000..b3f3bee9e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 12096, 10] + dtype = "float32" + min_val = float("5.85845e-11") + max_val = float("0.891047") + mean = float("0.00620757") + std = float("0.0222101") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 12096, 40] + dtype = "float32" + min_val = float("-15.4735") + max_val = float("25.7451") + mean = float("0.798417") + std = float("2.11877") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [12096, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("764.0") + mean = float("384.0") + std = float("221.675") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [12096, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py new file mode 100644 index 000000000..ffb87dfd6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (2x-1x40xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("10"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x10xf32) <- (2x-1x40xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x10xf32) <- (-1x-1x4x10xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x-1x4x10xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (2x-1x10xf32) <- (2x-1x10xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + 
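+            # (only share_data__0, the detached class scores, and multiply_0,
+            # the decoded boxes rescaled by the per-anchor stride data_3,
+            # survive as outputs; the rest is freed)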
divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py new file mode 100644 index 000000000..88fef0bea --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 10, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..1dc0e3cd9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +1ebfa0731cd404fc0d11b70f0637266da6c267aa7f705df9df9bd0ea79785b39 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py new file mode 100644 index 000000000..b94668a44 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py @@ -0,0 +1,134 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.699884] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.667963] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.675792] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.676071] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.658719] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.620637] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.637685] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.619238] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.773168] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.635316] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.623672] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.620323] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.621219] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.624329] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.733117] + + +class Program_weight_tensor_data_15: + name = "data_15" + 
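+    # Each Program_weight_tensor_data_* class records one captured subgraph
+    # input: its name, shape and dtype, plus either the literal values (small
+    # tensors) or summary statistics with data = None (large tensors).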
shape = [1] + dtype = "float32" + data = [0.557224] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.579909] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.70327] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [2, 3, 768, 768] + dtype = "float32" + max_val = float("0.933333") + mean = float("0.380665") + std = float("0.139647") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py new file mode 100644 index 000000000..331f6a597 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py @@ -0,0 +1,4040 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + 
parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + 
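+        # parameter_0 .. parameter_372 are the captured weights (conv filters,
+        # biases and batch-norm scale/offset/mean/variance tensors);
+        # data_0 .. data_17 are per-block scalar multipliers and data_18 is
+        # the 2x3x768x768 input batch.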
parameter_371, + parameter_372, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + ): + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_18, parameter_372, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_18, parameter_372 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_371, + parameter_370, + parameter_369, + parameter_368, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_368, parameter_369, parameter_370, parameter_371 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_367, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_367 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, parameter_366 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_357, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_356, 
+ parameter_355, + parameter_354, + parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_352, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_342, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + 
batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_327, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_321, + parameter_320, + parameter_319, + parameter_318, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_318, parameter_319, parameter_320, parameter_321 + + # pd_op.conv2d: 
(2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_317 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_316, + parameter_315, + parameter_314, + parameter_313, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_313, parameter_314, parameter_315, parameter_316 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_312, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_312 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_311, + parameter_310, + parameter_309, + parameter_308, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_308, parameter_309, parameter_310, parameter_311 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_307, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_307 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_306, + parameter_305, + parameter_304, + parameter_303, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_303, parameter_304, parameter_305, parameter_306 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_302, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_302 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( 
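+                # The (lambda x, f: f(x)) wrapper normalizes the return value of
+                # batch_norm: a single tensor is padded to a 6-tuple so the
+                # unpacking above always succeeds; only the first element (the
+                # normalized activation) is consumed downstream.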
+ conv2d_14, + parameter_301, + parameter_300, + parameter_299, + parameter_298, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_298, parameter_299, parameter_300, parameter_301 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_5 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_297 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_296, full_int_array_1) + del parameter_296 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_295, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_13 = 
paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_290, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_285, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, parameter_284 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_280, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_275, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_270, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_269, + parameter_268, + parameter_267, + parameter_266, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_266, parameter_267, parameter_268, parameter_269 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_265, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_265 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_264, + parameter_263, + parameter_262, + parameter_261, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_261, parameter_262, parameter_263, parameter_264 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_260, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_260 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_259, + parameter_258, + parameter_257, + parameter_256, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_256, parameter_257, parameter_258, parameter_259 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_255, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_255 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_254, + parameter_253, + parameter_252, + parameter_251, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_251, parameter_252, parameter_253, parameter_254 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_250, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_250 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_249, + parameter_248, + parameter_247, + parameter_246, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_246, parameter_247, parameter_248, parameter_249 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_245, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_245 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_244, + parameter_243, + parameter_242, + parameter_241, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_241, parameter_242, parameter_243, parameter_244 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_240, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_240 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_239, + parameter_238, + parameter_237, + parameter_236, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_236, parameter_237, parameter_238, parameter_239 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_235, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_235 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_234, + parameter_233, + parameter_232, + parameter_231, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_231, parameter_232, parameter_233, parameter_234 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_230, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_230 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_229, + parameter_228, + parameter_227, + parameter_226, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_226, parameter_227, parameter_228, parameter_229 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_225, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_225 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_224, + parameter_223, + parameter_222, + parameter_221, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_221, parameter_222, parameter_223, parameter_224 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_220, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_220 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 
96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_219, + parameter_218, + parameter_217, + parameter_216, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_216, parameter_217, parameter_218, parameter_219 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_215, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_215 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_214, + parameter_213, + parameter_212, + parameter_211, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_211, parameter_212, parameter_213, parameter_214 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_210, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_210 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_209, + parameter_208, + parameter_207, + parameter_206, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_206, parameter_207, parameter_208, parameter_209 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_205, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_205 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_204, + parameter_203, + parameter_202, + parameter_201, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del parameter_201, parameter_202, parameter_203, parameter_204 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_200, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_200 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_199, + parameter_198, + parameter_197, + parameter_196, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_196, parameter_197, parameter_198, parameter_199 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_195, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_195 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_194, + parameter_193, + parameter_192, + parameter_191, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_191, parameter_192, parameter_193, parameter_194 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_190, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_190 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_189, + parameter_188, + parameter_187, + parameter_186, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_186, parameter_187, parameter_188, parameter_189 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_28 = 
paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_185 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_184, full_int_array_1) + del parameter_184 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_183, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_183 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_182, + parameter_181, + parameter_180, + parameter_179, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_179, parameter_180, parameter_181, parameter_182 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_178, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_178 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_177, + parameter_176, + parameter_175, + parameter_174, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_174, parameter_175, parameter_176, parameter_177 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_173, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_173 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_172, + parameter_171, + parameter_170, + parameter_169, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_169, parameter_170, parameter_171, parameter_172 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_168, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_168 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_167, + parameter_166, + parameter_165, + parameter_164, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_164, parameter_165, parameter_166, parameter_167 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_163, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_163 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_162, + parameter_161, + parameter_160, + parameter_159, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_159, parameter_160, parameter_161, parameter_162 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_158, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_158 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_157, + parameter_156, + parameter_155, + parameter_154, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_154, parameter_155, parameter_156, parameter_157 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, 
parameter_153, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_153 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_152, + parameter_151, + parameter_150, + parameter_149, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_149, parameter_150, parameter_151, parameter_152 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_148, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_148 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_147, + parameter_146, + parameter_145, + parameter_144, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_144, parameter_145, parameter_146, parameter_147 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_143, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_143 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_142, + parameter_141, + parameter_140, + parameter_139, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_139, parameter_140, parameter_141, parameter_142 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_138, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_138 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + 
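+            # 1x1 branch of this block: its BN output is scaled by the captured
+            # scalar data_10 and added to the parallel 3x3 branch
+            # (batch_norm__270) before the swish activation and the skip add.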
paddle._C_ops.batch_norm( + conv2d_48, + parameter_137, + parameter_136, + parameter_135, + parameter_134, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_134, parameter_135, parameter_136, parameter_137 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_133, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_133 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_132, + parameter_131, + parameter_130, + parameter_129, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_129, parameter_130, parameter_131, parameter_132 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_128, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_128 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_127, + parameter_126, + parameter_125, + parameter_124, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_124, parameter_125, parameter_126, parameter_127 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_123, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_123 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_122, + parameter_121, + parameter_120, + parameter_119, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_119, parameter_120, parameter_121, parameter_122 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 
2x192x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_118, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_118 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_117, + parameter_116, + parameter_115, + parameter_114, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_114, parameter_115, parameter_116, parameter_117 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_113, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_113 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_112, + parameter_111, + parameter_110, + parameter_109, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_109, parameter_110, parameter_111, parameter_112 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_108, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_108 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_107, + parameter_106, + parameter_105, + parameter_104, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_104, parameter_105, parameter_106, parameter_107 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) 
+ add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_103, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_103 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_102, + parameter_101, + parameter_100, + parameter_99, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_99 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_98, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_98 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_97, + parameter_96, + parameter_95, + parameter_94, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_94, parameter_95, parameter_96, parameter_97 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_93, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_93 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_92, + parameter_91, + parameter_90, + parameter_89, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_89, parameter_90, parameter_91, parameter_92 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_88, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_88 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + 
batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_87, + parameter_86, + parameter_85, + parameter_84, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_84, parameter_85, parameter_86, parameter_87 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_83, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_83 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_82, + parameter_81, + parameter_80, + parameter_79, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_79, parameter_80, parameter_81, parameter_82 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_78 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_77, + parameter_76, + parameter_75, + parameter_74, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_74, parameter_75, parameter_76, parameter_77 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_72, full_int_array_1) + del parameter_72 + + # pd_op.add: (2x384x1x1xf32) <- 
(2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_71 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_70, + parameter_69, + parameter_68, + parameter_67, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_67, parameter_68, parameter_69, parameter_70 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_66, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_66 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_65, + parameter_64, + parameter_63, + parameter_62, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_62, parameter_63, parameter_64, parameter_65 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_60, + parameter_59, + parameter_58, + parameter_57, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_57, parameter_58, parameter_59, parameter_60 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_56, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_56 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 
384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_55, + parameter_54, + parameter_53, + parameter_52, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_52, parameter_53, parameter_54, parameter_55 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_45, + parameter_44, + parameter_43, + parameter_42, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_42, parameter_43, parameter_44, parameter_45 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_41, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_41 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_40, + parameter_39, + parameter_38, + parameter_37, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_37, parameter_38, parameter_39, parameter_40 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # 
pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_36, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_35, + parameter_34, + parameter_33, + parameter_32, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_32, parameter_33, parameter_34, parameter_35 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_31, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_31 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_30, + parameter_29, + parameter_28, + parameter_27, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_27, parameter_28, parameter_29, parameter_30 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_25, + parameter_24, + parameter_23, + parameter_22, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_22, parameter_23, parameter_24, parameter_25 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # 
pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_20, + parameter_19, + parameter_18, + parameter_17, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_17, parameter_18, parameter_19, parameter_20 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_16, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.reshape: (1x768x1x1xf32) 
<- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) + del full_int_array_1, parameter_5 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__432) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_5, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + 
batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + 
batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_2, + concat_3, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + 
conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_6, + swish_7, + swish_8, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py new file mode 100644 index 000000000..f5b3eb50d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py @@ -0,0 +1,3989 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1024] + dtype = "float32" + min_val = float("-3.7594") + max_val = float("-0.734446") + mean = float("-2.18722") + std = float("0.428724") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [1024] + dtype = "float32" + min_val = float("1.61913") + max_val = float("4.44136") + mean = float("3.08039") + std = float("0.25425") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [1024] + dtype = "float32" + min_val = float("0.00437889") + max_val = float("0.0223173") + mean = float("0.00872826") + std = float("0.00173091") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1024] + dtype = "float32" + min_val = float("-0.140825") + max_val = float("0.123559") + mean = float("-0.0558655") + std = float("0.0304319") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0427729") + max_val = float("0.0695573") + mean = float("-0.000391863") + std = float("0.00403905") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [768] + dtype = "float32" + min_val = float("-0.014467") + max_val = float("0.00131875") + mean = float("-0.000761015") + std = float("0.00204153") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [768, 768, 1, 1] + dtype = "float32" + 
min_val = float("-0.0787519") + max_val = float("0.135878") + mean = float("-0.000282851") + std = float("0.0016268") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318654") + mean = float("-0.310798") + std = float("0.291236") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("0.188523") + max_val = float("1.82125") + mean = float("0.609641") + std = float("0.262607") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384] + dtype = "float32" + min_val = float("5.23505e-05") + max_val = float("0.00107224") + mean = float("0.000231574") + std = float("0.000114041") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.0913202") + max_val = float("0.0738998") + mean = float("0.0208982") + std = float("0.0171143") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020214") + max_val = float("0.0255239") + mean = float("-0.000361046") + std = float("0.00271802") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318949") + mean = float("-0.310739") + std = float("0.291254") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.334653") + max_val = float("2.60511") + mean = float("1.02603") + std = float("0.290253") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.000593004") + max_val = float("0.00596735") + mean = float("0.00204404") + std = float("0.00073718") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.227759") + max_val = float("0.112191") + mean = float("0.0216656") + std = float("0.0367499") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0190584") + max_val = float("0.0259183") + mean = float("-4.76047e-05") + std = float("0.0017617") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("-2.58225") + max_val = float("0.0329867") + mean = float("-1.56843") + std = float("0.415962") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("0.52002") + max_val = float("1.64429") + mean = float("1.13566") + std = float("0.149475") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("0.0404637") + max_val = float("0.23") + mean = float("0.0856589") + std = float("0.0233439") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.9033") + max_val = float("0.385661") + mean = float("-0.25773") + std = float("0.123402") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384, 
3, 3] + dtype = "float32" + min_val = float("-0.0213207") + max_val = float("0.0602371") + mean = float("-0.000201951") + std = float("0.00231308") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("-1.93927") + max_val = float("0.644474") + mean = float("-0.574884") + std = float("0.358671") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("0.163873") + max_val = float("2.06585") + mean = float("0.562027") + std = float("0.227242") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("7.65946e-05") + max_val = float("0.00146603") + mean = float("0.000260801") + std = float("0.000127115") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.047045") + max_val = float("0.0685866") + mean = float("0.0209886") + std = float("0.0147269") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246209") + max_val = float("0.0323191") + mean = float("-0.00038074") + std = float("0.00249603") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.645257") + mean = float("-0.574812") + std = float("0.358742") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("0.58315") + max_val = float("2.15642") + mean = float("1.08405") + std = float("0.255745") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("0.0013599") + max_val = float("0.00896475") + mean = float("0.00289759") + std = float("0.000860853") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0821017") + max_val = float("0.146645") + mean = float("0.0336192") + std = float("0.0396185") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017236") + max_val = float("0.0310435") + mean = float("-8.47071e-05") + std = float("0.00189556") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-2.39591") + max_val = float("0.845752") + mean = float("-1.40539") + std = float("0.360596") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("0.453112") + max_val = float("1.91948") + mean = float("1.16636") + std = float("0.14802") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("0.0300933") + max_val = float("0.138775") + mean = float("0.0607843") + std = float("0.0156745") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.749117") + max_val = float("0.836662") + mean = float("-0.184023") + std = float("0.110734") + data = None + + +class Program_weight_tensor_parameter_36: + 
name = "parameter_36" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259567") + max_val = float("0.0450409") + mean = float("-0.000200361") + std = float("0.00234146") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-1.8762") + max_val = float("0.453243") + mean = float("-0.485339") + std = float("0.376467") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("0.0773354") + max_val = float("2.11925") + mean = float("0.441956") + std = float("0.217663") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("6.01092e-05") + max_val = float("0.00133181") + mean = float("0.000306226") + std = float("0.000149439") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.0475575") + max_val = float("0.071738") + mean = float("0.0252212") + std = float("0.0164931") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0207296") + max_val = float("0.0301957") + mean = float("-0.000479918") + std = float("0.0021441") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-1.87654") + max_val = float("0.453653") + mean = float("-0.485263") + std = float("0.376563") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("0.521871") + max_val = float("2.22439") + mean = float("1.05289") + std = float("0.260102") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("0.00177683") + max_val = float("0.00907934") + mean = float("0.0039468") + std = float("0.00118085") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.209845") + max_val = float("0.180608") + mean = float("0.0397265") + std = float("0.04484") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0177497") + max_val = float("0.036737") + mean = float("-9.16795e-05") + std = float("0.00200706") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-2.15635") + max_val = float("0.418177") + mean = float("-1.36712") + std = float("0.277468") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.706134") + max_val = float("1.6357") + mean = float("1.14301") + std = float("0.101583") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.0216198") + max_val = float("0.138164") + mean = float("0.046727") + std = float("0.0129664") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.694464") + max_val = float("0.208372") + mean = float("-0.129315") + std = float("0.0938846") + data = None + + +class 
Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274071") + max_val = float("0.0448565") + mean = float("-0.000158418") + std = float("0.00223888") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-2.9232") + max_val = float("1.66463") + mean = float("-0.760372") + std = float("0.643546") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("0.953224") + max_val = float("2.91794") + mean = float("1.86322") + std = float("0.27618") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("0.00275058") + max_val = float("0.01231") + mean = float("0.00516812") + std = float("0.00130652") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.249794") + max_val = float("0.145992") + mean = float("0.0635442") + std = float("0.0326689") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0371909") + max_val = float("0.0509187") + mean = float("-0.000727671") + std = float("0.00522845") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("-2.2471") + max_val = float("0.681977") + mean = float("-0.777142") + std = float("0.472903") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("0.965853") + max_val = float("2.89359") + mean = float("2.09705") + std = float("0.305433") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384] + dtype = "float32" + min_val = float("0.000799495") + max_val = float("0.00402168") + mean = float("0.00198534") + std = float("0.0004402") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0161372") + max_val = float("0.0799072") + mean = float("0.0349754") + std = float("0.0164707") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0815437") + max_val = float("0.0646253") + mean = float("-0.000388202") + std = float("0.00359255") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [768] + dtype = "float32" + min_val = float("-2.40199") + max_val = float("0.642394") + mean = float("-0.908374") + std = float("0.339302") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [768] + dtype = "float32" + min_val = float("0.530297") + max_val = float("1.90727") + mean = float("0.919687") + std = float("0.149179") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [768] + dtype = "float32" + min_val = float("0.00625688") + max_val = float("0.056665") + mean = float("0.0153943") + std = float("0.00459564") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [768] + dtype = "float32" + min_val = float("-0.235652") + max_val = float("0.254751") + mean = float("0.0393354") + std = 
float("0.0563281") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0378314") + max_val = float("0.0543419") + mean = float("-9.75912e-05") + std = float("0.00233888") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [512] + dtype = "float32" + min_val = float("-3.38998") + max_val = float("1.66652") + mean = float("-1.16179") + std = float("0.513719") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [512] + dtype = "float32" + min_val = float("0.523767") + max_val = float("1.67712") + mean = float("1.11122") + std = float("0.148184") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [512] + dtype = "float32" + min_val = float("0.00233511") + max_val = float("0.0167819") + mean = float("0.00761769") + std = float("0.00204484") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [512] + dtype = "float32" + min_val = float("-0.172067") + max_val = float("0.0981938") + mean = float("-0.0487285") + std = float("0.0396677") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202262") + max_val = float("0.184296") + mean = float("-0.000573477") + std = float("0.00792306") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("-0.0100703") + max_val = float("0.00138871") + mean = float("-0.00295173") + std = float("0.00227127") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202729") + max_val = float("0.140205") + mean = float("-0.002055") + std = float("0.00490701") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192] + dtype = "float32" + min_val = float("-1.97045") + max_val = float("0.409864") + mean = float("-0.348766") + std = float("0.333488") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("0.0528864") + max_val = float("2.15987") + mean = float("0.581255") + std = float("0.419833") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("8.99309e-05") + max_val = float("0.00136239") + mean = float("0.00045405") + std = float("0.000217854") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("-0.0345233") + max_val = float("0.0542267") + mean = float("0.00534646") + std = float("0.0149125") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.023487") + max_val = float("0.0581182") + mean = float("-0.000339748") + std = float("0.0040934") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192] + dtype = "float32" + min_val = float("-1.97037") + max_val = float("0.410702") + mean = float("-0.34863") + std = float("0.333546") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("0.372338") + 
max_val = float("2.70216") + mean = float("1.20181") + std = float("0.493699") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.00122552") + max_val = float("0.0156617") + mean = float("0.00510239") + std = float("0.00188493") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("-0.097226") + max_val = float("0.146797") + mean = float("0.0203808") + std = float("0.0428675") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0289902") + max_val = float("0.0378296") + mean = float("-0.000154473") + std = float("0.00313532") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192] + dtype = "float32" + min_val = float("-2.89065") + max_val = float("-0.176734") + mean = float("-1.31453") + std = float("0.40113") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("0.696524") + max_val = float("2.09454") + mean = float("1.17918") + std = float("0.169868") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.0626606") + max_val = float("0.335775") + mean = float("0.130864") + std = float("0.0432639") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("-2.50771") + max_val = float("1.70173") + mean = float("-0.202725") + std = float("0.37842") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0331927") + max_val = float("0.0456383") + mean = float("-0.000188198") + std = float("0.00374306") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192] + dtype = "float32" + min_val = float("-1.9404") + max_val = float("0.513024") + mean = float("-0.279434") + std = float("0.321452") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("0.0454025") + max_val = float("1.77027") + mean = float("0.444331") + std = float("0.305722") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("7.44986e-05") + max_val = float("0.00137158") + mean = float("0.000401239") + std = float("0.000216438") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("-0.029272") + max_val = float("0.0469656") + mean = float("0.00801306") + std = float("0.0116412") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0234926") + max_val = float("0.036738") + mean = float("-0.000377237") + std = float("0.00377417") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192] + dtype = "float32" + min_val = float("-1.94044") + max_val = float("0.51462") + mean = float("-0.279235") + std = float("0.321666") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = 
"float32" + min_val = float("0.483074") + max_val = float("2.27001") + mean = float("1.13833") + std = float("0.37563") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.00270072") + max_val = float("0.014292") + mean = float("0.00597179") + std = float("0.00181526") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("-0.0923253") + max_val = float("0.111642") + mean = float("0.0327645") + std = float("0.0355125") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0231072") + max_val = float("0.038718") + mean = float("-0.000192078") + std = float("0.00338604") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192] + dtype = "float32" + min_val = float("-2.50828") + max_val = float("-0.123237") + mean = float("-1.28886") + std = float("0.44374") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("0.65494") + max_val = float("1.66968") + mean = float("1.19938") + std = float("0.166128") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.0463808") + max_val = float("0.199074") + mean = float("0.0939914") + std = float("0.0271592") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("-2.14238") + max_val = float("0.410379") + mean = float("-0.110821") + std = float("0.246177") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0362254") + max_val = float("0.0508084") + mean = float("-0.000238085") + std = float("0.00389331") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.468575") + mean = float("-0.262432") + std = float("0.335818") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("0.00295124") + max_val = float("1.67875") + mean = float("0.351961") + std = float("0.251699") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("9.30792e-07") + max_val = float("0.00191072") + mean = float("0.000361058") + std = float("0.000248966") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("-0.0372993") + max_val = float("0.0527515") + mean = float("0.0101454") + std = float("0.0121623") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0303466") + max_val = float("0.0356195") + mean = float("-0.000425557") + std = float("0.0036432") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.470016") + mean = float("-0.262262") + std = float("0.336041") + data = None + + +class 
Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("0.406102") + max_val = float("1.97794") + mean = float("1.06588") + std = float("0.334156") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.00267891") + max_val = float("0.013042") + mean = float("0.00612078") + std = float("0.00178677") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("-0.0635846") + max_val = float("0.115227") + mean = float("0.0353282") + std = float("0.0320147") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0321474") + max_val = float("0.0388371") + mean = float("-0.000190596") + std = float("0.00354187") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192] + dtype = "float32" + min_val = float("-2.49735") + max_val = float("0.137985") + mean = float("-1.24334") + std = float("0.424316") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("0.652126") + max_val = float("1.80991") + mean = float("1.16717") + std = float("0.165409") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.0307206") + max_val = float("0.141566") + mean = float("0.0673632") + std = float("0.0172678") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("-1.51549") + max_val = float("0.284447") + mean = float("-0.0982023") + std = float("0.179232") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.05013") + max_val = float("0.0656662") + mean = float("-0.000261502") + std = float("0.00399974") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192] + dtype = "float32" + min_val = float("-2.07916") + max_val = float("0.533363") + mean = float("-0.272351") + std = float("0.375289") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("0.000510371") + max_val = float("0.732354") + mean = float("0.211968") + std = float("0.136272") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("6.27846e-08") + max_val = float("0.0007887") + mean = float("0.000243037") + std = float("0.000135288") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("-0.0197245") + max_val = float("0.0315593") + mean = float("0.00618711") + std = float("0.00922289") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0202783") + max_val = float("0.036136") + mean = float("-0.000265605") + std = float("0.00319736") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192] + dtype = "float32" + min_val = float("-2.07922") + max_val = float("0.535166") + 
mean = float("-0.272236") + std = float("0.375502") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("0.396505") + max_val = float("1.96272") + mean = float("0.958924") + std = float("0.303858") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.00316561") + max_val = float("0.0147681") + mean = float("0.00641687") + std = float("0.00196185") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("-0.0910185") + max_val = float("0.161293") + mean = float("0.0384701") + std = float("0.0343716") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0299549") + max_val = float("0.0371106") + mean = float("-0.000205046") + std = float("0.00364104") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0810353") + mean = float("-1.23693") + std = float("0.434057") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("0.761623") + max_val = float("1.62105") + mean = float("1.15096") + std = float("0.142541") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.0268285") + max_val = float("0.102609") + mean = float("0.0483826") + std = float("0.0115209") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("-1.23693") + max_val = float("0.284811") + mean = float("-0.0747383") + std = float("0.163901") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0531238") + max_val = float("0.0579085") + mean = float("-0.000268921") + std = float("0.00396934") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192] + dtype = "float32" + min_val = float("-1.21219") + max_val = float("0.446681") + mean = float("-0.232278") + std = float("0.339349") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-9.82711e-05") + max_val = float("0.677789") + mean = float("0.192032") + std = float("0.120727") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("2.25073e-10") + max_val = float("0.000874414") + mean = float("0.000240801") + std = float("0.000144458") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("-0.0493103") + max_val = float("0.0373767") + mean = float("0.00675281") + std = float("0.0116645") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0342199") + max_val = float("0.0396943") + mean = float("-0.000272099") + std = float("0.00329482") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = 
[192] + dtype = "float32" + min_val = float("-1.21223") + max_val = float("0.447751") + mean = float("-0.232181") + std = float("0.33961") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("0.382831") + max_val = float("1.56386") + mean = float("0.852099") + std = float("0.259991") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.00221295") + max_val = float("0.013395") + mean = float("0.00625423") + std = float("0.00179181") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("-0.0844719") + max_val = float("0.142009") + mean = float("0.0387308") + std = float("0.0378044") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0323048") + max_val = float("0.0364338") + mean = float("-0.000186547") + std = float("0.00363857") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192] + dtype = "float32" + min_val = float("-2.48701") + max_val = float("-0.131293") + mean = float("-1.25014") + std = float("0.418255") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("0.689678") + max_val = float("1.5199") + mean = float("1.12491") + std = float("0.13482") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.0183928") + max_val = float("0.0607598") + mean = float("0.0349167") + std = float("0.00875797") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("-0.716469") + max_val = float("0.320455") + mean = float("-0.0746187") + std = float("0.131028") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0610342") + max_val = float("0.0592016") + mean = float("-0.000277763") + std = float("0.00397261") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192] + dtype = "float32" + min_val = float("-1.21753") + max_val = float("0.499396") + mean = float("-0.167678") + std = float("0.2936") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("0.00836385") + max_val = float("1.53625") + mean = float("0.238111") + std = float("0.211728") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("1.96593e-05") + max_val = float("0.00679754") + mean = float("0.000504925") + std = float("0.000658808") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("-0.0656068") + max_val = float("0.0861781") + mean = float("0.00950526") + std = float("0.0164034") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0600528") + max_val = float("0.0312537") + mean = float("-0.000425532") + std = float("0.00397123") + data = None + + 
+class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192] + dtype = "float32" + min_val = float("-1.21747") + max_val = float("0.500448") + mean = float("-0.167516") + std = float("0.293818") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("0.354999") + max_val = float("1.44989") + mean = float("0.756941") + std = float("0.21662") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.00436972") + max_val = float("0.0168976") + mean = float("0.00908178") + std = float("0.00268591") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("-0.15942") + max_val = float("0.153659") + mean = float("0.0492924") + std = float("0.0450903") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062497") + max_val = float("0.0530577") + mean = float("-0.000241352") + std = float("0.00357809") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192] + dtype = "float32" + min_val = float("-1.87905") + max_val = float("-0.211382") + mean = float("-1.14643") + std = float("0.325653") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("0.788784") + max_val = float("1.59753") + mean = float("1.12152") + std = float("0.12987") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.0156512") + max_val = float("0.0757502") + mean = float("0.0313837") + std = float("0.00924696") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("-0.689275") + max_val = float("0.284905") + mean = float("-0.0666799") + std = float("0.130681") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062874") + max_val = float("0.076648") + mean = float("-0.000213471") + std = float("0.00383126") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192] + dtype = "float32" + min_val = float("-2.86217") + max_val = float("1.58057") + mean = float("-0.0275412") + std = float("0.747651") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("0.487672") + max_val = float("2.0776") + mean = float("0.90163") + std = float("0.232007") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.00975444") + max_val = float("0.0591661") + mean = float("0.0230902") + std = float("0.00900034") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("-0.230409") + max_val = float("0.297734") + mean = float("-0.0377667") + std = float("0.0596951") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.108831") + max_val = float("0.0931739") + mean = 
float("-0.000512323") + std = float("0.00842399") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192] + dtype = "float32" + min_val = float("-2.96764") + max_val = float("1.66844") + mean = float("0.0968476") + std = float("0.663233") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("0.830791") + max_val = float("5.55835") + mean = float("1.91342") + std = float("0.933379") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.00614721") + max_val = float("0.0464744") + mean = float("0.0174745") + std = float("0.00563515") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("-0.133354") + max_val = float("0.157826") + mean = float("-0.0239396") + std = float("0.0565686") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0985625") + max_val = float("0.0941202") + mean = float("-0.000511784") + std = float("0.00783691") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [384] + dtype = "float32" + min_val = float("-2.92359") + max_val = float("1.32666") + mean = float("-0.301116") + std = float("0.563662") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [384] + dtype = "float32" + min_val = float("0.631853") + max_val = float("2.47541") + mean = float("1.15998") + std = float("0.257348") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [384] + dtype = "float32" + min_val = float("0.0104507") + max_val = float("0.111288") + mean = float("0.0262303") + std = float("0.0126363") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [384] + dtype = "float32" + min_val = float("-0.269997") + max_val = float("0.244719") + mean = float("0.0226896") + std = float("0.0692565") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0753194") + max_val = float("0.0720032") + mean = float("-0.000103466") + std = float("0.00421781") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [256] + dtype = "float32" + min_val = float("-2.04502") + max_val = float("1.28816") + mean = float("-0.924614") + std = float("0.543015") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [256] + dtype = "float32" + min_val = float("0.517239") + max_val = float("1.68961") + mean = float("1.05432") + std = float("0.176149") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [256] + dtype = "float32" + min_val = float("0.00195657") + max_val = float("0.0265577") + mean = float("0.00628818") + std = float("0.00298858") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [256] + dtype = "float32" + min_val = float("-0.230372") + max_val = float("0.154861") + mean = float("-0.0517653") + std = float("0.0687788") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [256, 192, 1, 1] + dtype = "float32" + 
min_val = float("-0.206154") + max_val = float("0.170783") + mean = float("-0.000884197") + std = float("0.0145162") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("-0.0139357") + max_val = float("0.00388361") + mean = float("-0.00495662") + std = float("0.00371291") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.347135") + max_val = float("0.228777") + mean = float("-0.00389388") + std = float("0.0106293") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("-1.91355") + max_val = float("0.53303") + mean = float("-0.208939") + std = float("0.434311") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.142427") + max_val = float("3.22988") + mean = float("0.635833") + std = float("0.668487") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("7.85249e-05") + max_val = float("0.00245675") + mean = float("0.000588646") + std = float("0.000433443") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96] + dtype = "float32" + min_val = float("-0.0546919") + max_val = float("0.0598857") + mean = float("0.00511828") + std = float("0.0215456") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0500852") + max_val = float("0.0932317") + mean = float("-0.000561284") + std = float("0.00794853") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("-1.91314") + max_val = float("0.534306") + mean = float("-0.208596") + std = float("0.434435") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.343774") + max_val = float("5.47118") + mean = float("1.08565") + std = float("0.88383") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("0.000942165") + max_val = float("0.0158414") + mean = float("0.00535059") + std = float("0.00274901") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96] + dtype = "float32" + min_val = float("-0.137594") + max_val = float("0.212423") + mean = float("0.0122222") + std = float("0.0611614") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0398886") + max_val = float("0.0746673") + mean = float("-0.000229692") + std = float("0.00588155") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("-2.46605") + max_val = float("-0.0202143") + mean = float("-1.22676") + std = float("0.443304") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.542082") + max_val = float("1.6433") + mean = float("0.945634") + std = float("0.172529") + data = None + + +class 
Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("0.041446") + max_val = float("0.237068") + mean = float("0.0867119") + std = float("0.0369728") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96] + dtype = "float32" + min_val = float("-2.80547") + max_val = float("1.61972") + mean = float("-0.194801") + std = float("0.469392") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.150203") + max_val = float("0.114223") + mean = float("-0.000376735") + std = float("0.00724688") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("-1.38826") + max_val = float("0.562406") + mean = float("-0.132909") + std = float("0.347394") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.0453402") + max_val = float("1.86504") + mean = float("0.460875") + std = float("0.366369") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("7.55835e-05") + max_val = float("0.00277897") + mean = float("0.000754646") + std = float("0.000610909") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96] + dtype = "float32" + min_val = float("-0.048531") + max_val = float("0.0464578") + mean = float("0.00676756") + std = float("0.0176703") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0483138") + max_val = float("0.0415922") + mean = float("-0.000498568") + std = float("0.00710731") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("-1.38834") + max_val = float("0.5648") + mean = float("-0.13256") + std = float("0.347894") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.370504") + max_val = float("2.32822") + mean = float("0.901933") + std = float("0.426522") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("0.00323837") + max_val = float("0.0234632") + mean = float("0.00912274") + std = float("0.00470534") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96] + dtype = "float32" + min_val = float("-0.0965735") + max_val = float("0.12145") + mean = float("0.0354439") + std = float("0.0432668") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.058655") + max_val = float("0.0591114") + mean = float("-0.000356621") + std = float("0.0059174") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("-3.31955") + max_val = float("0.36603") + mean = float("-1.17895") + std = float("0.556023") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.473098") + max_val = float("1.98183") + mean = float("1.03911") + 
std = float("0.238708") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("0.0282091") + max_val = float("0.14579") + mean = float("0.0545779") + std = float("0.0168222") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96] + dtype = "float32" + min_val = float("-1.24896") + max_val = float("0.504954") + mean = float("-0.0599842") + std = float("0.268023") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.147666") + max_val = float("0.152112") + mean = float("-0.000410438") + std = float("0.00711818") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("-1.24956") + max_val = float("0.58267") + mean = float("-0.109749") + std = float("0.291966") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0243293") + max_val = float("1.27785") + mean = float("0.324816") + std = float("0.192866") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("6.35226e-05") + max_val = float("0.00358684") + mean = float("0.000713081") + std = float("0.000576864") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-0.0385319") + max_val = float("0.0503831") + mean = float("0.00407058") + std = float("0.0162571") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0448708") + max_val = float("0.0573038") + mean = float("-0.000336044") + std = float("0.00726838") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("-1.24942") + max_val = float("0.584539") + mean = float("-0.109552") + std = float("0.292478") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.315495") + max_val = float("1.67063") + mean = float("0.747087") + std = float("0.257847") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("0.00338498") + max_val = float("0.0252591") + mean = float("0.0101982") + std = float("0.00406466") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-0.0546488") + max_val = float("0.145267") + mean = float("0.0275953") + std = float("0.038381") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.065253") + max_val = float("0.0583777") + mean = float("-0.000331097") + std = float("0.00602268") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("-3.58296") + max_val = float("0.290726") + mean = float("-1.12856") + std = float("0.572409") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.511106") + 
max_val = float("2.19165") + mean = float("1.05198") + std = float("0.238255") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("0.0201149") + max_val = float("0.0748228") + mean = float("0.0399945") + std = float("0.0094601") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-0.822261") + max_val = float("0.396367") + mean = float("-0.0472576") + std = float("0.195175") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0973524") + max_val = float("0.130681") + mean = float("-0.000422376") + std = float("0.00719502") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("-0.892064") + max_val = float("0.529384") + mean = float("-0.160709") + std = float("0.281574") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0191223") + max_val = float("1.40524") + mean = float("0.32501") + std = float("0.213327") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("4.77273e-05") + max_val = float("0.00366742") + mean = float("0.000733131") + std = float("0.000561341") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-0.0328433") + max_val = float("0.0466064") + mean = float("0.00724984") + std = float("0.0146293") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0499906") + max_val = float("0.0448114") + mean = float("-0.000606145") + std = float("0.00724394") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("-0.891955") + max_val = float("0.530721") + mean = float("-0.160571") + std = float("0.281998") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.17446") + max_val = float("1.78047") + mean = float("0.708571") + std = float("0.284378") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("0.00236768") + max_val = float("0.0256587") + mean = float("0.0102153") + std = float("0.00392496") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-0.0582404") + max_val = float("0.137905") + mean = float("0.0410397") + std = float("0.0378938") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.057305") + max_val = float("0.0650381") + mean = float("-0.000417143") + std = float("0.00601776") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("-2.65777") + max_val = float("0.065358") + mean = float("-1.06432") + std = float("0.48826") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = 
[96] + dtype = "float32" + min_val = float("0.512951") + max_val = float("1.73806") + mean = float("1.01547") + std = float("0.193357") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("0.0176967") + max_val = float("0.0574371") + mean = float("0.0307481") + std = float("0.00699669") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-0.761559") + max_val = float("0.60821") + mean = float("-0.0646145") + std = float("0.194302") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0738037") + max_val = float("0.125248") + mean = float("-0.000426247") + std = float("0.0069708") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("-0.978262") + max_val = float("0.489992") + mean = float("-0.136691") + std = float("0.278636") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("0.0498074") + max_val = float("1.1462") + mean = float("0.296075") + std = float("0.172323") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96] + dtype = "float32" + min_val = float("0.000180546") + max_val = float("0.00509335") + mean = float("0.00108394") + std = float("0.00072352") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-0.041806") + max_val = float("0.0564684") + mean = float("0.00557215") + std = float("0.0180702") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0731207") + max_val = float("0.0763792") + mean = float("-0.000594618") + std = float("0.00825765") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("-0.978083") + max_val = float("0.492448") + mean = float("-0.136655") + std = float("0.279122") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("0.236133") + max_val = float("1.69671") + mean = float("0.603953") + std = float("0.228164") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96] + dtype = "float32" + min_val = float("0.00612804") + max_val = float("0.0281159") + mean = float("0.0137457") + std = float("0.00478723") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-0.0713362") + max_val = float("0.135694") + mean = float("0.0273635") + std = float("0.0460879") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0654835") + max_val = float("0.0522648") + mean = float("-0.00036204") + std = float("0.0060426") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("-3.46434") + max_val = float("0.199609") + mean = float("-1.00527") + std = float("0.548081") + data = None + + +class 
Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("0.686506") + max_val = float("2.51291") + mean = float("1.07427") + std = float("0.212412") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96] + dtype = "float32" + min_val = float("0.013513") + max_val = float("0.0542585") + mean = float("0.0262785") + std = float("0.00851322") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [96] + dtype = "float32" + min_val = float("-0.482054") + max_val = float("0.527892") + mean = float("-0.0515599") + std = float("0.192746") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0824841") + max_val = float("0.0934753") + mean = float("-0.000357672") + std = float("0.00712731") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [96] + dtype = "float32" + min_val = float("-0.625302") + max_val = float("0.449836") + mean = float("-0.0825559") + std = float("0.256738") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [96] + dtype = "float32" + min_val = float("0.0910018") + max_val = float("1.30085") + mean = float("0.309049") + std = float("0.196412") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [96] + dtype = "float32" + min_val = float("0.000380114") + max_val = float("0.0177806") + mean = float("0.00357894") + std = float("0.00283759") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [96] + dtype = "float32" + min_val = float("-0.0360021") + max_val = float("0.0301813") + mean = float("-5.04728e-05") + std = float("0.0106632") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0925016") + max_val = float("0.0753255") + mean = float("-0.00105853") + std = float("0.00936655") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [96] + dtype = "float32" + min_val = float("-0.625183") + max_val = float("0.450937") + mean = float("-0.082575") + std = float("0.257081") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [96] + dtype = "float32" + min_val = float("0.210658") + max_val = float("1.42703") + mean = float("0.527208") + std = float("0.258269") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [96] + dtype = "float32" + min_val = float("0.0102276") + max_val = float("0.0952331") + mean = float("0.0336542") + std = float("0.0171249") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.108112") + max_val = float("0.0898917") + mean = float("-0.00831331") + std = float("0.0381217") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0885375") + max_val = float("0.0525934") + mean = float("-0.000466484") + std = float("0.00584459") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [96] + dtype = "float32" + min_val = float("-2.40893") + max_val = float("0.508421") + mean = 
float("-0.828862") + std = float("0.467337") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [96] + dtype = "float32" + min_val = float("0.853968") + max_val = float("2.18309") + mean = float("1.27545") + std = float("0.208741") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [96] + dtype = "float32" + min_val = float("0.011454") + max_val = float("0.0464459") + mean = float("0.0216361") + std = float("0.00778855") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-0.570491") + max_val = float("0.473016") + mean = float("-0.0532507") + std = float("0.173783") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.15411") + max_val = float("0.150524") + mean = float("-0.000241604") + std = float("0.00722176") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("-3.16609") + max_val = float("1.88989") + mean = float("0.501666") + std = float("0.861493") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("0.214988") + max_val = float("2.6299") + mean = float("0.562885") + std = float("0.31708") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96] + dtype = "float32" + min_val = float("0.00763171") + max_val = float("0.154967") + mean = float("0.0323459") + std = float("0.0238839") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-0.271678") + max_val = float("0.329389") + mean = float("-0.0147432") + std = float("0.0938868") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.186901") + max_val = float("0.225419") + mean = float("-0.000291508") + std = float("0.0156297") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("-4.92284") + max_val = float("1.57998") + mean = float("0.384603") + std = float("1.04888") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("0.414126") + max_val = float("6.78093") + mean = float("1.69449") + std = float("1.30795") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96] + dtype = "float32" + min_val = float("0.00531954") + max_val = float("0.272953") + mean = float("0.0381955") + std = float("0.0353968") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-0.17223") + max_val = float("0.443347") + mean = float("0.0466399") + std = float("0.0966002") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.116975") + max_val = float("0.156029") + mean = float("0.000440768") + std = float("0.0149691") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("-2.27475") + 
max_val = float("1.75104") + mean = float("-0.126037") + std = float("0.740702") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.632268") + max_val = float("2.97322") + mean = float("1.08733") + std = float("0.283408") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("0.0109431") + max_val = float("0.233442") + mean = float("0.0433778") + std = float("0.0318779") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192] + dtype = "float32" + min_val = float("-0.576887") + max_val = float("0.269966") + mean = float("-0.0934605") + std = float("0.118655") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0856428") + max_val = float("0.123627") + mean = float("-0.000225745") + std = float("0.00765725") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [128] + dtype = "float32" + min_val = float("-2.81597") + max_val = float("1.9636") + mean = float("-0.71259") + std = float("0.647835") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [128] + dtype = "float32" + min_val = float("0.311227") + max_val = float("2.8783") + mean = float("1.01845") + std = float("0.278722") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [128] + dtype = "float32" + min_val = float("0.000843216") + max_val = float("0.0154502") + mean = float("0.00453611") + std = float("0.00230447") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [128] + dtype = "float32" + min_val = float("-0.237755") + max_val = float("0.26225") + mean = float("0.00315393") + std = float("0.0867451") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.171773") + max_val = float("0.211127") + mean = float("-0.00142636") + std = float("0.0224525") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("-0.0180386") + max_val = float("3.78007e-05") + mean = float("-0.00735479") + std = float("0.00450801") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.30281") + max_val = float("0.123007") + mean = float("-0.00790532") + std = float("0.0180213") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48, 48, 1, 1] + dtype = "float32" 
+ min_val = float("-0.0501789") + max_val = float("0.0563261") + mean = float("-0.00170388") + std = float("0.0129798") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0578676") + max_val = float("0.0799749") + mean = float("-0.000509865") + std = float("0.0110281") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0925274") + max_val = float("0.0949158") + mean = float("-0.00064859") + std = float("0.0123667") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0727088") + max_val = float("0.0782992") + mean = float("-0.00102365") + std = float("0.0139349") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0621898") + max_val = float("0.0692526") + mean = float("-0.000822014") + std = float("0.0111057") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.11162") + max_val = float("0.0943574") + mean = float("-0.000368661") + std = float("0.0125785") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0944494") + max_val = float("0.0702451") + mean = float("-0.00185301") + std = float("0.0172184") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0691644") + max_val = float("0.0974384") + mean = float("-0.000506655") + std = float("0.011691") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48] + dtype = 
"float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.133213") + max_val = float("0.0905212") + mean = float("-0.000334254") + std = float("0.0134452") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.17806") + max_val = float("0.14305") + mean = float("-0.00229242") + std = float("0.0246641") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.135012") + max_val = float("0.178483") + mean = float("-0.0004429") + std = float("0.0226955") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [96] + dtype = "float32" + min_val = float("-3.40701") + max_val = float("3.27538") + mean = float("0.329531") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [96] + dtype = "float32" + min_val = float("0.865919") + max_val = float("4.91404") + mean = float("1.91603") + std = float("0.752783") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [96] + dtype = "float32" + min_val = float("0.705639") + max_val = float("32.368") + mean = float("2.73559") + std = float("3.53943") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [96] + dtype = "float32" + min_val = float("-1.47426") + max_val = float("2.58312") + mean = float("-0.286183") + std = float("0.722428") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.110689") + max_val = float("0.13859") + mean = float("-0.000360127") + std = float("0.0133189") + data = None + + +class 
Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.179264") + max_val = float("0.162144") + mean = float("-0.000679023") + std = float("0.020536") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.347786") + max_val = float("0.218964") + mean = float("-0.000199571") + std = float("0.0261033") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.317155") + max_val = float("0.280865") + mean = float("-0.00214957") + std = float("0.0702742") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..a0da828a8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +92e7456bc3fe2575d932c96b8342e31711029bd21b23c8a9bf17b02309d4c336 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + 
"num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py new file mode 100644 index 000000000..765fadabf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py @@ -0,0 +1,71 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549, 10] + dtype = "float32" + min_val = float("1.49258e-10") + max_val = float("0.896854") + mean = float("0.00647097") + std = float("0.0257209") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3549, 4] + dtype = "float32" + min_val = float("-112.857") + max_val = float("515.47") + mean = float("207.765") + std = float("123.433") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [3549, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("412.0") + mean = float("208.0") + std = float("120.038") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3549, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 49, 1] + dtype = "int32" + min_val = 0 + max_val = 8 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 49, 4] + dtype = "float32" + max_val = float("408.482") + mean = float("110.196") + std = float("133.414") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 49, 1] + dtype = "float32" + max_val = float("1.0") + mean = float("0.571429") + std = float("0.494872") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py new file mode 100644 index 000000000..2cdd157ba --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py @@ -0,0 +1,504 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x49x1x4xf32) <- (2x49x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x3549x4xf32) <- (2x3549x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x49x1x2xf32) <- (2x49x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x49x1x2xf32) <- (2x49x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x3549x2xf32) <- (2x1x3549x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x3549x2xf32) <- (2x1x3549x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del 
full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x49x3549x2xf32) <- (2x49x1x2xf32, 2x1x3549x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x49x3549x2xf32) <- (2x49x1x2xf32, 2x1x3549x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x49x3549x2xf32) <- (2x49x3549x2xf32, 2x49x3549x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x49x3549x2xf32) <- (2x49x3549x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x49x3549xf32) <- (2x49x3549x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x49x1x2xf32) <- (2x49x1x2xf32, 2x49x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x49x1x2xf32) <- (2x49x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x49x1xf32) <- (2x49x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x3549x2xf32) <- (2x1x3549x2xf32, 2x1x3549x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x3549x2xf32) <- (2x1x3549x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_1, subtract_2 + + # pd_op.prod: (2x1x3549xf32) <- (2x1x3549x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x49x3549xf32) <- (2x49x1xf32, 2x1x3549xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del subtract_3 + + # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x10x3549xf32) <- (2x3549x10xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 49] + + # pd_op.tile: (2x49xi32) <- (2x1xi32, 2xi64) 
+ tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x49xi32) <- (2x49x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([2x49xi32, 2x49xi32]) <- (2x49xi32, 2x49xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x49x2xi32) <- ([2x49xi32, 2x49xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x49x3549xf32) <- (2x10x3549xf32, 2x49x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x49x3549xf32) <- (2x49x3549xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x49x3549xf32) <- (2x49x3549xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) + del multiply_0 + + # pd_op.scale: (3549x1xf32) <- (3549x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) + del data_3, full_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x3549x2xf32) <- (3549x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x3549x1xf32, 1x1x3549x1xf32]) <- (1x1x3549x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # builtin.split: (1x1x3549x1xf32, 1x1x3549x1xf32) <- ([1x1x3549x1xf32, 1x1x3549x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32]) <- (2x49x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32) <- ([2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32]) <- (2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x49x3549x4xf32) <- ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1 + + # pd_op.min: (2x49x3549xf32) <- 
(2x49x3549x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del min_0 + + # pd_op.unsqueeze: (1x1x3549x1xf32) <- (3549x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) + del full_int_array_6, scale_1 + + # pd_op.add: (2x49x1x1xf32) <- (2x49x1x1xf32, 2x49x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x49x1x1xf32) <- (2x49x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_9, float("0"), True) + del add_1 + + # pd_op.add: (2x49x1x1xf32) <- (2x49x1x1xf32, 2x49x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (2x49x1x1xf32) <- (2x49x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_9, float("0"), True) + del add_2, full_9 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x3549x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x3549x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x3549x1xf32, 1x1x3549x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x3549x1xf32, 1x1x3549x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32]) <- (2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32) + combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 + + # pd_op.concat: (2x49x3549x4xf32) <- ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2, full_7 + + # pd_op.min: (2x49x3549xf32) <- (2x49x3549x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_8) + del full_8, min_1 + + # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) + cast_1 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_0, data_6) + 
del cast_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_1, data_6) + del cast_1 + + # pd_op.sum: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (2x49x1xb) <- (2x49x1xf32, xf32) + equal_0 = paddle._C_ops.equal(sum_0, full_10) + del sum_0 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_0, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (2x49x1xb) <- (2x49x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() + ) + del full_0 + + # pd_op.cast: (2x49x1xf32) <- (2x49x1xb) + cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (2x49x1xf32) <- (2x49x1xb) + cast_3 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_2) + del add_6, cast_2 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (2x49x3549xf32) <- (2x49x1xf32, 2x49x3549xf32) + add_10 = paddle._C_ops.add(cast_3, add_7) + del add_7, cast_3 + + # pd_op.cast: (2x49x3549xb) <- (2x49x3549xf32) + cast_4 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) + where_0 = paddle._C_ops.where(cast_4, add_8, add_9) + del add_8, add_9, cast_4 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x49x13xf32, 2x49x13xi64) <- (2x49x3549xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, where_0 + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("3549"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x49x13x3549xf32) <- (2x49x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_12, topk_1.dtype), full_12 + ) + del full_12, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x49x3549xf32) <- (2x49x13x3549xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_6) + del data_6, sum_1 + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_3 = 
paddle._C_ops.greater_than(multiply_3, full_10) + del multiply_3 + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_10) + del full_10, multiply_2 + + # pd_op.bitwise_or: (2x49x3549xb) <- (2x49x3549xb, 2x49x3549xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) + cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) + del cast_5, multiply_4 + + # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x3549xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_13 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_13) + del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..26ca02140 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +8a2815233af5ac01ede0f5b0291f7fbde332769a8e8425665222d469c075cedc \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py new file mode 100644 index 000000000..6041bc113 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3549] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 3549, 4] + dtype = "float32" + min_val = float("-3.52679") + max_val = float("54.3718") + mean = float("22.566") + std = float("15.0427") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 3549, 4] + dtype = "float32" + max_val = float("51.0602") + mean = float("19.8807") + std = float("16.6824") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 3549, 10] + dtype = "float32" + max_val = float("0.980323") + mean = 
float("0.000730272") + std = float("0.020635") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [51.8347] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 3549, 40] + dtype = "float32" + min_val = float("-11.2006") + max_val = float("19.6674") + mean = float("0.798417") + std = float("2.05193") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [3549, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("51.5") + mean = float("22.5952") + std = float("14.8898") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py new file mode 100644 index 000000000..e520d1038 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py @@ -0,0 +1,514 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (2x3549xi32) <- (2x3549xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (2x3549x1xi32) <- (2x3549xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (2x3549x4xi32) <- (2x3549x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (2x3549x4xb) <- (2x3549x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (2x3549x4xf32, 2x3549x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (2x3549x4xf32, 2x3549x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (2x3549xf32) <- (2x3549x10xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (2x3549xf32, 2x3549xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # 
builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + 
minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (2x3549x1xb) <- (2x3549xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (2x3549x1xi32) <- (2x3549x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 40] + + # pd_op.tile: (2x3549x40xi32) <- (2x3549x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (2x3549x40xb) <- (2x3549x40xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (2x3549x40xf32, 2x3549x40xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 10] + + # pd_op.reshape: (-1x4x10xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x3549x2xf32, 2x3549x2xf32]) <- (2x3549x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (2x3549x2xf32, 2x3549x2xf32) <- ([2x3549x2xf32, 2x3549x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (2x3549x2xf32) <- (3549x2xf32, 2x3549x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: 
(2x3549x2xf32) <- (2x3549x2xf32, 3549x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x3549x2xf32, 2x3549x2xf32]) <- (2x3549x2xf32, 2x3549x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (2x3549x4xf32) <- ([2x3549x2xf32, 2x3549x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("-2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("6.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x3549x4xf32) <- (2x3549x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_7, full_8) + del concat_0, full_7, full_8 + + # pd_op.masked_select: (-1xf32) <- (2x3549x4xf32, 2x3549x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("2"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x10xf32, -1x4x1xf32) <- (-1x4x10xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("2"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x10xf32, -1x4x1xf32) <- (-1x4x10xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = 
paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..084577211 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +e9163af023b6284e12d8e094164eb61c7fb313e0b3f9a53ed5592562b0ac28a6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json new file mode 100644 index 000000000..8b4fccfd1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py new file mode 100644 index 000000000..7a270e845 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py @@ -0,0 +1,233 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [3.32824] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.0732286] + + +class 
Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [2.19723] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [-1.08555] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.71785] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.13331] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.590431] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.708919] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.743773] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.858462] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.636941] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.828404] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.370716] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.993379] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [1.17653] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.50449] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.633712] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.683349] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.168174") + max_val = float("0.148061") + mean = float("-3.61898e-05") + std = float("0.018088") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [3072] + dtype = "float32" + min_val = float("-0.0404125") + max_val = float("0.0382792") + mean = float("0.000304818") + std = float("0.00730327") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0819269") + max_val = float("0.0975206") + mean = float("-8.50294e-06") + std = float("0.0176017") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [3072] + dtype = "float32" + min_val = float("-0.0329042") + max_val = float("0.0303536") + mean = float("-7.16758e-05") + std = float("0.00591918") + data = None + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.535505") + max_val = float("0.535918") + mean = float("-4.04094e-06") + std = float("0.0286549") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [3072] + dtype = "float32" + min_val = float("-0.144567") + max_val = float("0.151832") + mean = float("8.68538e-05") + std = float("0.028637") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.410122") + max_val = float("0.357528") + mean = float("-1.73642e-05") + std = float("0.0248621") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [3072] + dtype = "float32" + 
min_val = float("-0.118434") + max_val = float("0.157878") + mean = float("0.000208634") + std = float("0.025849") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [1, 3, 640, 640] + dtype = "float32" + max_val = float("1.0") + mean = float("0.467665") + std = float("0.176432") + data = None + + +class Program_weight_tensor_data_27: + name = "data_27" + shape = [1, 400, 1024] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.444201") + std = float("0.550168") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py new file mode 100644 index 000000000..ceff0732f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py @@ -0,0 +1,8235 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + 
parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + 
parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + 
parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + parameter_753, + parameter_754, + parameter_755, + parameter_756, + parameter_757, + parameter_758, + parameter_759, + parameter_760, + parameter_761, + parameter_762, + parameter_763, + parameter_764, + parameter_765, + parameter_766, + parameter_767, + parameter_768, + parameter_769, + parameter_770, + parameter_771, + parameter_772, + parameter_773, + parameter_774, + parameter_775, + parameter_776, + parameter_777, + parameter_778, + parameter_779, + parameter_780, + parameter_781, + parameter_782, + parameter_783, + parameter_784, + 
parameter_785, + parameter_786, + parameter_787, + parameter_788, + parameter_789, + parameter_790, + parameter_791, + parameter_792, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + data_27, + ): + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_26, parameter_792, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_26, parameter_792 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_791, + parameter_790, + parameter_789, + parameter_788, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_788, parameter_789, parameter_790, parameter_791 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_787, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_787, swish_0 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_786, + parameter_785, + parameter_784, + parameter_783, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_783, parameter_784, parameter_785, parameter_786 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_782, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_782, swish_1 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_781, + parameter_780, + parameter_779, + parameter_778, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_778, parameter_779, parameter_780, parameter_781 + + # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_777, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_777, 
swish_2 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_776, + parameter_775, + parameter_774, + parameter_773, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_773, parameter_774, parameter_775, parameter_776 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_772, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_772 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_771, + parameter_770, + parameter_769, + parameter_768, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_768, parameter_769, parameter_770, parameter_771 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_767, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_767, swish_3 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_766, + parameter_765, + parameter_764, + parameter_763, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_763, parameter_764, parameter_765, parameter_766 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_762, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_762 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_761, + parameter_760, + parameter_759, + parameter_758, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_758, parameter_759, parameter_760, parameter_761 + + # 
pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_757, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_757 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_756, + parameter_755, + parameter_754, + parameter_753, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_753, parameter_754, parameter_755, parameter_756 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_752, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_752, swish_6 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_748, parameter_749, parameter_750, parameter_751 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_747 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_743, parameter_744, parameter_745, parameter_746 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_742 + + 
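+ # The ops above form the recurring basic block of this backbone: a 3x3
+ # conv + batch-norm main path, a 1x1 conv + batch-norm shortcut whose output
+ # is scaled by a 1-element tensor fed in as data_* (multiply_0), an
+ # element-wise add, a swish activation, and a residual add back onto the
+ # block input (add_1). Every block below repeats this pattern with its own
+ # parameter_* weights and data_* scale; it appears to correspond to a
+ # CSPResNet-style basic block.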
# pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_741, + parameter_740, + parameter_739, + parameter_738, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_738, parameter_739, parameter_740, parameter_741 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_737, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_737, swish_8 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del batch_norm__66, data_1 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + 
parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_722, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722, swish_10 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del batch_norm__84, data_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + del batch_norm__78, multiply_2 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_717 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_716, full_int_array_1) + del parameter_716 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_715, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_715 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 
128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_714, + parameter_713, + parameter_712, + parameter_711, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_711, parameter_712, parameter_713, parameter_714 + + # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_710, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_710, swish_12 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_709, + parameter_708, + parameter_707, + parameter_706, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_706, parameter_707, parameter_708, parameter_709 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_705, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_705 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_704, + parameter_703, + parameter_702, + parameter_701, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_701, parameter_702, parameter_703, parameter_704 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_700, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_700, swish_13 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_699, + parameter_698, + parameter_697, + parameter_696, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_696, parameter_697, parameter_698, parameter_699 
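+ # End of the first stage pattern: the two 48-channel branches are
+ # concatenated (concat_2), gated by a squeeze-and-excitation-style channel
+ # attention (mean over H/W, 1x1 conv, hardsigmoid, multiply), fused by a
+ # 1x1 conv to 128 channels, downsampled by a stride-2 3x3 conv to 192
+ # channels, and split again into two 96-channel branches (conv2d_18 /
+ # conv2d_19) for the next stage. A minimal functional sketch of the
+ # attention gate, assuming paddle.nn.functional and kept as a comment only
+ # (hypothetical helper, not part of the exported program):
+ #   def ese_gate(x, weight, bias):
+ #       s = x.mean(axis=[2, 3], keepdim=True)  # global average pooling
+ #       s = paddle.nn.functional.conv2d(s, weight) + bias.reshape([1, -1, 1, 1])
+ #       return x * paddle.nn.functional.hardsigmoid(s, slope=0.166667, offset=0.5)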
+ + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_695, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_695 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_694, + parameter_693, + parameter_692, + parameter_691, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_691, parameter_692, parameter_693, parameter_694 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_690, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_690 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_689, + parameter_688, + parameter_687, + parameter_686, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_686, parameter_687, parameter_688, parameter_689 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_685, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_685, swish_16 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_684, + parameter_683, + parameter_682, + parameter_681, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_681, parameter_682, parameter_683, parameter_684 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del batch_norm__126, data_3 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_680, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_680 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_679, + parameter_678, + parameter_677, + parameter_676, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_676, parameter_677, parameter_678, parameter_679 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_675, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_675 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_671, parameter_672, parameter_673, parameter_674 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_670, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_670, swish_18 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_666, parameter_667, parameter_668, parameter_669 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del batch_norm__144, data_4 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_665, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_665 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + 
batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_661, parameter_662, parameter_663, parameter_664 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_660, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_655, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655, swish_20 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del batch_norm__162, data_5 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del conv2d_29, parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_645, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_640, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640, swish_22 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del batch_norm__180, data_6 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + del batch_norm__174, multiply_7 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- 
(-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_630, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_625, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625, swish_24 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del batch_norm__198, data_7 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + del batch_norm__192, multiply_8 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_615, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610, swish_26 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del batch_norm__216, data_8 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + del batch_norm__210, multiply_9 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_605 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_604, full_int_array_1) + del parameter_604 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_603, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_10, parameter_603 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, 
-1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_602, + parameter_601, + parameter_600, + parameter_599, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_599, parameter_600, parameter_601, parameter_602 + + # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_598, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_598 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_597, + parameter_596, + parameter_595, + parameter_594, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_594, parameter_595, parameter_596, parameter_597 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_593, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_593 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_592, + parameter_591, + parameter_590, + parameter_589, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_589, parameter_590, parameter_591, parameter_592 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_588, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_588, swish_29 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_587, + parameter_586, + parameter_585, + parameter_584, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_584, parameter_585, parameter_586, 
parameter_587 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_583, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_583 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_582, + parameter_581, + parameter_580, + parameter_579, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_579, parameter_580, parameter_581, parameter_582 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_578, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_578 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_577, + parameter_576, + parameter_575, + parameter_574, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_574, parameter_575, parameter_576, parameter_577 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_573, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_573, swish_32 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_572, + parameter_571, + parameter_570, + parameter_569, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_569, parameter_570, parameter_571, parameter_572 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del batch_norm__258, data_9 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + 
conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_568, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_568 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_567, + parameter_566, + parameter_565, + parameter_564, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_564, parameter_565, parameter_566, parameter_567 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_563, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_563 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_559, parameter_560, parameter_561, parameter_562 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_558, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_558, swish_34 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_554, parameter_555, parameter_556, parameter_557 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del batch_norm__276, data_10 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + del batch_norm__270, multiply_12 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_553, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_553 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_549, parameter_550, parameter_551, parameter_552 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_548, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_543, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543, swish_36 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del batch_norm__294, data_11 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + del batch_norm__288, multiply_13 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_52, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_533, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_528, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528, swish_38 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del batch_norm__312, data_12 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + del batch_norm__306, multiply_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del conv2d_55, parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_518, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_40, parameter_513, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513, swish_40 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del batch_norm__330, data_13 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + del batch_norm__324, multiply_15 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del 
batch_norm__336 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_503, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498, swish_42 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del batch_norm__348, data_14 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + del batch_norm__342, multiply_16 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_493 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_492, full_int_array_1) + del parameter_492 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + 
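# [Editor's sketch] Two recurring idioms in the generated code around this point are
# worth spelling out. First, every call to paddle._C_ops.batch_norm (and, further down,
# dropout / layer_norm) is piped through a `(lambda x, f: f(x))` wrapper whose only job
# is to normalize the return value, since the private op may hand back either the output
# tensor alone or the full tuple of outputs. Second, the mean -> 1x1 conv -> hardsigmoid
# -> multiply sequence here is an unrolled squeeze-and-excitation style channel gate over
# the concatenated 384-channel feature map. The helper and module below use illustrative
# names only; the captured graph calls the private _C_ops kernels directly:
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

def _as_tuple6(out):
    # Equivalent of the inlined `lambda out: out if isinstance(out, (list, tuple))
    # else (out, None, None, None, None, None)` used after each batch_norm call.
    return out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None)

class ChannelGate(nn.Layer):
    def __init__(self, channels: int = 384):
        super().__init__()
        # 1x1 conv with bias, matching conv2d_61 plus the reshaped-bias add above
        self.fc = nn.Conv2D(channels, channels, kernel_size=1)

    def forward(self, x):
        # global average pool over H and W (pd_op.mean with keepdim=True)
        s = paddle.mean(x, axis=[2, 3], keepdim=True)
        # hard sigmoid with slope 1/6 and offset 0.5, as in pd_op.hardsigmoid
        gate = F.hardsigmoid(self.fc(s), slope=0.166667, offset=0.5)
        # channel-wise re-weighting of the input feature map (pd_op.multiply)
        return x * gate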
multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_491, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_491 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_490, + parameter_489, + parameter_488, + parameter_487, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_487, parameter_488, parameter_489, parameter_490 + + # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_486, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_486 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_485, + parameter_484, + parameter_483, + parameter_482, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_482, parameter_483, parameter_484, parameter_485 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_481, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_481 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_480, + parameter_479, + parameter_478, + parameter_477, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_477, parameter_478, parameter_479, parameter_480 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_476, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_476, swish_45 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + 
batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_475, + parameter_474, + parameter_473, + parameter_472, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_472, parameter_473, parameter_474, parameter_475 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_471, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_471 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_470, + parameter_469, + parameter_468, + parameter_467, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_467, parameter_468, parameter_469, parameter_470 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_466, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_466 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_465, + parameter_464, + parameter_463, + parameter_462, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_462, parameter_463, parameter_464, parameter_465 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_461, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_461, swish_48 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_460, + parameter_459, + parameter_458, + parameter_457, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_457, parameter_458, parameter_459, parameter_460 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del batch_norm__390, data_15 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_33 = 
paddle._C_ops.add(batch_norm__384, multiply_18) + del batch_norm__384, multiply_18 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_456, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_456 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_455, + parameter_454, + parameter_453, + parameter_452, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_452, parameter_453, parameter_454, parameter_455 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_451, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_451 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_446, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_446, swish_50 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del batch_norm__408, data_16 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + del batch_norm__402, multiply_19 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: 
(-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_441, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_436, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_431, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431, swish_52 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del batch_norm__426, data_17 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + del batch_norm__420, multiply_20 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, 
-1x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_426 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_425, full_int_array_1) + del parameter_425 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_424, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_21, parameter_424 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_423, + parameter_422, + parameter_421, + parameter_420, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_420, parameter_421, parameter_422, parameter_423 + + # pd_op.swish: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_54) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_54) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_54) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.flatten: (-1x1024x-1xf32) <- (-1x1024x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(swish_54, 2, 3) + del swish_54 + + # pd_op.transpose: (-1x-1x1024xf32) <- 
(-1x1024x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.add: (-1x400x1024xf32) <- (-1x-1x1024xf32, 1x400x1024xf32) + add_40 = paddle._C_ops.add(transpose_0, data_27) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [1024] + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_18, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_19, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_0 = paddle._C_ops.matmul(add_40, slice_3, False, False) + del slice_3 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_41 = paddle._C_ops.add(matmul_0, slice_4) + del matmul_0, slice_4 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_8 = [0, 0, 4, 256] + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_8) + del add_41 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [2048] + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_18, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_19, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_1 = paddle._C_ops.matmul(add_40, slice_5, False, False) + del add_40, slice_5 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_1, slice_6) + del matmul_1, slice_6 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_8) + del add_42 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_10 = [2147483647] + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_18, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_18 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_19, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_19 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(transpose_0, slice_7, False, False) + del slice_7 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_43 = paddle._C_ops.add(matmul_2, slice_8) + del matmul_2, slice_8 + + # pd_op.reshape: (-1x-1x4x256xf32) <- (-1x-1x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_8) + del add_43 + + # pd_op.transpose: (-1x4x-1x256xf32) <- (-1x-1x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_3 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + del transpose_1, transpose_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + 
) + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_3, full_1, float("0"), True) + del matmul_3 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_0 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x-1x256xf32) + matmul_4 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + del dropout_0, transpose_3 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) + del matmul_4 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_3 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [0, 0, 1024] + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_11) + del transpose_4 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_5 = paddle._C_ops.matmul(reshape_7, parameter_419, False, False) + del parameter_419, reshape_7 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_44 = paddle._C_ops.add(matmul_5, parameter_418) + del matmul_5, parameter_418 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_44, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_44 + + # pd_op.add: (-1x400x1024xf32) <- (-1x-1x1024xf32, -1x400x1024xf32) + add_45 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_417, parameter_416, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_45, parameter_416, parameter_417 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_415, False, False) + del parameter_415 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_46 = paddle._C_ops.add(matmul_6, parameter_414) + del matmul_6, parameter_414 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_46, False) + del add_46 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + 
matmul_7 = paddle._C_ops.matmul(dropout_4, parameter_413, False, False) + del dropout_4, parameter_413 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_47 = paddle._C_ops.add(matmul_7, parameter_412) + del matmul_7, parameter_412 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) + del dropout_6, layer_norm_0 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_411, parameter_410, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_48, parameter_410, parameter_411 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) + add_49 = paddle._C_ops.add(layer_norm_3, data_27) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_20, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_21, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_8 = paddle._C_ops.matmul(add_49, slice_10, False, False) + del slice_10 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_50 = paddle._C_ops.add(matmul_8, slice_11) + del matmul_8, slice_11 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_8) + del add_50 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_20, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_21, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_9 = paddle._C_ops.matmul(add_49, slice_12, False, False) + del add_49, slice_12 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_51 = paddle._C_ops.add(matmul_9, slice_13) + del matmul_9, slice_13 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_8) + del add_51 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_20, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_20 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_21, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_21 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_3, slice_14, False, False) + 
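# [Editor's sketch] Each of these repeated slice / matmul / reshape / transpose runs is
# one multi-head self-attention layer unrolled by the graph capture: a fused 1024x3072
# in-projection weight (data_18 / data_20 / data_22 / data_24) is sliced into Q, K and V
# blocks, split into 4 heads of 256 channels, and combined by scaled dot-product
# attention with scale 0.0625 = 1/sqrt(256) and dropout p=0.1. In the captured graph the
# positional embedding (data_27) is added to the query/key input but not to the value
# input; the compact sketch below folds them into a single input for brevity.
# `fused_qkv_attention` and its argument names are illustrative, not graph identifiers:
import paddle
import paddle.nn.functional as F

def fused_qkv_attention(x, w_qkv, b_qkv, num_heads=4, dropout_p=0.1):
    # x: (B, L, 1024); w_qkv: (1024, 3072); b_qkv: (3072,)
    b, l, d = x.shape
    head_dim = d // num_heads                                    # 256
    q, k, v = paddle.split(paddle.matmul(x, w_qkv) + b_qkv, 3, axis=-1)

    def to_heads(t):
        # (B, L, D) -> (B, heads, L, head_dim), i.e. the reshape/transpose pairs above
        return t.reshape([b, l, num_heads, head_dim]).transpose([0, 2, 1, 3])

    q, k, v = to_heads(q), to_heads(k), to_heads(v)
    attn = paddle.matmul(q, k, transpose_y=True) * (head_dim ** -0.5)   # scale 0.0625
    attn = F.dropout(F.softmax(attn, axis=-1), p=dropout_p)
    out = paddle.matmul(attn, v).transpose([0, 2, 1, 3]).reshape([b, l, d])
    return out   # the out-projection, dropout, residual add and layer_norm follow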
del slice_14 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_52 = paddle._C_ops.add(matmul_10, slice_15) + del matmul_10, slice_15 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_8) + del add_52 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_11 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + del transpose_5, transpose_6 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_11, full_1, float("0"), True) + del matmul_11 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) + matmul_12 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + del dropout_8, transpose_7 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_4 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_4 + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_11) + del transpose_8 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_13 = paddle._C_ops.matmul(reshape_11, parameter_409, False, False) + del parameter_409, reshape_11 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_13, parameter_408) + del matmul_13, parameter_408 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_53, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_53 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) + del dropout_10, layer_norm_3 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_54, parameter_407, parameter_406, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_54, parameter_406, parameter_407 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_6, parameter_405, False, False) + del parameter_405 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_55 = paddle._C_ops.add(matmul_14, parameter_404) + del matmul_14, parameter_404 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + 
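# [Editor's sketch] The matmul -> gelu -> dropout -> matmul -> dropout -> add ->
# layer_norm run here is the feed-forward sub-layer of the same encoder block:
# 1024 -> 2048 -> GELU -> 1024 with a residual connection and a trailing LayerNorm.
# A short equivalent with paddle.nn layers; the FeedForward class name is written out
# for illustration, whereas the captured graph keeps the raw weights as parameter_*
# tensors and calls the private _C_ops kernels:
import paddle.nn as nn

class FeedForward(nn.Layer):
    def __init__(self, d_model=1024, d_hidden=2048, dropout_p=0.1):
        super().__init__()
        self.fc1 = nn.Linear(d_model, d_hidden)
        self.fc2 = nn.Linear(d_hidden, d_model)
        self.act = nn.GELU()              # approximate=False, as in pd_op.gelu(..., False)
        self.drop = nn.Dropout(dropout_p)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x):
        # x: (B, 400, 1024) token sequence produced by the attention sub-layer
        y = self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
        return self.norm(x + y)           # residual add, then LayerNorm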
gelu_1 = paddle._C_ops.gelu(add_55, False) + del add_55 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_15 = paddle._C_ops.matmul(dropout_12, parameter_403, False, False) + del dropout_12, parameter_403 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_56 = paddle._C_ops.add(matmul_15, parameter_402) + del matmul_15, parameter_402 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_56, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_56 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) + del dropout_14, layer_norm_6 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_57, parameter_401, parameter_400, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_57, parameter_400, parameter_401 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) + add_58 = paddle._C_ops.add(layer_norm_9, data_27) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_22, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_23, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_16 = paddle._C_ops.matmul(add_58, slice_17, False, False) + del slice_17 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_59 = paddle._C_ops.add(matmul_16, slice_18) + del matmul_16, slice_18 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_8) + del add_59 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_22, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_23, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_17 = paddle._C_ops.matmul(add_58, slice_19, False, False) + del add_58, slice_19 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_60 = paddle._C_ops.add(matmul_17, slice_20) + del matmul_17, slice_20 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_8) + del add_60 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: 
(1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_22, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_22 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_23, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_23 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(layer_norm_9, slice_21, False, False) + del slice_21 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_18, slice_22) + del matmul_18, slice_22 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_8) + del add_61 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_19 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + del transpose_10, transpose_9 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_19, full_1, float("0"), True) + del matmul_19 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) + matmul_20 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + del dropout_16, transpose_11 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_5 = paddle._C_ops.shape64(transpose_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_5 + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_11) + del transpose_12 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_21 = paddle._C_ops.matmul(reshape_15, parameter_399, False, False) + del parameter_399, reshape_15 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_62 = paddle._C_ops.add(matmul_21, parameter_398) + del matmul_21, parameter_398 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_62, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_62 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) + del dropout_18, layer_norm_9 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_63, parameter_397, parameter_396, float("1e-05"), 2 + ), + lambda 
out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_63, parameter_396, parameter_397 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_395, False, False) + del parameter_395 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_64 = paddle._C_ops.add(matmul_22, parameter_394) + del matmul_22, parameter_394 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_64, False) + del add_64 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_23 = paddle._C_ops.matmul(dropout_20, parameter_393, False, False) + del dropout_20, parameter_393 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_65 = paddle._C_ops.add(matmul_23, parameter_392) + del matmul_23, parameter_392 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_65, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_65 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) + del dropout_22, layer_norm_12 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_66, parameter_391, parameter_390, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_66, parameter_390, parameter_391 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) + add_67 = paddle._C_ops.add(layer_norm_15, data_27) + del data_27 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + data_24, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + data_25, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_24 = paddle._C_ops.matmul(add_67, slice_24, False, False) + del slice_24 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_68 = paddle._C_ops.add(matmul_24, slice_25) + del matmul_24, slice_25 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_8) + del add_68 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + data_24, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + data_25, [0], full_int_array_7, full_int_array_9, [1], [] + ) + del full_int_array_7 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_25 = 
paddle._C_ops.matmul(add_67, slice_26, False, False) + del add_67, slice_26 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_69 = paddle._C_ops.add(matmul_25, slice_27) + del matmul_25, slice_27 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_8) + del add_69 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_28 = paddle._C_ops.slice( + data_24, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_24 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_29 = paddle._C_ops.slice( + data_25, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_25, full_int_array_10, full_int_array_9 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_15, slice_28, False, False) + del slice_28 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_70 = paddle._C_ops.add(matmul_26, slice_29) + del matmul_26, slice_29 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_8) + del add_70, full_int_array_8 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_27 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + del transpose_13, transpose_14 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_27, full_1, float("0"), True) + del full_1, matmul_27 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_3 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) + matmul_28 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + del dropout_24, transpose_15 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_28, [0, 2, 1, 3]) + del matmul_28 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_6 = paddle._C_ops.shape64(transpose_16) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_30 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_6 + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_11) + del full_int_array_11, transpose_16 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_29 = paddle._C_ops.matmul(reshape_19, parameter_389, False, False) + del parameter_389, reshape_19 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_71 = paddle._C_ops.add(matmul_29, parameter_388) + del matmul_29, parameter_388 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: 
f(x))( + paddle._C_ops.dropout( + add_71, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_71 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) + del dropout_26, layer_norm_15 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_387, parameter_386, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_72, parameter_386, parameter_387 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_30 = paddle._C_ops.matmul(layer_norm_18, parameter_385, False, False) + del parameter_385 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_73 = paddle._C_ops.add(matmul_30, parameter_384) + del matmul_30, parameter_384 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_73, False) + del add_73 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_31 = paddle._C_ops.matmul(dropout_28, parameter_383, False, False) + del dropout_28, parameter_383 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_74 = paddle._C_ops.add(matmul_31, parameter_382) + del matmul_31, parameter_382 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_74, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_74, full_2 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) + del dropout_30, layer_norm_18 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_75, parameter_381, parameter_380, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_75, parameter_380, parameter_381 + + # pd_op.transpose: (-1x1024x400xf32) <- (-1x400x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("1024"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [slice_0, full_3, slice_1, slice_2] + del full_3, slice_0, slice_1, slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x1024x-1x-1xf32) <- (-1x1024x400xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, stack_0) + del stack_0, transpose_17 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + reshape_20, 
parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + reshape_20, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, reshape_20 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + "NCHW", + 
True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_359, swish_57 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_76) + del add_76 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_77) + del add_77 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [5, 5] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_12, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_13 = [9, 9] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_13, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_13 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_14 = [13, 13] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_14, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_14 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_5 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (-1x1536x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: 
(-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_78) + del add_78 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_6 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + 
batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x512x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x512x-1x-1xf32) + combine_7 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (-1x896x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + 
batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, batch_norm__546 + + # pd_op.swish: 
(-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(add_79) + del add_79 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_80) + del add_80 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + 
batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_81) + del add_81 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_8 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + 
parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) + combine_9 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + 
parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234, swish_77 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_82) + del add_82 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + 
swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_83) + del add_83 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + 
False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_84) + del add_84 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_10 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x192x-1x-1xf32) 
<- (-1x192x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) + combine_11 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, 
parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_85) + del add_85 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_86) + del add_86 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_91 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, 
parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_87) + del add_87 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_12 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x768x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x768x-1x-1xf32) + combine_13 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_109 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104, swish_97 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_88) + del add_88 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- 
(-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_100 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_89) + del add_89 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_101 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + 
parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_102 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_90) + del add_90 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_14 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_104) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_31 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_104) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_32 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_8 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_9 = paddle._C_ops.shape64(swish_104) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_33 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del 
shape64_9 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_22 = paddle._C_ops.multiply(slice_32, slice_33) + del slice_32, slice_33 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_15 = [1, 1] + + # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_21 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_91 = paddle._C_ops.add(conv2d_142, reshape_21) + del conv2d_142, reshape_21 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_91) + del add_91 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_23 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_23, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_23, parameter_52 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x-1x-1xf32) + add_92 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x768x-1x-1xf32, 10x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_92, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_92, parameter_47 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_22 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_93 = paddle._C_ops.add(conv2d_144, reshape_22) + del conv2d_144, reshape_22 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_23 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_94 = paddle._C_ops.add(conv2d_145, reshape_23) + del conv2d_145, reshape_23 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_94) + del add_94 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- 
(-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_24 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_24, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_24, parameter_43 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_146, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x768x-1x-1xf32, 40x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_106 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_24 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_95 = paddle._C_ops.add(conv2d_147, reshape_24) + del conv2d_147, reshape_24 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("10"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_15 = [full_4, full_5, full_6, multiply_22] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_25 = paddle._C_ops.reshape(add_95, stack_1) + del add_95, stack_1 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_18 = paddle._C_ops.transpose(reshape_25, [0, 2, 3, 1]) + del reshape_25 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_4 = paddle._C_ops.softmax(transpose_18, 1) + del transpose_18 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_4, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_4 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_3) + del conv2d_148 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_93) + del add_93 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_16 = [full_4, full_6, multiply_22] + del multiply_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_26 = paddle._C_ops.reshape(sigmoid_2, stack_2) + del sigmoid_2, stack_2 + + # pd_op.shape64: 
(4xi64) <- (-1x384x-1x-1xf32) + shape64_10 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_34 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_10 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_11 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_35 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_11 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_12 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_36 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_12 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_25 = paddle._C_ops.multiply(slice_35, slice_36) + del slice_35, slice_36 + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_96 = paddle._C_ops.add(conv2d_149, reshape_27) + del conv2d_149, reshape_27 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_96) + del add_96 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_26 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_150 = paddle._C_ops.conv2d( + multiply_26, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_26, parameter_33 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_150, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_97 = paddle._C_ops.add(swish_107, swish_94) + del swish_107 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x384x-1x-1xf32, 10x384x3x3xf32) + conv2d_151 = paddle._C_ops.conv2d( + add_97, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_97, parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_28 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_98 = paddle._C_ops.add(conv2d_151, reshape_28) + del conv2d_151, 
reshape_28 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_29 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_99 = paddle._C_ops.add(conv2d_152, reshape_29) + del conv2d_152, reshape_29 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_99) + del add_99 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_27 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + multiply_27, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_27, parameter_24 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x384x-1x-1xf32, 40x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_108 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_30 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_100 = paddle._C_ops.add(conv2d_154, reshape_30) + del conv2d_154, reshape_30 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_17 = [full_4, full_5, full_6, multiply_25] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_31 = paddle._C_ops.reshape(add_100, stack_3) + del add_100, stack_3 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_19 = paddle._C_ops.transpose(reshape_31, [0, 2, 3, 1]) + del reshape_31 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_5 = paddle._C_ops.softmax(transpose_19, 1) + del transpose_19 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_5, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_5 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_3) + del conv2d_155 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_98) + del add_98 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [full_4, full_6, 
multiply_25] + del multiply_25 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_32 = paddle._C_ops.reshape(sigmoid_5, stack_4) + del sigmoid_5, stack_4 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_13 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_37 = paddle._C_ops.slice( + shape64_13, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, shape64_13 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_14 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_38 = paddle._C_ops.slice( + shape64_14, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4, shape64_14 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_15 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_39 = paddle._C_ops.slice( + shape64_15, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, full_int_array_6, shape64_15 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_28 = paddle._C_ops.multiply(slice_38, slice_39) + del slice_38, slice_39 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_15 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_33 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_101 = paddle._C_ops.add(conv2d_156, reshape_33) + del conv2d_156, reshape_33 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_101) + del add_101 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_29 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_157 = paddle._C_ops.conv2d( + multiply_29, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_29, parameter_15 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_157, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_102 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x192x-1x-1xf32, 10x192x3x3xf32) + 
conv2d_158 = paddle._C_ops.conv2d( + add_102, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_102, parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_34 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_103 = paddle._C_ops.add(conv2d_158, reshape_34) + del conv2d_158, reshape_34 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_35 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_104 = paddle._C_ops.add(conv2d_159, reshape_35) + del conv2d_159, reshape_35 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_104) + del add_104 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_30 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_160 = paddle._C_ops.conv2d( + multiply_30, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_30, parameter_6 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_160, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x192x-1x-1xf32, 40x192x3x3xf32) + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_110 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_36 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_105 = paddle._C_ops.add(conv2d_161, reshape_36) + del conv2d_161, reshape_36 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_19 = [full_4, full_5, full_6, multiply_28] + del full_5 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_37 = paddle._C_ops.reshape(add_105, stack_5) + del add_105, stack_5 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_20 = paddle._C_ops.transpose(reshape_37, [0, 2, 3, 1]) + del reshape_37 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_6 = paddle._C_ops.softmax(transpose_20, 1) + del transpose_20 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_6, 
parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_6 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_3) + del conv2d_162, full_int_array_3 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_103) + del add_103 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_20 = [full_4, full_6, multiply_28] + del full_4, full_6, multiply_28 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_20, 0) + del combine_20 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_38 = paddle._C_ops.reshape(sigmoid_8, stack_6) + del sigmoid_8, stack_6 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32]) <- (-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32) + combine_21 = [reshape_26, reshape_32, reshape_38] + del reshape_26, reshape_32, reshape_38 + + # pd_op.concat: (-1x10x-1xf32) <- ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_21, full_7) + del combine_21, full_7 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_22 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_22, full_0) + del combine_22, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py new file mode 100644 index 000000000..c934d8553 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py @@ -0,0 +1,8595 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.200868") + max_val = float("0.205409") + mean = float("1.50903e-08") + std = float("0.0117333") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0496614") + max_val = float("0.233447") + mean = float("0.0551649") + std = float("0.0442642") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.83662") + max_val = float("1.62777") + mean = float("1.22152") + std = float("0.145383") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.00517837") + max_val = float("5.37372") + mean = float("0.472226") + std = float("0.745068") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-8.59248") + max_val = float("10.1229") + mean = float("0.104301") + std = float("2.84169") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.10201") + max_val = float("0.139453") + mean = float("-0.000771004") + std = float("0.0121333") + 
data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00864008") + max_val = float("0.0157539") + mean = float("-0.000141521") + std = float("0.00408479") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0103206") + max_val = float("0.0183671") + mean = float("-0.000260799") + std = float("0.00202219") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-1156.18") + max_val = float("134.34") + mean = float("-22.0743") + std = float("92.6504") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-83.5158") + max_val = float("85.9358") + mean = float("2.58239") + std = float("26.6989") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("-14.174") + max_val = float("24.9743") + mean = float("-0.553124") + std = float("5.93549") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("2.28992") + max_val = float("16941700.0") + mean = float("535823.0") + std = float("1889860.0") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-12545.3") + max_val = float("7938.65") + mean = float("-494.603") + std = float("2450.0") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-139.267") + max_val = float("101.02") + mean = float("-0.0994834") + std = float("4.18182") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-11.0196") + max_val = float("7.12431") + mean = float("-0.172978") + std = float("1.77779") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-21.8633") + max_val = float("14.1281") + mean = float("-0.061946") + std = float("0.961227") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = float("-0.125302") + max_val = float("0.130317") + mean = float("3.40515e-09") + std = float("0.00680734") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00280162") + max_val = float("0.100804") + mean = float("0.0327571") + std = float("0.0175274") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.99914") + max_val = float("1.24063") + mean = float("1.1068") + std = float("0.0410271") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = 
"float32" + min_val = float("0.00351367") + max_val = float("0.502965") + mean = float("0.0475644") + std = float("0.0560398") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.20324") + max_val = float("0.163306") + mean = float("-0.0211934") + std = float("0.0480836") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0595396") + max_val = float("0.0686159") + mean = float("-0.000519359") + std = float("0.00405674") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00283386") + max_val = float("0.00804157") + mean = float("4.09459e-05") + std = float("0.00164843") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00192633") + max_val = float("0.00573757") + mean = float("-3.72961e-05") + std = float("0.000619199") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-4.37321") + max_val = float("0.452128") + mean = float("-0.17109") + std = float("0.298945") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.129645") + max_val = float("0.537367") + mean = float("0.252723") + std = float("0.116707") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.994167") + max_val = float("1.41211") + mean = float("1.16997") + std = float("0.0589217") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.190338") + max_val = float("627.738") + mean = float("13.2438") + std = float("39.4458") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-6.47831") + max_val = float("2.53274") + mean = float("-0.258943") + std = float("0.862987") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.37398") + max_val = float("0.93716") + mean = float("-0.0050287") + std = float("0.034184") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.168767") + max_val = float("0.0326032") + mean = float("0.00033161") + std = float("0.0172771") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.13765") + max_val = float("0.0271847") + mean = float("-0.000469481") + std = float("0.00707315") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 10, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = 
None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [40, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0666495") + max_val = float("0.0422192") + mean = float("4.22006e-10") + std = float("0.004326") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-0.0211301") + max_val = float("0.0557583") + mean = float("0.00994542") + std = float("0.0117475") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("1.00683") + max_val = float("1.19995") + mean = float("1.06456") + std = float("0.0224702") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("0.00536813") + max_val = float("4.48397") + mean = float("0.148381") + std = float("0.22041") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768] + dtype = "float32" + min_val = float("-0.2391") + max_val = float("0.308194") + mean = float("-0.0115961") + std = float("0.071089") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0316374") + max_val = float("0.030148") + mean = float("-0.000219685") + std = float("0.00274506") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768] + dtype = "float32" + min_val = float("-0.00401297") + max_val = float("0.00319868") + mean = float("6.33231e-05") + std = float("0.000831526") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00264943") + max_val = float("0.00247676") + mean = float("4.17753e-06") + std = float("0.000249163") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-3.67573") + max_val = float("0.47477") + mean = float("-0.108836") + std = float("0.235747") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("-0.0929864") + max_val = float("0.260814") + mean = float("0.112641") + std = float("0.0586929") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("0.978014") + max_val = float("1.2276") + mean = float("1.06585") + std = float("0.0280238") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("0.0583214") + max_val = float("179.503") + mean = float("8.00532") + std = float("16.3237") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768] + dtype = "float32" + min_val = float("-3.13569") + max_val = float("2.18092") + mean = float("-0.125865") + std = float("0.472168") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.353861") + max_val = float("0.643743") + mean = float("-0.00192258") + std = float("0.0278211") + data = None + + 
+class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768] + dtype = "float32" + min_val = float("-0.0411452") + max_val = float("0.024878") + mean = float("-2.52466e-05") + std = float("0.00729347") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0172894") + max_val = float("0.0205533") + mean = float("-6.97847e-05") + std = float("0.00177578") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [768] + dtype = "float32" + min_val = float("-0.148835") + max_val = float("0.366507") + mean = float("0.109619") + std = float("0.0784726") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + min_val = float("0.857119") + max_val = float("1.27215") + mean = float("1.05537") + std = float("0.0395249") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("2.1452") + max_val = float("15653.5") + mean = float("111.411") + std = float("608.25") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("-4.93063") + max_val = float("14.695") + mean = float("-0.317615") + std = float("1.44253") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.547982") + max_val = float("1.31648") + mean = float("-0.00228986") + std = float("0.031089") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.142261") + max_val = float("0.0236224") + mean = float("-0.0251959") + std = float("0.0245419") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.923585") + max_val = float("1.06744") + mean = float("0.990921") + std = float("0.0255245") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.0622324") + max_val = float("493.49") + mean = float("19.4794") + std = float("46.1366") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-1.45105") + max_val = float("2.69314") + mean = float("0.0290829") + std = float("0.54902") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.528789") + max_val = float("0.475083") + mean = float("0.000636297") + std = float("0.0382416") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.142261") + max_val = float("0.0236224") + mean = float("-0.0251959") + std = float("0.0245419") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.956171") + max_val = float("1.12145") + mean = float("1.01722") + std = float("0.0276561") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.329317") + max_val = float("3828.52") + mean = float("104.542") + std = float("264.784") + data = 
None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-8.30504") + max_val = float("7.29752") + mean = float("-0.046446") + std = float("1.50992") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.216923") + max_val = float("0.225482") + mean = float("-0.000140598") + std = float("0.0129397") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [384] + dtype = "float32" + min_val = float("-0.186619") + max_val = float("0.0253096") + mean = float("-0.0403406") + std = float("0.0301782") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384] + dtype = "float32" + min_val = float("0.930641") + max_val = float("1.15944") + mean = float("1.0145") + std = float("0.0373315") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("2.04808") + max_val = float("21266.9") + mean = float("589.689") + std = float("2078.57") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-24.5893") + max_val = float("30.4673") + mean = float("0.404088") + std = float("5.12896") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.338033") + max_val = float("0.42615") + mean = float("0.000530178") + std = float("0.0222656") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384] + dtype = "float32" + min_val = float("-0.119217") + max_val = float("0.0124743") + mean = float("-0.0405258") + std = float("0.0215146") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [384] + dtype = "float32" + min_val = float("0.923648") + max_val = float("1.04368") + mean = float("0.989661") + std = float("0.0170925") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [384] + dtype = "float32" + min_val = float("0.0605594") + max_val = float("2562.44") + mean = float("30.3737") + std = float("153.669") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [384] + dtype = "float32" + min_val = float("-2.00406") + max_val = float("9.78589") + mean = float("0.0692858") + std = float("0.922895") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.359065") + max_val = float("0.736107") + mean = float("0.00142685") + std = float("0.0416064") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [384] + dtype = "float32" + min_val = float("-0.119217") + max_val = float("0.0124743") + mean = float("-0.0405258") + std = float("0.0215146") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [384] + dtype = "float32" + min_val = float("0.940925") + max_val = float("1.10279") + mean = float("1.01574") + std = float("0.0223556") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [384] + dtype = "float32" + min_val = float("1.38621") + max_val = float("4812.1") + mean = float("159.271") + std = float("503.316") + data = 
None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [384] + dtype = "float32" + min_val = float("-5.88263") + max_val = float("15.8533") + mean = float("0.140376") + std = float("2.40236") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.226706") + max_val = float("0.208603") + mean = float("0.000307493") + std = float("0.0144029") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [384] + dtype = "float32" + min_val = float("-0.130515") + max_val = float("0.0142189") + mean = float("-0.0397866") + std = float("0.0225046") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [384] + dtype = "float32" + min_val = float("0.862983") + max_val = float("1.11419") + mean = float("1.01096") + std = float("0.0331669") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [384] + dtype = "float32" + min_val = float("2.23371") + max_val = float("4709.64") + mean = float("260.249") + std = float("508.379") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("-13.5512") + max_val = float("16.4082") + mean = float("0.360129") + std = float("3.43025") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.191356") + max_val = float("0.178253") + mean = float("0.000526337") + std = float("0.0137294") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-0.113425") + max_val = float("0.0127038") + mean = float("-0.0408835") + std = float("0.0214936") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384] + dtype = "float32" + min_val = float("0.934591") + max_val = float("1.02911") + mean = float("0.987626") + std = float("0.0127672") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("0.0466938") + max_val = float("166.308") + mean = float("8.70658") + std = float("15.7428") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("-1.61681") + max_val = float("2.24513") + mean = float("0.120268") + std = float("0.507015") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.229179") + max_val = float("0.225991") + mean = float("0.00223037") + std = float("0.0276474") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-0.113425") + max_val = float("0.0127038") + mean = float("-0.0408835") + std = float("0.0214936") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384] + dtype = "float32" + min_val = float("0.966253") + max_val = float("1.11851") + mean = float("1.01774") + std = float("0.0232224") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("0.388381") + max_val = float("3807.62") + mean = float("53.0047") + std = float("216.966") + data 
= None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("-9.05284") + max_val = float("5.84204") + mean = float("0.111327") + std = float("1.23517") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.237817") + max_val = float("0.124946") + mean = float("0.000100381") + std = float("0.011587") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-0.103321") + max_val = float("0.0232727") + mean = float("-0.0418488") + std = float("0.0225603") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384] + dtype = "float32" + min_val = float("0.929001") + max_val = float("1.1158") + mean = float("1.01191") + std = float("0.0303721") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("0.323487") + max_val = float("2311.03") + mean = float("79.6394") + std = float("153.489") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("-6.12644") + max_val = float("14.665") + mean = float("0.49423") + std = float("2.21458") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.339134") + max_val = float("0.191022") + mean = float("0.000881583") + std = float("0.0173865") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-0.109396") + max_val = float("0.0442887") + mean = float("-0.0296739") + std = float("0.017334") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384] + dtype = "float32" + min_val = float("0.930867") + max_val = float("1.07806") + mean = float("1.00932") + std = float("0.0225248") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("0.146648") + max_val = float("626.166") + mean = float("15.3179") + std = float("43.6414") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("-6.69579") + max_val = float("5.18983") + mean = float("0.32161") + std = float("0.980842") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.560359") + max_val = float("0.446273") + mean = float("0.00139089") + std = float("0.0337706") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-0.0578452") + max_val = float("0.016811") + mean = float("-0.0161156") + std = float("0.011502") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384] + dtype = "float32" + min_val = float("0.920085") + max_val = float("1.10058") + mean = float("1.00226") + std = float("0.0184711") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("0.495073") + max_val = float("876.99") + mean = float("8.39435") + std = 
float("45.7821") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("-4.00856") + max_val = float("1.42008") + mean = float("-0.218361") + std = float("0.649639") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.290536") + max_val = float("0.175016") + mean = float("-0.00070814") + std = float("0.0138241") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-0.0540258") + max_val = float("0.0115933") + mean = float("-0.0157488") + std = float("0.0104757") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [384] + dtype = "float32" + min_val = float("0.987802") + max_val = float("1.11339") + mean = float("1.02541") + std = float("0.019391") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("0.320507") + max_val = float("596.285") + mean = float("38.1824") + std = float("79.9607") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("-6.7222") + max_val = float("6.5412") + mean = float("-0.0320944") + std = float("1.4521") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0639514") + max_val = float("0.0601372") + mean = float("-8.51741e-05") + std = float("0.00526146") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.216482") + max_val = float("0.607924") + mean = float("0.238936") + std = float("0.14305") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384] + dtype = "float32" + min_val = float("0.677427") + max_val = float("1.50605") + mean = float("1.12914") + std = float("0.0838432") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] + dtype = "float32" + min_val = float("1.74736") + max_val = float("22695.2") + mean = float("204.124") + std = float("1630.7") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("-10.7392") + max_val = float("6.92707") + mean = float("0.0292054") + std = float("0.881457") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-3.55448") + max_val = float("1.46716") + mean = float("0.00287851") + std = float("0.0619363") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.188818") + max_val = float("0.0459523") + mean = float("-0.0294237") + std = float("0.0400839") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.839771") + max_val = float("1.06967") + mean = float("0.976243") + std = float("0.0259122") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.0133959") + max_val = float("1454.17") + 
mean = float("13.1647") + std = float("105.548") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.261237") + max_val = float("1.28075") + mean = float("0.0271209") + std = float("0.114701") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.123591") + max_val = float("0.43619") + mean = float("0.00447012") + std = float("0.0296425") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.188818") + max_val = float("0.0459523") + mean = float("-0.0294237") + std = float("0.0400839") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.721948") + max_val = float("1.13657") + mean = float("1.02394") + std = float("0.0395189") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.142781") + max_val = float("538.324") + mean = float("19.9641") + std = float("54.0569") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.376722") + max_val = float("0.838454") + mean = float("0.0359573") + std = float("0.16185") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0490945") + max_val = float("0.0667615") + mean = float("0.00072352") + std = float("0.00630598") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.197783") + max_val = float("0.0410661") + mean = float("-0.0617541") + std = float("0.0482775") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.894643") + max_val = float("1.18384") + mean = float("1.01562") + std = float("0.0486636") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("1.08082") + max_val = float("908.404") + mean = float("45.0674") + std = float("96.6794") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.933607") + max_val = float("1.82089") + mean = float("0.102296") + std = float("0.412254") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0519582") + max_val = float("0.0691943") + mean = float("0.000736163") + std = float("0.00618428") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("-0.192937") + max_val = float("0.00891605") + mean = float("-0.0669654") + std = float("0.0334937") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.921792") + max_val = float("1.05091") + mean = float("0.974424") + std = float("0.0180091") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = 
float("0.00757897") + max_val = float("5.59766") + mean = float("0.426099") + std = float("0.667531") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192] + dtype = "float32" + min_val = float("-0.113501") + max_val = float("0.219786") + mean = float("0.0198383") + std = float("0.0428864") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.280343") + max_val = float("0.242364") + mean = float("0.00234259") + std = float("0.0137845") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.192937") + max_val = float("0.00891605") + mean = float("-0.0669654") + std = float("0.0334937") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.965058") + max_val = float("1.14639") + mean = float("1.02358") + std = float("0.0295349") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.160794") + max_val = float("55.6905") + mean = float("2.44066") + std = float("4.39657") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.264263") + max_val = float("0.791243") + mean = float("0.04945") + std = float("0.117164") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0585351") + max_val = float("0.0927808") + mean = float("0.000685876") + std = float("0.00518043") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.196647") + max_val = float("0.0641564") + mean = float("-0.0774401") + std = float("0.0414081") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.877502") + max_val = float("1.23877") + mean = float("1.01492") + std = float("0.0523329") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.566361") + max_val = float("908.179") + mean = float("23.4539") + std = float("69.3761") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-4.28743") + max_val = float("1.23655") + mean = float("0.0695418") + std = float("0.513951") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.135687") + max_val = float("0.125448") + mean = float("0.000810476") + std = float("0.00986455") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.23144") + max_val = float("-0.0126862") + mean = float("-0.0851822") + std = float("0.0422856") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.899519") + max_val = float("1.03514") + mean = float("0.976157") + std = float("0.0242367") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" 
+ shape = [192] + dtype = "float32" + min_val = float("0.00813932") + max_val = float("152.929") + mean = float("3.6385") + std = float("11.7244") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.318432") + max_val = float("0.187761") + mean = float("-0.0014536") + std = float("0.0590097") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.566113") + max_val = float("0.374272") + mean = float("-0.00203655") + std = float("0.033229") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.23144") + max_val = float("-0.0126862") + mean = float("-0.0851822") + std = float("0.0422856") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.94299") + max_val = float("1.11066") + mean = float("1.02006") + std = float("0.0312105") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.1289") + max_val = float("1699.95") + mean = float("28.0305") + std = float("128.999") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-1.81173") + max_val = float("0.674127") + mean = float("0.0159434") + std = float("0.239598") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.137869") + max_val = float("0.224919") + mean = float("-0.000155728") + std = float("0.0111564") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.233221") + max_val = float("0.0637355") + mean = float("-0.0967178") + std = float("0.0459598") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.895324") + max_val = float("1.24271") + mean = float("1.01717") + std = float("0.0593265") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.156623") + max_val = float("607.838") + mean = float("54.6281") + std = float("95.2855") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-1.62265") + max_val = float("1.34409") + mean = float("0.005687") + std = float("0.403979") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.235146") + max_val = float("0.2559") + mean = float("-0.000272163") + std = float("0.0222664") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.199795") + max_val = float("0.0207518") + mean = float("-0.0665751") + std = float("0.0322603") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.838902") + max_val = float("1.28467") + mean = float("1.01429") + std = float("0.0597393") + data = None + + +class 
Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.0720484") + max_val = float("1139.6") + mean = float("65.0503") + std = float("132.66") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-7.56675") + max_val = float("2.4842") + mean = float("-0.383258") + std = float("1.53872") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.570014") + max_val = float("1.07892") + mean = float("-0.000455192") + std = float("0.0620441") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [192] + dtype = "float32" + min_val = float("-0.104993") + max_val = float("0.0307948") + mean = float("-0.0245968") + std = float("0.024377") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192] + dtype = "float32" + min_val = float("0.822217") + max_val = float("1.21452") + mean = float("1.00159") + std = float("0.0456234") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("0.108174") + max_val = float("1899.67") + mean = float("46.8811") + std = float("186.384") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("-6.03851") + max_val = float("2.66203") + mean = float("0.0745216") + std = float("0.996719") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.874328") + max_val = float("0.287848") + mean = float("-0.00271476") + std = float("0.0440209") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-0.168512") + max_val = float("0.00330333") + mean = float("-0.0443386") + std = float("0.0224484") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192] + dtype = "float32" + min_val = float("0.849694") + max_val = float("1.19077") + mean = float("0.991767") + std = float("0.0408626") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("6.16524") + max_val = float("124201.0") + mean = float("2816.92") + std = float("9762.79") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("-516.507") + max_val = float("458.567") + mean = float("-22.2018") + std = float("117.069") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.399414") + max_val = float("0.235883") + mean = float("-0.0021386") + std = float("0.0211608") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-85.128") + max_val = float("88.3635") + mean = float("3.58989") + std = float("27.7568") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192] + dtype = "float32" + min_val = float("-31.9346") + max_val = float("19.7089") + mean = float("0.428102") + std = float("7.9625") + 
data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("4.93657") + max_val = float("817684.0") + mean = float("36364.9") + std = float("89921.0") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("-162.528") + max_val = float("314.565") + mean = float("4.32602") + std = float("60.1911") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-27.6825") + max_val = float("13.0687") + mean = float("-0.022105") + std = float("1.26716") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96] + dtype = "float32" + min_val = float("-7.24863") + max_val = float("5.05502") + mean = float("-0.962893") + std = float("2.20937") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("-3.53429") + max_val = float("3.92679") + mean = float("0.710033") + std = float("1.35103") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.0751746") + max_val = float("5214.13") + mean = float("468.433") + std = float("1111.49") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("-45.3576") + max_val = float("94.342") + mean = float("6.78995") + std = float("24.5091") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-19.379") + max_val = float("5.94629") + mean = float("0.0933808") + std = float("0.838543") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96] + dtype = "float32" + min_val = float("-7.24863") + max_val = float("5.05502") + mean = float("-0.962893") + std = float("2.20937") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("-5.32819") + max_val = float("4.14921") + mean = float("0.636519") + std = float("1.50817") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("1.04384") + max_val = float("59220.1") + mean = float("2569.29") + std = float("8056.44") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("-202.998") + max_val = float("339.698") + mean = float("7.6026") + std = float("67.6599") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-7.61018") + max_val = float("2.06596") + mean = float("0.00408673") + std = float("0.270734") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96] + dtype = "float32" + min_val = float("-12.7487") + max_val = float("6.04207") + mean = float("-0.970305") + std = float("3.03245") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("-6.54554") + max_val = float("4.74788") + mean = float("0.995298") + std = float("1.40525") + data = None + + +class 
Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("9.39318") + max_val = float("2190150.0") + mean = float("66154.5") + std = float("240596.0") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("-80.81") + max_val = float("224.697") + mean = float("9.21888") + std = float("37.6238") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-3.09795") + max_val = float("6.23125") + mean = float("0.039322") + std = float("0.339853") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96] + dtype = "float32" + min_val = float("-6.57041") + max_val = float("1.23513") + mean = float("-1.08467") + std = float("1.31742") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("-4.57105") + max_val = float("7.43697") + mean = float("0.761073") + std = float("1.58355") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.122882") + max_val = float("5384610.0") + mean = float("110211.0") + std = float("572085.0") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("-48.2301") + max_val = float("166.551") + mean = float("6.24412") + std = float("24.2542") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-6.4048") + max_val = float("25.2239") + mean = float("0.251097") + std = float("1.3616") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96] + dtype = "float32" + min_val = float("-6.57041") + max_val = float("1.23513") + mean = float("-1.08467") + std = float("1.31742") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("-4.84455") + max_val = float("4.21014") + mean = float("0.997804") + std = float("1.25033") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("8.7857") + max_val = float("11788800.0") + mean = float("727757.0") + std = float("1936730.0") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("-305.931") + max_val = float("314.393") + mean = float("20.0074") + std = float("74.2719") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-4.16137") + max_val = float("5.97165") + mean = float("0.112215") + std = float("0.461145") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96] + dtype = "float32" + min_val = float("-6.38172") + max_val = float("0.373498") + mean = float("-1.33479") + std = float("1.05978") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("-5.79719") + max_val = float("12.1783") + mean = float("0.960027") + std = float("3.2153") + data = None + + +class 
Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("45.1963") + max_val = float("16654400.0") + mean = float("673305.0") + std = float("2003700.0") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("-355.958") + max_val = float("408.085") + mean = float("8.33521") + std = float("84.948") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-5.59114") + max_val = float("6.22526") + mean = float("0.0518284") + std = float("0.488386") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-6.40528") + max_val = float("0.439558") + mean = float("-1.04892") + std = float("1.44668") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("-7.47233") + max_val = float("12.2951") + mean = float("0.896125") + std = float("2.48316") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.227469") + max_val = float("674599.0") + mean = float("19715.9") + std = float("80090.6") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("-91.0419") + max_val = float("72.2002") + mean = float("2.37886") + std = float("17.3356") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-11.5461") + max_val = float("22.862") + mean = float("0.0613126") + std = float("1.12064") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-6.40528") + max_val = float("0.439558") + mean = float("-1.04892") + std = float("1.44668") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("-5.55409") + max_val = float("8.57283") + mean = float("0.767581") + std = float("1.91915") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("36.8885") + max_val = float("1641000.0") + mean = float("154074.0") + std = float("333545.0") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("-148.097") + max_val = float("151.343") + mean = float("8.60596") + std = float("46.4808") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-6.37265") + max_val = float("6.48435") + mean = float("0.0246406") + std = float("0.404267") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-12.1058") + max_val = float("2.65046") + mean = float("-1.35949") + std = float("2.31774") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("-5.35437") + max_val = float("15.6996") + mean = float("0.620018") + std = float("3.80279") + data = None + + +class 
Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("119.771") + max_val = float("2429810.0") + mean = float("193542.0") + std = float("392172.0") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("-183.549") + max_val = float("100.622") + mean = float("1.67747") + std = float("40.3903") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-5.05043") + max_val = float("3.64647") + mean = float("-0.00681032") + std = float("0.486976") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-6.51727") + max_val = float("4.07312") + mean = float("-0.742646") + std = float("1.54691") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("-17.2429") + max_val = float("16.1753") + mean = float("0.496246") + std = float("4.30544") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("33.613") + max_val = float("328553.0") + mean = float("25107.8") + std = float("56405.7") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("-61.9912") + max_val = float("47.6969") + mean = float("-1.08568") + std = float("14.1662") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-22.5977") + max_val = float("9.97906") + mean = float("0.00384249") + std = float("1.00918") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-2.39003") + max_val = float("1.00251") + mean = float("-0.133253") + std = float("0.503822") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("-4.56305") + max_val = float("6.14021") + mean = float("1.38317") + std = float("1.84076") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("5.07934") + max_val = float("29574.8") + mean = float("1194.75") + std = float("3229.19") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("-4.92322") + max_val = float("20.0659") + mean = float("1.71861") + std = float("4.04769") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-3.49471") + max_val = float("1.20362") + mean = float("0.00515059") + std = float("0.189079") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-6.61152") + max_val = float("1.7835") + mean = float("-0.419521") + std = float("1.07353") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("-14.4931") + max_val = float("16.6779") + mean = float("0.0422574") + std = float("2.46703") + data = None + + +class 
Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.28209") + max_val = float("76246.9") + mean = float("6772.73") + std = float("14681.3") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-31.4966") + max_val = float("41.7212") + mean = float("1.71254") + std = float("7.38567") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-3.49758") + max_val = float("2.55718") + mean = float("0.0265243") + std = float("0.348243") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-1.87838") + max_val = float("1.09629") + mean = float("-0.183799") + std = float("0.345718") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384] + dtype = "float32" + min_val = float("-3.0247") + max_val = float("2.74203") + mean = float("0.964417") + std = float("0.845217") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [384] + dtype = "float32" + min_val = float("0.258076") + max_val = float("117117.0") + mean = float("2098.36") + std = float("9309.06") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [384] + dtype = "float32" + min_val = float("-37.8342") + max_val = float("38.526") + mean = float("2.07375") + std = float("5.93701") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-3.75963") + max_val = float("3.45352") + mean = float("-0.00284906") + std = float("0.205711") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.377474") + max_val = float("0.270252") + mean = float("-0.09324") + std = float("0.113903") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192] + dtype = "float32" + min_val = float("0.589898") + max_val = float("2.19572") + mean = float("1.10833") + std = float("0.298081") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("0.0224855") + max_val = float("6349.83") + mean = float("93.5002") + std = float("543.789") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("-0.711802") + max_val = float("1.86246") + mean = float("0.0422734") + std = float("0.271396") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.513946") + max_val = float("1.90858") + mean = float("0.0211941") + std = float("0.126887") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.377474") + max_val = float("0.270252") + mean = float("-0.09324") + std = float("0.113903") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.664841") + max_val = float("2.1235") + mean = float("1.14039") + std = float("0.294828") + data = None + + 
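Each Program_weight_tensor_parameter_* class above carries only summary statistics (shape, dtype, min_val, max_val, mean, std) and sets data = None. As a hedged illustration, assuming nothing beyond the field names shown in these files, metadata in this form could be used to materialize a stand-in tensor for smoke-testing a subgraph; the materialize helper below is a sketch and is not part of the exported weight_meta.py files:

    import numpy as np

    def materialize(meta, seed=0):
        """Build a placeholder array from a weight-meta class.

        `meta` is any object exposing the attributes used in these files:
        shape, dtype, min_val, max_val, mean, std, and optional data.
        """
        if getattr(meta, "data", None) is not None:
            # Real values were exported; just reshape them.
            return np.asarray(meta.data, dtype=meta.dtype).reshape(meta.shape)
        rng = np.random.default_rng(seed)
        # Sample from the recorded mean/std, then clamp to the recorded range.
        sample = rng.normal(meta.mean, meta.std, size=meta.shape)
        sample = np.clip(sample, meta.min_val, meta.max_val)
        return sample.astype(meta.dtype)

    # Hypothetical usage with one of the classes defined above:
    # w = materialize(Program_weight_tensor_parameter_262)
    # assert w.shape == tuple(Program_weight_tensor_parameter_262.shape)
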
+class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("0.447646") + max_val = float("9622.59") + mean = float("555.521") + std = float("1466.93") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192] + dtype = "float32" + min_val = float("-1.42131") + max_val = float("4.63902") + mean = float("0.15401") + std = float("0.797684") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.200786") + max_val = float("0.349522") + mean = float("0.00864049") + std = float("0.0419031") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [192] + dtype = "float32" + min_val = float("-0.376564") + max_val = float("0.0843041") + mean = float("-0.165316") + std = float("0.0820009") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [192] + dtype = "float32" + min_val = float("0.618528") + max_val = float("1.87359") + mean = float("1.00495") + std = float("0.220123") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [192] + dtype = "float32" + min_val = float("4.40675") + max_val = float("11495.1") + mean = float("1075.25") + std = float("1952.54") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [192] + dtype = "float32" + min_val = float("-7.62537") + max_val = float("8.84763") + mean = float("0.605399") + std = float("2.44971") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.610252") + max_val = float("0.46974") + mean = float("0.00434873") + std = float("0.0378261") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [192] + dtype = "float32" + min_val = float("-0.396329") + max_val = float("0.0895731") + mean = float("-0.135227") + std = float("0.0909936") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [192] + dtype = "float32" + min_val = float("0.724836") + max_val = float("1.31254") + mean = float("0.989403") + std = float("0.0899495") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("0.0366499") + max_val = float("2579.48") + mean = float("94.136") + std = float("297.849") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("-1.21638") + max_val = float("3.00263") + mean = float("0.19362") + std = float("0.597692") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-1.50522") + max_val = float("2.56014") + mean = float("0.0167357") + std = float("0.128452") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-0.396329") + max_val = float("0.0895731") + mean = float("-0.135227") + std = float("0.0909936") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192] + dtype = "float32" + min_val = float("0.702365") + max_val = float("1.48726") + mean = float("0.978343") + std = 
float("0.0932404") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("0.341776") + max_val = float("10383.4") + mean = float("546.718") + std = float("1146.4") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + dtype = "float32" + min_val = float("-4.04849") + max_val = float("7.68697") + mean = float("0.626776") + std = float("1.72632") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.496227") + max_val = float("0.685374") + mean = float("0.00417206") + std = float("0.0441635") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-1.64459") + max_val = float("0.267005") + mean = float("-0.224858") + std = float("0.296988") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192] + dtype = "float32" + min_val = float("-1.27679") + max_val = float("2.832") + mean = float("1.02513") + std = float("0.465622") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("5.86811") + max_val = float("186650.0") + mean = float("7734.42") + std = float("21154.4") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("-13.8475") + max_val = float("24.3198") + mean = float("1.04016") + std = float("5.19036") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-2.35083") + max_val = float("1.83667") + mean = float("-0.000570115") + std = float("0.103887") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-1.03838") + max_val = float("0.354784") + mean = float("-0.15507") + std = float("0.21939") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("-1.45219") + max_val = float("2.97832") + mean = float("0.989616") + std = float("0.41137") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.0529003") + max_val = float("11535.5") + mean = float("313.393") + std = float("1268.61") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("-5.17365") + max_val = float("2.80353") + mean = float("-0.0123197") + std = float("0.835049") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-4.9408") + max_val = float("2.27411") + mean = float("0.000442447") + std = float("0.213835") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-1.03838") + max_val = float("0.354784") + mean = float("-0.15507") + std = float("0.21939") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192] + dtype = "float32" + min_val = float("-1.73076") + max_val = float("4.22948") + mean = float("0.992476") + 
std = float("0.513532") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("1.28858") + max_val = float("72884.6") + mean = float("1632.57") + std = float("6195.79") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("-13.4733") + max_val = float("7.33824") + mean = float("0.00635399") + std = float("2.2987") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-2.75576") + max_val = float("1.65718") + mean = float("0.00141057") + std = float("0.0847456") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-1.54041") + max_val = float("1.11627") + mean = float("-0.2651") + std = float("0.234523") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192] + dtype = "float32" + min_val = float("-1.63593") + max_val = float("4.58953") + mean = float("1.00964") + std = float("0.703509") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("2.05421") + max_val = float("104887.0") + mean = float("4244.7") + std = float("11719.7") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("-25.8375") + max_val = float("52.7078") + mean = float("1.02666") + std = float("7.5325") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-2.79383") + max_val = float("1.88844") + mean = float("-0.00365623") + std = float("0.104147") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.871095") + max_val = float("1.01126") + mean = float("-0.217258") + std = float("0.291713") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("-1.96925") + max_val = float("3.64939") + mean = float("1.00405") + std = float("0.722218") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("0.620813") + max_val = float("27316.4") + mean = float("1238.85") + std = float("3200.83") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("-12.1711") + max_val = float("17.836") + mean = float("0.562202") + std = float("3.62838") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-3.17099") + max_val = float("1.29685") + mean = float("-0.0023653") + std = float("0.108022") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-1.10782") + max_val = float("0.300641") + mean = float("-0.173971") + std = float("0.264032") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192] + dtype = "float32" + min_val = float("-1.04342") + max_val = float("2.59244") + mean = float("0.671735") + 
std = float("0.577557") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [192] + dtype = "float32" + min_val = float("0.398919") + max_val = float("62548.8") + mean = float("1538.4") + std = float("5143.75") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("-16.9307") + max_val = float("5.62477") + mean = float("-1.1326") + std = float("2.74663") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-1.82279") + max_val = float("2.47484") + mean = float("0.00901672") + std = float("0.107436") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-1.13258") + max_val = float("0.552573") + mean = float("-0.197087") + std = float("0.242407") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("-2.53691") + max_val = float("1.43108") + mean = float("0.682152") + std = float("0.594817") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.191916") + max_val = float("2843.48") + mean = float("114.134") + std = float("305.508") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-24.6995") + max_val = float("10.5962") + mean = float("-0.476045") + std = float("2.8163") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-1.24276") + max_val = float("0.692904") + mean = float("-0.00886373") + std = float("0.0955856") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [768] + dtype = "float32" + min_val = float("-0.552757") + max_val = float("0.267353") + mean = float("-0.0446819") + std = float("0.0843754") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [768] + dtype = "float32" + min_val = float("-0.652821") + max_val = float("1.76439") + mean = float("1.00747") + std = float("0.251713") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [768] + dtype = "float32" + min_val = float("0.776778") + max_val = float("7746.52") + mean = float("431.196") + std = float("833.862") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [768] + dtype = "float32" + min_val = float("-41.0995") + max_val = float("25.7342") + mean = float("3.18905") + std = float("8.42379") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-1.4092") + max_val = float("1.23702") + mean = float("-0.000496878") + std = float("0.0816775") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.644666") + max_val = float("0.835279") + mean = float("0.00588657") + std = float("0.183308") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.330389") + max_val = float("1.98606") + mean = 
float("1.01943") + std = float("0.246731") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.17996") + max_val = float("722.077") + mean = float("49.876") + std = float("90.3738") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-8.71472") + max_val = float("7.07804") + mean = float("-0.0401495") + std = float("2.17987") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.945908") + max_val = float("0.673776") + mean = float("-0.0019768") + std = float("0.0900507") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.644666") + max_val = float("0.835279") + mean = float("0.00588657") + std = float("0.183308") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.369886") + max_val = float("1.99554") + mean = float("1.01704") + std = float("0.242599") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + min_val = float("1.10774") + max_val = float("5474.29") + mean = float("285.755") + std = float("535.771") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-18.3584") + max_val = float("28.6474") + mean = float("0.580143") + std = float("5.75435") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.300868") + max_val = float("0.28201") + mean = float("0.0005649") + std = float("0.0293953") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.428626") + max_val = float("0.483915") + mean = float("0.00707735") + std = float("0.142579") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.0548827") + max_val = float("2.17216") + mean = float("1.02118") + std = float("0.25626") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("5.82237") + max_val = float("71105.7") + mean = float("966.456") + std = float("3856.37") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-74.904") + max_val = float("45.7392") + mean = float("-4.55969") + std = float("15.2973") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-1.28932") + max_val = float("0.64534") + mean = float("-0.00766119") + std = float("0.0588517") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.417859") + max_val = float("0.567516") + mean = float("0.0358606") + std = float("0.145139") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("-0.692244") + max_val = 
float("2.91955") + mean = float("0.989476") + std = float("0.366097") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("35.6891") + max_val = float("527628.0") + mean = float("15940.6") + std = float("54157.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-1304.56") + max_val = float("1018.05") + mean = float("11.2757") + std = float("221.509") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-1.70677") + max_val = float("0.933125") + mean = float("-0.00166836") + std = float("0.0885713") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0873874") + max_val = float("0.120578") + mean = float("-0.000954519") + std = float("0.0228511") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.237824") + max_val = float("1.58927") + mean = float("0.995856") + std = float("0.165535") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.722547") + max_val = float("6460.57") + mean = float("77.691") + std = float("369.085") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-20.8843") + max_val = float("8.9376") + mean = float("-0.0632021") + std = float("2.59641") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-1.57816") + max_val = float("0.862954") + mean = float("-0.00389417") + std = float("0.0817908") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0873874") + max_val = float("0.120578") + mean = float("-0.000954519") + std = float("0.0228511") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("-0.0396481") + max_val = float("1.62807") + mean = float("1.00169") + std = float("0.153603") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("6.51221") + max_val = float("5904.92") + mean = float("267.165") + std = float("563.207") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-26.0803") + max_val = float("22.4756") + mean = float("-0.244403") + std = float("5.5182") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.331772") + max_val = float("0.275082") + mean = float("-0.00114864") + std = float("0.0288421") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.169758") + max_val = float("0.387136") + mean = float("-0.0135191") + std = float("0.0872435") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384] + dtype = "float32" + min_val 
= float("0.530164") + max_val = float("1.95399") + mean = float("1.02728") + std = float("0.170907") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("34.4183") + max_val = float("28418.4") + mean = float("1029.76") + std = float("2701.61") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-84.2497") + max_val = float("28.1431") + mean = float("1.92494") + std = float("14.5184") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.509431") + max_val = float("0.23397") + mean = float("0.000289444") + std = float("0.0306037") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.17013") + max_val = float("0.363103") + mean = float("0.0145125") + std = float("0.0634369") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.693201") + max_val = float("1.62355") + mean = float("1.01359") + std = float("0.102217") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.931458") + max_val = float("373.701") + mean = float("14.1979") + std = float("26.7127") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-4.77194") + max_val = float("7.91629") + mean = float("0.274654") + std = float("1.32549") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.446355") + max_val = float("0.493553") + mean = float("0.00261486") + std = float("0.0485342") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.17013") + max_val = float("0.363103") + mean = float("0.0145125") + std = float("0.0634369") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.114663") + max_val = float("1.61016") + mean = float("0.991551") + std = float("0.149223") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("7.85773") + max_val = float("1001.22") + mean = float("89.9772") + std = float("108.332") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-13.9198") + max_val = float("19.3761") + mean = float("1.00742") + std = float("4.17813") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.295433") + max_val = float("0.246348") + mean = float("0.00110397") + std = float("0.0234049") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.234135") + max_val = float("0.22812") + mean = float("0.00559602") + std = float("0.0627099") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype = 
"float32" + min_val = float("0.597373") + max_val = float("1.56294") + mean = float("1.00758") + std = float("0.141447") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("16.4499") + max_val = float("639.647") + mean = float("101.474") + std = float("86.4824") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-11.5778") + max_val = float("17.0663") + mean = float("2.01731") + std = float("3.57531") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.234291") + max_val = float("0.284904") + mean = float("0.00264446") + std = float("0.0302637") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.405084") + max_val = float("0.228539") + mean = float("-0.0546357") + std = float("0.106613") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.456281") + max_val = float("1.81298") + mean = float("1.01603") + std = float("0.21616") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("9.59515") + max_val = float("11834.9") + mean = float("612.148") + std = float("918.789") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-247.962") + max_val = float("324.535") + mean = float("-18.2895") + std = float("68.074") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-6.13846") + max_val = float("3.56809") + mean = float("-0.000144253") + std = float("0.0970159") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.108175") + max_val = float("0.143437") + mean = float("-0.0212447") + std = float("0.0285578") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("0.556928") + max_val = float("1.33029") + mean = float("0.984681") + std = float("0.116005") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.748213") + max_val = float("62636.1") + mean = float("1444.21") + std = float("4777.5") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-786.339") + max_val = float("426.68") + mean = float("32.1313") + std = float("104.81") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-7.12945") + max_val = float("13.8328") + mean = float("-0.000268657") + std = float("0.0830857") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("-2.13639e-06") + max_val = float("2.09543e-06") + mean = float("8.64227e-09") + std = float("3.57256e-07") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + 
shape = [1024] + dtype = "float32" + min_val = float("-5.47744") + max_val = float("11.4054") + mean = float("0.768884") + std = float("0.662971") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("-0.427674") + max_val = float("0.266324") + mean = float("-0.000194783") + std = float("0.0566951") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-8.0062") + max_val = float("2.78176") + mean = float("-5.77928e-05") + std = float("0.0557509") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [2048] + dtype = "float32" + min_val = float("-0.251987") + max_val = float("0.302276") + mean = float("0.000512312") + std = float("0.0388834") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-3.07176") + max_val = float("3.57066") + mean = float("-7.0143e-08") + std = float("0.0401237") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [1024] + dtype = "float32" + min_val = float("-0.373534") + max_val = float("0.402167") + mean = float("0.000830289") + std = float("0.0765843") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [1024] + dtype = "float32" + min_val = float("-2.75952") + max_val = float("2.92771") + mean = float("0.758426") + std = float("0.282416") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [1024] + dtype = "float32" + min_val = float("-0.193343") + max_val = float("0.180378") + mean = float("-0.000779537") + std = float("0.0370855") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-1.38014") + max_val = float("1.27733") + mean = float("-9.81423e-07") + std = float("0.0600835") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [1024] + dtype = "float32" + min_val = float("-0.757163") + max_val = float("1.00779") + mean = float("0.000330772") + std = float("0.236465") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [1024] + dtype = "float32" + min_val = float("-0.817671") + max_val = float("2.55534") + mean = float("0.753427") + std = float("0.227651") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [1024] + dtype = "float32" + min_val = float("-0.222994") + max_val = float("0.252568") + mean = float("-0.00120532") + std = float("0.048857") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.870748") + max_val = float("1.17152") + mean = float("-8.51244e-05") + std = float("0.0362422") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [2048] + dtype = "float32" + min_val = float("-0.100256") + max_val = float("0.37127") + mean = float("-0.000329183") + std = float("0.0429112") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-2.07235") + max_val = float("2.28312") + mean = float("-7.78487e-08") + std = float("0.0418402") + data 
= None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [1024] + dtype = "float32" + min_val = float("-1.1905") + max_val = float("1.43001") + mean = float("0.00104808") + std = float("0.299857") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [1024] + dtype = "float32" + min_val = float("-1.34618") + max_val = float("2.57738") + mean = float("0.767945") + std = float("0.300486") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [1024] + dtype = "float32" + min_val = float("-0.119799") + max_val = float("0.13848") + mean = float("-0.000153038") + std = float("0.0233011") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.646145") + max_val = float("0.660627") + mean = float("9.12025e-06") + std = float("0.0406609") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [1024] + dtype = "float32" + min_val = float("-0.376786") + max_val = float("0.283756") + mean = float("0.000858306") + std = float("0.0796666") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [1024] + dtype = "float32" + min_val = float("0.432676") + max_val = float("1.70722") + mean = float("0.772387") + std = float("0.0772969") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [1024] + dtype = "float32" + min_val = float("-0.179557") + max_val = float("0.115916") + mean = float("-0.000489374") + std = float("0.0340337") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.809732") + max_val = float("0.561587") + mean = float("-3.63529e-05") + std = float("0.0201157") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [2048] + dtype = "float32" + min_val = float("-0.0614404") + max_val = float("0.170786") + mean = float("-0.000372604") + std = float("0.0228483") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.408439") + max_val = float("0.609131") + mean = float("-1.65619e-06") + std = float("0.01953") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [1024] + dtype = "float32" + min_val = float("-0.132853") + max_val = float("0.116198") + mean = float("0.000532131") + std = float("0.0355694") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [1024] + dtype = "float32" + min_val = float("0.561748") + max_val = float("0.98164") + mean = float("0.774081") + std = float("0.0415141") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [1024] + dtype = "float32" + min_val = float("-0.0332844") + max_val = float("0.0397268") + mean = float("0.000161912") + std = float("0.0099558") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.123777") + max_val = float("0.124455") + mean = float("1.09109e-05") + std = float("0.0259569") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [1024] + dtype = "float32" + min_val = float("-0.044097") + max_val 
= float("0.0392825") + mean = float("0.000663671") + std = float("0.011796") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [1024] + dtype = "float32" + min_val = float("0.677342") + max_val = float("0.824057") + mean = float("0.772183") + std = float("0.0108191") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [1024] + dtype = "float32" + min_val = float("-0.0510995") + max_val = float("0.0464933") + mean = float("-3.34255e-05") + std = float("0.0157544") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0809581") + max_val = float("0.0763313") + mean = float("-7.16222e-06") + std = float("0.0105854") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [2048] + dtype = "float32" + min_val = float("-0.0301308") + max_val = float("0.0358787") + mean = float("0.000247623") + std = float("0.0140049") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0659331") + max_val = float("0.064373") + mean = float("-1.40326e-06") + std = float("0.0142046") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [1024] + dtype = "float32" + min_val = float("-0.0295178") + max_val = float("0.0281065") + mean = float("1.88381e-05") + std = float("0.00950261") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [1024] + dtype = "float32" + min_val = float("0.644602") + max_val = float("0.848359") + mean = float("0.772298") + std = float("0.0127321") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [1024] + dtype = "float32" + min_val = float("-0.0790784") + max_val = float("0.0651222") + mean = float("-0.000211896") + std = float("0.0151313") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.244804") + max_val = float("0.193716") + mean = float("7.12405e-06") + std = float("0.0253563") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [1024] + dtype = "float32" + min_val = float("-3.75718") + max_val = float("-0.734662") + mean = float("-2.18749") + std = float("0.42871") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [1024] + dtype = "float32" + min_val = float("1.61923") + max_val = float("4.43994") + mean = float("3.0808") + std = float("0.254311") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [1024] + dtype = "float32" + min_val = float("0.00308285") + max_val = float("17013.4") + mean = float("25.4014") + std = float("536.038") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [1024] + dtype = "float32" + min_val = float("-4.71495") + max_val = float("23.0879") + mean = float("0.0230467") + std = float("0.959745") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-1.76134") + max_val = float("1.01155") + mean = float("-0.000333937") + std = float("0.0199517") + data = None + + +class Program_weight_tensor_parameter_425: + name = 
"parameter_425" + shape = [768] + dtype = "float32" + min_val = float("-0.019835") + max_val = float("0.0113714") + mean = float("-0.000741206") + std = float("0.00266306") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0753175") + max_val = float("0.130102") + mean = float("-0.000275394") + std = float("0.00174935") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-1.77417") + max_val = float("0.319995") + mean = float("-0.31086") + std = float("0.291331") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.188386") + max_val = float("1.81753") + mean = float("0.609696") + std = float("0.262622") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("6.15935e-05") + max_val = float("4.62814") + mean = float("0.0557435") + std = float("0.276651") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.324677") + max_val = float("0.744616") + mean = float("0.0422052") + std = float("0.0941939") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0909018") + max_val = float("0.0595466") + mean = float("-0.000571428") + std = float("0.0044968") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-1.77414") + max_val = float("0.320788") + mean = float("-0.31081") + std = float("0.291371") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("0.333443") + max_val = float("2.59782") + mean = float("1.02587") + std = float("0.290167") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00526696") + max_val = float("10.6996") + mean = float("0.380233") + std = float("0.946604") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("-1.59457") + max_val = float("1.73203") + mean = float("0.027563") + std = float("0.392866") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0409816") + max_val = float("0.0679231") + mean = float("-6.68397e-05") + std = float("0.00268875") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.58257") + max_val = float("0.0340648") + mean = float("-1.56873") + std = float("0.416046") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.521872") + max_val = float("1.64448") + mean = float("1.13567") + std = float("0.149452") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.0434595") + max_val = float("137.616") + mean = float("3.86704") + std = float("10.8376") + data = None + + 
+class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-2.41808") + max_val = float("3.09603") + mean = float("0.231089") + std = float("0.62894") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.054677") + max_val = float("0.0563856") + mean = float("-0.000288685") + std = float("0.00307104") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [384] + dtype = "float32" + min_val = float("-1.93947") + max_val = float("0.64501") + mean = float("-0.575028") + std = float("0.358749") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [384] + dtype = "float32" + min_val = float("0.16358") + max_val = float("2.07281") + mean = float("0.562153") + std = float("0.227419") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [384] + dtype = "float32" + min_val = float("0.000134774") + max_val = float("9.62556") + mean = float("0.118304") + std = float("0.563335") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [384] + dtype = "float32" + min_val = float("-0.543874") + max_val = float("0.774253") + mean = float("0.0423873") + std = float("0.0963768") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0892927") + max_val = float("0.0554288") + mean = float("-0.000566883") + std = float("0.00429646") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [384] + dtype = "float32" + min_val = float("-1.93949") + max_val = float("0.646073") + mean = float("-0.575") + std = float("0.35884") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [384] + dtype = "float32" + min_val = float("0.583556") + max_val = float("2.15683") + mean = float("1.08421") + std = float("0.255749") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [384] + dtype = "float32" + min_val = float("0.00783921") + max_val = float("124.799") + mean = float("1.60597") + std = float("7.86036") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [384] + dtype = "float32" + min_val = float("-3.22343") + max_val = float("3.94482") + mean = float("0.0868392") + std = float("0.461958") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0549026") + max_val = float("0.0517603") + mean = float("-0.000166704") + std = float("0.00299084") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-2.39604") + max_val = float("0.84662") + mean = float("-1.40556") + std = float("0.360638") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384] + dtype = "float32" + min_val = float("0.454807") + max_val = float("1.91969") + mean = float("1.1665") + std = float("0.148108") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [384] + dtype = "float32" + min_val = float("0.045164") + max_val = float("100.409") + mean = float("2.60368") + std = 
float("7.74496") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [384] + dtype = "float32" + min_val = float("-1.74076") + max_val = float("2.04402") + mean = float("0.0999496") + std = float("0.364673") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0546141") + max_val = float("0.0505175") + mean = float("-0.000226174") + std = float("0.00309866") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [384] + dtype = "float32" + min_val = float("-1.87645") + max_val = float("0.453192") + mean = float("-0.485461") + std = float("0.3765") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [384] + dtype = "float32" + min_val = float("0.0773511") + max_val = float("2.11967") + mean = float("0.441788") + std = float("0.217778") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [384] + dtype = "float32" + min_val = float("0.000187171") + max_val = float("57.8954") + mean = float("0.270576") + std = float("2.97593") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [384] + dtype = "float32" + min_val = float("-0.586198") + max_val = float("2.71157") + mean = float("0.0563236") + std = float("0.173875") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.192933") + max_val = float("0.0894331") + mean = float("-0.000737868") + std = float("0.00524842") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [384] + dtype = "float32" + min_val = float("-1.8768") + max_val = float("0.453535") + mean = float("-0.485423") + std = float("0.376585") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [384] + dtype = "float32" + min_val = float("0.522396") + max_val = float("2.22479") + mean = float("1.0531") + std = float("0.260023") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [384] + dtype = "float32" + min_val = float("0.0164889") + max_val = float("54.2063") + mean = float("1.7199") + std = float("5.49213") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [384] + dtype = "float32" + min_val = float("-2.79875") + max_val = float("3.1827") + mean = float("0.133888") + std = float("0.520105") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0551643") + max_val = float("0.0464871") + mean = float("-0.000209229") + std = float("0.00312542") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [384] + dtype = "float32" + min_val = float("-2.15679") + max_val = float("0.417994") + mean = float("-1.36728") + std = float("0.27749") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [384] + dtype = "float32" + min_val = float("0.703467") + max_val = float("1.63812") + mean = float("1.14314") + std = float("0.101723") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [384] + dtype = "float32" + min_val = float("0.00919977") + max_val = float("81.76") + 
mean = float("1.34673") + std = float("4.5747") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [384] + dtype = "float32" + min_val = float("-0.575647") + max_val = float("1.06647") + mean = float("0.035897") + std = float("0.208355") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0647421") + max_val = float("0.0467325") + mean = float("-0.000209626") + std = float("0.00304124") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [384] + dtype = "float32" + min_val = float("-2.92359") + max_val = float("1.66367") + mean = float("-0.760505") + std = float("0.643564") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [384] + dtype = "float32" + min_val = float("0.952469") + max_val = float("2.9182") + mean = float("1.86349") + std = float("0.276369") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [384] + dtype = "float32" + min_val = float("0.00173967") + max_val = float("105.064") + mean = float("1.59074") + std = float("7.33152") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [384] + dtype = "float32" + min_val = float("-4.49694") + max_val = float("2.52903") + mean = float("0.12643") + std = float("0.583266") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.154099") + max_val = float("0.233525") + mean = float("-0.000812832") + std = float("0.00998468") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [384] + dtype = "float32" + min_val = float("-2.24744") + max_val = float("0.681285") + mean = float("-0.777265") + std = float("0.47295") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [384] + dtype = "float32" + min_val = float("0.965764") + max_val = float("2.89406") + mean = float("2.09742") + std = float("0.305508") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [384] + dtype = "float32" + min_val = float("0.000269628") + max_val = float("135.338") + mean = float("1.36164") + std = float("8.44144") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [384] + dtype = "float32" + min_val = float("-5.20344") + max_val = float("3.39669") + mean = float("0.0630202") + std = float("0.514197") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.16414") + max_val = float("0.337926") + mean = float("-0.000419817") + std = float("0.00888272") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [768] + dtype = "float32" + min_val = float("-2.40228") + max_val = float("0.644848") + mean = float("-0.908493") + std = float("0.339383") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [768] + dtype = "float32" + min_val = float("0.530034") + max_val = float("1.90745") + mean = float("0.919715") + std = float("0.149307") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [768] + dtype = "float32" + min_val = float("0.0150888") + 
max_val = float("1732.27") + mean = float("29.3911") + std = float("114.584") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [768] + dtype = "float32" + min_val = float("-5.94074") + max_val = float("6.69318") + mean = float("0.211305") + std = float("0.84809") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.218355") + max_val = float("0.28161") + mean = float("-0.000294452") + std = float("0.0064194") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [512] + dtype = "float32" + min_val = float("-3.38939") + max_val = float("1.70918") + mean = float("-1.14983") + std = float("0.514302") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [512] + dtype = "float32" + min_val = float("0.521801") + max_val = float("2.22375") + mean = float("1.23032") + std = float("0.302913") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [512] + dtype = "float32" + min_val = float("0.0145403") + max_val = float("54186.8") + mean = float("348.808") + std = float("2906.74") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [512] + dtype = "float32" + min_val = float("-26.3804") + max_val = float("7.97325") + mean = float("-0.484726") + std = float("2.21214") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-2.03421") + max_val = float("3.42772") + mean = float("0.00117115") + std = float("0.0623002") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [384] + dtype = "float32" + min_val = float("-0.0823458") + max_val = float("0.12967") + mean = float("-0.00608152") + std = float("0.0201329") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.224516") + max_val = float("0.342766") + mean = float("-0.00262916") + std = float("0.0143209") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-1.98142") + max_val = float("0.451263") + mean = float("-0.346403") + std = float("0.335794") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.0373373") + max_val = float("2.155") + mean = float("0.583284") + std = float("0.423173") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.000185619") + max_val = float("192.815") + mean = float("5.7247") + std = float("24.3824") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-7.38842") + max_val = float("1.98841") + mean = float("-0.198995") + std = float("0.943909") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.580025") + max_val = float("2.15283") + mean = float("0.00513099") + std = float("0.0569125") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = 
"float32" + min_val = float("-1.97967") + max_val = float("0.447507") + mean = float("-0.345995") + std = float("0.335506") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("0.371023") + max_val = float("2.70259") + mean = float("1.21044") + std = float("0.504894") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("0.00637081") + max_val = float("1674.61") + mean = float("44.1997") + std = float("166.821") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-18.0063") + max_val = float("7.12971") + mean = float("-0.405877") + std = float("3.08127") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.397216") + max_val = float("0.812883") + mean = float("0.0012172") + std = float("0.0227744") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.89075") + max_val = float("-0.189656") + mean = float("-1.31556") + std = float("0.398965") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.646243") + max_val = float("2.09349") + mean = float("1.18163") + std = float("0.178004") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.471418") + max_val = float("72426.6") + mean = float("1773.09") + std = float("6247.22") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-37.115") + max_val = float("23.7835") + mean = float("0.211371") + std = float("5.61628") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.592805") + max_val = float("0.537074") + mean = float("-0.000652126") + std = float("0.0250761") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-1.94029") + max_val = float("0.531231") + mean = float("-0.278479") + std = float("0.323345") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.0356558") + max_val = float("1.75139") + mean = float("0.446893") + std = float("0.305307") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.000301537") + max_val = float("145.303") + mean = float("1.92494") + std = float("11.1055") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-2.88926") + max_val = float("1.57866") + mean = float("0.0242455") + std = float("0.365955") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.410132") + max_val = float("0.734914") + mean = float("-0.000525589") + std = float("0.0264104") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + 
shape = [192] + dtype = "float32" + min_val = float("-1.94023") + max_val = float("0.548467") + mean = float("-0.277621") + std = float("0.325203") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("0.481263") + max_val = float("2.27039") + mean = float("1.13778") + std = float("0.378291") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("0.0203109") + max_val = float("22478.4") + mean = float("225.796") + std = float("1659.66") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-48.6907") + max_val = float("8.4395") + mean = float("-0.502169") + std = float("4.76403") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.313622") + max_val = float("1.27335") + mean = float("0.00170232") + std = float("0.031685") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-2.50879") + max_val = float("-0.125677") + mean = float("-1.29056") + std = float("0.442856") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.664414") + max_val = float("1.67017") + mean = float("1.19888") + std = float("0.169685") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.287109") + max_val = float("29547.7") + mean = float("678.615") + std = float("2860.07") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-7.98132") + max_val = float("18.8422") + mean = float("0.247665") + std = float("2.39087") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.523885") + max_val = float("0.286298") + mean = float("-0.000893893") + std = float("0.0195481") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-1.7571") + max_val = float("0.488578") + mean = float("-0.261083") + std = float("0.338234") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.00549547") + max_val = float("1.67906") + mean = float("0.350066") + std = float("0.250449") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.000263742") + max_val = float("67.4674") + mean = float("1.69806") + std = float("7.49189") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-1.87036") + max_val = float("1.36173") + mean = float("0.00772141") + std = float("0.331707") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.244631") + max_val = float("0.629934") + mean = float("-0.000125584") + std = float("0.0216092") + data = None + + +class Program_weight_tensor_parameter_529: + 
name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.75687") + max_val = float("0.505733") + mean = float("-0.259961") + std = float("0.340366") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.404833") + max_val = float("1.97797") + mean = float("1.0658") + std = float("0.336907") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("0.0455324") + max_val = float("2298.58") + mean = float("100.245") + std = float("322.102") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-13.5983") + max_val = float("11.9641") + mean = float("0.202523") + std = float("2.83912") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.285037") + max_val = float("0.46813") + mean = float("-0.000664059") + std = float("0.0213735") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-2.49739") + max_val = float("0.140187") + mean = float("-1.24342") + std = float("0.424497") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.651114") + max_val = float("1.77127") + mean = float("1.16792") + std = float("0.169098") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.194451") + max_val = float("4128.54") + mean = float("163.103") + std = float("472.833") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-4.64446") + max_val = float("5.59883") + mean = float("-0.14272") + std = float("1.30511") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.284668") + max_val = float("0.265208") + mean = float("0.000179334") + std = float("0.0149656") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-2.07951") + max_val = float("0.55605") + mean = float("-0.270635") + std = float("0.37776") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("-0.00686515") + max_val = float("0.752308") + mean = float("0.215992") + std = float("0.136444") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("1.77584e-06") + max_val = float("63.1959") + mean = float("0.938992") + std = float("4.94515") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-2.2753") + max_val = float("1.12971") + mean = float("0.0266441") + std = float("0.358626") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.345764") + max_val = float("0.421705") + mean = float("-0.000348753") + std = float("0.0214336") + data = None + + +class 
Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.07965") + max_val = float("0.574226") + mean = float("-0.269426") + std = float("0.379863") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.3928") + max_val = float("1.96281") + mean = float("0.95567") + std = float("0.310273") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.0232563") + max_val = float("879.129") + mean = float("36.8329") + std = float("108.746") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-17.4775") + max_val = float("7.90044") + mean = float("0.231825") + std = float("2.99101") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.242859") + max_val = float("0.437694") + mean = float("-0.000616003") + std = float("0.0203443") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.74147") + max_val = float("-0.0826416") + mean = float("-1.23743") + std = float("0.434625") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.716526") + max_val = float("1.6211") + mean = float("1.15068") + std = float("0.146716") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.199091") + max_val = float("12243.2") + mean = float("232.788") + std = float("982.724") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-2.18119") + max_val = float("8.70009") + mean = float("0.136404") + std = float("1.21707") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.378008") + max_val = float("0.395816") + mean = float("-0.00267221") + std = float("0.0195782") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [192] + dtype = "float32" + min_val = float("-1.21764") + max_val = float("0.465113") + mean = float("-0.2312") + std = float("0.342153") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [192] + dtype = "float32" + min_val = float("-0.0716155") + max_val = float("0.68623") + mean = float("0.188329") + std = float("0.126481") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [192] + dtype = "float32" + min_val = float("7.23985e-05") + max_val = float("104.829") + mean = float("2.37577") + std = float("11.3397") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [192] + dtype = "float32" + min_val = float("-3.9469") + max_val = float("5.6855") + mean = float("-0.0147774") + std = float("0.606084") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.721449") + max_val = float("0.592602") + mean = float("0.000724118") + std = float("0.0311054") + 
data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [192] + dtype = "float32" + min_val = float("-1.22134") + max_val = float("0.47785") + mean = float("-0.230452") + std = float("0.344237") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [192] + dtype = "float32" + min_val = float("0.379249") + max_val = float("1.5641") + mean = float("0.845207") + std = float("0.264352") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [192] + dtype = "float32" + min_val = float("0.0326402") + max_val = float("1699.91") + mean = float("79.7938") + std = float("203.032") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [192] + dtype = "float32" + min_val = float("-22.9383") + max_val = float("10.8789") + mean = float("0.262372") + std = float("3.76359") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.342571") + max_val = float("0.553615") + mean = float("-0.000835491") + std = float("0.0244072") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-2.49862") + max_val = float("-0.1334") + mean = float("-1.25035") + std = float("0.418402") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192] + dtype = "float32" + min_val = float("0.687417") + max_val = float("1.51921") + mean = float("1.12542") + std = float("0.137076") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [192] + dtype = "float32" + min_val = float("0.0234704") + max_val = float("2282.94") + mean = float("135.36") + std = float("361.333") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [192] + dtype = "float32" + min_val = float("-4.30705") + max_val = float("4.49319") + mean = float("-0.0200033") + std = float("0.945224") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.442987") + max_val = float("0.472427") + mean = float("0.00134724") + std = float("0.0235963") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [192] + dtype = "float32" + min_val = float("-1.21821") + max_val = float("0.512429") + mean = float("-0.167023") + std = float("0.295713") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [192] + dtype = "float32" + min_val = float("-0.0291043") + max_val = float("1.56896") + mean = float("0.237779") + std = float("0.215149") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [192] + dtype = "float32" + min_val = float("7.90699e-05") + max_val = float("49.1783") + mean = float("1.93217") + std = float("6.20102") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [192] + dtype = "float32" + min_val = float("-1.39605") + max_val = float("4.77961") + mean = float("0.0637951") + std = float("0.550877") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.645951") + max_val = float("0.375303") + mean = float("-0.00125196") + 
std = float("0.0318071") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [192] + dtype = "float32" + min_val = float("-1.2185") + max_val = float("0.527355") + mean = float("-0.166844") + std = float("0.298285") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [192] + dtype = "float32" + min_val = float("0.309676") + max_val = float("1.56341") + mean = float("0.760182") + std = float("0.23226") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [192] + dtype = "float32" + min_val = float("0.0361632") + max_val = float("858.774") + mean = float("64.0277") + std = float("129.822") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [192] + dtype = "float32" + min_val = float("-18.5822") + max_val = float("19.3904") + mean = float("-0.138431") + std = float("4.06149") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.495237") + max_val = float("0.583184") + mean = float("0.000581124") + std = float("0.0274539") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [192] + dtype = "float32" + min_val = float("-1.88035") + max_val = float("-0.215406") + mean = float("-1.14935") + std = float("0.32495") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [192] + dtype = "float32" + min_val = float("0.788564") + max_val = float("1.75022") + mean = float("1.1191") + std = float("0.142522") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [192] + dtype = "float32" + min_val = float("0.0172999") + max_val = float("311.063") + mean = float("18.5297") + std = float("40.3369") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [192] + dtype = "float32" + min_val = float("-5.09272") + max_val = float("4.68315") + mean = float("0.0566841") + std = float("1.36936") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.220454") + max_val = float("0.337137") + mean = float("-0.000195961") + std = float("0.0169417") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [192] + dtype = "float32" + min_val = float("-2.91697") + max_val = float("1.61235") + mean = float("-0.027192") + std = float("0.752121") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [192] + dtype = "float32" + min_val = float("0.396043") + max_val = float("1.75405") + mean = float("0.900478") + std = float("0.234394") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [192] + dtype = "float32" + min_val = float("0.00833347") + max_val = float("4804.89") + mean = float("67.294") + std = float("384.311") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [192] + dtype = "float32" + min_val = float("-2.08976") + max_val = float("2.12545") + mean = float("-0.027234") + std = float("0.343916") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-1.42325") + max_val = float("3.22093") + mean = 
float("-0.000215879") + std = float("0.0689701") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [192] + dtype = "float32" + min_val = float("-2.96702") + max_val = float("1.70072") + mean = float("0.100509") + std = float("0.666605") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [192] + dtype = "float32" + min_val = float("0.812343") + max_val = float("5.55805") + mean = float("1.90737") + std = float("0.933933") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [192] + dtype = "float32" + min_val = float("0.00143576") + max_val = float("202.609") + mean = float("3.23072") + std = float("18.3131") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [192] + dtype = "float32" + min_val = float("-0.381013") + max_val = float("0.905162") + mean = float("0.0657245") + std = float("0.161239") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.259077") + max_val = float("0.331409") + mean = float("-0.00132013") + std = float("0.0192623") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [384] + dtype = "float32" + min_val = float("-2.92371") + max_val = float("1.31882") + mean = float("-0.301972") + std = float("0.563982") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [384] + dtype = "float32" + min_val = float("0.646526") + max_val = float("2.49291") + mean = float("1.15848") + std = float("0.262959") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [384] + dtype = "float32" + min_val = float("0.13752") + max_val = float("33554.1") + mean = float("441.904") + std = float("2215.12") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [384] + dtype = "float32" + min_val = float("-10.6841") + max_val = float("7.24379") + mean = float("0.245326") + std = float("1.24953") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.420455") + max_val = float("0.352272") + mean = float("-0.00112555") + std = float("0.0151274") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [256] + dtype = "float32" + min_val = float("-2.04434") + max_val = float("1.33309") + mean = float("-0.916794") + std = float("0.542582") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [256] + dtype = "float32" + min_val = float("0.550387") + max_val = float("2.97247") + mean = float("1.34652") + std = float("0.492025") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [256] + dtype = "float32" + min_val = float("0.00440378") + max_val = float("105978.0") + mean = float("456.901") + std = float("6614.54") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [256] + dtype = "float32" + min_val = float("-4.34738") + max_val = float("143.205") + mean = float("1.74977") + std = float("9.28044") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.869804") + max_val = 
float("15.9901") + mean = float("0.00715179") + std = float("0.19203") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [192] + dtype = "float32" + min_val = float("-0.0987519") + max_val = float("0.0663724") + mean = float("-0.0118358") + std = float("0.0235705") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.405807") + max_val = float("0.264014") + mean = float("-0.0102004") + std = float("0.0319322") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-1.91285") + max_val = float("0.546302") + mean = float("-0.205148") + std = float("0.439731") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.0895864") + max_val = float("3.22778") + mean = float("0.637041") + std = float("0.670036") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("6.87535e-05") + max_val = float("66.5661") + mean = float("1.37836") + std = float("7.15906") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-1.39614") + max_val = float("2.38344") + mean = float("0.0320484") + std = float("0.377448") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.24365") + max_val = float("1.18406") + mean = float("-0.000738032") + std = float("0.0621546") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-1.91234") + max_val = float("0.55126") + mean = float("-0.203811") + std = float("0.441223") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.335907") + max_val = float("5.47089") + mean = float("1.09302") + std = float("0.887054") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("0.0187208") + max_val = float("717.249") + mean = float("24.146") + std = float("98.0324") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-8.11887") + max_val = float("10.6872") + mean = float("0.447085") + std = float("1.98274") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.64786") + max_val = float("0.610955") + mean = float("-0.00228054") + std = float("0.0317145") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-2.48733") + max_val = float("-0.0177181") + mean = float("-1.24064") + std = float("0.462674") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.376749") + max_val = float("1.61027") + mean = float("0.92679") + std = float("0.174674") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = 
float("0.458787") + max_val = float("14051.9") + mean = float("581.565") + std = float("2213.73") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-39.2") + max_val = float("92.3483") + mean = float("2.75935") + std = float("16.8796") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.893357") + max_val = float("1.83963") + mean = float("0.00316801") + std = float("0.0516147") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-1.4122") + max_val = float("0.638769") + mean = float("-0.124599") + std = float("0.359237") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.0151502") + max_val = float("1.86528") + mean = float("0.460244") + std = float("0.383531") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("7.42465e-05") + max_val = float("105.345") + mean = float("2.00551") + std = float("11.8642") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-1.33815") + max_val = float("3.52671") + mean = float("0.134428") + std = float("0.582479") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-7.68975") + max_val = float("6.35168") + mean = float("-0.005773") + std = float("0.180518") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-1.41481") + max_val = float("0.679644") + mean = float("-0.122232") + std = float("0.362344") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.363177") + max_val = float("2.32912") + mean = float("0.921647") + std = float("0.446698") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.00667803") + max_val = float("904.461") + mean = float("25.9921") + std = float("124.724") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-1.07626") + max_val = float("17.9598") + mean = float("1.18124") + std = float("2.65473") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.68483") + max_val = float("0.545277") + mean = float("-0.00668389") + std = float("0.0418904") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-3.31429") + max_val = float("0.406389") + mean = float("-1.18503") + std = float("0.560632") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.397342") + max_val = float("1.98469") + mean = float("1.02414") + std = float("0.257601") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = 
float("0.171811") + max_val = float("5118.63") + mean = float("129.006") + std = float("573.108") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-16.5294") + max_val = float("56.8048") + mean = float("0.660576") + std = float("9.1639") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.45496") + max_val = float("1.25223") + mean = float("-0.000778588") + std = float("0.0350886") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-1.25001") + max_val = float("0.639741") + mean = float("-0.0989652") + std = float("0.302867") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("-0.429595") + max_val = float("1.27713") + mean = float("0.306498") + std = float("0.23536") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.000129874") + max_val = float("656.233") + mean = float("8.72779") + std = float("66.7746") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.700466") + max_val = float("6.05986") + mean = float("0.144363") + std = float("0.725059") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-4.5365") + max_val = float("0.985421") + mean = float("-0.00920448") + std = float("0.122988") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-1.25019") + max_val = float("0.698569") + mean = float("-0.0957422") + std = float("0.306901") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("-0.268583") + max_val = float("1.67072") + mean = float("0.716015") + std = float("0.312801") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.0242012") + max_val = float("692.616") + mean = float("37.3314") + std = float("101.342") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-2.63584") + max_val = float("9.84662") + mean = float("0.502801") + std = float("1.7425") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.71585") + max_val = float("1.8677") + mean = float("-0.00331932") + std = float("0.0418535") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-3.58339") + max_val = float("0.283111") + mean = float("-1.13341") + std = float("0.568821") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.448275") + max_val = float("2.19169") + mean = float("1.04969") + std = float("0.242886") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = 
float("0.187804") + max_val = float("13267.2") + mean = float("371.726") + std = float("1684.34") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-67.2311") + max_val = float("84.7225") + mean = float("0.0843976") + std = float("14.2964") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.840055") + max_val = float("0.994105") + mean = float("-0.00223153") + std = float("0.0530689") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-0.896599") + max_val = float("0.53856") + mean = float("-0.157416") + std = float("0.295036") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.00576366") + max_val = float("1.40649") + mean = float("0.338961") + std = float("0.222205") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.000125609") + max_val = float("4.926") + mean = float("0.192034") + std = float("0.563473") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-1.15434") + max_val = float("1.71827") + mean = float("0.0970238") + std = float("0.329713") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.11751") + max_val = float("1.22849") + mean = float("-0.00515414") + std = float("0.0668462") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-0.89747") + max_val = float("0.547756") + mean = float("-0.155531") + std = float("0.300238") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("-0.680114") + max_val = float("1.77938") + mean = float("0.715642") + std = float("0.343738") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.00962529") + max_val = float("117.592") + mean = float("6.28867") + std = float("17.7652") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-8.19355") + max_val = float("8.76688") + mean = float("0.523784") + std = float("1.96973") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.963159") + max_val = float("0.937514") + mean = float("-0.00312149") + std = float("0.0409439") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-2.65947") + max_val = float("0.0663366") + mean = float("-1.07481") + std = float("0.49964") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.494054") + max_val = float("1.73711") + mean = float("1.00358") + std = float("0.201714") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + 
min_val = float("0.0670117") + max_val = float("108.685") + mean = float("11.7244") + std = float("22.7056") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-19.6122") + max_val = float("22.3347") + mean = float("-1.43905") + std = float("6.53676") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.883886") + max_val = float("0.636549") + mean = float("-0.00434231") + std = float("0.0362072") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [96] + dtype = "float32" + min_val = float("-0.971987") + max_val = float("0.722959") + mean = float("-0.144632") + std = float("0.309261") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [96] + dtype = "float32" + min_val = float("-0.272081") + max_val = float("1.61121") + mean = float("0.305009") + std = float("0.272026") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [96] + dtype = "float32" + min_val = float("0.000543445") + max_val = float("31.5598") + mean = float("1.4504") + std = float("3.93834") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [96] + dtype = "float32" + min_val = float("-2.05874") + max_val = float("2.35712") + mean = float("0.153436") + std = float("0.577965") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.77571") + max_val = float("0.727339") + mean = float("-0.012376") + std = float("0.0994884") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [96] + dtype = "float32" + min_val = float("-0.973155") + max_val = float("0.722476") + mean = float("-0.141307") + std = float("0.307416") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [96] + dtype = "float32" + min_val = float("-0.0263993") + max_val = float("2.25472") + mean = float("0.596976") + std = float("0.320417") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [96] + dtype = "float32" + min_val = float("0.0138918") + max_val = float("3176.65") + mean = float("50.1448") + std = float("326.27") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [96] + dtype = "float32" + min_val = float("-11.8577") + max_val = float("18.2503") + mean = float("0.529937") + std = float("2.87528") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.3398") + max_val = float("0.471906") + mean = float("-0.00519289") + std = float("0.054962") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-3.4571") + max_val = float("0.163505") + mean = float("-1.04781") + std = float("0.565484") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96] + dtype = "float32" + min_val = float("0.181655") + max_val = float("2.51355") + mean = float("1.04007") + std = float("0.337096") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + 
min_val = float("0.0450568") + max_val = float("591.311") + mean = float("35.261") + std = float("84.194") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("-53.1661") + max_val = float("29.0565") + mean = float("-0.946483") + std = float("12.2566") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.44326") + max_val = float("0.605019") + mean = float("-0.00302561") + std = float("0.0604205") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-0.979853") + max_val = float("0.506673") + mean = float("-0.105704") + std = float("0.301591") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96] + dtype = "float32" + min_val = float("-0.690202") + max_val = float("1.30922") + mean = float("0.212369") + std = float("0.326455") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [96] + dtype = "float32" + min_val = float("0.00319137") + max_val = float("122.847") + mean = float("4.41125") + std = float("14.7791") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [96] + dtype = "float32" + min_val = float("-1.63063") + max_val = float("0.873704") + mean = float("-0.0808103") + std = float("0.375139") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.21185") + max_val = float("1.0658") + mean = float("0.00855805") + std = float("0.102244") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [96] + dtype = "float32" + min_val = float("-0.929882") + max_val = float("0.508894") + mean = float("-0.0994689") + std = float("0.294231") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [96] + dtype = "float32" + min_val = float("-0.652054") + max_val = float("1.42808") + mean = float("0.46451") + std = float("0.343141") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [96] + dtype = "float32" + min_val = float("0.0905742") + max_val = float("4913.13") + mean = float("111.121") + std = float("522.181") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [96] + dtype = "float32" + min_val = float("-6.23794") + max_val = float("20.563") + mean = float("0.310017") + std = float("2.71574") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.49733") + max_val = float("0.706858") + mean = float("-0.0040344") + std = float("0.0702472") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [96] + dtype = "float32" + min_val = float("-2.44345") + max_val = float("0.44985") + mean = float("-0.86721") + std = float("0.476335") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [96] + dtype = "float32" + min_val = float("0.709681") + max_val = float("2.12223") + mean = float("1.25465") + std = float("0.222513") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [96] + dtype = "float32" + 
min_val = float("0.0168317") + max_val = float("328.239") + mean = float("23.0774") + std = float("55.6159") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [96] + dtype = "float32" + min_val = float("-50.6514") + max_val = float("40.9589") + mean = float("0.723976") + std = float("11.3687") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.849899") + max_val = float("0.852562") + mean = float("-0.000185799") + std = float("0.0645014") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [96] + dtype = "float32" + min_val = float("-3.17223") + max_val = float("1.90963") + mean = float("0.534742") + std = float("0.924546") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [96] + dtype = "float32" + min_val = float("-1.03377") + max_val = float("2.62632") + mean = float("0.492338") + std = float("0.520207") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [96] + dtype = "float32" + min_val = float("0.00977803") + max_val = float("734.582") + mean = float("37.8") + std = float("108.323") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [96] + dtype = "float32" + min_val = float("-7.31017") + max_val = float("14.4237") + mean = float("-0.228157") + std = float("2.72443") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-1.95633") + max_val = float("1.65436") + mean = float("-0.017819") + std = float("0.179031") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [96] + dtype = "float32" + min_val = float("-4.92471") + max_val = float("1.58003") + mean = float("0.384968") + std = float("1.04986") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [96] + dtype = "float32" + min_val = float("0.381893") + max_val = float("6.77746") + mean = float("1.68003") + std = float("1.3141") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [96] + dtype = "float32" + min_val = float("0.0102556") + max_val = float("63.1576") + mean = float("1.94865") + std = float("8.72854") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [96] + dtype = "float32" + min_val = float("-1.81576") + max_val = float("1.31792") + mean = float("0.00314065") + std = float("0.477093") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.476384") + max_val = float("0.704714") + mean = float("-0.00467472") + std = float("0.0415664") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [192] + dtype = "float32" + min_val = float("-2.2747") + max_val = float("1.80222") + mean = float("-0.130603") + std = float("0.756459") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [192] + dtype = "float32" + min_val = float("0.466035") + max_val = float("2.96638") + mean = float("1.07147") + std = float("0.320539") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [192] + dtype = "float32" + 
min_val = float("0.0464299") + max_val = float("1042.66") + mean = float("38.2298") + std = float("121.792") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [192] + dtype = "float32" + min_val = float("-9.56391") + max_val = float("27.4888") + mean = float("0.648476") + std = float("3.1584") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-2.4927") + max_val = float("0.935874") + mean = float("-0.00472509") + std = float("0.0589256") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [128] + dtype = "float32" + min_val = float("-2.8142") + max_val = float("1.94884") + mean = float("-0.734722") + std = float("0.677613") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [128] + dtype = "float32" + min_val = float("-0.229859") + max_val = float("2.99053") + mean = float("0.992786") + std = float("0.396784") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [128] + dtype = "float32" + min_val = float("2.39451") + max_val = float("125679.0") + mean = float("4054.76") + std = float("14301.4") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [128] + dtype = "float32" + min_val = float("-271.424") + max_val = float("158.592") + mean = float("-11.2378") + std = float("49.0082") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-5.10604") + max_val = float("3.19008") + mean = float("-0.0220007") + std = float("0.265626") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [96] + dtype = "float32" + min_val = float("-0.161611") + max_val = float("0.37311") + mean = float("0.00734357") + std = float("0.0624152") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.50057") + max_val = float("1.5759") + mean = float("0.00741555") + std = float("0.12383") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-2.83139") + max_val = float("3.12402") + mean = float("0.00934544") + std = float("0.243272") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-1.103") + max_val = float("1.0228") + mean = float("0.00321872") + std = float("0.113578") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-2.92076") + max_val = float("1.62643") + mean = float("-0.0145573") + std = float("0.236441") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-1.61531") + max_val = float("0.722945") + mean = float("-0.0152863") + std = float("0.122929") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-9.07419") + max_val = float("3.91485") + mean = float("-0.0791542") + std = float("0.545463") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-6.23405") + max_val = float("6.69712") + mean = float("-0.00447183") + std = float("0.74018") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name = "parameter_751" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-7.99359") + max_val = float("7.76186") + mean = float("0.0820972") + std = float("1.09773") + data = None + + +class Program_weight_tensor_parameter_753: + name = "parameter_753" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_754: + name = "parameter_754" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_755: + name = "parameter_755" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_756: + name = "parameter_756" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_757: + name = "parameter_757" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-10.0014") + max_val = float("5.47988") + mean = float("0.0621421") + std = float("0.658574") + data = None + + +class Program_weight_tensor_parameter_758: + name = "parameter_758" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_759: + name = "parameter_759" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_760: + name = "parameter_760" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_761: + name = "parameter_761" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_762: + name = "parameter_762" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-12.9982") + max_val = float("11.0176") + mean = float("-0.0784427") + std = float("1.11962") + data = None + + +class 
Program_weight_tensor_parameter_763: + name = "parameter_763" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_764: + name = "parameter_764" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_765: + name = "parameter_765" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_766: + name = "parameter_766" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_767: + name = "parameter_767" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-59.2065") + max_val = float("44.9563") + mean = float("-0.705466") + std = float("5.28086") + data = None + + +class Program_weight_tensor_parameter_768: + name = "parameter_768" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_769: + name = "parameter_769" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_770: + name = "parameter_770" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_771: + name = "parameter_771" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_772: + name = "parameter_772" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-2.11481") + max_val = float("1.29398") + mean = float("-0.0184801") + std = float("0.17633") + data = None + + +class Program_weight_tensor_parameter_773: + name = "parameter_773" + shape = [96] + dtype = "float32" + min_val = float("-8.09242") + max_val = float("7.25992") + mean = float("0.75209") + std = float("2.67553") + data = None + + +class Program_weight_tensor_parameter_774: + name = "parameter_774" + shape = [96] + dtype = "float32" + min_val = float("-8.02644") + max_val = float("10.6649") + mean = float("2.05862") + std = float("3.39567") + data = None + + +class Program_weight_tensor_parameter_775: + name = "parameter_775" + shape = [96] + dtype = "float32" + min_val = float("544.401") + max_val = float("9795570.0") + mean = float("868764.0") + std = float("1759410.0") + data = None + + +class Program_weight_tensor_parameter_776: + name = "parameter_776" + shape = [96] + dtype = "float32" + min_val = float("-3150.84") + max_val = float("1801.7") + mean = float("-292.814") + std = float("845.923") + data = None + + +class Program_weight_tensor_parameter_777: + name = "parameter_777" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-16.205") + max_val = float("9.48416") + mean = float("-0.195033") + std = float("1.32428") + data = None + + +class Program_weight_tensor_parameter_778: + name = "parameter_778" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_779: + name = "parameter_779" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_780: + name = "parameter_780" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_781: + name = "parameter_781" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_782: + name = "parameter_782" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-39.064") + max_val = float("47.84") + mean = float("0.203101") + std = float("4.43152") + data = None + + +class Program_weight_tensor_parameter_783: + name = "parameter_783" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_784: + name = "parameter_784" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_785: + name = "parameter_785" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_786: + name = "parameter_786" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_787: + name = "parameter_787" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-18.7778") + max_val = float("17.8983") + mean = float("-0.451057") + std = float("2.25346") + data = None + + +class Program_weight_tensor_parameter_788: + name = "parameter_788" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_789: + name = "parameter_789" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_790: + name = "parameter_790" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_791: + name = "parameter_791" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_792: + name = "parameter_792" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-8.84763") + max_val = float("9.81836") + mean = float("-0.13036") + std = float("2.17947") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..98dc188be --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +2f7e328616242ad3e2a3699e8e0aa947042688f032eb97cd618d4e16392e47fe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/input_meta.py new file mode 100644 index 000000000..e095af688 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/input_meta.py @@ -0,0 +1,119 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0470195") + max_val = 
float("0.049223") + mean = float("-1.90915e-05") + std = float("0.024744") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1536] + dtype = "float32" + min_val = float("-0.00375843") + max_val = float("0.00395785") + mean = float("-2.46626e-07") + std = float("0.000720732") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0453469") + max_val = float("0.0447273") + mean = float("-1.88659e-05") + std = float("0.0247423") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1536] + dtype = "float32" + min_val = float("-0.00084003") + max_val = float("0.00111775") + mean = float("-5.56178e-06") + std = float("0.000260705") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0444956") + max_val = float("0.0437462") + mean = float("-1.82329e-05") + std = float("0.0247405") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1536] + dtype = "float32" + min_val = float("-0.000738378") + max_val = float("0.000799289") + mean = float("-1.00275e-06") + std = float("0.000181485") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0439514") + max_val = float("0.0436532") + mean = float("-1.77261e-05") + std = float("0.0247393") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1536] + dtype = "float32" + min_val = float("-0.000790993") + max_val = float("0.000747807") + mean = float("-1.15197e-06") + std = float("0.000148136") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 128, 56, 56] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("5.80439") + mean = float("-0.029314") + std = float("0.382187") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 256, 28, 28] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("5.88435") + mean = float("-0.0801455") + std = float("0.351136") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [2, 512, 14, 14] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("18.5447") + mean = float("0.223738") + std = float("1.02327") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/model.py new file mode 100644 index 000000000..55a1556ff --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/model.py @@ -0,0 +1,3357 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + 
parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + ): + # pd_op.flatten: (2x512x196xf32) <- (2x512x14x14xf32) + flatten_0 = paddle._C_ops.flatten(data_10, 2, 3) + del data_10 + + # pd_op.transpose: (2x196x512xf32) <- (2x512x196xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("14"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), 
paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (14xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="float32") + del full_1 + + # builtin.combine: ([14xf32, 14xf32]) <- (14xf32, 14xf32) + combine_0 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([14x14xf32, 14x14xf32]) <- ([14xf32, 14xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (14x14xf32, 14x14xf32) <- ([14x14xf32, 14x14xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("128"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (128xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_3, full_2, dtype="float32") + del full_0, full_2, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.0078125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_4, float("0"), True) + del arange_1, full_4 + + # pd_op.full: (128xf32) <- () + full_5 = paddle._C_ops.full( + [128], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (128xf32) <- (128xf32, 128xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_5, scale_0) + del full_5, scale_0 + + # pd_op.full: (128xf32) <- () + full_6 = paddle._C_ops.full( + [128], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (128xf32) <- (128xf32, 128xf32) + divide_0 = paddle._C_ops.divide(full_6, elementwise_pow_0) + del elementwise_pow_0, full_6 + + # pd_op.flatten: (196xf32) <- (14x14xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (196x1xf32) <- (196xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_0) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_1 + + # pd_op.unsqueeze: (1x128xf32) <- (128xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_1) + del divide_0 + + # pd_op.matmul: (196x128xf32) <- (196x1xf32, 1x128xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (196xf32) <- (14x14xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (196x1xf32) <- (196xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_0) + del flatten_2, full_int_array_0 + + # pd_op.matmul: (196x128xf32) <- (196x1xf32, 1x128xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (196x128xf32) <- (196x128xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (196x128xf32) <- (196x128xf32) + cos_0 = paddle._C_ops.cos(matmul_0) 
+ del matmul_0 + + # pd_op.sin: (196x128xf32) <- (196x128xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (196x128xf32) <- (196x128xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_13 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_14 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_15 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_16 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_17 = full_7 + + # builtin.combine: ([196x128xf32, 196x128xf32, 196x128xf32, 196x128xf32]) <- (196x128xf32, 196x128xf32, 196x128xf32, 196x128xf32) + combine_1 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (196x512xf32) <- ([196x128xf32, 196x128xf32, 196x128xf32, 196x128xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1 + + # pd_op.unsqueeze: (1x196x512xf32) <- (196x512xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_0, full_int_array_1) + del concat_0 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 1x196x512xf32) + add_0 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [512] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_2 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(add_0, slice_0, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_1 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [0, 0, 4, 128] + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_1 = 
paddle._C_ops.transpose(reshape_0, [0, 2, 1, 3]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_4 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_1, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_3 = paddle._C_ops.matmul(add_0, slice_2, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_2 = paddle._C_ops.transpose(reshape_1, [0, 2, 1, 3]) + del reshape_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_5 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_0, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_0 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_1, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_3 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_3 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.matmul: (2x4x196x196xf32) <- (2x4x196x128xf32, 2x4x196x128xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, 
transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0883883"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_55 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_56 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_57 = full_8 + + # pd_op.scale: (2x4x196x196xf32) <- (2x4x196x196xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (2x4x196x196xf32) <- (2x4x196x196xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_58 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_59 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_60 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.dropout: (2x4x196x196xf32, 2x4x196x196xui8) <- (2x4x196x196xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x196x128xf32) <- (2x4x196x196xf32, 2x4x196x128xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (2x196x4x128xf32) <- (2x4x196x128xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, 0, 512] + + # pd_op.reshape: (2x196x512xf32) <- (2x196x4x128xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_4, full_int_array_6) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_7 = paddle._C_ops.matmul(reshape_3, parameter_214, False, False) + del parameter_214 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_4 = paddle._C_ops.add(matmul_7, parameter_213) + del parameter_213 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_5 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_212, parameter_211, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_211, parameter_212 + + # pd_op.matmul: 
(2x196x2048xf32) <- (2x196x512xf32, 512x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_210, False, False) + del parameter_210 + + # pd_op.add: (2x196x2048xf32) <- (2x196x2048xf32, 2048xf32) + add_6 = paddle._C_ops.add(matmul_8, parameter_209) + del parameter_209 + + # pd_op.gelu: (2x196x2048xf32) <- (2x196x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_6, False) + + # pd_op.dropout: (2x196x2048xf32, 2x196x2048xui8) <- (2x196x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x2048xf32, 2048x512xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_208, False, False) + del parameter_208 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_7 = paddle._C_ops.add(matmul_9, parameter_207) + del parameter_207 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_7, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_7 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_8 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_8, parameter_206, parameter_205, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_205, parameter_206 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 1x196x512xf32) + add_9 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_2, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_3, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_10 = paddle._C_ops.matmul(add_9, slice_6, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_10, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_2, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_3, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_11 = paddle._C_ops.matmul(add_9, slice_8, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_11 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_11, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_6 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del 
reshape_5 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_3, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_3 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_12 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_12, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_7 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (2x4x196x196xf32) <- (2x4x196x128xf32, 2x4x196x128xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (2x4x196x196xf32) <- (2x4x196x196xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (2x4x196x196xf32) <- (2x4x196x196xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (2x4x196x196xf32, 2x4x196x196xui8) <- (2x4x196x196xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x196x128xf32) <- (2x4x196x196xf32, 2x4x196x128xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (2x196x4x128xf32) <- (2x4x196x128xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (2x196x512xf32) <- (2x196x4x128xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_8, full_int_array_6) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_15 = paddle._C_ops.matmul(reshape_7, parameter_204, False, False) + del parameter_204 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_13 = paddle._C_ops.add(matmul_15, parameter_203) + del parameter_203 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_13, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_13 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_14 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_202, parameter_201, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_201, parameter_202 + + # pd_op.matmul: (2x196x2048xf32) <- (2x196x512xf32, 512x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_200, False, False) + del parameter_200 + + # pd_op.add: (2x196x2048xf32) <- (2x196x2048xf32, 2048xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_199) + del parameter_199 + + # pd_op.gelu: (2x196x2048xf32) <- (2x196x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_15, False) + + # pd_op.dropout: 
(2x196x2048xf32, 2x196x2048xui8) <- (2x196x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x2048xf32, 2048x512xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_198, False, False) + del parameter_198 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_16 = paddle._C_ops.add(matmul_17, parameter_197) + del parameter_197 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_16, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_16 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_17 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_17, parameter_196, parameter_195, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_195, parameter_196 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 1x196x512xf32) + add_18 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_4, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_5, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_18 = paddle._C_ops.matmul(add_18, slice_12, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_19 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_19, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_9 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_4, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_5, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_19 = paddle._C_ops.matmul(add_18, slice_14, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_20 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_20, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_10 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_4, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_4 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_5, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_5 + + # pd_op.matmul: 
(2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_21 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_21, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_11 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (2x4x196x196xf32) <- (2x4x196x128xf32, 2x4x196x128xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (2x4x196x196xf32) <- (2x4x196x196xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (2x4x196x196xf32) <- (2x4x196x196xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (2x4x196x196xf32, 2x4x196x196xui8) <- (2x4x196x196xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x196x128xf32) <- (2x4x196x196xf32, 2x4x196x128xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (2x196x4x128xf32) <- (2x4x196x128xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (2x196x512xf32) <- (2x196x4x128xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_12, full_int_array_6) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_23 = paddle._C_ops.matmul(reshape_11, parameter_194, False, False) + del parameter_194 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_193) + del parameter_193 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_22, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_22 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_23 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_192, parameter_191, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_191, parameter_192 + + # pd_op.matmul: (2x196x2048xf32) <- (2x196x512xf32, 512x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_190, False, False) + del parameter_190 + + # pd_op.add: (2x196x2048xf32) <- (2x196x2048xf32, 2048xf32) + add_24 = paddle._C_ops.add(matmul_24, parameter_189) + del parameter_189 + + # pd_op.gelu: (2x196x2048xf32) <- (2x196x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_24, False) + + # pd_op.dropout: (2x196x2048xf32, 2x196x2048xui8) <- (2x196x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x2048xf32, 
2048x512xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_188, False, False) + del parameter_188 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_25 = paddle._C_ops.add(matmul_25, parameter_187) + del parameter_187 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_25 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_26 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_26, parameter_186, parameter_185, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_185, parameter_186 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 1x196x512xf32) + add_27 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_6, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_7, [0], full_int_array_1, full_int_array_2, [1], [] + ) + del full_int_array_1 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_26 = paddle._C_ops.matmul(add_27, slice_18, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_28 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_28, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_13 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_6, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_7, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_27 = paddle._C_ops.matmul(add_27, slice_20, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_29 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_29, full_int_array_3) + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_14 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_6, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_6 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_7, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_7 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_30 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (2x196x4x128xf32) <- (2x196x512xf32, 4xi64) + reshape_14 = 
paddle._C_ops.reshape(add_30, full_int_array_3) + del full_int_array_3 + + # pd_op.transpose: (2x4x196x128xf32) <- (2x196x4x128xf32) + transpose_15 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (2x4x196x196xf32) <- (2x4x196x128xf32, 2x4x196x128xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (2x4x196x196xf32) <- (2x4x196x196xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (2x4x196x196xf32) <- (2x4x196x196xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (2x4x196x196xf32, 2x4x196x196xui8) <- (2x4x196x196xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x196x128xf32) <- (2x4x196x196xf32, 2x4x196x128xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (2x196x4x128xf32) <- (2x4x196x128xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (2x196x512xf32) <- (2x196x4x128xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x512xf32, 512x512xf32) + matmul_31 = paddle._C_ops.matmul(reshape_15, parameter_184, False, False) + del parameter_184 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_31 = paddle._C_ops.add(matmul_31, parameter_183) + del parameter_183 + + # pd_op.dropout: (2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_31, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_31 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_32 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_32, parameter_182, parameter_181, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_181, parameter_182 + + # pd_op.matmul: (2x196x2048xf32) <- (2x196x512xf32, 512x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_180, False, False) + del parameter_180 + + # pd_op.add: (2x196x2048xf32) <- (2x196x2048xf32, 2048xf32) + add_33 = paddle._C_ops.add(matmul_32, parameter_179) + del parameter_179 + + # pd_op.gelu: (2x196x2048xf32) <- (2x196x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_33, False) + + # pd_op.dropout: (2x196x2048xf32, 2x196x2048xui8) <- (2x196x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (2x196x512xf32) <- (2x196x2048xf32, 2048x512xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_178, False, False) + del parameter_178 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 512xf32) + add_34 = paddle._C_ops.add(matmul_33, parameter_177) + del parameter_177 + + # pd_op.dropout: 
(2x196x512xf32, 2x196x512xui8) <- (2x196x512xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_34, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_34 + + # pd_op.add: (2x196x512xf32) <- (2x196x512xf32, 2x196x512xf32) + add_35 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (2x196x512xf32, 2x196xf32, 2x196xf32) <- (2x196x512xf32, 512xf32, 512xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_35, parameter_176, parameter_175, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_175, parameter_176 + + # pd_op.transpose: (2x512x196xf32) <- (2x196x512xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [2, 512, 14, 14] + + # pd_op.reshape: (2x512x14x14xf32) <- (2x512x196xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(transpose_17, full_int_array_7) + del full_int_array_7 + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x512x14x14xf32, 192x512x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + reshape_16, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x512x14x14xf32, 192x512x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_16, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 192x192x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_2, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 192x192x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 192x192x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x192x14x14xf32) <- (2x192x14x14xf32, 2x192x14x14xf32) + add_36 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_4 = paddle._C_ops.swish(add_36) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [5, 5] + + # pd_op.pool2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_4, + full_int_array_8, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [9, 9] + + # pd_op.pool2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_4, + full_int_array_9, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [13, 13] + + # pd_op.pool2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_4, + full_int_array_10, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x192x14x14xf32, 2x192x14x14xf32, 2x192x14x14xf32, 2x192x14x14xf32]) <- (2x192x14x14xf32, 2x192x14x14xf32, 2x192x14x14xf32, 2x192x14x14xf32) + combine_2 = [swish_4, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x768x14x14xf32) <- ([2x192x14x14xf32, 2x192x14x14xf32, 
2x192x14x14xf32, 2x192x14x14xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2 + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x768x14x14xf32, 192x768x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + concat_1, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # builtin.combine: ([2x192x14x14xf32, 2x192x14x14xf32]) <- (2x192x14x14xf32, 2x192x14x14xf32) + combine_3 = [swish_1, swish_5] + + # pd_op.concat: (2x384x14x14xf32) <- ([2x192x14x14xf32, 2x192x14x14xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_3, full_7) + del combine_3 + + # pd_op.conv2d: (2x384x14x14xf32) <- (2x384x14x14xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + concat_2, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x384x14x14xf32) <- (2x384x14x14xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x384x14x14xf32, 192x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_7 = paddle._C_ops.swish(batch_norm__42) + + # pd_op.nearest_interp: (2x192x28x28xf32) <- (2x192x14x14xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_7, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x28x28xf32, 
2x256x-1x-1xf32]) <- (2x192x28x28xf32, 2x256x-1x-1xf32) + combine_4 = [nearest_interp_0, data_9] + del data_9 + + # pd_op.concat: (2x448x28x28xf32) <- ([2x192x28x28xf32, 2x256x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_4, full_7) + del combine_4 + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x448x28x28xf32, 96x448x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + concat_3, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_8 = paddle._C_ops.swish(batch_norm__48) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x448x28x28xf32, 96x448x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + concat_3, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x28x28xf32, 96x96x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x28x28xf32, 96x96x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_10, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_11, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x28x28xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (2x96x28x28xf32) <- (2x96x28x28xf32, 2x96x28x28xf32) + add_37 = paddle._C_ops.add(batch_norm__66, batch_norm__72) + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_11 = paddle._C_ops.swish(add_37) + + # builtin.combine: ([2x96x28x28xf32, 2x96x28x28xf32]) <- (2x96x28x28xf32, 2x96x28x28xf32) + combine_5 = [swish_8, swish_11] + + # pd_op.concat: (2x192x28x28xf32) <- ([2x96x28x28xf32, 2x96x28x28xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_7) + del combine_5 + + # pd_op.conv2d: (2x192x28x28xf32) <- (2x192x28x28xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + concat_4, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x192x28x28xf32) <- (2x192x28x28xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x192x28x28xf32, 96x192x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_13 = 
paddle._C_ops.swish(batch_norm__84) + + # pd_op.nearest_interp: (2x96x56x56xf32) <- (2x96x28x28xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_13, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x96x56x56xf32, 2x128x-1x-1xf32]) <- (2x96x56x56xf32, 2x128x-1x-1xf32) + combine_6 = [nearest_interp_1, data_8] + del data_8 + + # pd_op.concat: (2x224x56x56xf32) <- ([2x96x56x56xf32, 2x128x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_7) + del combine_6 + + # pd_op.conv2d: (2x48x56x56xf32) <- (2x224x56x56xf32, 48x224x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + concat_5, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x48x56x56xf32) <- (2x48x56x56xf32) + swish_14 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x48x56x56xf32) <- (2x224x56x56xf32, 48x224x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + concat_5, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x48x56x56xf32) <- (2x48x56x56xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x48x56x56xf32) <- (2x48x56x56xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_15, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x48x56x56xf32) <- (2x48x56x56xf32) + swish_16 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x48x56x56xf32) <- (2x48x56x56xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_16, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 
1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x48x56x56xf32) <- (2x48x56x56xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_16, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x56x56xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x48x56x56xf32) <- (2x48x56x56xf32, 2x48x56x56xf32) + add_38 = paddle._C_ops.add(batch_norm__108, batch_norm__114) + + # pd_op.swish: (2x48x56x56xf32) <- (2x48x56x56xf32) + swish_17 = paddle._C_ops.swish(add_38) + + # builtin.combine: ([2x48x56x56xf32, 2x48x56x56xf32]) <- (2x48x56x56xf32, 2x48x56x56xf32) + combine_7 = [swish_14, swish_17] + + # pd_op.concat: (2x96x56x56xf32) <- ([2x48x56x56xf32, 2x48x56x56xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_7) + del combine_7 + + # pd_op.conv2d: (2x96x56x56xf32) <- (2x96x56x56xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + concat_6, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x56x56xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x96x56x56xf32) <- (2x96x56x56xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x56x56xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_18, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), 
+ "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # builtin.combine: ([2x96x28x28xf32, 2x192x28x28xf32]) <- (2x96x28x28xf32, 2x192x28x28xf32) + combine_8 = [swish_19, swish_12] + + # pd_op.concat: (2x288x28x28xf32) <- ([2x96x28x28xf32, 2x192x28x28xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_7) + del combine_8 + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x288x28x28xf32, 96x288x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + concat_7, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x288x28x28xf32, 96x288x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + concat_7, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x28x28xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_21, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_22 = paddle._C_ops.swish(batch_norm__144) + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x28x28xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_22, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 
1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (2x96x28x28xf32) <- (2x96x28x28xf32, 96x96x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_22, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x28x28xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.add: (2x96x28x28xf32) <- (2x96x28x28xf32, 2x96x28x28xf32) + add_39 = paddle._C_ops.add(batch_norm__150, batch_norm__156) + + # pd_op.swish: (2x96x28x28xf32) <- (2x96x28x28xf32) + swish_23 = paddle._C_ops.swish(add_39) + + # builtin.combine: ([2x96x28x28xf32, 2x96x28x28xf32]) <- (2x96x28x28xf32, 2x96x28x28xf32) + combine_9 = [swish_20, swish_23] + + # pd_op.concat: (2x192x28x28xf32) <- ([2x96x28x28xf32, 2x96x28x28xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_7) + del combine_9 + + # pd_op.conv2d: (2x192x28x28xf32) <- (2x192x28x28xf32, 192x192x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + concat_8, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x28x28xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (2x192x28x28xf32) <- (2x192x28x28xf32) + swish_24 = paddle._C_ops.swish(batch_norm__162) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x28x28xf32, 192x192x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_24, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, 
+ float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_25 = paddle._C_ops.swish(batch_norm__168) + + # builtin.combine: ([2x192x14x14xf32, 2x384x14x14xf32]) <- (2x192x14x14xf32, 2x384x14x14xf32) + combine_10 = [swish_25, swish_6] + + # pd_op.concat: (2x576x14x14xf32) <- ([2x192x14x14xf32, 2x384x14x14xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_7) + del combine_10 + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x576x14x14xf32, 192x576x1x1xf32) + conv2d_29 = paddle._C_ops.conv2d( + concat_9, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_26 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x576x14x14xf32, 192x576x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + concat_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_27 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 192x192x3x3xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_27, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_28 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 192x192x3x3xf32) + 
conv2d_32 = paddle._C_ops.conv2d( + swish_28, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x192x14x14xf32) <- (2x192x14x14xf32, 192x192x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_28, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x14x14xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x192x14x14xf32) <- (2x192x14x14xf32, 2x192x14x14xf32) + add_40 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + + # pd_op.swish: (2x192x14x14xf32) <- (2x192x14x14xf32) + swish_29 = paddle._C_ops.swish(add_40) + + # builtin.combine: ([2x192x14x14xf32, 2x192x14x14xf32]) <- (2x192x14x14xf32, 2x192x14x14xf32) + combine_11 = [swish_26, swish_29] + + # pd_op.concat: (2x384x14x14xf32) <- ([2x192x14x14xf32, 2x192x14x14xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_7) + del combine_11, full_7 + + # pd_op.conv2d: (2x384x14x14xf32) <- (2x384x14x14xf32, 384x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + concat_10, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x14x14xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x384x14x14xf32) <- (2x384x14x14xf32) + swish_0 = paddle._C_ops.swish(batch_norm__204) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_14, + add_15, + add_17, + add_18, + add_19, + add_2, + add_20, + add_21, + add_23, + add_24, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_32, + add_33, + add_35, + add_36, + add_37, + add_38, + add_39, + add_40, + add_5, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + 
assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + 
batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_1, + concat_10, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_8, + full_9, + full_int_array_10, + full_int_array_2, + full_int_array_4, + full_int_array_5, + full_int_array_8, + full_int_array_9, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_11, + reshape_15, + reshape_16, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + 
swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_4, + swish_5, + swish_6, + swish_7, + swish_8, + swish_9, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_3, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/weight_meta.py new file mode 100644 index 000000000..b881dcc4c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/weight_meta.py @@ -0,0 +1,2323 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.452683") + max_val = float("0.520016") + mean = float("0.179163") + std = float("0.147254") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("0.925767") + max_val = float("1.48759") + mean = float("1.14239") + std = float("0.0749515") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.00208755") + max_val = float("0.027735") + mean = float("0.00782822") + std = float("0.00340613") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.119095") + max_val = float("0.0523664") + mean = float("-0.0195343") + std = float("0.0250626") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0715528") + max_val = float("0.0605753") + mean = float("-0.00029568") + std = float("0.00518431") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.285295") + max_val = float("0.0760154") + mean = float("-0.0576417") + std = float("0.0689916") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192] + dtype = "float32" + min_val = float("0.874212") + max_val = float("1.05715") + mean = float("0.955828") + std = float("0.0252646") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("0.00225351") + max_val = float("0.0237414") + mean = float("0.00687008") + std = float("0.00365467") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192] + dtype = "float32" + min_val = float("-0.0454866") + max_val = float("0.0820983") + mean = float("0.00740881") + std = float("0.0199907") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0396186") + max_val = float("0.0321212") + mean = float("-3.30464e-05") + std = float("0.00353709") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [192] + dtype = "float32" + min_val = float("-0.285295") + max_val = float("0.0760154") + mean = float("-0.0576417") + std = float("0.0689916") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = 
"float32" + min_val = float("0.955327") + max_val = float("1.24547") + mean = float("1.06354") + std = float("0.0502099") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.00377482") + max_val = float("0.0326292") + mean = float("0.0105649") + std = float("0.00450991") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("-0.113549") + max_val = float("0.121467") + mean = float("-0.0250543") + std = float("0.0264393") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0682235") + max_val = float("0.0483376") + mean = float("-0.000130608") + std = float("0.00307911") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.36677") + max_val = float("0.248412") + mean = float("-0.120383") + std = float("0.0930536") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("0.867888") + max_val = float("1.55252") + mean = float("1.03562") + std = float("0.0842523") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("0.00703681") + max_val = float("0.0644315") + mean = float("0.0196034") + std = float("0.00878839") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("-0.19484") + max_val = float("0.0714367") + mean = float("-0.0429705") + std = float("0.0418555") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0761584") + max_val = float("0.0789142") + mean = float("-0.000193399") + std = float("0.00345675") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.271966") + max_val = float("0.101208") + mean = float("-0.0713996") + std = float("0.0674906") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.903154") + max_val = float("1.18503") + mean = float("1.01548") + std = float("0.0495696") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00362619") + max_val = float("0.0199518") + mean = float("0.00696381") + std = float("0.00238296") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.108991") + max_val = float("0.0688881") + mean = float("-0.0176029") + std = float("0.027949") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.114156") + max_val = float("0.120683") + mean = float("-0.000168527") + std = float("0.00483872") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.110844") + max_val = float("0.0101896") + mean = float("-0.0407097") + std = float("0.0213657") + data = None + + +class 
Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192] + dtype = "float32" + min_val = float("0.825233") + max_val = float("1.16377") + mean = float("0.995983") + std = float("0.0348186") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("0.0022236") + max_val = float("0.0164595") + mean = float("0.00567591") + std = float("0.00205699") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("-0.095126") + max_val = float("0.0971043") + mean = float("-0.0231581") + std = float("0.0263706") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0338268") + max_val = float("0.0480022") + mean = float("-0.000263794") + std = float("0.00422819") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.187741") + max_val = float("0.00806793") + mean = float("-0.0695907") + std = float("0.0368951") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.811184") + max_val = float("1.20064") + mean = float("1.03736") + std = float("0.0483567") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("0.00769277") + max_val = float("0.0606136") + mean = float("0.0189169") + std = float("0.00830017") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192] + dtype = "float32" + min_val = float("-0.460153") + max_val = float("0.30482") + mean = float("-0.0474693") + std = float("0.0960008") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0422034") + max_val = float("0.0426075") + mean = float("-5.43208e-05") + std = float("0.00256073") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192] + dtype = "float32" + min_val = float("-0.42846") + max_val = float("1.12423") + mean = float("0.353852") + std = float("0.274871") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [192] + dtype = "float32" + min_val = float("0.675626") + max_val = float("1.64209") + mean = float("1.20601") + std = float("0.162244") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [192] + dtype = "float32" + min_val = float("0.00528809") + max_val = float("0.0687383") + mean = float("0.01885") + std = float("0.00993133") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [192] + dtype = "float32" + min_val = float("-0.151869") + max_val = float("0.0638964") + mean = float("-0.0396031") + std = float("0.0334379") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.130795") + max_val = float("0.129015") + mean = float("-0.000928473") + std = float("0.0104561") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.288894") + max_val = float("0.177005") + mean = float("-0.0686505") + std 
= float("0.0982332") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = "float32" + min_val = float("0.803374") + max_val = float("1.20152") + mean = float("0.918185") + std = float("0.0578714") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("0.00123928") + max_val = float("0.0138615") + mean = float("0.00610989") + std = float("0.00260212") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.0524226") + max_val = float("0.036323") + mean = float("-0.000608934") + std = float("0.017601") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0689645") + max_val = float("0.0432332") + mean = float("-0.000653594") + std = float("0.00712179") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.288894") + max_val = float("0.177005") + mean = float("-0.0686505") + std = float("0.0982332") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("0.915165") + max_val = float("1.34956") + mean = float("1.07114") + std = float("0.0649452") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("0.00571053") + max_val = float("0.0688588") + mean = float("0.0189641") + std = float("0.0116896") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.126333") + max_val = float("0.110075") + mean = float("-0.0352924") + std = float("0.0398269") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0904939") + max_val = float("0.0672733") + mean = float("-0.000306932") + std = float("0.00582517") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("-0.598498") + max_val = float("0.275467") + mean = float("-0.203884") + std = float("0.141426") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("0.748158") + max_val = float("1.49346") + mean = float("1.00128") + std = float("0.110998") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("0.0139572") + max_val = float("0.0636486") + mean = float("0.0284227") + std = float("0.0104632") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.0925774") + max_val = float("0.0222471") + mean = float("-0.0356679") + std = float("0.0241854") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0863241") + max_val = float("0.0874179") + mean = float("-0.000438597") + std = float("0.00661261") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("-0.492263") + max_val = 
float("0.196615") + mean = float("-0.141232") + std = float("0.103923") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = "float32" + min_val = float("0.846758") + max_val = float("1.23368") + mean = float("0.997396") + std = float("0.0775293") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("0.00479462") + max_val = float("0.0381532") + mean = float("0.0125184") + std = float("0.00475146") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96] + dtype = "float32" + min_val = float("-0.152602") + max_val = float("0.0388256") + mean = float("-0.0303863") + std = float("0.0300894") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0816609") + max_val = float("0.0740173") + mean = float("-0.000525186") + std = float("0.00886885") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.13292") + max_val = float("0.0509434") + mean = float("-0.0248071") + std = float("0.0356347") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.816688") + max_val = float("1.39825") + mean = float("0.955372") + std = float("0.0698904") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("0.00533424") + max_val = float("0.0337934") + mean = float("0.014424") + std = float("0.00698288") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-0.08722") + max_val = float("0.0642508") + mean = float("-0.00768992") + std = float("0.0295591") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0877099") + max_val = float("0.117713") + mean = float("-1.27293e-05") + std = float("0.00838081") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.275331") + max_val = float("0.0752847") + mean = float("-0.0911612") + std = float("0.0725468") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.710157") + max_val = float("1.16778") + mean = float("1.0072") + std = float("0.0758174") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("0.0117676") + max_val = float("0.08674") + mean = float("0.0288393") + std = float("0.0139605") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.444541") + max_val = float("0.419967") + mean = float("-0.0364364") + std = float("0.14917") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.059943") + max_val = float("0.073598") + mean = float("-0.000111194") + std = float("0.00604095") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = 
float("-0.725935") + max_val = float("1.73632") + mean = float("0.545777") + std = float("0.560443") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.498479") + max_val = float("1.73847") + mean = float("1.18042") + std = float("0.279398") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("0.00945594") + max_val = float("0.146716") + mean = float("0.0513174") + std = float("0.0306128") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.243464") + max_val = float("0.126544") + mean = float("-0.0573701") + std = float("0.0724117") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.213183") + max_val = float("0.156765") + mean = float("-0.00274567") + std = float("0.025333") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0883355") + max_val = float("0.0815737") + mean = float("-0.00545242") + std = float("0.0185733") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.146307") + max_val = float("0.154115") + mean = float("-0.000208312") + std = float("0.0146409") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.156302") + max_val = float("0.125342") + mean = float("-0.0015162") + std = float("0.0167375") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.204906") + max_val = float("0.164741") + mean = float("-0.00122436") + std = float("0.0211089") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.117432") + max_val = float("0.151868") + mean = float("-0.000117465") + std = float("0.0157767") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.279666") + max_val = float("0.370247") + mean = float("0.0267277") + std = float("0.15292") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.570144") + max_val = float("1.55244") + mean = float("0.838994") + std = float("0.135747") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.0106917") + max_val = float("0.116746") + mean = float("0.0280572") + std = float("0.0157541") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.179085") + max_val = float("0.0732485") + mean = float("-0.0445505") + std = float("0.0494942") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.10276") + max_val = float("0.10613") + mean = float("-0.00112204") + std = float("0.0141691") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.406643") + max_val = float("0.233383") + mean = float("-0.0499853") + std = float("0.110184") + data = None + 
+ +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.710043") + max_val = float("1.49603") + mean = float("0.967599") + std = float("0.110329") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00807578") + max_val = float("0.0673105") + mean = float("0.0231982") + std = float("0.0104913") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.249401") + max_val = float("0.162635") + mean = float("-0.0563985") + std = float("0.0564944") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.108991") + max_val = float("0.114364") + mean = float("-0.00132369") + std = float("0.013176") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.329532") + max_val = float("0.198136") + mean = float("-0.0441618") + std = float("0.0973655") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.675243") + max_val = float("0.97079") + mean = float("0.8646") + std = float("0.051521") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00447042") + max_val = float("0.024291") + mean = float("0.0115165") + std = float("0.00387158") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.054565") + max_val = float("0.0511437") + mean = float("-0.0176592") + std = float("0.0238124") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.051869") + max_val = float("0.0461316") + mean = float("-0.00185641") + std = float("0.0099112") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.329532") + max_val = float("0.198136") + mean = float("-0.0441618") + std = float("0.0973655") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.879109") + max_val = float("1.2327") + mean = float("1.0107") + std = float("0.057628") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.014723") + max_val = float("0.0722036") + mean = float("0.0325906") + std = float("0.0118594") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.119395") + max_val = float("0.0830643") + mean = float("-0.0258219") + std = float("0.037457") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.119863") + max_val = float("0.0984702") + mean = float("-0.000273278") + std = float("0.00746969") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.684479") + max_val = float("0.16417") + mean = 
float("-0.212985") + std = float("0.163583") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.647154") + max_val = float("1.32159") + mean = float("1.0012") + std = float("0.146671") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0179775") + max_val = float("0.0661087") + mean = float("0.0347131") + std = float("0.00967136") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.143275") + max_val = float("0.0811889") + mean = float("-0.0385351") + std = float("0.0427264") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.150701") + max_val = float("0.106934") + mean = float("-0.000497665") + std = float("0.00873237") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.629138") + max_val = float("0.432204") + mean = float("-0.186809") + std = float("0.228477") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.719559") + max_val = float("1.31915") + mean = float("0.942542") + std = float("0.106703") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00837206") + max_val = float("0.0349705") + mean = float("0.016197") + std = float("0.00542727") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.1407") + max_val = float("0.19587") + mean = float("0.0146219") + std = float("0.0530728") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.232396") + max_val = float("0.219119") + mean = float("-0.000588784") + std = float("0.0112715") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.188629") + max_val = float("0.369002") + mean = float("0.0340135") + std = float("0.102985") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("0.811507") + max_val = float("1.15984") + mean = float("0.971652") + std = float("0.0770892") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("0.00601664") + max_val = float("0.0882787") + mean = float("0.0200064") + std = float("0.0128299") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.108405") + max_val = float("0.0731608") + mean = float("0.0090796") + std = float("0.0334477") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.13482") + max_val = float("0.159342") + mean = float("-0.000411627") + std = float("0.010511") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = 
float("-0.428995") + max_val = float("-0.00797578") + mean = float("-0.188467") + std = float("0.0765575") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.677275") + max_val = float("1.20867") + mean = float("0.867383") + std = float("0.078958") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.00937355") + max_val = float("0.0733965") + mean = float("0.0263643") + std = float("0.0137075") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.132564") + max_val = float("0.0683199") + mean = float("-0.0432793") + std = float("0.0351238") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0860279") + max_val = float("0.0677437") + mean = float("-0.00080263") + std = float("0.00868338") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-0.34678") + max_val = float("0.206495") + mean = float("-0.121695") + std = float("0.0631507") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384] + dtype = "float32" + min_val = float("0.85173") + max_val = float("1.38851") + mean = float("1.03319") + std = float("0.0688269") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("0.0064039") + max_val = float("0.0645945") + mean = float("0.0152945") + std = float("0.00674652") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("-0.140946") + max_val = float("0.0817556") + mean = float("-0.0337464") + std = float("0.0351534") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.12987") + max_val = float("0.122136") + mean = float("-0.000529031") + std = float("0.00780451") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.341339") + max_val = float("0.170617") + mean = float("-0.0915373") + std = float("0.0828769") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.847019") + max_val = float("1.44354") + mean = float("1.04166") + std = float("0.11176") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.0589707") + max_val = float("0.788044") + mean = float("0.209067") + std = float("0.100566") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-1.61611") + max_val = float("1.11881") + mean = float("-0.174358") + std = float("0.522036") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.114669") + max_val = float("0.119097") + mean = float("-7.90388e-05") + std = float("0.00682974") + data = None + + +class Program_weight_tensor_parameter_150: + name 
= "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.0804559") + max_val = float("0.148319") + mean = float("0.0130393") + std = float("0.0391702") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.807653") + max_val = float("0.999034") + mean = float("0.918023") + std = float("0.0323846") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00516858") + max_val = float("0.0283583") + mean = float("0.011637") + std = float("0.0033925") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.109864") + max_val = float("0.0562016") + mean = float("-0.051433") + std = float("0.0374144") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0362525") + max_val = float("0.0477394") + mean = float("-0.00128") + std = float("0.00569633") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.0804559") + max_val = float("0.148319") + mean = float("0.0130393") + std = float("0.0391702") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.898348") + max_val = float("1.29981") + mean = float("1.06513") + std = float("0.0769408") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.0238668") + max_val = float("0.130974") + mean = float("0.0550805") + std = float("0.0200109") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.338547") + max_val = float("0.117307") + mean = float("-0.15839") + std = float("0.0724063") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0682399") + max_val = float("0.0632157") + mean = float("-0.000524681") + std = float("0.00430292") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.283115") + max_val = float("0.167559") + mean = float("-0.0797537") + std = float("0.0645401") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.866579") + max_val = float("1.43762") + mean = float("1.07207") + std = float("0.10405") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0281452") + max_val = float("0.156984") + mean = float("0.0618238") + std = float("0.0200347") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.464178") + max_val = float("0.165434") + mean = float("-0.11328") + std = float("0.0836054") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0784709") + max_val = float("0.0511441") + mean = float("-0.000396257") + std = float("0.00462021") 
+ data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.309798") + max_val = float("0.254723") + mean = float("-0.0932638") + std = float("0.0859359") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.917672") + max_val = float("1.34888") + mean = float("1.06023") + std = float("0.0660289") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0152609") + max_val = float("0.0612552") + mean = float("0.0302613") + std = float("0.00813544") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.209367") + max_val = float("0.212695") + mean = float("-0.0124058") + std = float("0.0774831") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.111587") + max_val = float("0.0992661") + mean = float("-0.000756506") + std = float("0.00802745") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.12447") + max_val = float("0.0376972") + mean = float("-0.0214629") + std = float("0.0237295") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.874474") + max_val = float("1.17984") + mean = float("0.966639") + std = float("0.04306") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.0106412") + max_val = float("0.0454647") + mean = float("0.0197839") + std = float("0.00486231") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.18243") + max_val = float("0.128089") + mean = float("-0.00727717") + std = float("0.0570606") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0353564") + max_val = float("0.0647642") + mean = float("-0.000560276") + std = float("0.00645681") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [512] + dtype = "float32" + min_val = float("-4.43684e-10") + max_val = float("5.61231e-10") + mean = float("-1.56352e-11") + std = float("1.50613e-10") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("0.777531") + max_val = float("0.797318") + mean = float("0.791027") + std = float("0.00109601") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("-0.0178149") + max_val = float("0.0177451") + mean = float("0.000116742") + std = float("0.0100297") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0183338") + max_val = float("0.0184838") + mean = float("8.09536e-06") + std = float("0.0100893") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [2048] + dtype = "float32" + min_val = 
float("-0.0350612") + max_val = float("0.0350388") + mean = float("0.000306451") + std = float("0.0203862") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0353432") + max_val = float("0.035415") + mean = float("-2.91299e-05") + std = float("0.0201965") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [512] + dtype = "float32" + min_val = float("-0.00108203") + max_val = float("0.00177371") + mean = float("-4.44406e-06") + std = float("0.00028086") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [512] + dtype = "float32" + min_val = float("0.787457") + max_val = float("0.796241") + mean = float("0.791032") + std = float("0.00095207") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [512] + dtype = "float32" + min_val = float("-0.000754274") + max_val = float("0.00115085") + mean = float("9.31287e-06") + std = float("0.000259457") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0612193") + max_val = float("0.0610019") + mean = float("-6.94994e-05") + std = float("0.0349274") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [512] + dtype = "float32" + min_val = float("-0.000854187") + max_val = float("0.00156477") + mean = float("2.46445e-05") + std = float("0.000279737") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [512] + dtype = "float32" + min_val = float("0.787903") + max_val = float("0.796151") + mean = float("0.791074") + std = float("0.000888105") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [512] + dtype = "float32" + min_val = float("-0.018138") + max_val = float("0.0178488") + mean = float("0.000107374") + std = float("0.0100307") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0182018") + max_val = float("0.0184007") + mean = float("7.2759e-06") + std = float("0.0100895") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [2048] + dtype = "float32" + min_val = float("-0.0349371") + max_val = float("0.035034") + mean = float("0.00031173") + std = float("0.0203865") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0352741") + max_val = float("0.035474") + mean = float("-2.91298e-05") + std = float("0.0201966") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [512] + dtype = "float32" + min_val = float("-0.000903085") + max_val = float("0.00140943") + mean = float("-6.14297e-06") + std = float("0.00028349") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [512] + dtype = "float32" + min_val = float("0.787892") + max_val = float("0.795524") + mean = float("0.791031") + std = float("0.000860907") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [512] + dtype = "float32" + min_val = float("-0.000858256") + max_val = float("0.000858117") + mean = float("1.52034e-05") + std = float("0.000263698") + data = 
None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0609822") + max_val = float("0.0610064") + mean = float("-6.95223e-05") + std = float("0.0349277") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [512] + dtype = "float32" + min_val = float("-0.00117359") + max_val = float("0.00132905") + mean = float("3.0159e-05") + std = float("0.000365555") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [512] + dtype = "float32" + min_val = float("0.788455") + max_val = float("0.795484") + mean = float("0.791109") + std = float("0.000832725") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [512] + dtype = "float32" + min_val = float("-0.018283") + max_val = float("0.0174813") + mean = float("0.000126803") + std = float("0.0100439") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0182138") + max_val = float("0.0186291") + mean = float("8.65347e-06") + std = float("0.0100897") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [2048] + dtype = "float32" + min_val = float("-0.0349256") + max_val = float("0.0350039") + mean = float("0.000314623") + std = float("0.0203859") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0353219") + max_val = float("0.0358783") + mean = float("-2.91298e-05") + std = float("0.0201967") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [512] + dtype = "float32" + min_val = float("-0.0012677") + max_val = float("0.00134733") + mean = float("-8.64835e-06") + std = float("0.000377654") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [512] + dtype = "float32" + min_val = float("0.787729") + max_val = float("0.795337") + mean = float("0.791029") + std = float("0.000824634") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [512] + dtype = "float32" + min_val = float("-0.00109096") + max_val = float("0.00108049") + mean = float("2.34987e-06") + std = float("0.000354852") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0611516") + max_val = float("0.0613365") + mean = float("-6.96102e-05") + std = float("0.0349284") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [512] + dtype = "float32" + min_val = float("-0.00181093") + max_val = float("0.00243437") + mean = float("3.10452e-05") + std = float("0.000582361") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [512] + dtype = "float32" + min_val = float("0.787859") + max_val = float("0.795035") + mean = float("0.79115") + std = float("0.000823911") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [512] + dtype = "float32" + min_val = float("-0.018331") + max_val = float("0.0183401") + mean = float("0.000118541") + std = float("0.0100681") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [2048, 512] + dtype = "float32" + 
min_val = float("-0.0182466") + max_val = float("0.0185713") + mean = float("8.27531e-06") + std = float("0.0100902") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [2048] + dtype = "float32" + min_val = float("-0.0349322") + max_val = float("0.0349492") + mean = float("0.000317605") + std = float("0.0203846") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0353813") + max_val = float("0.0364302") + mean = float("-2.91295e-05") + std = float("0.0201967") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [512] + dtype = "float32" + min_val = float("-0.0019486") + max_val = float("0.00251071") + mean = float("-3.89145e-06") + std = float("0.000606758") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [512] + dtype = "float32" + min_val = float("0.786434") + max_val = float("0.795238") + mean = float("0.791028") + std = float("0.000834391") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [512] + dtype = "float32" + min_val = float("-0.00303392") + max_val = float("0.0052062") + mean = float("5.5219e-06") + std = float("0.00120578") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0615593") + max_val = float("0.0620012") + mean = float("-6.96735e-05") + std = float("0.0349305") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..67b2ea2b5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +1b6cda785ee348e006a4ad4adf240bb805da997f66d069af736c20f1756774c5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py new file mode 100644 index 000000000..0b2b2b7c3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/input_meta.py @@ -0,0 +1,89 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000741473") + std = float("0.0272199") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 13, 1] + dtype = "int32" + data = [ + 3, + 3, + 3, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + ] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 6069] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00963915") + std = float("0.0977048") + data = None + + +class Program_weight_tensor_data_4: + 
name = "data_4" + shape = [2, 13, 4] + dtype = "float32" + max_val = float("544.0") + mean = float("152.282") + std = float("162.227") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("0.0674893") + mean = float("2.9226e-06") + std = float("0.000269731") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 13, 6069] + dtype = "float32" + max_val = float("0.902031") + mean = float("0.000925564") + std = float("0.0166036") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py new file mode 100644 index 000000000..0c1739521 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x6069xi64) <- (2x13x6069xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("13"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x6069xi64) <- (2x6069xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (26xi32) <- (2x13x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (12138xi64) <- (2x6069xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (12138xi32) <- (26xi32, 12138xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 6069] + + # pd_op.reshape: (2x6069xi32) <- (12138xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x6069xb) <- (2x6069xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x6069xi32) <- (2x6069xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x6069xi32) <- (2x6069xb, 2x6069xi32, 2x6069xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (26x4xf32) <- (2x13x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (12138x4xf32) <- 
(26x4xf32, 12138xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 6069, 4] + + # pd_op.reshape: (2x6069x4xf32) <- (12138x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x6069x11xf32) <- (2x6069xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x6069x10xf32) <- (2x6069x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x6069xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x13x1xf32) <- (2x13x6069xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x6069xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x13x1xf32) <- (2x13x6069xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x13x1xf32) <- (2x13x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x13x6069xf32) <- (2x13x6069xf32, 2x13x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x6069xf32) <- (2x13x6069xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x6069x1xf32) <- (2x6069xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (2x6069x10xf32) <- (2x6069x10xf32, 2x6069x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt new 
file mode 100644 index 000000000..550f78f19 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +602c7b60afd4d84c2495c136df21f6a27d2065e01dded183accc3ee4e4ff58af \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/input_meta.py new file mode 100644 index 000000000..6b1fa8f00 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/input_meta.py @@ -0,0 +1,50 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.814948] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.685804] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.706408] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.690983] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.749639] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.8096] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3, 480, 480] + dtype = "float32" + max_val = float("1.0") + mean = float("0.316462") + std = float("0.193005") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/model.py new file mode 100644 index 000000000..1c6a2562f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/model.py @@ -0,0 +1,2120 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + 
parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + ): + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_6, parameter_192, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_6, parameter_192 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_191, + parameter_190, + parameter_189, + parameter_188, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_188, parameter_189, parameter_190, parameter_191 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_187, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_187 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_186, + parameter_185, + parameter_184, + parameter_183, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_183, parameter_184, parameter_185, parameter_186 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_182, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_182 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_181, + parameter_180, + parameter_179, + parameter_178, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_178, parameter_179, parameter_180, parameter_181 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_177, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_177 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_176, + parameter_175, + parameter_174, + parameter_173, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_173, parameter_174, parameter_175, parameter_176 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_172, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_172 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_171, + parameter_170, + parameter_169, + parameter_168, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_168, parameter_169, parameter_170, parameter_171 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_167, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_167 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_166, + parameter_165, + parameter_164, + parameter_163, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_163, parameter_164, parameter_165, parameter_166 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_162, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_162 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_161, + parameter_160, + parameter_159, + parameter_158, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_158, parameter_159, parameter_160, parameter_161 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_157, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_157 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_156, + parameter_155, + parameter_154, + parameter_153, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_153, parameter_154, parameter_155, parameter_156 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_152, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_152 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_151, + parameter_150, + parameter_149, + parameter_148, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_148, parameter_149, parameter_150, parameter_151 + + # pd_op.multiply: (2x24x-1x-1xf32) <- (1xf32, 2x24x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.full: (1xi32) <- 
() + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + combine_0 = [swish_5, add_1] + + # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_5 = full_int_array_0 + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_147, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_147 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_146, full_int_array_1) + del parameter_146 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_1, parameter_145, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_145 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_144, + parameter_143, + parameter_142, + parameter_141, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_141, parameter_142, parameter_143, parameter_144 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_140, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_140 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_139, + parameter_138, + parameter_137, + parameter_136, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_136, parameter_137, parameter_138, parameter_139 + + # 
pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_135, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_135 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_134, + parameter_133, + parameter_132, + parameter_131, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_131, parameter_132, parameter_133, parameter_134 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__66) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_130, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_130 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_129, + parameter_128, + parameter_127, + parameter_126, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_126, parameter_127, parameter_128, parameter_129 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_125, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_125 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_124, + parameter_123, + parameter_122, + parameter_121, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_121, parameter_122, parameter_123, parameter_124 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_13, parameter_120, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_120 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_119, + parameter_118, + parameter_117, + parameter_116, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + 
False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_116, parameter_117, parameter_118, parameter_119 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_115, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_115 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_114, + parameter_113, + parameter_112, + parameter_111, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_111, parameter_112, parameter_113, parameter_114 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, batch_norm__90) + del data_1 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, multiply_2) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(add_3) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_12, swish_14) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_110, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_110 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_109, + parameter_108, + parameter_107, + parameter_106, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_106, parameter_107, parameter_108, parameter_109 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_105, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_105 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_104, + parameter_103, + parameter_102, + parameter_101, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_101, parameter_102, parameter_103, parameter_104 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_100, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_100 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- 
(2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_99, + parameter_98, + parameter_97, + parameter_96, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_96, parameter_97, parameter_98, parameter_99 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del data_2 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_16) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_1 = [swish_11, add_6] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_95, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_95 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_94, full_int_array_1) + del parameter_94 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_4, parameter_93, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_93 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_92, + parameter_91, + parameter_90, + parameter_89, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_89, parameter_90, parameter_91, parameter_92 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_88, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_88 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + 
batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_87, + parameter_86, + parameter_85, + parameter_84, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_84, parameter_85, parameter_86, parameter_87 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_83, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_83 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_82, + parameter_81, + parameter_80, + parameter_79, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_79, parameter_80, parameter_81, parameter_82 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_78 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_77, + parameter_76, + parameter_75, + parameter_74, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_74, parameter_75, parameter_76, parameter_77 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_20, parameter_73, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_72, + parameter_71, + parameter_70, + parameter_69, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_69, parameter_70, parameter_71, parameter_72 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_21, parameter_68, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_68 + + # pd_op.batch_norm_: 
(2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_67, + parameter_66, + parameter_65, + parameter_64, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_64, parameter_65, parameter_66, parameter_67 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_63, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_63 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_62, + parameter_61, + parameter_60, + parameter_59, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_59, parameter_60, parameter_61, parameter_62 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_3, batch_norm__150) + del data_3 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, multiply_5) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_8) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_20, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_58, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_58 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_57, + parameter_56, + parameter_55, + parameter_54, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_54, parameter_55, parameter_56, parameter_57 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_53, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_52, + parameter_51, + parameter_50, + parameter_49, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + 
else (out, None, None, None, None, None), + ) + del parameter_49, parameter_50, parameter_51, parameter_52 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_48, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_48 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_47, + parameter_46, + parameter_45, + parameter_44, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_44, parameter_45, parameter_46, parameter_47 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_4, batch_norm__168) + del data_4 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, multiply_6) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_10) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_24) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_2 = [swish_19, add_11] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_43 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_42, full_int_array_1) + del parameter_42 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_7, parameter_41, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_41 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_40, + parameter_39, + parameter_38, + parameter_37, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_37, parameter_38, parameter_39, parameter_40 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__174) + + # 
pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_36, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_35, + parameter_34, + parameter_33, + parameter_32, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_32, parameter_33, parameter_34, parameter_35 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_31, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_31 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_30, + parameter_29, + parameter_28, + parameter_27, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_27, parameter_28, parameter_29, parameter_30 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_25, + parameter_24, + parameter_23, + parameter_22, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_22, parameter_23, parameter_24, parameter_25 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_20, + parameter_19, + parameter_18, + parameter_17, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_17, parameter_18, parameter_19, parameter_20 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__198) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_29, parameter_16, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_5, batch_norm__210) + del data_5 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, multiply_8) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_28, swish_30) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_3 = [swish_27, add_14] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) + del full_int_array_1, parameter_5 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_9 = 
paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_9, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__216) + del ( + add_0, + add_1, + add_10, + add_11, + add_13, + add_14, + add_3, + add_4, + add_5, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, 
+ batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_2, + concat_3, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_4, + swish_5, + swish_6, + swish_7, + swish_8, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/weight_meta.py new file mode 100644 index 000000000..bbcb5dc50 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/weight_meta.py @@ -0,0 +1,1975 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [512] + dtype = "float32" + min_val = float("-4.7684") + max_val = float("-0.175565") + mean = float("-2.26789") + std = float("0.761953") + data = None + + 
+class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [512] + dtype = "float32" + min_val = float("2.03375") + max_val = float("5.2834") + mean = float("3.71839") + std = float("0.500321") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [512] + dtype = "float32" + min_val = float("0.00781345") + max_val = float("0.0514268") + mean = float("0.017109") + std = float("0.00525651") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [512] + dtype = "float32" + min_val = float("-0.177925") + max_val = float("0.0752219") + mean = float("-0.0717851") + std = float("0.0377539") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109533") + max_val = float("0.130657") + mean = float("-0.00087456") + std = float("0.00816989") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.0280175") + max_val = float("0.00315655") + mean = float("-0.00254859") + std = float("0.00471735") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.222129") + max_val = float("0.18458") + mean = float("-0.000742582") + std = float("0.00434552") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-2.40391") + max_val = float("2.31829") + mean = float("-0.263102") + std = float("0.51507") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192] + dtype = "float32" + min_val = float("0.147308") + max_val = float("2.09628") + mean = float("0.465287") + std = float("0.3238") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [192] + dtype = "float32" + min_val = float("0.000223497") + max_val = float("0.0119736") + mean = float("0.00100871") + std = float("0.00101248") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [192] + dtype = "float32" + min_val = float("-0.0592469") + max_val = float("0.0640928") + mean = float("0.00745475") + std = float("0.0186085") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0401314") + max_val = float("0.0713962") + mean = float("-0.000302538") + std = float("0.0046896") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("-2.40401") + max_val = float("2.31762") + mean = float("-0.263216") + std = float("0.514996") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.6552") + max_val = float("2.82897") + mean = float("1.34109") + std = float("0.430342") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("0.00417632") + max_val = float("0.0735299") + mean = float("0.009869") + std = float("0.00599692") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.257045") + max_val = float("0.290052") + mean = float("0.0166623") + std = 
float("0.0641044") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0896803") + max_val = float("0.0514106") + mean = float("-0.00016858") + std = float("0.00453782") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("-3.2846") + max_val = float("1.08332") + mean = float("-1.32548") + std = float("0.629983") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("0.511801") + max_val = float("1.92243") + mean = float("1.15336") + std = float("0.223177") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192] + dtype = "float32" + min_val = float("0.0179659") + max_val = float("0.313719") + mean = float("0.0470707") + std = float("0.0287421") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-1.14608") + max_val = float("0.151876") + mean = float("-0.134507") + std = float("0.132802") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0696522") + max_val = float("0.0793202") + mean = float("-0.000380521") + std = float("0.00518021") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("-3.81238") + max_val = float("3.66623") + mean = float("-0.649936") + std = float("0.909196") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("0.692753") + max_val = float("4.20685") + mean = float("1.51581") + std = float("0.440915") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192] + dtype = "float32" + min_val = float("0.00440514") + max_val = float("0.0383893") + mean = float("0.0111392") + std = float("0.00471873") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.14241") + max_val = float("0.16728") + mean = float("0.0374833") + std = float("0.0360002") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.110642") + max_val = float("0.0914116") + mean = float("-0.00138469") + std = float("0.00922725") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("-2.92381") + max_val = float("0.905207") + mean = float("-0.416961") + std = float("0.669044") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("0.782064") + max_val = float("3.25351") + mean = float("1.44788") + std = float("0.40939") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("0.00243459") + max_val = float("0.00745952") + mean = float("0.00393691") + std = float("0.00103292") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.0577181") + max_val = float("0.0588189") + mean 
= float("0.0137332") + std = float("0.0220783") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0702044") + max_val = float("0.0981504") + mean = float("-0.000543738") + std = float("0.00741051") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-2.7622") + max_val = float("1.15725") + mean = float("-0.674252") + std = float("0.494382") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("0.436734") + max_val = float("1.90909") + mean = float("0.859705") + std = float("0.233984") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("0.00724101") + max_val = float("0.0589479") + mean = float("0.0165906") + std = float("0.00632963") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.510919") + max_val = float("0.313855") + mean = float("0.0145988") + std = float("0.0915292") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0732695") + max_val = float("0.0636209") + mean = float("-0.000162932") + std = float("0.00429878") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256] + dtype = "float32" + min_val = float("-2.80583") + max_val = float("1.38972") + mean = float("-0.942672") + std = float("0.616162") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256] + dtype = "float32" + min_val = float("0.390795") + max_val = float("1.71259") + mean = float("0.927819") + std = float("0.167042") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256] + dtype = "float32" + min_val = float("0.00138672") + max_val = float("0.00613532") + mean = float("0.00320228") + std = float("0.000914529") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256] + dtype = "float32" + min_val = float("-0.219205") + max_val = float("0.191887") + mean = float("-0.051187") + std = float("0.0691612") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.180777") + max_val = float("0.171873") + mean = float("-0.00107654") + std = float("0.0143585") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [192] + dtype = "float32" + min_val = float("-0.0148095") + max_val = float("0.000185925") + mean = float("-0.00625529") + std = float("0.00388123") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.194557") + max_val = float("0.272117") + mean = float("-0.00452407") + std = float("0.0108287") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96] + dtype = "float32" + min_val = float("-2.29623") + max_val = float("0.84744") + mean = float("-0.061664") + std = float("0.525458") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = 
float("-0.119028") + max_val = float("2.24351") + mean = float("0.320993") + std = float("0.35019") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("1.83319e-10") + max_val = float("0.00303052") + mean = float("0.000677801") + std = float("0.000447248") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("-0.0488795") + max_val = float("0.0695594") + mean = float("0.00490942") + std = float("0.0190257") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0465584") + max_val = float("0.0804484") + mean = float("-0.000322433") + std = float("0.006747") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96] + dtype = "float32" + min_val = float("-2.29626") + max_val = float("0.848892") + mean = float("-0.0610437") + std = float("0.525854") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("0.467882") + max_val = float("3.28353") + mean = float("1.28238") + std = float("0.627065") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("0.0032021") + max_val = float("0.0408694") + mean = float("0.0176397") + std = float("0.00727882") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("-0.170769") + max_val = float("0.159259") + mean = float("0.0188615") + std = float("0.0641774") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.125748") + max_val = float("0.0961584") + mean = float("-0.000305803") + std = float("0.00769471") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96] + dtype = "float32" + min_val = float("-2.80484") + max_val = float("1.43502") + mean = float("-1.03868") + std = float("0.701443") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("0.367755") + max_val = float("2.00427") + mean = float("1.06663") + std = float("0.229304") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = "float32" + min_val = float("0.0214295") + max_val = float("0.14789") + mean = float("0.0558744") + std = float("0.0186924") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("-2.01902") + max_val = float("1.05278") + mean = float("-0.183407") + std = float("0.350024") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0550323") + max_val = float("0.0782782") + mean = float("-0.000581257") + std = float("0.00844085") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96] + dtype = "float32" + min_val = float("-2.54212") + max_val = float("0.880687") + mean = float("-0.00372463") + std = float("0.509016") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = 
"float32" + min_val = float("-0.0965963") + max_val = float("3.24141") + mean = float("0.305728") + std = float("0.399008") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.000262614") + max_val = float("0.016379") + mean = float("0.00184961") + std = float("0.00222075") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("-0.0509296") + max_val = float("0.0858967") + mean = float("0.0128051") + std = float("0.0243828") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.114929") + max_val = float("0.102275") + mean = float("-0.00112332") + std = float("0.00855384") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96] + dtype = "float32" + min_val = float("-2.54215") + max_val = float("0.882154") + mean = float("-0.00332994") + std = float("0.509446") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("0.385235") + max_val = float("2.98414") + mean = float("0.918566") + std = float("0.401453") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.0122137") + max_val = float("0.062811") + mean = float("0.0283652") + std = float("0.00940543") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("-0.175086") + max_val = float("0.219576") + mean = float("0.0388326") + std = float("0.0675513") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0699125") + max_val = float("0.0583475") + mean = float("-0.000591747") + std = float("0.00783073") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96] + dtype = "float32" + min_val = float("-2.0651") + max_val = float("1.50717") + mean = float("-0.859382") + std = float("0.652877") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("0.444271") + max_val = float("1.97568") + mean = float("1.08764") + std = float("0.245813") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.00764485") + max_val = float("0.0656173") + mean = float("0.0233303") + std = float("0.00898267") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("-0.870251") + max_val = float("0.155258") + mean = float("-0.08095") + std = float("0.138751") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.138808") + max_val = float("0.140323") + mean = float("-0.000446325") + std = float("0.00880798") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96] + dtype = "float32" + min_val = float("-1.50102") + max_val = float("1.85499") + mean = float("0.0859838") + std = float("0.86653") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape 
= [96] + dtype = "float32" + min_val = float("0.284081") + max_val = float("1.33672") + mean = float("0.688997") + std = float("0.267224") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [96] + dtype = "float32" + min_val = float("0.0102049") + max_val = float("0.0679824") + mean = float("0.0270556") + std = float("0.0119495") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [96] + dtype = "float32" + min_val = float("-0.412328") + max_val = float("0.286398") + mean = float("-0.0389779") + std = float("0.125684") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.167969") + max_val = float("0.175471") + mean = float("-0.00107338") + std = float("0.0148063") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [96] + dtype = "float32" + min_val = float("-2.53701") + max_val = float("1.68017") + mean = float("0.398128") + std = float("0.701783") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [96] + dtype = "float32" + min_val = float("0.397613") + max_val = float("4.70134") + mean = float("1.36973") + std = float("0.963852") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [96] + dtype = "float32" + min_val = float("0.00616655") + max_val = float("0.108706") + mean = float("0.0240847") + std = float("0.0129944") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [96] + dtype = "float32" + min_val = float("-0.261612") + max_val = float("0.282834") + mean = float("0.00414113") + std = float("0.121506") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.105657") + max_val = float("0.151337") + mean = float("-0.000397048") + std = float("0.0151535") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192] + dtype = "float32" + min_val = float("-4.5847") + max_val = float("2.01272") + mean = float("-0.0602424") + std = float("0.877135") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("0.538202") + max_val = float("4.32625") + mean = float("1.03791") + std = float("0.431062") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.00694844") + max_val = float("0.0895498") + mean = float("0.0223027") + std = float("0.0128266") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("-0.350724") + max_val = float("0.282166") + mean = float("0.00521238") + std = float("0.108235") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.113976") + max_val = float("0.126205") + mean = float("-0.000242846") + std = float("0.00778871") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [128] + dtype = "float32" + min_val = float("-2.16466") + max_val = float("1.44162") + mean = float("-0.613418") + std = float("0.632513") + data = None + + +class Program_weight_tensor_parameter_90: + name = 
"parameter_90" + shape = [128] + dtype = "float32" + min_val = float("0.322913") + max_val = float("2.19318") + mean = float("0.786634") + std = float("0.214289") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [128] + dtype = "float32" + min_val = float("0.00206281") + max_val = float("0.0167294") + mean = float("0.00554479") + std = float("0.00263892") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [128] + dtype = "float32" + min_val = float("-0.321016") + max_val = float("0.459228") + mean = float("-0.0782682") + std = float("0.118683") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.212864") + max_val = float("0.22012") + mean = float("-0.0016944") + std = float("0.0257433") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [96] + dtype = "float32" + min_val = float("-0.0230382") + max_val = float("0.0047538") + mean = float("-0.00783544") + std = float("0.00711077") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.31077") + max_val = float("0.228926") + mean = float("-0.0057098") + std = float("0.0189347") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0667704") + max_val = float("0.0923849") + mean = float("-0.00177062") + std = float("0.0140855") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.101548") + max_val = float("0.0931938") + mean = float("-0.000616131") + std = float("0.0138816") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [48] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0875293") + max_val = float("0.138099") + mean = float("-0.00085434") + std = float("0.0146977") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0682765") + max_val = float("0.106851") + mean = float("-0.00265824") + std = float("0.0168496") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0753598") + max_val = float("0.0722024") + mean = float("-0.00104566") + std = float("0.0133656") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.136967") + max_val = float("0.0953467") + mean = float("-0.000974076") + std = float("0.0154503") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [48] + dtype = "float32" + min_val = float("0") 
+ max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.167198") + max_val = float("0.158575") + mean = float("-0.00217736") + std = float("0.0255494") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.173509") + max_val = float("0.254309") + mean = float("0.000938672") + std = float("0.0275195") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("-3.42052") + max_val = float("3.81379") + mean = float("0.316598") + std = float("1.1982") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.528309") + max_val = float("5.52433") + mean = float("1.04466") + std = float("0.547027") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("0.0202921") + max_val = float("0.254641") + mean = float("0.0500099") + std = float("0.0343005") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96] + dtype = "float32" + min_val = float("-0.774569") + max_val = float("0.605062") + mean = float("-0.0355999") + std = float("0.184504") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.131442") + max_val = float("0.137659") + mean = float("-0.000181925") + std = float("0.013913") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [64] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.206043") + max_val = float("0.271847") + mean = float("-0.00145344") + std = float("0.0390376") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.192206") + max_val = float("0.171418") + mean = float("-0.0146261") + std = float("0.029449") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.155915") + max_val = float("0.129377") + mean = float("0.00162577") + std = float("0.0327822") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.150092") + max_val = float("0.116282") + mean = float("-0.00114223") + std = float("0.0238489") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.183793") + max_val = float("0.209257") + mean = float("-0.000668016") + std = float("0.0269376") + data = None + + +class 
Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.180287") + max_val = float("0.188822") + mean = float("-0.0027093") + std = float("0.0424717") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.207794") + max_val = float("0.188844") + mean = float("-0.000951303") + std = float("0.0430266") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.177728") + max_val = float("0.135622") + mean = float("6.35848e-05") + std = float("0.0231614") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [32, 16, 3, 
3] + dtype = "float32" + min_val = float("-0.258409") + max_val = float("0.268148") + mean = float("-0.000591489") + std = float("0.0389936") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.339065") + max_val = float("0.330435") + mean = float("0.000302612") + std = float("0.0524466") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.267419") + max_val = float("0.355597") + mean = float("-0.0023263") + std = float("0.0773249") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..f7e774060 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +b3c06a5ff7d63f2fddf054f6dc5423b30986582d3ae6be2b7b5c8465e31f6fac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/input_meta.py new file mode 100644 index 000000000..cb20407fd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 9261, 10] + dtype = "float32" + min_val = float("2.43778e-11") + max_val = float("0.940314") + mean = float("0.00863495") + std = float("0.0285493") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 9261] + dtype = "int32" + min_val = 0 + max_val = 10 + data = 
None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 9261, 10] + dtype = "float32" + max_val = float("0.968205") + mean = float("0.00099555") + std = float("0.0245513") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/model.py new file mode 100644 index 000000000..604867124 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x9261x11xf32) <- (2x9261xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (2x9261x10xf32) <- (2x9261x11xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (2x9261x10xf32) <- (2x9261x10xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x9261x10xf32) <- (2x9261x10xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x9261x10xf32) <- (2x9261x10xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x10xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x10xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x10xf32) + add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x10xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x10xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (2x9261x10xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (2x9261x10xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 
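+
+
+# Editor's note (illustrative smoke test, not part of the captured program):
+# the forward above computes a varifocal-style classification loss — the
+# one-hot labels are sliced to drop the last (background) column, a weight of
+# 0.75 * pred**2 * (1 - onehot) + target * onehot rescales the element-wise
+# BCE, and the result is normalised by the clipped sum of the target scores.
+# The shapes and argument order below follow input_meta.py; the random values
+# are assumptions, not captured data.
+if __name__ == "__main__":
+    net = GraphModule()
+    pred = paddle.nn.functional.sigmoid(paddle.randn([2, 9261, 10]))  # data_0
+    labels = paddle.randint(0, 11, [2, 9261], dtype="int32")          # data_1
+    target = paddle.rand([2, 9261, 10])                               # data_2
+    loss = net(pred, labels, target)
+    print(float(loss))  # scalar float32 loss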
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..26ca02140 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +8a2815233af5ac01ede0f5b0291f7fbde332769a8e8425665222d469c075cedc \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/input_meta.py new file mode 100644 index 000000000..ac689aa1f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 4725] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 4725, 4] + dtype = "float32" + min_val = float("-2.92829") + max_val = float("63.2116") + mean = float("26.07") + std = float("17.2826") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4725, 4] + dtype = "float32" + max_val = float("59.8477") + mean = float("18.4942") + std = float("15.258") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 4725, 10] + dtype = "float32" + max_val = float("0.883773") + mean = float("0.000917971") + std = float("0.0218008") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [86.7483] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 4725, 40] + dtype = "float32" + min_val = float("-5.17301") + max_val = float("9.53964") + mean = float("0.791038") + std = float("2.0561") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4725, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("59.5") + mean = float("26.0714") + std = float("17.1813") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/model.py new file mode 100644 index 000000000..ff2e6d80a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/model.py @@ -0,0 +1,514 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (2x-1xi32) <- (2x-1xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = 
full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (2x-1xf32) <- (2x-1x10xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: 
(-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 40] + + # pd_op.tile: (2x-1x40xi32) <- (2x-1x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (2x-1x40xb) <- (2x-1x40xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (2x-1x40xf32, 2x-1x40xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 10] + + # pd_op.reshape: (-1x4x10xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("-2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("6.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_7, full_8) + del concat_0, full_7, full_8 + + # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # 
pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("2"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x10xf32, -1x4x1xf32) <- (-1x4x10xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("2"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x10xf32, -1x4x1xf32) <- (-1x4x10xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + 
split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..ef3c2f741 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +6d51865e3c32cbca7e4a68c8f2e502f2b509535978ab0d3bf37be09133954c81 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/input_meta.py new file mode 100644 index 000000000..38461ba0b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.0] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [0.0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [5.06752e-09] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/model.py new file mode 100644 index 000000000..850af692b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0, full_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, 
full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..5cab9badf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +989b2703fbe877de377ed3188931f22704f691b68378de50cfb501eb2585215a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/input_meta.py new file mode 100644 index 000000000..b2d7a0d9b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 384, 21, 21] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("7.78725") + mean = float("0.342055") + std = float("0.701076") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 192, 42, 42] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.8889") + mean = float("0.468477") + std = float("0.805834") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 96, 84, 84] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("14.2506") + mean = float("0.591911") + std = float("0.841758") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/model.py new file mode 100644 index 000000000..400943fc1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) 
<- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("21"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (21xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (21xf32) <- (21xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (21xf32) <- (21xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (21xf32) <- (21xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([21xf32, 21xf32]) <- (21xf32, 21xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([21x21xf32, 21x21xf32]) <- ([21xf32, 21xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (21x21xf32, 21x21xf32) <- ([21x21xf32, 21x21xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (21x21xf32) <- (21x21xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (21x21xf32) <- (21x21xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (21x21xf32) <- (21x21xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) + + # pd_op.scale: (21x21xf32) <- (21x21xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([21x21xf32, 21x21xf32, 21x21xf32, 21x21xf32]) <- (21x21xf32, 21x21xf32, 21x21xf32, 21x21xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (21x21x4xf32) <- ([21x21xf32, 21x21xf32, 21x21xf32, 21x21xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([21x21xf32, 21x21xf32]) <- (21x21xf32, 21x21xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (21x21x2xf32) <- ([21x21xf32, 21x21xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (441x4xf32) <- (21x21x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (441x2xf32) <- (21x21x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (441x1xf32) <- () + full_5 = paddle._C_ops.full( + [441, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("42"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (42xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (42xf32) <- (42xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (42xf32) <- 
(42xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (42xf32) <- (42xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([42xf32, 42xf32]) <- (42xf32, 42xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([42x42xf32, 42x42xf32]) <- ([42xf32, 42xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (42x42xf32, 42x42xf32) <- ([42x42xf32, 42x42xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (42x42xf32) <- (42x42xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (42x42xf32) <- (42x42xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (42x42xf32) <- (42x42xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (42x42xf32) <- (42x42xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([42x42xf32, 42x42xf32, 42x42xf32, 42x42xf32]) <- (42x42xf32, 42x42xf32, 42x42xf32, 42x42xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (42x42x4xf32) <- ([42x42xf32, 42x42xf32, 42x42xf32, 42x42xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([42x42xf32, 42x42xf32]) <- (42x42xf32, 42x42xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (42x42x2xf32) <- ([42x42xf32, 42x42xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1764x4xf32) <- (42x42x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1764x2xf32) <- (42x42x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1764x1xf32) <- () + full_8 = paddle._C_ops.full( + [1764, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("84"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (84xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (84xf32) <- (84xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (84xf32) <- (84xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (84xf32) <- (84xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([84xf32, 84xf32]) <- (84xf32, 84xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([84x84xf32, 84x84xf32]) <- ([84xf32, 84xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (84x84xf32, 84x84xf32) <- ([84x84xf32, 84x84xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (84x84xf32) <- (84x84xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + 
+ # pd_op.scale: (84x84xf32) <- (84x84xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (84x84xf32) <- (84x84xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (84x84xf32) <- (84x84xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([84x84xf32, 84x84xf32, 84x84xf32, 84x84xf32]) <- (84x84xf32, 84x84xf32, 84x84xf32, 84x84xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (84x84x4xf32) <- ([84x84xf32, 84x84xf32, 84x84xf32, 84x84xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([84x84xf32, 84x84xf32]) <- (84x84xf32, 84x84xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (84x84x2xf32) <- ([84x84xf32, 84x84xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (7056x4xf32) <- (84x84x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (7056x2xf32) <- (84x84x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (7056x1xf32) <- () + full_11 = paddle._C_ops.full( + [7056, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([441x4xf32, 1764x4xf32, 7056x4xf32]) <- (441x4xf32, 1764x4xf32, 7056x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (9261x4xf32) <- ([441x4xf32, 1764x4xf32, 7056x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([441x2xf32, 1764x2xf32, 7056x2xf32]) <- (441x2xf32, 1764x2xf32, 7056x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (9261x2xf32) <- ([441x2xf32, 1764x2xf32, 7056x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([441x1xf32, 1764x1xf32, 7056x1xf32]) <- (441x1xf32, 1764x1xf32, 7056x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (9261x1xf32) <- ([441x1xf32, 1764x1xf32, 7056x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x21x21xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: 
(2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x21x21xf32) <- (2x384x21x21xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x384x21x21xf32) <- (2x384x21x21xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x21x21xf32) <- (2x384x21x21xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x21x21xf32) <- (2x384x21x21xf32, 2x384x21x21xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x10x21x21xf32) <- (2x384x21x21xf32, 10x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x10x21x21xf32) <- (2x10x21x21xf32, 1x10x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x21x21xf32) <- (2x384x21x21xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (2x384x21x21xf32) <- (2x384x21x21xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x21x21xf32) <- (2x384x21x21xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x40x21x21xf32) <- (2x384x21x21xf32, 40x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 
1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x40x21x21xf32) <- (2x40x21x21xf32, 1x40x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x10x21x21xf32) <- (2x10x21x21xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x10x441xf32) <- (2x10x21x21xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x441x10xf32) <- (2x10x441xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x40x441xf32) <- (2x40x21x21xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x441x40xf32) <- (2x40x441xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x42x42xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x42x42xf32) <- (2x192x42x42xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x192x42x42xf32) <- (2x192x42x42xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x42x42xf32) <- (2x192x42x42xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x42x42xf32) <- (2x192x42x42xf32, 2x192x42x42xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x10x42x42xf32) <- (2x192x42x42xf32, 10x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x10x42x42xf32) <- (2x10x42x42xf32, 1x10x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # 
pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x42x42xf32) <- (2x192x42x42xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x192x42x42xf32) <- (2x192x42x42xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x42x42xf32) <- (2x192x42x42xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x40x42x42xf32) <- (2x192x42x42xf32, 40x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x40x42x42xf32) <- (2x40x42x42xf32, 1x40x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x10x42x42xf32) <- (2x10x42x42xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x10x1764xf32) <- (2x10x42x42xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x1764x10xf32) <- (2x10x1764xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x40x1764xf32) <- (2x40x42x42xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x1764x40xf32) <- (2x40x1764xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x84x84xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x84x84xf32) <- (2x96x84x84xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x96x84x84xf32) <- (2x96x84x84xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + 
multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x84x84xf32) <- (2x96x84x84xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x84x84xf32) <- (2x96x84x84xf32, 2x96x84x84xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x10x84x84xf32) <- (2x96x84x84xf32, 10x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x10x84x84xf32) <- (2x10x84x84xf32, 1x10x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x84x84xf32) <- (2x96x84x84xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x96x84x84xf32) <- (2x96x84x84xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x84x84xf32) <- (2x96x84x84xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x40x84x84xf32) <- (2x96x84x84xf32, 40x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x40x84x84xf32) <- (2x40x84x84xf32, 1x40x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x10x84x84xf32) <- (2x10x84x84xf32) 
+ sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x10x7056xf32) <- (2x10x84x84xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x7056x10xf32) <- (2x10x7056xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x40x7056xf32) <- (2x40x84x84xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x7056x40xf32) <- (2x40x7056xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x441x10xf32, 2x1764x10xf32, 2x7056x10xf32]) <- (2x441x10xf32, 2x1764x10xf32, 2x7056x10xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x9261x10xf32) <- ([2x441x10xf32, 2x1764x10xf32, 2x7056x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x441x40xf32, 2x1764x40xf32, 2x7056x40xf32]) <- (2x441x40xf32, 2x1764x40xf32, 2x7056x40xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x9261x40xf32) <- ([2x441x40xf32, 2x1764x40xf32, 2x7056x40xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/weight_meta.py new file mode 100644 index 000000000..114efd7d3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/weight_meta.py @@ -0,0 +1,580 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 96, 3, 3] + dtype = "float32" + min_val = float("-0.220386") + max_val = float("0.222309") + mean = float("2.62808e-08") + std = float("0.0161374") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.127121") + max_val = float("0.345832") + mean = float("0.108268") + std = float("0.11199") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.947191") + max_val = float("2.26117") + mean = float("1.5267") + std = float("0.270308") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000629044") + max_val = float("0.0511984") + mean = float("0.00624261") + std = float("0.0079902") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.180373") + max_val = float("0.105325") + mean = float("-0.0118238") + std = float("0.0470357") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.16617") + max_val = float("0.150574") + mean = float("-0.0015204") + std = float("0.018482") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.01695") + max_val = float("0.0144876") + mean = float("-0.000590983") + std = float("0.0062921") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0289852") + max_val = float("0.0383979") + mean = float("-0.00056367") + std = float("0.00473631") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 96, 3, 3] + dtype = "float32" + min_val = float("-0.158508") + max_val = float("0.111546") + mean = float("-0.00123997") + std = float("0.0149231") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-1.00246") + max_val = float("1.70494") + mean = float("0.553674") + std = float("0.526983") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.751697") + max_val = float("2.08213") + mean = float("1.46982") + std = float("0.2376") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000667443") + max_val = float("0.03843") + mean = float("0.00521712") + std = float("0.00544283") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.302733") + max_val = float("0.241993") + mean = float("0.0338989") + std = float("0.0730685") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0920552") + max_val = float("0.101403") + mean = float("-0.000391196") + std = float("0.016093") + data = None + + +class Program_weight_tensor_parameter_16: + name = 
"parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00790501") + max_val = float("0.0118839") + mean = float("-0.000616206") + std = float("0.00334672") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0354745") + max_val = float("0.0393783") + mean = float("-0.000415455") + std = float("0.00398812") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.15995") + max_val = float("0.172865") + mean = float("7.42875e-09") + std = float("0.00878877") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.0211182") + max_val = float("0.16802") + mean = float("0.0783479") + std = float("0.0391695") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.07811") + max_val = float("1.51568") + mean = float("1.30274") + std = float("0.0876299") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000340722") + max_val = float("0.0215384") + mean = float("0.00329097") + std = float("0.00394459") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.124486") + max_val = float("0.0532824") + mean = float("-0.0102578") + std = float("0.0263505") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0792181") + max_val = float("0.107778") + mean = float("-0.000368823") + std = float("0.00729713") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00746441") + max_val = float("0.00722328") + mean = float("-8.13383e-05") + std = float("0.00296425") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00795566") + max_val = float("0.0110585") + mean = float("-0.000113663") + std = float("0.00156623") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0899103") + max_val = float("0.0751371") + mean = float("-0.000593556") + std = float("0.00682701") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.291965") + max_val = float("1.00507") + mean = float("0.404706") + std = float("0.237409") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("1.0436") + max_val = float("1.84744") + mean = float("1.34394") + std = float("0.127482") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + 
min_val = float("0.000364342") + max_val = float("0.0104285") + mean = float("0.00182714") + std = float("0.00169406") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.16024") + max_val = float("0.11807") + mean = float("-0.00110864") + std = float("0.0399019") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0517684") + max_val = float("0.0545506") + mean = float("-0.000237345") + std = float("0.00623955") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00367397") + max_val = float("0.00885849") + mean = float("-0.000153054") + std = float("0.00155494") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0153384") + max_val = float("0.0338853") + mean = float("-0.000100079") + std = float("0.00136892") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = float("-0.02606") + max_val = float("0.0317561") + mean = float("9.52241e-10") + std = float("0.0020846") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0246739") + max_val = float("0.152016") + mean = float("0.0408553") + std = float("0.032015") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("1.05705") + max_val = float("1.41825") + mean = float("1.2191") + std = float("0.0542166") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("0.000103297") + max_val = float("0.00449041") + mean = float("0.000451116") + std = float("0.000447237") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0274016") + max_val = float("0.0126119") + mean = float("-0.00687847") + std = float("0.00609523") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0464693") + max_val = float("0.0548612") + mean = float("-0.000100111") + std = float("0.00295451") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00639402") + max_val = float("0.00421888") + mean = float("3.54671e-05") + std = float("0.00171694") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00654664") + max_val = float("0.00763939") + mean = float("-4.30022e-06") + std = float("0.000719087") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = 
float("-0.0276653") + max_val = float("0.0223204") + mean = float("-0.000443044") + std = float("0.00213442") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.409858") + max_val = float("0.610632") + mean = float("0.212569") + std = float("0.109879") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("1.05961") + max_val = float("1.46695") + mean = float("1.20196") + std = float("0.0657315") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("7.4508e-05") + max_val = float("0.00540989") + mean = float("0.000792194") + std = float("0.000653005") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.0807539") + max_val = float("0.0680056") + mean = float("-0.0130928") + std = float("0.022196") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0661697") + max_val = float("0.0355423") + mean = float("-0.00020161") + std = float("0.00342025") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.00360413") + max_val = float("0.00538468") + mean = float("-8.84011e-05") + std = float("0.000952451") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0169924") + max_val = float("0.0292524") + mean = float("-2.04535e-05") + std = float("0.000783345") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..a0bcabb82 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +ec435c57522cff4aa2e0b88c81cfd985851c951e344bf1f29bad03418a045d50 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/input_meta.py new file mode 100644 index 000000000..e5e974b2e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/input_meta.py @@ -0,0 +1,149 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [1.57956] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.568783] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.867217] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.726404] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = 
[0.722815] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.85667] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.162592") + max_val = float("0.180973") + mean = float("-9.33722e-05") + std = float("0.0262455") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1536] + dtype = "float32" + min_val = float("-0.0684767") + max_val = float("0.0607908") + mean = float("-0.000411208") + std = float("0.0109722") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0709287") + max_val = float("0.066977") + mean = float("-1.66902e-05") + std = float("0.0241004") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1536] + dtype = "float32" + min_val = float("-0.0128107") + max_val = float("0.0123793") + mean = float("0.000143729") + std = float("0.00255001") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0766966") + max_val = float("0.0869937") + mean = float("-1.34002e-05") + std = float("0.0241326") + data = None + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1536] + dtype = "float32" + min_val = float("-0.0153123") + max_val = float("0.0159657") + mean = float("4.29792e-05") + std = float("0.00300275") + data = None + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.167232") + max_val = float("0.169831") + mean = float("-1.11063e-05") + std = float("0.0253923") + data = None + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1536] + dtype = "float32" + min_val = float("-0.0720536") + max_val = float("0.053979") + mean = float("-0.000603133") + std = float("0.0115504") + data = None + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1, 3, 640, 640] + dtype = "float32" + max_val = float("1.0") + mean = float("0.467665") + std = float("0.176432") + data = None + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1, 400, 512] + dtype = "float32" + min_val = float("-0.999998") + max_val = float("1.0") + mean = float("0.443308") + std = float("0.550888") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/model.py new file mode 100644 index 000000000..e449466fd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/model.py @@ -0,0 +1,5289 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + 
parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + 
parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + data_0, + data_1, + data_2, + data_3, + data_4, + 
data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + ): + # pd_op.conv2d: (-1x16x-1x-1xf32) <- (-1x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_14, parameter_462, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_14, parameter_462 + + # pd_op.batch_norm_: (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_461, + parameter_460, + parameter_459, + parameter_458, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_458, parameter_459, parameter_460, parameter_461 + + # pd_op.swish: (-1x16x-1x-1xf32) <- (-1x16x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x16x-1x-1xf32) <- (-1x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_457, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_457, swish_0 + + # pd_op.batch_norm_: (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (-1x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_456, + parameter_455, + parameter_454, + parameter_453, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_453, parameter_454, parameter_455, parameter_456 + + # pd_op.swish: (-1x16x-1x-1xf32) <- (-1x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_452, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_452, swish_1 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_451, + parameter_450, + parameter_449, + parameter_448, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_448, parameter_449, parameter_450, parameter_451 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_447, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_447, swish_2 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, 
+ parameter_446, + parameter_445, + parameter_444, + parameter_443, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_443, parameter_444, parameter_445, parameter_446 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_442, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_442 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_441, + parameter_440, + parameter_439, + parameter_438, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_438, parameter_439, parameter_440, parameter_441 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_437, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_437, swish_3 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_436, + parameter_435, + parameter_434, + parameter_433, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_433, parameter_434, parameter_435, parameter_436 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_432, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_432 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_431, + parameter_430, + parameter_429, + parameter_428, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_428, parameter_429, parameter_430, parameter_431 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_427, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_427 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_426, + parameter_425, + parameter_424, + parameter_423, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_423, parameter_424, parameter_425, parameter_426 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_422, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_422, swish_6 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_421, + parameter_420, + parameter_419, + parameter_418, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_418, parameter_419, parameter_420, parameter_421 + + # pd_op.multiply: (-1x24x-1x-1xf32) <- (1xf32, -1x24x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x24x-1x-1xf32, -1x24x-1x-1xf32]) <- (-1x24x-1x-1xf32, -1x24x-1x-1xf32) + combine_0 = [swish_4, add_1] + del add_1, swish_4 + + # pd_op.concat: (-1x48x-1x-1xf32) <- ([-1x24x-1x-1xf32, -1x24x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x48x1x1xf32) <- (-1x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x48x1x1xf32) <- (-1x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_417, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_417 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_416, full_int_array_1) + del parameter_416 + + # pd_op.add: (-1x48x1x1xf32) <- (-1x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + del conv2d_9, reshape_0 + + # pd_op.hardsigmoid: (-1x48x1x1xf32) <- (-1x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_2, 
hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_1, parameter_415, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_415 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_414, + parameter_413, + parameter_412, + parameter_411, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_411, parameter_412, parameter_413, parameter_414 + + # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_410, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_410, swish_8 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_409, + parameter_408, + parameter_407, + parameter_406, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_406, parameter_407, parameter_408, parameter_409 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_9, parameter_405, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_405 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_404, + parameter_403, + parameter_402, + parameter_401, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_401, parameter_402, parameter_403, parameter_404 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_9, parameter_400, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_400, swish_9 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_399, + parameter_398, + 
parameter_397, + parameter_396, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_396, parameter_397, parameter_398, parameter_399 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_395, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_395 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_394, + parameter_393, + parameter_392, + parameter_391, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_391, parameter_392, parameter_393, parameter_394 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_12, parameter_390, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_390 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_389, + parameter_388, + parameter_387, + parameter_386, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_386, parameter_387, parameter_388, parameter_389 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_12, parameter_385, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_385, swish_12 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_384, + parameter_383, + parameter_382, + parameter_381, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_381, parameter_382, parameter_383, parameter_384 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, batch_norm__90) + del batch_norm__90, data_1 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, multiply_2) + del batch_norm__84, multiply_2 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(add_3) + del add_3 + + # pd_op.add: (-1x48x-1x-1xf32) <- 
(-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_11, swish_13) + del swish_11, swish_13 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_380, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_380 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_379, + parameter_378, + parameter_377, + parameter_376, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_376, parameter_377, parameter_378, parameter_379 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_375, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_375 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_374, + parameter_373, + parameter_372, + parameter_371, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_371, parameter_372, parameter_373, parameter_374 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_370, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_370, swish_14 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_369, + parameter_368, + parameter_367, + parameter_366, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_366, parameter_367, parameter_368, parameter_369 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del batch_norm__108, data_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + del batch_norm__102, multiply_3 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_15) + del add_4, swish_15 + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_1 = [swish_10, add_6] + del add_6, swish_10 + + # pd_op.concat: (-1x96x-1x-1xf32) <- 
([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_365 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_364, full_int_array_1) + del parameter_364 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + del conv2d_20, reshape_1 + + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_4, parameter_363, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_4, parameter_363 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_362, + parameter_361, + parameter_360, + parameter_359, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_359, parameter_360, parameter_361, parameter_362 + + # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_358, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_358 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_357, + parameter_356, + parameter_355, + parameter_354, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_354, parameter_355, parameter_356, parameter_357 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__120) + del batch_norm__120 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_17, parameter_353, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_353 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_352, + parameter_351, + parameter_350, + parameter_349, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_349, parameter_350, parameter_351, parameter_352 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_17, parameter_348, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_348, swish_17 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_347, + parameter_346, + parameter_345, + parameter_344, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_344, parameter_345, parameter_346, parameter_347 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_343, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_343 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_342, + parameter_341, + parameter_340, + parameter_339, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_339, parameter_340, parameter_341, parameter_342 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_338, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_338 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_337, + parameter_336, + parameter_335, + parameter_334, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_334, parameter_335, parameter_336, parameter_337 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_333, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_333, swish_20 + + # 
pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_332, + parameter_331, + parameter_330, + parameter_329, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_329, parameter_330, parameter_331, parameter_332 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_3, batch_norm__150) + del batch_norm__150, data_3 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, multiply_5) + del batch_norm__144, multiply_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_8) + del add_8 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_19, swish_21) + del swish_19, swish_21 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_328, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_328 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_327, + parameter_326, + parameter_325, + parameter_324, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_324, parameter_325, parameter_326, parameter_327 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__156) + del batch_norm__156 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_22, parameter_323, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_323 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_318, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318, swish_22 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_30, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_4, batch_norm__168) + del batch_norm__168, data_4 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, multiply_6) + del batch_norm__162, multiply_6 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_10) + del add_10 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_23) + del add_9, swish_23 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_2 = [swish_18, add_11] + del add_11, swish_18 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_313 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_312, full_int_array_1) + del parameter_312 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + del conv2d_31, reshape_2 + + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_7, parameter_311, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_7, parameter_311 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_310, + parameter_309, + parameter_308, + parameter_307, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_307, parameter_308, parameter_309, parameter_310 + + # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_306, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_306 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 
384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_305, + parameter_304, + parameter_303, + parameter_302, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_302, parameter_303, parameter_304, parameter_305 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_301, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_301 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_300, + parameter_299, + parameter_298, + parameter_297, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_297, parameter_298, parameter_299, parameter_300 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_25, parameter_296, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_296, swish_25 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_295, + parameter_294, + parameter_293, + parameter_292, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_292, parameter_293, parameter_294, parameter_295 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_291, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_291 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_290, + parameter_289, + parameter_288, + parameter_287, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_287, parameter_288, parameter_289, parameter_290 + + # 
pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__198) + del batch_norm__198 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_286, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_286 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_285, + parameter_284, + parameter_283, + parameter_282, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_282, parameter_283, parameter_284, parameter_285 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_281, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_281, swish_28 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_280, + parameter_279, + parameter_278, + parameter_277, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_277, parameter_278, parameter_279, parameter_280 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_5, batch_norm__210) + del batch_norm__210, data_5 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, multiply_8) + del batch_norm__204, multiply_8 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_27, swish_29) + del swish_27, swish_29 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_3 = [swish_26, add_14] + del add_14, swish_26 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_276, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_276 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_275, full_int_array_1) + del parameter_275 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + del conv2d_39, reshape_3 + + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, 
float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_9 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_9, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_274 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__216) + del batch_norm__216 + + # pd_op.shape64: (4xi64) <- (-1x512x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_30) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x512x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_30) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x512x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_30) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.flatten: (-1x512x-1xf32) <- (-1x512x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(swish_30, 2, 3) + del swish_30 + + # pd_op.transpose: (-1x-1x512xf32) <- (-1x512x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.add: (-1x400x512xf32) <- (-1x-1x512xf32, 1x400x512xf32) + add_16 = paddle._C_ops.add(transpose_0, data_15) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [512] + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_6, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_7, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_0 = paddle._C_ops.matmul(add_16, slice_3, False, False) + del slice_3 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_0, slice_4) + del matmul_0, slice_4 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_8 = [0, 0, 4, 128] + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + 
reshape_4 = paddle._C_ops.reshape(add_17, full_int_array_8) + del add_17 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1024] + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_6, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_7, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_1 = paddle._C_ops.matmul(add_16, slice_5, False, False) + del add_16, slice_5 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_18 = paddle._C_ops.add(matmul_1, slice_6) + del matmul_1, slice_6 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_18, full_int_array_8) + del add_18 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_10 = [2147483647] + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_6, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_6 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_7, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_7 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(transpose_0, slice_7, False, False) + del slice_7 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_19 = paddle._C_ops.add(matmul_2, slice_8) + del matmul_2, slice_8 + + # pd_op.reshape: (-1x-1x4x128xf32) <- (-1x-1x512xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_19, full_int_array_8) + del add_19 + + # pd_op.transpose: (-1x4x-1x128xf32) <- (-1x-1x4x128xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x128xf32, -1x4x400x128xf32) + matmul_3 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + del transpose_1, transpose_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.0883883"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_3, full_1, float("0"), True) + del matmul_3 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_0 + + # pd_op.matmul: (-1x4x400x128xf32) <- (-1x4x400x400xf32, -1x4x-1x128xf32) + matmul_4 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + del dropout_0, transpose_3 + + # pd_op.transpose: (-1x400x4x128xf32) <- (-1x4x400x128xf32) + transpose_4 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) + del matmul_4 + + # pd_op.shape64: 
(4xi64) <- (-1x400x4x128xf32) + shape64_3 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [0, 0, 512] + + # pd_op.reshape: (-1x400x512xf32) <- (-1x400x4x128xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_11) + del transpose_4 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_5 = paddle._C_ops.matmul(reshape_7, parameter_269, False, False) + del parameter_269, reshape_7 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_20 = paddle._C_ops.add(matmul_5, parameter_268) + del matmul_5, parameter_268 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_20, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_20 + + # pd_op.add: (-1x400x512xf32) <- (-1x-1x512xf32, -1x400x512xf32) + add_21 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_21, parameter_267, parameter_266, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_21, parameter_266, parameter_267 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x512xf32, 512x2048xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_265, False, False) + del parameter_265 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_22 = paddle._C_ops.add(matmul_6, parameter_264) + del matmul_6, parameter_264 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_22, False) + del add_22 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x2048xf32, 2048x512xf32) + matmul_7 = paddle._C_ops.matmul(dropout_4, parameter_263, False, False) + del dropout_4, parameter_263 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_23 = paddle._C_ops.add(matmul_7, parameter_262) + del matmul_7, parameter_262 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_23, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_23 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_24 = paddle._C_ops.add(layer_norm_0, dropout_6) + del dropout_6, layer_norm_0 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_24, parameter_261, parameter_260, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_24, parameter_260, 
parameter_261 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 1x400x512xf32) + add_25 = paddle._C_ops.add(layer_norm_3, data_15) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_8, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_9, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_8 = paddle._C_ops.matmul(add_25, slice_10, False, False) + del slice_10 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_26 = paddle._C_ops.add(matmul_8, slice_11) + del matmul_8, slice_11 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_26, full_int_array_8) + del add_26 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_8, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_9, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_9 = paddle._C_ops.matmul(add_25, slice_12, False, False) + del add_25, slice_12 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_27 = paddle._C_ops.add(matmul_9, slice_13) + del matmul_9, slice_13 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_27, full_int_array_8) + del add_27 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_8, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_8 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_9, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_9 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_3, slice_14, False, False) + del slice_14 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_28 = paddle._C_ops.add(matmul_10, slice_15) + del matmul_10, slice_15 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_28, full_int_array_8) + del add_28 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x128xf32, -1x4x400x128xf32) + matmul_11 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + del transpose_5, transpose_6 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_11, full_1, float("0"), True) + del matmul_11 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_2, True, "upscale_in_train", 0, False + ), + 
lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (-1x4x400x128xf32) <- (-1x4x400x400xf32, -1x4x400x128xf32) + matmul_12 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + del dropout_8, transpose_7 + + # pd_op.transpose: (-1x400x4x128xf32) <- (-1x4x400x128xf32) + transpose_8 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # pd_op.shape64: (4xi64) <- (-1x400x4x128xf32) + shape64_4 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_4 + + # pd_op.reshape: (-1x400x512xf32) <- (-1x400x4x128xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_11) + del transpose_8 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_13 = paddle._C_ops.matmul(reshape_11, parameter_259, False, False) + del parameter_259, reshape_11 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_29 = paddle._C_ops.add(matmul_13, parameter_258) + del matmul_13, parameter_258 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_29, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_29 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_30 = paddle._C_ops.add(layer_norm_3, dropout_10) + del dropout_10, layer_norm_3 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_30, parameter_257, parameter_256, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_30, parameter_256, parameter_257 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x512xf32, 512x2048xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_6, parameter_255, False, False) + del parameter_255 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_31 = paddle._C_ops.add(matmul_14, parameter_254) + del matmul_14, parameter_254 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_31, False) + del add_31 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x2048xf32, 2048x512xf32) + matmul_15 = paddle._C_ops.matmul(dropout_12, parameter_253, False, False) + del dropout_12, parameter_253 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_32 = paddle._C_ops.add(matmul_15, parameter_252) + del matmul_15, parameter_252 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_32, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_32 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_33 = paddle._C_ops.add(layer_norm_6, dropout_14) + del dropout_14, 
layer_norm_6 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_33, parameter_251, parameter_250, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_33, parameter_250, parameter_251 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 1x400x512xf32) + add_34 = paddle._C_ops.add(layer_norm_9, data_15) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_10, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_11, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_16 = paddle._C_ops.matmul(add_34, slice_17, False, False) + del slice_17 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_35 = paddle._C_ops.add(matmul_16, slice_18) + del matmul_16, slice_18 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_35, full_int_array_8) + del add_35 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_10, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_11, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_17 = paddle._C_ops.matmul(add_34, slice_19, False, False) + del add_34, slice_19 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_36 = paddle._C_ops.add(matmul_17, slice_20) + del matmul_17, slice_20 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_36, full_int_array_8) + del add_36 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_10, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_10 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_11, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_11 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_18 = paddle._C_ops.matmul(layer_norm_9, slice_21, False, False) + del slice_21 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_37 = paddle._C_ops.add(matmul_18, slice_22) + del matmul_18, slice_22 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_37, full_int_array_8) + del add_37 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x128xf32, -1x4x400x128xf32) + matmul_19 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + del transpose_10, transpose_9 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_2 = 
paddle._C_ops.scale(matmul_19, full_1, float("0"), True) + del matmul_19 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (-1x4x400x128xf32) <- (-1x4x400x400xf32, -1x4x400x128xf32) + matmul_20 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + del dropout_16, transpose_11 + + # pd_op.transpose: (-1x400x4x128xf32) <- (-1x4x400x128xf32) + transpose_12 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # pd_op.shape64: (4xi64) <- (-1x400x4x128xf32) + shape64_5 = paddle._C_ops.shape64(transpose_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_5 + + # pd_op.reshape: (-1x400x512xf32) <- (-1x400x4x128xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_11) + del transpose_12 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_21 = paddle._C_ops.matmul(reshape_15, parameter_249, False, False) + del parameter_249, reshape_15 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_38 = paddle._C_ops.add(matmul_21, parameter_248) + del matmul_21, parameter_248 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_38, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_38 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_39 = paddle._C_ops.add(layer_norm_9, dropout_18) + del dropout_18, layer_norm_9 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_39, parameter_247, parameter_246, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_39, parameter_246, parameter_247 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x512xf32, 512x2048xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_245, False, False) + del parameter_245 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_40 = paddle._C_ops.add(matmul_22, parameter_244) + del matmul_22, parameter_244 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_40, False) + del add_40 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x2048xf32, 2048x512xf32) + matmul_23 = paddle._C_ops.matmul(dropout_20, parameter_243, False, False) + del dropout_20, parameter_243 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_41 = paddle._C_ops.add(matmul_23, parameter_242) + del matmul_23, parameter_242 + + # 
pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_41, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_41 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_42 = paddle._C_ops.add(layer_norm_12, dropout_22) + del dropout_22, layer_norm_12 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_42, parameter_241, parameter_240, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_42, parameter_240, parameter_241 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 1x400x512xf32) + add_43 = paddle._C_ops.add(layer_norm_15, data_15) + del data_15 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + data_12, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + data_13, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_24 = paddle._C_ops.matmul(add_43, slice_24, False, False) + del slice_24 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_44 = paddle._C_ops.add(matmul_24, slice_25) + del matmul_24, slice_25 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_44, full_int_array_8) + del add_44 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + data_12, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + data_13, [0], full_int_array_7, full_int_array_9, [1], [] + ) + del full_int_array_7 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_25 = paddle._C_ops.matmul(add_43, slice_26, False, False) + del add_43, slice_26 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_45 = paddle._C_ops.add(matmul_25, slice_27) + del matmul_25, slice_27 + + # pd_op.reshape: (-1x400x4x128xf32) <- (-1x400x512xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_45, full_int_array_8) + del add_45 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_28 = paddle._C_ops.slice( + data_12, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_12 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_29 = paddle._C_ops.slice( + data_13, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_13, full_int_array_10, full_int_array_9 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_15, slice_28, False, False) + del slice_28 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_46 = paddle._C_ops.add(matmul_26, slice_29) + del matmul_26, slice_29 + + # pd_op.reshape: (-1x400x4x128xf32) <- 
(-1x400x512xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_46, full_int_array_8) + del add_46, full_int_array_8 + + # pd_op.transpose: (-1x4x400x128xf32) <- (-1x400x4x128xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x128xf32, -1x4x400x128xf32) + matmul_27 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + del transpose_13, transpose_14 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_27, full_1, float("0"), True) + del full_1, matmul_27 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_3 + + # pd_op.matmul: (-1x4x400x128xf32) <- (-1x4x400x400xf32, -1x4x400x128xf32) + matmul_28 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + del dropout_24, transpose_15 + + # pd_op.transpose: (-1x400x4x128xf32) <- (-1x4x400x128xf32) + transpose_16 = paddle._C_ops.transpose(matmul_28, [0, 2, 1, 3]) + del matmul_28 + + # pd_op.shape64: (4xi64) <- (-1x400x4x128xf32) + shape64_6 = paddle._C_ops.shape64(transpose_16) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_30 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_6 + + # pd_op.reshape: (-1x400x512xf32) <- (-1x400x4x128xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_11) + del full_int_array_11, transpose_16 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x512xf32, 512x512xf32) + matmul_29 = paddle._C_ops.matmul(reshape_19, parameter_239, False, False) + del parameter_239, reshape_19 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_47 = paddle._C_ops.add(matmul_29, parameter_238) + del matmul_29, parameter_238 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_48 = paddle._C_ops.add(layer_norm_15, dropout_26) + del dropout_26, layer_norm_15 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_237, parameter_236, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_48, parameter_236, parameter_237 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x512xf32, 512x2048xf32) + matmul_30 = paddle._C_ops.matmul(layer_norm_18, parameter_235, False, False) + del parameter_235 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_49 = paddle._C_ops.add(matmul_30, parameter_234) + del matmul_30, parameter_234 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_49, False) + del add_49 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + 
dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (-1x400x512xf32) <- (-1x400x2048xf32, 2048x512xf32) + matmul_31 = paddle._C_ops.matmul(dropout_28, parameter_233, False, False) + del dropout_28, parameter_233 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, 512xf32) + add_50 = paddle._C_ops.add(matmul_31, parameter_232) + del matmul_31, parameter_232 + + # pd_op.dropout: (-1x400x512xf32, -1x400x512xui8) <- (-1x400x512xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_50, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_50, full_2 + + # pd_op.add: (-1x400x512xf32) <- (-1x400x512xf32, -1x400x512xf32) + add_51 = paddle._C_ops.add(layer_norm_18, dropout_30) + del dropout_30, layer_norm_18 + + # pd_op.layer_norm: (-1x400x512xf32, -1x400xf32, -1x400xf32) <- (-1x400x512xf32, 512xf32, 512xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_51, parameter_231, parameter_230, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_51, parameter_230, parameter_231 + + # pd_op.transpose: (-1x512x400xf32) <- (-1x400x512xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("512"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [slice_0, full_3, slice_1, slice_2] + del full_3, slice_0, slice_1, slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x512x-1x-1xf32) <- (-1x512x400xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, stack_0) + del stack_0, transpose_17 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + reshape_20, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + reshape_20, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, reshape_20 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, 
+ batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_32 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_33 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + del batch_norm__240, batch_norm__246 + + # pd_op.swish: (-1x192x-1x-1xf32) <- 
(-1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_52) + del add_52 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [5, 5] + + # pd_op.pool2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_34, + full_int_array_12, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_13 = [9, 9] + + # pd_op.pool2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_34, + full_int_array_13, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_13 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_14 = [13, 13] + + # pd_op.pool2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_34, + full_int_array_14, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_14 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_5 = [swish_34, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_34 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x768x-1x-1xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_6, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_204 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__252) + del batch_norm__252 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_6 = [swish_31, swish_35] + del swish_31, swish_35 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_199 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_36, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_37, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_37 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) + combine_7 = [nearest_interp_0, swish_24] + del nearest_interp_0, swish_24 + + # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_184 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + 
), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_39, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_39 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_40, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_40 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_53) + del add_53 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_8 = [swish_38, swish_41] + del swish_38, swish_41 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_9 = 
paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_164 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_42, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.nearest_interp: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_43, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_43 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x128x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x128x-1x-1xf32) + combine_9 = [nearest_interp_1, swish_16] + del nearest_interp_1, swish_16 + + # pd_op.concat: (-1x224x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x128x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_10, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x48x-1x-1xf32) <- 
(-1x48x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__312) + del batch_norm__312 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_10, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_149 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_45 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134, swish_46 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + 
float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + del batch_norm__330, batch_norm__336 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(add_54) + del add_54 + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_10 = [swish_44, swish_47] + del swish_44, swish_47 + + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_11, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_129 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_61, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_124, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__348) + del batch_norm__348 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x192x-1x-1xf32) + combine_11 = [swish_49, swish_42] + del swish_42, swish_49 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_12, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + 
batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_12, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_114 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_51, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109, swish_51 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_52, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_52, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 
1], 1, "NCHW" + ) + del parameter_99, swish_52 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_55 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_55) + del add_55 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_12 = [swish_50, swish_53] + del swish_50, swish_53 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_13, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_94 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_54, parameter_89, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__390) + del batch_norm__390 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) + combine_13 = [swish_55, swish_36] + del swish_36, swish_55 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_14 = 
paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_14, parameter_84, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_14, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_79 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_57, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_57 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_58, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_73, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_58, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_58 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_56 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_56) + del add_56 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_14 = [swish_56, swish_59] + del swish_56, swish_59 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_60) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_31 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_60) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_32 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_8 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_9 = paddle._C_ops.shape64(swish_60) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_33 = paddle._C_ops.slice( + shape64_9, 
[0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_9 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_32, slice_33) + del slice_32, slice_33 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_15 = [1, 1] + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_60, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_21 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_57 = paddle._C_ops.add(conv2d_76, reshape_21) + del conv2d_76, reshape_21 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_60, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + multiply_11, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_52 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_58 = paddle._C_ops.add(swish_61, swish_60) + del swish_61 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x384x-1x-1xf32, 10x384x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + add_58, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_58, parameter_47 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_22 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_59 = paddle._C_ops.add(conv2d_78, reshape_22) + del conv2d_78, reshape_22 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_23 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_60 = paddle._C_ops.add(conv2d_79, reshape_23) + del conv2d_79, reshape_23 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_60) + del add_60 + + # 
pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_60, sigmoid_1) + del sigmoid_1, swish_60 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + multiply_12, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_43 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x384x-1x-1xf32, 40x384x3x3xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_62, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_62 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_24 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_61 = paddle._C_ops.add(conv2d_81, reshape_24) + del conv2d_81, reshape_24 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("10"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_15 = [full_4, full_5, full_6, multiply_10] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_25 = paddle._C_ops.reshape(add_61, stack_1) + del add_61, stack_1 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_18 = paddle._C_ops.transpose(reshape_25, [0, 2, 3, 1]) + del reshape_25 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_4 = paddle._C_ops.softmax(transpose_18, 1) + del transpose_18 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_82 = paddle._C_ops.conv2d( + softmax_4, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_4 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_82, full_int_array_3) + del conv2d_82 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_59) + del add_59 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_16 = [full_4, full_6, multiply_10] + del multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_26 = paddle._C_ops.reshape(sigmoid_2, stack_2) + del sigmoid_2, stack_2 + + 
# pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_10 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_34 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_10 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_11 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_35 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_11 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_12 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_36 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_12 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_13 = paddle._C_ops.multiply(slice_35, slice_36) + del slice_35, slice_36 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_54, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_83 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_62 = paddle._C_ops.add(conv2d_83, reshape_27) + del conv2d_83, reshape_27 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_14 = paddle._C_ops.multiply(swish_54, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + multiply_14, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_14, parameter_33 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_63 = paddle._C_ops.add(swish_63, swish_54) + del swish_63 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x192x-1x-1xf32, 10x192x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + add_63, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_63, parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_28 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_64 = paddle._C_ops.add(conv2d_85, reshape_28) + del 
conv2d_85, reshape_28 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_86 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_29 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_65 = paddle._C_ops.add(conv2d_86, reshape_29) + del conv2d_86, reshape_29 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_65) + del add_65 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_15 = paddle._C_ops.multiply(swish_54, sigmoid_4) + del sigmoid_4, swish_54 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + multiply_15, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_15, parameter_24 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x192x-1x-1xf32, 40x192x3x3xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_64, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_64 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_30 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_66 = paddle._C_ops.add(conv2d_88, reshape_30) + del conv2d_88, reshape_30 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_17 = [full_4, full_5, full_6, multiply_13] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_31 = paddle._C_ops.reshape(add_66, stack_3) + del add_66, stack_3 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_19 = paddle._C_ops.transpose(reshape_31, [0, 2, 3, 1]) + del reshape_31 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_5 = paddle._C_ops.softmax(transpose_19, 1) + del transpose_19 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + softmax_5, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_5 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_89, full_int_array_3) + del conv2d_89 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_64) + del add_64 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [full_4, full_6, 
multiply_13] + del multiply_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_32 = paddle._C_ops.reshape(sigmoid_5, stack_4) + del sigmoid_5, stack_4 + + # pd_op.shape64: (4xi64) <- (-1x96x-1x-1xf32) + shape64_13 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_37 = paddle._C_ops.slice( + shape64_13, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, shape64_13 + + # pd_op.shape64: (4xi64) <- (-1x96x-1x-1xf32) + shape64_14 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_38 = paddle._C_ops.slice( + shape64_14, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4, shape64_14 + + # pd_op.shape64: (4xi64) <- (-1x96x-1x-1xf32) + shape64_15 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_39 = paddle._C_ops.slice( + shape64_15, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, full_int_array_6, shape64_15 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_16 = paddle._C_ops.multiply(slice_38, slice_39) + del slice_38, slice_39 + + # pd_op.pool2d: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_48, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_15 + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_33 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_67 = paddle._C_ops.add(conv2d_90, reshape_33) + del conv2d_90, reshape_33 + + # pd_op.sigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_17 = paddle._C_ops.multiply(swish_48, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + multiply_17, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_15 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_68 = paddle._C_ops.add(swish_65, swish_48) + del swish_65 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x96x-1x-1xf32, 10x96x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + add_68, parameter_10, 
[1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_68, parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_34 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_69 = paddle._C_ops.add(conv2d_92, reshape_34) + del conv2d_92, reshape_34 + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) + conv2d_93 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_35 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_70 = paddle._C_ops.add(conv2d_93, reshape_35) + del conv2d_93, reshape_35 + + # pd_op.sigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_70) + del add_70 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_18 = paddle._C_ops.multiply(swish_48, sigmoid_7) + del sigmoid_7, swish_48 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + multiply_18, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_18, parameter_6 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x96x-1x-1xf32, 40x96x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_66, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_66 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_36 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_71 = paddle._C_ops.add(conv2d_95, reshape_36) + del conv2d_95, reshape_36 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_19 = [full_4, full_5, full_6, multiply_16] + del full_5 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_37 = paddle._C_ops.reshape(add_71, stack_5) + del add_71, stack_5 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_20 = paddle._C_ops.transpose(reshape_37, [0, 2, 3, 1]) + del reshape_37 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_6 = paddle._C_ops.softmax(transpose_20, 1) + del transpose_20 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_96 = paddle._C_ops.conv2d( + softmax_6, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_6 + + # pd_op.squeeze: 
(-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_96, full_int_array_3) + del conv2d_96, full_int_array_3 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_69) + del add_69 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_20 = [full_4, full_6, multiply_16] + del full_4, full_6, multiply_16 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_20, 0) + del combine_20 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_38 = paddle._C_ops.reshape(sigmoid_8, stack_6) + del sigmoid_8, stack_6 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32]) <- (-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32) + combine_21 = [reshape_26, reshape_32, reshape_38] + del reshape_26, reshape_32, reshape_38 + + # pd_op.concat: (-1x10x-1xf32) <- ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_21, full_7) + del combine_21, full_7 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_22 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_22, full_0) + del combine_22, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/weight_meta.py new file mode 100644 index 000000000..82b3928f2 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/weight_meta.py @@ -0,0 +1,4891 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 96, 3, 3] + dtype = "float32" + min_val = float("-0.211372") + max_val = float("0.213967") + mean = float("2.54367e-08") + std = float("0.0162066") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.115083") + max_val = float("0.358128") + mean = float("0.114656") + std = float("0.113948") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.951753") + max_val = float("2.26189") + mean = float("1.52767") + std = float("0.270648") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000693755") + max_val = float("0.167904") + mean = float("0.036745") + std = float("0.0346478") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.29083") + max_val = float("0.263843") + mean = float("-0.0338458") + std = float("0.104575") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.163667") + max_val = float("0.138825") + mean = float("-0.00220364") + std = float("0.019273") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + 
min_val = float("-0.0171517") + max_val = float("0.0144073") + mean = float("-0.000569723") + std = float("0.00619475") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.029931") + max_val = float("0.0370881") + mean = float("-0.000536685") + std = float("0.00463494") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 96, 3, 3] + dtype = "float32" + min_val = float("-147.179") + max_val = float("0.660965") + mean = float("-4.88749") + std = float("17.5359") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-1.01105") + max_val = float("1.7318") + mean = float("0.535475") + std = float("0.568297") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("-3.62111") + max_val = float("6.75119") + mean = float("1.40028") + std = float("1.43643") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.0703373") + max_val = float("6872.76") + mean = float("263.421") + std = float("1084.96") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-74.8477") + max_val = float("57.3349") + mean = float("0.120224") + std = float("13.9604") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-6.64278") + max_val = float("4.73501") + mean = float("0.00138439") + std = float("0.45685") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.215062") + max_val = float("0.521381") + mean = float("0.0230467") + std = float("0.103752") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.368833") + max_val = float("0.872126") + mean = float("0.014359") + std = float("0.076755") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.149007") + max_val = float("0.179056") + mean = float("7.04313e-09") + std = float("0.0118831") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.0157676") + max_val = float("0.211293") + mean = float("0.0894421") + std = float("0.0424799") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.08076") + max_val = float("1.5164") + mean = float("1.30379") + std = float("0.0875478") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00150177") + max_val = float("4.49811") + mean = float("0.223019") + std = float("0.537645") + data = 
None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.420716") + max_val = float("0.891691") + mean = float("0.00483043") + std = float("0.215039") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.094948") + max_val = float("0.136006") + mean = float("-0.000414778") + std = float("0.0141674") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00670688") + max_val = float("0.00836793") + mean = float("-6.36137e-05") + std = float("0.00299297") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00951381") + max_val = float("0.010467") + mean = float("-0.000194559") + std = float("0.00164111") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-12.9162") + max_val = float("0.473088") + mean = float("-0.375163") + std = float("1.10239") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.323238") + max_val = float("1.00619") + mean = float("0.395691") + std = float("0.241688") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.960681") + max_val = float("1.76782") + mean = float("1.32238") + std = float("0.142641") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.0232065") + max_val = float("91.5482") + mean = float("4.49702") + std = float("10.997") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-6.14227") + max_val = float("2.3041") + mean = float("-0.120745") + std = float("1.12205") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.652194") + max_val = float("0.399802") + mean = float("-0.00298454") + std = float("0.0598755") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.0264194") + max_val = float("0.0339535") + mean = float("0.000284419") + std = float("0.00844905") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0318726") + max_val = float("0.0351758") + mean = float("4.28945e-05") + std = float("0.00388519") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 10, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = 
float("-0.239633") + max_val = float("0.147111") + mean = float("7.85803e-10") + std = float("0.00710889") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("-0.0264226") + max_val = float("0.151642") + mean = float("0.0389686") + std = float("0.031602") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("1.05636") + max_val = float("1.41732") + mean = float("1.21716") + std = float("0.0540894") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.0020119") + max_val = float("1.14149") + mean = float("0.0723687") + std = float("0.109773") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-1.35683") + max_val = float("1.42528") + mean = float("-0.169891") + std = float("0.36563") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0500389") + max_val = float("0.057125") + mean = float("-0.00094884") + std = float("0.00580724") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.00615517") + max_val = float("0.00543546") + mean = float("3.08023e-05") + std = float("0.00178142") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00688777") + max_val = float("0.00710017") + mean = float("4.23549e-05") + std = float("0.000832116") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-41.6163") + max_val = float("3.10072") + mean = float("-1.55937") + std = float("3.42786") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-2.89492") + max_val = float("3.90498") + mean = float("0.717614") + std = float("0.930456") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("-0.320123") + max_val = float("3.0082") + mean = float("1.02013") + std = float("0.408653") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("0.33259") + max_val = float("4665.76") + mean = float("159.296") + std = float("412.826") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("-62.7742") + max_val = float("69.0625") + mean = float("-1.91282") + std = float("12.8145") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-2.26195") + max_val = float("2.1307") + mean = float("0.0161538") + std = float("0.209553") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.426069") + max_val = 
float("0.516238") + mean = float("-0.0102657") + std = float("0.119641") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.454954") + max_val = float("0.719579") + mean = float("-0.000220123") + std = float("0.0450053") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-2.82057") + max_val = float("3.82315") + mean = float("0.56578") + std = float("0.863461") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("-0.946322") + max_val = float("2.77367") + mean = float("0.861686") + std = float("0.614335") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("2.08205") + max_val = float("9283.07") + mean = float("439.334") + std = float("1007.74") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-13.9354") + max_val = float("20.8558") + mean = float("-0.418451") + std = float("3.76821") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-1.68175") + max_val = float("2.53594") + mean = float("0.00258849") + std = float("0.144369") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [192] + dtype = "float32" + min_val = float("-0.820833") + max_val = float("0.456562") + mean = float("-0.18679") + std = float("0.260809") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [192] + dtype = "float32" + min_val = float("0.570766") + max_val = float("1.65399") + mean = float("0.976551") + std = float("0.149378") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [192] + dtype = "float32" + min_val = float("0.0786884") + max_val = float("465.021") + mean = float("62.995") + std = float("91.1322") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [192] + dtype = "float32" + min_val = float("-6.06859") + max_val = float("5.04157") + mean = float("-0.924303") + std = float("1.73293") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.918638") + max_val = float("1.02728") + mean = float("-0.00178183") + std = float("0.142322") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [192] + dtype = "float32" + min_val = float("-0.820833") + max_val = float("0.456562") + mean = float("-0.18679") + std = float("0.260809") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [192] + dtype = "float32" + min_val = float("0.47387") + max_val = float("1.80472") + mean = float("1.05508") + std = float("0.20188") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [192] + dtype = "float32" + min_val = float("1.75934") + max_val = float("3299.58") + mean = float("287.511") + std = float("396.093") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [192] + dtype = "float32" + min_val = float("-16.0731") + max_val = float("15.9319") 
+ mean = float("-1.65254") + std = float("4.39761") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.433066") + max_val = float("0.680055") + mean = float("0.00639302") + std = float("0.0576293") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-1.40329") + max_val = float("0.685051") + mean = float("-0.249326") + std = float("0.387994") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.216632") + max_val = float("1.88557") + mean = float("1.03229") + std = float("0.31925") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("15.491") + max_val = float("17773.4") + mean = float("1476.12") + std = float("2075.78") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-15.3074") + max_val = float("32.3491") + mean = float("4.55234") + std = float("7.23514") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.584546") + max_val = float("1.28615") + mean = float("0.0141884") + std = float("0.083819") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.645293") + max_val = float("0.139143") + mean = float("-0.178297") + std = float("0.169533") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.209924") + max_val = float("2.11968") + mean = float("1.00713") + std = float("0.372888") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("1.34422") + max_val = float("2475.49") + mean = float("225.21") + std = float("354.207") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-6.01293") + max_val = float("10.0435") + mean = float("1.76063") + std = float("2.57145") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.882002") + max_val = float("1.10669") + mean = float("0.0224454") + std = float("0.121117") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.291425") + max_val = float("0.30073") + mean = float("-0.0499517") + std = float("0.0978276") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.537385") + max_val = float("1.371") + mean = float("0.963683") + std = float("0.148266") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.496517") + max_val = float("632.759") + mean = float("26.7516") + std = float("66.2857") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-3.74374") + max_val = float("5.0446") + mean = float("-0.10763") + 
std = float("1.09476") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.361263") + max_val = float("0.55684") + mean = float("-0.000766228") + std = float("0.0434958") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.178598") + max_val = float("0.00990263") + mean = float("-0.066553") + std = float("0.0419784") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.807738") + max_val = float("1.28416") + mean = float("1.06766") + std = float("0.0769779") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.312063") + max_val = float("3288.6") + mean = float("109.271") + std = float("356.67") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-10.9504") + max_val = float("27.1142") + mean = float("1.25804") + std = float("4.38411") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.192369") + max_val = float("0.287525") + mean = float("0.00126957") + std = float("0.0175737") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.448769") + max_val = float("1.22452") + mean = float("0.343163") + std = float("0.274074") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.66215") + max_val = float("1.76023") + mean = float("1.17769") + std = float("0.18141") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.268707") + max_val = float("401.346") + mean = float("18.6622") + std = float("47.8265") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-4.94655") + max_val = float("3.78657") + mean = float("-0.322792") + std = float("1.22066") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.843054") + max_val = float("0.640027") + mean = float("-0.00699637") + std = float("0.0647932") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("-0.312946") + max_val = float("0.137762") + mean = float("-0.0715004") + std = float("0.0998321") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("0.802427") + max_val = float("1.19647") + mean = float("0.91096") + std = float("0.0647076") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96] + dtype = "float32" + min_val = float("0.0408397") + max_val = float("19.5747") + mean = float("1.60484") + std = float("2.3158") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [96] + dtype = "float32" + min_val = float("-0.461892") + max_val = float("0.47687") + mean = float("0.0154949") + std = 
float("0.167054") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.529392") + max_val = float("0.380502") + mean = float("-0.00309373") + std = float("0.0617256") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.312946") + max_val = float("0.137762") + mean = float("-0.0715004") + std = float("0.0998321") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.911937") + max_val = float("1.31226") + mean = float("1.07259") + std = float("0.0769765") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.402477") + max_val = float("128.877") + mean = float("14.8668") + std = float("18.5813") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-1.72848") + max_val = float("1.26291") + mean = float("-0.0146559") + std = float("0.49536") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.324026") + max_val = float("0.348086") + mean = float("-0.00120559") + std = float("0.0277991") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.614623") + max_val = float("0.289827") + mean = float("-0.203526") + std = float("0.146809") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("0.769218") + max_val = float("1.49312") + mean = float("1.00355") + std = float("0.124304") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [96] + dtype = "float32" + min_val = float("0.11912") + max_val = float("762.186") + mean = float("47.5519") + std = float("95.6614") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-5.37401") + max_val = float("3.5075") + mean = float("-0.0539787") + std = float("1.34494") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.579172") + max_val = float("0.315085") + mean = float("-0.00196329") + std = float("0.0381453") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.506462") + max_val = float("0.160219") + mean = float("-0.137571") + std = float("0.104908") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.607824") + max_val = float("1.46238") + mean = float("1.00065") + std = float("0.14535") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.0878712") + max_val = float("185.421") + mean = float("25.2349") + std = float("42.2306") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-3.95134") + max_val = float("3.14829") + mean = float("-0.0808873") + 
std = float("1.16815") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.630151") + max_val = float("0.391766") + mean = float("-0.00864223") + std = float("0.0740842") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.136788") + max_val = float("0.0863166") + mean = float("-0.0206311") + std = float("0.0391573") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.817569") + max_val = float("1.42593") + mean = float("0.968003") + std = float("0.0796107") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.0391371") + max_val = float("48.6918") + mean = float("3.67588") + std = float("6.74936") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-2.47325") + max_val = float("1.77") + mean = float("-0.00477395") + std = float("0.538411") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.234435") + max_val = float("0.206171") + mean = float("-0.00167105") + std = float("0.0308459") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.287587") + max_val = float("0.0866058") + mean = float("-0.074022") + std = float("0.0780074") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.709799") + max_val = float("1.3672") + mean = float("1.07829") + std = float("0.122676") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.422226") + max_val = float("7577.7") + mean = float("183.281") + std = float("771.719") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-47.4532") + max_val = float("17.2191") + mean = float("1.10516") + std = float("6.7476") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.439143") + max_val = float("0.168468") + mean = float("0.000703296") + std = float("0.0186624") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.633322") + max_val = float("1.61392") + mean = float("0.491304") + std = float("0.537726") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("-4.28082") + max_val = float("5.42468") + mean = float("1.10549") + std = float("1.63518") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.327059") + max_val = float("2027.99") + mean = float("92.5814") + std = float("311.229") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-30.2292") + max_val = float("23.7824") + mean = 
float("-0.362454") + std = float("5.97943") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-5.52821") + max_val = float("4.06716") + mean = float("-0.00507277") + std = float("0.295119") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-2.54969") + max_val = float("3.93138") + mean = float("-0.00513147") + std = float("0.260174") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.579649") + max_val = float("0.678831") + mean = float("-0.00630613") + std = float("0.0619967") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.605055") + max_val = float("0.89966") + mean = float("-0.00364264") + std = float("0.0877097") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-1.10653") + max_val = float("1.03117") + mean = float("0.00412353") + std = float("0.0962884") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.887663") + max_val = float("0.948656") + mean = float("-0.00563303") + std = float("0.0922374") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.256264") + max_val = float("0.401136") + mean = float("0.0421691") + std = float("0.153754") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.531906") + max_val = float("1.45329") + mean = float("0.858895") + std = float("0.143341") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0438288") + max_val = float("18.0522") + mean = float("1.92002") + std = float("2.75006") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-2.88104") + max_val = float("0.977598") + mean = float("-0.172589") + std = float("0.584688") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.411803") + max_val = float("0.351176") + mean = float("-0.00826409") + std = float("0.059749") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.402358") + max_val = float("0.303354") + mean = float("-0.0388047") + std = float("0.118356") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.581409") + max_val = float("1.47355") + mean = float("0.943228") + std = float("0.139921") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0441456") + max_val = float("60.9324") + mean = float("8.8508") + std = float("12.21") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-4.25851") + max_val = float("5.16823") + mean = float("-0.479189") + std = float("1.22297") + data = None + + +class 
Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.48295") + max_val = float("0.441249") + mean = float("-0.00881297") + std = float("0.0569229") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.32421") + max_val = float("0.266352") + mean = float("-0.00747895") + std = float("0.12613") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.519219") + max_val = float("1.12638") + mean = float("0.877614") + std = float("0.0891218") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.123396") + max_val = float("31.4165") + mean = float("3.14043") + std = float("5.35112") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-1.66228") + max_val = float("0.741603") + mean = float("-0.163404") + std = float("0.457137") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.58867") + max_val = float("0.370978") + mean = float("-0.0142256") + std = float("0.0759194") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.32421") + max_val = float("0.266352") + mean = float("-0.00747895") + std = float("0.12613") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.827742") + max_val = float("1.27498") + mean = float("1.01544") + std = float("0.0788542") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("1.01171") + max_val = float("187.106") + mean = float("19.1738") + std = float("26.4103") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-4.39224") + max_val = float("2.57957") + mean = float("-0.261903") + std = float("1.22068") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.285029") + max_val = float("0.180291") + mean = float("-0.00280467") + std = float("0.0298423") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.604961") + max_val = float("0.351632") + mean = float("-0.139031") + std = float("0.191536") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.694254") + max_val = float("1.60003") + mean = float("1.02657") + std = float("0.199137") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.921789") + max_val = float("121.405") + mean = float("17.5672") + std = float("20.9759") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-6.75359") + max_val = float("4.66068") + mean = float("-0.633152") + std = float("2.18349") + data = None + + 
+class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.388441") + max_val = float("0.252028") + mean = float("-0.00574042") + std = float("0.040071") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.608761") + max_val = float("0.620147") + mean = float("-0.125889") + std = float("0.271963") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.298227") + max_val = float("1.44494") + mean = float("0.958324") + std = float("0.185291") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.76094") + max_val = float("166.584") + mean = float("11.544") + std = float("18.8008") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-2.29308") + max_val = float("3.45527") + mean = float("0.0881272") + std = float("0.747968") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.425653") + max_val = float("0.42047") + mean = float("-0.00305354") + std = float("0.0471989") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.186827") + max_val = float("0.350873") + mean = float("0.028774") + std = float("0.10278") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.781771") + max_val = float("1.17641") + mean = float("0.968082") + std = float("0.0795668") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.0722287") + max_val = float("14.7507") + mean = float("1.8866") + std = float("2.13087") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.877989") + max_val = float("1.09596") + mean = float("0.0429838") + std = float("0.401404") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.310367") + max_val = float("0.343865") + mean = float("9.32988e-05") + std = float("0.0258014") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.394429") + max_val = float("0.151118") + mean = float("-0.157936") + std = float("0.0947469") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.569579") + max_val = float("1.22394") + mean = float("0.88452") + std = float("0.113121") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.304946") + max_val = float("79.6404") + mean = float("7.91483") + std = float("11.2584") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-1.88539") + max_val = float("1.08967") + mean = float("-0.161356") + std = float("0.529325") + data 
= None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.241863") + max_val = float("0.33427") + mean = float("-0.00303591") + std = float("0.0345329") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.516638") + max_val = float("0.212009") + mean = float("-0.165994") + std = float("0.0918108") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.755509") + max_val = float("1.52142") + mean = float("1.02494") + std = float("0.104985") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.264865") + max_val = float("62.3137") + mean = float("5.47285") + std = float("7.11494") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-1.06868") + max_val = float("1.47406") + mean = float("0.0944651") + std = float("0.417393") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.767911") + max_val = float("0.955942") + mean = float("-0.00042618") + std = float("0.0605413") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.616241") + max_val = float("0.307741") + mean = float("-0.140768") + std = float("0.160981") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.614779") + max_val = float("2.29244") + mean = float("1.04452") + std = float("0.238895") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("10.4941") + max_val = float("18085.8") + mean = float("505.648") + std = float("1417.89") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-437.747") + max_val = float("111.616") + mean = float("-5.83122") + std = float("63.5469") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-1.76883") + max_val = float("1.37873") + mean = float("-0.000800037") + std = float("0.0827842") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.230721") + max_val = float("0.17275") + mean = float("-0.00108227") + std = float("0.0695531") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.0454979") + max_val = float("1.63561") + mean = float("0.903783") + std = float("0.193184") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.67978") + max_val = float("210.067") + mean = float("14.1904") + std = float("26.2457") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-5.80765") + max_val = float("9.72675") + mean = float("0.00297098") + std = 
float("1.58559") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-1.20605") + max_val = float("1.74847") + mean = float("-0.00163615") + std = float("0.129928") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.230721") + max_val = float("0.17275") + mean = float("-0.00108227") + std = float("0.0695531") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.569895") + max_val = float("1.79557") + mean = float("1.07485") + std = float("0.196604") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("4.98029") + max_val = float("317.166") + mean = float("48.0062") + std = float("48.6364") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-19.2376") + max_val = float("10.764") + mean = float("0.10151") + std = float("3.56058") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.774637") + max_val = float("0.806049") + mean = float("-0.000546968") + std = float("0.0564541") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.380216") + max_val = float("0.251529") + mean = float("-0.104168") + std = float("0.115141") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.592269") + max_val = float("2.17457") + mean = float("1.07796") + std = float("0.256684") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("7.45182") + max_val = float("880.126") + mean = float("101.039") + std = float("169.899") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-7.11415") + max_val = float("22.4907") + mean = float("1.0794") + std = float("5.60557") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.444042") + max_val = float("0.544237") + mean = float("0.0017251") + std = float("0.0526133") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.459433") + max_val = float("0.935566") + mean = float("-0.0972266") + std = float("0.183031") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.284426") + max_val = float("2.55063") + mean = float("1.03769") + std = float("0.374772") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("3.39096") + max_val = float("11291.5") + mean = float("731.14") + std = float("1201.61") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-270.97") + max_val = float("97.1423") + mean = float("-41.344") 
+ std = float("52.4511") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-3.30787") + max_val = float("4.96846") + mean = float("-0.000475496") + std = float("0.140827") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.234621") + max_val = float("0.240617") + mean = float("-0.0279227") + std = float("0.0653389") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.734845") + max_val = float("1.42978") + mean = float("0.972282") + std = float("0.109607") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("2.0343") + max_val = float("690.09") + mean = float("108.905") + std = float("129.097") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-55.381") + max_val = float("47.6224") + mean = float("0.413931") + std = float("20.044") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-1.19672") + max_val = float("1.2509") + mean = float("-0.000508045") + std = float("0.0731395") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("-1.63053e-06") + max_val = float("1.65415e-06") + mean = float("1.33836e-08") + std = float("2.89209e-07") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("-5.66523") + max_val = float("14.0713") + mean = float("0.760613") + std = float("0.81466") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512] + dtype = "float32" + min_val = float("-0.259005") + max_val = float("0.417562") + mean = float("0.00391291") + std = float("0.068938") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [2048, 512] + dtype = "float32" + min_val = float("-2.4938") + max_val = float("4.98887") + mean = float("0.000798087") + std = float("0.0597693") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [2048] + dtype = "float32" + min_val = float("-0.350064") + max_val = float("0.381906") + mean = float("0.00123002") + std = float("0.0475218") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [512, 2048] + dtype = "float32" + min_val = float("-3.10217") + max_val = float("2.23195") + mean = float("-7.5809e-05") + std = float("0.05166") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [512] + dtype = "float32" + min_val = float("-0.560329") + max_val = float("0.4174") + mean = float("-0.000139144") + std = float("0.0911645") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [512] + dtype = "float32" + min_val = float("-0.695101") + max_val = float("5.25296") + mean = float("0.777947") + std = float("0.336559") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [512] + dtype = "float32" + min_val = float("-0.204238") + max_val = 
float("0.0957857") + mean = float("-0.00074094") + std = float("0.0231454") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.994577") + max_val = float("0.821254") + mean = float("-0.000119189") + std = float("0.0523064") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [512] + dtype = "float32" + min_val = float("-0.0606877") + max_val = float("0.0580723") + mean = float("-0.000150446") + std = float("0.018678") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [512] + dtype = "float32" + min_val = float("0.231617") + max_val = float("0.929702") + mean = float("0.765052") + std = float("0.0383074") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [512] + dtype = "float32" + min_val = float("-0.0632024") + max_val = float("0.0854736") + mean = float("0.000981029") + std = float("0.018734") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.161155") + max_val = float("0.180583") + mean = float("0.00010141") + std = float("0.0117882") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [2048] + dtype = "float32" + min_val = float("-0.0422243") + max_val = float("0.0440755") + mean = float("0.000234211") + std = float("0.0200712") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.105924") + max_val = float("0.0957286") + mean = float("-2.7738e-05") + std = float("0.019841") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [512] + dtype = "float32" + min_val = float("-0.0327103") + max_val = float("0.028206") + mean = float("0.000161737") + std = float("0.00908583") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [512] + dtype = "float32" + min_val = float("0.611342") + max_val = float("0.948089") + mean = float("0.764828") + std = float("0.0221832") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [512] + dtype = "float32" + min_val = float("-0.0232558") + max_val = float("0.0275779") + mean = float("0.000316595") + std = float("0.00647804") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.121582") + max_val = float("0.12179") + mean = float("-6.34088e-05") + std = float("0.0344149") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [512] + dtype = "float32" + min_val = float("-0.0139646") + max_val = float("0.013529") + mean = float("0.000176344") + std = float("0.0047541") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [512] + dtype = "float32" + min_val = float("0.710162") + max_val = float("0.815042") + mean = float("0.765041") + std = float("0.0109646") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [512] + dtype = "float32" + min_val = float("-0.0435377") + max_val = float("0.0277476") + mean = float("-0.000388171") + std = float("0.0119235") + data = None + + +class Program_weight_tensor_parameter_253: + name = 
"parameter_253" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0655134") + max_val = float("0.0506251") + mean = float("-2.00607e-05") + std = float("0.0101169") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [2048] + dtype = "float32" + min_val = float("-0.0364798") + max_val = float("0.0381679") + mean = float("0.000375744") + std = float("0.0197336") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.045739") + max_val = float("0.045359") + mean = float("-2.81896e-05") + std = float("0.0195896") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [512] + dtype = "float32" + min_val = float("-0.0141306") + max_val = float("0.0113124") + mean = float("6.15386e-05") + std = float("0.00464767") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [512] + dtype = "float32" + min_val = float("0.711393") + max_val = float("0.813025") + mean = float("0.764938") + std = float("0.0107866") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [512] + dtype = "float32" + min_val = float("-0.0150515") + max_val = float("0.0208357") + mean = float("0.000247773") + std = float("0.00519934") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.082851") + max_val = float("0.081632") + mean = float("-6.9631e-05") + std = float("0.0340312") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [512] + dtype = "float32" + min_val = float("-0.0143427") + max_val = float("0.014045") + mean = float("0.000154787") + std = float("0.00470679") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [512] + dtype = "float32" + min_val = float("0.711212") + max_val = float("0.803394") + mean = float("0.765047") + std = float("0.0107305") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [512] + dtype = "float32" + min_val = float("-0.0314068") + max_val = float("0.0294973") + mean = float("0.000294591") + std = float("0.0114283") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0362987") + max_val = float("0.0355755") + mean = float("1.57498e-05") + std = float("0.0100509") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [2048] + dtype = "float32" + min_val = float("-0.036483") + max_val = float("0.0359683") + mean = float("0.000458059") + std = float("0.0197331") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0428148") + max_val = float("0.0420146") + mean = float("-2.82709e-05") + std = float("0.019582") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [512] + dtype = "float32" + min_val = float("-0.0134312") + max_val = float("0.0139554") + mean = float("-0.000103076") + std = float("0.0047426") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [512] + dtype = "float32" + min_val = float("0.705604") + max_val = float("0.803128") + mean = 
float("0.764935") + std = float("0.0109516") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [512] + dtype = "float32" + min_val = float("-0.0426247") + max_val = float("0.0337448") + mean = float("-1.16657e-05") + std = float("0.0122017") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.140401") + max_val = float("0.170011") + mean = float("-6.86181e-05") + std = float("0.0348799") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [512] + dtype = "float32" + min_val = float("-4.76834") + max_val = float("-0.173181") + mean = float("-2.26859") + std = float("0.762335") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [512] + dtype = "float32" + min_val = float("2.03038") + max_val = float("5.28381") + mean = float("3.71892") + std = float("0.500243") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [512] + dtype = "float32" + min_val = float("0.0122129") + max_val = float("463.858") + mean = float("9.7792") + std = float("36.2264") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [512] + dtype = "float32" + min_val = float("-1.34075") + max_val = float("1.7229") + mean = float("-0.0325411") + std = float("0.229985") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.691029") + max_val = float("0.813911") + mean = float("-0.00166457") + std = float("0.0345452") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0413051") + max_val = float("0.0288542") + mean = float("-0.00256086") + std = float("0.00614563") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.22733") + max_val = float("0.175369") + mean = float("-0.000704235") + std = float("0.00512356") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("-2.40363") + max_val = float("2.31885") + mean = float("-0.263388") + std = float("0.515093") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + dtype = "float32" + min_val = float("0.147659") + max_val = float("2.09538") + mean = float("0.46584") + std = float("0.324063") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192] + dtype = "float32" + min_val = float("0.000201702") + max_val = float("83.793") + mean = float("1.12327") + std = float("6.2137") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-3.18981") + max_val = float("0.719387") + mean = float("-0.0137013") + std = float("0.311806") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.346072") + max_val = float("0.467647") + mean = float("0.00059567") + std = float("0.0213881") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = 
float("-2.40359") + max_val = float("2.31826") + mean = float("-0.263559") + std = float("0.515014") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("0.658206") + max_val = float("2.82848") + mean = float("1.3408") + std = float("0.43093") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192] + dtype = "float32" + min_val = float("0.0213317") + max_val = float("176.9") + mean = float("11.5476") + std = float("23.6055") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-5.09019") + max_val = float("3.18034") + mean = float("-0.0144") + std = float("1.10603") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.165291") + max_val = float("0.29268") + mean = float("-8.8285e-05") + std = float("0.0142077") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("-3.28552") + max_val = float("1.08343") + mean = float("-1.32577") + std = float("0.63018") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("0.496691") + max_val = float("1.92652") + mean = float("1.15367") + std = float("0.223986") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192] + dtype = "float32" + min_val = float("0.134184") + max_val = float("1638.26") + mean = float("20.6166") + std = float("118.192") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-1.78132") + max_val = float("2.85271") + mean = float("0.0248559") + std = float("0.545498") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.379139") + max_val = float("0.44934") + mean = float("-0.000220283") + std = float("0.0141957") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("-3.81338") + max_val = float("3.66498") + mean = float("-0.650403") + std = float("0.909259") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("0.688655") + max_val = float("4.20872") + mean = float("1.51654") + std = float("0.440912") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192] + dtype = "float32" + min_val = float("0.0287672") + max_val = float("349.627") + mean = float("7.75443") + std = float("31.5038") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-2.21739") + max_val = float("4.32269") + mean = float("0.0619236") + std = float("0.529319") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.61869") + max_val = float("0.697654") + mean = float("-0.000868431") + std = float("0.0307954") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + 
min_val = float("-2.9224") + max_val = float("0.906953") + mean = float("-0.416892") + std = float("0.669075") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("0.779086") + max_val = float("3.2699") + mean = float("1.44797") + std = float("0.409923") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192] + dtype = "float32" + min_val = float("0.00121816") + max_val = float("70.9365") + mean = float("3.07187") + std = float("7.14326") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-1.29601") + max_val = float("1.85357") + mean = float("0.0464032") + std = float("0.334498") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.334798") + max_val = float("0.231374") + mean = float("-0.00102603") + std = float("0.0226155") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("-2.76292") + max_val = float("1.1604") + mean = float("-0.674221") + std = float("0.494111") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("0.411399") + max_val = float("1.90232") + mean = float("0.859858") + std = float("0.234777") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384] + dtype = "float32" + min_val = float("0.01724") + max_val = float("194.634") + mean = float("8.38296") + std = float("18.9123") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-7.87774") + max_val = float("4.23078") + mean = float("0.0585295") + std = float("1.22032") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.229048") + max_val = float("0.29779") + mean = float("-0.000167159") + std = float("0.011214") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [256] + dtype = "float32" + min_val = float("-2.80703") + max_val = float("1.38327") + mean = float("-0.942732") + std = float("0.615805") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [256] + dtype = "float32" + min_val = float("0.366017") + max_val = float("1.6916") + mean = float("0.920091") + std = float("0.171174") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [256] + dtype = "float32" + min_val = float("0.0113925") + max_val = float("32.4245") + mean = float("2.49178") + std = float("4.31629") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [256] + dtype = "float32" + min_val = float("-2.54016") + max_val = float("1.49565") + mean = float("-0.0678992") + std = float("0.597361") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.459227") + max_val = float("0.421982") + mean = float("-0.00172613") + std = float("0.0335115") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [192] + 
dtype = "float32" + min_val = float("-0.110164") + max_val = float("0.0498288") + mean = float("-0.00603749") + std = float("0.0136978") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.390404") + max_val = float("0.254318") + mean = float("-0.00448787") + std = float("0.0179257") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [96] + dtype = "float32" + min_val = float("-2.29657") + max_val = float("0.851176") + mean = float("-0.0598443") + std = float("0.525626") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [96] + dtype = "float32" + min_val = float("-0.124334") + max_val = float("2.27868") + mean = float("0.320449") + std = float("0.351576") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [96] + dtype = "float32" + min_val = float("0.000119426") + max_val = float("0.757895") + mean = float("0.0421386") + std = float("0.10326") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [96] + dtype = "float32" + min_val = float("-0.255899") + max_val = float("0.229185") + mean = float("0.00350536") + std = float("0.072908") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.156216") + max_val = float("0.179163") + mean = float("-5.52528e-05") + std = float("0.0151973") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [96] + dtype = "float32" + min_val = float("-2.29661") + max_val = float("0.85374") + mean = float("-0.0587759") + std = float("0.526151") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [96] + dtype = "float32" + min_val = float("0.459585") + max_val = float("3.28397") + mean = float("1.27898") + std = float("0.629667") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [96] + dtype = "float32" + min_val = float("0.00632215") + max_val = float("138.217") + mean = float("5.12786") + std = float("19.2554") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [96] + dtype = "float32" + min_val = float("-2.42899") + max_val = float("2.01634") + mean = float("0.0614915") + std = float("0.643132") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.248678") + max_val = float("0.470269") + mean = float("-0.00072636") + std = float("0.0181105") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [96] + dtype = "float32" + min_val = float("-2.80574") + max_val = float("1.43341") + mean = float("-1.0354") + std = float("0.7042") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [96] + dtype = "float32" + min_val = float("0.391437") + max_val = float("2.00932") + mean = float("1.06992") + std = float("0.22779") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [96] + dtype = "float32" + min_val = float("0.0870898") + max_val = float("187.346") + mean = float("16.5146") + std = float("35.0408") + data = None + + +class Program_weight_tensor_parameter_327: + name = 
"parameter_327" + shape = [96] + dtype = "float32" + min_val = float("-5.68789") + max_val = float("3.04399") + mean = float("-0.245948") + std = float("1.45454") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.202087") + max_val = float("0.209263") + mean = float("-0.00142875") + std = float("0.0167814") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [96] + dtype = "float32" + min_val = float("-2.54215") + max_val = float("0.882014") + mean = float("-0.00160162") + std = float("0.5099") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [96] + dtype = "float32" + min_val = float("-0.103753") + max_val = float("3.23826") + mean = float("0.307247") + std = float("0.398645") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [96] + dtype = "float32" + min_val = float("0.000261127") + max_val = float("1.54478") + mean = float("0.133443") + std = float("0.254186") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [96] + dtype = "float32" + min_val = float("-0.119804") + max_val = float("0.204739") + mean = float("0.0173238") + std = float("0.0579852") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.129922") + max_val = float("0.0964318") + mean = float("-0.00132652") + std = float("0.0165042") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [96] + dtype = "float32" + min_val = float("-2.54207") + max_val = float("0.884054") + mean = float("-0.000448709") + std = float("0.510682") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [96] + dtype = "float32" + min_val = float("0.380336") + max_val = float("2.98099") + mean = float("0.924315") + std = float("0.404415") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [96] + dtype = "float32" + min_val = float("0.0505116") + max_val = float("77.0173") + mean = float("5.13723") + std = float("9.25549") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [96] + dtype = "float32" + min_val = float("-0.906732") + max_val = float("1.00025") + mean = float("0.0466181") + std = float("0.38374") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.344494") + max_val = float("0.320728") + mean = float("-0.000472941") + std = float("0.0172637") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [96] + dtype = "float32" + min_val = float("-2.06112") + max_val = float("1.5015") + mean = float("-0.857611") + std = float("0.652753") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [96] + dtype = "float32" + min_val = float("0.436453") + max_val = float("1.95619") + mean = float("1.08976") + std = float("0.247041") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [96] + dtype = "float32" + min_val = float("0.0890425") + max_val = float("43.3359") + mean = float("3.86968") + std = float("7.39116") + data = None + + +class 
Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [96] + dtype = "float32" + min_val = float("-3.15364") + max_val = float("1.89453") + mean = float("-0.175744") + std = float("0.76799") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.190613") + max_val = float("0.220246") + mean = float("-0.000576299") + std = float("0.0187421") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [96] + dtype = "float32" + min_val = float("-1.49948") + max_val = float("1.86726") + mean = float("0.0897808") + std = float("0.865262") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [96] + dtype = "float32" + min_val = float("0.284704") + max_val = float("1.33171") + mean = float("0.695795") + std = float("0.271829") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [96] + dtype = "float32" + min_val = float("0.0341847") + max_val = float("36.4222") + mean = float("4.46755") + std = float("7.44847") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [96] + dtype = "float32" + min_val = float("-1.34419") + max_val = float("1.56937") + mean = float("-0.0755564") + std = float("0.518799") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.267866") + max_val = float("0.348449") + mean = float("-0.00189204") + std = float("0.0344483") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [96] + dtype = "float32" + min_val = float("-2.53699") + max_val = float("1.68752") + mean = float("0.398864") + std = float("0.702501") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [96] + dtype = "float32" + min_val = float("0.401051") + max_val = float("4.70735") + mean = float("1.37094") + std = float("0.963362") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [96] + dtype = "float32" + min_val = float("0.0121875") + max_val = float("32.521") + mean = float("1.06082") + std = float("3.43873") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [96] + dtype = "float32" + min_val = float("-2.14146") + max_val = float("0.648238") + mean = float("0.0364229") + std = float("0.351286") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.270663") + max_val = float("0.317164") + mean = float("-0.000504822") + std = float("0.0263221") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [192] + dtype = "float32" + min_val = float("-4.58862") + max_val = float("2.01289") + mean = float("-0.059504") + std = float("0.877069") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [192] + dtype = "float32" + min_val = float("0.549565") + max_val = float("4.31144") + mean = float("1.03925") + std = float("0.430187") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [192] + dtype = "float32" + min_val = float("0.00878514") + max_val = float("30.5151") + mean = float("1.56899") + std = float("3.46067") + data = None 
+ + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [192] + dtype = "float32" + min_val = float("-1.00552") + max_val = float("2.23401") + mean = float("0.0551103") + std = float("0.443125") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.192189") + max_val = float("0.192709") + mean = float("-0.000819168") + std = float("0.0142726") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [128] + dtype = "float32" + min_val = float("-2.16473") + max_val = float("1.4447") + mean = float("-0.60961") + std = float("0.630418") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [128] + dtype = "float32" + min_val = float("0.345005") + max_val = float("2.18971") + mean = float("0.792173") + std = float("0.224656") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [128] + dtype = "float32" + min_val = float("0.0162545") + max_val = float("35.9585") + mean = float("2.7438") + std = float("4.84461") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [128] + dtype = "float32" + min_val = float("-1.65531") + max_val = float("0.928473") + mean = float("-0.31948") + std = float("0.461471") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-1.14324") + max_val = float("0.691253") + mean = float("-0.00242802") + std = float("0.0660078") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [96] + dtype = "float32" + min_val = float("-0.0399276") + max_val = float("0.0452427") + mean = float("-0.00642342") + std = float("0.0148177") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.375768") + max_val = float("0.256567") + mean = float("-0.0056385") + std = float("0.0274486") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.245537") + max_val = float("0.208991") + mean = float("-0.00097107") + std = float("0.0324578") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.594484") + max_val = float("0.471216") + mean = float("-0.00379107") + std = float("0.0338907") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.471666") + max_val = float("0.510794") + mean = float("-0.00233325") + std = float("0.035912") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.358014") + max_val = float("0.243577") + mean = float("-0.00220567") + std = float("0.0410855") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.212955") + max_val = float("0.227952") + mean = float("-0.000831372") + std = float("0.0279648") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [48] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.509937") + max_val = float("0.513332") + mean = float("-0.00111681") + std = float("0.0338375") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.542048") + max_val = float("0.67105") + mean = float("-0.00118045") + std = float("0.0623895") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.309978") + max_val = float("0.371717") + mean = float("0.00549379") + std = float("0.0542365") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [96] + dtype = "float32" + min_val = float("-3.42008") + max_val = float("3.81464") + mean = float("0.315855") + std = float("1.19703") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [96] + dtype = "float32" + min_val = float("0.536665") + max_val = float("5.52108") + mean = float("1.04896") + std = float("0.54924") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [96] + dtype = "float32" + min_val = float("0.292414") + max_val = float("63.9814") + mean = float("10.8852") + std = float("13.3847") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [96] + dtype = "float32" + min_val = float("-2.47862") + max_val = float("2.08655") + mean = float("0.0687172") + std = float("0.733523") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [96, 64, 3, 3] + dtype = 
"float32" + min_val = float("-0.362108") + max_val = float("0.338032") + mean = float("0.00137787") + std = float("0.0346947") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-1.55424") + max_val = float("1.95132") + mean = float("6.49773e-05") + std = float("0.167671") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.81389") + max_val = float("0.655433") + mean = float("-0.0146491") + std = float("0.120861") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.845528") + max_val = float("0.827015") + mean = float("0.000817065") + std = float("0.136801") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.435837") + max_val = float("0.721875") + mean = float("0.00369336") + std = float("0.0692632") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.925161") + max_val = float("0.592081") + mean = float("-0.00596284") + std = float("0.0925129") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-1.13479") + max_val = float("1.33198") + mean = float("-0.0122417") + std = float("0.234651") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.788603") + max_val = float("1.48827") + mean = float("8.33313e-06") + std = float("0.179123") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.967493") + max_val = float("0.594212") + mean = float("-0.0103727") + std = float("0.0932582") + data = None + + +class 
Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-1.09328") + max_val = float("0.732833") + mean = float("-0.0062838") + std = float("0.157053") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-1.35452") + max_val = float("1.03463") + mean = float("-0.00454889") + std = float("0.262717") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-2.69993") + max_val = float("1.29774") + mean = float("0.0419468") + std = float("0.626833") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt new file mode 100644 index 000000000..54a609c33 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt @@ -0,0 +1 @@ +a999d5921e887e45f1813dad835878d69c80f2ace07b55d5437ad7d4132486cf \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + 
"num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py new file mode 100644 index 000000000..aa17b1f21 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/input_meta.py @@ -0,0 +1,64 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 9261] + dtype = "float32" + max_val = float("10.0") + mean = float("0.0390347") + std = float("0.295082") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 38, 9261] + dtype = "float32" + max_val = float("0.970134") + mean = float("0.00102475") + std = float("0.0250592") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 38, 9261] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00102723") + std = float("0.0320339") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 38, 1] + dtype = "int32" + min_val = 0 + max_val = 9 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 38, 4] + dtype = "float32" + max_val = float("654.24") + mean = float("257.813") + std = float("167.378") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 38, 9261] + dtype = "float32" + max_val = float("0.653712") + mean = float("9.28356e-05") + std = float("0.00547156") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py new file mode 100644 index 000000000..7af677b88 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/model.py @@ -0,0 +1,244 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x9261xf32) <- (2x9261xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x9261xb) <- (2x1x9261xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 38, 1] + + # pd_op.tile: (2x38x9261xb) <- (2x1x9261xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x9261xi64) <- (2x38x9261xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("38"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x9261x38xf32) <- (2x9261xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del 
argmax_0, full_2 + + # pd_op.transpose: (2x38x9261xf32) <- (2x9261x38xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x38x9261xf32) <- (2x38x9261xb, 2x38x9261xf32, 2x38x9261xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (2x9261xf32) <- (2x38x9261xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x9261xi64) <- (2x38x9261xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x9261xi64) <- (2x9261xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (76xi32) <- (2x38x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (18522xi64) <- (2x9261xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (18522xi32) <- (76xi32, 18522xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 9261] + + # pd_op.reshape: (2x9261xi32) <- (18522xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x9261xb) <- (2x9261xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x9261xi32) <- (2x9261xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (2x9261xi32) <- (2x9261xb, 2x9261xi32, 2x9261xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (76x4xf32) <- (2x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (18522x4xf32) <- (76x4xf32, 18522xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 9261, 4] + + # pd_op.reshape: (2x9261x4xf32) <- (18522x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x9261x11xf32) <- (2x9261xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), 
full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x9261x10xf32) <- (2x9261x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x38x1xf32) <- (2x38x9261xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x38x1xf32) <- (2x38x9261xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x38x1xf32) <- (2x38x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x9261xf32) <- (2x38x9261xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x9261x1xf32) <- (2x9261xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (2x9261x10xf32) <- (2x9261x10xf32, 2x9261x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt new file mode 100644 index 000000000..463698320 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt @@ -0,0 +1 @@ +611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at 
end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/input_meta.py new file mode 100644 index 000000000..ff853d672 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/input_meta.py @@ -0,0 +1,38 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 8400, 4] + dtype = "float32" + min_val = float("-0.0328555") + max_val = float("2.43675") + mean = float("0.969788") + std = float("0.438411") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8400, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8400, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 2] + dtype = "float32" + data = [0.836601, 0.470588] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py new file mode 100644 index 000000000..5d4c5e86c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py @@ -0,0 +1,94 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) + del data_0, full_0 + + # builtin.split: (1x8400x2xf32, 1x8400x2xf32) <- ([1x8400x2xf32, 1x8400x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x8400x2xf32) <- (1x8400x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) + del full_1, split_0 + + # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32) + add_0 = paddle._C_ops.add(scale_0, data_1) + del scale_0 + + # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32) + add_1 = paddle._C_ops.add(split_1, data_1) + del data_1, split_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x2xf32, 1x8400x2xf32) + combine_0 = [add_0, add_1] + del add_0, add_1 + + # pd_op.concat: (1x8400x4xf32) <- ([1x8400x2xf32, 1x8400x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.multiply: (1x8400x4xf32) <- (1x8400x4xf32, 8400x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, data_2) + del concat_0, data_2 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1xf32, 1x1xf32]) <- (1x2xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + del data_3, full_3 + + # builtin.split: (1x1xf32, 1x1xf32) <- ([1x1xf32, 1x1xf32]) + ( + split_2, + split_3, + ) = split_with_num_1 + del split_with_num_1 + + # builtin.combine: ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32]) <- 
(1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32) + combine_1 = [split_3, split_2, split_3, split_2] + del split_2, split_3 + + # pd_op.concat: (1x4xf32) <- ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_2) + del combine_1, full_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_0 = [-1, 1, 4] + + # pd_op.reshape: (1x1x4xf32) <- (1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) + del concat_1, full_int_array_0 + + # pd_op.divide: (1x8400x4xf32) <- (1x8400x4xf32, 1x1x4xf32) + divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) + del multiply_0, reshape_0 + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt new file mode 100644 index 000000000..324872d83 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt @@ -0,0 +1 @@ +9a8c4e1b7378010caa6e7b8e301735887344173917cbd1df9b66817f174a100c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py new file mode 100644 index 000000000..b1ee32a68 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py @@ -0,0 +1,138 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [4725] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00149471") + std = float("0.0386326") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 8, 1] + dtype = "int32" + data = [3, 4, 8, 0, 0, 0, 0, 0, 3, 3, 3, 8, 8, 0, 5, 5] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0119577") + std = float("0.108695") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 8, 4] + dtype = "float32" + data = [ + 10.4673, + 88.2581, + 29.9065, + 109.935, + 86.729, + 34.0645, + 116.636, + 55.7419, + 231.776, + 374.71, + 480.0, + 453.677, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 24.5839, + 49.1873, + 36.2612, + 100.071, + 50.3969, + 84.8057, + 63.3035, + 147.562, + 25.8131, + 408.763, + 46.0947, + 480.0, + 65.1472, + 242.544, + 89.1165, + 480.0, + 
296.85, + 351.095, + 326.351, + 478.304, + 280.256, + 120.424, + 283.944, + 150.954, + 251.985, + 61.0601, + 275.954, + 169.611, + 236.62, + 0.0, + 263.047, + 79.7173, + ] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("0.049982") + mean = float("1.7069e-06") + std = float("0.00021642") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("0.886292") + mean = float("0.0026412") + std = float("0.0230532") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py new file mode 100644 index 000000000..4307988eb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py @@ -0,0 +1,212 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = 
[-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 + + # pd_op.full: (10xi64) <- () + full_7 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x10xf32) <- (2x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x10xf32) <- (2x-1x10xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..3b4c91958 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +2c26a537c16306bff9e45eef0a8e89a4bfc4ef2474aa9287527ef7c6aae5a086 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/input_meta.py new file mode 100644 index 000000000..ceee92b28 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 9261, 10] + dtype = "float32" + min_val = float("2.43778e-11") + max_val = float("0.940314") + mean = float("0.00863495") + std = float("0.0285493") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 9261, 40] + dtype = "float32" + min_val = float("-6.01871") + max_val = float("10.0402") + mean = float("0.791037") + std = float("2.0138") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [9261, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("668.0") + mean = float("336.0") + std = float("193.958") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [9261, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/model.py new file mode 100644 index 000000000..3c5845f1f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (9261x2xf32) <- (9261x2xf32, 9261x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (2x9261x40xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + 
shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("10"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x10xf32) <- (2x9261x40xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x10xf32) <- (-1x-1x4x10xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x-1x4x10xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x9261x2xf32) <- (-1x-1x2xf32, 9261x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x9261x2xf32) <- (-1x-1x2xf32, 9261x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x9261x2xf32, -1x9261x2xf32]) <- (-1x9261x2xf32, -1x9261x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x9261x4xf32) <- ([-1x9261x2xf32, -1x9261x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (2x9261x10xf32) <- (2x9261x10xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x9261x4xf32) <- (-1x9261x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x9261x4xf32) <- (-1x9261x4xf32, 9261x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + 
) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/weight_meta.py new file mode 100644 index 000000000..88fef0bea --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 10, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..3a428f405 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +b624ace49d286bb7132767299810ec40d3177bfc836e08f1f20bb12437c85436 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/input_meta.py new file mode 100644 index 000000000..d1545d186 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/input_meta.py @@ -0,0 +1,138 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.814948] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.685804] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.706408] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.690983] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.749639] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.8096] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0470399") + max_val = float("0.0491953") + mean = float("-1.90385e-05") + std = float("0.024744") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1536] + dtype = "float32" + min_val = float("-0.00376428") + max_val = float("0.003968") + mean = float("-2.01216e-07") + std = float("0.000720534") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0453472") + max_val = float("0.0447252") + mean = float("-1.88635e-05") + std = float("0.0247424") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1536] + dtype = "float32" + min_val = float("-0.00084092") + max_val = float("0.00111797") + mean = float("-5.53371e-06") + std = float("0.000260887") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.044496") + max_val = float("0.0437471") + mean = float("-1.82325e-05") + std = float("0.0247405") + data = None + + +class 
Program_weight_tensor_data_11: + name = "data_11" + shape = [1536] + dtype = "float32" + min_val = float("-0.000737891") + max_val = float("0.000798035") + mean = float("-9.86809e-07") + std = float("0.000181539") + data = None + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [512, 1536] + dtype = "float32" + min_val = float("-0.0439528") + max_val = float("0.0436547") + mean = float("-1.77278e-05") + std = float("0.0247394") + data = None + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1536] + dtype = "float32" + min_val = float("-0.000791799") + max_val = float("0.000747675") + mean = float("-1.15815e-06") + std = float("0.000148195") + data = None + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [2, 3, 672, 672] + dtype = "float32" + max_val = float("0.995113") + mean = float("0.375674") + std = float("0.170865") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/model.py new file mode 100644 index 000000000..3395e8d02 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/model.py @@ -0,0 +1,5454 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + 
parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + 
parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + ): + # pd_op.conv2d: (2x16x336x336xf32) <- (2x3x672x672xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_14, parameter_407, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_14, parameter_407 + + # pd_op.batch_norm_: (2x16x336x336xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x336x336xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_406, + parameter_405, + parameter_404, + parameter_403, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_403, parameter_404, parameter_405, parameter_406 + + # pd_op.swish: (2x16x336x336xf32) <- (2x16x336x336xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x16x336x336xf32) <- (2x16x336x336xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_402, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_402 + + # pd_op.batch_norm_: (2x16x336x336xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x336x336xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_401, + parameter_400, + parameter_399, + parameter_398, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_398, parameter_399, parameter_400, parameter_401 + + # pd_op.swish: (2x16x336x336xf32) <- (2x16x336x336xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x32x336x336xf32) <- (2x16x336x336xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_397, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397 + + # pd_op.batch_norm_: (2x32x336x336xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x336x336xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_393, parameter_394, parameter_395, parameter_396 + + # pd_op.swish: (2x32x336x336xf32) <- (2x32x336x336xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x48x168x168xf32) <- (2x32x336x336xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_392, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_392 + + # pd_op.batch_norm_: (2x48x168x168xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x168x168xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.swish: (2x48x168x168xf32) <- (2x48x168x168xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x24x168x168xf32) <- (2x48x168x168xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_387, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.batch_norm_: (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_386, + parameter_385, + parameter_384, + parameter_383, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_383, parameter_384, parameter_385, parameter_386 + + # pd_op.swish: (2x24x168x168xf32) <- (2x24x168x168xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x24x168x168xf32) <- (2x48x168x168xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_382, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_382 + + # pd_op.batch_norm_: (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_381, + parameter_380, + parameter_379, + parameter_378, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_378, parameter_379, parameter_380, parameter_381 + + # pd_op.swish: (2x24x168x168xf32) <- (2x24x168x168xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x24x168x168xf32) <- (2x24x168x168xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_377, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_377 + + # pd_op.batch_norm_: 
(2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_376, + parameter_375, + parameter_374, + parameter_373, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_373, parameter_374, parameter_375, parameter_376 + + # pd_op.swish: (2x24x168x168xf32) <- (2x24x168x168xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x24x168x168xf32) <- (2x24x168x168xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_372, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_372 + + # pd_op.batch_norm_: (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_371, + parameter_370, + parameter_369, + parameter_368, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_368, parameter_369, parameter_370, parameter_371 + + # pd_op.conv2d: (2x24x168x168xf32) <- (2x24x168x168xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_367, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_367 + + # pd_op.batch_norm_: (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x168x168xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, parameter_366 + + # pd_op.multiply: (2x24x168x168xf32) <- (1xf32, 2x24x168x168xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (2x24x168x168xf32) <- (2x24x168x168xf32, 2x24x168x168xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (2x24x168x168xf32) <- (2x24x168x168xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x24x168x168xf32) <- (2x24x168x168xf32, 2x24x168x168xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: 
(1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x24x168x168xf32, 2x24x168x168xf32]) <- (2x24x168x168xf32, 2x24x168x168xf32) + combine_0 = [swish_5, add_1] + + # pd_op.concat: (2x48x168x168xf32) <- ([2x24x168x168xf32, 2x24x168x168xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x168x168xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_362, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_361, full_int_array_1) + del parameter_361 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x168x168xf32) <- (2x48x168x168xf32, 2x48x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x64x168x168xf32) <- (2x48x168x168xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_1, parameter_360, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_360 + + # pd_op.batch_norm_: (2x64x168x168xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x168x168xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_359, + parameter_358, + parameter_357, + parameter_356, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_356, parameter_357, parameter_358, parameter_359 + + # pd_op.swish: (2x64x168x168xf32) <- (2x64x168x168xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x96x84x84xf32) <- (2x64x168x168xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_355, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_355 + + # pd_op.batch_norm_: (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_354, + parameter_353, + parameter_352, + parameter_351, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_351, parameter_352, parameter_353, parameter_354 + + # pd_op.swish: 
(2x96x84x84xf32) <- (2x96x84x84xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x96x84x84xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_350, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_350 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_349, + parameter_348, + parameter_347, + parameter_346, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_346, parameter_347, parameter_348, parameter_349 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_11 = paddle._C_ops.swish(batch_norm__66) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x96x84x84xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_345 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_344, + parameter_343, + parameter_342, + parameter_341, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_341, parameter_342, parameter_343, parameter_344 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_12 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_340 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_339, + parameter_338, + parameter_337, + parameter_336, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_336, parameter_337, parameter_338, parameter_339 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_13 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_13, parameter_335, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_335 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_334, + parameter_333, + parameter_332, + parameter_331, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), 
+ lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_331, parameter_332, parameter_333, parameter_334 + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_330 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_329, + parameter_328, + parameter_327, + parameter_326, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_326, parameter_327, parameter_328, parameter_329 + + # pd_op.multiply: (2x48x84x84xf32) <- (1xf32, 2x48x84x84xf32) + multiply_2 = paddle._C_ops.multiply(data_1, batch_norm__90) + del data_1 + + # pd_op.add: (2x48x84x84xf32) <- (2x48x84x84xf32, 2x48x84x84xf32) + add_3 = paddle._C_ops.add(batch_norm__84, multiply_2) + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_14 = paddle._C_ops.swish(add_3) + + # pd_op.add: (2x48x84x84xf32) <- (2x48x84x84xf32, 2x48x84x84xf32) + add_4 = paddle._C_ops.add(swish_12, swish_14) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_325, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_325 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_324, + parameter_323, + parameter_322, + parameter_321, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_321, parameter_322, parameter_323, parameter_324 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_320, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_320 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_319, + parameter_318, + parameter_317, + parameter_316, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_316, parameter_317, parameter_318, parameter_319 + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_315, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_315 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 
48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_314, + parameter_313, + parameter_312, + parameter_311, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_311, parameter_312, parameter_313, parameter_314 + + # pd_op.multiply: (2x48x84x84xf32) <- (1xf32, 2x48x84x84xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del data_2 + + # pd_op.add: (2x48x84x84xf32) <- (2x48x84x84xf32, 2x48x84x84xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (2x48x84x84xf32) <- (2x48x84x84xf32, 2x48x84x84xf32) + add_6 = paddle._C_ops.add(add_4, swish_16) + + # builtin.combine: ([2x48x84x84xf32, 2x48x84x84xf32]) <- (2x48x84x84xf32, 2x48x84x84xf32) + combine_1 = [swish_11, add_6] + + # pd_op.concat: (2x96x84x84xf32) <- ([2x48x84x84xf32, 2x48x84x84xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x84x84xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_310 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_309, full_int_array_1) + del parameter_309 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x84x84xf32) <- (2x96x84x84xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x128x84x84xf32) <- (2x96x84x84xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_4, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308 + + # pd_op.batch_norm_: (2x128x84x84xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x84x84xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.swish: (2x128x84x84xf32) <- (2x128x84x84xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x192x42x42xf32) <- (2x128x84x84xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_303, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, 
+ batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.swish: (2x192x42x42xf32) <- (2x192x42x42xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x192x42x42xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_298, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x192x42x42xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_293, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_293 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_20, parameter_288, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_21, parameter_283, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_283 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.multiply: (2x96x42x42xf32) <- (1xf32, 2x96x42x42xf32) + multiply_5 = paddle._C_ops.multiply(data_3, batch_norm__150) + del data_3 + + # pd_op.add: (2x96x42x42xf32) <- (2x96x42x42xf32, 2x96x42x42xf32) + add_8 = paddle._C_ops.add(batch_norm__144, multiply_5) + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_22 = paddle._C_ops.swish(add_8) + + # pd_op.add: (2x96x42x42xf32) <- (2x96x42x42xf32, 2x96x42x42xf32) + add_9 = paddle._C_ops.add(swish_20, swish_22) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_273, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_273 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_272, + parameter_271, + parameter_270, + parameter_269, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_269, parameter_270, parameter_271, parameter_272 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_23 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_268, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_263, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.multiply: (2x96x42x42xf32) <- (1xf32, 2x96x42x42xf32) + multiply_6 = paddle._C_ops.multiply(data_4, batch_norm__168) + del data_4 + + # pd_op.add: (2x96x42x42xf32) <- (2x96x42x42xf32, 2x96x42x42xf32) + add_10 = paddle._C_ops.add(batch_norm__162, multiply_6) + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_24 = paddle._C_ops.swish(add_10) + + # pd_op.add: (2x96x42x42xf32) <- (2x96x42x42xf32, 2x96x42x42xf32) + add_11 = paddle._C_ops.add(add_9, swish_24) + + # builtin.combine: ([2x96x42x42xf32, 2x96x42x42xf32]) <- (2x96x42x42xf32, 2x96x42x42xf32) + combine_2 = [swish_19, add_11] + + # pd_op.concat: (2x192x42x42xf32) <- ([2x96x42x42xf32, 2x96x42x42xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x42x42xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_258 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_257, full_int_array_1) + del parameter_257 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x42x42xf32) <- (2x192x42x42xf32, 2x192x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x256x42x42xf32) <- (2x192x42x42xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_7, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256 + + # pd_op.batch_norm_: (2x256x42x42xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x42x42xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_255, + parameter_254, + parameter_253, + parameter_252, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_252, parameter_253, parameter_254, parameter_255 + + # pd_op.swish: 
(2x256x42x42xf32) <- (2x256x42x42xf32) + swish_25 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (2x384x21x21xf32) <- (2x256x42x42xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_251, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_251 + + # pd_op.batch_norm_: (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_250, + parameter_249, + parameter_248, + parameter_247, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_247, parameter_248, parameter_249, parameter_250 + + # pd_op.swish: (2x384x21x21xf32) <- (2x384x21x21xf32) + swish_26 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x384x21x21xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_246, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_246 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_245, + parameter_244, + parameter_243, + parameter_242, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_242, parameter_243, parameter_244, parameter_245 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_27 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x384x21x21xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_241, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_241 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_240, + parameter_239, + parameter_238, + parameter_237, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_237, parameter_238, parameter_239, parameter_240 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_28 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_236, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_236 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_235, + parameter_234, + parameter_233, + 
parameter_232, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_232, parameter_233, parameter_234, parameter_235 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_29 = paddle._C_ops.swish(batch_norm__198) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_29, parameter_231, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_231 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_230, + parameter_229, + parameter_228, + parameter_227, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_227, parameter_228, parameter_229, parameter_230 + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_226, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_226 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_225, + parameter_224, + parameter_223, + parameter_222, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_222, parameter_223, parameter_224, parameter_225 + + # pd_op.multiply: (2x192x21x21xf32) <- (1xf32, 2x192x21x21xf32) + multiply_8 = paddle._C_ops.multiply(data_5, batch_norm__210) + del data_5 + + # pd_op.add: (2x192x21x21xf32) <- (2x192x21x21xf32, 2x192x21x21xf32) + add_13 = paddle._C_ops.add(batch_norm__204, multiply_8) + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_30 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x192x21x21xf32) <- (2x192x21x21xf32, 2x192x21x21xf32) + add_14 = paddle._C_ops.add(swish_28, swish_30) + + # builtin.combine: ([2x192x21x21xf32, 2x192x21x21xf32]) <- (2x192x21x21xf32, 2x192x21x21xf32) + combine_3 = [swish_27, add_14] + + # pd_op.concat: (2x384x21x21xf32) <- ([2x192x21x21xf32, 2x192x21x21xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x21x21xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_221, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_221 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_220, full_int_array_1) + del full_int_array_1, parameter_220 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = 
paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x21x21xf32) <- (2x384x21x21xf32, 2x384x1x1xf32) + multiply_9 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x512x21x21xf32) <- (2x384x21x21xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_9, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (2x512x21x21xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x21x21xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x512x21x21xf32) <- (2x512x21x21xf32) + swish_31 = paddle._C_ops.swish(batch_norm__216) + + # pd_op.flatten: (2x512x441xf32) <- (2x512x21x21xf32) + flatten_0 = paddle._C_ops.flatten(swish_31, 2, 3) + + # pd_op.transpose: (2x441x512xf32) <- (2x512x441xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("21"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (21xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32") + del full_2 + + # builtin.combine: ([21xf32, 21xf32]) <- (21xf32, 21xf32) + combine_4 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([21x21xf32, 21x21xf32]) <- ([21xf32, 21xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (21x21xf32, 21x21xf32) <- ([21x21xf32, 21x21xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("128"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (128xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32") + del full_1, full_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.0078125"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (128xf32) <- (128xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True) + del arange_1, full_5 + + # pd_op.full: (128xf32) <- () + full_6 = paddle._C_ops.full( + [128], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (128xf32) <- (128xf32, 128xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0) + del full_6, scale_0 + + # pd_op.full: (128xf32) <- () + full_7 = paddle._C_ops.full( + [128], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (128xf32) <- (128xf32, 128xf32) + divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0) + del elementwise_pow_0, full_7 + + # pd_op.flatten: (441xf32) <- (21x21xf32) + flatten_1 = 
paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (441x1xf32) <- (441xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.unsqueeze: (1x128xf32) <- (128xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3) + del divide_0 + + # pd_op.matmul: (441x128xf32) <- (441x1xf32, 1x128xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (441xf32) <- (21x21xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (441x1xf32) <- (441xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2) + del flatten_2, full_int_array_2 + + # pd_op.matmul: (441x128xf32) <- (441x1xf32, 1x128xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (441x128xf32) <- (441x128xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (441x128xf32) <- (441x128xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (441x128xf32) <- (441x128xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (441x128xf32) <- (441x128xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # builtin.combine: ([441x128xf32, 441x128xf32, 441x128xf32, 441x128xf32]) <- (441x128xf32, 441x128xf32, 441x128xf32, 441x128xf32) + combine_5 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (441x512xf32) <- ([441x128xf32, 441x128xf32, 441x128xf32, 441x128xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.unsqueeze: (1x441x512xf32) <- (441x512xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_4, full_int_array_3) + del concat_4 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 1x441x512xf32) + add_16 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [512] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # 
pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_6, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_7, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_2 = paddle._C_ops.matmul(add_16, slice_0, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [0, 0, 4, 128] + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_17, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_6 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_6, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_7, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_3 = paddle._C_ops.matmul(add_16, slice_2, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_18 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_18, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_55 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_56 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_57 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_58 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_59 = full_int_array_7 + + 
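+ # NOTE (assumed interpretation): full_int_array_7 = [2147483647] is INT_MAX,
+ # the IR's way of writing an open-ended slice end, and the repeated assign_*
+ # aliases of the same constants are artifacts of static-graph capture rather
+ # than distinct values. data_6 (512x1536) and data_7 (1536) appear to be a
+ # fused QKV in-projection weight and bias; the slices at [0:512), [512:1024)
+ # and [1024:end) split them into the query, key and value projections for an
+ # attention over the 2x441x512 token sequence with 4 heads of 128 dims each.
+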
# pd_op.assign: (1xi64) <- (1xi64) + assign_60 = full_int_array_7 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_6, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_6 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_7, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_7 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_19 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_19, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (2x4x441x441xf32) <- (2x4x441x128xf32, 2x4x441x128xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0883883"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_8 + + # pd_op.scale: (2x4x441x441xf32) <- (2x4x441x441xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (2x4x441x441xf32) <- (2x4x441x441xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_73 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_74 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_75 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_76 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_77 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_78 = full_9 + + # pd_op.dropout: (2x4x441x441xf32, 2x4x441x441xui8) <- (2x4x441x441xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x441x128xf32) <- (2x4x441x441xf32, 2x4x441x128xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (2x441x4x128xf32) <- (2x4x441x128xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, 0, 512] + + # pd_op.reshape: (2x441x512xf32) <- (2x441x4x128xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + 
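+ # NOTE (assumed interpretation): the scale by 0.0883883 (~1/sqrt(128)),
+ # softmax, dropout and matmul above implement scaled dot-product attention;
+ # the matmul below with parameter_214 appears to be the attention output
+ # projection, with parameter_213 as its bias.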
matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_214, False, False) + del parameter_214 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_20 = paddle._C_ops.add(matmul_7, parameter_213) + del parameter_213 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_20, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_20 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_21 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_21, parameter_212, parameter_211, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_211, parameter_212 + + # pd_op.matmul: (2x441x2048xf32) <- (2x441x512xf32, 512x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_210, False, False) + del parameter_210 + + # pd_op.add: (2x441x2048xf32) <- (2x441x2048xf32, 2048xf32) + add_22 = paddle._C_ops.add(matmul_8, parameter_209) + del parameter_209 + + # pd_op.gelu: (2x441x2048xf32) <- (2x441x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_22, False) + + # pd_op.dropout: (2x441x2048xf32, 2x441x2048xui8) <- (2x441x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x2048xf32, 2048x512xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_208, False, False) + del parameter_208 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_23 = paddle._C_ops.add(matmul_9, parameter_207) + del parameter_207 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_23, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_23 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_24 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_24, parameter_206, parameter_205, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_205, parameter_206 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 1x441x512xf32) + add_25 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_8, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_9, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_10 = paddle._C_ops.matmul(add_25, slice_6, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_26 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: 
(2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_26, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_8, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_9, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_11 = paddle._C_ops.matmul(add_25, slice_8, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_27 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_27, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_8, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_8 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_9, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_9 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_28 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_28, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (2x4x441x441xf32) <- (2x4x441x128xf32, 2x4x441x128xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (2x4x441x441xf32) <- (2x4x441x441xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (2x4x441x441xf32) <- (2x4x441x441xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (2x4x441x441xf32, 2x4x441x441xui8) <- (2x4x441x441xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x441x128xf32) <- (2x4x441x441xf32, 2x4x441x128xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (2x441x4x128xf32) <- (2x4x441x128xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (2x441x512xf32) <- (2x441x4x128xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_204, False, False) + del parameter_204 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_29 = paddle._C_ops.add(matmul_15, parameter_203) + del parameter_203 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + 
add_29, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_29 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_30 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_30, parameter_202, parameter_201, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_201, parameter_202 + + # pd_op.matmul: (2x441x2048xf32) <- (2x441x512xf32, 512x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_200, False, False) + del parameter_200 + + # pd_op.add: (2x441x2048xf32) <- (2x441x2048xf32, 2048xf32) + add_31 = paddle._C_ops.add(matmul_16, parameter_199) + del parameter_199 + + # pd_op.gelu: (2x441x2048xf32) <- (2x441x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_31, False) + + # pd_op.dropout: (2x441x2048xf32, 2x441x2048xui8) <- (2x441x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x2048xf32, 2048x512xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_198, False, False) + del parameter_198 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_32 = paddle._C_ops.add(matmul_17, parameter_197) + del parameter_197 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_32, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_32 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_33 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_33, parameter_196, parameter_195, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_195, parameter_196 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 1x441x512xf32) + add_34 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_10, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_11, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_18 = paddle._C_ops.matmul(add_34, slice_12, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_35 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_35, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_10, [1], 
full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_11, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_19 = paddle._C_ops.matmul(add_34, slice_14, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_36 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_36, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_10, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_10 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_11, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_11 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_37 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_37, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (2x4x441x441xf32) <- (2x4x441x128xf32, 2x4x441x128xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (2x4x441x441xf32) <- (2x4x441x441xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (2x4x441x441xf32) <- (2x4x441x441xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (2x4x441x441xf32, 2x4x441x441xui8) <- (2x4x441x441xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x441x128xf32) <- (2x4x441x441xf32, 2x4x441x128xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (2x441x4x128xf32) <- (2x4x441x128xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (2x441x512xf32) <- (2x441x4x128xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_194, False, False) + del parameter_194 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_38 = paddle._C_ops.add(matmul_23, parameter_193) + del parameter_193 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_38, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_38 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_39 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- 
(2x441x512xf32, 512xf32, 512xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_39, parameter_192, parameter_191, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_191, parameter_192 + + # pd_op.matmul: (2x441x2048xf32) <- (2x441x512xf32, 512x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_190, False, False) + del parameter_190 + + # pd_op.add: (2x441x2048xf32) <- (2x441x2048xf32, 2048xf32) + add_40 = paddle._C_ops.add(matmul_24, parameter_189) + del parameter_189 + + # pd_op.gelu: (2x441x2048xf32) <- (2x441x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_40, False) + + # pd_op.dropout: (2x441x2048xf32, 2x441x2048xui8) <- (2x441x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x2048xf32, 2048x512xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_188, False, False) + del parameter_188 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_41 = paddle._C_ops.add(matmul_25, parameter_187) + del parameter_187 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_41, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_41 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_42 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_42, parameter_186, parameter_185, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_185, parameter_186 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 1x441x512xf32) + add_43 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_12, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_13, [0], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_3 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_26 = paddle._C_ops.matmul(add_43, slice_18, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_44 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_44, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_12, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_13, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_27 = paddle._C_ops.matmul(add_43, 
slice_20, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_45 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_45, full_int_array_5) + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (512x512xf32) <- (512x1536xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_12, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_12 + + # pd_op.slice: (512xf32) <- (1536xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_13, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_13 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_46 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (2x441x4x128xf32) <- (2x441x512xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_46, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (2x4x441x128xf32) <- (2x441x4x128xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (2x4x441x441xf32) <- (2x4x441x128xf32, 2x4x441x128xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (2x4x441x441xf32) <- (2x4x441x441xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (2x4x441x441xf32) <- (2x4x441x441xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (2x4x441x441xf32, 2x4x441x441xui8) <- (2x4x441x441xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x441x128xf32) <- (2x4x441x441xf32, 2x4x441x128xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (2x441x4x128xf32) <- (2x4x441x128xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (2x441x512xf32) <- (2x441x4x128xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) + del full_int_array_8 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x512xf32, 512x512xf32) + matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_184, False, False) + del parameter_184 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_47 = paddle._C_ops.add(matmul_31, parameter_183) + del parameter_183 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_48 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_182, parameter_181, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), 
+ ) + del parameter_181, parameter_182 + + # pd_op.matmul: (2x441x2048xf32) <- (2x441x512xf32, 512x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_180, False, False) + del parameter_180 + + # pd_op.add: (2x441x2048xf32) <- (2x441x2048xf32, 2048xf32) + add_49 = paddle._C_ops.add(matmul_32, parameter_179) + del parameter_179 + + # pd_op.gelu: (2x441x2048xf32) <- (2x441x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_49, False) + + # pd_op.dropout: (2x441x2048xf32, 2x441x2048xui8) <- (2x441x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (2x441x512xf32) <- (2x441x2048xf32, 2048x512xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_178, False, False) + del parameter_178 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 512xf32) + add_50 = paddle._C_ops.add(matmul_33, parameter_177) + del parameter_177 + + # pd_op.dropout: (2x441x512xf32, 2x441x512xui8) <- (2x441x512xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_50, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_50 + + # pd_op.add: (2x441x512xf32) <- (2x441x512xf32, 2x441x512xf32) + add_51 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (2x441x512xf32, 2x441xf32, 2x441xf32) <- (2x441x512xf32, 512xf32, 512xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_51, parameter_176, parameter_175, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_175, parameter_176 + + # pd_op.transpose: (2x512x441xf32) <- (2x441x512xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [2, 512, 21, 21] + + # pd_op.reshape: (2x512x21x21xf32) <- (2x512x441xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) + del full_int_array_9 + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x512x21x21xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + reshape_20, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_32 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x512x21x21xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + reshape_20, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 
192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_33 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_34 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x192x21x21xf32) <- (2x192x21x21xf32, 2x192x21x21xf32) + add_52 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_35 = paddle._C_ops.swish(add_52) + + # pd_op.full_int_array: (2xi64) <- () + 
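# ---------------------------------------------------------------------------
# Editorial aside, not part of the captured graph: the decompiled code in this
# file repeatedly wraps op calls in `(lambda x, f: f(x))(op(...), normalizer)`
# so that an op which may return either a bare tensor or a tuple always
# unpacks into a fixed number of names. A minimal, hedged sketch of that idiom
# (the helper name `_as_tuple` is illustrative and does not appear in the
# original source):
def _as_tuple(out, n):
    # Wrap a bare value and pad with None so it always unpacks into n names.
    if not isinstance(out, (list, tuple)):
        out = (out,)
    return tuple(out) + (None,) * (n - len(out))
# Usage mirroring the generated dropout pattern, which may or may not return
# its mask:
#   dropout_out, dropout_mask = _as_tuple(paddle._C_ops.dropout(...), 2)
# The SPP-style max-pooling windows (5x5, 9x9, 13x13) of the captured graph
# continue below.
# ---------------------------------------------------------------------------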
full_int_array_10 = [5, 5] + + # pd_op.pool2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_35, + full_int_array_10, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [9, 9] + + # pd_op.pool2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_35, + full_int_array_11, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [13, 13] + + # pd_op.pool2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_35, + full_int_array_12, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x192x21x21xf32, 2x192x21x21xf32, 2x192x21x21xf32, 2x192x21x21xf32]) <- (2x192x21x21xf32, 2x192x21x21xf32, 2x192x21x21xf32, 2x192x21x21xf32) + combine_6 = [swish_35, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x768x21x21xf32) <- ([2x192x21x21xf32, 2x192x21x21xf32, 2x192x21x21xf32, 2x192x21x21xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x768x21x21xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_5, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_36 = paddle._C_ops.swish(batch_norm__252) + + # builtin.combine: ([2x192x21x21xf32, 2x192x21x21xf32]) <- (2x192x21x21xf32, 2x192x21x21xf32) + combine_7 = [swish_32, swish_36] + + # pd_op.concat: (2x384x21x21xf32) <- ([2x192x21x21xf32, 2x192x21x21xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x21x21xf32) <- (2x384x21x21xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_6, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x384x21x21xf32) <- (2x384x21x21xf32) + swish_37 = paddle._C_ops.swish(batch_norm__258) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x384x21x21xf32, 192x384x1x1xf32) + conv2d_48 = 
paddle._C_ops.conv2d( + swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_38 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.nearest_interp: (2x192x42x42xf32) <- (2x192x21x21xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_38, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x42x42xf32, 2x256x42x42xf32]) <- (2x192x42x42xf32, 2x256x42x42xf32) + combine_8 = [nearest_interp_0, swish_25] + + # pd_op.concat: (2x448x42x42xf32) <- ([2x192x42x42xf32, 2x256x42x42xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x448x42x42xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_7, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_39 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x448x42x42xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_7, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_40 = paddle._C_ops.swish(batch_norm__276) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: 
(2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_41 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (2x96x42x42xf32) <- (2x96x42x42xf32, 2x96x42x42xf32) + add_53 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_42 = paddle._C_ops.swish(add_53) + + # builtin.combine: ([2x96x42x42xf32, 2x96x42x42xf32]) <- (2x96x42x42xf32, 2x96x42x42xf32) + combine_9 = [swish_39, swish_42] + + # pd_op.concat: (2x192x42x42xf32) <- ([2x96x42x42xf32, 2x96x42x42xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x42x42xf32) <- (2x192x42x42xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_8, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", 
+ False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x192x42x42xf32) <- (2x192x42x42xf32) + swish_43 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x192x42x42xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_44 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.nearest_interp: (2x96x84x84xf32) <- (2x96x42x42xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_44, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x96x84x84xf32, 2x128x84x84xf32]) <- (2x96x84x84xf32, 2x128x84x84xf32) + combine_10 = [nearest_interp_1, swish_17] + + # pd_op.concat: (2x224x84x84xf32) <- ([2x96x84x84xf32, 2x128x84x84xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x224x84x84xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_9, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_45 = paddle._C_ops.swish(batch_norm__312) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x224x84x84xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_9, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, 
parameter_93 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_46 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_47 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x48x84x84xf32) <- (2x48x84x84xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x84x84xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x48x84x84xf32) <- (2x48x84x84xf32, 2x48x84x84xf32) + add_54 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + + # pd_op.swish: (2x48x84x84xf32) <- (2x48x84x84xf32) + swish_48 = paddle._C_ops.swish(add_54) + + # builtin.combine: ([2x48x84x84xf32, 2x48x84x84xf32]) <- (2x48x84x84xf32, 2x48x84x84xf32) + combine_11 = [swish_45, swish_48] + + # pd_op.concat: (2x96x84x84xf32) <- ([2x48x84x84xf32, 2x48x84x84xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x96x84x84xf32) <- (2x96x84x84xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_10, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x84x84xf32, 96xf32, 96xf32, 96xf32, 96xf32) 
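# ---------------------------------------------------------------------------
# Editorial aside, not part of the captured graph: every neck stage above is
# the same conv2d -> batch_norm (inference statistics) -> swish triplet, fully
# unrolled by the graph capture. A hedged sketch of that unit using the public
# paddle.nn API; the class name and arguments are illustrative, not taken from
# the original PP-YOLOE sources:
import paddle.nn as nn

class ConvBNSwish(nn.Layer):
    def __init__(self, in_ch, out_ch, kernel=1, stride=1, padding=0):
        super().__init__()
        # No conv bias: scale/shift come from batch norm, matching the
        # captured paddle._C_ops.conv2d / batch_norm pairs above.
        self.conv = nn.Conv2D(in_ch, out_ch, kernel, stride=stride,
                              padding=padding, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch, momentum=0.9, epsilon=1e-5)
        self.act = nn.Swish()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

# e.g. ConvBNSwish(96, 96, kernel=1) corresponds to conv2d_61, its batch_norm,
# and swish_49; the generated unpacking for conv2d_61 continues next.
# ---------------------------------------------------------------------------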
+ ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x96x84x84xf32) <- (2x96x84x84xf32) + swish_49 = paddle._C_ops.swish(batch_norm__342) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x84x84xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_50 = paddle._C_ops.swish(batch_norm__348) + + # builtin.combine: ([2x96x42x42xf32, 2x192x42x42xf32]) <- (2x96x42x42xf32, 2x192x42x42xf32) + combine_12 = [swish_50, swish_43] + + # pd_op.concat: (2x288x42x42xf32) <- ([2x96x42x42xf32, 2x192x42x42xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x288x42x42xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_11, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_51 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x288x42x42xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_11, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, 
parameter_57, parameter_58 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_52 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_53 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (2x96x42x42xf32) <- (2x96x42x42xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x42x42xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.add: (2x96x42x42xf32) <- (2x96x42x42xf32, 2x96x42x42xf32) + add_55 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (2x96x42x42xf32) <- (2x96x42x42xf32) + swish_54 = paddle._C_ops.swish(add_55) + + # builtin.combine: ([2x96x42x42xf32, 2x96x42x42xf32]) <- (2x96x42x42xf32, 2x96x42x42xf32) + combine_13 = [swish_51, swish_54] + + # pd_op.concat: (2x192x42x42xf32) <- ([2x96x42x42xf32, 2x96x42x42xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x192x42x42xf32) <- (2x192x42x42xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_12, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x192x42x42xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x42x42xf32, 
192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (2x192x42x42xf32) <- (2x192x42x42xf32) + swish_55 = paddle._C_ops.swish(batch_norm__384) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x42x42xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_56 = paddle._C_ops.swish(batch_norm__390) + + # builtin.combine: ([2x192x21x21xf32, 2x384x21x21xf32]) <- (2x192x21x21xf32, 2x384x21x21xf32) + combine_14 = [swish_56, swish_37] + + # pd_op.concat: (2x576x21x21xf32) <- ([2x192x21x21xf32, 2x384x21x21xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x576x21x21xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_13, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_57 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x576x21x21xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_13, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + 
else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_58 = paddle._C_ops.swish(batch_norm__402) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_59 = paddle._C_ops.swish(batch_norm__408) + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x192x21x21xf32) <- (2x192x21x21xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x21x21xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x192x21x21xf32) <- (2x192x21x21xf32, 2x192x21x21xf32) + add_56 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + + # pd_op.swish: (2x192x21x21xf32) <- (2x192x21x21xf32) + swish_60 = paddle._C_ops.swish(add_56) + + # builtin.combine: ([2x192x21x21xf32, 2x192x21x21xf32]) <- (2x192x21x21xf32, 2x192x21x21xf32) + combine_15 = [swish_57, swish_60] + + # pd_op.concat: (2x384x21x21xf32) <- ([2x192x21x21xf32, 2x192x21x21xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_15, full_0) + del combine_15 + + # pd_op.conv2d: (2x384x21x21xf32) <- (2x384x21x21xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_4 + + # pd_op.batch_norm_: (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x21x21xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x384x21x21xf32) <- (2x384x21x21xf32) + swish_0 = paddle._C_ops.swish(batch_norm__426) + del ( + add_0, + add_1, + add_10, + add_11, + add_13, + add_14, + add_16, + add_17, + add_18, + add_19, + add_21, + add_22, + add_24, + add_25, + add_26, + add_27, + add_28, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_39, + add_4, + add_40, + add_42, + add_43, + add_44, + add_45, + add_46, + add_48, + add_49, + add_5, + add_51, + add_52, + add_53, + add_54, + add_55, + add_56, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_73, + assign_74, + assign_75, + assign_76, + assign_77, + assign_78, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + 
batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + 
batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_14, + concat_2, + concat_3, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + 
conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_8, + conv2d_9, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_8, + full_9, + full_int_array_0, + full_int_array_10, + full_int_array_11, + full_int_array_12, + full_int_array_4, + full_int_array_6, + full_int_array_7, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_11, + reshape_15, + reshape_19, + reshape_2, + reshape_20, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + 
swish_60,
+ swish_7,
+ swish_8,
+ swish_9,
+ transpose_0,
+ transpose_1,
+ transpose_10,
+ transpose_11,
+ transpose_12,
+ transpose_13,
+ transpose_14,
+ transpose_15,
+ transpose_16,
+ transpose_17,
+ transpose_2,
+ transpose_3,
+ transpose_4,
+ transpose_5,
+ transpose_6,
+ transpose_7,
+ transpose_8,
+ transpose_9,
+ unsqueeze_3,
+ )
+
+ return swish_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/weight_meta.py
new file mode 100644
index 000000000..883e0a90e
--- /dev/null
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/weight_meta.py
@@ -0,0 +1,4300 @@
+class Program_weight_tensor_parameter_0:
+ name = "parameter_0"
+ shape = [384]
+ dtype = "float32"
+ min_val = float("-0.452683")
+ max_val = float("0.520016")
+ mean = float("0.179162")
+ std = float("0.147254")
+ data = None
+
+
+class Program_weight_tensor_parameter_1:
+ name = "parameter_1"
+ shape = [384]
+ dtype = "float32"
+ min_val = float("0.925768")
+ max_val = float("1.48759")
+ mean = float("1.14239")
+ std = float("0.0749515")
+ data = None
+
+
+class Program_weight_tensor_parameter_2:
+ name = "parameter_2"
+ shape = [384]
+ dtype = "float32"
+ min_val = float("0.00210463")
+ max_val = float("0.026769")
+ mean = float("0.00776839")
+ std = float("0.00342444")
+ data = None
+
+
+class Program_weight_tensor_parameter_3:
+ name = "parameter_3"
+ shape = [384]
+ dtype = "float32"
+ min_val = float("-0.113418")
+ max_val = float("0.0521309")
+ mean = float("-0.0186753")
+ std = float("0.0242096")
+ data = None
+
+
+class Program_weight_tensor_parameter_4:
+ name = "parameter_4"
+ shape = [384, 384, 1, 1]
+ dtype = "float32"
+ min_val = float("-0.0715531")
+ max_val = float("0.0605752")
+ mean = float("-0.000295655")
+ std = float("0.00518432")
+ data = None
+
+
+class Program_weight_tensor_parameter_5:
+ name = "parameter_5"
+ shape = [192]
+ dtype = "float32"
+ min_val = float("-0.285295")
+ max_val = float("0.0760154")
+ mean = float("-0.0576417")
+ std = float("0.0689916")
+ data = None
+
+
+class Program_weight_tensor_parameter_6:
+ name = "parameter_6"
+ shape = [192]
+ dtype = "float32"
+ min_val = float("0.874213")
+ max_val = float("1.05715")
+ mean = float("0.955828")
+ std = float("0.0252646")
+ data = None
+
+
+class Program_weight_tensor_parameter_7:
+ name = "parameter_7"
+ shape = [192]
+ dtype = "float32"
+ min_val = float("0.00237784")
+ max_val = float("0.0250828")
+ mean = float("0.00729247")
+ std = float("0.00391993")
+ data = None
+
+
+class Program_weight_tensor_parameter_8:
+ name = "parameter_8"
+ shape = [192]
+ dtype = "float32"
+ min_val = float("-0.0443605")
+ max_val = float("0.0799242")
+ mean = float("0.00754799")
+ std = float("0.0194932")
+ data = None
+
+
+class Program_weight_tensor_parameter_9:
+ name = "parameter_9"
+ shape = [192, 192, 1, 1]
+ dtype = "float32"
+ min_val = float("-0.0396215")
+ max_val = float("0.0321202")
+ mean = float("-3.31023e-05")
+ std = float("0.0035371")
+ data = None
+
+
+class Program_weight_tensor_parameter_10:
+ name = "parameter_10"
+ shape = [192]
+ dtype = "float32"
+ min_val = float("-0.285295")
+ max_val = float("0.0760154")
+ mean = float("-0.0576417")
+ std = float("0.0689916")
+ data = None
+
+
+class Program_weight_tensor_parameter_11:
+ name = "parameter_11"
+ shape = [192]
+ dtype = "float32"
+ min_val = float("0.955327")
+ max_val = float("1.24547")
+ mean = float("1.06354")
+ std = float("0.05021")
+ data = None
+
+
+class
Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.00358067") + max_val = float("0.033424") + mean = float("0.0107375") + std = float("0.0045715") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("-0.109692") + max_val = float("0.118946") + mean = float("-0.0242889") + std = float("0.0256644") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0682235") + max_val = float("0.0483379") + mean = float("-0.000130636") + std = float("0.00307912") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.36677") + max_val = float("0.248413") + mean = float("-0.120383") + std = float("0.0930536") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("0.867888") + max_val = float("1.55252") + mean = float("1.03562") + std = float("0.0842523") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("0.00722418") + max_val = float("0.0714367") + mean = float("0.0204194") + std = float("0.00950298") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("-0.191905") + max_val = float("0.0690325") + mean = float("-0.042388") + std = float("0.0414146") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0761583") + max_val = float("0.0789144") + mean = float("-0.000193443") + std = float("0.00345676") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.271966") + max_val = float("0.101208") + mean = float("-0.0713997") + std = float("0.0674906") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.903154") + max_val = float("1.18503") + mean = float("1.01548") + std = float("0.0495696") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00346844") + max_val = float("0.0220096") + mean = float("0.00712286") + std = float("0.00255333") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.107861") + max_val = float("0.0684214") + mean = float("-0.0173983") + std = float("0.0277055") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.114155") + max_val = float("0.120684") + mean = float("-0.000168433") + std = float("0.00483873") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.110844") + max_val = float("0.0101898") + mean = float("-0.0407097") + std = float("0.0213656") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192] + dtype = "float32" + min_val = float("0.825232") + max_val = float("1.16377") + mean = float("0.995983") 
+ std = float("0.0348186") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("0.00217247") + max_val = float("0.0168889") + mean = float("0.00585613") + std = float("0.00215375") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("-0.0953202") + max_val = float("0.0969014") + mean = float("-0.0231192") + std = float("0.0262698") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0338228") + max_val = float("0.0480082") + mean = float("-0.000263793") + std = float("0.0042282") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.187742") + max_val = float("0.00806848") + mean = float("-0.0695907") + std = float("0.0368951") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.811184") + max_val = float("1.20064") + mean = float("1.03736") + std = float("0.0483566") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("0.00738289") + max_val = float("0.0626225") + mean = float("0.0189409") + std = float("0.00837535") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192] + dtype = "float32" + min_val = float("-0.456792") + max_val = float("0.305059") + mean = float("-0.0475366") + std = float("0.0955555") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0422041") + max_val = float("0.0426098") + mean = float("-5.43654e-05") + std = float("0.00256074") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192] + dtype = "float32" + min_val = float("-0.428458") + max_val = float("1.12422") + mean = float("0.353853") + std = float("0.274871") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [192] + dtype = "float32" + min_val = float("0.675662") + max_val = float("1.64209") + mean = float("1.20602") + std = float("0.162242") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [192] + dtype = "float32" + min_val = float("0.00537697") + max_val = float("0.070407") + mean = float("0.0193519") + std = float("0.0102235") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [192] + dtype = "float32" + min_val = float("-0.147927") + max_val = float("0.0607472") + mean = float("-0.0380247") + std = float("0.0324731") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.130819") + max_val = float("0.129095") + mean = float("-0.000927703") + std = float("0.0104561") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.288895") + max_val = float("0.177009") + mean = float("-0.0686498") + std = float("0.0982327") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = "float32" + min_val = float("0.803373") + 
max_val = float("1.20152") + mean = float("0.918185") + std = float("0.0578716") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("0.00118859") + max_val = float("0.014306") + mean = float("0.0062369") + std = float("0.00269475") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.0513549") + max_val = float("0.0355656") + mean = float("-0.000576274") + std = float("0.0171242") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0689765") + max_val = float("0.0432502") + mean = float("-0.000652836") + std = float("0.00712184") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.288895") + max_val = float("0.177009") + mean = float("-0.0686498") + std = float("0.0982327") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("0.915166") + max_val = float("1.34956") + mean = float("1.07114") + std = float("0.0649452") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("0.00551406") + max_val = float("0.0685331") + mean = float("0.0191014") + std = float("0.0117144") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.124132") + max_val = float("0.107774") + mean = float("-0.0343131") + std = float("0.0391153") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0904983") + max_val = float("0.0672513") + mean = float("-0.000306911") + std = float("0.00582517") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("-0.598497") + max_val = float("0.275454") + mean = float("-0.203884") + std = float("0.141426") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("0.748155") + max_val = float("1.49346") + mean = float("1.00128") + std = float("0.110997") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("0.0136157") + max_val = float("0.0649745") + mean = float("0.0291018") + std = float("0.0108806") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.0918722") + max_val = float("0.0221828") + mean = float("-0.0350031") + std = float("0.0238651") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0863313") + max_val = float("0.0875384") + mean = float("-0.000438194") + std = float("0.00661265") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("-0.492262") + max_val = float("0.196613") + mean = float("-0.141232") + std = float("0.103923") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = 
"float32" + min_val = float("0.846758") + max_val = float("1.23368") + mean = float("0.997396") + std = float("0.0775293") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("0.00489883") + max_val = float("0.0394259") + mean = float("0.0128552") + std = float("0.00490413") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96] + dtype = "float32" + min_val = float("-0.151614") + max_val = float("0.0381158") + mean = float("-0.029881") + std = float("0.0298224") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0816683") + max_val = float("0.0740289") + mean = float("-0.000524912") + std = float("0.00886887") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.132923") + max_val = float("0.0509413") + mean = float("-0.0248077") + std = float("0.0356345") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.816684") + max_val = float("1.39825") + mean = float("0.955372") + std = float("0.0698917") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("0.00537154") + max_val = float("0.0354164") + mean = float("0.0148676") + std = float("0.0071552") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-0.0870071") + max_val = float("0.0637512") + mean = float("-0.00749368") + std = float("0.0293202") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0876552") + max_val = float("0.117658") + mean = float("-1.22433e-05") + std = float("0.00838076") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.275329") + max_val = float("0.0752799") + mean = float("-0.0911609") + std = float("0.0725464") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.710176") + max_val = float("1.16778") + mean = float("1.0072") + std = float("0.0758171") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("0.0113942") + max_val = float("0.0820975") + mean = float("0.0281015") + std = float("0.0141345") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.444146") + max_val = float("0.41526") + mean = float("-0.036066") + std = float("0.147893") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0598955") + max_val = float("0.073597") + mean = float("-0.000112433") + std = float("0.00604091") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("-0.725732") + max_val = float("1.73631") + mean = float("0.545789") + std = float("0.560439") + data = None + + +class Program_weight_tensor_parameter_71: + name = 
"parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.4982") + max_val = float("1.73846") + mean = float("1.18044") + std = float("0.279373") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("0.0091194") + max_val = float("0.155535") + mean = float("0.0531339") + std = float("0.0329718") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.236452") + max_val = float("0.124374") + mean = float("-0.0543668") + std = float("0.0701244") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.213003") + max_val = float("0.156739") + mean = float("-0.00273394") + std = float("0.0253342") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0883468") + max_val = float("0.0815666") + mean = float("-0.00545195") + std = float("0.0185733") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.146343") + max_val = float("0.154092") + mean = float("-0.000210578") + std = float("0.0146408") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = 
float("-0.156417") + max_val = float("0.125386") + mean = float("-0.00151248") + std = float("0.0167378") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.204946") + max_val = float("0.164575") + mean = float("-0.00122585") + std = float("0.0211088") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.117203") + max_val = float("0.151865") + mean = float("-0.000119193") + std = float("0.0157767") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.279696") + max_val = float("0.370246") + mean = float("0.0267309") + std = float("0.152921") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.57014") + max_val = float("1.55242") + mean = float("0.838997") + std = float("0.135746") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.0106683") + max_val = float("0.126804") + mean = float("0.0291814") + std = float("0.0170072") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.179898") + max_val = float("0.0732908") + mean = float("-0.0440135") + std = float("0.0490268") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.102714") + max_val = float("0.106157") + mean = float("-0.00112266") + std = float("0.014169") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.406642") + max_val = float("0.233358") + mean = float("-0.049985") + std = float("0.110186") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.709947") + max_val = 
float("1.49603") + mean = float("0.967598") + std = float("0.110329") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00789098") + max_val = float("0.0673619") + mean = float("0.0234475") + std = float("0.0107832") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.246778") + max_val = float("0.160666") + mean = float("-0.0552822") + std = float("0.0558804") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.108975") + max_val = float("0.114522") + mean = float("-0.00132414") + std = float("0.0131759") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.329525") + max_val = float("0.198139") + mean = float("-0.0441613") + std = float("0.0973656") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.675223") + max_val = float("0.97078") + mean = float("0.8646") + std = float("0.0515217") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00421185") + max_val = float("0.0249386") + mean = float("0.0123504") + std = float("0.00410601") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.053823") + max_val = float("0.050496") + mean = float("-0.0174828") + std = float("0.0234916") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0518405") + max_val = float("0.0461214") + mean = float("-0.00185578") + std = float("0.00991125") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.329525") + max_val = float("0.198139") + mean = float("-0.0441612") + std = float("0.0973656") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.879123") + max_val = float("1.2327") + mean = float("1.0107") + std = float("0.0576271") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.0143843") + max_val = float("0.0843909") + mean = float("0.0337415") + std = float("0.0133066") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.118606") + max_val = float("0.082009") + mean = float("-0.0257981") + std = float("0.037228") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.119884") + max_val = float("0.0984876") + mean = float("-0.000273645") + std = float("0.00746965") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.684471") + max_val = float("0.164175") + mean = float("-0.212984") + std = float("0.163582") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + 
dtype = "float32" + min_val = float("0.647157") + max_val = float("1.32161") + mean = float("1.0012") + std = float("0.14667") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0204866") + max_val = float("0.0736433") + mean = float("0.0367459") + std = float("0.0105341") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.145655") + max_val = float("0.081865") + mean = float("-0.0386404") + std = float("0.0428506") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.150728") + max_val = float("0.106984") + mean = float("-0.000497765") + std = float("0.00873233") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.62912") + max_val = float("0.432206") + mean = float("-0.18681") + std = float("0.228476") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.719568") + max_val = float("1.31906") + mean = float("0.942543") + std = float("0.1067") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00885873") + max_val = float("0.0349769") + mean = float("0.0166273") + std = float("0.00558395") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.139965") + max_val = float("0.193582") + mean = float("0.014265") + std = float("0.05275") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.232382") + max_val = float("0.219151") + mean = float("-0.000587386") + std = float("0.0112715") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.188632") + max_val = float("0.369005") + mean = float("0.0340127") + std = float("0.102984") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("0.811482") + max_val = float("1.15985") + mean = float("0.971652") + std = float("0.0770903") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("0.00634313") + max_val = float("0.0888851") + mean = float("0.0208191") + std = float("0.0130631") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.107048") + max_val = float("0.071888") + mean = float("0.00890674") + std = float("0.0331609") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.134885") + max_val = float("0.159464") + mean = float("-0.000410195") + std = float("0.010511") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.428994") + max_val = float("-0.0079739") + mean = float("-0.188467") + std = float("0.0765561") + data = None + + +class 
Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.677258") + max_val = float("1.20866") + mean = float("0.867383") + std = float("0.0789588") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.00982117") + max_val = float("0.0744581") + mean = float("0.0275621") + std = float("0.0137219") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.132161") + max_val = float("0.0679295") + mean = float("-0.043245") + std = float("0.0350758") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0860177") + max_val = float("0.067703") + mean = float("-0.000801812") + std = float("0.00868342") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-0.34678") + max_val = float("0.206496") + mean = float("-0.121695") + std = float("0.0631508") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384] + dtype = "float32" + min_val = float("0.851728") + max_val = float("1.38848") + mean = float("1.03319") + std = float("0.0688275") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("0.0069404") + max_val = float("0.069479") + mean = float("0.0158635") + std = float("0.00706509") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("-0.14028") + max_val = float("0.0814906") + mean = float("-0.0336421") + std = float("0.0349413") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.129868") + max_val = float("0.122139") + mean = float("-0.000529178") + std = float("0.00780446") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.341336") + max_val = float("0.170618") + mean = float("-0.091537") + std = float("0.082877") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.847029") + max_val = float("1.44354") + mean = float("1.04166") + std = float("0.11176") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.0708478") + max_val = float("0.876148") + mean = float("0.242073") + std = float("0.121003") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-1.46399") + max_val = float("1.0207") + mean = float("-0.159336") + std = float("0.471288") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.114663") + max_val = float("0.11913") + mean = float("-7.96988e-05") + std = float("0.00682969") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.0804562") + max_val = float("0.148317") + mean = 
float("0.0130393") + std = float("0.0391703") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.807658") + max_val = float("0.999034") + mean = float("0.918023") + std = float("0.0323845") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00552433") + max_val = float("0.0291597") + mean = float("0.0124664") + std = float("0.00358779") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.10915") + max_val = float("0.0556478") + mean = float("-0.0510605") + std = float("0.0371129") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0362227") + max_val = float("0.0477232") + mean = float("-0.00128002") + std = float("0.00569625") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.0804562") + max_val = float("0.148317") + mean = float("0.0130393") + std = float("0.0391703") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.898363") + max_val = float("1.29981") + mean = float("1.06513") + std = float("0.0769411") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.0261593") + max_val = float("0.139816") + mean = float("0.0594586") + std = float("0.0222963") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.338466") + max_val = float("0.116576") + mean = float("-0.158943") + std = float("0.0724513") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0682462") + max_val = float("0.0632307") + mean = float("-0.00052431") + std = float("0.00430294") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.283114") + max_val = float("0.167559") + mean = float("-0.0797541") + std = float("0.0645398") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.866586") + max_val = float("1.43761") + mean = float("1.07207") + std = float("0.10405") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0288085") + max_val = float("0.197173") + mean = float("0.0708018") + std = float("0.024139") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.46907") + max_val = float("0.165239") + mean = float("-0.113767") + std = float("0.0836793") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.078482") + max_val = float("0.0511665") + mean = float("-0.00039617") + std = float("0.00462019") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" 
+ min_val = float("-0.309805") + max_val = float("0.254716") + mean = float("-0.0932639") + std = float("0.0859364") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.917679") + max_val = float("1.34889") + mean = float("1.06023") + std = float("0.0660292") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0174687") + max_val = float("0.0683914") + mean = float("0.0322673") + std = float("0.00871773") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.208139") + max_val = float("0.214511") + mean = float("-0.0131188") + std = float("0.0751091") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.111568") + max_val = float("0.0993024") + mean = float("-0.000756508") + std = float("0.0080274") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.124473") + max_val = float("0.0377012") + mean = float("-0.021463") + std = float("0.0237294") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.874472") + max_val = float("1.17982") + mean = float("0.966638") + std = float("0.0430596") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.011943") + max_val = float("0.0496902") + mean = float("0.0214318") + std = float("0.00563825") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.184935") + max_val = float("0.122522") + mean = float("-0.00818287") + std = float("0.0564883") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0353683") + max_val = float("0.0647353") + mean = float("-0.000560278") + std = float("0.00645676") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [512] + dtype = "float32" + min_val = float("-4.43317e-10") + max_val = float("5.61669e-10") + mean = float("-1.57278e-11") + std = float("1.50619e-10") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("0.777502") + max_val = float("0.797352") + mean = float("0.79103") + std = float("0.00109751") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("-0.0178174") + max_val = float("0.017746") + mean = float("0.000116862") + std = float("0.0100297") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.018331") + max_val = float("0.0184827") + mean = float("8.10141e-06") + std = float("0.0100894") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [2048] + dtype = "float32" + min_val = float("-0.0350606") + max_val = float("0.0350384") + mean = float("0.000306453") + std = float("0.0203863") + data = None + + +class 
Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0353448") + max_val = float("0.0354131") + mean = float("-2.913e-05") + std = float("0.0201965") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [512] + dtype = "float32" + min_val = float("-0.00108135") + max_val = float("0.00177537") + mean = float("-4.41915e-06") + std = float("0.000281082") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [512] + dtype = "float32" + min_val = float("0.787443") + max_val = float("0.796202") + mean = float("0.791035") + std = float("0.000952022") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [512] + dtype = "float32" + min_val = float("-0.000752734") + max_val = float("0.00115659") + mean = float("9.29203e-06") + std = float("0.000259648") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0612224") + max_val = float("0.061003") + mean = float("-6.94995e-05") + std = float("0.0349275") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [512] + dtype = "float32" + min_val = float("-0.000857937") + max_val = float("0.00156671") + mean = float("2.45979e-05") + std = float("0.000279984") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [512] + dtype = "float32" + min_val = float("0.787894") + max_val = float("0.796172") + mean = float("0.791076") + std = float("0.000888069") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [512] + dtype = "float32" + min_val = float("-0.0181451") + max_val = float("0.0178488") + mean = float("0.000107421") + std = float("0.0100308") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0182006") + max_val = float("0.0184013") + mean = float("7.27847e-06") + std = float("0.0100895") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [2048] + dtype = "float32" + min_val = float("-0.0349372") + max_val = float("0.0350332") + mean = float("0.000311716") + std = float("0.0203865") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0352744") + max_val = float("0.0354713") + mean = float("-2.91299e-05") + std = float("0.0201967") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [512] + dtype = "float32" + min_val = float("-0.00090646") + max_val = float("0.00141201") + mean = float("-6.1515e-06") + std = float("0.000283748") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [512] + dtype = "float32" + min_val = float("0.787879") + max_val = float("0.795482") + mean = float("0.791033") + std = float("0.000860877") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [512] + dtype = "float32" + min_val = float("-0.000860853") + max_val = float("0.00085914") + mean = float("1.517e-05") + std = float("0.000263815") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [512, 512] + dtype = "float32" + min_val = 
float("-0.0609814") + max_val = float("0.0610056") + mean = float("-6.95229e-05") + std = float("0.0349278") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [512] + dtype = "float32" + min_val = float("-0.00117395") + max_val = float("0.0013318") + mean = float("3.01186e-05") + std = float("0.000365825") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [512] + dtype = "float32" + min_val = float("0.788444") + max_val = float("0.7955") + mean = float("0.791111") + std = float("0.000833189") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [512] + dtype = "float32" + min_val = float("-0.0182878") + max_val = float("0.0174842") + mean = float("0.000126839") + std = float("0.0100441") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0182158") + max_val = float("0.0186263") + mean = float("8.65513e-06") + std = float("0.0100897") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [2048] + dtype = "float32" + min_val = float("-0.0349254") + max_val = float("0.0350035") + mean = float("0.000314621") + std = float("0.0203859") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0353207") + max_val = float("0.0358856") + mean = float("-2.91299e-05") + std = float("0.0201967") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [512] + dtype = "float32" + min_val = float("-0.00126811") + max_val = float("0.00134705") + mean = float("-8.63145e-06") + std = float("0.00037799") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [512] + dtype = "float32" + min_val = float("0.787714") + max_val = float("0.795345") + mean = float("0.791031") + std = float("0.000824989") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [512] + dtype = "float32" + min_val = float("-0.00109735") + max_val = float("0.00108191") + mean = float("2.34299e-06") + std = float("0.000355206") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0611537") + max_val = float("0.061337") + mean = float("-6.96103e-05") + std = float("0.0349285") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [512] + dtype = "float32" + min_val = float("-0.00181207") + max_val = float("0.00243249") + mean = float("3.10172e-05") + std = float("0.000582875") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [512] + dtype = "float32" + min_val = float("0.787905") + max_val = float("0.795049") + mean = float("0.791152") + std = float("0.000824837") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [512] + dtype = "float32" + min_val = float("-0.0183354") + max_val = float("0.0183375") + mean = float("0.000118518") + std = float("0.0100683") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [2048, 512] + dtype = "float32" + min_val = float("-0.0182466") + max_val = float("0.0185687") + mean = float("8.27384e-06") + std = float("0.0100902") + data = None + + 
+class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [2048] + dtype = "float32" + min_val = float("-0.0349326") + max_val = float("0.0349493") + mean = float("0.0003176") + std = float("0.0203846") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [512, 2048] + dtype = "float32" + min_val = float("-0.0353877") + max_val = float("0.0364363") + mean = float("-2.91296e-05") + std = float("0.0201968") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [512] + dtype = "float32" + min_val = float("-0.00194925") + max_val = float("0.0025094") + mean = float("-3.86519e-06") + std = float("0.000607271") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [512] + dtype = "float32" + min_val = float("0.786413") + max_val = float("0.795254") + mean = float("0.79103") + std = float("0.000834782") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [512] + dtype = "float32" + min_val = float("-0.00302405") + max_val = float("0.00520267") + mean = float("5.48518e-06") + std = float("0.00120513") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [512, 512] + dtype = "float32" + min_val = float("-0.0615596") + max_val = float("0.0620019") + mean = float("-6.96739e-05") + std = float("0.0349306") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [512] + dtype = "float32" + min_val = float("-4.7684") + max_val = float("-0.175565") + mean = float("-2.26789") + std = float("0.761953") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [512] + dtype = "float32" + min_val = float("2.03375") + max_val = float("5.2834") + mean = float("3.71839") + std = float("0.500321") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [512] + dtype = "float32" + min_val = float("0.00793564") + max_val = float("0.0505589") + mean = float("0.017105") + std = float("0.00519706") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [512] + dtype = "float32" + min_val = float("-0.177111") + max_val = float("0.0758915") + mean = float("-0.071011") + std = float("0.0377507") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109533") + max_val = float("0.130657") + mean = float("-0.00087456") + std = float("0.00816989") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [384] + dtype = "float32" + min_val = float("-0.0280175") + max_val = float("0.00315655") + mean = float("-0.00254859") + std = float("0.00471735") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.222129") + max_val = float("0.18458") + mean = float("-0.000742582") + std = float("0.00434552") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("-2.40391") + max_val = float("2.31829") + mean = float("-0.263102") + std = float("0.51507") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("0.147308") + max_val = 
float("2.09628") + mean = float("0.465287") + std = float("0.3238") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192] + dtype = "float32" + min_val = float("0.000226313") + max_val = float("0.0119362") + mean = float("0.00101561") + std = float("0.00100928") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.0590275") + max_val = float("0.0641189") + mean = float("0.00752121") + std = float("0.018598") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0401314") + max_val = float("0.0713962") + mean = float("-0.000302538") + std = float("0.0046896") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("-2.40401") + max_val = float("2.31762") + mean = float("-0.263216") + std = float("0.514996") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("0.6552") + max_val = float("2.82897") + mean = float("1.34109") + std = float("0.430342") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192] + dtype = "float32" + min_val = float("0.00431293") + max_val = float("0.0718222") + mean = float("0.0100381") + std = float("0.00593012") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.255796") + max_val = float("0.288713") + mean = float("0.0167581") + std = float("0.06384") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0896803") + max_val = float("0.0514106") + mean = float("-0.00016858") + std = float("0.00453782") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("-3.2846") + max_val = float("1.08332") + mean = float("-1.32548") + std = float("0.629983") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("0.511801") + max_val = float("1.92243") + mean = float("1.15336") + std = float("0.223177") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192] + dtype = "float32" + min_val = float("0.0178895") + max_val = float("0.293069") + mean = float("0.0463284") + std = float("0.0274981") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-1.14223") + max_val = float("0.153253") + mean = float("-0.133221") + std = float("0.132507") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0696522") + max_val = float("0.0793202") + mean = float("-0.000380521") + std = float("0.00518021") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("-3.81238") + max_val = float("3.66623") + mean = float("-0.649936") + std = float("0.909196") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + 
dtype = "float32" + min_val = float("0.692753") + max_val = float("4.20685") + mean = float("1.51581") + std = float("0.440915") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192] + dtype = "float32" + min_val = float("0.00440345") + max_val = float("0.0380397") + mean = float("0.0113698") + std = float("0.00483404") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.142699") + max_val = float("0.16726") + mean = float("0.0376018") + std = float("0.0359983") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.110642") + max_val = float("0.0914116") + mean = float("-0.00138469") + std = float("0.00922725") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("-2.92381") + max_val = float("0.905207") + mean = float("-0.416961") + std = float("0.669044") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("0.782064") + max_val = float("3.25351") + mean = float("1.44788") + std = float("0.40939") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192] + dtype = "float32" + min_val = float("0.00242725") + max_val = float("0.00741479") + mean = float("0.00396045") + std = float("0.00104202") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.05768") + max_val = float("0.0587972") + mean = float("0.0137742") + std = float("0.02209") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0702044") + max_val = float("0.0981504") + mean = float("-0.000543738") + std = float("0.00741051") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [384] + dtype = "float32" + min_val = float("-2.7622") + max_val = float("1.15725") + mean = float("-0.674252") + std = float("0.494382") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [384] + dtype = "float32" + min_val = float("0.436734") + max_val = float("1.90909") + mean = float("0.859705") + std = float("0.233984") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [384] + dtype = "float32" + min_val = float("0.00745905") + max_val = float("0.0573681") + mean = float("0.0167084") + std = float("0.00634355") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-0.510448") + max_val = float("0.313545") + mean = float("0.0146021") + std = float("0.0913832") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0732695") + max_val = float("0.0636209") + mean = float("-0.000162932") + std = float("0.00429878") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [256] + dtype = "float32" + min_val = float("-2.80583") + max_val = float("1.38972") + mean = float("-0.942672") + std = float("0.616162") + data = None + + +class 
Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [256] + dtype = "float32" + min_val = float("0.390795") + max_val = float("1.71259") + mean = float("0.927819") + std = float("0.167042") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [256] + dtype = "float32" + min_val = float("0.0013671") + max_val = float("0.00610039") + mean = float("0.00319985") + std = float("0.000918791") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [256] + dtype = "float32" + min_val = float("-0.219355") + max_val = float("0.191886") + mean = float("-0.0512222") + std = float("0.0691501") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.180777") + max_val = float("0.171873") + mean = float("-0.00107654") + std = float("0.0143585") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("-0.0148095") + max_val = float("0.000185925") + mean = float("-0.00625529") + std = float("0.00388123") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.194557") + max_val = float("0.272117") + mean = float("-0.00452407") + std = float("0.0108287") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [96] + dtype = "float32" + min_val = float("-2.29623") + max_val = float("0.84744") + mean = float("-0.061664") + std = float("0.525458") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [96] + dtype = "float32" + min_val = float("-0.119028") + max_val = float("2.24351") + mean = float("0.320993") + std = float("0.35019") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [96] + dtype = "float32" + min_val = float("1.84862e-10") + max_val = float("0.00298614") + mean = float("0.000677433") + std = float("0.000446022") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [96] + dtype = "float32" + min_val = float("-0.048905") + max_val = float("0.0694838") + mean = float("0.00491462") + std = float("0.0190368") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0465584") + max_val = float("0.0804484") + mean = float("-0.000322433") + std = float("0.006747") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [96] + dtype = "float32" + min_val = float("-2.29626") + max_val = float("0.848892") + mean = float("-0.0610437") + std = float("0.525854") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [96] + dtype = "float32" + min_val = float("0.467882") + max_val = float("3.28353") + mean = float("1.28238") + std = float("0.627065") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [96] + dtype = "float32" + min_val = float("0.00318884") + max_val = float("0.0406128") + mean = float("0.0176231") + std = float("0.00719165") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [96] + dtype = "float32" + min_val = float("-0.170451") + max_val = float("0.159699") + mean = 
float("0.0187586") + std = float("0.0641208") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.125748") + max_val = float("0.0961584") + mean = float("-0.000305803") + std = float("0.00769471") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-2.80484") + max_val = float("1.43502") + mean = float("-1.03868") + std = float("0.701443") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96] + dtype = "float32" + min_val = float("0.367755") + max_val = float("2.00427") + mean = float("1.06663") + std = float("0.229304") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [96] + dtype = "float32" + min_val = float("0.021631") + max_val = float("0.148654") + mean = float("0.0561004") + std = float("0.0186508") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [96] + dtype = "float32" + min_val = float("-2.01412") + max_val = float("1.04899") + mean = float("-0.183303") + std = float("0.349343") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0550323") + max_val = float("0.0782782") + mean = float("-0.000581257") + std = float("0.00844085") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-2.54212") + max_val = float("0.880687") + mean = float("-0.00372463") + std = float("0.509016") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96] + dtype = "float32" + min_val = float("-0.0965963") + max_val = float("3.24141") + mean = float("0.305728") + std = float("0.399008") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("0.000257759") + max_val = float("0.0159174") + mean = float("0.0018202") + std = float("0.00219223") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("-0.0508167") + max_val = float("0.0863359") + mean = float("0.0128663") + std = float("0.0244442") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.114929") + max_val = float("0.102275") + mean = float("-0.00112332") + std = float("0.00855384") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-2.54215") + max_val = float("0.882154") + mean = float("-0.00332994") + std = float("0.509446") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96] + dtype = "float32" + min_val = float("0.385235") + max_val = float("2.98414") + mean = float("0.918566") + std = float("0.401453") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("0.0121027") + max_val = float("0.0629028") + mean = float("0.028253") + std = float("0.00954018") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = 
float("-0.175262") + max_val = float("0.219869") + mean = float("0.0388041") + std = float("0.0675618") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0699125") + max_val = float("0.0583475") + mean = float("-0.000591747") + std = float("0.00783073") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-2.0651") + max_val = float("1.50717") + mean = float("-0.859382") + std = float("0.652877") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96] + dtype = "float32" + min_val = float("0.444271") + max_val = float("1.97568") + mean = float("1.08764") + std = float("0.245813") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [96] + dtype = "float32" + min_val = float("0.00767386") + max_val = float("0.0663026") + mean = float("0.0235471") + std = float("0.00918407") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [96] + dtype = "float32" + min_val = float("-0.868837") + max_val = float("0.154679") + mean = float("-0.0809624") + std = float("0.138574") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.138808") + max_val = float("0.140323") + mean = float("-0.000446325") + std = float("0.00880798") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [96] + dtype = "float32" + min_val = float("-1.50102") + max_val = float("1.85499") + mean = float("0.0859838") + std = float("0.86653") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [96] + dtype = "float32" + min_val = float("0.284081") + max_val = float("1.33672") + mean = float("0.688997") + std = float("0.267224") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [96] + dtype = "float32" + min_val = float("0.010188") + max_val = float("0.0686183") + mean = float("0.0270771") + std = float("0.0121446") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [96] + dtype = "float32" + min_val = float("-0.411275") + max_val = float("0.28692") + mean = float("-0.0389212") + std = float("0.125574") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.167969") + max_val = float("0.175471") + mean = float("-0.00107338") + std = float("0.0148063") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [96] + dtype = "float32" + min_val = float("-2.53701") + max_val = float("1.68017") + mean = float("0.398128") + std = float("0.701783") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [96] + dtype = "float32" + min_val = float("0.397613") + max_val = float("4.70134") + mean = float("1.36973") + std = float("0.963852") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("0.00616883") + max_val = float("0.107322") + mean = float("0.0239614") + std = float("0.0130094") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = 
[96] + dtype = "float32" + min_val = float("-0.261088") + max_val = float("0.282727") + mean = float("0.00424058") + std = float("0.1215") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.105657") + max_val = float("0.151337") + mean = float("-0.000397048") + std = float("0.0151535") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192] + dtype = "float32" + min_val = float("-4.5847") + max_val = float("2.01272") + mean = float("-0.0602424") + std = float("0.877135") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("0.538202") + max_val = float("4.32625") + mean = float("1.03791") + std = float("0.431062") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("0.00691667") + max_val = float("0.0910159") + mean = float("0.0224277") + std = float("0.0129748") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("-0.350152") + max_val = float("0.28186") + mean = float("0.00531245") + std = float("0.108206") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.113976") + max_val = float("0.126205") + mean = float("-0.000242846") + std = float("0.00778871") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [128] + dtype = "float32" + min_val = float("-2.16466") + max_val = float("1.44162") + mean = float("-0.613418") + std = float("0.632513") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [128] + dtype = "float32" + min_val = float("0.322913") + max_val = float("2.19318") + mean = float("0.786634") + std = float("0.214289") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [128] + dtype = "float32" + min_val = float("0.00202904") + max_val = float("0.016507") + mean = float("0.00553313") + std = float("0.00262303") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [128] + dtype = "float32" + min_val = float("-0.320597") + max_val = float("0.458915") + mean = float("-0.07816") + std = float("0.118662") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.212864") + max_val = float("0.22012") + mean = float("-0.0016944") + std = float("0.0257433") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [96] + dtype = "float32" + min_val = float("-0.0230382") + max_val = float("0.0047538") + mean = float("-0.00783544") + std = float("0.00711077") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.31077") + max_val = float("0.228926") + mean = float("-0.0057098") + std = float("0.0189347") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = 
[48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0667704") + max_val = float("0.0923849") + mean = float("-0.00177062") + std = float("0.0140855") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.101548") + max_val = float("0.0931938") + mean = float("-0.000616131") + std = float("0.0138816") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0875293") + max_val = float("0.138099") + mean = float("-0.00085434") + std = float("0.0146977") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0682765") + max_val = float("0.106851") + mean = float("-0.00265824") + std = float("0.0168496") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape 
= [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0753598") + max_val = float("0.0722024") + mean = float("-0.00104566") + std = float("0.0133656") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.136967") + max_val = float("0.0953467") + mean = float("-0.000974076") + std = float("0.0154503") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.167198") + max_val = float("0.158575") + mean = float("-0.00217736") + std = float("0.0255494") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.173509") + max_val = 
float("0.254309") + mean = float("0.000938672") + std = float("0.0275195") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [96] + dtype = "float32" + min_val = float("-3.42052") + max_val = float("3.81379") + mean = float("0.316598") + std = float("1.1982") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [96] + dtype = "float32" + min_val = float("0.528309") + max_val = float("5.52433") + mean = float("1.04466") + std = float("0.547027") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [96] + dtype = "float32" + min_val = float("0.0201492") + max_val = float("0.256147") + mean = float("0.0498166") + std = float("0.034115") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [96] + dtype = "float32" + min_val = float("-0.774972") + max_val = float("0.603816") + mean = float("-0.035635") + std = float("0.184447") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.131442") + max_val = float("0.137659") + mean = float("-0.000181925") + std = float("0.013913") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.206043") + max_val = float("0.271847") + mean = float("-0.00145344") + std = float("0.0390376") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.192206") + max_val = float("0.171418") + mean = float("-0.0146261") + std = float("0.029449") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.155915") + max_val = float("0.129377") + mean = float("0.00162577") + std = float("0.0327822") + data = None 
+ + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.150092") + max_val = float("0.116282") + mean = float("-0.00114223") + std = float("0.0238489") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.183793") + max_val = float("0.209257") + mean = float("-0.000668016") + std = float("0.0269376") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.180287") + max_val = float("0.188822") + mean = float("-0.0027093") + std = float("0.0424717") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = 
[24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.207794") + max_val = float("0.188844") + mean = float("-0.000951303") + std = float("0.0430266") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.177728") + max_val = float("0.135622") + mean = float("6.35848e-05") + std = float("0.0231614") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.258409") + max_val = float("0.268148") + mean = float("-0.000591489") + std = float("0.0389936") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.339065") + max_val = float("0.330435") + mean = float("0.000302612") + std = float("0.0524466") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = 
[16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.267419") + max_val = float("0.355597") + mean = float("-0.0023263") + std = float("0.0773249") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..4e14ba7ec --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +5cbc5021225385505f1108fab63db5c78abc03b8e6dd37e4cd359d0c7f8a7039 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/input_meta.py new file mode 100644 index 000000000..992977f06 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/input_meta.py @@ -0,0 +1,48 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 4116, 10] + dtype = "float32" + max_val = float("2.45686e-06") + mean = float("1.5371e-10") + std = float("1.05938e-08") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4116, 40] + dtype = "float32" + min_val = float("-34.847") + max_val = float("141.436") + mean = float("0.761771") + std = float("2.75335") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4116, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("444.0") + mean = float("224.0") + std = float("129.279") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4116, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/model.py new file mode 100644 index 000000000..9113a8d1e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3, data_4): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_3, data_4) + del data_3 + + # pd_op.shape64: (3xi64) <- (2x-1x40xf32) + shape64_0 = paddle._C_ops.shape64(data_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("10"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x10xf32) <- (2x-1x40xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_2, stack_0) + del data_2, stack_0 + + # pd_op.softmax: (-1x-1x4x10xf32) <- (-1x-1x4x10xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x-1x4x10xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = 
paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (2x-1x10xf32) <- (2x-1x10xf32) + share_data__0 = data_1.detach() + del data_1 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_4, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/weight_meta.py new file mode 100644 index 000000000..88fef0bea --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 10, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py new file mode 100644 index 000000000..84df7ef74 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [15] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [15] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [30] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [30] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [60] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [60] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 384, 15, 15] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.73963") + mean = float("0.347025") + std = float("0.708791") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 192, 30, 30] + dtype 
= "float32" + min_val = float("-0.278465") + max_val = float("10.0545") + mean = float("0.469318") + std = float("0.798413") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 96, 60, 60] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("14.5401") + mean = float("0.60308") + std = float("0.85045") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py new file mode 100644 index 000000000..7cfbed716 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + 
split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del 
scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # 
builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 
1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x384x-1x-1xf32, 10x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: 
(2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x384x-1x-1xf32, 40x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + 
False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x192x-1x-1xf32, 10x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x192x-1x-1xf32, 40x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + del parameter_18 + + # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, 
[0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x96x-1x-1xf32, 10x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + 
batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x96x-1x-1xf32, 40x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32]) <- (2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x-1x10xf32) <- ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32]) <- (2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x-1x40xf32) <- ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + 
reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py new file mode 100644 index 000000000..cd9bb1db0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py @@ -0,0 +1,580 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 96, 3, 3] + dtype = "float32" + min_val = float("-0.220386") + max_val = float("0.222309") + mean = float("2.62808e-08") + std = float("0.0161374") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.127121") + max_val = float("0.345832") + mean = float("0.108268") + std = float("0.11199") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.947191") + max_val = float("2.26117") + mean = float("1.5267") + std = float("0.270308") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000653841") + max_val = float("0.0512872") + mean = float("0.00630045") + std = float("0.00803373") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.1806") + max_val = float("0.105559") + mean = float("-0.0118148") + std = float("0.0471293") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.16617") + max_val = float("0.150574") + mean = float("-0.0015204") + std = float("0.018482") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.01695") + max_val = float("0.0144876") + mean = float("-0.000590983") + std = float("0.0062921") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0289852") + max_val = float("0.0383979") + mean = float("-0.00056367") + std = float("0.00473631") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 96, 3, 3] + dtype = "float32" + min_val = float("-0.158508") + max_val = float("0.111546") + mean = float("-0.00123997") + std = float("0.0149231") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-1.00246") + max_val = float("1.70494") + mean = float("0.553674") + std = float("0.526983") + data = None + + +class 
Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.751697") + max_val = float("2.08213") + mean = float("1.46982") + std = float("0.2376") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000680279") + max_val = float("0.0389719") + mean = float("0.00525898") + std = float("0.00550929") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.302926") + max_val = float("0.242336") + mean = float("0.0338812") + std = float("0.0731476") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0920552") + max_val = float("0.101403") + mean = float("-0.000391196") + std = float("0.016093") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00790501") + max_val = float("0.0118839") + mean = float("-0.000616206") + std = float("0.00334672") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0354745") + max_val = float("0.0393783") + mean = float("-0.000415455") + std = float("0.00398812") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.15995") + max_val = float("0.172865") + mean = float("7.42875e-09") + std = float("0.00878877") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.0211182") + max_val = float("0.16802") + mean = float("0.0783479") + std = float("0.0391695") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("1.07811") + max_val = float("1.51568") + mean = float("1.30274") + std = float("0.0876299") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000349327") + max_val = float("0.0216234") + mean = float("0.00330979") + std = float("0.00396173") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.124726") + max_val = float("0.0534715") + mean = float("-0.0102394") + std = float("0.0264137") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0792181") + max_val = float("0.107778") + mean = float("-0.000368823") + std = float("0.00729713") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00746441") + max_val = float("0.00722328") + mean = float("-8.13383e-05") + std = float("0.00296425") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00795566") + max_val = float("0.0110585") + mean = float("-0.000113663") + std = float("0.00156623") + data = 
None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0899103") + max_val = float("0.0751371") + mean = float("-0.000593556") + std = float("0.00682701") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.291965") + max_val = float("1.00507") + mean = float("0.404706") + std = float("0.237409") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("1.0436") + max_val = float("1.84744") + mean = float("1.34394") + std = float("0.127482") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000363284") + max_val = float("0.0104766") + mean = float("0.00183808") + std = float("0.00170966") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.160158") + max_val = float("0.11803") + mean = float("-0.00116506") + std = float("0.0399602") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0517684") + max_val = float("0.0545506") + mean = float("-0.000237345") + std = float("0.00623955") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00367397") + max_val = float("0.00885849") + mean = float("-0.000153054") + std = float("0.00155494") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0153384") + max_val = float("0.0338853") + mean = float("-0.000100079") + std = float("0.00136892") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = float("-0.02606") + max_val = float("0.0317561") + mean = float("9.52241e-10") + std = float("0.0020846") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0246739") + max_val = float("0.152016") + mean = float("0.0408553") + std = float("0.032015") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("1.05705") + max_val = float("1.41825") + mean = float("1.2191") + std = float("0.0542166") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("0.000103167") + max_val = float("0.00446044") + mean = float("0.000448355") + std = float("0.000443999") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0274058") + max_val = float("0.0126066") + mean = float("-0.00688192") + std = float("0.00609138") + data = None + + +class Program_weight_tensor_parameter_42: + name = 
"parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0464693") + max_val = float("0.0548612") + mean = float("-0.000100111") + std = float("0.00295451") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00639402") + max_val = float("0.00421888") + mean = float("3.54671e-05") + std = float("0.00171694") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00654664") + max_val = float("0.00763939") + mean = float("-4.30022e-06") + std = float("0.000719087") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0276653") + max_val = float("0.0223204") + mean = float("-0.000443044") + std = float("0.00213442") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.409858") + max_val = float("0.610632") + mean = float("0.212569") + std = float("0.109879") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("1.05961") + max_val = float("1.46695") + mean = float("1.20196") + std = float("0.0657315") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("7.46256e-05") + max_val = float("0.0054246") + mean = float("0.000793045") + std = float("0.00065768") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.0807872") + max_val = float("0.0680079") + mean = float("-0.0131062") + std = float("0.0222074") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0661697") + max_val = float("0.0355423") + mean = float("-0.00020161") + std = float("0.00342025") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.00360413") + max_val = float("0.00538468") + mean = float("-8.84011e-05") + std = float("0.000952451") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0169924") + max_val = float("0.0292524") + mean = float("-2.04535e-05") + std = float("0.000783345") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..4396963bd --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +7e641c3cae870d8e911f0ad7dadcaeff908d67ccf007dae85cd9995bbaf2bbd3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": 
"paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/input_meta.py new file mode 100644 index 000000000..f9425676d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/input_meta.py @@ -0,0 +1,78 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [60] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 4725, 10] + dtype = "float32" + min_val = float("7.08102e-11") + max_val = float("0.879841") + mean = float("0.00990287") + std = float("0.0301353") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4725, 4] + dtype = "float32" + min_val = float("-93.7054") + max_val = float("622.862") + mean = float("240.021") + std = float("140.921") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [4725, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("476.0") + mean = float("240.0") + std = float("138.52") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [4725, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 60, 1] + dtype = "int32" + min_val = 0 + max_val = 9 + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 60, 4] + dtype = "float32" + max_val = float("478.782") + mean = float("81.1713") + std = float("81.8496") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 60, 1] + dtype = "float32" + max_val = float("1.0") + mean = float("0.65") + std = float("0.47697") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/model.py new file mode 100644 index 000000000..a887d4797 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/model.py @@ -0,0 +1,551 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_6, full_int_array_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) + del data_2 + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_2 = [0] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x-1x-1xf32) <- (2x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_2, subtract_2 + + # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) + del subtract_3 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x10x-1xf32) <- (2x-1x10xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (1xf64) <- () + full_4 = 
paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int32") + del full_4, full_5, full_6 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_7, data_0] + del data_0, full_7 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) + del stack_0 + + # pd_op.squeeze: (2x-1xi32) <- (2x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_5, full_int_array_4) + del data_5 + + # builtin.combine: ([2x-1xi32, 2x-1xi32]) <- (2x-1xi32, 2x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x-1x2xi32) <- ([2x-1xi32, 2x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.gather_nd: (2x-1x-1xf32) <- (2x10x-1xf32, 2x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_7) + del multiply_0 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_4, full_3, float("0"), True) + del data_4, full_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) + del data_3 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_8) + del unsqueeze_3 + + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) <- (2x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_8) + del full_8, unsqueeze_0 + + # builtin.split: (2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32) <- ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) + subtract_5 = 
paddle._C_ops.subtract(split_1, split_3) + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) + combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x-1x-1x4xf32) <- ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_9) + del combine_2 + + # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_10) + del min_0 + + # pd_op.unsqueeze: (1x1x-1x1xf32) <- (-1x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_5) + del full_int_array_5, scale_1 + + # pd_op.add: (2x-1x1x1xf32) <- (2x-1x1x1xf32, 2x-1x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1x1xf32) <- (2x-1x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_11, float("0"), True) + del add_1 + + # pd_op.add: (2x-1x1x1xf32) <- (2x-1x1x1xf32, 2x-1x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (2x-1x1x1xf32) <- (2x-1x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_11, float("0"), True) + del add_2, full_11 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x-1x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x-1x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x-1x1xf32, 1x1x-1x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x-1x1xf32, 1x1x-1x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) + combine_3 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 + + # pd_op.concat: (2x-1x-1x4xf32) <- 
([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_3, full_9) + del combine_3, full_9 + + # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_10) + del full_10, min_1 + + # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) + cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) + cast_3 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_2, data_7) + del cast_2 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_3, data_7) + del cast_3 + + # pd_op.sum: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (2x-1x1xb) <- (2x-1x1xf32, xf32) + equal_2 = paddle._C_ops.equal(sum_0, full_12) + del sum_0 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_1, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (2x-1x1xb) <- (2x-1x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_2, full_1, paddle.bool, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.cast: (2x-1x1xf32) <- (2x-1x1xb) + cast_4 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (2x-1x1xf32) <- (2x-1x1xb) + cast_5 = paddle._C_ops.cast(equal_2, paddle.float32) + del equal_2 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_4) + del add_6, cast_4 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x-1x-1xf32) + add_10 = paddle._C_ops.add(cast_5, add_7) + del add_7, cast_5 + + # pd_op.cast: (2x-1x-1xb) <- (2x-1x-1xf32) + cast_6 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(cast_6, add_8, add_9) + del add_8, add_9, cast_6 + + # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(where_0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] + ) + del full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = 
paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] + ) + del full_int_array_0, full_int_array_6, shape64_0 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x-1x13xf32, 2x-1x13xi64) <- (2x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_13, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_13, where_0 + + # pd_op.one_hot: (2x-1x13x-1xf32) <- (2x-1x13xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 + ) + del slice_5, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x-1x-1xf32) <- (2x-1x13x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_7) + del data_7, sum_1 + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_12) + del multiply_3 + + # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_12) + del full_12, multiply_2 + + # pd_op.bitwise_or: (2x-1x-1xb) <- (2x-1x-1xb, 2x-1x-1xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) + cast_7 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_7) + del cast_7, multiply_4 + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_14 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_14) + del divide_0, full_14, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..64ef15f84 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +b28bf90ac95a22521022a0ac3c4a08d766b56e12bda87396cd4425fcb91c1c29 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": 
"PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py new file mode 100644 index 000000000..a9e8301f3 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/input_meta.py @@ -0,0 +1,78 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [60] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [4725] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 4725] + dtype = "float32" + max_val = float("24.0") + mean = float("0.0858201") + std = float("0.756726") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 60, 4725] + dtype = "float32" + max_val = float("0.93016") + mean = float("0.000487774") + std = float("0.0162952") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 60, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00143033") + std = float("0.0377927") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 60, 1] + dtype = "int32" + min_val = 0 + max_val = 9 + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 60, 4] + dtype = "float32" + max_val = float("478.782") + mean = float("81.1713") + std = float("81.8496") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 60, 4725] + dtype = "float32" + max_val = float("0.237328") + mean = float("1.24907e-05") + std = float("0.00104253") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py new file mode 100644 index 000000000..90c3991aa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/model.py @@ -0,0 +1,290 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_3, data_4) + + # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + 
# pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2, shape64_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_2, False, False, paddle.int64) + del multiply_1 + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(slice_0, argmax_0.dtype), slice_0 + ) + del argmax_0, slice_0 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_3, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_2 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_2, paddle.int64) + del multiply_2 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() 
+ ) + del full_6 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_4) + del data_7, full_int_array_4 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 + ) + del full_8 + + # pd_op.full: (10xi64) <- () + full_9 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (2x-1x10xf32) <- (2x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, where_0) + del data_8 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_4, full_int_array_5, True) + del multiply_4 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_10, float("1e-09"), True) + del full_10, max_0 + + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_3, scale_0) + del multiply_3, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_5, full_int_array_3, False) + del full_int_array_3, multiply_5 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 + + # pd_op.multiply: (2x-1x10xf32) <- (2x-1x10xf32, 2x-1x1xf32) 
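# The final multiply below rescales the assigned one-hot class targets by the
# normalized alignment metric (metric divided by its per-gt maximum plus eps,
# times the per-gt maximum IoU, reduced over the gt axis), i.e. the soft labels
# of task-aligned assignment. A minimal sketch with public Paddle APIs;
# `one_hot_labels`, `align_metric` and `ious` are placeholder names for
# illustration, not tensors of this captured graph:

import paddle

def soft_class_targets(one_hot_labels, align_metric, ious, eps=1e-9):
    """one_hot_labels: [B, A, C]; align_metric, ious: [B, N, A], already
    masked by the positive assignment mask (N gts, A anchors)."""
    max_metric = align_metric.max(axis=-1, keepdim=True)   # [B, N, 1]
    max_iou = ious.max(axis=-1, keepdim=True)               # [B, N, 1]
    norm = (align_metric / (max_metric + eps) * max_iou).max(axis=1)  # [B, A]
    return one_hot_labels * norm.unsqueeze(-1)               # [B, A, C]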
+ multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..e4d87c73f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +3c00943b4a99c0dabb02d091e9638b26cae706e7081d61ba73c72e9e733042bf \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/input_meta.py new file mode 100644 index 000000000..fbc129bcf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/input_meta.py @@ -0,0 +1,71 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 9261, 10] + dtype = "float32" + min_val = float("2.43778e-11") + max_val = float("0.940314") + mean = float("0.00863495") + std = float("0.0285493") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 9261, 4] + dtype = "float32" + min_val = float("-98.1602") + max_val = float("799.63") + mean = float("336.038") + std = float("195.803") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [9261, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("668.0") + mean = float("336.0") + std = float("193.958") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [9261, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 38, 1] + dtype = "int32" + min_val = 0 + max_val = 9 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 38, 4] + dtype = "float32" + max_val = float("654.24") + mean = float("257.813") + std = float("167.378") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 38, 1] + dtype = "float32" + max_val = float("1.0") + mean = float("0.986842") + std = float("0.113951") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/model.py new file mode 100644 index 000000000..f4e1a03e0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/model.py @@ -0,0 +1,504 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] 
+ + # pd_op.unsqueeze: (2x38x1x4xf32) <- (2x38x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x9261x4xf32) <- (2x9261x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x38x1x2xf32) <- (2x38x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (2x38x1x2xf32) <- (2x38x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (2x1x9261x2xf32) <- (2x1x9261x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x9261x2xf32) <- (2x1x9261x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x38x9261x2xf32) <- (2x38x1x2xf32, 2x1x9261x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x38x9261x2xf32) <- (2x38x1x2xf32, 2x1x9261x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x38x9261x2xf32) <- (2x38x9261x2xf32, 2x38x9261x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (2x38x9261x2xf32) <- (2x38x9261x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x38x9261xf32) <- (2x38x9261x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x38x1x2xf32) <- (2x38x1x2xf32, 2x38x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x38x1x2xf32) <- (2x38x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x38x1xf32) <- (2x38x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x9261x2xf32) <- (2x1x9261x2xf32, 2x1x9261x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x9261x2xf32) <- (2x1x9261x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_1, subtract_2 + + # pd_op.prod: (2x1x9261xf32) <- (2x1x9261x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x38x9261xf32) <- (2x38x1xf32, 2x1x9261xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, 
paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x38x9261xf32) <- (2x38x9261xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del subtract_3 + + # pd_op.divide: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x10x9261xf32) <- (2x9261x10xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 38] + + # pd_op.tile: (2x38xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x38xi32) <- (2x38x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([2x38xi32, 2x38xi32]) <- (2x38xi32, 2x38xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x38x2xi32) <- ([2x38xi32, 2x38xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x38x9261xf32) <- (2x10x9261xf32, 2x38x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x38x9261xf32) <- (2x38x9261xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x38x9261xf32) <- (2x38x9261xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) + del multiply_0 + + # pd_op.scale: (9261x1xf32) <- (9261x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) + del data_3, full_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x9261x2xf32) <- (9261x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x9261x1xf32, 1x1x9261x1xf32]) <- (1x1x9261x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # builtin.split: (1x1x9261x1xf32, 1x1x9261x1xf32) <- ([1x1x9261x1xf32, 1x1x9261x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x38x1x1xf32, 2x38x1x1xf32, 2x38x1x1xf32, 2x38x1x1xf32]) <- (2x38x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x38x1x1xf32, 2x38x1x1xf32, 2x38x1x1xf32, 2x38x1x1xf32) <- ([2x38x1x1xf32, 2x38x1x1xf32, 2x38x1x1xf32, 2x38x1x1xf32]) + ( + split_2, + 
split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x38x9261x1xf32) <- (1x1x9261x1xf32, 2x38x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + + # pd_op.subtract: (2x38x9261x1xf32) <- (1x1x9261x1xf32, 2x38x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + + # pd_op.subtract: (2x38x9261x1xf32) <- (2x38x1x1xf32, 1x1x9261x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + + # pd_op.subtract: (2x38x9261x1xf32) <- (2x38x1x1xf32, 1x1x9261x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32]) <- (2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x38x9261x4xf32) <- ([2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1 + + # pd_op.min: (2x38x9261xf32) <- (2x38x9261x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x38x9261xb) <- (2x38x9261xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del min_0 + + # pd_op.unsqueeze: (1x1x9261x1xf32) <- (9261x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) + del full_int_array_6, scale_1 + + # pd_op.add: (2x38x1x1xf32) <- (2x38x1x1xf32, 2x38x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x38x1x1xf32) <- (2x38x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_9, float("0"), True) + del add_1 + + # pd_op.add: (2x38x1x1xf32) <- (2x38x1x1xf32, 2x38x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (2x38x1x1xf32) <- (2x38x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_9, float("0"), True) + del add_2, full_9 + + # pd_op.subtract: (2x38x9261x1xf32) <- (2x38x1x1xf32, 1x1x9261x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (2x38x9261x1xf32) <- (1x1x9261x1xf32, 2x38x9261x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (2x38x9261x1xf32) <- (2x38x1x1xf32, 1x1x9261x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (2x38x9261x1xf32) <- (1x1x9261x1xf32, 2x38x9261x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (2x38x9261x1xf32) <- (2x38x1x1xf32, 1x1x9261x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (2x38x9261x1xf32) <- (2x38x9261x1xf32, 1x1x9261x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (2x38x9261x1xf32) <- (2x38x1x1xf32, 1x1x9261x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (2x38x9261x1xf32) <- (2x38x9261x1xf32, 1x1x9261x1xf32) + 
subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32]) <- (2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32) + combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 + + # pd_op.concat: (2x38x9261x4xf32) <- ([2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32, 2x38x9261x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2, full_7 + + # pd_op.min: (2x38x9261xf32) <- (2x38x9261x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (2x38x9261xb) <- (2x38x9261xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_8) + del full_8, min_1 + + # pd_op.cast: (2x38x9261xf32) <- (2x38x9261xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (2x38x9261xf32) <- (2x38x9261xb) + cast_1 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_0, data_6) + del cast_0 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_1, data_6) + del cast_1 + + # pd_op.sum: (2x38x1xf32) <- (2x38x9261xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (2x38x1xb) <- (2x38x1xf32, xf32) + equal_0 = paddle._C_ops.equal(sum_0, full_10) + del sum_0 + + # pd_op.add: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (2x38x9261xf32) <- (2x38x9261xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_0, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (2x38x9261xf32) <- (2x38x9261xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (2x38x1xb) <- (2x38x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() + ) + del full_0 + + # pd_op.cast: (2x38x1xf32) <- (2x38x1xb) + cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (2x38x1xf32) <- (2x38x1xb) + cast_3 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.add: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_2) + del add_6, cast_2 + + # pd_op.add: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (2x38x9261xf32) <- (2x38x1xf32, 2x38x9261xf32) + add_10 = paddle._C_ops.add(cast_3, add_7) + del add_7, cast_3 + + # pd_op.cast: (2x38x9261xb) <- (2x38x9261xf32) + cast_4 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (2x38x9261xf32) <- (2x38x9261xb, 2x38x9261xf32, 2x38x9261xf32) 
+ where_0 = paddle._C_ops.where(cast_4, add_8, add_9) + del add_8, add_9, cast_4 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x38x13xf32, 2x38x13xi64) <- (2x38x9261xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, where_0 + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("9261"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x38x13x9261xf32) <- (2x38x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_12, topk_1.dtype), full_12 + ) + del full_12, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x38x9261xf32) <- (2x38x13x9261xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_6) + del data_6, sum_1 + + # pd_op.greater_than: (2x38x9261xb) <- (2x38x9261xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_10) + del multiply_3 + + # pd_op.greater_than: (2x38x9261xb) <- (2x38x9261xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_10) + del full_10, multiply_2 + + # pd_op.bitwise_or: (2x38x9261xb) <- (2x38x9261xb, 2x38x9261xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (2x38x9261xf32) <- (2x38x9261xb) + cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (2x38x9261xf32) <- (2x38x9261xf32, 2x38x9261xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) + del cast_5, multiply_4 + + # pd_op.sum: (2x9261xf32) <- (2x38x9261xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x9261xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_13 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_13) + del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..0e41bf3ac --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +56b33ce8013681e5774a98f1aef660acf3db6206cf15004487eb3e77248d98b0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_net.json new file mode 100644 index 000000000..6ce3cf9a5 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-S", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/input_meta.py new file mode 100644 index 000000000..92edd8821 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 4725] + dtype = "int32" + min_val = 0 + max_val = 10 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/model.py new file mode 100644 index 000000000..14b178ed9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("10"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (2x-1xb) <- (2x-1xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (2x-1xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..b08da1263 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py new file mode 100644 index 000000000..bf83235e8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "float32" + data = [0.136913] 
+ + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "float32" + data = [0.574719] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "float32" + data = [0.566784] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py new file mode 100644 index 000000000..4cccb2b8e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py @@ -0,0 +1,43 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) + del data_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + del data_0 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_1 = paddle._C_ops.add(scale_0, scale_1) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xf32) <- (xf32, 1xf32) + scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + del data_1 + + # pd_op.add: (xf32) <- (xf32, xf32) + add_0 = paddle._C_ops.add(add_1, scale_2) + del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + + return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..a90272c96 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py new file mode 100644 index 000000000..0f83461ca --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] 
+ dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [192] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [192] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 768, 48, 48] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.21279") + mean = float("0.263701") + std = float("0.615333") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 384, 96, 96] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.39853") + mean = float("0.366505") + std = float("0.697682") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 192, 192, 192] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("13.9944") + mean = float("0.442546") + std = float("0.692429") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py new file mode 100644 index 000000000..254a76346 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py @@ -0,0 +1,1144 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") + del data_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) + del scale_0 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + 
del data_0 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) + del cast_1 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_0 = [scale_3, scale_1] + del scale_1, scale_3 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_1 = [scale_4, scale_5, scale_6, scale_7] + del scale_4, scale_5, scale_6, scale_7 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_0 = paddle._C_ops.shape64(reshape_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_0, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") + del data_3 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_5 
= paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) + del scale_8 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + del data_2 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) + del arange_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) + del cast_3 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) + del scale_10 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_4 = [scale_11, scale_9] + del scale_11, scale_9 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_5 = [scale_12, scale_13, scale_14, scale_15] + del scale_12, scale_13, scale_14, scale_15 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_6 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_4 = paddle._C_ops.stack(combine_6, -1) + del combine_6 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) + del stack_3 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) + del stack_4 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_1 = paddle._C_ops.shape64(reshape_2) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_7 = [slice_1, full_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_5, stack_5, paddle.float32 + ) + del full_5, stack_5 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") + del data_5 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) + del arange_4 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) + del cast_4 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("8"), 
paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) + del scale_16 + + # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) + arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") + del data_4, full_0, full_1 + + # pd_op.cast: (-1xf32) <- (-1xi64) + cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) + del arange_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) + del cast_5 + + # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) + scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) + del scale_18 + + # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) + combine_8 = [scale_19, scale_17] + del scale_17, scale_19 + + # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_8) + del combine_8 + + # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + + # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) + scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) + del full_2 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) + combine_9 = [scale_20, scale_21, scale_22, scale_23] + del scale_20, scale_21, scale_22, scale_23 + + # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + stack_6 = paddle._C_ops.stack(combine_9, -1) + del combine_9 + + # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + combine_10 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + stack_7 = paddle._C_ops.stack(combine_10, -1) + del combine_10 + + # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) + del full_int_array_0, stack_6 + + # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) + del full_int_array_1, stack_7 + + # pd_op.shape64: (2xi64) <- (-1x4xf32) + shape64_2 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_11 = [slice_2, full_4] + del full_4 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) + full_with_tensor_2 = paddle._C_ops.full_with_tensor( + full_6, stack_8, paddle.float32 + ) + del full_6, stack_8 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) + combine_12 = [reshape_0, reshape_2, reshape_4] + del reshape_0, reshape_2, reshape_4 + + # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 
1xi32) + concat_2 = paddle._C_ops.concat(combine_12, full_7) + del combine_12 + + # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) + combine_13 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_13, full_7) + del combine_13 + + # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) + combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] + del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + + # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_14, full_7) + del combine_14, full_7 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_4 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_4 + + # pd_op.pool2d: (1x768x1x1xf32) <- (1x768x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_6, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + del parameter_52 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + + # pd_op.conv2d: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_0, data_6) + + # pd_op.conv2d: (1x10x-1x-1xf32) <- (1x768x-1x-1xf32, 10x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + del parameter_45 + + # pd_op.add: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32, 1x10x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (1x768x1x1xf32) <- 
(1x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + del parameter_43 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) + del data_6 + + # pd_op.conv2d: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x88x-1x-1xf32) <- (1x768x-1x-1xf32, 88x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + del parameter_36 + + # pd_op.add: (1x88x-1x-1xf32) <- (1x88x-1x-1xf32, 1x88x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (1x10x-1xf32) <- (1x10x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (1x-1x10xf32) <- (1x10x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (1x88x-1xf32) <- (1x88x-1x-1xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (1x-1x88xf32) <- (1x88x-1xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (1x384x1x1xf32) <- (1x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_7, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + del parameter_34 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x1x1xf32) + multiply_2 = 
paddle._C_ops.multiply(data_7, sigmoid_3) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_2, data_7) + + # pd_op.conv2d: (1x10x-1x-1xf32) <- (1x384x-1x-1xf32, 10x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + del parameter_27 + + # pd_op.add: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32, 1x10x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + del parameter_25 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + del data_7 + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (1x88x-1x-1xf32) <- (1x384x-1x-1xf32, 88x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + 
del parameter_18 + + # pd_op.add: (1x88x-1x-1xf32) <- (1x88x-1x-1xf32, 1x88x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (1x10x-1xf32) <- (1x10x-1x-1xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (1x-1x10xf32) <- (1x10x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (1x88x-1xf32) <- (1x88x-1x-1xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (1x-1x88xf32) <- (1x88x-1xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (1x192x1x1xf32) <- (1x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_8, + full_int_array_4, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + del parameter_16 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_11 = paddle._C_ops.add(swish_4, data_8) + + # pd_op.conv2d: (1x10x-1x-1xf32) <- (1x192x-1x-1xf32, 10x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + del parameter_9 + + # pd_op.add: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32, 1x10x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + del parameter_7 + + # pd_op.add: 
(1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) + del data_8 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x88x-1x-1xf32) <- (1x192x-1x-1xf32, 88x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) + del full_int_array_5, parameter_0 + + # pd_op.add: (1x88x-1x-1xf32) <- (1x88x-1x-1xf32, 1x88x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (1x10x-1xf32) <- (1x10x-1x-1xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (1x-1x10xf32) <- (1x10x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (1x88x-1xf32) <- (1x88x-1x-1xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (1x-1x88xf32) <- (1x88x-1xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_8 + + # builtin.combine: ([1x-1x10xf32, 1x-1x10xf32, 1x-1x10xf32]) <- (1x-1x10xf32, 1x-1x10xf32, 1x-1x10xf32) + combine_15 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (1x-1x10xf32) <- ([1x-1x10xf32, 1x-1x10xf32, 1x-1x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_15, full_8) + del combine_15 + + # builtin.combine: ([1x-1x88xf32, 1x-1x88xf32, 1x-1x88xf32]) <- (1x-1x88xf32, 1x-1x88xf32, 1x-1x88xf32) + combine_16 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (1x-1x88xf32) <- ([1x-1x88xf32, 1x-1x88xf32, 1x-1x88xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_16, full_8) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + 
batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_16, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_8, + full_int_array_4, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + slice_0, + slice_1, + slice_2, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py new file mode 100644 index 000000000..8e33e8e3a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [88] + dtype = "float32" + min_val = float("0.825624") + max_val = float("0.846159") + mean = float("0.828073") + std = float("0.00356405") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [88, 192, 3, 3] + dtype = "float32" + min_val = float("-0.120061") + max_val = float("0.122726") + mean = float("1.20344e-08") + std = float("0.00589807") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0433758") + max_val = float("0.207094") + mean = float("0.0514628") + std = float("0.0402492") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.850872") + max_val = float("1.63127") + mean = float("1.22454") + std = float("0.145326") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000193476") + max_val = float("0.00844475") + mean = float("0.00135281") + std = float("0.00118991") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0754706") + max_val = float("0.0319285") + mean = float("-0.0124719") + std = float("0.0176434") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0734168") + max_val = float("0.108512") + mean = float("-0.000444445") + std = float("0.00763924") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00613384") + max_val = float("0.00922103") + mean = float("-7.86384e-05") + std = float("0.00341788") + data = None + + 
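Note: the Program_weight_tensor_parameter_* classes in this weight_meta.py record only tensor metadata (name, shape, dtype, and summary statistics); the data field is left as None. Below is a minimal sketch, not part of the patch, of how one such entry could be turned into a placeholder weight for exercising the captured subgraph. The helper name materialize and the choice of clipped Gaussian noise are assumptions for illustration only, not an API provided by this repository.

import numpy as np
import paddle


def materialize(meta):
    # Assumption: sample values from the recorded mean/std, then clip to the
    # recorded [min_val, max_val] range. Entries without mean/std fall back to
    # a constant tensor, which is enough for a shape-only placeholder.
    mean = float(getattr(meta, "mean", 0.0) or 0.0)
    std = float(getattr(meta, "std", 0.0) or 0.0)
    values = np.random.normal(mean, std, size=meta.shape).astype(meta.dtype)
    if hasattr(meta, "min_val") and hasattr(meta, "max_val"):
        values = np.clip(values, float(meta.min_val), float(meta.max_val))
    return paddle.to_tensor(values)


# Example: parameter_0 = materialize(Program_weight_tensor_parameter_0)

The sampled values only approximate the recorded statistics, so a placeholder built this way is suitable for tracing shapes through the subgraph, not for reproducing the original model's numerical outputs.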
+class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00662369") + max_val = float("0.0119822") + mean = float("-0.000127685") + std = float("0.00177164") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-0.175474") + max_val = float("0.055091") + mean = float("-0.00147039") + std = float("0.00842565") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.329693") + max_val = float("0.892228") + mean = float("0.356694") + std = float("0.271228") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.01538") + max_val = float("1.77428") + mean = float("1.31556") + std = float("0.143187") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000400596") + max_val = float("0.0113603") + mean = float("0.00180946") + std = float("0.00164804") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.164541") + max_val = float("0.127897") + mean = float("-0.00423752") + std = float("0.0391887") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0620339") + max_val = float("0.0578527") + mean = float("-0.000609344") + std = float("0.0074149") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0053139") + max_val = float("0.0128966") + mean = float("-0.000148839") + std = float("0.00226529") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0108365") + max_val = float("0.0180944") + mean = float("-7.70563e-05") + std = float("0.0014898") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [88] + dtype = "float32" + min_val = float("0.826359") + max_val = float("0.837586") + mean = float("0.828071") + std = float("0.00217956") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [88, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0854456") + max_val = float("0.0873423") + mean = float("4.34375e-09") + std = float("0.0031191") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00526138") + max_val = float("0.0696216") + mean = float("0.0259227") + std = float("0.0132331") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.99865") + max_val = float("1.23747") + mean = float("1.1069") + std = float("0.0410692") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("8.35707e-05") + max_val = float("0.00597629") + mean = float("0.000723624") + std = float("0.000763811") + 
data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0442923") + max_val = float("0.00827867") + mean = float("-0.00966063") + std = float("0.00931597") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0461624") + max_val = float("0.063746") + mean = float("-0.000138651") + std = float("0.00306682") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00274369") + max_val = float("0.00511827") + mean = float("6.10625e-05") + std = float("0.00152895") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00184752") + max_val = float("0.0048019") + mean = float("3.76045e-06") + std = float("0.000597256") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0721721") + max_val = float("0.0404456") + mean = float("-0.000999238") + std = float("0.00397589") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.152983") + max_val = float("0.452749") + mean = float("0.229344") + std = float("0.100245") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00417") + max_val = float("1.40261") + mean = float("1.1866") + std = float("0.0603403") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000148108") + max_val = float("0.00569047") + mean = float("0.000833938") + std = float("0.000746994") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0987288") + max_val = float("0.0644616") + mean = float("-0.0146565") + std = float("0.0228018") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0528263") + max_val = float("0.037782") + mean = float("-0.000246446") + std = float("0.00296888") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00198673") + max_val = float("0.0108277") + mean = float("-1.89144e-05") + std = float("0.00104636") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00490867") + max_val = float("0.00769719") + mean = float("-1.63033e-05") + std = float("0.00053086") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [88] + dtype = "float32" + min_val = float("0.827794") + max_val = float("0.828556") + mean = float("0.828072") + std = float("0.000199979") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [88, 768, 3, 3] + dtype = "float32" + min_val = float("-0.00645056") + max_val = float("0.0120405") + 
mean = float("4.87489e-10") + std = float("0.000843696") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0143031") + max_val = float("0.0478323") + mean = float("0.0113513") + std = float("0.0104705") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.00867") + max_val = float("1.20113") + mean = float("1.06607") + std = float("0.0224781") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.23648e-05") + max_val = float("0.00119716") + mean = float("0.00013138") + std = float("9.50346e-05") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0210089") + max_val = float("0.00349173") + mean = float("-0.00472837") + std = float("0.00312975") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.034299") + max_val = float("0.0337449") + mean = float("-4.66048e-05") + std = float("0.00140342") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00390782") + max_val = float("0.00260919") + mean = float("7.12606e-05") + std = float("0.000831057") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00261376") + max_val = float("0.00228504") + mean = float("1.77784e-05") + std = float("0.000252276") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-0.015813") + max_val = float("0.00965711") + mean = float("-0.000546702") + std = float("0.00140653") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.110932") + max_val = float("0.199913") + mean = float("0.0934025") + std = float("0.0422427") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.00786") + max_val = float("1.25519") + mean = float("1.07879") + std = float("0.0261974") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("7.25456e-05") + max_val = float("0.00245344") + mean = float("0.000534885") + std = float("0.000318044") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0725999") + max_val = float("0.0495439") + mean = float("-0.0155315") + std = float("0.0150006") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0538655") + max_val = float("0.0252785") + mean = float("-0.000146665") + std = float("0.00153735") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00117685") + 
max_val = float("0.00393971") + mean = float("3.40689e-06") + std = float("0.000473697") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0124531") + max_val = float("0.02256") + mean = float("2.28217e-06") + std = float("0.00024775") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..9b36a0018 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +82e75ce23c7b46c514fc50a98c682d1cfca4b1645d371c225ab17772ef29a450 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py new file mode 100644 index 000000000..aba4e15ed --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 48384, 10] + dtype = "float32" + min_val = float("1.08574e-08") + max_val = float("0.85674") + mean = float("0.00225546") + std = float("0.010105") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 48384, 88] + dtype = "float32" + min_val = float("-3.34129") + max_val = float("13.1745") + mean = float("0.828078") + std = float("1.50377") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [48384, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1532.0") + mean = float("768.0") + std = float("443.391") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [48384, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py new file mode 100644 index 000000000..80a95179a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py @@ -0,0 +1,162 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 + + # pd_op.shape64: (3xi64) <- (1x-1x88xf32) + shape64_0 = paddle._C_ops.shape64(data_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, 
[1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("22"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x4x22xf32) <- (1x-1x88xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.softmax: (-1x-1x4x22xf32) <- (-1x-1x4x22xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 + + # pd_op.transpose: (-1x22x-1x4xf32) <- (-1x-1x4x22xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x22x-1x4xf32, 1x22x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.share_data_: (1x-1x10xf32) <- (1x-1x10xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + 
assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, + ) + + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py new file mode 100644 index 000000000..a3837d8b1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 22, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..e6d623fb0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +bf1f631da16bb2dc01a60c71c55618c40ceba9451302c9c325fd97977839b501 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py new file mode 100644 index 000000000..66d018686 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py @@ -0,0 +1,222 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.724553] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.710696] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.69274] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.697763] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.67767] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.628229] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.643942] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.633569] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.801205] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.652613] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.636874] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.631148] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.635341] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = 
[0.640054] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.755822] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.575326] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.59257] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.72331] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.033771") + max_val = float("0.0342897") + mean = float("-1.71997e-05") + std = float("0.0182992") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [3072] + dtype = "float32" + min_val = float("-0.000858009") + max_val = float("0.000895398") + mean = float("1.43686e-06") + std = float("0.000180851") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0324395") + max_val = float("0.0323104") + mean = float("-1.57215e-05") + std = float("0.0182981") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [3072] + dtype = "float32" + min_val = float("-0.000630237") + max_val = float("0.000514313") + mean = float("2.76087e-06") + std = float("0.000126903") + data = None + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321875") + max_val = float("0.0321786") + mean = float("-1.59553e-05") + std = float("0.0182975") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [3072] + dtype = "float32" + min_val = float("-0.000429784") + max_val = float("0.00042718") + mean = float("1.59216e-06") + std = float("8.8453e-05") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321313") + max_val = float("0.0321203") + mean = float("-1.62062e-05") + std = float("0.018297") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [3072] + dtype = "float32" + min_val = float("-0.000397408") + max_val = float("0.000488986") + mean = float("1.04643e-06") + std = float("8.30004e-05") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [1, 3, 1088, 1088] + dtype = "float32" + max_val = float("1.0") + mean = float("0.443477") + std = float("0.162527") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py new file mode 100644 index 000000000..55b437f95 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py @@ -0,0 +1,8874 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + 
parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + 
parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + 
parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + 
parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + ): + # pd_op.conv2d: (1x32x544x544xf32) <- (1x3x1088x1088xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_26, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_26, parameter_737 + + # pd_op.batch_norm_: (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (1x32x544x544xf32) <- (1x32x544x544xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (1x32x544x544xf32) <- (1x32x544x544xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (1x32x544x544xf32) <- (1x32x544x544xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x64x544x544xf32) <- (1x32x544x544xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, 
parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727 + + # pd_op.batch_norm_: (1x64x544x544xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x544x544xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (1x64x544x544xf32) <- (1x64x544x544xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (1x96x272x272xf32) <- (1x64x544x544xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_722, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (1x96x272x272xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x272x272xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (1x96x272x272xf32) <- (1x96x272x272xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x96x272x272xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x96x272x272xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_708, parameter_709, parameter_710, parameter_711 + + # 
pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 
48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_672, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_667, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([1x48x272x272xf32, 1x48x272x272xf32]) <- (1x48x272x272xf32, 1x48x272x272xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (1x96x272x272xf32) <- ([1x48x272x272xf32, 1x48x272x272xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: 
(2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (1x96x1x1xf32) <- (1x96x272x272xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (1x96x1x1xf32) <- (1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_662, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_661, full_int_array_1) + del parameter_661 + + # pd_op.add: (1x96x1x1xf32) <- (1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (1x96x1x1xf32) <- (1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (1x96x272x272xf32) <- (1x96x272x272xf32, 1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (1x128x272x272xf32) <- (1x96x272x272xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660 + + # pd_op.batch_norm_: (1x128x272x272xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x272x272xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (1x128x272x272xf32) <- (1x128x272x272xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (1x192x136x136xf32) <- (1x128x272x272xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_655, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x192x136x136xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_650, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + 
batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x192x136x136xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630 + + # pd_op.batch_norm_: (1x96x136x136xf32, 
96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + 
False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_11 = 
paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_580, 
[1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_564, 
+ parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_560, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_555, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([1x96x136x136xf32, 1x96x136x136xf32]) <- (1x96x136x136xf32, 1x96x136x136xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (1x192x136x136xf32) <- ([1x96x136x136xf32, 1x96x136x136xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (1x192x1x1xf32) <- (1x192x136x136xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_550, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_549, full_int_array_1) + del parameter_549 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + 
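# Note: the pooled mean -> 1x1 conv (with bias) -> hardsigmoid -> multiply +
# sequence here appears to implement a squeeze-and-excitation style channel +
# gate over the concatenated stage features, schematically: +
#     attn = hardsigmoid(conv1x1(mean(x, axis=[2, 3], keepdim=True)), 1/6, 0.5) +
#     x = x * attn +
# The same pooled gate recurs once per backbone stage in the blocks below. +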
hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (1x256x136x136xf32) <- (1x192x136x136xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548 + + # pd_op.batch_norm_: (1x256x136x136xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x136x136xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (1x256x136x136xf32) <- (1x256x136x136xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x256x136x136xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_543, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_538, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + 
batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_34 = 
paddle._C_ops.swish(add_20) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (1x192x68x68xf32, 
192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_56 = 
paddle._C_ops.conv2d( + swish_41, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_448, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (1x384x1x1xf32) <- (1x384x68x68xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_438, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_437, full_int_array_1) + del parameter_437 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (1x512x68x68xf32) <- (1x384x68x68xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (1x512x68x68xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x68x68xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (1x512x68x68xf32) <- (1x512x68x68xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x512x68x68xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_431, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + 
batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, 
None), + ) + del parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_386, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # 
pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_376, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (1x768x1x1xf32) <- (1x768x34x34xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_370, full_int_array_1) + del full_int_array_1, parameter_370 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (1x1024x34x34xf32) <- (1x768x34x34xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_369, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369 + + # pd_op.batch_norm_: (1x1024x34x34xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x34x34xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( 
+ batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (1x1024x34x34xf32) <- (1x1024x34x34xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.flatten: (1x1024x1156xf32) <- (1x1024x34x34xf32) + flatten_0 = paddle._C_ops.flatten(swish_55, 2, 3) + + # pd_op.transpose: (1x1156x1024xf32) <- (1x1024x1156xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("34"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (34xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32") + del full_2 + + # builtin.combine: ([34xf32, 34xf32]) <- (34xf32, 34xf32) + combine_4 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([34x34xf32, 34x34xf32]) <- ([34xf32, 34xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (34x34xf32, 34x34xf32) <- ([34x34xf32, 34x34xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32") + del full_1, full_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True) + del arange_1, full_5 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0) + del full_6, scale_0 + + # pd_op.full: (256xf32) <- () + full_7 = paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0) + del elementwise_pow_0, full_7 + + # pd_op.flatten: (1156xf32) <- (34x34xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (1156x1xf32) <- (1156xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + 
assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3) + del divide_0 + + # pd_op.matmul: (1156x256xf32) <- (1156x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (1156xf32) <- (34x34xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (1156x1xf32) <- (1156xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2) + del flatten_2, full_int_array_2 + + # pd_op.matmul: (1156x256xf32) <- (1156x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (1156x256xf32) <- (1156x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (1156x256xf32) <- (1156x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (1156x256xf32) <- (1156x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (1156x256xf32) <- (1156x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # builtin.combine: ([1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32]) <- (1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32) + combine_5 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (1156x1024xf32) <- ([1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.unsqueeze: (1x1156x1024xf32) <- (1156x1024xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_4, full_int_array_3) + del concat_4 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_40 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_18, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_19, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: 
(1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_40, slice_0, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_41 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [0, 0, 4, 256] + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_6 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_18, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_19, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_40, slice_2, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_55 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_56 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_57 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_58 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_59 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_60 = full_int_array_7 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_18, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_18 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_19, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_19 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_4 = 
paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_43 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_8 + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_73 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_74 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_75 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_76 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_77 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_78 = full_9 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, 0, 1024] + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_44 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_44, None, full_9, False, "upscale_in_train", 0, 
False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_44 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_45 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_46 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_46, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_47 = paddle._C_ops.add(matmul_9, parameter_357) + del parameter_357 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_49 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_20, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_21, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_49, slice_6, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_50 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_20, [1], 
full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_21, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_49, slice_8, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_51 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_20, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_20 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_21, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_21 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_52 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_354, False, False) + del parameter_354 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_53, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_53 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_54 = paddle._C_ops.add(layer_norm_3, 
dropout_10) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_54, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_55 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_55, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_56 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_56, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_56 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_57, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_58 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_22, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_23, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_58, slice_12, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_59 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_22, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_23, [0], full_int_array_4, full_int_array_6, 
[1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_58, slice_14, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_60 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_22, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_22 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_23, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_23 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_62 = paddle._C_ops.add(matmul_23, parameter_343) + del parameter_343 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_62, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_62 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( 
+ paddle._C_ops.layer_norm( + add_63, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_64 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_64, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_65 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_65, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_65 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_66, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_67 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_24, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_25, [0], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_3 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_67, slice_18, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_68 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_24, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_25, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_67, slice_20, False, False) + + # 
pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_69 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_24, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_24 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_25, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_25 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_70 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) + del full_int_array_8 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_71 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_71, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_71 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out 
if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_73 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_73, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) + del parameter_328 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_74 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_74, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_74 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_75, parameter_326, parameter_325, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (1x1024x1156xf32) <- (1x1156x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [1, 1024, 34, 34] + + # pd_op.reshape: (1x1024x34x34xf32) <- (1x1024x1156xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) + del full_int_array_9 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1024x34x34xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + reshape_20, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1024x34x34xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + reshape_20, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: 
(1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (1x384x34x34xf32) 
<- (1x384x34x34xf32) + swish_59 = paddle._C_ops.swish(add_76) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_61 = paddle._C_ops.swish(add_77) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [5, 5] + + # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_10, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [9, 9] + + # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_11, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # 
pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [13, 13] + + # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_12, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32) + combine_6 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (1x1536x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1536x34x34xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_5, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + 
conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_64 = paddle._C_ops.swish(add_78) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) + combine_7 = [swish_56, swish_64] + + # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_6, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (1x384x68x68xf32) <- (1x384x34x34xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x384x68x68xf32, 1x512x68x68xf32]) <- (1x384x68x68xf32, 
1x512x68x68xf32) + combine_8 = [nearest_interp_0, swish_45] + + # pd_op.concat: (1x896x68x68xf32) <- ([1x384x68x68xf32, 1x512x68x68xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x896x68x68xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_7, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x896x68x68xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_7, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + 
batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_70 = paddle._C_ops.swish(add_79) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 
192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_72 = paddle._C_ops.swish(add_80) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 
1x192x68x68xf32) + add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_74 = paddle._C_ops.swish(add_81) + + # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) + combine_9 = [swish_67, swish_74] + + # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_8, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (1x192x136x136xf32) <- (1x192x68x68xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x192x136x136xf32, 1x256x136x136xf32]) <- (1x192x136x136xf32, 1x256x136x136xf32) + combine_10 = [nearest_interp_1, swish_29] + + # pd_op.concat: (1x448x136x136xf32) <- ([1x192x136x136xf32, 1x256x136x136xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x448x136x136xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_9, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + 
parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x448x136x136xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_9, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + 
batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_80 = paddle._C_ops.swish(add_82) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_82 = 
paddle._C_ops.swish(add_83) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_84 = paddle._C_ops.swish(add_84) + + # builtin.combine: ([1x96x136x136xf32, 1x96x136x136xf32]) <- (1x96x136x136xf32, 1x96x136x136xf32) + combine_11 = [swish_77, swish_84] + + # pd_op.concat: (1x192x136x136xf32) <- ([1x96x136x136xf32, 1x96x136x136xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_10, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 
192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x136x136xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([1x192x68x68xf32, 1x384x68x68xf32]) <- (1x192x68x68xf32, 1x384x68x68xf32) + combine_12 = [swish_86, swish_75] + + # pd_op.concat: (1x576x68x68xf32) <- ([1x192x68x68xf32, 1x384x68x68xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x576x68x68xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_11, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x576x68x68xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_11, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out 
+ if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_90 = paddle._C_ops.swish(add_85) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + 
batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_92 = paddle._C_ops.swish(add_86) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, 
parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_94 = paddle._C_ops.swish(add_87) + + # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) + combine_13 = [swish_87, swish_94] + + # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_12, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x68x68xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([1x384x34x34xf32, 1x768x34x34xf32]) <- (1x384x34x34xf32, 1x768x34x34xf32) + combine_14 = [swish_96, swish_65] + + # pd_op.concat: (1x1152x34x34xf32) <- ([1x384x34x34xf32, 1x768x34x34xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1152x34x34xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_13, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1152x34x34xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_13, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + 
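# The ops around this point expand what appears to be one CSP-style stage of this PAN-like neck (a reading inferred from the shape annotations, not stated by the exporter): concat_13 (1x1152x34x34) feeds two parallel 1x1 conv+BN+swish branches of 384 channels each (conv2d_130 and conv2d_131); one branch then passes through three repeated units, each a 3x3 conv+BN+swish followed by a RepVGG-style pair of 3x3 and 1x1 conv+BN branches that are summed and passed through swish; the two branches are finally concatenated and fused by a 1x1 conv (conv2d_141). + # Illustrative, commented-out sketch only, assuming hypothetical conv_bn_swish / bn / conv helpers; the surrounding generated ops remain the authoritative form: + #     a = conv_bn_swish(concat_13, w_a, kernel=1, stride=1, pad=0) + #     b = conv_bn_swish(concat_13, w_b, kernel=1, stride=1, pad=0) + #     for w_3x3_a, w_3x3_b, w_1x1 in unit_weights:  # three such units in this stage + #         y = conv_bn_swish(b, w_3x3_a, kernel=3, stride=1, pad=1) + #         b = swish(bn(conv(y, w_3x3_b, pad=1)) + bn(conv(y, w_1x1, pad=0))) + #     out = conv_bn_swish(concat([a, b], axis=1), w_fuse, kernel=1, stride=1, pad=0) +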
swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_100 = paddle._C_ops.swish(add_88) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_102 = paddle._C_ops.swish(add_89) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, 
+ ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_104 = paddle._C_ops.swish(add_90) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) + combine_15 = [swish_97, swish_104] + + # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_15, full_0) + del combine_15 + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_45, + add_46, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_54, + add_55, + add_57, + add_58, + add_59, + add_60, + add_61, + add_63, + add_64, + add_66, + add_67, + add_68, + add_69, + add_7, + add_70, + add_72, + add_73, + add_75, + add_76, + add_77, + add_78, + add_79, + add_8, + add_80, + add_81, + add_82, + add_83, + add_84, + add_85, + add_86, + add_87, + add_88, + add_89, + add_9, + add_90, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_73, + 
assign_74, + assign_75, + assign_76, + assign_77, + assign_78, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + 
batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + 
batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + 
batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + 
batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_14, + concat_2, + concat_3, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + 
conv2d_99, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_8, + full_9, + full_int_array_0, + full_int_array_10, + full_int_array_11, + full_int_array_12, + full_int_array_4, + full_int_array_6, + full_int_array_7, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_11, + reshape_15, + reshape_19, + reshape_2, + reshape_20, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + transpose_0, + transpose_1, + transpose_10, 
+            transpose_11,
+            transpose_12,
+            transpose_13,
+            transpose_14,
+            transpose_15,
+            transpose_16,
+            transpose_17,
+            transpose_2,
+            transpose_3,
+            transpose_4,
+            transpose_5,
+            transpose_6,
+            transpose_7,
+            transpose_8,
+            transpose_9,
+            unsqueeze_3,
+        )
+
+        return swish_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py
new file mode 100644
index 000000000..7bb3e9a3b
--- /dev/null
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py
@@ -0,0 +1,8004 @@
+class Program_weight_tensor_parameter_0:
+    name = "parameter_0"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("-0.175875")
+    max_val = float("0.210823")
+    mean = float("0.0834695")
+    std = float("0.0566098")
+    data = None
+
+
+class Program_weight_tensor_parameter_1:
+    name = "parameter_1"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("0.939895")
+    max_val = float("1.29826")
+    mean = float("1.06397")
+    std = float("0.0312259")
+    data = None
+
+
+class Program_weight_tensor_parameter_2:
+    name = "parameter_2"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("0.00117681")
+    max_val = float("0.0493074")
+    mean = float("0.00765869")
+    std = float("0.0046166")
+    data = None
+
+
+class Program_weight_tensor_parameter_3:
+    name = "parameter_3"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("-0.13338")
+    max_val = float("0.0572232")
+    mean = float("-0.0286564")
+    std = float("0.0288427")
+    data = None
+
+
+class Program_weight_tensor_parameter_4:
+    name = "parameter_4"
+    shape = [768, 768, 1, 1]
+    dtype = "float32"
+    min_val = float("-0.0548133")
+    max_val = float("0.0388089")
+    mean = float("-0.000154658")
+    std = float("0.00249634")
+    data = None
+
+
+class Program_weight_tensor_parameter_5:
+    name = "parameter_5"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("-0.14169")
+    max_val = float("0.0305817")
+    mean = float("-0.0188052")
+    std = float("0.0234504")
+    data = None
+
+
+class Program_weight_tensor_parameter_6:
+    name = "parameter_6"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("0.945748")
+    max_val = float("1.04442")
+    mean = float("0.98666")
+    std = float("0.0105852")
+    data = None
+
+
+class Program_weight_tensor_parameter_7:
+    name = "parameter_7"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("0.000824752")
+    max_val = float("0.0183885")
+    mean = float("0.00496541")
+    std = float("0.0033442")
+    data = None
+
+
+class Program_weight_tensor_parameter_8:
+    name = "parameter_8"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("-0.0551455")
+    max_val = float("0.0628843")
+    mean = float("0.00283053")
+    std = float("0.0222857")
+    data = None
+
+
+class Program_weight_tensor_parameter_9:
+    name = "parameter_9"
+    shape = [384, 384, 1, 1]
+    dtype = "float32"
+    min_val = float("-0.0299324")
+    max_val = float("0.020664")
+    mean = float("2.29272e-05")
+    std = float("0.00192338")
+    data = None
+
+
+class Program_weight_tensor_parameter_10:
+    name = "parameter_10"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("-0.14169")
+    max_val = float("0.0305817")
+    mean = float("-0.0188052")
+    std = float("0.0234504")
+    data = None
+
+
+class Program_weight_tensor_parameter_11:
+    name = "parameter_11"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("0.968039")
+    max_val = float("1.13059")
+    mean = float("1.01544")
+    std = float("0.0171846")
+    data = None
+
+
+class Program_weight_tensor_parameter_12:
+    name = "parameter_12"
+    shape = [384]
dtype = "float32" + min_val = float("0.00204949") + max_val = float("0.0543081") + mean = float("0.00785702") + std = float("0.00475222") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.203105") + max_val = float("0.152445") + mean = float("-0.0430598") + std = float("0.036306") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.029908") + max_val = float("0.035511") + mean = float("-7.29867e-05") + std = float("0.00131195") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170219") + max_val = float("0.0209993") + mean = float("-0.0348873") + std = float("0.0279313") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975222") + max_val = float("1.12587") + mean = float("1.015") + std = float("0.0240805") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00543045") + max_val = float("0.191688") + mean = float("0.021541") + std = float("0.0159869") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.266668") + max_val = float("0.4144") + mean = float("-0.0370778") + std = float("0.0509328") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0331339") + max_val = float("0.0530855") + mean = float("-6.31513e-05") + std = float("0.00148047") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945357") + max_val = float("1.04501") + mean = float("0.988631") + std = float("0.00984229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000719695") + max_val = float("0.0175524") + mean = float("0.00510978") + std = float("0.00307552") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0846979") + max_val = float("0.0435371") + mean = float("-0.00255296") + std = float("0.0171291") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0267958") + max_val = float("0.025491") + mean = float("-5.37291e-05") + std = float("0.00203271") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959852") + max_val = float("1.10509") + mean = float("1.01609") + std = float("0.0177564") + data = None + + +class 
Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.0024927") + max_val = float("0.0541337") + mean = float("0.00984546") + std = float("0.0050589") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.215525") + max_val = float("0.32115") + mean = float("-0.0500705") + std = float("0.0449912") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0363929") + max_val = float("0.0514823") + mean = float("-8.41934e-05") + std = float("0.00132563") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896627") + max_val = float("0.0192839") + mean = float("-0.0360783") + std = float("0.0194692") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933291") + max_val = float("1.11466") + mean = float("1.01167") + std = float("0.026589") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00555425") + max_val = float("0.0674583") + mean = float("0.0182706") + std = float("0.00926606") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.23163") + max_val = float("0.124775") + mean = float("-0.0274763") + std = float("0.0558182") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0397048") + max_val = float("0.0499731") + mean = float("-5.43157e-05") + std = float("0.00151173") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929383") + max_val = float("1.02791") + mean = float("0.98704") + std = float("0.0110296") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00126812") + max_val = float("0.0117075") + mean = float("0.00454736") + std = float("0.00186229") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0553816") + max_val = float("0.0350888") + mean = float("-0.00843188") + std = float("0.0133692") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0386337") + max_val = float("0.0282119") + mean = float("-0.000152706") + std = float("0.00204597") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.981354") + max_val = float("1.10683") + mean = 
float("1.01834") + std = float("0.0222205") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00525993") + max_val = float("0.0393454") + mean = float("0.0117862") + std = float("0.00495311") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.189916") + max_val = float("0.089502") + mean = float("-0.0267907") + std = float("0.0350026") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0360859") + max_val = float("0.0633791") + mean = float("-4.66788e-05") + std = float("0.00138059") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.107113") + max_val = float("0.0239382") + mean = float("-0.0375215") + std = float("0.0214567") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944795") + max_val = float("1.11465") + mean = float("1.01186") + std = float("0.0277861") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.0055232") + max_val = float("0.0636068") + mean = float("0.0154489") + std = float("0.00777637") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.154281") + max_val = float("0.124624") + mean = float("-0.04841") + std = float("0.0507409") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0279282") + max_val = float("0.0439271") + mean = float("-7.87759e-05") + std = float("0.00153817") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.10674") + max_val = float("0.046738") + mean = float("-0.026306") + std = float("0.0154157") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.973756") + max_val = float("1.08653") + mean = float("1.00903") + std = float("0.0171142") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00240361") + max_val = float("0.0172008") + mean = float("0.00539159") + std = float("0.0019353") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.100425") + max_val = float("0.0867517") + mean = float("-0.0196731") + std = float("0.0269084") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0619005") + max_val = float("0.0744808") + mean = float("-8.91799e-05") + std = float("0.00230778") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0424904") + max_val = float("0.0160654") + mean = float("-0.00899509") + std = float("0.00840798") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = 
float("0.959519") + max_val = float("1.05137") + mean = float("1.00788") + std = float("0.0115961") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00126033") + max_val = float("0.03023") + mean = float("0.00451665") + std = float("0.00218518") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.110881") + max_val = float("0.0925399") + mean = float("-0.0236355") + std = float("0.0234762") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0245473") + max_val = float("0.0425909") + mean = float("-0.000112646") + std = float("0.00208633") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0529748") + max_val = float("0.0059538") + mean = float("-0.0166275") + std = float("0.00987957") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988678") + max_val = float("1.10388") + mean = float("1.01957") + std = float("0.0168754") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00462734") + max_val = float("0.0682865") + mean = float("0.0144132") + std = float("0.00829381") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.443847") + max_val = float("0.193358") + mean = float("-0.047384") + std = float("0.0711895") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0212973") + max_val = float("0.0335283") + mean = float("-3.20307e-05") + std = float("0.00117985") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.222314") + max_val = float("0.492622") + mean = float("0.217344") + std = float("0.124262") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919258") + max_val = float("1.4834") + mean = float("1.14101") + std = float("0.0738465") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00377408") + max_val = float("0.0757958") + mean = float("0.0118179") + std = float("0.00580441") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.129308") + max_val = float("0.0593208") + mean = float("-0.0372124") + std = float("0.0301462") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0788092") + max_val = float("0.0718439") + mean = float("-0.000420206") + std = float("0.00505348") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_71: + name = 
"parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.841187") + max_val = float("1.05089") + mean = float("0.972721") + std = float("0.0237726") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00135964") + max_val = float("0.0214683") + mean = float("0.00615868") + std = float("0.00390446") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0635033") + max_val = float("0.0921305") + mean = float("-0.00577622") + std = float("0.0200987") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0496361") + max_val = float("0.0295852") + mean = float("-0.000179209") + std = float("0.00381061") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.72984") + max_val = float("1.12263") + mean = float("1.02218") + std = float("0.0372419") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00534163") + max_val = float("0.0562738") + mean = float("0.0136988") + std = float("0.0062901") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.219364") + max_val = float("0.10138") + mean = float("-0.0376619") + std = float("0.0434336") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0430374") + max_val = float("0.049516") + mean = float("-0.000124454") + std = float("0.00256786") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.191344") + max_val = float("0.0444996") + mean = float("-0.057942") + std = float("0.0491062") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897737") + max_val = float("1.18792") + mean = float("1.01539") + std = float("0.0484046") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.0105665") + max_val = float("0.202383") + mean = float("0.0352004") + std = float("0.0227021") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.295941") + max_val = float("0.513277") + mean = float("-0.0405879") + std = float("0.0633076") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.047378") + max_val = float("0.0557186") + mean = float("-0.000110452") + std = float("0.00285571") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854012") + mean = float("-0.064207") + std = float("0.0334262") + data = None + 
+ +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922153") + max_val = float("1.04653") + mean = float("0.973445") + std = float("0.017956") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00111369") + max_val = float("0.0150475") + mean = float("0.00521977") + std = float("0.00256485") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0705634") + max_val = float("0.0364988") + mean = float("-0.00792433") + std = float("0.0151559") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0386531") + max_val = float("0.0308172") + mean = float("-0.000343288") + std = float("0.00384278") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854012") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.968104") + max_val = float("1.14778") + mean = float("1.02415") + std = float("0.0294364") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00435298") + max_val = float("0.0469628") + mean = float("0.0119993") + std = float("0.00625753") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.186388") + max_val = float("0.140954") + mean = float("-0.0380294") + std = float("0.0385226") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0471114") + max_val = float("0.0550151") + mean = float("-0.00014067") + std = float("0.00262923") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.188926") + max_val = float("0.062054") + mean = float("-0.0755775") + std = float("0.0405971") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.880419") + max_val = float("1.21878") + mean = float("1.01465") + std = float("0.050849") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00811769") + max_val = float("0.0673723") + mean = float("0.021795") + std = float("0.0107691") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.117015") + max_val = float("0.0523492") + mean = float("-0.0248809") + std = float("0.0343347") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0427984") + max_val = float("0.0615634") + mean = float("-0.000111452") + std = float("0.00299174") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962433") + mean = 
float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900428") + max_val = float("1.02666") + mean = float("0.975123") + std = float("0.0229582") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00171033") + max_val = float("0.0153508") + mean = float("0.00574446") + std = float("0.00198723") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0390528") + max_val = float("0.0467988") + mean = float("-0.0106683") + std = float("0.0171761") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0436521") + max_val = float("0.0635251") + mean = float("-0.000488447") + std = float("0.00437095") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962433") + mean = float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947654") + max_val = float("1.11111") + mean = float("1.02112") + std = float("0.0306157") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00720341") + max_val = float("0.0581605") + mean = float("0.0166222") + std = float("0.00832442") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.129987") + max_val = float("0.0597114") + mean = float("-0.0235942") + std = float("0.0336059") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0485052") + max_val = float("0.0562451") + mean = float("-9.74246e-05") + std = float("0.00278606") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234305") + max_val = float("0.081368") + mean = float("-0.0947175") + std = float("0.0463051") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.886145") + max_val = float("1.20472") + mean = float("1.01666") + std = float("0.0540248") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00887124") + max_val = float("0.100013") + mean = float("0.0211819") + std = float("0.0128206") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.180694") + max_val = float("0.0963527") + mean = float("-0.0400641") + std = float("0.0435422") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0410156") + max_val = float("0.0751964") + mean = float("-0.000134498") + std = float("0.0032483") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] 
+ dtype = "float32" + min_val = float("-0.199948") + max_val = float("0.0153484") + mean = float("-0.0662884") + std = float("0.031178") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925493") + max_val = float("1.15259") + mean = float("1.01328") + std = float("0.0383643") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.0044011") + max_val = float("0.0246488") + mean = float("0.00855375") + std = float("0.00310702") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0887579") + max_val = float("0.122375") + mean = float("-0.0224875") + std = float("0.0291868") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0628757") + max_val = float("0.0645969") + mean = float("-0.000195496") + std = float("0.00467829") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.0999632") + max_val = float("0.037411") + mean = float("-0.0139724") + std = float("0.0203964") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.923856") + max_val = float("1.19918") + mean = float("1.00277") + std = float("0.025885") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00336859") + max_val = float("0.0385248") + mean = float("0.00844927") + std = float("0.00421125") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0725677") + max_val = float("0.0457479") + mean = float("-0.0168628") + std = float("0.0213133") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0557051") + max_val = float("0.0726466") + mean = float("-0.000148835") + std = float("0.00416084") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.15908") + max_val = float("-0.000555601") + mean = float("-0.038944") + std = float("0.0217257") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921159") + max_val = float("1.24866") + mean = float("1.00725") + std = float("0.0301467") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00433237") + max_val = float("0.0626783") + mean = float("0.0160168") + std = float("0.00846658") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.396741") + max_val = float("0.33475") + mean = float("-0.0359227") + std = float("0.0957987") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350237") + max_val = float("0.0471653") + mean = float("-3.44387e-05") + std = float("0.00253963") + data = None 
+ + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.552248") + max_val = float("1.14732") + mean = float("0.355898") + std = float("0.346059") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.541472") + max_val = float("1.57746") + mean = float("1.15098") + std = float("0.184373") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00561378") + max_val = float("0.117481") + mean = float("0.0300174") + std = float("0.0177759") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.181834") + max_val = float("0.204641") + mean = float("-0.0498559") + std = float("0.0488345") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.140077") + max_val = float("0.117821") + mean = float("-0.0010577") + std = float("0.0117759") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.00944132") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.762871") + max_val = float("1.23462") + mean = float("0.948542") + std = float("0.0712293") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00298878") + max_val = float("0.0421928") + mean = float("0.0124331") + std = float("0.00821157") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0584852") + max_val = float("0.0908825") + mean = float("-0.0135007") + std = float("0.024126") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0753814") + max_val = float("0.0571343") + mean = float("-0.00127258") + std = float("0.00926422") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.00944132") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.505007") + max_val = float("1.2709") + mean = float("1.02954") + std = float("0.0962551") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00864011") + max_val = float("0.0819817") + mean = float("0.029707") + std = float("0.0154312") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.235686") + max_val = float("0.133508") + mean = float("-0.023179") + std = float("0.060166") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0934252") + max_val = float("0.0953084") + mean = 
float("-0.000117725") + std = float("0.00631181") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.703685") + max_val = float("0.495421") + mean = float("-0.112778") + std = float("0.198104") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.723217") + max_val = float("1.7117") + mean = float("0.995187") + std = float("0.133891") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.0126242") + max_val = float("0.189013") + mean = float("0.0406096") + std = float("0.0302792") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.211007") + max_val = float("0.139458") + mean = float("-0.0274803") + std = float("0.0625687") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0919405") + max_val = float("0.0707093") + mean = float("-0.000470706") + std = float("0.00699141") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.364151") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960161") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.626997") + max_val = float("1.01953") + mean = float("0.906483") + std = float("0.0555602") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00320113") + max_val = float("0.0231713") + mean = float("0.0111523") + std = float("0.00438468") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0654375") + max_val = float("0.0381209") + mean = float("-0.00860304") + std = float("0.0165133") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0710343") + max_val = float("0.0593522") + mean = float("-0.00106867") + std = float("0.00947732") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.364151") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960161") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.811163") + max_val = float("1.15777") + mean = float("1.02225") + std = float("0.060594") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0112523") + max_val = float("0.110271") + mean = float("0.0288634") + std = float("0.0207594") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.163525") + max_val = float("0.0372296") + mean = float("-0.0369433") + std = float("0.0331166") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.0811922") + max_val = float("0.0768953") + mean = float("-0.000466623") + std = float("0.00651757") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.486488") + max_val = float("0.169402") + mean = float("-0.16699") + std = float("0.131221") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.77745") + max_val = float("1.29252") + mean = float("0.963023") + std = float("0.0981107") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0097702") + max_val = float("0.108736") + mean = float("0.0240622") + std = float("0.0138876") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.150372") + max_val = float("0.0656242") + mean = float("0.00931518") + std = float("0.0377034") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0993825") + max_val = float("0.0757514") + mean = float("-0.000423273") + std = float("0.00766977") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.065165") + mean = float("-0.168145") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.722939") + max_val = float("1.0022") + mean = float("0.918838") + std = float("0.0531756") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00758991") + max_val = float("0.0378697") + mean = float("0.0164332") + std = float("0.00578739") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0561772") + max_val = float("0.0397966") + mean = float("-0.0196014") + std = float("0.0187438") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.103953") + max_val = float("0.0646149") + mean = float("-0.00221242") + std = float("0.0110162") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.065165") + mean = float("-0.168145") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.766539") + max_val = float("1.15353") + mean = float("0.982409") + std = float("0.0579773") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0171857") + max_val = float("0.220166") + mean = float("0.0443842") + std = float("0.0319483") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.196226") + max_val = float("0.0865161") + mean = float("-0.0155778") + std = float("0.0409091") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + 
shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0992682") + max_val = float("0.0973879") + mean = float("-0.000248799") + std = float("0.00741391") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.564609") + max_val = float("0.347562") + mean = float("-0.179116") + std = float("0.173215") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764459") + max_val = float("1.33669") + mean = float("0.954532") + std = float("0.110883") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0145544") + max_val = float("0.11097") + mean = float("0.0319708") + std = float("0.0188345") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.17302") + max_val = float("0.269018") + mean = float("-0.0215072") + std = float("0.0939673") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.142383") + max_val = float("0.117263") + mean = float("-0.000229517") + std = float("0.00873001") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.625413") + max_val = float("0.597772") + mean = float("-0.0821868") + std = float("0.254375") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.647479") + max_val = float("1.22747") + mean = float("0.866594") + std = float("0.1146") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0115736") + max_val = float("0.0790864") + mean = float("0.0255985") + std = float("0.0116053") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.112932") + max_val = float("0.0913184") + mean = float("-0.0111621") + std = float("0.0404469") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.14903") + max_val = float("0.149062") + mean = float("-0.000519099") + std = float("0.0115778") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0986349") + max_val = float("0.227763") + mean = float("0.0619239") + std = float("0.0545689") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.703928") + max_val = float("1.12525") + mean = float("0.932492") + std = float("0.0634652") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00519295") + max_val = float("0.0599625") + mean = float("0.0119181") + std = float("0.00693821") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0889976") + max_val = float("0.164161") + mean = float("-0.017344") + std = float("0.0389571") + data = None + + +class 
Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0952125") + max_val = float("0.110914") + mean = float("-0.000272416") + std = float("0.00775169") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.295367") + max_val = float("0.199876") + mean = float("-0.065903") + std = float("0.0695813") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.670697") + max_val = float("1.45276") + mean = float("0.885134") + std = float("0.0783825") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00810165") + max_val = float("0.127755") + mean = float("0.0227973") + std = float("0.0122998") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.147256") + max_val = float("0.0479519") + mean = float("-0.0364953") + std = float("0.0354482") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0959126") + max_val = float("0.117328") + mean = float("-0.000597241") + std = float("0.00788359") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.201782") + max_val = float("0.241811") + mean = float("-0.0670364") + std = float("0.0416536") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.873178") + max_val = float("1.54065") + mean = float("1.01926") + std = float("0.0632841") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00754976") + max_val = float("0.0799005") + mean = float("0.015547") + std = float("0.00760761") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.335306") + max_val = float("0.119584") + mean = float("-0.0574114") + std = float("0.0470951") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.104641") + max_val = float("0.104081") + mean = float("-0.000725337") + std = float("0.00722264") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590593") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884903") + max_val = float("0.991186") + mean = float("0.949253") + std = float("0.016433") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00345864") + max_val = float("0.0250397") + mean = float("0.0100895") + std = float("0.00362139") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0784978") + max_val = 
float("0.0700745") + mean = float("-0.0238174") + std = float("0.0311932") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0569075") + max_val = float("0.0369732") + mean = float("-0.000733351") + std = float("0.00540254") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590593") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.945936") + max_val = float("1.03267") + mean = float("0.988143") + std = float("0.0166204") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0155344") + max_val = float("0.0844419") + mean = float("0.0339711") + std = float("0.012502") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.176762") + max_val = float("0.15265") + mean = float("-0.0230656") + std = float("0.0601937") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444131") + max_val = float("0.0760357") + mean = float("-7.02524e-05") + std = float("0.00300584") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.217095") + max_val = float("-0.00148108") + mean = float("-0.0741376") + std = float("0.0354109") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939031") + max_val = float("1.15417") + mean = float("1.02943") + std = float("0.0431658") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0364266") + max_val = float("0.231139") + mean = float("0.0631271") + std = float("0.0206699") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.262217") + max_val = float("0.304208") + mean = float("-0.0426428") + std = float("0.0718369") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0622017") + max_val = float("0.0626879") + mean = float("-0.000102158") + std = float("0.00367047") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.196617") + max_val = float("-0.00995737") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.94411") + max_val = float("1.04693") + mean = float("0.987726") + std = float("0.0137706") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00228676") + max_val = float("0.00961601") + mean = float("0.00480728") + std = float("0.00123428") + data = None + + +class Program_weight_tensor_parameter_218: + name = 
"parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0953901") + max_val = float("0.0389215") + mean = float("-0.025087") + std = float("0.0209448") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0313923") + max_val = float("0.0416125") + mean = float("-0.000809335") + std = float("0.00570058") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.196617") + max_val = float("-0.00995737") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953711") + max_val = float("1.11463") + mean = float("1.00472") + std = float("0.0265116") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0101684") + max_val = float("0.0483375") + mean = float("0.0181984") + std = float("0.00551311") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.187973") + max_val = float("0.143836") + mean = float("-0.0474657") + std = float("0.0465909") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0484376") + max_val = float("0.0812032") + mean = float("-0.000164179") + std = float("0.00306328") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.232846") + max_val = float("-0.0185216") + mean = float("-0.0943343") + std = float("0.040046") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.946521") + max_val = float("1.19181") + mean = float("1.02411") + std = float("0.0460177") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0361899") + max_val = float("0.141548") + mean = float("0.0649197") + std = float("0.0200832") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.350006") + max_val = float("0.262728") + mean = float("-0.0865782") + std = float("0.0989383") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0611519") + max_val = float("0.0870387") + mean = float("-0.000165155") + std = float("0.00384626") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.00333791") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932342") + max_val = float("1.07188") + mean = float("0.99857") + std = float("0.0218607") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.0020288") + max_val = float("0.00959064") + mean = float("0.0040899") + std = 
float("0.0011474") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0826953") + max_val = float("0.0992723") + mean = float("-0.0125266") + std = float("0.0204973") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348635") + max_val = float("0.0478139") + mean = float("-0.000426004") + std = float("0.00642907") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.0033379") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.936172") + max_val = float("1.11491") + mean = float("0.992553") + std = float("0.0259462") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.0092625") + max_val = float("0.0464439") + mean = float("0.0187122") + std = float("0.00577318") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.280981") + max_val = float("0.146793") + mean = float("-0.0420046") + std = float("0.0462309") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372398") + max_val = float("0.0656079") + mean = float("-0.000164107") + std = float("0.00303882") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.289028") + max_val = float("0.0181015") + mean = float("-0.109759") + std = float("0.0400942") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.943873") + max_val = float("1.25886") + mean = float("1.02651") + std = float("0.0418277") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0146559") + max_val = float("0.0682576") + mean = float("0.029378") + std = float("0.009291") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.381607") + max_val = float("0.108223") + mean = float("-0.0546604") + std = float("0.0618861") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566363") + max_val = float("0.0721231") + mean = float("-0.000213222") + std = float("0.00432259") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257034") + max_val = float("-0.0134243") + mean = float("-0.121787") + std = float("0.0441916") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916939") + max_val = float("1.13523") + mean = float("1.02431") + std = float("0.042227") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = 
float("0.00558617") + max_val = float("0.0215959") + mean = float("0.0106695") + std = float("0.00291488") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.121693") + max_val = float("0.105152") + mean = float("0.0154863") + std = float("0.0289093") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0812553") + max_val = float("0.103824") + mean = float("-0.000190188") + std = float("0.00606084") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.176608") + max_val = float("0.214363") + mean = float("-0.00723538") + std = float("0.0506647") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.951166") + max_val = float("1.21791") + mean = float("1.05549") + std = float("0.0498194") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00700942") + max_val = float("0.0590357") + mean = float("0.0141953") + std = float("0.00517131") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0747975") + max_val = float("0.0811747") + mean = float("-0.00053402") + std = float("0.0274613") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.055207") + max_val = float("0.102723") + mean = float("-0.000223052") + std = float("0.00619518") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249775") + max_val = float("-0.0568629") + mean = float("-0.125062") + std = float("0.0336773") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.814907") + max_val = float("1.01643") + mean = float("0.909518") + std = float("0.0258168") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.0100937") + max_val = float("0.0695395") + mean = float("0.022801") + std = float("0.00921208") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.146181") + max_val = float("0.110285") + mean = float("-0.0346603") + std = float("0.0383019") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0364868") + max_val = float("0.0339674") + mean = float("-0.000277799") + std = float("0.00472355") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104276") + max_val = float("0.0723922") + mean = float("-0.0568764") + std = float("0.0153315") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.9523") + max_val = float("1.1435") + mean = float("1.02091") + std = float("0.0210274") + data = None + + +class 
Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00433515") + max_val = float("0.0356021") + mean = float("0.00969406") + std = float("0.00346217") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.103914") + max_val = float("0.111571") + mean = float("-0.0347286") + std = float("0.0270313") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0581812") + max_val = float("0.113051") + mean = float("-0.000304548") + std = float("0.00402831") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158166") + max_val = float("0.0744681") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.888577") + max_val = float("1.07465") + mean = float("0.982117") + std = float("0.0132258") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00600496") + max_val = float("0.0953219") + mean = float("0.0199657") + std = float("0.00942153") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0681594") + max_val = float("0.0604839") + mean = float("-0.00585837") + std = float("0.0270223") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0396804") + max_val = float("0.073742") + mean = float("-7.22725e-05") + std = float("0.00350095") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158166") + max_val = float("0.0744681") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.879914") + max_val = float("1.07681") + mean = float("0.993922") + std = float("0.0123427") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0282422") + max_val = float("0.756467") + mean = float("0.146192") + std = float("0.0664623") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.276936") + max_val = float("0.156867") + mean = float("-0.0841025") + std = float("0.0859055") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0424878") + max_val = float("0.0475734") + mean = float("-0.000126902") + std = float("0.00130674") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0801146") + max_val = float("0.116771") + mean = float("-0.0189931") + std = float("0.0160256") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.920205") + max_val = 
float("1.16667") + mean = float("1.01504") + std = float("0.0246966") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0223031") + max_val = float("0.220359") + mean = float("0.0742131") + std = float("0.0330541") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.235368") + max_val = float("0.220698") + mean = float("-0.0231606") + std = float("0.0793647") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274578") + max_val = float("0.0359223") + mean = float("-3.21913e-05") + std = float("0.00171791") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0739505") + max_val = float("0.0209991") + mean = float("-0.0234999") + std = float("0.0134887") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946312") + max_val = float("1.16798") + mean = float("1.01467") + std = float("0.0273905") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0666339") + max_val = float("0.525489") + mean = float("0.192642") + std = float("0.0813326") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.5811") + max_val = float("1.58853") + mean = float("0.0464615") + std = float("0.567191") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0467316") + max_val = float("0.0575595") + mean = float("8.55437e-05") + std = float("0.0030071") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183804") + max_val = float("0.0258619") + mean = float("-0.00144525") + std = float("0.00680648") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969538") + max_val = float("1.06054") + mean = float("0.993834") + std = float("0.0122522") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00292493") + max_val = float("0.0159254") + mean = float("0.00705807") + std = float("0.00238091") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.0970828") + max_val = float("0.0620531") + mean = float("-0.0420728") + std = float("0.0244223") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0333324") + max_val = float("0.0411053") + mean = float("-0.000526762") + std = float("0.00328183") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183804") + max_val = float("0.0258619") + mean = float("-0.00144525") + std = float("0.00680648") + data = None + + +class Program_weight_tensor_parameter_291: + name = 
"parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.972046") + max_val = float("1.08568") + mean = float("1.00364") + std = float("0.0181342") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0169031") + max_val = float("0.138508") + mean = float("0.0431682") + std = float("0.0166625") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.322591") + max_val = float("0.0931467") + mean = float("-0.131452") + std = float("0.0636301") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0285775") + max_val = float("0.0755074") + mean = float("-0.000191474") + std = float("0.0013728") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0498105") + max_val = float("0.00884065") + mean = float("-0.00838186") + std = float("0.00779167") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.953878") + max_val = float("1.13497") + mean = float("1.01253") + std = float("0.0201047") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.072012") + max_val = float("0.425232") + mean = float("0.171605") + std = float("0.0477212") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.24821") + max_val = float("0.9224") + mean = float("-0.241954") + std = float("0.272039") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.024251") + max_val = float("0.0585297") + mean = float("-0.000141777") + std = float("0.00163037") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984179") + max_val = float("1.03462") + mean = float("0.999922") + std = float("0.00715393") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00227712") + max_val = float("0.00995773") + mean = float("0.00397087") + std = float("0.00113598") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0776737") + max_val = float("0.150078") + mean = float("-0.0200997") + std = float("0.0256007") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0209147") + max_val = float("0.0327192") + mean = float("-0.000264199") + std = float("0.00284316") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = 
float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.982136") + max_val = float("1.06749") + mean = float("1.00454") + std = float("0.0126701") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00988707") + max_val = float("0.074172") + mean = float("0.0261233") + std = float("0.00883817") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.234406") + max_val = float("0.373645") + mean = float("-0.0733953") + std = float("0.0700589") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0111228") + max_val = float("0.0376454") + mean = float("-0.000113878") + std = float("0.00115243") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0529908") + max_val = float("0.00370586") + mean = float("-0.0207007") + std = float("0.00870238") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.976061") + max_val = float("1.08549") + mean = float("1.01199") + std = float("0.0159983") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0127448") + max_val = float("0.0747085") + mean = float("0.0330308") + std = float("0.00982086") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.182538") + max_val = float("0.229487") + mean = float("-0.0382627") + std = float("0.0546861") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0155426") + max_val = float("0.0250033") + mean = float("-6.09821e-05") + std = float("0.00159019") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699578") + max_val = float("0.0213472") + mean = float("-0.0334829") + std = float("0.0126426") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981937") + max_val = float("1.05593") + mean = float("1.0134") + std = float("0.0107706") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00885101") + max_val = float("0.0351366") + mean = float("0.0147251") + std = float("0.00337135") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.11759") + max_val = float("0.125101") + mean = float("-0.0114268") + std = float("0.0398999") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0187213") + max_val = float("0.0462026") + mean = float("-0.000204289") + std = float("0.00328169") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val 
= float("-0.024099") + max_val = float("0.0209723") + mean = float("-0.000328398") + std = float("0.00796388") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994048") + max_val = float("1.08372") + mean = float("1.04108") + std = float("0.0136738") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0117538") + max_val = float("0.0620667") + mean = float("0.0197522") + std = float("0.00513232") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.154192") + max_val = float("0.134319") + mean = float("-0.0110652") + std = float("0.0497964") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0381973") + max_val = float("0.0298107") + mean = float("-0.00023944") + std = float("0.00387698") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.19596e-10") + max_val = float("2.57347e-10") + mean = float("-6.94228e-12") + std = float("8.15169e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.826159") + max_val = float("0.830526") + mean = float("0.828072") + std = float("0.00038843") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184725") + max_val = float("0.0186349") + mean = float("3.29491e-06") + std = float("0.0105958") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186694") + max_val = float("0.0186323") + mean = float("-3.09482e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258373") + max_val = float("0.0258488") + mean = float("-0.000490033") + std = float("0.0147842") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261231") + max_val = float("0.0262344") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000644078") + max_val = float("0.000416122") + mean = float("1.0367e-06") + std = float("0.000160918") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.825075") + max_val = float("0.831152") + mean = float("0.828074") + std = float("0.000498935") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.000571568") + max_val = float("0.000431714") + mean = float("-6.059e-07") + std = float("0.000151099") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0452304") + max_val = float("0.0451715") + mean = float("2.40342e-05") + std = float("0.0258606") + 
data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.000495841") + max_val = float("0.000502198") + mean = float("2.39512e-05") + std = float("0.000158431") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.825239") + max_val = float("0.831385") + mean = float("0.8281") + std = float("0.000479393") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype = "float32" + min_val = float("-0.0182544") + max_val = float("0.0183953") + mean = float("1.83012e-06") + std = float("0.0105888") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185876") + max_val = float("0.0186055") + mean = float("-3.09823e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258719") + max_val = float("0.025874") + mean = float("-0.00048855") + std = float("0.0147851") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0260955") + max_val = float("0.0261499") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000468908") + max_val = float("0.000411959") + mean = float("2.0632e-06") + std = float("0.000140162") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.825683") + max_val = float("0.831193") + mean = float("0.828073") + std = float("0.000448511") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000528411") + max_val = float("0.000383675") + mean = float("2.84251e-06") + std = float("0.000141636") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0450293") + max_val = float("0.0450631") + mean = float("2.40173e-05") + std = float("0.0258607") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000544272") + max_val = float("0.000596298") + mean = float("2.42076e-05") + std = float("0.000181497") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.825946") + max_val = float("0.831225") + mean = float("0.828119") + std = float("0.000435131") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184487") + max_val = float("0.0183801") + mean = float("4.31404e-06") + std = float("0.0105859") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185587") + max_val = float("0.0185999") + mean = float("-2.97794e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + 
dtype = "float32" + min_val = float("-0.0259392") + max_val = float("0.025878") + mean = float("-0.000488744") + std = float("0.014786") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261446") + max_val = float("0.0261367") + mean = float("-1.26001e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000525158") + max_val = float("0.000569597") + mean = float("1.84284e-06") + std = float("0.000180024") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.826325") + max_val = float("0.831088") + mean = float("0.828071") + std = float("0.00042233") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000560432") + max_val = float("0.000596894") + mean = float("2.2615e-06") + std = float("0.00018498") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0451116") + max_val = float("0.0451354") + mean = float("2.40528e-05") + std = float("0.0258608") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000823759") + max_val = float("0.000904078") + mean = float("2.92117e-05") + std = float("0.000277537") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" + min_val = float("0.826283") + max_val = float("0.83082") + mean = float("0.828142") + std = float("0.000430459") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.0185659") + max_val = float("0.0186155") + mean = float("4.15762e-06") + std = float("0.0105906") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186584") + max_val = float("0.0186457") + mean = float("-3.0236e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.0260158") + max_val = float("0.0259107") + mean = float("-0.000488165") + std = float("0.0147856") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.026139") + max_val = float("0.026125") + mean = float("-1.26002e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.000913026") + max_val = float("0.000860109") + mean = float("1.52616e-06") + std = float("0.000286932") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.826227") + max_val = float("0.830736") + mean = float("0.828069") + std = float("0.000440282") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + min_val = float("-0.00089386") + max_val = float("0.000983855") + mean = 
float("2.69912e-06") + std = float("0.000279129") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0456631") + max_val = float("0.0456484") + mean = float("2.40399e-05") + std = float("0.0258625") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [1024] + dtype = "float32" + min_val = float("-3.75937") + max_val = float("-0.734") + mean = float("-2.18719") + std = float("0.428746") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [1024] + dtype = "float32" + min_val = float("1.61944") + max_val = float("4.44114") + mean = float("3.08041") + std = float("0.254214") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [1024] + dtype = "float32" + min_val = float("0.00515514") + max_val = float("0.0275054") + mean = float("0.00882973") + std = float("0.00191584") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [1024] + dtype = "float32" + min_val = float("-0.173492") + max_val = float("0.132414") + mean = float("-0.0625274") + std = float("0.0318422") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0420016") + max_val = float("0.0672891") + mean = float("-0.000434506") + std = float("0.00419984") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [768] + dtype = "float32" + min_val = float("-0.0144958") + max_val = float("0.00204154") + mean = float("-0.000784991") + std = float("0.00208566") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0809974") + max_val = float("0.144837") + mean = float("-0.000290719") + std = float("0.0016779") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-1.77404") + max_val = float("0.318904") + mean = float("-0.31075") + std = float("0.291253") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.188368") + max_val = float("1.82104") + mean = float("0.60964") + std = float("0.262596") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("7.69323e-05") + max_val = float("0.00105931") + mean = float("0.000262139") + std = float("0.000132205") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0656167") + max_val = float("0.0776953") + mean = float("0.0239193") + std = float("0.0176294") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020871") + max_val = float("0.0273244") + mean = float("-0.000414716") + std = float("0.00284754") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-1.77405") + max_val = float("0.319251") + mean = float("-0.310681") + std = float("0.291275") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + 
shape = [384] + dtype = "float32" + min_val = float("0.335122") + max_val = float("2.60483") + mean = float("1.02609") + std = float("0.290246") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.000764026") + max_val = float("0.00789643") + mean = float("0.00239397") + std = float("0.000872399") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.229833") + max_val = float("0.162266") + mean = float("0.0349416") + std = float("0.0423478") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0185255") + max_val = float("0.0282844") + mean = float("-7.21101e-05") + std = float("0.00183304") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.58205") + max_val = float("0.0326997") + mean = float("-1.56844") + std = float("0.416017") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.51894") + max_val = float("1.64424") + mean = float("1.13558") + std = float("0.149427") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.0445179") + max_val = float("0.278452") + mean = float("0.101004") + std = float("0.0266658") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-1.05877") + max_val = float("0.500591") + mean = float("-0.285429") + std = float("0.144535") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0217847") + max_val = float("0.0601331") + mean = float("-0.000214232") + std = float("0.00242153") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.644238") + mean = float("-0.57485") + std = float("0.358678") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.163976") + max_val = float("2.06584") + mean = float("0.56203") + std = float("0.227231") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("8.46446e-05") + max_val = float("0.00181652") + mean = float("0.000300897") + std = float("0.000147903") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0395058") + max_val = float("0.072267") + mean = float("0.0222665") + std = float("0.0153805") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0311026") + max_val = float("0.039225") + mean = float("-0.000409791") + std = float("0.00262815") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.9394") + max_val = float("0.644918") + mean = float("-0.574762") + std = float("0.358753") + data = None + + 
+class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.583818") + max_val = float("2.15633") + mean = float("1.08411") + std = float("0.255713") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.00151649") + max_val = float("0.011387") + mean = float("0.00363589") + std = float("0.00111629") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.114817") + max_val = float("0.168288") + mean = float("0.040355") + std = float("0.0413819") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0211861") + max_val = float("0.0312284") + mean = float("-9.86606e-05") + std = float("0.00198109") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.39618") + max_val = float("0.845899") + mean = float("-1.40537") + std = float("0.36063") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.454223") + max_val = float("1.91875") + mean = float("1.16633") + std = float("0.147984") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0369914") + max_val = float("0.169613") + mean = float("0.067321") + std = float("0.0165547") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-0.916864") + max_val = float("0.834885") + mean = float("-0.197255") + std = float("0.118118") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0304568") + max_val = float("0.0446889") + mean = float("-0.000206096") + std = float("0.00245489") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.87628") + max_val = float("0.453077") + mean = float("-0.485305") + std = float("0.376481") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.0771953") + max_val = float("2.11917") + mean = float("0.441977") + std = float("0.217648") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("7.57603e-05") + max_val = float("0.00171771") + mean = float("0.00036293") + std = float("0.000186378") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0528798") + max_val = float("0.0858378") + mean = float("0.0268765") + std = float("0.0175426") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0213328") + max_val = float("0.0283453") + mean = float("-0.000505242") + std = float("0.00224656") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.87669") + max_val = float("0.45341") + 
mean = float("-0.485211") + std = float("0.376586") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.522977") + max_val = float("2.22431") + mean = float("1.05297") + std = float("0.260052") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00214087") + max_val = float("0.0106285") + mean = float("0.00466215") + std = float("0.00134646") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.272097") + max_val = float("0.182301") + mean = float("0.0462845") + std = float("0.0484542") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0214852") + max_val = float("0.0348977") + mean = float("-0.000101693") + std = float("0.00210424") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.1565") + max_val = float("0.418538") + mean = float("-1.36711") + std = float("0.277506") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.707119") + max_val = float("1.63571") + mean = float("1.14297") + std = float("0.101612") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0267598") + max_val = float("0.120536") + mean = float("0.0531872") + std = float("0.0145039") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.737016") + max_val = float("0.211594") + mean = float("-0.135647") + std = float("0.0976005") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0300983") + max_val = float("0.05499") + mean = float("-0.000159015") + std = float("0.00235156") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-2.92344") + max_val = float("1.66439") + mean = float("-0.760407") + std = float("0.643554") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.953228") + max_val = float("2.9182") + mean = float("1.86309") + std = float("0.276205") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.00273562") + max_val = float("0.012939") + mean = float("0.00578831") + std = float("0.00145222") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.279172") + max_val = float("0.135794") + mean = float("0.0682701") + std = float("0.0329249") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0411036") + max_val = float("0.048141") + mean = float("-0.000774534") + std = float("0.00548625") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + 
min_val = float("-2.24702") + max_val = float("0.681993") + mean = float("-0.777088") + std = float("0.472908") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.965876") + max_val = float("2.89361") + mean = float("2.09705") + std = float("0.305445") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.000839665") + max_val = float("0.00423233") + mean = float("0.00221563") + std = float("0.000537") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.0182533") + max_val = float("0.0914483") + mean = float("0.0419083") + std = float("0.0183649") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0837021") + max_val = float("0.0611426") + mean = float("-0.00045084") + std = float("0.00374174") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [768] + dtype = "float32" + min_val = float("-2.40194") + max_val = float("0.642339") + mean = float("-0.908288") + std = float("0.339331") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [768] + dtype = "float32" + min_val = float("0.53146") + max_val = float("1.90712") + mean = float("0.919684") + std = float("0.149212") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [768] + dtype = "float32" + min_val = float("0.00745832") + max_val = float("0.0743865") + mean = float("0.0178485") + std = float("0.00551587") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [768] + dtype = "float32" + min_val = float("-0.236023") + max_val = float("0.207751") + mean = float("0.041919") + std = float("0.0579014") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0383779") + max_val = float("0.0519002") + mean = float("-9.93933e-05") + std = float("0.00244217") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [512] + dtype = "float32" + min_val = float("-3.39029") + max_val = float("1.66616") + mean = float("-1.16168") + std = float("0.513766") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [512] + dtype = "float32" + min_val = float("0.520928") + max_val = float("1.67546") + mean = float("1.11104") + std = float("0.148384") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [512] + dtype = "float32" + min_val = float("0.00230842") + max_val = float("0.0165448") + mean = float("0.00755702") + std = float("0.00192355") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [512] + dtype = "float32" + min_val = float("-0.159179") + max_val = float("0.0723523") + mean = float("-0.0485061") + std = float("0.0412122") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.208779") + max_val = float("0.179911") + mean = float("-0.000606249") + std = float("0.0081171") + data = None + + +class 
Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-0.0103559") + max_val = float("0.00155602") + mean = float("-0.00302775") + std = float("0.0023618") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.204999") + max_val = float("0.141306") + mean = float("-0.00211219") + std = float("0.00500511") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-1.97063") + max_val = float("0.41045") + mean = float("-0.348649") + std = float("0.333533") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.0528508") + max_val = float("2.16013") + mean = float("0.581272") + std = float("0.419844") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("9.94453e-05") + max_val = float("0.00123961") + mean = float("0.000476419") + std = float("0.000224006") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-0.0376085") + max_val = float("0.0570153") + mean = float("0.00573177") + std = float("0.0152437") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0210389") + max_val = float("0.0585363") + mean = float("-0.000352054") + std = float("0.00423892") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-1.97059") + max_val = float("0.411367") + mean = float("-0.348497") + std = float("0.333596") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("0.372764") + max_val = float("2.70243") + mean = float("1.20208") + std = float("0.49364") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("0.0014863") + max_val = float("0.020345") + mean = float("0.00560471") + std = float("0.00209367") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.115289") + max_val = float("0.163741") + mean = float("0.0194467") + std = float("0.0436225") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.031927") + max_val = float("0.0389496") + mean = float("-0.000144904") + std = float("0.00325908") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.89054") + max_val = float("-0.177595") + mean = float("-1.31446") + std = float("0.401195") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.695074") + max_val = float("2.09481") + mean = float("1.17912") + std = float("0.169901") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.0658237") + max_val = float("0.479229") + 
mean = float("0.138928") + std = float("0.0482074") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-2.47032") + max_val = float("1.83399") + mean = float("-0.227578") + std = float("0.394509") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350379") + max_val = float("0.0468605") + mean = float("-0.000221381") + std = float("0.00388426") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.513263") + mean = float("-0.279273") + std = float("0.321486") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0449424") + max_val = float("1.76947") + mean = float("0.444383") + std = float("0.305669") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("7.91667e-05") + max_val = float("0.00164061") + mean = float("0.00043007") + std = float("0.000226992") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0363552") + max_val = float("0.0461841") + mean = float("0.00877747") + std = float("0.0120158") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.02483") + max_val = float("0.0404131") + mean = float("-0.000400917") + std = float("0.00391908") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.514903") + mean = float("-0.279015") + std = float("0.321709") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.481654") + max_val = float("2.27026") + mean = float("1.13859") + std = float("0.375612") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.00303177") + max_val = float("0.0146645") + mean = float("0.00648") + std = float("0.00181309") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0803161") + max_val = float("0.116901") + mean = float("0.0359767") + std = float("0.0322211") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0229799") + max_val = float("0.0371751") + mean = float("-0.000196939") + std = float("0.00352878") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.50826") + max_val = float("-0.12355") + mean = float("-1.2887") + std = float("0.443822") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.653803") + max_val = float("1.66962") + mean = float("1.19928") + std = float("0.166233") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = 
"float32" + min_val = float("0.0475951") + max_val = float("0.209951") + mean = float("0.0950332") + std = float("0.0248435") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-2.16167") + max_val = float("0.473341") + mean = float("-0.117492") + std = float("0.248865") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.038582") + max_val = float("0.0537646") + mean = float("-0.00026749") + std = float("0.0040656") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.75738") + max_val = float("0.468608") + mean = float("-0.262263") + std = float("0.335862") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.00305103") + max_val = float("1.67905") + mean = float("0.351948") + std = float("0.251703") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("1.02293e-06") + max_val = float("0.00228453") + mean = float("0.000400551") + std = float("0.000283282") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.031609") + max_val = float("0.0551924") + mean = float("0.0110783") + std = float("0.0123374") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0307534") + max_val = float("0.0384153") + mean = float("-0.00045859") + std = float("0.00377622") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.75744") + max_val = float("0.470024") + mean = float("-0.262025") + std = float("0.336099") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.405457") + max_val = float("1.97843") + mean = float("1.06603") + std = float("0.334153") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.00267969") + max_val = float("0.0141796") + mean = float("0.00700109") + std = float("0.00190313") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0881741") + max_val = float("0.111433") + mean = float("0.0401956") + std = float("0.0325546") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0336081") + max_val = float("0.0420323") + mean = float("-0.000205836") + std = float("0.00368544") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.49703") + max_val = float("0.138789") + mean = float("-1.24309") + std = float("0.424468") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.652493") + max_val = float("1.80896") + mean = float("1.16711") + std = float("0.165463") + data = None + + +class 
Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0304637") + max_val = float("0.147553") + mean = float("0.067116") + std = float("0.0164386") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.70097") + max_val = float("0.305559") + mean = float("-0.0850748") + std = float("0.199213") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0472912") + max_val = float("0.0583976") + mean = float("-0.000284769") + std = float("0.00417002") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-2.07915") + max_val = float("0.533836") + mean = float("-0.272165") + std = float("0.375339") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.000522804") + max_val = float("0.732366") + mean = float("0.21194") + std = float("0.136205") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("5.96543e-08") + max_val = float("0.000937142") + mean = float("0.000261376") + std = float("0.000147877") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0266706") + max_val = float("0.0357546") + mean = float("0.00698739") + std = float("0.0098736") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0207564") + max_val = float("0.0335475") + mean = float("-0.000292443") + std = float("0.00332227") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.07924") + max_val = float("0.535791") + mean = float("-0.271976") + std = float("0.375569") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.395086") + max_val = float("1.96267") + mean = float("0.959008") + std = float("0.303814") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00302737") + max_val = float("0.0157952") + mean = float("0.00707016") + std = float("0.00211662") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.0788482") + max_val = float("0.119233") + mean = float("0.0430225") + std = float("0.0339838") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0340016") + max_val = float("0.0403474") + mean = float("-0.000216247") + std = float("0.00380285") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0805818") + mean = float("-1.23662") + std = float("0.434286") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.761952") + max_val = 
float("1.62053") + mean = float("1.15094") + std = float("0.142444") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0278922") + max_val = float("0.0817439") + mean = float("0.0488411") + std = float("0.0102139") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.39522") + max_val = float("0.291819") + mean = float("-0.0734705") + std = float("0.166804") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0589398") + max_val = float("0.0606418") + mean = float("-0.000300541") + std = float("0.00415388") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-1.212") + max_val = float("0.447452") + mean = float("-0.232044") + std = float("0.339385") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("-9.43381e-05") + max_val = float("0.678118") + mean = float("0.192025") + std = float("0.120758") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("2.50564e-10") + max_val = float("0.000967586") + mean = float("0.000259696") + std = float("0.000158071") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0445459") + max_val = float("0.0432653") + mean = float("0.00753396") + std = float("0.0124482") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0374404") + max_val = float("0.0395949") + mean = float("-0.000292615") + std = float("0.00342625") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-1.21197") + max_val = float("0.448806") + mean = float("-0.231853") + std = float("0.339659") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.382853") + max_val = float("1.56358") + mean = float("0.852209") + std = float("0.259926") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00286492") + max_val = float("0.0140629") + mean = float("0.00682827") + std = float("0.00187941") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0776134") + max_val = float("0.150128") + mean = float("0.0470268") + std = float("0.037034") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368355") + max_val = float("0.0400254") + mean = float("-0.000211959") + std = float("0.00380574") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.48699") + max_val = float("-0.132487") + mean = float("-1.2498") + std = float("0.418473") + data = None + + +class Program_weight_tensor_parameter_510: + name = 
"parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.689021") + max_val = float("1.51961") + mean = float("1.12491") + std = float("0.134826") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0195954") + max_val = float("0.0657275") + mean = float("0.0354674") + std = float("0.00852691") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.841336") + max_val = float("0.288596") + mean = float("-0.0804588") + std = float("0.135459") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0647608") + max_val = float("0.0671244") + mean = float("-0.000301379") + std = float("0.00415559") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21773") + max_val = float("0.49966") + mean = float("-0.167333") + std = float("0.293611") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("0.00864435") + max_val = float("1.53701") + mean = float("0.238131") + std = float("0.21185") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("2.28062e-05") + max_val = float("0.00680281") + mean = float("0.00052519") + std = float("0.000661192") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0690564") + max_val = float("0.101614") + mean = float("0.0104971") + std = float("0.0186282") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0626678") + max_val = float("0.0382933") + mean = float("-0.000453582") + std = float("0.00413962") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21774") + max_val = float("0.50078") + mean = float("-0.167049") + std = float("0.293829") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.353208") + max_val = float("1.45018") + mean = float("0.756982") + std = float("0.216639") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00474286") + max_val = float("0.0202689") + mean = float("0.00952365") + std = float("0.00262046") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.102625") + max_val = float("0.150149") + mean = float("0.0567521") + std = float("0.0496463") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0712483") + max_val = float("0.0533123") + mean = float("-0.000260747") + std = float("0.00375359") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-1.87984") + max_val = float("-0.210289") + mean = float("-1.14605") + std = 
float("0.325945") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.790161") + max_val = float("1.59635") + mean = float("1.12149") + std = float("0.129857") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0175701") + max_val = float("0.0649204") + mean = float("0.0311456") + std = float("0.00871316") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.857673") + max_val = float("0.269081") + mean = float("-0.0673534") + std = float("0.134064") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0680887") + max_val = float("0.0796042") + mean = float("-0.000244907") + std = float("0.0040245") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-2.86208") + max_val = float("1.58104") + mean = float("-0.027572") + std = float("0.747892") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.490153") + max_val = float("2.07789") + mean = float("0.900423") + std = float("0.231981") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("0.0121565") + max_val = float("0.0723228") + mean = float("0.0255903") + std = float("0.0100067") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.232643") + max_val = float("0.322942") + mean = float("-0.0434104") + std = float("0.0608082") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.112904") + max_val = float("0.101906") + mean = float("-0.000605477") + std = float("0.00869645") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-2.96795") + max_val = float("1.66848") + mean = float("0.0967615") + std = float("0.663297") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.830405") + max_val = float("5.55794") + mean = float("1.91324") + std = float("0.933276") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00635322") + max_val = float("0.0445876") + mean = float("0.0175605") + std = float("0.00556344") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.144806") + max_val = float("0.154975") + mean = float("-0.022062") + std = float("0.0559356") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100414") + max_val = float("0.0965722") + mean = float("-0.000481739") + std = float("0.00788359") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [384] + dtype = "float32" + min_val = float("-2.9234") + 
max_val = float("1.32689") + mean = float("-0.300856") + std = float("0.563737") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [384] + dtype = "float32" + min_val = float("0.633896") + max_val = float("2.47246") + mean = float("1.15988") + std = float("0.257349") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [384] + dtype = "float32" + min_val = float("0.0117343") + max_val = float("0.113588") + mean = float("0.0270697") + std = float("0.0131501") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [384] + dtype = "float32" + min_val = float("-0.269172") + max_val = float("0.242303") + mean = float("0.0298657") + std = float("0.0746447") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0777711") + max_val = float("0.0733026") + mean = float("-9.30129e-05") + std = float("0.00423326") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [256] + dtype = "float32" + min_val = float("-2.04675") + max_val = float("1.2869") + mean = float("-0.92413") + std = float("0.542635") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [256] + dtype = "float32" + min_val = float("0.509654") + max_val = float("1.69024") + mean = float("1.05364") + std = float("0.177449") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [256] + dtype = "float32" + min_val = float("0.00164958") + max_val = float("0.0205898") + mean = float("0.00554979") + std = float("0.00243516") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [256] + dtype = "float32" + min_val = float("-0.248048") + max_val = float("0.18055") + mean = float("-0.0481355") + std = float("0.0642407") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.211445") + max_val = float("0.154025") + mean = float("-0.00090718") + std = float("0.0139364") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-0.0146056") + max_val = float("0.00252242") + mean = float("-0.00513018") + std = float("0.00389486") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.340895") + max_val = float("0.243469") + mean = float("-0.00395929") + std = float("0.0107136") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-1.9141") + max_val = float("0.53448") + mean = float("-0.208812") + std = float("0.434585") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.139627") + max_val = float("3.23019") + mean = float("0.63562") + std = float("0.668608") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("9.44925e-05") + max_val = float("0.00259545") + mean = float("0.000627228") + std = float("0.0004663") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape 
= [96] + dtype = "float32" + min_val = float("-0.0508301") + max_val = float("0.0646139") + mean = float("0.00729974") + std = float("0.022731") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0529209") + max_val = float("0.0938109") + mean = float("-0.00068654") + std = float("0.00780134") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-1.91385") + max_val = float("0.535947") + mean = float("-0.208472") + std = float("0.434758") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.343945") + max_val = float("5.46861") + mean = float("1.08565") + std = float("0.883653") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("0.000831351") + max_val = float("0.0140034") + mean = float("0.00502365") + std = float("0.0025539") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.135563") + max_val = float("0.206685") + mean = float("0.0107306") + std = float("0.0611442") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0417476") + max_val = float("0.0707409") + mean = float("-0.000200496") + std = float("0.00586268") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-2.46669") + max_val = float("-0.0188941") + mean = float("-1.22596") + std = float("0.444206") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.540095") + max_val = float("1.63859") + mean = float("0.945542") + std = float("0.172479") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.0343629") + max_val = float("0.225267") + mean = float("0.0825205") + std = float("0.0339352") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-2.59911") + max_val = float("2.14438") + mean = float("-0.187597") + std = float("0.479") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.159603") + max_val = float("0.105542") + mean = float("-0.000422661") + std = float("0.00713371") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.38744") + max_val = float("0.563004") + mean = float("-0.132441") + std = float("0.347447") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.0452771") + max_val = float("1.86502") + mean = float("0.460871") + std = float("0.366358") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.60148e-05") + max_val = float("0.00285319") + mean = float("0.000794421") + std = float("0.000636585") + data = None + + +class 
Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0497884") + max_val = float("0.0479867") + mean = float("0.00766729") + std = float("0.0176144") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0484855") + max_val = float("0.0469527") + mean = float("-0.000557248") + std = float("0.00696514") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.38716") + max_val = float("0.565575") + mean = float("-0.131901") + std = float("0.347951") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.373276") + max_val = float("2.32827") + mean = float("0.902354") + std = float("0.426303") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.00302635") + max_val = float("0.0233198") + mean = float("0.00879849") + std = float("0.00436968") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.106151") + max_val = float("0.119838") + mean = float("0.0358036") + std = float("0.043231") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0601192") + max_val = float("0.0479345") + mean = float("-0.000334461") + std = float("0.00588243") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-3.32059") + max_val = float("0.366033") + mean = float("-1.1777") + std = float("0.556588") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.470758") + max_val = float("1.9813") + mean = float("1.03925") + std = float("0.238611") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0279788") + max_val = float("0.183449") + mean = float("0.0506417") + std = float("0.0183646") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-1.05837") + max_val = float("0.786092") + mean = float("-0.0424528") + std = float("0.278771") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.152735") + max_val = float("0.158912") + mean = float("-0.000426001") + std = float("0.00705743") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.24949") + max_val = float("0.583942") + mean = float("-0.109112") + std = float("0.292117") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.0224878") + max_val = float("1.27796") + mean = float("0.324443") + std = float("0.192946") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("2.50107e-05") + max_val = float("0.00308123") + mean = float("0.000650214") 
+ std = float("0.000486492") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0398841") + max_val = float("0.0533346") + mean = float("0.00424068") + std = float("0.0172095") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0406747") + max_val = float("0.0494878") + mean = float("-0.000325615") + std = float("0.0071059") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.24929") + max_val = float("0.586311") + mean = float("-0.108658") + std = float("0.29268") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.311326") + max_val = float("1.67043") + mean = float("0.747441") + std = float("0.257878") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00299069") + max_val = float("0.0188881") + mean = float("0.00858598") + std = float("0.00338781") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.104806") + max_val = float("0.146672") + mean = float("0.0293301") + std = float("0.0382013") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0728298") + max_val = float("0.065903") + mean = float("-0.000300919") + std = float("0.00597289") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.5826") + max_val = float("0.291706") + mean = float("-1.12744") + std = float("0.572685") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.511064") + max_val = float("2.19222") + mean = float("1.05217") + std = float("0.238287") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.021583") + max_val = float("0.0772463") + mean = float("0.0393307") + std = float("0.00939936") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.95654") + max_val = float("0.644938") + mean = float("-0.042882") + std = float("0.216242") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0984925") + max_val = float("0.137263") + mean = float("-0.000483231") + std = float("0.00714155") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-0.891765") + max_val = float("0.530315") + mean = float("-0.160042") + std = float("0.28168") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0202036") + max_val = float("1.40549") + mean = float("0.324747") + std = float("0.213549") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("5.2419e-05") + max_val 
= float("0.00309807") + mean = float("0.00068279") + std = float("0.000470997") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0353761") + max_val = float("0.0539706") + mean = float("0.00757239") + std = float("0.0158983") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.050403") + max_val = float("0.0470333") + mean = float("-0.000602859") + std = float("0.00719125") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-0.891522") + max_val = float("0.532005") + mean = float("-0.15962") + std = float("0.282144") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.170998") + max_val = float("1.78064") + mean = float("0.708933") + std = float("0.284476") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00186209") + max_val = float("0.0242538") + mean = float("0.00887548") + std = float("0.00332774") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.0317255") + max_val = float("0.148332") + mean = float("0.0439334") + std = float("0.0383947") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0673552") + max_val = float("0.0665555") + mean = float("-0.000406403") + std = float("0.00600122") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-2.65797") + max_val = float("0.0644665") + mean = float("-1.06329") + std = float("0.488575") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.510122") + max_val = float("1.73722") + mean = float("1.01545") + std = float("0.193669") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0170441") + max_val = float("0.0592749") + mean = float("0.0303397") + std = float("0.00732367") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.802591") + max_val = float("0.759118") + mean = float("-0.0649493") + std = float("0.211368") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0799583") + max_val = float("0.12863") + mean = float("-0.000463251") + std = float("0.00696947") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.979363") + max_val = float("0.488329") + mean = float("-0.1357") + std = float("0.278693") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0499672") + max_val = float("1.15174") + mean = float("0.296075") + std = float("0.172795") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + 
dtype = "float32" + min_val = float("0.000124848") + max_val = float("0.00438819") + mean = float("0.00108131") + std = float("0.000696678") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0427797") + max_val = float("0.06109") + mean = float("0.00673208") + std = float("0.0190435") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0730409") + max_val = float("0.0734237") + mean = float("-0.000668194") + std = float("0.00816827") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.979598") + max_val = float("0.490087") + mean = float("-0.135308") + std = float("0.279185") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.240111") + max_val = float("1.69891") + mean = float("0.604647") + std = float("0.228294") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00479228") + max_val = float("0.0493428") + mean = float("0.0126094") + std = float("0.00558079") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.0884025") + max_val = float("0.162813") + mean = float("0.0330297") + std = float("0.0455641") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070586") + max_val = float("0.053917") + mean = float("-0.000353734") + std = float("0.00603503") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-3.46749") + max_val = float("0.20134") + mean = float("-1.00429") + std = float("0.548683") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.68469") + max_val = float("2.50521") + mean = float("1.07421") + std = float("0.212064") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.0126502") + max_val = float("0.0593798") + mean = float("0.025404") + std = float("0.00851987") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.59646") + max_val = float("0.699113") + mean = float("-0.0602622") + std = float("0.200876") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0875016") + max_val = float("0.0958638") + mean = float("-0.000393602") + std = float("0.00713622") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.623249") + max_val = float("0.450355") + mean = float("-0.0811173") + std = float("0.25665") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0905173") + max_val = float("1.30172") + mean = float("0.309137") + std = float("0.196898") + data = None + + +class 
Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000486077") + max_val = float("0.0206445") + mean = float("0.00387906") + std = float("0.00325823") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0378971") + max_val = float("0.0272841") + mean = float("0.000360893") + std = float("0.0116936") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0967686") + max_val = float("0.0726096") + mean = float("-0.00111676") + std = float("0.00943776") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.62253") + max_val = float("0.451504") + mean = float("-0.0806935") + std = float("0.256953") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.210918") + max_val = float("1.42997") + mean = float("0.527932") + std = float("0.258611") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.0110923") + max_val = float("0.101379") + mean = float("0.0342554") + std = float("0.0175406") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.105783") + max_val = float("0.0988172") + mean = float("-0.00552355") + std = float("0.039398") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0996365") + max_val = float("0.0540305") + mean = float("-0.00042977") + std = float("0.00592197") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-2.4099") + max_val = float("0.510062") + mean = float("-0.827896") + std = float("0.467957") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.855439") + max_val = float("2.18052") + mean = float("1.27541") + std = float("0.20896") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0104439") + max_val = float("0.0520779") + mean = float("0.0209799") + std = float("0.00859072") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.780626") + max_val = float("0.470779") + mean = float("-0.0616335") + std = float("0.196544") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.154701") + max_val = float("0.153806") + mean = float("-0.00026052") + std = float("0.00735431") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-3.15956") + max_val = float("1.89061") + mean = float("0.502181") + std = float("0.861277") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.209789") + max_val = float("2.62802") + mean = float("0.557131") + 
std = float("0.318659") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.00949005") + max_val = float("0.147612") + mean = float("0.0342476") + std = float("0.0235361") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.272514") + max_val = float("0.303684") + mean = float("-0.0269397") + std = float("0.0869885") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.190092") + max_val = float("0.235795") + mean = float("-0.00054682") + std = float("0.0152601") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-4.92412") + max_val = float("1.57941") + mean = float("0.384226") + std = float("1.04886") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.411425") + max_val = float("6.77791") + mean = float("1.69479") + std = float("1.30749") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00569395") + max_val = float("0.186568") + mean = float("0.0312372") + std = float("0.0269306") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.123122") + max_val = float("0.396242") + mean = float("0.0353184") + std = float("0.0935304") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.115428") + max_val = float("0.143096") + mean = float("0.000288353") + std = float("0.0138526") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [192] + dtype = "float32" + min_val = float("-2.27512") + max_val = float("1.75006") + mean = float("-0.125702") + std = float("0.740468") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [192] + dtype = "float32" + min_val = float("0.632726") + max_val = float("2.96908") + mean = float("1.08749") + std = float("0.283555") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [192] + dtype = "float32" + min_val = float("0.0128887") + max_val = float("0.306476") + mean = float("0.0430534") + std = float("0.0345139") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [192] + dtype = "float32" + min_val = float("-0.476717") + max_val = float("0.27685") + mean = float("-0.0597992") + std = float("0.114967") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0811233") + max_val = float("0.11238") + mean = float("-0.000121273") + std = float("0.00716338") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [128] + dtype = "float32" + min_val = float("-2.81253") + max_val = float("1.96258") + mean = float("-0.709313") + std = float("0.64886") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [128] + dtype = "float32" + min_val = float("0.302011") + max_val = 
float("2.86022") + mean = float("1.01859") + std = float("0.279425") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [128] + dtype = "float32" + min_val = float("0.000683803") + max_val = float("0.0143901") + mean = float("0.00380984") + std = float("0.00196434") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [128] + dtype = "float32" + min_val = float("-0.241007") + max_val = float("0.23083") + mean = float("0.00336445") + std = float("0.0801385") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.16828") + max_val = float("0.191318") + mean = float("-0.00143145") + std = float("0.0216253") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-0.0182017") + max_val = float("-0.00100735") + mean = float("-0.00761377") + std = float("0.00459165") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.297058") + max_val = float("0.124247") + mean = float("-0.00811798") + std = float("0.0180434") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0524219") + max_val = float("0.062819") + mean = float("-0.00145834") + std = float("0.0124603") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.053396") + max_val = float("0.0780475") + mean = float("-0.000432103") + std = float("0.0105215") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0907736") + max_val = float("0.0889891") + mean = float("-0.000674195") + std = float("0.0115766") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0701343") + max_val = float("0.0744403") + mean = float("-0.000969115") + std = float("0.0132523") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0625249") + max_val = float("0.0628193") + mean = float("-0.000704405") + std = float("0.010522") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.105534") + max_val = float("0.0876318") + mean = float("-0.000291303") + std = float("0.0118198") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0927544") + max_val = float("0.067179") + mean = float("-0.00167319") + std = float("0.0164656") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0662936") + max_val = float("0.0926268") + mean = float("-0.000546134") + std = float("0.0110591") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.115861") + max_val = float("0.0843934") + mean = float("-0.000390165") + std = float("0.0126271") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.156722") + max_val = float("0.12438") + mean = float("-0.00240073") + std = float("0.0227151") + data = None + + 
+class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.133366") + max_val = float("0.190723") + mean = float("-0.000461332") + std = float("0.0215494") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [96] + dtype = "float32" + min_val = float("-3.40388") + max_val = float("3.27594") + mean = float("0.331") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [96] + dtype = "float32" + min_val = float("0.861639") + max_val = float("4.91749") + mean = float("1.91516") + std = float("0.75496") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [96] + dtype = "float32" + min_val = float("0.68512") + max_val = float("19.942") + mean = float("2.38283") + std = float("2.38942") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [96] + dtype = "float32" + min_val = float("-1.44893") + max_val = float("1.82311") + mean = float("-0.333309") + std = float("0.618856") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.115845") + max_val = float("0.115419") + mean = float("-0.000438744") + std = float("0.0120833") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.153743") + max_val = float("0.135272") + mean = float("-0.000740633") + std = float("0.0191711") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.307002") + max_val = float("0.202588") + mean = float("-4.43961e-05") + std = float("0.025069") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.297631") + max_val = float("0.278985") + mean = float("-0.00146872") + std = float("0.0683342") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..496563ea1 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +786abe68abca12db6864fd56c948ebc989117954d84f405345ed2db9d4231827 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py new file mode 100644 index 000000000..0bbda7212 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py @@ -0,0 +1,102 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 24276] + dtype = "float32" + max_val = float("2.0") + mean = float("0.0175482") + std = float("0.141858") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 38, 24276] + dtype = "float32" + max_val = float("0.982337") + mean = float("0.000792632") + std = float("0.022185") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 38, 24276] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000461795") + std = float("0.0214844") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 38, 1] + dtype = "int32" + data = [ + 3, + 3, + 9, + 1, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 0, + 0, + 0, + 8, + 3, + 3, + 3, + 0, + 0, + 3, + 3, + 3, + 3, 
+ 3, + 5, + 3, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 38, 4] + dtype = "float32" + min_val = float("354.773") + max_val = float("1051.0") + mean = float("652.35") + std = float("193.013") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 38, 24276] + dtype = "float32" + max_val = float("0.73484") + mean = float("8.98923e-05") + std = float("0.00618669") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py new file mode 100644 index 000000000..88cd8833b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py @@ -0,0 +1,244 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x24276xf32) <- (1x24276xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x24276xb) <- (1x1x24276xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 38, 1] + + # pd_op.tile: (1x38x24276xb) <- (1x1x24276xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("38"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x24276x38xf32) <- (1x24276xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 + ) + del argmax_0, full_2 + + # pd_op.transpose: (1x38x24276xf32) <- (1x24276x38xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x24276xi64) <- (1x24276xi64, 
1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (38xi32) <- (1x38x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (24276xi64) <- (1x24276xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (24276xi32) <- (38xi32, 24276xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [1, 24276] + + # pd_op.reshape: (1x24276xi32) <- (24276xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x24276xb) <- (1x24276xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x24276xi32) <- (1x24276xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 + + # pd_op.where: (1x24276xi32) <- (1x24276xb, 1x24276xi32, 1x24276xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (38x4xf32) <- (1x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (24276x4xf32) <- (38x4xf32, 24276xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [1, 24276, 4] + + # pd_op.reshape: (1x24276x4xf32) <- (24276x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x24276x11xf32) <- (1x24276xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x24276x10xf32) <- (1x24276x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + + # pd_op.multiply: (1x38x24276xf32) <- 
(1x38x24276xf32, 1x38x24276xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x38x1xf32) <- (1x38x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (1x24276x1xf32) <- (1x24276xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 + + # pd_op.multiply: (1x24276x10xf32) <- (1x24276x10xf32, 1x24276x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..51cc26f7d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +0291aa8f0123c85e18600babdb8ed46589822734c20e7a75cb5b99f64ad81067 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py new file mode 100644 index 000000000..59f05b8d6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 24276] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 24276, 4] + dtype = "float32" + min_val = float("-8.51911") + max_val = float("141.539") + mean = float("59.1174") + std = float("39.293") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 24276, 4] + dtype = "float32" + min_val = float("11.9244") + max_val = float("131.375") + mean = float("50.6359") + std = float("16.0055") + data = None + + +class 
Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 24276, 10] + dtype = "float32" + max_val = float("0.981539") + mean = float("0.000708745") + std = float("0.0222415") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [172.055] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 24276, 88] + dtype = "float32" + min_val = float("-3.36394") + max_val = float("13.3402") + mean = float("0.828078") + std = float("1.48853") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [24276, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("135.5") + mean = float("59.0952") + std = float("38.9487") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py new file mode 100644 index 000000000..bb7813590 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py @@ -0,0 +1,514 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.cast: (1x24276xi32) <- (1x24276xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (1x24276x1xi32) <- (1x24276xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (1x24276x4xi32) <- (1x24276x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (1x24276x4xb) <- (1x24276x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (1x24276xf32) <- (1x24276x10xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (1x24276xf32, 1x24276xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, 
paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 + + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = 
paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] + + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (1x24276x1xb) <- (1x24276xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (1x24276x1xi32) <- (1x24276x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 88] + + # pd_op.tile: (1x24276x88xi32) <- (1x24276x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (1x24276x88xb) <- (1x24276x88xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (1x24276x88xf32, 1x24276x88xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [-1, 4, 22] + + # pd_op.reshape: (-1x4x22xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x24276x2xf32, 1x24276x2xf32]) <- (1x24276x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (1x24276x2xf32, 1x24276x2xf32) <- ([1x24276x2xf32, 1x24276x2xf32]) + ( + split_8, + 
split_9, + ) = split_with_num_2 + del split_with_num_2 + + # pd_op.subtract: (1x24276x2xf32) <- (24276x2xf32, 1x24276x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (1x24276x2xf32) <- (1x24276x2xf32, 24276x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x24276x2xf32, 1x24276x2xf32]) <- (1x24276x2xf32, 1x24276x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (1x24276x4xf32) <- ([1x24276x2xf32, 1x24276x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("-2"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("18.99"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x24276x4xf32) <- (1x24276x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_7, full_8) + del concat_0, full_7, full_8 + + # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 + + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) + + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 + + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("2"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x22xf32, -1x4x1xf32) <- (-1x4x22xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("2"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x22xf32, -1x4x1xf32) <- (-1x4x22xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + 
paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 + + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) + + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..889a488e6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +791525d2a1842eea753f34e744dc5933e8bf52c2061a0c0c9d7772ac9dd0afe6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py new file 
mode 100644 index 000000000..4fdfc39bb --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py @@ -0,0 +1,145 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 24276, 10] + dtype = "float32" + min_val = float("9.96627e-10") + max_val = float("0.919436") + mean = float("0.00348174") + std = float("0.0220349") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 24276, 4] + dtype = "float32" + min_val = float("-272.612") + max_val = float("1371.86") + mean = float("544.298") + std = float("322.539") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [24276, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1084.0") + mean = float("544.0") + std = float("314.059") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [24276, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 38, 1] + dtype = "int32" + data = [ + 3, + 3, + 9, + 1, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 0, + 0, + 0, + 8, + 3, + 3, + 3, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 5, + 3, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 38, 4] + dtype = "float32" + min_val = float("354.773") + max_val = float("1051.0") + mean = float("652.35") + std = float("193.013") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 38, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py new file mode 100644 index 000000000..0aac88f68 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py @@ -0,0 +1,499 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (1x38x1x4xf32) <- (1x38x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (1x1x24276x4xf32) <- (1x24276x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (1x38x1x2xf32) <- (1x38x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (1x38x1x2xf32) <- (1x38x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x1x24276x2xf32) <- (1x1x24276x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del 
full_int_array_2 + + # pd_op.slice: (1x1x24276x2xf32) <- (1x1x24276x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (1x38x24276x2xf32) <- (1x38x1x2xf32, 1x1x24276x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x38x24276x2xf32) <- (1x38x1x2xf32, 1x1x24276x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x38x24276x2xf32) <- (1x38x24276x2xf32, 1x38x24276x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x38x24276x2xf32) <- (1x38x24276x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (1x38x24276xf32) <- (1x38x24276x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (1x38x1x2xf32) <- (1x38x1x2xf32, 1x38x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x38x1x2xf32) <- (1x38x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (1x38x1xf32) <- (1x38x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (1x1x24276x2xf32) <- (1x1x24276x2xf32, 1x1x24276x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x24276x2xf32) <- (1x1x24276x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_1, subtract_2 + + # pd_op.prod: (1x1x24276xf32) <- (1x1x24276x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (1x38x24276xf32) <- (1x38x1xf32, 1x1x24276xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del subtract_3 + + # pd_op.divide: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (1x10x24276xf32) <- (1x24276x10xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_4, dtype="int32") + del full_3, full_4 + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: 
(2xi64) <- () + full_int_array_5 = [1, 38] + + # pd_op.tile: (1x38xi32) <- (1x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (1x38xi32) <- (1x38x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([1x38xi32, 1x38xi32]) <- (1x38xi32, 1x38xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (1x38x2xi32) <- ([1x38xi32, 1x38xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (1x38x24276xf32) <- (1x10x24276xf32, 1x38x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (1x38x24276xf32) <- (1x38x24276xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (1x38x24276xf32) <- (1x38x24276xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) + del multiply_0 + + # pd_op.scale: (24276x1xf32) <- (24276x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) + del data_3, full_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x24276x2xf32) <- (24276x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x24276x1xf32, 1x1x24276x1xf32]) <- (1x1x24276x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_5) + del unsqueeze_3 + + # builtin.split: (1x1x24276x1xf32, 1x1x24276x1xf32) <- ([1x1x24276x1xf32, 1x1x24276x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32]) <- (1x38x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_5) + del full_5, unsqueeze_0 + + # builtin.split: (1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32) <- ([1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32]) <- (1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (1x38x24276x4xf32) <- ([1x38x24276x1xf32, 1x38x24276x1xf32, 
1x38x24276x1xf32, 1x38x24276x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_6) + del combine_1 + + # pd_op.min: (1x38x24276xf32) <- (1x38x24276x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 + + # pd_op.full: (xf32) <- () + full_7 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_7) + del min_0 + + # pd_op.unsqueeze: (1x1x24276x1xf32) <- (24276x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) + del full_int_array_6, scale_1 + + # pd_op.add: (1x38x1x1xf32) <- (1x38x1x1xf32, 1x38x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x38x1x1xf32) <- (1x38x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_8, float("0"), True) + del add_1 + + # pd_op.add: (1x38x1x1xf32) <- (1x38x1x1xf32, 1x38x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (1x38x1x1xf32) <- (1x38x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_8, float("0"), True) + del add_2, full_8 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x24276x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x24276x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x24276x1xf32, 1x1x24276x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x24276x1xf32, 1x1x24276x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32]) <- (1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32) + combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 + + # pd_op.concat: (1x38x24276x4xf32) <- ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_6) + del combine_2, full_6 + + # pd_op.min: (1x38x24276xf32) <- (1x38x24276x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_7) + del full_7, min_1 + + # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) + cast_1 = 
paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_0, data_6) + del cast_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_1, data_6) + del cast_1 + + # pd_op.sum: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_9 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (1x38x1xb) <- (1x38x1xf32, xf32) + equal_0 = paddle._C_ops.equal(sum_0, full_9) + del sum_0 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_0, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (1x38x1xb) <- (1x38x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() + ) + del full_0 + + # pd_op.cast: (1x38x1xf32) <- (1x38x1xb) + cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (1x38x1xf32) <- (1x38x1xb) + cast_3 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_2) + del add_6, cast_2 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (1x38x24276xf32) <- (1x38x1xf32, 1x38x24276xf32) + add_10 = paddle._C_ops.add(cast_3, add_7) + del add_7, cast_3 + + # pd_op.cast: (1x38x24276xb) <- (1x38x24276xf32) + cast_4 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) + where_0 = paddle._C_ops.where(cast_4, add_8, add_9) + del add_8, add_9, cast_4 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (1x38x13xf32, 1x38x13xi64) <- (1x38x24276xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_10, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_10, where_0 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("24276"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x38x13x24276xf32) <- (1x38x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_11, topk_1.dtype), full_11 + ) + del full_11, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (1x38x24276xf32) <- (1x38x13x24276xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # 
pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_6) + del data_6, sum_1 + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_9) + del multiply_3 + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_9) + del full_9, multiply_2 + + # pd_op.bitwise_or: (1x38x24276xb) <- (1x38x24276xb, 1x38x24276xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) + cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) + del cast_5, multiply_4 + + # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (1x24276xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) + del divide_0, full_12, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..61ec9b696 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +f7d2795b3f58b48affda49a5b442b89fd5091c1bc54718fe79a24f6c241dbf36 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py new file mode 100644 index 000000000..6b8c591d4 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py @@ -0,0 +1,119 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0337707") + max_val = float("0.03429") + mean = float("-1.71998e-05") + std = float("0.0182992") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3072] + dtype = "float32" + 
min_val = float("-0.000858163") + max_val = float("0.000895478") + mean = float("1.43758e-06") + std = float("0.000180847") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0324395") + max_val = float("0.0323104") + mean = float("-1.57215e-05") + std = float("0.0182981") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3072] + dtype = "float32" + min_val = float("-0.000630245") + max_val = float("0.000514232") + mean = float("2.75956e-06") + std = float("0.000126901") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321875") + max_val = float("0.0321786") + mean = float("-1.59553e-05") + std = float("0.0182974") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [3072] + dtype = "float32" + min_val = float("-0.000429817") + max_val = float("0.000427089") + mean = float("1.59167e-06") + std = float("8.84515e-05") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321313") + max_val = float("0.0321203") + mean = float("-1.62062e-05") + std = float("0.018297") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [3072] + dtype = "float32" + min_val = float("-0.000397465") + max_val = float("0.000488945") + mean = float("1.04525e-06") + std = float("8.30003e-05") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 256, 240, 240] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("12.296") + mean = float("-0.0693066") + std = float("0.362327") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1, 512, 120, 120] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.8892") + mean = float("-0.0919699") + std = float("0.352598") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1, 1024, 60, 60] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("28.4265") + mean = float("0.296413") + std = float("1.17347") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py new file mode 100644 index 000000000..2377a8523 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py @@ -0,0 +1,4857 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + 
parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, 
+ parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + ): + # pd_op.flatten: (1x1024x3600xf32) <- (1x1024x60x60xf32) + flatten_0 = paddle._C_ops.flatten(data_10, 2, 3) + del data_10 + + # pd_op.transpose: (1x3600x1024xf32) <- (1x1024x3600xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("60"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (60xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="float32") + del full_1 + + # builtin.combine: ([60xf32, 60xf32]) <- (60xf32, 60xf32) + combine_0 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([60x60xf32, 60x60xf32]) <- ([60xf32, 60xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (60x60xf32, 60x60xf32) <- ([60x60xf32, 60x60xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_3, full_2, dtype="float32") + del full_0, full_2, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_4, float("0"), True) + del arange_1, full_4 + + # pd_op.full: (256xf32) <- 
() + full_5 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_5, scale_0) + del full_5, scale_0 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_6, elementwise_pow_0) + del elementwise_pow_0, full_6 + + # pd_op.flatten: (3600xf32) <- (60x60xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (3600x1xf32) <- (3600xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_0) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_1 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_1) + del divide_0 + + # pd_op.matmul: (3600x256xf32) <- (3600x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (3600xf32) <- (60x60xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (3600x1xf32) <- (3600xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_0) + del flatten_2, full_int_array_0 + + # pd_op.matmul: (3600x256xf32) <- (3600x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (3600x256xf32) <- (3600x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (3600x256xf32) <- (3600x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (3600x256xf32) <- (3600x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (3600x256xf32) <- (3600x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_13 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_14 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_15 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_16 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_17 = full_7 + + # builtin.combine: ([3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32]) <- (3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32) + combine_1 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, 
cos_1, sin_0, sin_1 + + # pd_op.concat: (3600x1024xf32) <- ([3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1 + + # pd_op.unsqueeze: (1x3600x1024xf32) <- (3600x1024xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_0, full_int_array_1) + del concat_0 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_0 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_2 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_0, slice_0, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_1 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [0, 0, 4, 256] + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [0, 2, 1, 3]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = 
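# --- Editorial sketch (not part of the generated graph) ---------------------
# The arange/meshgrid/pow/sin/cos/concat ops above build a fixed 2D sine-cosine
# position embedding for the 60x60 token grid: 256 frequencies per axis, giving
# [sin, cos] over each axis -> 1024 channels, which is then broadcast-added to
# the flattened 1x3600x1024 feature map.  A standalone re-derivation with plain
# Paddle APIs (function and argument names are illustrative, not graph names):
import paddle

def build_2d_sincos_pos_embed(h=60, w=60, dim_per_axis=256, temperature=10000.0):
    gy, gx = paddle.meshgrid(
        paddle.arange(h, dtype="float32"), paddle.arange(w, dtype="float32")
    )
    # inverse frequencies 1 / 10000**(i/256); the 0.00390625 scale above is 1/256
    inv_freq = 1.0 / temperature ** (
        paddle.arange(dim_per_axis, dtype="float32") / dim_per_axis
    )
    out_x = gx.flatten().unsqueeze(1) * inv_freq.unsqueeze(0)  # (3600, 256)
    out_y = gy.flatten().unsqueeze(1) * inv_freq.unsqueeze(0)  # (3600, 256)
    pos = paddle.concat(
        [out_x.sin(), out_x.cos(), out_y.sin(), out_y.cos()], axis=1
    )  # (3600, 1024), matching the concat of sin_0/cos_0/sin_1/cos_1 above
    return pos.unsqueeze(0)  # (1, 3600, 1024)
# ---------------------------------------------------------------------------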
full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_1, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_0, slice_2, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_2 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_1, [0, 2, 1, 3]) + del reshape_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_5 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_0, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_0 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_1, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_3 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_55 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_56 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_57 = full_8 + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_58 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_59 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_60 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_9 + + # 
pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, 0, 1024] + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_4, full_int_array_6) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_3, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_4 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_5 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_6 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_6, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_7 = paddle._C_ops.add(matmul_9, parameter_357) + del 
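# --- Editorial note ---------------------------------------------------------
# The recurring `(lambda x, f: f(x))(paddle._C_ops.xxx(...), lambda out: ...)`
# wrapper used for dropout / layer_norm / batch_norm above does not change any
# computation: it only normalizes the op's return value, which may be a single
# tensor or a tuple, into a fixed-length tuple so the generated code can always
# unpack (output, mask/stats, ...) the same way.
# ---------------------------------------------------------------------------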
parameter_357 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_7, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_7 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_8 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_8, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_9 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_2, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_3, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_9, slice_6, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_10 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_10, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_2, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_3, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_9, slice_8, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_11 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_11, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_3, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_3 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_12 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_12, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: 
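# --- Editorial sketch (not part of the generated graph) ---------------------
# The span of ops around this point is one post-LayerNorm transformer encoder
# layer, fully unrolled: 4 heads with head_dim=256 (hence the 0.0625 = 1/sqrt(256)
# attention scale), a packed 1024x3072 QKV weight sliced into Q/K/V, dropout
# p=0.1, and a 1024 -> 2048 -> 1024 GELU feed-forward.  The position embedding
# is added to the query/key inputs only, never to the values.  A compact
# functional restatement (argument names are illustrative, not graph names):
import paddle
import paddle.nn.functional as F

def encoder_layer(x, pos, qkv_w, qkv_b, out_w, out_b,
                  ln1_w, ln1_b, ffn1_w, ffn1_b, ffn2_w, ffn2_b, ln2_w, ln2_b,
                  num_heads=4, p=0.1):
    b, n, d = x.shape                              # (1, 3600, 1024)
    head_dim = d // num_heads                      # 256
    q_w, k_w, v_w = paddle.split(qkv_w, 3, axis=1)
    q_b, k_b, v_b = paddle.split(qkv_b, 3, axis=0)
    q = paddle.matmul(x + pos, q_w) + q_b          # queries/keys see the position embedding
    k = paddle.matmul(x + pos, k_w) + k_b
    v = paddle.matmul(x, v_w) + v_b                # values do not

    def split_heads(t):
        return t.reshape([b, n, num_heads, head_dim]).transpose([0, 2, 1, 3])

    q, k, v = split_heads(q), split_heads(k), split_heads(v)
    attn = F.softmax(paddle.matmul(q, k, transpose_y=True) * head_dim ** -0.5, axis=-1)
    out = paddle.matmul(F.dropout(attn, p), v).transpose([0, 2, 1, 3]).reshape([b, n, d])
    # post-norm: residual add, then layer_norm (eps 1e-5), as in the graph
    x = F.layer_norm(x + F.dropout(paddle.matmul(out, out_w) + out_b, p), [d], ln1_w, ln1_b)
    h = F.dropout(F.gelu(paddle.matmul(x, ffn1_w) + ffn1_b), p)
    return F.layer_norm(x + F.dropout(paddle.matmul(h, ffn2_w) + ffn2_b, p), [d], ln2_w, ln2_b)
# ---------------------------------------------------------------------------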
(1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_8, full_int_array_6) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_7, parameter_354, False, False) + del parameter_354 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_13 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_13, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_13 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_14 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_15, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_16 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_16, None, full_9, False, "upscale_in_train", 0, False + 
), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_16 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_17 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_17, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_18 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_4, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_5, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_18, slice_12, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_19 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_19, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_4, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_5, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_18, slice_14, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_20 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_20, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_4, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_4 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_5, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_5 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_21 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_21, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_3 
= paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_12, full_int_array_6) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_11, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_343) + del parameter_343 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_22, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_22 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_23 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_24 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_24, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_25 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_25 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_26 = 
paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_26, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_27 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_6, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_7, [0], full_int_array_1, full_int_array_2, [1], [] + ) + del full_int_array_1 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_27, slice_18, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_28 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_28, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_6, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_7, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_27, slice_20, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_29 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_29, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_6, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_6 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_7, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_7 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_30 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, full_int_array_3) + del full_int_array_3 + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (1x4x3600x3600xf32) 
<- (1x4x3600x3600xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_15, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_31 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_31, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_31 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_32 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_32, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_33 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_33, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) + del parameter_328 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_34 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_34, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_34 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_35 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- 
(1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_35, parameter_326, parameter_325, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (1x1024x3600xf32) <- (1x3600x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [1, 1024, 60, 60] + + # pd_op.reshape: (1x1024x60x60xf32) <- (1x1024x3600xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(transpose_17, full_int_array_7) + del full_int_array_7 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1024x60x60xf32, 384x1024x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + reshape_16, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1024x60x60xf32, 384x1024x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_16, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # 
pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_36 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_4 = paddle._C_ops.swish(add_36) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if 
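# --- Editorial sketch (not part of the generated graph) ---------------------
# From the reshape back to 1x1024x60x60 onward, the graph is a conv-bn-swish
# stack: every conv2d is bias-free and is immediately followed by batch_norm and
# a swish activation, and the repeated unit sums a 3x3 conv-bn branch with a
# 1x1 conv-bn branch over the same input before the activation (RepVGG-style).
# A minimal nn.Layer restatement of that unit (class/argument names are
# illustrative, not graph names):
import paddle.nn as nn
import paddle.nn.functional as F

class ConvBNSwish(nn.Layer):
    def __init__(self, in_ch, out_ch, k, stride=1):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, k, stride, padding=k // 2, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch)

    def forward(self, x):
        return F.swish(self.bn(self.conv(x)))

class DualBranchBlock(nn.Layer):
    """3x3 conv-bn plus 1x1 conv-bn over the same input, summed, then swish."""
    def __init__(self, ch):
        super().__init__()
        self.conv3 = nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False)
        self.bn3 = nn.BatchNorm2D(ch)
        self.conv1 = nn.Conv2D(ch, ch, 1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(ch)

    def forward(self, x):
        return F.swish(self.bn3(self.conv3(x)) + self.bn1(self.conv1(x)))
# ---------------------------------------------------------------------------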
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_5, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_37 = paddle._C_ops.add(batch_norm__36, batch_norm__42) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_6 = paddle._C_ops.swish(add_37) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [5, 5] + + # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_6, + full_int_array_8, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [9, 9] + + # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_6, + full_int_array_9, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [13, 13] + + # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_6, + full_int_array_10, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32) + combine_2 = [swish_6, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (1x1536x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1536x60x60xf32, 384x1536x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + concat_1, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_7 = paddle._C_ops.swish(batch_norm__48) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_9 = 
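# --- Editorial sketch (not part of the generated graph) ---------------------
# The three stride-1 max-pools above (5x5, 9x9, 13x13 with matching padding)
# plus the concat form a standard SPP block: the 384-channel input is
# concatenated with its three pooled copies (-> 1536 channels) and fused back
# down with a 1x1 conv-bn-swish.  Minimal functional form (names illustrative):
import paddle
import paddle.nn.functional as F

def spp(x, fuse_conv, kernel_sizes=(5, 9, 13)):
    pooled = [
        F.max_pool2d(x, kernel_size=k, stride=1, padding=k // 2) for k in kernel_sizes
    ]
    # fuse_conv would be e.g. a 1x1 ConvBNSwish(1536, 384) as sketched earlier
    return fuse_conv(paddle.concat([x] + pooled, axis=1))
# ---------------------------------------------------------------------------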
paddle._C_ops.conv2d( + swish_7, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_38 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_9 = paddle._C_ops.swish(add_38) + + # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32) + combine_3 = [swish_1, swish_9] + + # pd_op.concat: (1x768x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_3, full_7) + del combine_3 + + # pd_op.conv2d: (1x768x60x60xf32) <- (1x768x60x60xf32, 768x768x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + concat_2, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_12, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (1x768x60x60xf32) <- (1x768x60x60xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x768x60x60xf32, 384x768x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_11 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.nearest_interp: (1x384x120x120xf32) <- (1x384x60x60xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_11, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x384x120x120xf32, 1x512x-1x-1xf32]) <- (1x384x120x120xf32, 1x512x-1x-1xf32) + combine_4 = [nearest_interp_0, data_9] + del data_9 + + # pd_op.concat: (1x896x120x120xf32) <- ([1x384x120x120xf32, 1x512x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_4, full_7) + del combine_4 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x896x120x120xf32, 192x896x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + concat_3, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_12 = paddle._C_ops.swish(batch_norm__84) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x896x120x120xf32, 192x896x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + concat_3, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_248, + parameter_247, + parameter_246, + parameter_245, 
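# --- Editorial sketch (not part of the generated graph) ---------------------
# This is the top-down fusion step of the neck: the 60x60 feature is reduced to
# 384 channels, upsampled 2x by nearest-neighbour interpolation, concatenated
# with the higher-resolution lateral feature (data_9, 512 channels at 120x120,
# giving 896 channels), and then fed to the two 1x1-conv branches of the next
# CSP-style stage (one kept as a shortcut path, one run through the conv blocks
# and re-concatenated afterwards).  Minimal functional form of the fusion
# itself (names are illustrative):
import paddle
import paddle.nn.functional as F

def top_down_fuse(top, lateral):
    up = F.interpolate(top, scale_factor=2, mode="nearest")  # 60x60 -> 120x120
    return paddle.concat([up, lateral], axis=1)              # 384 + 512 -> 896 channels
# ---------------------------------------------------------------------------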
+ False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_14, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_39 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_15 = paddle._C_ops.swish(add_39) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 
192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_40 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_17 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: 
(1x192x120x120xf32) <- (1x192x120x120xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_41 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_19 = paddle._C_ops.swish(add_41) + + # builtin.combine: ([1x192x120x120xf32, 1x192x120x120xf32]) <- (1x192x120x120xf32, 1x192x120x120xf32) + combine_5 = [swish_12, swish_19] + + # pd_op.concat: (1x384x120x120xf32) <- ([1x192x120x120xf32, 1x192x120x120xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_7) + del combine_5 + + # pd_op.conv2d: (1x384x120x120xf32) <- (1x384x120x120xf32, 384x384x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + concat_4, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (1x384x120x120xf32) <- (1x384x120x120xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x384x120x120xf32, 192x384x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # 
pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_21 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.nearest_interp: (1x192x240x240xf32) <- (1x192x120x120xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_21, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x192x240x240xf32, 1x256x-1x-1xf32]) <- (1x192x240x240xf32, 1x256x-1x-1xf32) + combine_6 = [nearest_interp_1, data_8] + del data_8 + + # pd_op.concat: (1x448x240x240xf32) <- ([1x192x240x240xf32, 1x256x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_7) + del combine_6 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x448x240x240xf32, 96x448x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + concat_5, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_22 = paddle._C_ops.swish(batch_norm__162) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x448x240x240xf32, 96x448x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + concat_5, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 
96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_24, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_24, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) + add_42 = paddle._C_ops.add(batch_norm__180, batch_norm__186) + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_25 = paddle._C_ops.swish(add_42) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + swish_25, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_26 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: 
(1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_26, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) + add_43 = paddle._C_ops.add(batch_norm__198, batch_norm__204) + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_27 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_27, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_28 = paddle._C_ops.swish(batch_norm__210) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) + add_44 = paddle._C_ops.add(batch_norm__216, batch_norm__222) + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_29 = paddle._C_ops.swish(add_44) + + # builtin.combine: ([1x96x240x240xf32, 1x96x240x240xf32]) <- (1x96x240x240xf32, 1x96x240x240xf32) + combine_7 = [swish_22, swish_29] + + # pd_op.concat: (1x192x240x240xf32) <- ([1x96x240x240xf32, 1x96x240x240xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_7) + del combine_7 + + # pd_op.conv2d: (1x192x240x240xf32) <- (1x192x240x240xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (1x192x240x240xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x240x240xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (1x192x240x240xf32) <- (1x192x240x240xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x240x240xf32, 192x192x3x3xf32) + conv2d_39 = paddle._C_ops.conv2d( + swish_30, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # builtin.combine: ([1x192x120x120xf32, 1x384x120x120xf32]) <- 
(1x192x120x120xf32, 1x384x120x120xf32) + combine_8 = [swish_31, swish_20] + + # pd_op.concat: (1x576x120x120xf32) <- ([1x192x120x120xf32, 1x384x120x120xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_7) + del combine_8 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x576x120x120xf32, 192x576x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + concat_7, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x576x120x120xf32, 192x576x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + concat_7, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_33, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_34 = paddle._C_ops.swish(batch_norm__252) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_34, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + 
batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_45 = paddle._C_ops.add(batch_norm__258, batch_norm__264) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_35 = paddle._C_ops.swish(add_45) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_35, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_36 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + swish_36, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_36, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + 
) + del parameter_89 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_46 = paddle._C_ops.add(batch_norm__276, batch_norm__282) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_37 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_38 = paddle._C_ops.swish(batch_norm__288) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + swish_38, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_38, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, 
parameter_73 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_47 = paddle._C_ops.add(batch_norm__294, batch_norm__300) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_39 = paddle._C_ops.swish(add_47) + + # builtin.combine: ([1x192x120x120xf32, 1x192x120x120xf32]) <- (1x192x120x120xf32, 1x192x120x120xf32) + combine_9 = [swish_32, swish_39] + + # pd_op.concat: (1x384x120x120xf32) <- ([1x192x120x120xf32, 1x192x120x120xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_7) + del combine_9 + + # pd_op.conv2d: (1x384x120x120xf32) <- (1x384x120x120xf32, 384x384x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + concat_8, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (1x384x120x120xf32) <- (1x384x120x120xf32) + swish_40 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x120x120xf32, 384x384x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_41 = paddle._C_ops.swish(batch_norm__312) + + # builtin.combine: ([1x384x60x60xf32, 1x768x60x60xf32]) <- (1x384x60x60xf32, 1x768x60x60xf32) + combine_10 = [swish_41, swish_10] + + # pd_op.concat: (1x1152x60x60xf32) <- ([1x384x60x60xf32, 1x768x60x60xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_7) + del combine_10 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1152x60x60xf32, 384x1152x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + concat_9, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_42 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1152x60x60xf32, 384x1152x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_43 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_44 = paddle._C_ops.swish(batch_norm__330) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_44, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_44, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", 
+ False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_48 = paddle._C_ops.add(batch_norm__336, batch_norm__342) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_45 = paddle._C_ops.swish(add_48) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_46 = paddle._C_ops.swish(batch_norm__348) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_49 = paddle._C_ops.add(batch_norm__354, batch_norm__360) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_47 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + swish_47, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 
384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_48 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_48, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_50 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_49 = paddle._C_ops.swish(add_50) + + # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32) + combine_11 = [swish_42, swish_49] + + # pd_op.concat: (1x768x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_7) + del combine_11, full_7 + + # pd_op.conv2d: (1x768x60x60xf32) <- (1x768x60x60xf32, 768x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_10, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + 
), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (1x768x60x60xf32) <- (1x768x60x60xf32) + swish_0 = paddle._C_ops.swish(batch_norm__384) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_14, + add_15, + add_17, + add_18, + add_19, + add_2, + add_20, + add_21, + add_23, + add_24, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_32, + add_33, + add_35, + add_36, + add_37, + add_38, + add_39, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + 
batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + 
batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_1, + concat_10, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_7, + conv2d_8, + conv2d_9, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_8, + full_9, + full_int_array_10, + full_int_array_2, + full_int_array_4, + full_int_array_5, + full_int_array_8, + full_int_array_9, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + 
layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_11, + reshape_15, + reshape_16, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_6, + swish_7, + swish_8, + swish_9, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_3, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py new file mode 100644 index 000000000..bd82badb0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py @@ -0,0 +1,4013 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.175875") + max_val = float("0.210823") + mean = float("0.0834695") + std = float("0.0566098") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.939895") + max_val = float("1.29826") + mean = float("1.06397") + std = float("0.0312259") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00114665") + max_val = float("0.0503602") + mean = float("0.00766089") + std = float("0.00465424") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.134835") + max_val = float("0.0565135") + mean = float("-0.0288952") + std = float("0.0290584") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0548133") + max_val = float("0.0388088") + mean = float("-0.000154659") + std = float("0.00249634") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.14169") + 
max_val = float("0.0305817") + mean = float("-0.0188052") + std = float("0.0234504") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.945748") + max_val = float("1.04442") + mean = float("0.98666") + std = float("0.0105852") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000803951") + max_val = float("0.0187689") + mean = float("0.00492345") + std = float("0.00343028") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0551849") + max_val = float("0.062912") + mean = float("0.00285491") + std = float("0.0223164") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0299323") + max_val = float("0.020664") + mean = float("2.29275e-05") + std = float("0.00192338") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.14169") + max_val = float("0.0305817") + mean = float("-0.0188052") + std = float("0.0234504") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.968039") + max_val = float("1.13059") + mean = float("1.01544") + std = float("0.0171846") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00197981") + max_val = float("0.0503469") + mean = float("0.00764845") + std = float("0.00453187") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.202706") + max_val = float("0.152191") + mean = float("-0.0431393") + std = float("0.0362812") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.029908") + max_val = float("0.035511") + mean = float("-7.29869e-05") + std = float("0.00131195") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170219") + max_val = float("0.0209993") + mean = float("-0.0348873") + std = float("0.0279313") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975222") + max_val = float("1.12587") + mean = float("1.015") + std = float("0.0240805") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00530043") + max_val = float("0.186183") + mean = float("0.0215775") + std = float("0.0159645") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.265254") + max_val = float("0.415528") + mean = float("-0.0379328") + std = float("0.0510841") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0331338") + max_val = float("0.0530854") + mean = float("-6.31513e-05") + std = float("0.00148047") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype 
= "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945357") + max_val = float("1.04501") + mean = float("0.988631") + std = float("0.00984229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000690331") + max_val = float("0.0185684") + mean = float("0.00513745") + std = float("0.00318726") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0849168") + max_val = float("0.0438217") + mean = float("-0.00259504") + std = float("0.017162") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0267959") + max_val = float("0.025491") + mean = float("-5.37283e-05") + std = float("0.00203271") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959852") + max_val = float("1.10509") + mean = float("1.01609") + std = float("0.0177564") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00248164") + max_val = float("0.0491499") + mean = float("0.00954645") + std = float("0.00479825") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.215332") + max_val = float("0.320794") + mean = float("-0.0502206") + std = float("0.044921") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0363929") + max_val = float("0.0514823") + mean = float("-8.4193e-05") + std = float("0.00132563") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896627") + max_val = float("0.0192839") + mean = float("-0.0360783") + std = float("0.0194692") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933291") + max_val = float("1.11466") + mean = float("1.01167") + std = float("0.026589") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00571017") + max_val = float("0.0668329") + mean = float("0.0190313") + std = float("0.00954406") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.241704") + max_val = float("0.126745") + mean = float("-0.0297335") + std = float("0.0569594") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0397047") + max_val = float("0.0499731") + mean = float("-5.43156e-05") + std = float("0.00151173") + data = None + + +class 
Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929383") + max_val = float("1.02791") + mean = float("0.98704") + std = float("0.0110296") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00125121") + max_val = float("0.0114154") + mean = float("0.00429573") + std = float("0.00176768") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0558903") + max_val = float("0.0353347") + mean = float("-0.00854145") + std = float("0.0134779") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0386337") + max_val = float("0.028212") + mean = float("-0.000152706") + std = float("0.00204597") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.981354") + max_val = float("1.10683") + mean = float("1.01834") + std = float("0.0222205") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00487818") + max_val = float("0.0360324") + mean = float("0.0114429") + std = float("0.00469479") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.191838") + max_val = float("0.0902951") + mean = float("-0.0270964") + std = float("0.0352644") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0360859") + max_val = float("0.0633791") + mean = float("-4.66789e-05") + std = float("0.00138059") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.107113") + max_val = float("0.0239382") + mean = float("-0.0375215") + std = float("0.0214567") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944795") + max_val = float("1.11465") + mean = float("1.01186") + std = float("0.0277861") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00539047") + max_val = float("0.0596139") + mean = float("0.0152076") + std = float("0.0073805") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.154913") + max_val = float("0.125914") + mean = float("-0.0486592") + std = float("0.0509119") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0279281") + max_val = float("0.0439271") + mean = 
float("-7.87755e-05") + std = float("0.00153817") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.10674") + max_val = float("0.046738") + mean = float("-0.026306") + std = float("0.0154157") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.973756") + max_val = float("1.08653") + mean = float("1.00903") + std = float("0.0171142") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00231428") + max_val = float("0.0166757") + mean = float("0.00532256") + std = float("0.00190646") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.10048") + max_val = float("0.0869698") + mean = float("-0.0197301") + std = float("0.0269629") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0619005") + max_val = float("0.0744809") + mean = float("-8.91799e-05") + std = float("0.00230778") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0424904") + max_val = float("0.0160654") + mean = float("-0.00899509") + std = float("0.00840798") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.959519") + max_val = float("1.05137") + mean = float("1.00788") + std = float("0.0115961") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00124549") + max_val = float("0.0304895") + mean = float("0.00442696") + std = float("0.00213864") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.110999") + max_val = float("0.0924363") + mean = float("-0.0236762") + std = float("0.0234971") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0245473") + max_val = float("0.0425909") + mean = float("-0.000112646") + std = float("0.00208633") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0529748") + max_val = float("0.0059538") + mean = float("-0.0166275") + std = float("0.00987957") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988678") + max_val = float("1.10388") + mean = float("1.01957") + std = float("0.0168754") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00468338") + max_val = float("0.0641493") + mean = float("0.0144074") + std = float("0.0082444") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.44327") + max_val = float("0.19537") + mean = float("-0.0473944") + std = float("0.0713197") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val 
= float("-0.0212973") + max_val = float("0.0335283") + mean = float("-3.20311e-05") + std = float("0.00117985") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.222314") + max_val = float("0.492622") + mean = float("0.217344") + std = float("0.124262") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919259") + max_val = float("1.4834") + mean = float("1.14101") + std = float("0.0738465") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00374913") + max_val = float("0.0753866") + mean = float("0.0117258") + std = float("0.00578106") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.129657") + max_val = float("0.0597492") + mean = float("-0.037386") + std = float("0.0303036") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0788092") + max_val = float("0.0718385") + mean = float("-0.00042023") + std = float("0.00505347") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.841187") + max_val = float("1.05089") + mean = float("0.972721") + std = float("0.0237726") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00140171") + max_val = float("0.0211185") + mean = float("0.00615762") + std = float("0.00390072") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0638207") + max_val = float("0.0926642") + mean = float("-0.00576702") + std = float("0.0201979") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0496362") + max_val = float("0.0295849") + mean = float("-0.000179203") + std = float("0.00381061") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.729841") + max_val = float("1.12263") + mean = float("1.02218") + std = float("0.0372419") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00540078") + max_val = float("0.0575204") + mean = float("0.0136539") + std = float("0.00636345") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.219646") + max_val = float("0.101609") + mean = float("-0.037761") + std = float("0.0435659") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + 
shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0430374") + max_val = float("0.0495163") + mean = float("-0.000124453") + std = float("0.00256786") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.191344") + max_val = float("0.0444996") + mean = float("-0.057942") + std = float("0.0491063") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897737") + max_val = float("1.18792") + mean = float("1.01539") + std = float("0.0484046") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.010406") + max_val = float("0.203638") + mean = float("0.0350038") + std = float("0.0227289") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.296379") + max_val = float("0.516951") + mean = float("-0.0407173") + std = float("0.0634974") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0473781") + max_val = float("0.0557186") + mean = float("-0.00011045") + std = float("0.00285571") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854023") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922153") + max_val = float("1.04653") + mean = float("0.973445") + std = float("0.017956") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00109672") + max_val = float("0.0152916") + mean = float("0.00524439") + std = float("0.00262094") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0707542") + max_val = float("0.0365569") + mean = float("-0.00798934") + std = float("0.0151914") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0386532") + max_val = float("0.0308154") + mean = float("-0.000343292") + std = float("0.00384278") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854023") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.968104") + max_val = float("1.14778") + mean = float("1.02415") + std = float("0.0294364") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00424155") + max_val = float("0.04597") + mean = float("0.0118971") + std = float("0.00620602") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.186844") + max_val = float("0.141321") + mean = float("-0.0381112") + std = float("0.0386452") + data = None + + +class 
Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0471115") + max_val = float("0.0550156") + mean = float("-0.000140673") + std = float("0.00262922") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.188926") + max_val = float("0.062054") + mean = float("-0.0755775") + std = float("0.0405971") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.880419") + max_val = float("1.21878") + mean = float("1.01465") + std = float("0.050849") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00813457") + max_val = float("0.0698677") + mean = float("0.0225257") + std = float("0.0112607") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.123249") + max_val = float("0.0530247") + mean = float("-0.0265596") + std = float("0.0349301") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0427988") + max_val = float("0.0615636") + mean = float("-0.000111453") + std = float("0.00299174") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962432") + mean = float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900428") + max_val = float("1.02666") + mean = float("0.975123") + std = float("0.0229582") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00166927") + max_val = float("0.0149214") + mean = float("0.00548105") + std = float("0.00187349") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0393056") + max_val = float("0.0471334") + mean = float("-0.0107647") + std = float("0.0172946") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0436514") + max_val = float("0.0635205") + mean = float("-0.000488458") + std = float("0.00437095") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962433") + mean = float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947654") + max_val = float("1.11111") + mean = float("1.02112") + std = float("0.0306157") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00719786") + max_val = float("0.0556434") + mean = float("0.0161388") + std = float("0.00787651") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.131118") + max_val = 
float("0.0600064") + mean = float("-0.0237574") + std = float("0.0337413") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0485053") + max_val = float("0.0562451") + mean = float("-9.74267e-05") + std = float("0.00278606") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234305") + max_val = float("0.0813681") + mean = float("-0.0947175") + std = float("0.0463051") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.886145") + max_val = float("1.20472") + mean = float("1.01666") + std = float("0.0540248") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00868237") + max_val = float("0.0982557") + mean = float("0.0208977") + std = float("0.0125654") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.181299") + max_val = float("0.0965931") + mean = float("-0.0401902") + std = float("0.0436891") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0410138") + max_val = float("0.0751959") + mean = float("-0.000134497") + std = float("0.0032483") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.199948") + max_val = float("0.0153483") + mean = float("-0.0662884") + std = float("0.031178") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925493") + max_val = float("1.15259") + mean = float("1.01328") + std = float("0.0383643") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00445121") + max_val = float("0.0245699") + mean = float("0.00852168") + std = float("0.00311132") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0890928") + max_val = float("0.122453") + mean = float("-0.0225451") + std = float("0.0292516") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0628757") + max_val = float("0.0645973") + mean = float("-0.000195493") + std = float("0.00467829") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.099963") + max_val = float("0.0374111") + mean = float("-0.0139724") + std = float("0.0203964") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.923856") + max_val = float("1.19918") + mean = float("1.00277") + std = float("0.025885") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00335112") + max_val = float("0.0376873") + mean = float("0.00846023") + std = float("0.00424043") + data = None + + +class Program_weight_tensor_parameter_123: + name = 
"parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0728879") + max_val = float("0.0457376") + mean = float("-0.0169366") + std = float("0.0214102") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0557051") + max_val = float("0.0726594") + mean = float("-0.000148829") + std = float("0.00416084") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.15908") + max_val = float("-0.000555524") + mean = float("-0.038944") + std = float("0.0217257") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921159") + max_val = float("1.24866") + mean = float("1.00725") + std = float("0.0301467") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00450918") + max_val = float("0.0608082") + mean = float("0.0161327") + std = float("0.00840885") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.400402") + max_val = float("0.338842") + mean = float("-0.0361684") + std = float("0.0966519") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350247") + max_val = float("0.0471653") + mean = float("-3.44506e-05") + std = float("0.00253963") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.552249") + max_val = float("1.14732") + mean = float("0.355898") + std = float("0.346059") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.541478") + max_val = float("1.57746") + mean = float("1.15098") + std = float("0.184373") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00575627") + max_val = float("0.117619") + mean = float("0.0298341") + std = float("0.017588") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.182786") + max_val = float("0.205671") + mean = float("-0.0504974") + std = float("0.0491969") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.14008") + max_val = float("0.117816") + mean = float("-0.00105786") + std = float("0.0117759") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.0094414") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.762871") + max_val = float("1.23462") + mean = float("0.948542") + std = float("0.0712293") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00291973") + max_val = float("0.0418611") + mean = float("0.0123709") + std = 
float("0.00820581") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0589327") + max_val = float("0.0912759") + mean = float("-0.0136043") + std = float("0.0242705") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0753795") + max_val = float("0.0571307") + mean = float("-0.00127258") + std = float("0.00926422") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.0094414") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.505008") + max_val = float("1.2709") + mean = float("1.02954") + std = float("0.096255") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00864928") + max_val = float("0.0821514") + mean = float("0.0294604") + std = float("0.0153982") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.235962") + max_val = float("0.133407") + mean = float("-0.023131") + std = float("0.0603207") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0934278") + max_val = float("0.0953093") + mean = float("-0.000117742") + std = float("0.00631181") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.703686") + max_val = float("0.495423") + mean = float("-0.112778") + std = float("0.198105") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.723215") + max_val = float("1.7117") + mean = float("0.995187") + std = float("0.133891") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.01304") + max_val = float("0.194962") + mean = float("0.0419874") + std = float("0.0313694") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.214376") + max_val = float("0.134518") + mean = float("-0.0301872") + std = float("0.0628285") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0919381") + max_val = float("0.0707172") + mean = float("-0.000470717") + std = float("0.00699141") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.36415") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960162") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.626999") + max_val = float("1.01953") + mean = float("0.906483") + std = float("0.05556") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.0031041") + max_val = 
float("0.0221224") + mean = float("0.0106635") + std = float("0.00413137") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0660527") + max_val = float("0.0390983") + mean = float("-0.0088487") + std = float("0.0169076") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0710399") + max_val = float("0.0593543") + mean = float("-0.00106871") + std = float("0.00947732") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.36415") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960162") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.811164") + max_val = float("1.15777") + mean = float("1.02225") + std = float("0.0605937") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0111709") + max_val = float("0.105695") + mean = float("0.0280629") + std = float("0.0196981") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.164873") + max_val = float("0.0372368") + mean = float("-0.0373236") + std = float("0.0335712") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0811913") + max_val = float("0.0769005") + mean = float("-0.000466698") + std = float("0.00651756") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.486488") + max_val = float("0.169402") + mean = float("-0.166991") + std = float("0.131221") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.777448") + max_val = float("1.29252") + mean = float("0.963023") + std = float("0.0981105") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.00992224") + max_val = float("0.105847") + mean = float("0.0243501") + std = float("0.0137999") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.154927") + max_val = float("0.0659024") + mean = float("0.00872401") + std = float("0.0386172") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0993817") + max_val = float("0.0757508") + mean = float("-0.000423259") + std = float("0.00766977") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.0651658") + mean = float("-0.168146") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.722939") + max_val = float("1.0022") + mean = float("0.918838") + std = float("0.0531756") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + 
dtype = "float32" + min_val = float("0.00775018") + max_val = float("0.036921") + mean = float("0.0160984") + std = float("0.0056277") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0570203") + max_val = float("0.0404215") + mean = float("-0.019887") + std = float("0.0190603") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.103947") + max_val = float("0.0646105") + mean = float("-0.00221249") + std = float("0.0110162") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.0651658") + mean = float("-0.168146") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.766535") + max_val = float("1.15353") + mean = float("0.982409") + std = float("0.0579775") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0171361") + max_val = float("0.213573") + mean = float("0.0436358") + std = float("0.0309431") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.199015") + max_val = float("0.0871906") + mean = float("-0.0157018") + std = float("0.0414583") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.099267") + max_val = float("0.0973901") + mean = float("-0.000248761") + std = float("0.00741391") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.564609") + max_val = float("0.347562") + mean = float("-0.179116") + std = float("0.173215") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764463") + max_val = float("1.33669") + mean = float("0.954532") + std = float("0.110883") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0148204") + max_val = float("0.110617") + mean = float("0.0314773") + std = float("0.0184817") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.172672") + max_val = float("0.269786") + mean = float("-0.0212967") + std = float("0.0939031") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.142387") + max_val = float("0.117261") + mean = float("-0.000229489") + std = float("0.00873001") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.625413") + max_val = float("0.597772") + mean = float("-0.0821868") + std = float("0.254375") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.647481") + max_val = float("1.22746") + mean = float("0.866594") + std = float("0.1146") + data = None + + +class 
Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0117598") + max_val = float("0.0756791") + mean = float("0.0256735") + std = float("0.011465") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.116141") + max_val = float("0.0942395") + mean = float("-0.0106851") + std = float("0.0412708") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.149031") + max_val = float("0.14906") + mean = float("-0.000519018") + std = float("0.0115778") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0986348") + max_val = float("0.227765") + mean = float("0.0619239") + std = float("0.054569") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.703927") + max_val = float("1.12526") + mean = float("0.932492") + std = float("0.0634651") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00537723") + max_val = float("0.0583092") + mean = float("0.0120102") + std = float("0.00693174") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0902684") + max_val = float("0.166922") + mean = float("-0.0175149") + std = float("0.0398964") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0952113") + max_val = float("0.110912") + mean = float("-0.000272286") + std = float("0.00775169") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.295368") + max_val = float("0.199876") + mean = float("-0.065903") + std = float("0.0695814") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.670697") + max_val = float("1.45276") + mean = float("0.885134") + std = float("0.0783824") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00790397") + max_val = float("0.127787") + mean = float("0.0225856") + std = float("0.0123405") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.148651") + max_val = float("0.0483928") + mean = float("-0.0369365") + std = float("0.0357253") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0959148") + max_val = float("0.117327") + mean = float("-0.000597284") + std = float("0.00788358") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.201782") + max_val = float("0.241811") + mean = float("-0.0670364") + std = float("0.0416536") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.87318") + max_val = float("1.54065") + mean = 
float("1.01926") + std = float("0.0632841") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00737718") + max_val = float("0.076491") + mean = float("0.0155636") + std = float("0.00762575") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.339572") + max_val = float("0.121455") + mean = float("-0.0580769") + std = float("0.0477813") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.104641") + max_val = float("0.10408") + mean = float("-0.000725338") + std = float("0.00722264") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590601") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884903") + max_val = float("0.991186") + mean = float("0.949253") + std = float("0.016433") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00353105") + max_val = float("0.0231426") + mean = float("0.0095938") + std = float("0.0033673") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0792437") + max_val = float("0.0708871") + mean = float("-0.0240779") + std = float("0.0316357") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0569077") + max_val = float("0.0369732") + mean = float("-0.000733357") + std = float("0.00540254") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590601") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.945936") + max_val = float("1.03267") + mean = float("0.988143") + std = float("0.0166204") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0150858") + max_val = float("0.0802088") + mean = float("0.0328899") + std = float("0.0117288") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.177971") + max_val = float("0.153985") + mean = float("-0.0232557") + std = float("0.0607795") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444125") + max_val = float("0.0760331") + mean = float("-7.02503e-05") + std = float("0.00300584") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.217095") + max_val = float("-0.00148132") + mean = float("-0.0741376") + std = float("0.0354109") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] 
+ dtype = "float32" + min_val = float("0.939032") + max_val = float("1.15417") + mean = float("1.02943") + std = float("0.0431659") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0360784") + max_val = float("0.225914") + mean = float("0.0635349") + std = float("0.0203909") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.264891") + max_val = float("0.307619") + mean = float("-0.0428129") + std = float("0.0726088") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0622024") + max_val = float("0.0626876") + mean = float("-0.000102155") + std = float("0.00367047") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.196618") + max_val = float("-0.00995733") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.94411") + max_val = float("1.04693") + mean = float("0.987726") + std = float("0.0137706") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00227428") + max_val = float("0.00943763") + mean = float("0.00475154") + std = float("0.00122457") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0961104") + max_val = float("0.0392249") + mean = float("-0.0252723") + std = float("0.0210983") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0313925") + max_val = float("0.0416139") + mean = float("-0.000809328") + std = float("0.00570058") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.196618") + max_val = float("-0.00995733") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953711") + max_val = float("1.11463") + mean = float("1.00472") + std = float("0.0265116") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.010102") + max_val = float("0.0479077") + mean = float("0.0179937") + std = float("0.00541134") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.189335") + max_val = float("0.144594") + mean = float("-0.0477661") + std = float("0.0469458") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0484375") + max_val = float("0.0812037") + mean = float("-0.000164179") + std = float("0.00306328") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.232846") + max_val = float("-0.0185216") + mean = float("-0.0943343") + std = float("0.040046") + data = None 
+ + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.946521") + max_val = float("1.19181") + mean = float("1.02411") + std = float("0.0460177") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0359278") + max_val = float("0.139671") + mean = float("0.0648104") + std = float("0.0197637") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.353487") + max_val = float("0.265509") + mean = float("-0.0870985") + std = float("0.100017") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0611518") + max_val = float("0.0870387") + mean = float("-0.000165144") + std = float("0.00384626") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.00333786") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932342") + max_val = float("1.07188") + mean = float("0.998569") + std = float("0.0218607") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00201715") + max_val = float("0.00936504") + mean = float("0.00403713") + std = float("0.00113824") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0829276") + max_val = float("0.0996937") + mean = float("-0.0125365") + std = float("0.0205666") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348639") + max_val = float("0.0478114") + mean = float("-0.000426039") + std = float("0.00642907") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.00333784") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.936173") + max_val = float("1.11491") + mean = float("0.992553") + std = float("0.0259462") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00897904") + max_val = float("0.0431413") + mean = float("0.0183886") + std = float("0.00560404") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.280815") + max_val = float("0.147007") + mean = float("-0.0420801") + std = float("0.0463274") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372381") + max_val = float("0.0656086") + mean = float("-0.000164115") + std = float("0.00303882") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.289029") + max_val 
= float("0.0181024") + mean = float("-0.109759") + std = float("0.0400942") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.943873") + max_val = float("1.25886") + mean = float("1.02651") + std = float("0.0418277") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0146575") + max_val = float("0.0648707") + mean = float("0.0286275") + std = float("0.00896829") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.381573") + max_val = float("0.107524") + mean = float("-0.0547241") + std = float("0.0618592") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566325") + max_val = float("0.0721215") + mean = float("-0.000213243") + std = float("0.00432258") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257034") + max_val = float("-0.0134244") + mean = float("-0.121787") + std = float("0.0441916") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916942") + max_val = float("1.13523") + mean = float("1.02431") + std = float("0.042227") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00558811") + max_val = float("0.0227368") + mean = float("0.0107834") + std = float("0.00302476") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.120986") + max_val = float("0.105939") + mean = float("0.0157535") + std = float("0.0292084") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0812543") + max_val = float("0.103822") + mean = float("-0.000190185") + std = float("0.00606084") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.176609") + max_val = float("0.214363") + mean = float("-0.00723539") + std = float("0.0506647") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.951166") + max_val = float("1.2179") + mean = float("1.05549") + std = float("0.0498193") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.0068407") + max_val = float("0.0571011") + mean = float("0.0142387") + std = float("0.00513597") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.076614") + max_val = float("0.0818422") + mean = float("-0.000474385") + std = float("0.0276303") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0552042") + max_val = float("0.102734") + mean = float("-0.000223038") + std = float("0.00619518") + data = None + + +class Program_weight_tensor_parameter_255: + name = 
"parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249775") + max_val = float("-0.0568627") + mean = float("-0.125062") + std = float("0.0336773") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.814907") + max_val = float("1.01643") + mean = float("0.909518") + std = float("0.0258168") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.00972713") + max_val = float("0.0669189") + mean = float("0.022077") + std = float("0.00889464") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.146609") + max_val = float("0.11002") + mean = float("-0.0348074") + std = float("0.0384279") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0364879") + max_val = float("0.033967") + mean = float("-0.000277781") + std = float("0.00472355") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104277") + max_val = float("0.0723922") + mean = float("-0.0568764") + std = float("0.0153315") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.9523") + max_val = float("1.1435") + mean = float("1.02091") + std = float("0.0210274") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00431744") + max_val = float("0.0370758") + mean = float("0.00970086") + std = float("0.00356025") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.103952") + max_val = float("0.111506") + mean = float("-0.0349987") + std = float("0.0271815") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0581811") + max_val = float("0.113051") + mean = float("-0.000304542") + std = float("0.00402831") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158167") + max_val = float("0.0744682") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.888577") + max_val = float("1.07465") + mean = float("0.982117") + std = float("0.0132258") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.005659") + max_val = float("0.0930103") + mean = float("0.0194514") + std = float("0.0092216") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0685013") + max_val = float("0.0608002") + mean = float("-0.00592014") + std = float("0.0271757") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0396793") + max_val = float("0.0737422") + mean = float("-7.2276e-05") + std = 
float("0.00350095") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158167") + max_val = float("0.0744682") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.879914") + max_val = float("1.07681") + mean = float("0.993922") + std = float("0.0123427") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0237413") + max_val = float("0.735274") + mean = float("0.139783") + std = float("0.0640231") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.277276") + max_val = float("0.156692") + mean = float("-0.0840022") + std = float("0.0855229") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0424879") + max_val = float("0.0475735") + mean = float("-0.0001269") + std = float("0.00130674") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0801146") + max_val = float("0.116771") + mean = float("-0.0189931") + std = float("0.0160256") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.920205") + max_val = float("1.16667") + mean = float("1.01504") + std = float("0.0246966") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0222636") + max_val = float("0.202839") + mean = float("0.0725926") + std = float("0.0321105") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.233438") + max_val = float("0.219682") + mean = float("-0.023274") + std = float("0.079013") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274571") + max_val = float("0.0359229") + mean = float("-3.21889e-05") + std = float("0.00171791") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0739507") + max_val = float("0.020999") + mean = float("-0.0234999") + std = float("0.0134887") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946312") + max_val = float("1.16798") + mean = float("1.01467") + std = float("0.0273906") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0631221") + max_val = float("0.463548") + mean = float("0.173967") + std = float("0.0694937") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.66842") + max_val = float("1.69585") + mean = float("0.0445105") + std = float("0.596448") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0467314") 
+ max_val = float("0.0575585") + mean = float("8.5535e-05") + std = float("0.0030071") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183803") + max_val = float("0.0258619") + mean = float("-0.00144525") + std = float("0.00680649") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969538") + max_val = float("1.06054") + mean = float("0.993834") + std = float("0.0122522") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00292151") + max_val = float("0.0163042") + mean = float("0.00723264") + std = float("0.0025015") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.0974501") + max_val = float("0.0623071") + mean = float("-0.0422498") + std = float("0.0245183") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0333324") + max_val = float("0.0411082") + mean = float("-0.000526783") + std = float("0.00328183") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183803") + max_val = float("0.0258619") + mean = float("-0.00144524") + std = float("0.00680649") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.972046") + max_val = float("1.08568") + mean = float("1.00364") + std = float("0.0181342") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0170031") + max_val = float("0.141069") + mean = float("0.0433458") + std = float("0.0174643") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.324472") + max_val = float("0.0923265") + mean = float("-0.132281") + std = float("0.0638993") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0285762") + max_val = float("0.0755103") + mean = float("-0.00019148") + std = float("0.0013728") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0498104") + max_val = float("0.00884068") + mean = float("-0.00838186") + std = float("0.00779168") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.953878") + max_val = float("1.13497") + mean = float("1.01253") + std = float("0.0201047") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0734313") + max_val = float("0.420989") + mean = float("0.16627") + std = float("0.0472493") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.24929") + max_val = float("0.921608") + mean = float("-0.241972") + std = float("0.271836") + data = None + + +class Program_weight_tensor_parameter_299: + name = 
"parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0242525") + max_val = float("0.0585337") + mean = float("-0.00014178") + std = float("0.00163037") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984179") + max_val = float("1.03462") + mean = float("0.999922") + std = float("0.00715392") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00218605") + max_val = float("0.0108072") + mean = float("0.00399253") + std = float("0.00118789") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0778225") + max_val = float("0.150166") + mean = float("-0.020106") + std = float("0.0256109") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020915") + max_val = float("0.0327181") + mean = float("-0.0002642") + std = float("0.00284316") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.982136") + max_val = float("1.06749") + mean = float("1.00454") + std = float("0.0126701") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.009878") + max_val = float("0.0736381") + mean = float("0.025173") + std = float("0.0083744") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.234125") + max_val = float("0.373004") + mean = float("-0.0733105") + std = float("0.0699428") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0111228") + max_val = float("0.0376461") + mean = float("-0.000113877") + std = float("0.00115243") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0529908") + max_val = float("0.00370578") + mean = float("-0.0207007") + std = float("0.00870238") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.976061") + max_val = float("1.08549") + mean = float("1.01199") + std = float("0.0159983") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0131757") + max_val = float("0.0743652") + mean = float("0.0315857") + std = float("0.00914424") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.181599") + max_val = float("0.227392") + mean = float("-0.0380446") + std = 
float("0.0543988") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0155422") + max_val = float("0.0250036") + mean = float("-6.09728e-05") + std = float("0.00159019") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699577") + max_val = float("0.021347") + mean = float("-0.0334829") + std = float("0.0126426") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981937") + max_val = float("1.05593") + mean = float("1.0134") + std = float("0.0107706") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00841399") + max_val = float("0.0336046") + mean = float("0.0138806") + std = float("0.00321811") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.130353") + max_val = float("0.129976") + mean = float("-0.0125698") + std = float("0.0429897") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0187221") + max_val = float("0.0462025") + mean = float("-0.000204289") + std = float("0.00328169") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0240994") + max_val = float("0.0209722") + mean = float("-0.000328404") + std = float("0.00796388") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994047") + max_val = float("1.08372") + mean = float("1.04108") + std = float("0.0136739") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0107661") + max_val = float("0.0506047") + mean = float("0.0173147") + std = float("0.00413353") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.201399") + max_val = float("0.140707") + mean = float("-0.0130836") + std = float("0.0556937") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.038196") + max_val = float("0.0298097") + mean = float("-0.00023944") + std = float("0.00387698") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.19613e-10") + max_val = float("2.57341e-10") + mean = float("-6.94186e-12") + std = float("8.1518e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.826158") + max_val = float("0.830526") + mean = float("0.828072") + std = float("0.000388443") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184723") + max_val = float("0.0186349") + mean = float("3.29345e-06") + std = float("0.0105958") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + 
dtype = "float32" + min_val = float("-0.0186692") + max_val = float("0.0186323") + mean = float("-3.0949e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258373") + max_val = float("0.0258489") + mean = float("-0.000490034") + std = float("0.0147842") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261231") + max_val = float("0.0262344") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000644044") + max_val = float("0.000416141") + mean = float("1.03571e-06") + std = float("0.00016092") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.825074") + max_val = float("0.831152") + mean = float("0.828074") + std = float("0.000498943") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.00057158") + max_val = float("0.000431758") + mean = float("-6.03783e-07") + std = float("0.000151099") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0452304") + max_val = float("0.0451715") + mean = float("2.40342e-05") + std = float("0.0258606") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.000495877") + max_val = float("0.000502274") + mean = float("2.39519e-05") + std = float("0.000158429") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.825239") + max_val = float("0.831385") + mean = float("0.828099") + std = float("0.000479399") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype = "float32" + min_val = float("-0.0182543") + max_val = float("0.0183952") + mean = float("1.83687e-06") + std = float("0.0105888") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185874") + max_val = float("0.0186053") + mean = float("-3.09786e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258718") + max_val = float("0.025874") + mean = float("-0.00048855") + std = float("0.0147851") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0260955") + max_val = float("0.0261498") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000468906") + max_val = float("0.000412054") + mean = float("2.06222e-06") + std = float("0.000140159") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.825682") + max_val = float("0.831193") + mean = 
float("0.828073") + std = float("0.000448518") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000528327") + max_val = float("0.000383813") + mean = float("2.84058e-06") + std = float("0.000141633") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0450293") + max_val = float("0.0450631") + mean = float("2.40173e-05") + std = float("0.0258607") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000544222") + max_val = float("0.000596302") + mean = float("2.4209e-05") + std = float("0.00018149") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.825946") + max_val = float("0.831225") + mean = float("0.828119") + std = float("0.000435136") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184486") + max_val = float("0.01838") + mean = float("4.31468e-06") + std = float("0.0105859") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185587") + max_val = float("0.0185999") + mean = float("-2.9779e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + dtype = "float32" + min_val = float("-0.0259392") + max_val = float("0.025878") + mean = float("-0.000488745") + std = float("0.014786") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261446") + max_val = float("0.0261368") + mean = float("-1.26001e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000525085") + max_val = float("0.000569726") + mean = float("1.8424e-06") + std = float("0.000180016") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.826325") + max_val = float("0.831088") + mean = float("0.828071") + std = float("0.000422331") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000560374") + max_val = float("0.00059686") + mean = float("2.25972e-06") + std = float("0.000184975") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0451116") + max_val = float("0.0451354") + mean = float("2.40528e-05") + std = float("0.0258608") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000823618") + max_val = float("0.000904054") + mean = float("2.92117e-05") + std = float("0.000277524") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" + min_val = float("0.826282") + max_val = float("0.830821") + mean = float("0.828142") + std = float("0.000430458") + data = None + + +class 
Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.0185659") + max_val = float("0.0186153") + mean = float("4.15863e-06") + std = float("0.0105906") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186583") + max_val = float("0.0186457") + mean = float("-3.02356e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.0260157") + max_val = float("0.0259108") + mean = float("-0.000488166") + std = float("0.0147856") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261391") + max_val = float("0.026125") + mean = float("-1.26002e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.000912874") + max_val = float("0.000860046") + mean = float("1.52602e-06") + std = float("0.000286919") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.826227") + max_val = float("0.830736") + mean = float("0.828069") + std = float("0.000440276") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + min_val = float("-0.000894026") + max_val = float("0.000983702") + mean = float("2.69971e-06") + std = float("0.000279123") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0456631") + max_val = float("0.0456485") + mean = float("2.40398e-05") + std = float("0.0258625") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..219233c00 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +2846bd6e07113ea79b009a65c357a7bcda1ab2bbcb3cd794972513936663d342 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py new file mode 100644 index 000000000..380d3daa6 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 768, 34, 34] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("7.26132") + mean = float("0.265664") + std = float("0.60913") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 384, 68, 68] + dtype = 
"float32" + min_val = float("-0.278465") + max_val = float("9.84456") + mean = float("0.366383") + std = float("0.709092") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 192, 136, 136] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("15.7599") + mean = float("0.450238") + std = float("0.719633") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py new file mode 100644 index 000000000..0ee326a64 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py @@ -0,0 +1,1050 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("34"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (34xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (34xf32) <- (34xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (34xf32) <- (34xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (34xf32) <- (34xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([34xf32, 34xf32]) <- (34xf32, 34xf32) + combine_0 = [scale_1, scale_1] + del scale_1 + + # pd_op.meshgrid: ([34x34xf32, 34x34xf32]) <- ([34xf32, 34xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (34x34xf32, 34x34xf32) <- ([34x34xf32, 34x34xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) + + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) + + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, 
full_3, float("80"), True) + + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) + + # builtin.combine: ([34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32]) <- (34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 + + # pd_op.stack: (34x34x4xf32) <- ([34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([34x34xf32, 34x34xf32]) <- (34x34xf32, 34x34xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (34x34x2xf32) <- ([34x34xf32, 34x34xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (1156x4xf32) <- (34x34x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (1156x2xf32) <- (34x34x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (1156x1xf32) <- () + full_5 = paddle._C_ops.full( + [1156, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("68"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (68xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 + + # pd_op.cast: (68xf32) <- (68xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 + + # pd_op.scale: (68xf32) <- (68xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (68xf32) <- (68xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([68xf32, 68xf32]) <- (68xf32, 68xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([68x68xf32, 68x68xf32]) <- ([68xf32, 68xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 + + # builtin.split: (68x68xf32, 68x68xf32) <- ([68x68xf32, 68x68xf32]) + ( + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 + + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: ([68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32]) <- (68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (68x68x4xf32) <- ([68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([68x68xf32, 68x68xf32]) <- (68x68xf32, 68x68xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 + + # pd_op.stack: (68x68x2xf32) <- ([68x68xf32, 68x68xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) 
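+ # stack_2 / stack_3 hold the stride-16 level's grid-cell boxes (centres ±40 px, i.e. 2.5 × the 16-px stride) and the centre coordinates themselves; the 34x34 (stride-32) and 136x136 (stride-8) levels follow the same pattern with ±80 px and ±20 px respectively.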
+ del combine_5 + + # pd_op.reshape: (4624x4xf32) <- (68x68x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (4624x2xf32) <- (68x68x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (4624x1xf32) <- () + full_8 = paddle._C_ops.full( + [4624, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("136"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (136xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 + + # pd_op.cast: (136xf32) <- (136xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 + + # pd_op.scale: (136xf32) <- (136xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (136xf32) <- (136xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([136xf32, 136xf32]) <- (136xf32, 136xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([136x136xf32, 136x136xf32]) <- ([136xf32, 136xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (136x136xf32, 136x136xf32) <- ([136x136xf32, 136x136xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32]) <- (136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (136x136x4xf32) <- ([136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([136x136xf32, 136x136xf32]) <- (136x136xf32, 136x136xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # pd_op.stack: (136x136x2xf32) <- ([136x136xf32, 136x136xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (18496x4xf32) <- (136x136x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (18496x2xf32) <- (136x136x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (18496x1xf32) <- () + full_11 = paddle._C_ops.full( + [18496, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1156x4xf32, 4624x4xf32, 18496x4xf32]) <- (1156x4xf32, 4624x4xf32, 18496x4xf32) + combine_9 = [reshape_0, reshape_2, 
reshape_4] + + # pd_op.concat: (24276x4xf32) <- ([1156x4xf32, 4624x4xf32, 18496x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([1156x2xf32, 4624x2xf32, 18496x2xf32]) <- (1156x2xf32, 4624x2xf32, 18496x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (24276x2xf32) <- ([1156x2xf32, 4624x2xf32, 18496x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([1156x1xf32, 4624x1xf32, 18496x1xf32]) <- (1156x1xf32, 4624x1xf32, 18496x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (24276x1xf32) <- ([1156x1xf32, 4624x1xf32, 18496x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (1x768x1x1xf32) <- (1x768x34x34xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x34x34xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (1x10x34x34xf32) <- (1x768x34x34xf32, 10x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (1x10x34x34xf32) <- (1x10x34x34xf32, 1x10x1x1xf32) + add_2 = 
paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x88x34x34xf32) <- (1x768x34x34xf32, 88x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (1x88x34x34xf32) <- (1x88x34x34xf32, 1x88x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (1x10x34x34xf32) <- (1x10x34x34xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (1x10x1156xf32) <- (1x10x34x34xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (1x1156x10xf32) <- (1x10x1156xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (1x88x1156xf32) <- (1x88x34x34xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + + # pd_op.transpose: (1x1156x88xf32) <- (1x88x1156xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (1x384x1x1xf32) <- (1x384x68x68xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + 
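+ # Squeeze-and-excitation style gate for the stride-16 level: the pooled, sigmoid-gated features rescale data_1, then a 1x1 conv + BN + swish with a residual add feeds the 3x3 conv producing the 10-channel score map (its sigmoid is applied further down before flattening).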
# pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x68x68xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (1x10x68x68xf32) <- (1x384x68x68xf32, 10x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (1x10x68x68xf32) <- (1x10x68x68xf32, 1x10x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (1x88x68x68xf32) <- (1x384x68x68xf32, 88x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x88x1x1xf32) <- 
(88xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (1x88x68x68xf32) <- (1x88x68x68xf32, 1x88x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (1x10x68x68xf32) <- (1x10x68x68xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (1x10x4624xf32) <- (1x10x68x68xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (1x4624x10xf32) <- (1x10x4624xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (1x88x4624xf32) <- (1x88x68x68xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (1x4624x88xf32) <- (1x88x4624xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (1x192x1x1xf32) <- (1x192x136x136xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x136x136xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (1x10x136x136xf32) <- (1x192x136x136xf32, 10x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (1x10x136x136xf32) <- (1x10x136x136xf32, 1x10x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x192x1x1xf32) <- 
(192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x88x136x136xf32) <- (1x192x136x136xf32, 88x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (1x88x136x136xf32) <- (1x88x136x136xf32, 1x88x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (1x10x136x136xf32) <- (1x10x136x136xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (1x10x18496xf32) <- (1x10x136x136xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (1x18496x10xf32) <- (1x10x18496xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (1x88x18496xf32) <- (1x88x136x136xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (1x18496x88xf32) <- (1x88x18496xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32]) <- (1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (1x24276x10xf32) <- ([1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32]) <- (1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (1x24276x88xf32) <- ([1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + 
batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py new file mode 100644 index 000000000..433ec9a1b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [88] + dtype = "float32" + min_val = float("0.825624") + max_val = float("0.846159") + mean = float("0.828073") + std = float("0.00356405") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [88, 192, 3, 3] + dtype = "float32" + min_val = float("-0.120061") + max_val = float("0.122726") + mean = float("1.20344e-08") + std = float("0.00589807") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0433758") + max_val = float("0.207094") + mean = float("0.0514628") + std = float("0.0402492") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.850872") + max_val = float("1.63127") + mean = float("1.22454") + std = float("0.145326") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000189735") + max_val = float("0.00828383") + mean = float("0.00133599") + std = float("0.00117081") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0753393") + max_val = float("0.0318059") + mean = float("-0.0124341") + std = float("0.0175849") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0734168") + max_val = float("0.108512") + mean = float("-0.000444445") + std = float("0.00763924") + data = None + + +class 
Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00613384") + max_val = float("0.00922103") + mean = float("-7.86384e-05") + std = float("0.00341788") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00662369") + max_val = float("0.0119822") + mean = float("-0.000127685") + std = float("0.00177164") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-0.175474") + max_val = float("0.055091") + mean = float("-0.00147039") + std = float("0.00842565") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.329693") + max_val = float("0.892228") + mean = float("0.356694") + std = float("0.271228") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.01538") + max_val = float("1.77428") + mean = float("1.31556") + std = float("0.143187") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000388466") + max_val = float("0.011235") + mean = float("0.00179138") + std = float("0.00163858") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.16391") + max_val = float("0.127577") + mean = float("-0.00419185") + std = float("0.0390567") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0620339") + max_val = float("0.0578527") + mean = float("-0.000609344") + std = float("0.0074149") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0053139") + max_val = float("0.0128966") + mean = float("-0.000148839") + std = float("0.00226529") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0108365") + max_val = float("0.0180944") + mean = float("-7.70563e-05") + std = float("0.0014898") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [88] + dtype = "float32" + min_val = float("0.826359") + max_val = float("0.837586") + mean = float("0.828071") + std = float("0.00217956") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [88, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0854456") + max_val = float("0.0873423") + mean = float("4.34375e-09") + std = float("0.0031191") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00526138") + max_val = float("0.0696216") + mean = float("0.0259227") + std = float("0.0132331") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.99865") + max_val = float("1.23747") + mean = float("1.1069") + std = float("0.0410692") + data = None + 
+ +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("8.30341e-05") + max_val = float("0.00596526") + mean = float("0.000721317") + std = float("0.000758143") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.044286") + max_val = float("0.00828286") + mean = float("-0.00965973") + std = float("0.0093075") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0461624") + max_val = float("0.063746") + mean = float("-0.000138651") + std = float("0.00306682") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00274369") + max_val = float("0.00511827") + mean = float("6.10625e-05") + std = float("0.00152895") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00184752") + max_val = float("0.0048019") + mean = float("3.76045e-06") + std = float("0.000597256") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0721721") + max_val = float("0.0404456") + mean = float("-0.000999238") + std = float("0.00397589") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.152983") + max_val = float("0.452749") + mean = float("0.229344") + std = float("0.100245") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00417") + max_val = float("1.40261") + mean = float("1.1866") + std = float("0.0603403") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000150697") + max_val = float("0.00568262") + mean = float("0.000831286") + std = float("0.000745356") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0985161") + max_val = float("0.0644014") + mean = float("-0.0146302") + std = float("0.022773") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0528263") + max_val = float("0.037782") + mean = float("-0.000246446") + std = float("0.00296888") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00198673") + max_val = float("0.0108277") + mean = float("-1.89144e-05") + std = float("0.00104636") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00490867") + max_val = float("0.00769719") + mean = float("-1.63033e-05") + std = float("0.00053086") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [88] + dtype = "float32" + min_val = float("0.827794") + max_val = float("0.828556") + mean = float("0.828072") + 
std = float("0.000199979") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [88, 768, 3, 3] + dtype = "float32" + min_val = float("-0.00645056") + max_val = float("0.0120405") + mean = float("4.87489e-10") + std = float("0.000843696") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0143031") + max_val = float("0.0478323") + mean = float("0.0113513") + std = float("0.0104705") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.00867") + max_val = float("1.20113") + mean = float("1.06607") + std = float("0.0224781") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.16922e-05") + max_val = float("0.00119994") + mean = float("0.000132026") + std = float("9.51389e-05") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0209402") + max_val = float("0.00348513") + mean = float("-0.00472064") + std = float("0.00312486") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.034299") + max_val = float("0.0337449") + mean = float("-4.66048e-05") + std = float("0.00140342") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00390782") + max_val = float("0.00260919") + mean = float("7.12606e-05") + std = float("0.000831057") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00261376") + max_val = float("0.00228504") + mean = float("1.77784e-05") + std = float("0.000252276") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-0.015813") + max_val = float("0.00965711") + mean = float("-0.000546702") + std = float("0.00140653") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.110932") + max_val = float("0.199913") + mean = float("0.0934025") + std = float("0.0422427") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.00786") + max_val = float("1.25519") + mean = float("1.07879") + std = float("0.0261974") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("7.42637e-05") + max_val = float("0.00246904") + mean = float("0.000537048") + std = float("0.000318384") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0725538") + max_val = float("0.0495499") + mean = float("-0.0155163") + std = float("0.0149963") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0538655") + max_val = 
float("0.0252785") + mean = float("-0.000146665") + std = float("0.00153735") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00117685") + max_val = float("0.00393971") + mean = float("3.40689e-06") + std = float("0.000473697") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0124531") + max_val = float("0.02256") + mean = float("2.28217e-06") + std = float("0.00024775") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..0e41bf3ac --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +56b33ce8013681e5774a98f1aef660acf3db6206cf15004487eb3e77248d98b0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py new file mode 100644 index 000000000..86f4e8443 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 24276] + dtype = "int32" + min_val = 0 + max_val = 10 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py new file mode 100644 index 000000000..24c271baa --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py @@ -0,0 +1,34 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.full: (xi32) <- () + full_0 = paddle._C_ops.full( + [], float("10"), paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (1x24276xb) <- (1x24276xi32, xi32) + not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) + del data_0, full_0 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.sum: (xi64) <- (1x24276xb, 0xi64) + sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) + del full_int_array_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xi64, xi64) + greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) + del full_1, not_equal_0, sum_0 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/weight_meta.py @@ -0,0 +1 
@@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..65c76e393 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +8f0135cbbe42b981c8797a64fd4a5ae056bd1fb7b99b39eaa845c7b47dd391d9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py new file mode 100644 index 000000000..7d313afce --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py @@ -0,0 +1,174 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [49] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 48384, 10] + dtype = "float32" + min_val = float("1.08574e-08") + max_val = float("0.85674") + mean = float("0.00225546") + std = float("0.010105") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 48384, 4] + dtype = "float32" + min_val = float("-253.983") + max_val = float("1811.4") + mean = float("768.345") + std = float("449.589") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [48384, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1532.0") + mean = float("768.0") + std = float("443.391") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [48384, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 49, 1] + dtype = "int32" + data = [ + 0, + 0, + 3, + 8, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 3, + 3, + 3, + 4, + 4, + 8, + 3, + 3, + 3, + 3, + 8, + 8, + 8, + 8, + 8, + 8, + 4, + 8, + 8, + 8, + 8, + 8, + 5, + 0, + 8, + 8, + 8, + 8, + 8, + 3, + 3, + 8, + 8, + 0, + 4, + ] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 49, 4] + dtype = "float32" + min_val = float("830.542") + max_val = float("1214.81") + mean = float("996.371") + std = float("76.2156") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 49, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py new file mode 100644 index 000000000..7403c5b74 --- /dev/null +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py @@ -0,0 +1,546 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] + + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_6, full_int_array_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.unsqueeze: (1x1x-1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) + del data_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (1x-1x-1xf32) <- (1x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 + + # pd_op.prod: (1x-1x1xf32) 
<- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_2, subtract_2 + + # pd_op.prod: (1x1x-1xf32) <- (1x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) + del subtract_3 + + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (1x10x-1xf32) <- (1x-1x10xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_5, dtype="int32") + del full_4, full_5 + + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_6, data_0] + del data_0, full_6 + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (1x-1xi32) <- (1x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) + del stack_0 + + # pd_op.squeeze: (1x-1xi32) <- (1x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_5, full_int_array_4) + del data_5 + + # builtin.combine: ([1x-1xi32, 1x-1xi32]) <- (1x-1xi32, 1x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (1x-1x2xi32) <- ([1x-1xi32, 1x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.gather_nd: (1x-1x-1xf32) <- (1x10x-1xf32, 1x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 + + # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_7) + del multiply_0 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_4, full_3, float("0"), True) + del data_4, full_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # 
pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) + del data_3 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_7) + del unsqueeze_3 + + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) <- (1x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_7) + del full_7, unsqueeze_0 + + # builtin.split: (1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32) <- ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32]) <- (1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32) + combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (1x-1x-1x4xf32) <- ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_8) + del combine_2 + + # pd_op.min: (1x-1x-1xf32) <- (1x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 + + # pd_op.full: (xf32) <- () + full_9 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_9) + del min_0 + + # pd_op.unsqueeze: (1x1x-1x1xf32) <- (-1x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_5) + del full_int_array_5, scale_1 + + # pd_op.add: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1x-1x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_10, float("0"), True) + del add_1 + + # pd_op.add: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1x-1x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_10, float("0"), True) + del add_2, full_10 + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 
1x-1x-1x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x-1x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x-1x1xf32, 1x1x-1x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x-1x1xf32, 1x1x-1x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32]) <- (1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32) + combine_3 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 + + # pd_op.concat: (1x-1x-1x4xf32) <- ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_3, full_8) + del combine_3, full_8 + + # pd_op.min: (1x-1x-1xf32) <- (1x-1x-1x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_9) + del full_9, min_1 + + # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) + cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) + cast_3 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_2, data_7) + del cast_2 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_3, data_7) + del cast_3 + + # pd_op.sum: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (1x-1x1xb) <- (1x-1x1xf32, xf32) + equal_2 = paddle._C_ops.equal(sum_0, full_11) + del sum_0 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_1, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_1, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (1x-1x1xb) <- (1x-1x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_2, full_1, paddle.bool, paddle.framework._current_expected_place() + ) + del full_1 + + # pd_op.cast: (1x-1x1xf32) <- (1x-1x1xb) + cast_4 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (1x-1x1xf32) <- (1x-1x1xb) + cast_5 = paddle._C_ops.cast(equal_2, paddle.float32) + del equal_2 + + # pd_op.add: (1x-1x-1xf32) <- 
(1x-1x-1xf32, 1x-1x-1xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_4) + del add_6, cast_4 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x-1x-1xf32) + add_10 = paddle._C_ops.add(cast_5, add_7) + del add_7, cast_5 + + # pd_op.cast: (1x-1x-1xb) <- (1x-1x-1xf32) + cast_6 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) + where_0 = paddle._C_ops.where(cast_6, add_8, add_9) + del add_8, add_9, cast_6 + + # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(where_0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] + ) + del full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] + ) + del full_int_array_0, full_int_array_6, shape64_0 + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (1x-1x13xf32, 1x-1x13xi64) <- (1x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_12, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_12, where_0 + + # pd_op.one_hot: (1x-1x13x-1xf32) <- (1x-1x13xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 + ) + del slice_5, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (1x-1x-1xf32) <- (1x-1x13x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_7) + del data_7, sum_1 + + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_11) + del multiply_3 + + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_11) + del full_11, multiply_2 + + # pd_op.bitwise_or: (1x-1x-1xb) <- (1x-1x-1xb, 1x-1x-1xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) + cast_7 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_7) + del cast_7, multiply_4 + + # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (1x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_13 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # 
pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_13) + del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..67621f064 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +17ecceda6c26b3eed13c84f7e0875457f7c0022056dc13a8ed61c7b6e215b286 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py new file mode 100644 index 000000000..56cfb4b20 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py @@ -0,0 +1,127 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [49] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [48384] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 48384] + dtype = "float32" + max_val = float("8.0") + mean = float("0.00859788") + std = float("0.140155") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 49, 48384] + dtype = "float32" + max_val = float("0.949472") + mean = float("8.67853e-05") + std = float("0.0068947") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 49, 48384] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000175467") + std = float("0.0132452") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 49, 1] + dtype = "int32" + data = [ + 0, + 0, + 3, + 8, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 3, + 3, + 3, + 4, + 4, + 8, + 3, + 3, + 3, + 3, + 8, + 8, + 8, + 8, + 8, + 8, + 4, + 8, + 8, + 8, + 8, + 8, + 5, + 0, + 8, + 8, + 8, + 8, + 8, + 3, + 3, + 8, + 8, + 0, + 4, + ] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 49, 4] + dtype = "float32" + min_val = float("830.542") + max_val = float("1214.81") + mean = float("996.371") + std = float("76.2156") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 49, 48384] + dtype = "float32" + max_val = float("0.514231") + mean = float("4.88935e-06") + std = float("0.000998645") + data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py new file mode 100644 index 000000000..54f7a46c8 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py @@ -0,0 +1,284 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (1x1x-1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0) + del data_2 + + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x1x-1xb) <- (1x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (1x-1x-1xb) <- (1x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_3, data_4) + + # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2, shape64_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_2, False, False, paddle.int64) + del multiply_1 + + # pd_op.one_hot: (1x-1x-1xf32) <- (1x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(slice_0, argmax_0.dtype), slice_0 + ) + del argmax_0, slice_0 + + # pd_op.transpose: (1x-1x-1xf32) <- (1x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_3, None, False) + + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_2 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 + + # pd_op.cast: (1x1xi64) <- 
(1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_2, paddle.int64) + del multiply_2 + + # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 + + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 + + # pd_op.flatten: (-1xi64) <- (1x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_1, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 + + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_4) + del full_4, sum_0 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + ) + del full_5 + + # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_4) + del data_7, full_int_array_4 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_1, data_1, full_6] + del data_1, full_1, full_6 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x11xf32) <- (1x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x-1x10xf32) <- (1x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, 
assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, where_0) + del data_8 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] + + # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 + + # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_4, full_int_array_5, True) + del multiply_4 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_3, scale_0) + del multiply_3, scale_0 + + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_5, full_int_array_3, False) + del full_int_array_3, multiply_5 + + # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 + + # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..1dc0e3cd9 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +1ebfa0731cd404fc0d11b70f0637266da6c267aa7f705df9df9bd0ea79785b39 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py new file mode 100644 index 000000000..62a914daf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py @@ -0,0 +1,135 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1] + dtype = "float32" + data = [0.724553] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1] + dtype = "float32" + data = [0.710696] + + +class 
Program_weight_tensor_data_2: + name = "data_2" + shape = [1] + dtype = "float32" + data = [0.69274] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [0.697763] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [0.67767] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [0.628229] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [0.643942] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.633569] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.801205] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.652613] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.636874] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.631148] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.635341] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.640054] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.755822] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.575326] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.59257] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.72331] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1, 3, 1536, 1536] + dtype = "float32" + min_val = float("0.0218883") + max_val = float("0.663022") + mean = float("0.428838") + std = float("0.0832401") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py new file mode 100644 index 000000000..c5ebef41f --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py @@ -0,0 +1,4040 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, 
+ parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, 
+ parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + ): + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_18, parameter_372, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_18, parameter_372 + + # pd_op.batch_norm_: (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_371, + parameter_370, + parameter_369, + parameter_368, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_368, parameter_369, parameter_370, parameter_371 + + # pd_op.swish: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_367, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_367 + + # pd_op.batch_norm_: (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, 
parameter_365, parameter_366 + + # pd_op.swish: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_357, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_356, + parameter_355, + parameter_354, + parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_352, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_342, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.multiply: (1x48x-1x-1xf32) <- (1xf32, 1x48x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_327, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_327 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_321, + parameter_320, + parameter_319, + parameter_318, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_318, parameter_319, parameter_320, parameter_321 + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_317 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_316, + parameter_315, + parameter_314, + parameter_313, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_313, parameter_314, parameter_315, parameter_316 + + # pd_op.multiply: (1x48x-1x-1xf32) <- (1xf32, 1x48x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_312, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_312 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_311, + parameter_310, + parameter_309, + parameter_308, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_308, parameter_309, parameter_310, parameter_311 + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_307, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_307 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_306, + parameter_305, + parameter_304, + parameter_303, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_303, parameter_304, parameter_305, parameter_306 + + # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_302, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_302 + + # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_301, + parameter_300, + parameter_299, + parameter_298, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_298, parameter_299, parameter_300, parameter_301 + + # pd_op.multiply: (1x48x-1x-1xf32) <- (1xf32, 1x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # builtin.combine: ([1x48x-1x-1xf32, 1x48x-1x-1xf32]) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (1x96x-1x-1xf32) <- ([1x48x-1x-1xf32, 1x48x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_5 = full_int_array_0 + + # pd_op.mean: (1x96x1x1xf32) <- (1x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (1x96x1x1xf32) <- (1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
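The long run of ops above and below is one repeated motif of this generated subgraph: conv2d -> batch_norm -> swish, residual blocks whose batch-normalised 1x1 shortcut is scaled by a one-element tensor (the data_0, data_1, ... inputs) before the add (cf. multiply_0 / add_0 / add_1 above), and, at the end of each concatenated stage, a global-mean / 1x1-conv / hardsigmoid / multiply sequence that acts as an SE-style channel gate. The sketch below is a minimal, hand-written rendering of that motif for readers tracing the flattened code; the class and attribute names are illustrative only and do not appear in the captured graph, and the learnable `alpha` merely stands in for the external data_* scalars.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class ConvBNSwish(nn.Layer):
    # The conv2d -> batch_norm_ -> swish triple repeated throughout this file.
    def __init__(self, in_ch, out_ch, k, stride=1):
        super().__init__()
        self.conv = nn.Conv2D(
            in_ch, out_ch, k, stride=stride, padding=k // 2, bias_attr=False
        )
        self.bn = nn.BatchNorm2D(out_ch)

    def forward(self, x):
        return F.swish(self.bn(self.conv(x)))


class AlphaResidualBlock(nn.Layer):
    # A 3x3 conv branch plus a 1x1 shortcut whose BN output is scaled by a
    # scalar before add + swish, followed by a residual add with the block input.
    def __init__(self, ch):
        super().__init__()
        self.conv1 = ConvBNSwish(ch, ch, 3)
        self.conv2 = nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False)
        self.bn2 = nn.BatchNorm2D(ch)
        self.shortcut = nn.Conv2D(ch, ch, 1, bias_attr=False)
        self.bn_sc = nn.BatchNorm2D(ch)
        self.alpha = self.create_parameter(
            shape=[1], default_initializer=nn.initializer.Constant(1.0)
        )

    def forward(self, x):
        y = self.conv1(x)
        out = F.swish(
            self.bn2(self.conv2(y)) + self.alpha * self.bn_sc(self.shortcut(y))
        )
        return x + out


class ChannelGate(nn.Layer):
    # Global mean -> 1x1 conv (with bias) -> hardsigmoid -> channel-wise rescale,
    # mirroring the mean / conv2d / add / hardsigmoid / multiply sequence that
    # closes each concatenated stage of the subgraph.
    def __init__(self, ch):
        super().__init__()
        self.fc = nn.Conv2D(ch, ch, 1)

    def forward(self, x):
        return x * F.hardsigmoid(self.fc(x.mean(axis=[2, 3], keepdim=True)))


# Shape check: a paddle.randn([1, 48, 32, 32]) input keeps its shape through
# AlphaResidualBlock(48); ChannelGate(96) likewise preserves a [1, 96, H, W] input.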
del parameter_297 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_296, full_int_array_1) + del parameter_296 + + # pd_op.add: (1x96x1x1xf32) <- (1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (1x96x1x1xf32) <- (1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_295, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295 + + # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.swish: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_290, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_285, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, parameter_284 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x192x-1x-1xf32, 
96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_280, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_275, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_270, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_269, + parameter_268, + parameter_267, + parameter_266, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_266, parameter_267, parameter_268, parameter_269 + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_265, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_265 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_264, + parameter_263, + parameter_262, + parameter_261, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_261, parameter_262, parameter_263, parameter_264 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 
1x96x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_260, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_260 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_259, + parameter_258, + parameter_257, + parameter_256, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_256, parameter_257, parameter_258, parameter_259 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_255, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_255 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_254, + parameter_253, + parameter_252, + parameter_251, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_251, parameter_252, parameter_253, parameter_254 + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_250, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_250 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_249, + parameter_248, + parameter_247, + parameter_246, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_246, parameter_247, parameter_248, parameter_249 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: 
(1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_245, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_245 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_244, + parameter_243, + parameter_242, + parameter_241, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_241, parameter_242, parameter_243, parameter_244 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_240, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_240 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_239, + parameter_238, + parameter_237, + parameter_236, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_236, parameter_237, parameter_238, parameter_239 + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_235, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_235 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_234, + parameter_233, + parameter_232, + parameter_231, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_231, parameter_232, parameter_233, parameter_234 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_230, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_230 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_229, + parameter_228, + parameter_227, + parameter_226, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_226, parameter_227, parameter_228, parameter_229 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_225, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_225 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_224, + parameter_223, + parameter_222, + parameter_221, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_221, parameter_222, parameter_223, parameter_224 + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_220, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_220 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_219, + parameter_218, + parameter_217, + parameter_216, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_216, parameter_217, parameter_218, parameter_219 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_215, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_215 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_214, + parameter_213, + parameter_212, + parameter_211, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_211, parameter_212, parameter_213, parameter_214 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_25 = 
paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_210, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_210 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_209, + parameter_208, + parameter_207, + parameter_206, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_206, parameter_207, parameter_208, parameter_209 + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_205, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_205 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_204, + parameter_203, + parameter_202, + parameter_201, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_201, parameter_202, parameter_203, parameter_204 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_200, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_200 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_199, + parameter_198, + parameter_197, + parameter_196, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_196, parameter_197, parameter_198, parameter_199 + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_195, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_195 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + 
batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_194, + parameter_193, + parameter_192, + parameter_191, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_191, parameter_192, parameter_193, parameter_194 + + # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_190, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_190 + + # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_189, + parameter_188, + parameter_187, + parameter_186, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_186, parameter_187, parameter_188, parameter_189 + + # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([1x96x-1x-1xf32, 1x96x-1x-1xf32]) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (1x192x-1x-1xf32) <- ([1x96x-1x-1xf32, 1x96x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (1x192x1x1xf32) <- (1x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_185 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_184, full_int_array_1) + del parameter_184 + + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_183, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_183 + + # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_182, + parameter_181, + parameter_180, + parameter_179, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_179, parameter_180, parameter_181, parameter_182 + + # pd_op.swish: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_178, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_178 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_177, + parameter_176, + parameter_175, + parameter_174, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_174, parameter_175, parameter_176, parameter_177 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_173, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_173 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_172, + parameter_171, + parameter_170, + parameter_169, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_169, parameter_170, parameter_171, parameter_172 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_168, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_168 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_167, + parameter_166, + parameter_165, + parameter_164, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_164, parameter_165, parameter_166, parameter_167 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_163, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_163 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( 
+ batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_162, + parameter_161, + parameter_160, + parameter_159, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_159, parameter_160, parameter_161, parameter_162 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_158, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_158 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_157, + parameter_156, + parameter_155, + parameter_154, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_154, parameter_155, parameter_156, parameter_157 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_153, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_153 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_152, + parameter_151, + parameter_150, + parameter_149, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_149, parameter_150, parameter_151, parameter_152 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_148, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_148 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_147, + parameter_146, + parameter_145, + parameter_144, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del parameter_144, parameter_145, parameter_146, parameter_147 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_143, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_143 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_142, + parameter_141, + parameter_140, + parameter_139, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_139, parameter_140, parameter_141, parameter_142 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_138, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_138 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_137, + parameter_136, + parameter_135, + parameter_134, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_134, parameter_135, parameter_136, parameter_137 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_133, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_133 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_132, + parameter_131, + parameter_130, + parameter_129, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_129, parameter_130, parameter_131, parameter_132 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_128, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_128 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_127, + parameter_126, + parameter_125, + parameter_124, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_124, parameter_125, parameter_126, parameter_127 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_123, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_123 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_122, + parameter_121, + parameter_120, + parameter_119, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_119, parameter_120, parameter_121, parameter_122 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_118, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_118 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_117, + parameter_116, + parameter_115, + parameter_114, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_114, parameter_115, parameter_116, parameter_117 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_113, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_113 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_112, + parameter_111, + parameter_110, + 
parameter_109, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_109, parameter_110, parameter_111, parameter_112 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_108, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_108 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_107, + parameter_106, + parameter_105, + parameter_104, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_104, parameter_105, parameter_106, parameter_107 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_103, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_103 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_102, + parameter_101, + parameter_100, + parameter_99, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_99 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_98, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_98 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_97, + parameter_96, + parameter_95, + parameter_94, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_94, parameter_95, parameter_96, parameter_97 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_93, [1, 1], [0, 0], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_93 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_92, + parameter_91, + parameter_90, + parameter_89, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_89, parameter_90, parameter_91, parameter_92 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_88, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_88 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_87, + parameter_86, + parameter_85, + parameter_84, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_84, parameter_85, parameter_86, parameter_87 + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_83, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_83 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_82, + parameter_81, + parameter_80, + parameter_79, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_79, parameter_80, parameter_81, parameter_82 + + # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_78 + + # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_77, + parameter_76, + parameter_75, 
+ parameter_74, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_74, parameter_75, parameter_76, parameter_77 + + # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([1x192x-1x-1xf32, 1x192x-1x-1xf32]) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (1x384x-1x-1xf32) <- ([1x192x-1x-1xf32, 1x192x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (1x384x1x1xf32) <- (1x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_72, full_int_array_1) + del parameter_72 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_71 + + # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_70, + parameter_69, + parameter_68, + parameter_67, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_67, parameter_68, parameter_69, parameter_70 + + # pd_op.swish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (1x768x-1x-1xf32) <- (1x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_66, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_66 + + # pd_op.batch_norm_: (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_65, + parameter_64, + parameter_63, + parameter_62, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, 
+ ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_62, parameter_63, parameter_64, parameter_65 + + # pd_op.swish: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_60, + parameter_59, + parameter_58, + parameter_57, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_57, parameter_58, parameter_59, parameter_60 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_56, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_56 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_55, + parameter_54, + parameter_53, + parameter_52, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_52, parameter_53, parameter_54, parameter_55 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + 
batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_45, + parameter_44, + parameter_43, + parameter_42, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_42, parameter_43, parameter_44, parameter_45 + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_41, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_41 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_40, + parameter_39, + parameter_38, + parameter_37, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_37, parameter_38, parameter_39, parameter_40 + + # pd_op.multiply: (1x384x-1x-1xf32) <- (1xf32, 1x384x-1x-1xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_36, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_35, + parameter_34, + parameter_33, + parameter_32, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_32, parameter_33, parameter_34, parameter_35 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_31, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_31 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_30, + parameter_29, + parameter_28, + parameter_27, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_27, parameter_28, parameter_29, parameter_30 + + # pd_op.conv2d: 
(1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_25, + parameter_24, + parameter_23, + parameter_22, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_22, parameter_23, parameter_24, parameter_25 + + # pd_op.multiply: (1x384x-1x-1xf32) <- (1xf32, 1x384x-1x-1xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_20, + parameter_19, + parameter_18, + parameter_17, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_17, parameter_18, parameter_19, parameter_20 + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_16, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, 
+ batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.multiply: (1x384x-1x-1xf32) <- (1xf32, 1x384x-1x-1xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([1x384x-1x-1xf32, 1x384x-1x-1xf32]) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (1x768x-1x-1xf32) <- ([1x384x-1x-1xf32, 1x384x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (1x768x1x1xf32) <- (1x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) + del full_int_array_1, parameter_5 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__432) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_5, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + 
batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + 
batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + 
batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_2, + concat_3, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_6, + swish_7, + swish_8, + swish_9, + ) + + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py new file mode 100644 index 000000000..a4133e248 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py @@ 
-0,0 +1,3989 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1024] + dtype = "float32" + min_val = float("-3.75937") + max_val = float("-0.734") + mean = float("-2.18719") + std = float("0.428746") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [1024] + dtype = "float32" + min_val = float("1.61944") + max_val = float("4.44114") + mean = float("3.08041") + std = float("0.254214") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [1024] + dtype = "float32" + min_val = float("0.0050978") + max_val = float("0.0274139") + mean = float("0.00876083") + std = float("0.00191502") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1024] + dtype = "float32" + min_val = float("-0.173441") + max_val = float("0.132182") + mean = float("-0.0624446") + std = float("0.0318177") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0420016") + max_val = float("0.0672891") + mean = float("-0.000434506") + std = float("0.00419984") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [768] + dtype = "float32" + min_val = float("-0.0144958") + max_val = float("0.00204154") + mean = float("-0.000784991") + std = float("0.00208566") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0809974") + max_val = float("0.144837") + mean = float("-0.000290719") + std = float("0.0016779") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-1.77404") + max_val = float("0.318904") + mean = float("-0.31075") + std = float("0.291253") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("0.188368") + max_val = float("1.82104") + mean = float("0.60964") + std = float("0.262596") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384] + dtype = "float32" + min_val = float("7.63933e-05") + max_val = float("0.00106861") + mean = float("0.000259708") + std = float("0.000131578") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.0655344") + max_val = float("0.0775217") + mean = float("0.0238682") + std = float("0.0176001") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020871") + max_val = float("0.0273244") + mean = float("-0.000414716") + std = float("0.00284754") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-1.77405") + max_val = float("0.319251") + mean = float("-0.310681") + std = float("0.291275") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.335122") + max_val = float("2.60483") + mean = float("1.02609") + std = float("0.290246") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.000763408") + max_val = float("0.00774847") + mean = 
float("0.0023501") + std = float("0.000855015") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.228802") + max_val = float("0.161783") + mean = float("0.0348261") + std = float("0.0422183") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0185255") + max_val = float("0.0282844") + mean = float("-7.21101e-05") + std = float("0.00183304") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("-2.58205") + max_val = float("0.0326997") + mean = float("-1.56844") + std = float("0.416017") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("0.51894") + max_val = float("1.64424") + mean = float("1.13558") + std = float("0.149427") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("0.0432612") + max_val = float("0.263912") + mean = float("0.0990143") + std = float("0.0258689") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-1.05647") + max_val = float("0.500171") + mean = float("-0.284757") + std = float("0.144218") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0217847") + max_val = float("0.0601331") + mean = float("-0.000214232") + std = float("0.00242153") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.644238") + mean = float("-0.57485") + std = float("0.358678") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("0.163976") + max_val = float("2.06584") + mean = float("0.56203") + std = float("0.227231") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("8.27966e-05") + max_val = float("0.00179396") + mean = float("0.000297678") + std = float("0.000146921") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.039417") + max_val = float("0.0723179") + mean = float("0.0222404") + std = float("0.0153684") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0311026") + max_val = float("0.039225") + mean = float("-0.000409791") + std = float("0.00262815") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("-1.9394") + max_val = float("0.644918") + mean = float("-0.574762") + std = float("0.358753") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("0.583818") + max_val = float("2.15633") + mean = float("1.08411") + std = float("0.255713") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("0.00147808") + 
max_val = float("0.0112958") + mean = float("0.00356599") + std = float("0.00110113") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.114487") + max_val = float("0.168596") + mean = float("0.0403135") + std = float("0.0412827") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0211861") + max_val = float("0.0312284") + mean = float("-9.86606e-05") + std = float("0.00198109") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-2.39618") + max_val = float("0.845899") + mean = float("-1.40537") + std = float("0.36063") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("0.454223") + max_val = float("1.91875") + mean = float("1.16633") + std = float("0.147984") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("0.0366463") + max_val = float("0.164533") + mean = float("0.0661917") + std = float("0.0162349") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.915528") + max_val = float("0.831942") + mean = float("-0.196761") + std = float("0.117911") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0304568") + max_val = float("0.0446889") + mean = float("-0.000206096") + std = float("0.00245489") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-1.87628") + max_val = float("0.453077") + mean = float("-0.485305") + std = float("0.376481") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("0.0771953") + max_val = float("2.11917") + mean = float("0.441977") + std = float("0.217648") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("7.36916e-05") + max_val = float("0.00170445") + mean = float("0.000357372") + std = float("0.000182226") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.0529189") + max_val = float("0.0858856") + mean = float("0.0268843") + std = float("0.0175464") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0213328") + max_val = float("0.0283453") + mean = float("-0.000505242") + std = float("0.00224656") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-1.87669") + max_val = float("0.45341") + mean = float("-0.485211") + std = float("0.376586") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("0.522977") + max_val = float("2.22431") + mean = float("1.05297") + std = float("0.260052") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = 
"float32" + min_val = float("0.0021093") + max_val = float("0.0103458") + mean = float("0.00457088") + std = float("0.00131851") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.272542") + max_val = float("0.182129") + mean = float("0.0462809") + std = float("0.0484224") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0214852") + max_val = float("0.0348977") + mean = float("-0.000101693") + std = float("0.00210424") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-2.1565") + max_val = float("0.418538") + mean = float("-1.36711") + std = float("0.277506") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.707119") + max_val = float("1.63571") + mean = float("1.14297") + std = float("0.101612") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.027003") + max_val = float("0.119021") + mean = float("0.0524081") + std = float("0.0141785") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.735058") + max_val = float("0.211464") + mean = float("-0.135262") + std = float("0.0973352") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0300983") + max_val = float("0.05499") + mean = float("-0.000159015") + std = float("0.00235156") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-2.92344") + max_val = float("1.66439") + mean = float("-0.760407") + std = float("0.643554") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("0.953228") + max_val = float("2.9182") + mean = float("1.86309") + std = float("0.276205") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("0.00273344") + max_val = float("0.0130488") + mean = float("0.00578892") + std = float("0.00146091") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.279522") + max_val = float("0.136057") + mean = float("0.068312") + std = float("0.0329566") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0411036") + max_val = float("0.048141") + mean = float("-0.000774534") + std = float("0.00548625") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("-2.24702") + max_val = float("0.681993") + mean = float("-0.777088") + std = float("0.472908") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("0.965876") + max_val = float("2.89361") + mean = float("2.09705") + std = float("0.305445") + data = None + + +class Program_weight_tensor_parameter_59: + name = 
"parameter_59" + shape = [384] + dtype = "float32" + min_val = float("0.000836446") + max_val = float("0.0043118") + mean = float("0.00221644") + std = float("0.000544507") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0181609") + max_val = float("0.0915652") + mean = float("0.0419498") + std = float("0.0183738") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0837021") + max_val = float("0.0611426") + mean = float("-0.00045084") + std = float("0.00374174") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [768] + dtype = "float32" + min_val = float("-2.40194") + max_val = float("0.642339") + mean = float("-0.908288") + std = float("0.339331") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [768] + dtype = "float32" + min_val = float("0.53146") + max_val = float("1.90712") + mean = float("0.919684") + std = float("0.149212") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [768] + dtype = "float32" + min_val = float("0.00736934") + max_val = float("0.074494") + mean = float("0.0176525") + std = float("0.00547046") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [768] + dtype = "float32" + min_val = float("-0.236448") + max_val = float("0.209185") + mean = float("0.0420968") + std = float("0.0580626") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0383779") + max_val = float("0.0519002") + mean = float("-9.93933e-05") + std = float("0.00244217") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [512] + dtype = "float32" + min_val = float("-3.39029") + max_val = float("1.66616") + mean = float("-1.16168") + std = float("0.513766") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [512] + dtype = "float32" + min_val = float("0.520928") + max_val = float("1.67546") + mean = float("1.11104") + std = float("0.148384") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [512] + dtype = "float32" + min_val = float("0.00220886") + max_val = float("0.0162899") + mean = float("0.00755366") + std = float("0.00191954") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [512] + dtype = "float32" + min_val = float("-0.159233") + max_val = float("0.0720554") + mean = float("-0.0485279") + std = float("0.0411912") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.208779") + max_val = float("0.179911") + mean = float("-0.000606249") + std = float("0.0081171") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("-0.0103559") + max_val = float("0.00155602") + mean = float("-0.00302775") + std = float("0.0023618") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.204999") + max_val = float("0.141306") + mean = float("-0.00211219") + std = float("0.00500511") + 
data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192] + dtype = "float32" + min_val = float("-1.97063") + max_val = float("0.41045") + mean = float("-0.348649") + std = float("0.333533") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("0.0528508") + max_val = float("2.16013") + mean = float("0.581272") + std = float("0.419844") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("9.84565e-05") + max_val = float("0.00122402") + mean = float("0.000477939") + std = float("0.000224956") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("-0.0376647") + max_val = float("0.0569873") + mean = float("0.00567798") + std = float("0.015222") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0210389") + max_val = float("0.0585363") + mean = float("-0.000352054") + std = float("0.00423892") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192] + dtype = "float32" + min_val = float("-1.97059") + max_val = float("0.411367") + mean = float("-0.348497") + std = float("0.333596") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("0.372764") + max_val = float("2.70243") + mean = float("1.20208") + std = float("0.49364") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.0014624") + max_val = float("0.0202289") + mean = float("0.00559275") + std = float("0.0020797") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("-0.115196") + max_val = float("0.163529") + mean = float("0.0192204") + std = float("0.0435021") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.031927") + max_val = float("0.0389496") + mean = float("-0.000144904") + std = float("0.00325908") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192] + dtype = "float32" + min_val = float("-2.89054") + max_val = float("-0.177595") + mean = float("-1.31446") + std = float("0.401195") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("0.695074") + max_val = float("2.09481") + mean = float("1.17912") + std = float("0.169901") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.0654421") + max_val = float("0.471484") + mean = float("0.138461") + std = float("0.0475155") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("-2.47419") + max_val = float("1.83595") + mean = float("-0.229004") + std = float("0.395047") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350379") + max_val = float("0.0468605") + mean = 
float("-0.000221381") + std = float("0.00388426") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.513263") + mean = float("-0.279273") + std = float("0.321486") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("0.0449424") + max_val = float("1.76947") + mean = float("0.444383") + std = float("0.305669") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("7.96339e-05") + max_val = float("0.00168176") + mean = float("0.000430774") + std = float("0.000230126") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("-0.0362367") + max_val = float("0.0459797") + mean = float("0.0087194") + std = float("0.0119612") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.02483") + max_val = float("0.0404131") + mean = float("-0.000400917") + std = float("0.00391908") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.514903") + mean = float("-0.279015") + std = float("0.321709") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("0.481654") + max_val = float("2.27026") + mean = float("1.13859") + std = float("0.375612") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.00304728") + max_val = float("0.0144724") + mean = float("0.00647186") + std = float("0.00181328") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("-0.0801327") + max_val = float("0.116547") + mean = float("0.0356733") + std = float("0.0320593") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0229799") + max_val = float("0.0371751") + mean = float("-0.000196939") + std = float("0.00352878") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192] + dtype = "float32" + min_val = float("-2.50826") + max_val = float("-0.12355") + mean = float("-1.2887") + std = float("0.443822") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("0.653803") + max_val = float("1.66962") + mean = float("1.19928") + std = float("0.166233") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.0475495") + max_val = float("0.208235") + mean = float("0.0948451") + std = float("0.0245631") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("-2.1632") + max_val = float("0.473042") + mean = float("-0.118896") + std = float("0.249139") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = 
float("-0.038582") + max_val = float("0.0537646") + mean = float("-0.00026749") + std = float("0.0040656") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192] + dtype = "float32" + min_val = float("-1.75738") + max_val = float("0.468608") + mean = float("-0.262263") + std = float("0.335862") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("0.00305103") + max_val = float("1.67905") + mean = float("0.351948") + std = float("0.251703") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("1.01992e-06") + max_val = float("0.00222302") + mean = float("0.000398674") + std = float("0.000279493") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("-0.0314916") + max_val = float("0.0548995") + mean = float("0.0110299") + std = float("0.0122915") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0307534") + max_val = float("0.0384153") + mean = float("-0.00045859") + std = float("0.00377622") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192] + dtype = "float32" + min_val = float("-1.75744") + max_val = float("0.470024") + mean = float("-0.262025") + std = float("0.336099") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("0.405457") + max_val = float("1.97843") + mean = float("1.06603") + std = float("0.334153") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.00267407") + max_val = float("0.0142805") + mean = float("0.00698013") + std = float("0.0019104") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("-0.0878738") + max_val = float("0.110839") + mean = float("0.0399626") + std = float("0.0323914") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0336081") + max_val = float("0.0420323") + mean = float("-0.000205836") + std = float("0.00368544") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192] + dtype = "float32" + min_val = float("-2.49703") + max_val = float("0.138789") + mean = float("-1.24309") + std = float("0.424468") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("0.652493") + max_val = float("1.80896") + mean = float("1.16711") + std = float("0.165463") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.0303129") + max_val = float("0.14633") + mean = float("0.0670479") + std = float("0.0163216") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("-1.70247") + max_val = float("0.30536") + mean = float("-0.0862267") + std = float("0.199355") + data = None + + +class Program_weight_tensor_parameter_118: + name = 
"parameter_118" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0472912") + max_val = float("0.0583976") + mean = float("-0.000284769") + std = float("0.00417002") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192] + dtype = "float32" + min_val = float("-2.07915") + max_val = float("0.533836") + mean = float("-0.272165") + std = float("0.375339") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("0.000522804") + max_val = float("0.732366") + mean = float("0.21194") + std = float("0.136205") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("5.9055e-08") + max_val = float("0.000953757") + mean = float("0.000261566") + std = float("0.000147906") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("-0.0264134") + max_val = float("0.0356786") + mean = float("0.00695978") + std = float("0.00983596") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0207564") + max_val = float("0.0335475") + mean = float("-0.000292443") + std = float("0.00332227") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192] + dtype = "float32" + min_val = float("-2.07924") + max_val = float("0.535791") + mean = float("-0.271976") + std = float("0.375569") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("0.395086") + max_val = float("1.96267") + mean = float("0.959008") + std = float("0.303814") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.00304751") + max_val = float("0.015787") + mean = float("0.00706292") + std = float("0.00213169") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("-0.078765") + max_val = float("0.118653") + mean = float("0.0428106") + std = float("0.0338285") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0340016") + max_val = float("0.0403474") + mean = float("-0.000216247") + std = float("0.00380285") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0805818") + mean = float("-1.23662") + std = float("0.434286") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("0.761952") + max_val = float("1.62053") + mean = float("1.15094") + std = float("0.142444") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.0276638") + max_val = float("0.0803679") + mean = float("0.0486605") + std = float("0.0101769") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("-1.39612") + max_val = float("0.291383") + mean = float("-0.0742001") + std = 
float("0.166863") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0589398") + max_val = float("0.0606418") + mean = float("-0.000300541") + std = float("0.00415388") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192] + dtype = "float32" + min_val = float("-1.212") + max_val = float("0.447452") + mean = float("-0.232044") + std = float("0.339385") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-9.43381e-05") + max_val = float("0.678118") + mean = float("0.192025") + std = float("0.120758") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("2.4814e-10") + max_val = float("0.000962865") + mean = float("0.000259823") + std = float("0.000158281") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("-0.0444415") + max_val = float("0.0432657") + mean = float("0.00752981") + std = float("0.0124547") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0374404") + max_val = float("0.0395949") + mean = float("-0.000292615") + std = float("0.00342625") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192] + dtype = "float32" + min_val = float("-1.21197") + max_val = float("0.448806") + mean = float("-0.231853") + std = float("0.339659") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("0.382853") + max_val = float("1.56358") + mean = float("0.852209") + std = float("0.259926") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.00286251") + max_val = float("0.0142248") + mean = float("0.00680236") + std = float("0.00188027") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("-0.0777897") + max_val = float("0.150363") + mean = float("0.0469745") + std = float("0.0370425") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368355") + max_val = float("0.0400254") + mean = float("-0.000211959") + std = float("0.00380574") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192] + dtype = "float32" + min_val = float("-2.48699") + max_val = float("-0.132487") + mean = float("-1.2498") + std = float("0.418473") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("0.689021") + max_val = float("1.51961") + mean = float("1.12491") + std = float("0.134826") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.0194344") + max_val = float("0.0647326") + mean = float("0.0353335") + std = float("0.00848713") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = 
float("-0.842031") + max_val = float("0.288259") + mean = float("-0.0809481") + std = float("0.135503") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0647608") + max_val = float("0.0671244") + mean = float("-0.000301379") + std = float("0.00415559") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192] + dtype = "float32" + min_val = float("-1.21773") + max_val = float("0.49966") + mean = float("-0.167333") + std = float("0.293611") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("0.00864435") + max_val = float("1.53701") + mean = float("0.238131") + std = float("0.21185") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("2.34858e-05") + max_val = float("0.00710491") + mean = float("0.000531262") + std = float("0.00068873") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("-0.0691024") + max_val = float("0.101541") + mean = float("0.0105168") + std = float("0.0186603") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0626678") + max_val = float("0.0382933") + mean = float("-0.000453582") + std = float("0.00413962") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192] + dtype = "float32" + min_val = float("-1.21774") + max_val = float("0.50078") + mean = float("-0.167049") + std = float("0.293829") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("0.353208") + max_val = float("1.45018") + mean = float("0.756982") + std = float("0.216639") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.00481832") + max_val = float("0.0211758") + mean = float("0.00953731") + std = float("0.00267146") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("-0.103005") + max_val = float("0.150479") + mean = float("0.0568873") + std = float("0.0497249") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0712483") + max_val = float("0.0533123") + mean = float("-0.000260747") + std = float("0.00375359") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192] + dtype = "float32" + min_val = float("-1.87984") + max_val = float("-0.210289") + mean = float("-1.14605") + std = float("0.325945") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("0.790161") + max_val = float("1.59635") + mean = float("1.12149") + std = float("0.129857") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.0174547") + max_val = float("0.0659133") + mean = float("0.031237") + std = float("0.00884456") + data = None + + +class 
Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("-0.857208") + max_val = float("0.269781") + mean = float("-0.0676028") + std = float("0.134013") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0680887") + max_val = float("0.0796042") + mean = float("-0.000244907") + std = float("0.0040245") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192] + dtype = "float32" + min_val = float("-2.86208") + max_val = float("1.58104") + mean = float("-0.027572") + std = float("0.747892") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("0.490153") + max_val = float("2.07789") + mean = float("0.900423") + std = float("0.231981") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.012085") + max_val = float("0.0729411") + mean = float("0.0254063") + std = float("0.00999328") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("-0.232877") + max_val = float("0.322739") + mean = float("-0.043425") + std = float("0.0608633") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.112904") + max_val = float("0.101906") + mean = float("-0.000605477") + std = float("0.00869645") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192] + dtype = "float32" + min_val = float("-2.96795") + max_val = float("1.66848") + mean = float("0.0967615") + std = float("0.663297") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("0.830405") + max_val = float("5.55794") + mean = float("1.91324") + std = float("0.933276") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.00638727") + max_val = float("0.0461032") + mean = float("0.0175233") + std = float("0.00555475") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("-0.14477") + max_val = float("0.154899") + mean = float("-0.0220724") + std = float("0.0559826") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100414") + max_val = float("0.0965722") + mean = float("-0.000481739") + std = float("0.00788359") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [384] + dtype = "float32" + min_val = float("-2.9234") + max_val = float("1.32689") + mean = float("-0.300856") + std = float("0.563737") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [384] + dtype = "float32" + min_val = float("0.633896") + max_val = float("2.47246") + mean = float("1.15988") + std = float("0.257349") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [384] + dtype = "float32" + min_val = float("0.0120681") + max_val = float("0.111573") + mean = 
float("0.027173") + std = float("0.0132211") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [384] + dtype = "float32" + min_val = float("-0.269578") + max_val = float("0.241792") + mean = float("0.0299257") + std = float("0.0746028") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0777711") + max_val = float("0.0733026") + mean = float("-9.30129e-05") + std = float("0.00423326") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [256] + dtype = "float32" + min_val = float("-2.04675") + max_val = float("1.2869") + mean = float("-0.92413") + std = float("0.542635") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [256] + dtype = "float32" + min_val = float("0.509654") + max_val = float("1.69024") + mean = float("1.05364") + std = float("0.177449") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [256] + dtype = "float32" + min_val = float("0.0016847") + max_val = float("0.0202013") + mean = float("0.00552268") + std = float("0.00242365") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [256] + dtype = "float32" + min_val = float("-0.247824") + max_val = float("0.180174") + mean = float("-0.0483161") + std = float("0.064182") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.211445") + max_val = float("0.154025") + mean = float("-0.00090718") + std = float("0.0139364") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("-0.0146056") + max_val = float("0.00252242") + mean = float("-0.00513018") + std = float("0.00389486") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.340895") + max_val = float("0.243469") + mean = float("-0.00395929") + std = float("0.0107136") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("-1.9141") + max_val = float("0.53448") + mean = float("-0.208812") + std = float("0.434585") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.139627") + max_val = float("3.23019") + mean = float("0.63562") + std = float("0.668608") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("9.81546e-05") + max_val = float("0.00262635") + mean = float("0.000631594") + std = float("0.000470416") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96] + dtype = "float32" + min_val = float("-0.0508496") + max_val = float("0.0645139") + mean = float("0.0073241") + std = float("0.0226978") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0529209") + max_val = float("0.0938109") + mean = float("-0.00068654") + std = float("0.00780134") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype 
= "float32" + min_val = float("-1.91385") + max_val = float("0.535947") + mean = float("-0.208472") + std = float("0.434758") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.343945") + max_val = float("5.46861") + mean = float("1.08565") + std = float("0.883653") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("0.000857905") + max_val = float("0.0144521") + mean = float("0.00502113") + std = float("0.0025215") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96] + dtype = "float32" + min_val = float("-0.134633") + max_val = float("0.206261") + mean = float("0.0108598") + std = float("0.0610727") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0417476") + max_val = float("0.0707409") + mean = float("-0.000200496") + std = float("0.00586268") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("-2.46669") + max_val = float("-0.0188941") + mean = float("-1.22596") + std = float("0.444206") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.540095") + max_val = float("1.63859") + mean = float("0.945542") + std = float("0.172479") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("0.0347183") + max_val = float("0.227627") + mean = float("0.082417") + std = float("0.0336491") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96] + dtype = "float32" + min_val = float("-2.59922") + max_val = float("2.15076") + mean = float("-0.188655") + std = float("0.479579") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.159603") + max_val = float("0.105542") + mean = float("-0.000422661") + std = float("0.00713371") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("-1.38744") + max_val = float("0.563004") + mean = float("-0.132441") + std = float("0.347447") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.0452771") + max_val = float("1.86502") + mean = float("0.460871") + std = float("0.366358") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("7.20046e-05") + max_val = float("0.00271049") + mean = float("0.000780889") + std = float("0.000618051") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96] + dtype = "float32" + min_val = float("-0.0499407") + max_val = float("0.0480118") + mean = float("0.00767865") + std = float("0.0176588") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0484855") + max_val = float("0.0469527") + mean = float("-0.000557248") + std = float("0.00696514") + data = None + + +class 
Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("-1.38716") + max_val = float("0.565575") + mean = float("-0.131901") + std = float("0.347951") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.373276") + max_val = float("2.32827") + mean = float("0.902354") + std = float("0.426303") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("0.00300443") + max_val = float("0.0229962") + mean = float("0.00858887") + std = float("0.00415652") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96] + dtype = "float32" + min_val = float("-0.106265") + max_val = float("0.119063") + mean = float("0.0359685") + std = float("0.0431121") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0601192") + max_val = float("0.0479345") + mean = float("-0.000334461") + std = float("0.00588243") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("-3.32059") + max_val = float("0.366033") + mean = float("-1.1777") + std = float("0.556588") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.470758") + max_val = float("1.9813") + mean = float("1.03925") + std = float("0.238611") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("0.0279332") + max_val = float("0.176668") + mean = float("0.0504175") + std = float("0.0177105") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96] + dtype = "float32" + min_val = float("-1.05972") + max_val = float("0.787961") + mean = float("-0.0421876") + std = float("0.278962") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.152735") + max_val = float("0.158912") + mean = float("-0.000426001") + std = float("0.00705743") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("-1.24949") + max_val = float("0.583942") + mean = float("-0.109112") + std = float("0.292117") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0224878") + max_val = float("1.27796") + mean = float("0.324443") + std = float("0.192946") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("2.48592e-05") + max_val = float("0.00308798") + mean = float("0.000656812") + std = float("0.000490412") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-0.0398012") + max_val = float("0.0538955") + mean = float("0.00423704") + std = float("0.0172967") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0406747") + max_val = float("0.0494878") + mean = 
float("-0.000325615") + std = float("0.0071059") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("-1.24929") + max_val = float("0.586311") + mean = float("-0.108658") + std = float("0.29268") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.311326") + max_val = float("1.67043") + mean = float("0.747441") + std = float("0.257878") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("0.00302674") + max_val = float("0.0184666") + mean = float("0.00857766") + std = float("0.0033254") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-0.105385") + max_val = float("0.147591") + mean = float("0.0293962") + std = float("0.0383478") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0728298") + max_val = float("0.065903") + mean = float("-0.000300919") + std = float("0.00597289") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("-3.5826") + max_val = float("0.291706") + mean = float("-1.12744") + std = float("0.572685") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.511064") + max_val = float("2.19222") + mean = float("1.05217") + std = float("0.238287") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("0.021508") + max_val = float("0.0772456") + mean = float("0.0390789") + std = float("0.00924531") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-0.95569") + max_val = float("0.64461") + mean = float("-0.0425366") + std = float("0.216225") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0984925") + max_val = float("0.137263") + mean = float("-0.000483231") + std = float("0.00714155") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("-0.891765") + max_val = float("0.530315") + mean = float("-0.160042") + std = float("0.28168") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0202036") + max_val = float("1.40549") + mean = float("0.324747") + std = float("0.213549") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("5.1999e-05") + max_val = float("0.00308025") + mean = float("0.000681748") + std = float("0.000468256") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-0.0356116") + max_val = float("0.0543912") + mean = float("0.00763867") + std = float("0.0160098") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = 
float("-0.050403") + max_val = float("0.0470333") + mean = float("-0.000602859") + std = float("0.00719125") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("-0.891522") + max_val = float("0.532005") + mean = float("-0.15962") + std = float("0.282144") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.170998") + max_val = float("1.78064") + mean = float("0.708933") + std = float("0.284476") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("0.00181135") + max_val = float("0.0235388") + mean = float("0.00884351") + std = float("0.00329263") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-0.0317818") + max_val = float("0.148669") + mean = float("0.0443214") + std = float("0.0385248") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0673552") + max_val = float("0.0665555") + mean = float("-0.000406403") + std = float("0.00600122") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("-2.65797") + max_val = float("0.0644665") + mean = float("-1.06329") + std = float("0.488575") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.510122") + max_val = float("1.73722") + mean = float("1.01545") + std = float("0.193669") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("0.0172563") + max_val = float("0.0595435") + mean = float("0.0301509") + std = float("0.00732214") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-0.801324") + max_val = float("0.759004") + mean = float("-0.0646748") + std = float("0.211257") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0799583") + max_val = float("0.12863") + mean = float("-0.000463251") + std = float("0.00696947") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("-0.979363") + max_val = float("0.488329") + mean = float("-0.1357") + std = float("0.278693") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("0.0499672") + max_val = float("1.15174") + mean = float("0.296075") + std = float("0.172795") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96] + dtype = "float32" + min_val = float("0.000124111") + max_val = float("0.00434228") + mean = float("0.00108239") + std = float("0.000694533") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-0.0430023") + max_val = float("0.0614512") + mean = float("0.00682349") + std = float("0.019208") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" 
+ shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0730409") + max_val = float("0.0734237") + mean = float("-0.000668194") + std = float("0.00816827") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("-0.979598") + max_val = float("0.490087") + mean = float("-0.135308") + std = float("0.279185") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("0.240111") + max_val = float("1.69891") + mean = float("0.604647") + std = float("0.228294") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96] + dtype = "float32" + min_val = float("0.00464956") + max_val = float("0.0447737") + mean = float("0.0124628") + std = float("0.00526411") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-0.088988") + max_val = float("0.163347") + mean = float("0.0332765") + std = float("0.0457333") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070586") + max_val = float("0.053917") + mean = float("-0.000353734") + std = float("0.00603503") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("-3.46749") + max_val = float("0.20134") + mean = float("-1.00429") + std = float("0.548683") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("0.68469") + max_val = float("2.50521") + mean = float("1.07421") + std = float("0.212064") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96] + dtype = "float32" + min_val = float("0.0128335") + max_val = float("0.0562505") + mean = float("0.0252273") + std = float("0.00835494") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [96] + dtype = "float32" + min_val = float("-0.594873") + max_val = float("0.694291") + mean = float("-0.0599848") + std = float("0.200504") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0875016") + max_val = float("0.0958638") + mean = float("-0.000393602") + std = float("0.00713622") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [96] + dtype = "float32" + min_val = float("-0.623249") + max_val = float("0.450355") + mean = float("-0.0811173") + std = float("0.25665") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [96] + dtype = "float32" + min_val = float("0.0905173") + max_val = float("1.30172") + mean = float("0.309137") + std = float("0.196898") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [96] + dtype = "float32" + min_val = float("0.000482307") + max_val = float("0.0212544") + mean = float("0.00391036") + std = float("0.00335167") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [96] + dtype = "float32" + min_val = float("-0.0380137") + max_val = float("0.0274317") + mean = float("0.000597392") + std = float("0.0117867") + data = None + + +class 
Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0967686") + max_val = float("0.0726096") + mean = float("-0.00111676") + std = float("0.00943776") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [96] + dtype = "float32" + min_val = float("-0.62253") + max_val = float("0.451504") + mean = float("-0.0806935") + std = float("0.256953") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [96] + dtype = "float32" + min_val = float("0.210918") + max_val = float("1.42997") + mean = float("0.527932") + std = float("0.258611") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [96] + dtype = "float32" + min_val = float("0.0108854") + max_val = float("0.101724") + mean = float("0.0340185") + std = float("0.0173202") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.10483") + max_val = float("0.0991255") + mean = float("-0.00462957") + std = float("0.0392523") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0996365") + max_val = float("0.0540305") + mean = float("-0.00042977") + std = float("0.00592197") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [96] + dtype = "float32" + min_val = float("-2.4099") + max_val = float("0.510062") + mean = float("-0.827896") + std = float("0.467957") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [96] + dtype = "float32" + min_val = float("0.855439") + max_val = float("2.18052") + mean = float("1.27541") + std = float("0.20896") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [96] + dtype = "float32" + min_val = float("0.0103972") + max_val = float("0.0527158") + mean = float("0.0209256") + std = float("0.00862648") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-0.780321") + max_val = float("0.470817") + mean = float("-0.061274") + std = float("0.196346") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.154701") + max_val = float("0.153806") + mean = float("-0.00026052") + std = float("0.00735431") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("-3.15956") + max_val = float("1.89061") + mean = float("0.502181") + std = float("0.861277") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("0.209789") + max_val = float("2.62802") + mean = float("0.557131") + std = float("0.318659") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96] + dtype = "float32" + min_val = float("0.00944476") + max_val = float("0.145226") + mean = float("0.0342646") + std = float("0.0234271") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-0.271688") + max_val = float("0.303077") + mean = float("-0.0264941") + std = 
float("0.0868152") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.190092") + max_val = float("0.235795") + mean = float("-0.00054682") + std = float("0.0152601") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("-4.92412") + max_val = float("1.57941") + mean = float("0.384226") + std = float("1.04886") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("0.411425") + max_val = float("6.77791") + mean = float("1.69479") + std = float("1.30749") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96] + dtype = "float32" + min_val = float("0.0059326") + max_val = float("0.187703") + mean = float("0.0313027") + std = float("0.0270184") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-0.122136") + max_val = float("0.395194") + mean = float("0.0355431") + std = float("0.0933339") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.115428") + max_val = float("0.143096") + mean = float("0.000288353") + std = float("0.0138526") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("-2.27512") + max_val = float("1.75006") + mean = float("-0.125702") + std = float("0.740468") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.632726") + max_val = float("2.96908") + mean = float("1.08749") + std = float("0.283555") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("0.0130979") + max_val = float("0.31876") + mean = float("0.04291") + std = float("0.035214") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192] + dtype = "float32" + min_val = float("-0.47354") + max_val = float("0.278468") + mean = float("-0.0584653") + std = float("0.115063") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0811233") + max_val = float("0.11238") + mean = float("-0.000121273") + std = float("0.00716338") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [128] + dtype = "float32" + min_val = float("-2.81253") + max_val = float("1.96258") + mean = float("-0.709313") + std = float("0.64886") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [128] + dtype = "float32" + min_val = float("0.302011") + max_val = float("2.86022") + mean = float("1.01859") + std = float("0.279425") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [128] + dtype = "float32" + min_val = float("0.000689708") + max_val = float("0.0143167") + mean = float("0.00379586") + std = float("0.00196197") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [128] + dtype = "float32" + min_val = float("-0.240616") + max_val = 
float("0.230863") + mean = float("0.00348518") + std = float("0.0801109") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.16828") + max_val = float("0.191318") + mean = float("-0.00143145") + std = float("0.0216253") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("-0.0182017") + max_val = float("-0.00100735") + mean = float("-0.00761377") + std = float("0.00459165") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.297058") + max_val = float("0.124247") + mean = float("-0.00811798") + std = float("0.0180434") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0524219") + max_val = float("0.062819") + mean = float("-0.00145834") + std = float("0.0124603") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.053396") + max_val = float("0.0780475") + mean = float("-0.000432103") + std = float("0.0105215") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0907736") + max_val = 
float("0.0889891") + mean = float("-0.000674195") + std = float("0.0115766") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0701343") + max_val = float("0.0744403") + mean = float("-0.000969115") + std = float("0.0132523") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0625249") + max_val = float("0.0628193") + mean = float("-0.000704405") + std = float("0.010522") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.105534") + max_val = float("0.0876318") + mean = float("-0.000291303") + std = float("0.0118198") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") 
+ data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0927544") + max_val = float("0.067179") + mean = float("-0.00167319") + std = float("0.0164656") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0662936") + max_val = float("0.0926268") + mean = float("-0.000546134") + std = float("0.0110591") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.115861") + max_val = float("0.0843934") + mean = float("-0.000390165") + std = float("0.0126271") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.156722") + max_val = float("0.12438") + mean = float("-0.00240073") + std = float("0.0227151") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") 
+ data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.133366") + max_val = float("0.190723") + mean = float("-0.000461332") + std = float("0.0215494") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [96] + dtype = "float32" + min_val = float("-3.40388") + max_val = float("3.27594") + mean = float("0.331") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [96] + dtype = "float32" + min_val = float("0.861639") + max_val = float("4.91749") + mean = float("1.91516") + std = float("0.75496") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [96] + dtype = "float32" + min_val = float("0.674644") + max_val = float("20.4484") + mean = float("2.3946") + std = float("2.42082") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [96] + dtype = "float32" + min_val = float("-1.41455") + max_val = float("1.80091") + mean = float("-0.328594") + std = float("0.607956") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.115845") + max_val = float("0.115419") + mean = float("-0.000438744") + std = float("0.0120833") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.153743") + max_val = float("0.135272") + mean = float("-0.000740633") + std = float("0.0191711") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.307002") + max_val = float("0.202588") + mean = float("-4.43961e-05") + std = float("0.025069") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + 
shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.297631") + max_val = float("0.278985") + mean = float("-0.00146872") + std = float("0.0683342") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..c231ef3a7 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +152334c04694bdeb79b77702f66c2b2b0bf6070104e39ed497a16b2b7c9bf19f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py new file mode 100644 index 000000000..0488f0946 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py @@ -0,0 +1,108 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000453165") + std = float("0.0212828") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 12, 1] + dtype = "int32" + data = [4, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 27216] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00543798") + std = float("0.0735419") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 12, 4] + dtype = "float32" + data = [ + 810.02, + 1015.02, + 826.828, + 1104.59, + 803.556, + 862.244, + 821.01, + 939.512, + 685.253, + 848.195, + 696.242, + 913.171, + 707.232, + 783.219, + 720.162, + 865.756, + 705.293, + 614.634, + 718.869, + 688.39, + 622.545, + 934.244, + 636.768, + 1037.85, + 625.778, + 567.219, + 640.646, + 632.195, + 605.091, + 567.219, + 617.374, + 632.195, + 538.505, + 763.902, + 548.849, + 864.0, + 536.566, + 567.219, + 550.788, + 621.659, + 513.939, + 978.146, + 529.455, + 1074.73, + 789.98, + 570.732, + 806.788, + 637.463, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("0.365907") + mean = 
float("5.42289e-05") + std = float("0.00308233") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("0.95733") + mean = float("0.00162842") + std = float("0.0295307") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py new file mode 100644 index 000000000..8a1f2862b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py @@ -0,0 +1,192 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (1x27216xi64) <- (1x12x27216xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("12"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 + + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x27216xi64) <- (1x27216xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (12xi32) <- (1x12x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (27216xi64) <- (1x27216xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (27216xi32) <- (12xi32, 27216xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 27216] + + # pd_op.reshape: (1x27216xi32) <- (27216xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (1x27216xb) <- (1x27216xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x27216xi32) <- (1x27216xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (1x27216xi32) <- (1x27216xb, 1x27216xi32, 1x27216xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (12x4xf32) <- (1x12x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (27216x4xf32) <- (12x4xf32, 27216xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 27216, 4] + + # pd_op.reshape: (1x27216x4xf32) <- 
(27216x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x27216x11xf32) <- (1x27216xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (1x27216x10xf32) <- (1x27216x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x12x1xf32) <- (1x12x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x27216xf32) <- (1x12x27216xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (1x27216x1xf32) <- (1x27216xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x27216x10xf32) <- (1x27216x10xf32, 1x27216x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..f7e774060 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ 
+b3c06a5ff7d63f2fddf054f6dc5423b30986582d3ae6be2b7b5c8465e31f6fac \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_net.json new file mode 100644 index 000000000..381598f86 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "PP-YOLOE_plus_SOD-largesize-L", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py new file mode 100644 index 000000000..d17aba14e --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 48384, 10] + dtype = "float32" + min_val = float("1.08574e-08") + max_val = float("0.85674") + mean = float("0.00225546") + std = float("0.010105") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 48384] + dtype = "int32" + min_val = 0 + max_val = 10 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 48384, 10] + dtype = "float32" + max_val = float("0.949472") + mean = float("0.000146427") + std = float("0.0089493") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py new file mode 100644 index 000000000..d4c2470ef --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py @@ -0,0 +1,110 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x-1x11xf32) <- (1x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + data_1 % paddle.cast(full_0, data_1.dtype), full_0 + ) + del data_1, full_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.slice: (1x-1x10xf32) <- (1x-1x11xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + ) + del full_int_array_0, full_int_array_1, one_hot_0 + + # pd_op.pow: (1x-1x10xf32) <- (1x-1x10xf32) + pow_0 = paddle._C_ops.pow(data_0, float("2")) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x10xf32) <- (1x-1x10xf32, 1xf32) + scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) + del pow_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x-1x10xf32) <- (1x-1x10xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) + del full_2 + + # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) + multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + + # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) + multiply_1 = paddle._C_ops.multiply(data_2, slice_0) + del slice_0 + + # pd_op.add: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) + 
add_0 = paddle._C_ops.add(multiply_0, multiply_1) + + # pd_op.bce_loss: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) + bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) + del data_0 + + # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) + multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_2 = [] + + # pd_op.sum: (xf32) <- (1x-1x10xf32, 0xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + + # pd_op.sum: (xf32) <- (1x-1x10xf32, 0xi64) + sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + del data_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) + del full_3, full_4, sum_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_0, clip_0) + del ( + add_0, + bce_loss_0, + clip_0, + full_1, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + scale_0, + scale_1, + sum_0, + ) + + return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..ee2d352b9 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +c1cb87f0287a3fcb24a3b45b530a98d9dec2a44e18da7b06651cfe11d2f53243 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_0/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_0/input_meta.py new file mode 100644 index 000000000..5d38b3cde --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_0/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-8.83133") + max_val = float("8.15217") + mean = float("0.0149629") + std = float("1.02783") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int32" + data = [3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [192] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_0/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_0/model.py new file mode 100644 index 000000000..9022b6b96 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_0/model.py @@ -0,0 +1,331 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + 
parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_1, paddle.int64) + del data_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + del data_2 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("16"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, floor_divide_0, cast_0, full_1] + del cast_0, floor_divide_0, full_0, full_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (16x-1x-1x32xf32) <- (16x192x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_0, stack_0) + del data_0, stack_0 + + # pd_op.transpose: (16x32x-1x-1xf32) <- (16x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: 
(1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_1 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + + # pd_op.gelu: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, 
[1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_2 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.transpose: (16x-1x-1x32xf32) <- (16x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [16, -1, 32] + + # pd_op.reshape: (16x-1x32xf32) <- (16x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [192] + + # pd_op.slice: (16x-1x32xf32) <- (16x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_int_array_1, + full_int_array_3, + full_int_array_4, + gelu_0, + mean_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_1, + stack_2, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_0/weight_meta.py new file mode 100644 index 000000000..c8ef9ee5b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_0/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.102238") + max_val = float("0.0927036") + mean = float("0.000113053") + std = float("0.0228297") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.112767") + max_val = float("0.119055") + mean = float("3.03685e-05") + std = float("0.0279281") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.14684") + max_val = float("0.167006") + mean = float("-0.00012447") + std = float("0.0356576") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.192442") + max_val = float("0.193324") + mean = float("0.000265612") + std = float("0.0496898") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.318764") + max_val = float("0.303228") + mean = float("0.00108387") + std = float("0.084175") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.784862") + max_val = float("0.770297") + mean = float("-0.00846063") + std = float("0.251293") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.10255") + max_val = float("0.106043") + mean = float("2.11022e-05") + std = float("0.0227488") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.107219") + max_val = float("0.119778") + mean = float("1.75684e-05") + std = float("0.0278287") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.146602") + max_val = float("0.158066") + mean = float("0.000117266") + std = float("0.0357741") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.187517") + max_val = float("0.200635") + mean = float("0.000273748") + std = 
float("0.0499875") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.330627") + max_val = float("0.338776") + mean = float("-0.0014634") + std = float("0.0833068") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.866138") + max_val = float("0.720528") + mean = float("-0.00521132") + std = float("0.248346") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..c00e7783f --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +407b15c03c74c9b8977ce4970deb83767907c2c418ebb97a894bac7d97061ec5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_1/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_1/input_meta.py new file mode 100644 index 000000000..08d370d8d --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_1/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int32" + data = [38] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-4.19262") + max_val = float("4.39353") + mean = float("3.92669e-05") + std = float("1.00007") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [192] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_1/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_1/model.py new file mode 100644 index 000000000..66004e829 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_1/model.py @@ -0,0 +1,394 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_0, paddle.int64) + del data_0 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = 
paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_2) + del data_2 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("16"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, subtract_0, full_2] + del subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (16x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_0, paddle.float32 + ) + del full_3, stack_0 + + # pd_op.cast: (16x192x32xf32) <- (16x192x32xf32) + cast_1 = paddle._C_ops.cast(data_1, paddle.float32) + del data_1 + + # pd_op.cast: (16x-1x32xf32) <- (16x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([16x192x32xf32, 16x-1x32xf32]) <- (16x192x32xf32, 16x-1x32xf32) + combine_1 = [cast_1, cast_2] + + # pd_op.concat: (16x-1x32xf32) <- ([16x192x32xf32, 16x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_4) + del combine_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_1, floor_divide_1, cast_0, full_2] + del cast_0, floor_divide_1, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (16x-1x-1x32xf32) <- (16x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del stack_1 + + # pd_op.transpose: (16x32x-1x-1xf32) <- (16x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + 
transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.gelu: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) 
+ + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + + # pd_op.transpose: (16x-1x-1x32xf32) <- (16x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [16, -1, 32] + + # pd_op.reshape: (16x-1x32xf32) <- (16x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [192] + + # pd_op.slice: (16x-1x32xf32) <- (16x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + cast_1, + cast_2, + concat_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_4, + full_int_array_1, + full_int_array_3, + full_int_array_4, + 
full_with_tensor_0, + gelu_0, + mean_0, + multiply_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_2, + stack_3, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_1/weight_meta.py new file mode 100644 index 000000000..20bb61da9 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_1/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.09948") + max_val = float("0.108094") + mean = float("2.14745e-05") + std = float("0.0227671") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.136599") + max_val = float("0.117266") + mean = float("-0.000153645") + std = float("0.0278266") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.162164") + max_val = float("0.152977") + mean = float("7.89058e-06") + std = float("0.0357809") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.190406") + max_val = float("0.204609") + mean = float("1.3685e-05") + std = float("0.050339") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.362011") + max_val = float("0.359679") + mean = float("-0.000370573") + std = float("0.083461") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.828661") + max_val = float("0.758363") + mean = float("0.00340537") + std = float("0.251425") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.101958") + max_val = float("0.108789") + mean = float("5.72181e-08") + std = float("0.0226885") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [32] + 
dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.126104") + max_val = float("0.115011") + mean = float("-8.96263e-05") + std = float("0.027827") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.158255") + max_val = float("0.142761") + mean = float("-0.000107769") + std = float("0.0357698") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.195684") + max_val = float("0.199793") + mean = float("0.000253579") + std = float("0.0502081") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.345247") + max_val = float("0.291583") + mean = float("0.000151203") + std = float("0.0836349") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.793187") + max_val = float("0.722647") + mean = float("-0.00534969") + std = float("0.250514") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..59b6a77fb --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +0f8a6b7104d3aca48b5037c599fd68df9cebbccbe6d26742ef5ec762870148e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_10/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_10/input_meta.py new file mode 100644 index 000000000..0e1a97ce4 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 32, 39, 5] + dtype = "float32" + min_val = float("-3.68611") + max_val = float("3.82573") + mean = float("0.000837713") + std = float("0.992063") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_10/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_10/model.py new file mode 100644 index 000000000..31d1be47e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/model.py @@ -0,0 +1,238 @@ +import 
paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + ): + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + del conv2d_0, reshape_0 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + data_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_1) + del conv2d_1, reshape_1 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + data_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_2) + del conv2d_2, reshape_2 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + data_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_3) + del conv2d_3, reshape_3 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + data_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_4) + del conv2d_4, reshape_4 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + data_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_13 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_5) + del conv2d_5, reshape_5 + + # builtin.combine: 
([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_0 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_0, full_int_array_1, False) + del stack_0 + + # pd_op.gelu: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_1, False) + del mean_1 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_6) + del conv2d_6, reshape_6 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_7) + del conv2d_7, reshape_7 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_8) + del conv2d_8, reshape_8 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_9) + del conv2d_9, reshape_9 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_10) + del conv2d_10, reshape_10 + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) 
+ reshape_11 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_11) + del conv2d_11, reshape_11 + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_1 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + del full_int_array_1, stack_1 + + return mean_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_10/weight_meta.py new file mode 100644 index 000000000..e2fb71dc7 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.106031") + max_val = float("0.116393") + mean = float("0.000256864") + std = float("0.0233445") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.131822") + max_val = float("0.125447") + mean = float("0.000120529") + std = float("0.0284273") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.166161") + max_val = float("0.162014") + mean = float("0.000283896") + std = float("0.0362103") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.188534") + max_val = float("0.206915") + mean = float("0.000295986") + std = float("0.050662") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.362962") + max_val = float("0.358126") + mean = float("-7.70989e-05") + std = float("0.0837865") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = 
"parameter_11" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.843709") + max_val = float("0.757844") + mean = float("0.00369445") + std = float("0.252115") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.113689") + max_val = float("0.110907") + mean = float("-1.82819e-05") + std = float("0.023429") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.124724") + max_val = float("0.118932") + mean = float("-9.94658e-05") + std = float("0.0285337") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.168642") + max_val = float("0.14708") + mean = float("-0.000117223") + std = float("0.0363065") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.19885") + max_val = float("0.201538") + mean = float("0.000227899") + std = float("0.0505781") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.343614") + max_val = float("0.288478") + mean = float("0.000101018") + std = float("0.0839068") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.790273") + max_val = float("0.733436") + mean = float("-0.00547106") + std = float("0.250736") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt similarity index 100% rename from paddle_samples/PaddleX/TimesNet_cls/subgraph_7/graph_hash.txt rename to paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_11/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_11/input_meta.py new file mode 
100644 index 000000000..4d6cecc21 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_11/input_meta.py @@ -0,0 +1,19 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [1] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [5] + dtype = "int32" + data = [4, 3, 4, 3, 4] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [192] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_11/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_11/model.py new file mode 100644 index 000000000..e5e5f2c0e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_11/model.py @@ -0,0 +1,54 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2): + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(data_0, full_0, float("1"), True) + del full_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_0 = [data_0] + del data_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_1 = [scale_0] + del scale_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (xi32) <- (5xi32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice(data_1, [0], stack_0, stack_1, [-1], [0]) + del data_1, stack_0, stack_1 + + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) + + # pd_op.remainder: (xi64) <- (xi64, xi64) + remainder_0 = paddle._C_ops.remainder(data_2, cast_0) + del cast_0, data_2 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_1) + del full_1, remainder_0, slice_0 + + return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_11/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_11/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..fce86584a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +4f91c3f0b64160211924301dbdf78ec6dc0174a3afa7a61a30995162d1e27b07 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_12/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_12/input_meta.py new file mode 100644 index 000000000..2ab647741 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_12/input_meta.py @@ -0,0 +1,75 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 192, 32] + dtype = 
"float32" + min_val = float("-0.598028") + max_val = float("0.622307") + mean = float("-0.000209205") + std = float("0.129719") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-0.630425") + max_val = float("0.690109") + mean = float("-7.41824e-05") + std = float("0.113588") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-0.598028") + max_val = float("0.622307") + mean = float("-0.000209205") + std = float("0.129719") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-0.630425") + max_val = float("0.690109") + mean = float("-7.41824e-05") + std = float("0.113588") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-0.598028") + max_val = float("0.622307") + mean = float("-0.000209205") + std = float("0.129719") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [16, 5] + dtype = "float32" + min_val = float("6.73973") + max_val = float("28.3813") + mean = float("16.0856") + std = float("4.37869") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-8.49015") + max_val = float("8.05498") + mean = float("0.0150445") + std = float("1.02128") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_12/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_12/model.py new file mode 100644 index 000000000..1d0126b46 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_12/model.py @@ -0,0 +1,65 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # builtin.combine: ([16x192x32xf32, 16x192x32xf32, 16x192x32xf32, 16x192x32xf32, 16x192x32xf32]) <- (16x192x32xf32, 16x192x32xf32, 16x192x32xf32, 16x192x32xf32, 16x192x32xf32) + combine_0 = [data_0, data_1, data_2, data_3, data_4] + del data_0, data_1, data_2, data_3, data_4 + + # pd_op.stack: (16x192x32x5xf32) <- ([16x192x32xf32, 16x192x32xf32, 16x192x32xf32, 16x192x32xf32, 16x192x32xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.softmax: (16x5xf32) <- (16x5xf32) + softmax_0 = paddle._C_ops.softmax(data_5, 1) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.unsqueeze: (16x1x5xf32) <- (16x5xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(softmax_0, full_int_array_0) + + # pd_op.unsqueeze: (16x1x1x5xf32) <- (16x1x5xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_0) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, 192, 32, 1] + + # pd_op.tile: (16x192x32x5xf32) <- (16x1x1x5xf32, 4xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_1, full_int_array_1) + + # pd_op.multiply: (16x192x32x5xf32) <- (16x192x32x5xf32, 16x192x32x5xf32) + multiply_0 = paddle._C_ops.multiply(stack_0, tile_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-1] + + # pd_op.sum: (16x192x32xf32) <- (16x192x32x5xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_2, None, False) + + # pd_op.add: (16x192x32xf32) <- (16x192x32xf32, 16x192x32xf32) + add_0 = 
paddle._C_ops.add(sum_0, data_6) + del ( + assign_0, + data_6, + full_int_array_0, + full_int_array_1, + full_int_array_2, + multiply_0, + softmax_0, + stack_0, + sum_0, + tile_0, + unsqueeze_0, + unsqueeze_1, + ) + + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..e4f46efd7 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +188cf83c0475a855a984c8a226bb1a089806fdcd434d27a2ca33aaee9a4ba9ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_13/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_13/input_meta.py new file mode 100644 index 000000000..6e1083d6c --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_13/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 96, 1] + dtype = "float32" + min_val = float("-2.06142") + max_val = float("-0.30026") + mean = float("-1.28192") + std = float("0.34683") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 96, 4] + dtype = "float32" + min_val = float("-0.5") + max_val = float("0.533333") + mean = float("-0.0375838") + std = float("0.335692") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_13/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_13/model.py new file mode 100644 index 000000000..c91f633aa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_13/model.py @@ -0,0 +1,203 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, parameter_0, parameter_1, parameter_2, parameter_3, data_0, data_1, data_2 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) + + # pd_op.share_data_: (-1x1x1xf32) <- (-1x1x1xf32) + share_data__0 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) + del data_0 + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (-1x96x1xf32) <- (-1x96x1xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + 
sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (-1x96x1xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (-1x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (-1x1x1xf32) <- (-1x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1x1xf32) <- (-1x1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) + del divide_1, full_0 + + # pd_op.sqrt: (-1x1x1xf32) <- (-1x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.divide: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) + del subtract_0 + + # pd_op.transpose: (-1x1x96xf32) <- (-1x96x1xf32) + transpose_1 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.unsqueeze: (-1x1x96x1x1xf32) <- (-1x1x96xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_1, full_int_array_1) + del transpose_1 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_2 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (-1x1x98x1x1xf32) <- (-1x1x96x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" + ) + del full_int_array_2, unsqueeze_0 + + # pd_op.squeeze: (-1x1x98xf32) <- (-1x1x98x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) + del full_int_array_1, pad3d_0 + + # pd_op.assign: (32x1x3xf32) <- (32x1x3xf32) + assign_0 = parameter_3 + del parameter_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.unsqueeze: (32x1x1x3xf32) <- (32x1x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + del assign_0 + + # pd_op.unsqueeze: (-1x1x1x98xf32) <- (-1x1x98xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) + del squeeze_0 + + # pd_op.conv2d: (-1x32x1x96xf32) <- (-1x1x1x98xf32, 32x1x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_1, unsqueeze_2 + + # pd_op.squeeze: (-1x32x96xf32) <- (-1x32x1x96xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + del conv2d_0, full_int_array_3 + + # pd_op.transpose: (-1x96x32xf32) <- (-1x32x96xf32) + transpose_2 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.matmul: (-1x96x32xf32) <- (-1x96x4xf32, 4x32xf32) + matmul_0 = paddle._C_ops.matmul(data_1, parameter_2, False, False) + del data_1, parameter_2 + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, -1x96x32xf32) + add_0 = paddle._C_ops.add(transpose_2, matmul_0) + del matmul_0, transpose_2 + + # pd_op.shape64: (3xi64) <- (-1x96x1xf32) + shape64_0 = paddle._C_ops.shape64(divide_2) + del 
divide_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_4, full_int_array_0, [1], [0] + ) + del full_int_array_0, shape64_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [96] + + # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2, full_int_array_4, full_int_array_5 + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, 1x96x32xf32) + add_1 = paddle._C_ops.add(add_0, slice_1) + del add_0, slice_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x96x32xf32, -1x96x32xui8) <- (-1x96x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_1, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_1, full_1 + + # pd_op.transpose: (-1x32x96xf32) <- (-1x96x32xf32) + transpose_3 = paddle._C_ops.transpose(dropout_0, [0, 2, 1]) + del dropout_0 + + # pd_op.matmul: (-1x32x192xf32) <- (-1x32x96xf32, 96x192xf32) + matmul_1 = paddle._C_ops.matmul(transpose_3, parameter_1, False, False) + del parameter_1, transpose_3 + + # pd_op.add: (-1x32x192xf32) <- (-1x32x192xf32, 192xf32) + add_2 = paddle._C_ops.add(matmul_1, parameter_0) + del matmul_1, parameter_0 + + # pd_op.transpose: (-1x192x32xf32) <- (-1x32x192xf32) + transpose_0 = paddle._C_ops.transpose(add_2, [0, 2, 1]) + del add_2, share_data__0, sqrt_0 + + return transpose_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_13/weight_meta.py new file mode 100644 index 000000000..c75ae3f39 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_13/weight_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [192] + dtype = "float32" + min_val = float("-0.101041") + max_val = float("0.104398") + mean = float("0.00159347") + std = float("0.058136") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [96, 192] + dtype = "float32" + min_val = float("-0.123315") + max_val = float("0.126816") + mean = float("0.000183364") + std = float("0.0590505") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [4, 32] + dtype = "float32" + min_val = float("-0.529197") + max_val = float("0.515945") + mean = float("-0.00250537") + std = float("0.290832") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 1, 3] + dtype = "float32" + min_val = float("-1.38319") + max_val = float("2.03246") + mean = float("0.188744") + std = float("0.811947") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..c8237f439 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +b2f90bd3d39f65b7d05191485f78b9cf3162cbcce2f87f1f22000f97a6635e8f \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ 
b/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_2/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_2/input_meta.py new file mode 100644 index 000000000..e051f4487 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_2/input_meta.py @@ -0,0 +1,37 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [3] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [5] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3, 192, 32] + dtype = "float32" + min_val = float("-7.04948") + max_val = float("9.58753") + mean = float("-0.00771211") + std = float("1.07541") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [192] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_2/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_2/model.py new file mode 100644 index 000000000..651f6e004 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_2/model.py @@ -0,0 +1,401 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) + del data_4 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_1, subtract_0, full_1] + del data_1, subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del full_2, stack_0 + + # pd_op.cast: (-1x192x32xf32) <- (-1x192x32xf32) + cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 + + # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, 
paddle.float32) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x192x32xf32, -1x-1x32xf32]) <- (-1x192x32xf32, -1x-1x32xf32) + combine_1 = [cast_1, cast_2] + + # pd_op.concat: (-1x-1x32xf32) <- ([-1x192x32xf32, -1x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [data_0, floor_divide_1, cast_0, full_1] + del cast_0, floor_divide_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del stack_1 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 
32x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.gelu: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = 
paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [data_0, full_4, full_1] + del data_0, full_1, full_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) + del stack_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [192] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + cast_1, + cast_2, + concat_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_3, + full_int_array_1, + full_int_array_2, + full_int_array_3, + full_with_tensor_0, + gelu_0, + mean_0, + multiply_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_2, + stack_3, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_2/weight_meta.py new file mode 100644 index 000000000..3a28123cc --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_2/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.103984") + max_val = float("0.102089") + mean = float("0.000160244") + std = float("0.023189") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + 
dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.112301") + max_val = float("0.114987") + mean = float("8.12361e-05") + std = float("0.0282732") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.146683") + max_val = float("0.166673") + mean = float("-8.57411e-05") + std = float("0.0359436") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.188074") + max_val = float("0.192521") + mean = float("0.000294916") + std = float("0.0499382") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.316724") + max_val = float("0.305989") + mean = float("0.001123") + std = float("0.084446") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.78535") + max_val = float("0.777555") + mean = float("-0.00845492") + std = float("0.251401") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.104429") + max_val = float("0.0998193") + mean = float("2.95454e-05") + std = float("0.0233169") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.113112") + max_val = float("0.120167") + mean = float("2.94621e-05") + std = float("0.0283125") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.158196") + max_val = float("0.165092") + mean = float("0.000183237") + std = float("0.0361576") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.188011") + max_val = 
float("0.195722") + mean = float("0.000246363") + std = float("0.0502302") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.326088") + max_val = float("0.34024") + mean = float("-0.00140756") + std = float("0.0834774") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.863163") + max_val = float("0.712173") + mean = float("-0.00469805") + std = float("0.248158") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..dede65584 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +540f1a1c8b15551cdd0f329d60f7fe7cc322a6a3f15298423e5b68b13c4cc43c \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_3/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_3/input_meta.py new file mode 100644 index 000000000..25441ab21 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_3/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [3, 192, 32] + dtype = "float32" + min_val = float("-7.04948") + max_val = float("9.58753") + mean = float("-0.00771211") + std = float("1.07541") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_3/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_3/model.py new file mode 100644 index 000000000..434fab3d8 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_3/model.py @@ -0,0 +1,109 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.fft_r2c: (-1x97x32xc64) <- (-1x192x32xf32) + fft_r2c_0 = paddle._C_ops.fft_r2c(data_0, [1], "backward", True, True) + + # pd_op.abs: (-1x97x32xf32) <- (-1x97x32xc64) + abs_0 = paddle._C_ops.abs(fft_r2c_0) + + # pd_op.assign: (-1x97x32xf32) <- (-1x97x32xf32) + assign_0 = abs_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.mean: (97x32xf32) <- (-1x97x32xf32, 1xi64) + mean_0 = paddle._C_ops.mean(abs_0, full_int_array_0, False) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.mean: (97xf32) <- (97x32xf32, 1xi64) + mean_1 = paddle._C_ops.mean(mean_0, full_int_array_1, False) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.set_value_: (97xf32) <- (97xf32, 1xi64, 1xi64, 1xi64) + set_value__0 = paddle._C_ops.set_value_( + mean_1, + full_int_array_0, + full_int_array_2, + full_int_array_2, + [0], 
+ [0], + [], + [1], + [float("0")], + ) + del mean_1 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (5xf32, 5xi64) <- (97xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(set_value__0, full_0, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_0 + + # pd_op.share_data_: (5xi64) <- (5xi64) + share_data__0 = topk_1.detach() + del topk_1 + + # pd_op.cast: (5xi32) <- (5xi64) + cast_0 = paddle._C_ops.cast(share_data__0, paddle.int32) + del share_data__0 + + # pd_op.shape64: (3xi64) <- (-1x192x32xf32) + shape64_0 = paddle._C_ops.shape64(data_0) + del data_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_1 = paddle._C_ops.cast(slice_0, paddle.int32) + del slice_0 + + # pd_op.floor_divide: (5xi32) <- (xi32, 5xi32) + floor_divide_0 = paddle._C_ops.floor_divide(cast_1, cast_0) + del cast_1 + + # pd_op.mean: (-1x97xf32) <- (-1x97x32xf32, 1xi64) + mean_2 = paddle._C_ops.mean(abs_0, full_int_array_1, False) + + # pd_op.index_select: (-1x5xf32) <- (-1x97xf32, 5xi32) + index_select_0 = paddle._C_ops.index_select(mean_2, cast_0, 1) + del ( + abs_0, + assign_0, + assign_1, + cast_0, + fft_r2c_0, + full_int_array_0, + full_int_array_1, + mean_0, + mean_2, + set_value__0, + ) + + return floor_divide_0, index_select_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_3/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_3/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..c670f6da8 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +20fef56040727df448eeb1a05f7a18e5785d7ae35839f722fde80d4a22f8f5f4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_4/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_4/input_meta.py new file mode 100644 index 000000000..23fbcd091 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_4/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 32, 64, 3] + dtype = "float32" + min_val = float("-7.91167") + max_val = float("8.25317") + mean = float("0.000202711") + std = float("1.01386") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_4/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_4/model.py new file mode 100644 index 000000000..80ebd67f4 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_4/model.py @@ -0,0 +1,268 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + 
super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + ): + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + data_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_1) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + data_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_2) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + data_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_3) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + data_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_4) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + data_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_13 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_5) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_0 = [add_0, 
add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_0, full_int_array_1, False) + + # pd_op.gelu: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_1, False) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_6) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_7) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_8) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_9) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_10) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_11) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 
16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_1 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_int_array_1, + gelu_0, + mean_1, + reshape_0, + reshape_1, + reshape_10, + reshape_11, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_0, + stack_1, + ) + + return mean_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_4/weight_meta.py new file mode 100644 index 000000000..b913ee039 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_4/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.10275") + max_val = float("0.09344") + mean = float("0.000134032") + std = float("0.0228583") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.112361") + max_val = float("0.118653") + mean = float("5.61565e-05") + std = float("0.0279607") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.145373") + max_val = float("0.166861") + mean = float("-9.52656e-05") + std = float("0.0356916") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.191757") + max_val = float("0.193765") + mean = float("0.000295524") + std = float("0.0497232") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.317988") + max_val = float("0.303791") + mean = float("0.00111962") + std = float("0.0842165") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name 
= "parameter_11" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.782884") + max_val = float("0.770116") + mean = float("-0.00843537") + std = float("0.251209") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.103917") + max_val = float("0.105406") + mean = float("3.74354e-05") + std = float("0.0227928") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.107645") + max_val = float("0.121197") + mean = float("3.6604e-05") + std = float("0.0278616") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.147247") + max_val = float("0.160106") + mean = float("0.000136359") + std = float("0.0358139") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.187723") + max_val = float("0.201406") + mean = float("0.000276687") + std = float("0.0500147") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.327989") + max_val = float("0.336704") + mean = float("-0.00147316") + std = float("0.0833125") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.867694") + max_val = float("0.717671") + mean = float("-0.00509395") + std = float("0.248265") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..3ea29a59a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +7096065249a442fe087b073010031bbe4a1c8d6e2707d0000f4d8be72faab72e \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_5/input_meta.py 
b/paddle_samples/PaddleX/TimesNet/subgraph_5/input_meta.py new file mode 100644 index 000000000..6c3adf1dc --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_5/input_meta.py @@ -0,0 +1,12 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [5] + dtype = "int32" + data = [4, 3, 4, 3, 4] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [192] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_5/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_5/model.py new file mode 100644 index 000000000..d5dc8fea0 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_5/model.py @@ -0,0 +1,37 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xi32) <- (5xi32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del data_0, full_int_array_0, full_int_array_1 + + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) + + # pd_op.remainder: (xi64) <- (xi64, xi64) + remainder_0 = paddle._C_ops.remainder(data_1, cast_0) + del cast_0, data_1 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_0) + del full_0, remainder_0, slice_0 + + return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..9c533910c --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +9c2bbb3e4b6b3e46cd5a3be27873b919ff3a4707a8afa5460cb8d23f3c81fe4f \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_6/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_6/input_meta.py new file mode 100644 index 000000000..4b468e290 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_6/input_meta.py @@ -0,0 +1,37 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [5] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-3.69325") + max_val = float("4.23172") + mean = float("0.000807828") + std = float("0.99959") + data = None + + +class 
Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [192] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_6/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_6/model.py new file mode 100644 index 000000000..c99385c85 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_6/model.py @@ -0,0 +1,368 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) + del data_4 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_1, subtract_0, full_1] + del data_1, subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del full_2, stack_0 + + # pd_op.cast: (-1x192x32xf32) <- (-1x192x32xf32) + cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 + + # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x192x32xf32, -1x-1x32xf32]) <- (-1x192x32xf32, -1x-1x32xf32) + combine_1 = [cast_1, cast_2] + del cast_1, cast_2 + + # pd_op.concat: (-1x-1x32xf32) <- ([-1x192x32xf32, -1x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1, full_3 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [data_0, floor_divide_1, cast_0, full_1] + del cast_0, floor_divide_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del concat_0, stack_1 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 
= paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + del conv2d_2, reshape_3 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + del conv2d_3, reshape_4 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + del conv2d_4, reshape_5 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, transpose_0 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + del conv2d_5, reshape_6 + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, 
-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + del stack_2 + + # pd_op.gelu: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + del mean_0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + del conv2d_6, reshape_7 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + del conv2d_7, reshape_8 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + del conv2d_8, reshape_9 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + del conv2d_9, reshape_10 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + del conv2d_10, reshape_11 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + del conv2d_11, reshape_12 + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- 
(-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + del full_int_array_1, stack_3 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [data_0, full_4, full_1] + del data_0, full_1, full_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) + del stack_4, transpose_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [192] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del ( + full_int_array_2, + full_int_array_3, + full_with_tensor_0, + multiply_0, + reshape_13, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_6/weight_meta.py new file mode 100644 index 000000000..e2fb71dc7 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_6/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.106031") + max_val = float("0.116393") + mean = float("0.000256864") + std = float("0.0233445") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.131822") + max_val = float("0.125447") + mean = float("0.000120529") + std = float("0.0284273") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.166161") + max_val = float("0.162014") + mean = float("0.000283896") + std = float("0.0362103") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.188534") + max_val = float("0.206915") + mean = float("0.000295986") + std = 
float("0.050662") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.362962") + max_val = float("0.358126") + mean = float("-7.70989e-05") + std = float("0.0837865") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.843709") + max_val = float("0.757844") + mean = float("0.00369445") + std = float("0.252115") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [32, 32, 11, 11] + dtype = "float32" + min_val = float("-0.113689") + max_val = float("0.110907") + mean = float("-1.82819e-05") + std = float("0.023429") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [32, 32, 9, 9] + dtype = "float32" + min_val = float("-0.124724") + max_val = float("0.118932") + mean = float("-9.94658e-05") + std = float("0.0285337") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [32, 32, 7, 7] + dtype = "float32" + min_val = float("-0.168642") + max_val = float("0.14708") + mean = float("-0.000117223") + std = float("0.0363065") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [32, 32, 5, 5] + dtype = "float32" + min_val = float("-0.19885") + max_val = float("0.201538") + mean = float("0.000227899") + std = float("0.0505781") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.343614") + max_val = float("0.288478") + mean = float("0.000101018") + std = float("0.0839068") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [32, 32, 1, 1] + dtype = "float32" + min_val = float("-0.790273") + max_val = float("0.733436") + mean = float("-0.00547106") + std = float("0.250736") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..1fa817232 --- /dev/null +++ 
b/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +61c25a8642cca6853f0da98a5de760f183321f50fd1db253f0c679f86130d597 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_7/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_7/input_meta.py new file mode 100644 index 000000000..6129a4a2e --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_7/input_meta.py @@ -0,0 +1,57 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 192, 32] + dtype = "float32" + min_val = float("-3.50984") + max_val = float("4.62978") + mean = float("0.0016889") + std = float("1.00383") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 1, 1] + dtype = "float32" + data = [ + 0.261247, + 0.145511, + 0.185056, + 0.117109, + 0.135865, + 0.277098, + 0.220224, + 0.474357, + 0.356629, + 0.256262, + 0.133888, + 0.132025, + 0.163574, + 0.168011, + 0.184111, + 0.11935, + ] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 1, 1] + dtype = "float32" + data = [ + -1.43369, + -1.31897, + -1.52516, + -1.58589, + -1.42842, + -0.924512, + -1.46827, + -1.03185, + -1.23558, + -0.841294, + -1.3259, + -1.62626, + -0.705076, + -1.3546, + -1.35865, + -1.34662, + ] diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_7/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_7/model.py new file mode 100644 index 000000000..27f045227 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_7/model.py @@ -0,0 +1,62 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, parameter_1, data_0, data_1, data_2): + # pd_op.matmul: (-1x192x1xf32) <- (-1x192x32xf32, 32x1xf32) + matmul_0 = paddle._C_ops.matmul(data_0, parameter_1, False, False) + del data_0, parameter_1 + + # pd_op.add: (-1x192x1xf32) <- (-1x192x1xf32, 1xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_0) + del matmul_0, parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (-1x1xf32) <- (-1x1x1xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del data_1 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(slice_0, full_int_array_1) + del slice_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 192, 1] + + # pd_op.tile: (-1x192x1xf32) <- (-1x1x1xf32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_2) + del unsqueeze_0 + + # pd_op.multiply: (-1x192x1xf32) <- (-1x192x1xf32, -1x192x1xf32) + multiply_0 = paddle._C_ops.multiply(add_1, tile_0) + del add_1, tile_0 + + # pd_op.slice: (-1x1xf32) <- (-1x1x1xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_2, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del data_2, full_int_array_0 + + # pd_op.unsqueeze: (-1x1x1xf32) <- (-1x1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(slice_1, full_int_array_1) + del full_int_array_1, slice_1 + 
+ # pd_op.tile: (-1x192x1xf32) <- (-1x1x1xf32, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_1, full_int_array_2) + del full_int_array_2, unsqueeze_1 + + # pd_op.add: (-1x192x1xf32) <- (-1x192x1xf32, -1x192x1xf32) + add_0 = paddle._C_ops.add(multiply_0, tile_1) + del multiply_0, tile_1 + + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_7/weight_meta.py new file mode 100644 index 000000000..2ddd55564 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_7/weight_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..828b60c10 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +443ea3bb7515bb372aab3c199d2d8cafa28adad053deff1304f18a2ab5126c29 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_8/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_8/input_meta.py new file mode 100644 index 000000000..64ff9a07b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_8/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 96, 1] + dtype = "float32" + min_val = float("-1.42933") + max_val = float("3.1545") + mean = float("0.408696") + std = float("0.978119") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 96, 4] + dtype = "float32" + min_val = float("-0.5") + max_val = float("0.5") + mean = float("-0.0114936") + std = float("0.286348") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_8/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_8/model.py new file mode 100644 index 000000000..46d0f2d4f --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_8/model.py @@ -0,0 +1,210 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, parameter_0, parameter_1, parameter_2, parameter_3, data_0, data_1, data_2 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.mean: (16x1x1xf32) <- (16x96x1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) + + # pd_op.share_data_: (16x1x1xf32) <- (16x1x1xf32) + share_data__0 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (16x96x1xf32) <- (16x96x1xf32, 16x1x1xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) + del data_0 + + # pd_op.mean: (16x1x1xf32) <- (16x96x1xf32, 
1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) + + # pd_op.subtract: (16x96x1xf32) <- (16x96x1xf32, 16x1x1xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (16x96x1xf32) <- (16x96x1xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (16x1x1xf32) <- (16x96x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del full_int_array_0, pow_0 + + # pd_op.numel: (xi64) <- (16x96x1xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (16x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (16x1x1xf32) <- (16x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x1x1xf32) <- (16x1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) + del divide_1, full_0 + + # pd_op.sqrt: (16x1x1xf32) <- (16x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.divide: (16x96x1xf32) <- (16x96x1xf32, 16x1x1xf32) + divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) + del subtract_0 + + # pd_op.transpose: (16x1x96xf32) <- (16x96x1xf32) + transpose_1 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + del divide_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.unsqueeze: (16x1x96x1x1xf32) <- (16x1x96xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_1, full_int_array_1) + del transpose_1 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_2 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (16x1x98x1x1xf32) <- (16x1x96x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" + ) + del full_int_array_2, unsqueeze_0 + + # pd_op.squeeze: (16x1x98xf32) <- (16x1x98x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) + del full_int_array_1, pad3d_0 + + # pd_op.assign: (32x1x3xf32) <- (32x1x3xf32) + assign_0 = parameter_3 + del parameter_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_3 + + # pd_op.unsqueeze: (32x1x1x3xf32) <- (32x1x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + + # pd_op.unsqueeze: (16x1x1x98xf32) <- (16x1x98xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) + del squeeze_0 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x1x1x98xf32, 32x1x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (16x32x96xf32) <- (16x32x1x96xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + + # pd_op.transpose: (16x96x32xf32) <- (16x32x96xf32) + transpose_2 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del 
squeeze_1 + + # pd_op.matmul: (16x96x32xf32) <- (16x96x4xf32, 4x32xf32) + matmul_0 = paddle._C_ops.matmul(data_1, parameter_2, False, False) + del data_1, parameter_2 + + # pd_op.add: (16x96x32xf32) <- (16x96x32xf32, 16x96x32xf32) + add_0 = paddle._C_ops.add(transpose_2, matmul_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [96] + + # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2, full_int_array_4, full_int_array_5 + + # pd_op.add: (16x96x32xf32) <- (16x96x32xf32, 1x96x32xf32) + add_1 = paddle._C_ops.add(add_0, slice_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (16x96x32xf32, 16x96x32xui8) <- (16x96x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_1, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_1 + + # pd_op.transpose: (16x32x96xf32) <- (16x96x32xf32) + transpose_3 = paddle._C_ops.transpose(dropout_0, [0, 2, 1]) + del dropout_0 + + # pd_op.matmul: (16x32x192xf32) <- (16x32x96xf32, 96x192xf32) + matmul_1 = paddle._C_ops.matmul(transpose_3, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (16x32x192xf32) <- (16x32x192xf32, 192xf32) + add_2 = paddle._C_ops.add(matmul_1, parameter_0) + del parameter_0 + + # pd_op.transpose: (16x192x32xf32) <- (16x32x192xf32) + transpose_0 = paddle._C_ops.transpose(add_2, [0, 2, 1]) + del ( + add_0, + add_2, + assign_0, + assign_1, + conv2d_0, + dropout_1, + full_1, + full_int_array_3, + matmul_0, + matmul_1, + share_data__0, + slice_0, + sqrt_0, + transpose_2, + transpose_3, + unsqueeze_1, + unsqueeze_2, + ) + + return transpose_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_8/weight_meta.py new file mode 100644 index 000000000..9e48f3639 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_8/weight_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [192] + dtype = "float32" + min_val = float("-0.101941") + max_val = float("0.100828") + mean = float("0.000979628") + std = float("0.0578965") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [96, 192] + dtype = "float32" + min_val = float("-0.10206") + max_val = float("0.102041") + mean = float("0.000454452") + std = float("0.0589493") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [4, 32] + dtype = "float32" + min_val = float("-0.489572") + max_val = float("0.497211") + mean = float("-0.0022002") + std = float("0.286519") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 1, 3] + dtype = "float32" + min_val = float("-1.38428") + max_val = float("2.04166") + mean = float("0.18919") + std = float("0.812535") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..5b83ce4fe --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +bd56bcf5d21fc7ae38447b3f591ffe9628bd1b4f2991fc12452cc57baaf25272 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_net.json b/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_net.json new file mode 100644 index 000000000..c5c3dc20a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_9/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_9/input_meta.py new file mode 100644 index 000000000..fd3adbcd8 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_9/input_meta.py @@ -0,0 +1,31 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [3, 96, 1] + dtype = "float32" + min_val = float("-1.36201") + max_val = float("2.86798") + mean = float("0.42894") + std = float("1.34003") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [3, 96, 4] + dtype = "float32" + min_val = float("-0.5") + max_val = float("0.5") + mean = float("-0.0849295") + std = float("0.311331") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_9/model.py b/paddle_samples/PaddleX/TimesNet/subgraph_9/model.py new file mode 100644 index 000000000..401185c0b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_9/model.py @@ -0,0 +1,219 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, parameter_0, parameter_1, parameter_2, parameter_3, data_0, data_1, data_2 + ): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) + + # pd_op.share_data_: (-1x1x1xf32) <- (-1x1x1xf32) + share_data__0 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) + del data_0 + + # pd_op.mean: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) + + # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (-1x96x1xf32) <- (-1x96x1xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (-1x96x1xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (-1x1x1xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (-1x1x1xf32) <- (-1x1x1xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + 
+ # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1x1xf32) <- (-1x1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) + del divide_1, full_0 + + # pd_op.sqrt: (-1x1x1xf32) <- (-1x1x1xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.divide: (-1x96x1xf32) <- (-1x96x1xf32, -1x1x1xf32) + divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) + del subtract_0 + + # pd_op.transpose: (-1x1x96xf32) <- (-1x96x1xf32) + transpose_1 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.unsqueeze: (-1x1x96x1x1xf32) <- (-1x1x96xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_1, full_int_array_1) + del transpose_1 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_2 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (-1x1x98x1x1xf32) <- (-1x1x96x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" + ) + del full_int_array_2, unsqueeze_0 + + # pd_op.squeeze: (-1x1x98xf32) <- (-1x1x98x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) + del full_int_array_1, pad3d_0 + + # pd_op.assign: (32x1x3xf32) <- (32x1x3xf32) + assign_0 = parameter_3 + del parameter_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_3 + + # pd_op.unsqueeze: (32x1x1x3xf32) <- (32x1x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + + # pd_op.unsqueeze: (-1x1x1x98xf32) <- (-1x1x98xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) + del squeeze_0 + + # pd_op.conv2d: (-1x32x1x96xf32) <- (-1x1x1x98xf32, 32x1x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (-1x32x96xf32) <- (-1x32x1x96xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + + # pd_op.transpose: (-1x96x32xf32) <- (-1x32x96xf32) + transpose_2 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.matmul: (-1x96x32xf32) <- (-1x96x4xf32, 4x32xf32) + matmul_0 = paddle._C_ops.matmul(data_1, parameter_2, False, False) + del data_1, parameter_2 + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, -1x96x32xf32) + add_0 = paddle._C_ops.add(transpose_2, matmul_0) + + # pd_op.shape64: (3xi64) <- (-1x96x1xf32) + shape64_0 = paddle._C_ops.shape64(divide_2) + del divide_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_4, full_int_array_0, [1], [0] + ) + del full_int_array_0, shape64_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [96] + + # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2, full_int_array_4, full_int_array_5 + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, 1x96x32xf32) + add_1 = paddle._C_ops.add(add_0, slice_1) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x96x32xf32, -1x96x32xui8) <- (-1x96x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_1, None, 
full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_1 + + # pd_op.transpose: (-1x32x96xf32) <- (-1x96x32xf32) + transpose_3 = paddle._C_ops.transpose(dropout_0, [0, 2, 1]) + del dropout_0 + + # pd_op.matmul: (-1x32x192xf32) <- (-1x32x96xf32, 96x192xf32) + matmul_1 = paddle._C_ops.matmul(transpose_3, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (-1x32x192xf32) <- (-1x32x192xf32, 192xf32) + add_2 = paddle._C_ops.add(matmul_1, parameter_0) + del parameter_0 + + # pd_op.transpose: (-1x192x32xf32) <- (-1x32x192xf32) + transpose_0 = paddle._C_ops.transpose(add_2, [0, 2, 1]) + del ( + add_0, + add_2, + assign_0, + assign_1, + conv2d_0, + dropout_1, + full_1, + full_int_array_3, + matmul_0, + matmul_1, + share_data__0, + slice_1, + sqrt_0, + transpose_2, + transpose_3, + unsqueeze_1, + unsqueeze_2, + ) + + return transpose_0 diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_9/weight_meta.py new file mode 100644 index 000000000..891919163 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_9/weight_meta.py @@ -0,0 +1,42 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [192] + dtype = "float32" + min_val = float("-0.10104") + max_val = float("0.104358") + mean = float("0.00159109") + std = float("0.0581367") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [96, 192] + dtype = "float32" + min_val = float("-0.123297") + max_val = float("0.126791") + mean = float("0.000182814") + std = float("0.0590503") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [4, 32] + dtype = "float32" + min_val = float("-0.529133") + max_val = float("0.515933") + mean = float("-0.00250421") + std = float("0.290818") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 1, 3] + dtype = "float32" + min_val = float("-1.38317") + max_val = float("2.03246") + mean = float("0.188748") + std = float("0.81195") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..f539e935b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +015f6a3551680938ba72619056276641ee3a7ac6cfdbe40edde396d9f09e7e47 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/input_meta.py new file mode 100644 index 000000000..b107fdbbc --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/input_meta.py @@ -0,0 +1,30 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 96, 32] + dtype = "float32" + min_val = float("-3.13961") + max_val = float("4.56241") + mean = float("0.0201719") + std = float("1.02255") + data = None 
+ + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [96] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/model.py new file mode 100644 index 000000000..ebb766caa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/model.py @@ -0,0 +1,303 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_3, cast_0) + del data_3 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [data_0, floor_divide_0, cast_0, full_0] + del cast_0, floor_divide_0 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x96x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + del conv2d_2, reshape_3 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 
64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + del conv2d_3, reshape_4 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + del conv2d_4, reshape_5 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, transpose_0 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + del conv2d_5, reshape_6 + + # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) + combine_1 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + del stack_1 + + # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + del mean_0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + del conv2d_6, reshape_7 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + del conv2d_7, reshape_8 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 
= paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + del conv2d_8, reshape_9 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + del conv2d_9, reshape_10 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + del conv2d_10, reshape_11 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + del conv2d_11, reshape_12 + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_2 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + del full_int_array_1, stack_2 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [data_0, full_1, full_0] + del data_0, full_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_3) + del stack_3, transpose_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [96] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del full_int_array_2, full_int_array_3, reshape_13 + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/weight_meta.py 
b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/weight_meta.py new file mode 100644 index 000000000..7cd5001a5 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.171637") + max_val = float("0.286486") + mean = float("0.00013885") + std = float("0.0208474") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.155399") + max_val = float("0.280516") + mean = float("0.000198488") + std = float("0.0244337") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.167556") + max_val = float("0.280229") + mean = float("0.000259204") + std = float("0.0301799") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.169968") + max_val = float("0.283124") + mean = float("0.000434687") + std = float("0.040722") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.246544") + max_val = float("0.304029") + mean = float("0.0011065") + std = float("0.0640689") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.612306") + max_val = float("0.645088") + mean = float("-0.00132332") + std = float("0.186336") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.3972") + max_val = float("0.384126") + mean = float("0.000531841") + std = float("0.0285271") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.35393") + max_val = float("0.406716") + mean = float("0.000740527") + std = float("0.0337336") + data = None + + +class 
Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.347071") + max_val = float("0.378394") + mean = float("0.000667025") + std = float("0.0419882") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.39897") + max_val = float("0.34896") + mean = float("0.00116756") + std = float("0.0565563") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.498514") + max_val = float("0.47818") + mean = float("0.0014499") + std = float("0.0900361") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.766261") + max_val = float("0.852441") + mean = float("-0.00652626") + std = float("0.263178") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..d67a70ea3 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +3fb65681e9f9a0fc473c9d65f70cbfb29bfb92e719d9d0c6562eca4978ec51e5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/input_meta.py new file mode 100644 index 000000000..c5373c6bb --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int32" + data = [19] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 96, 32] + dtype = "float32" + min_val = float("-9.41539") + max_val = float("10.3818") + mean = float("0.340659") + std = float("1.6485") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/model.py new file mode 100644 index 000000000..4b9c2176d --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/model.py @@ -0,0 +1,394 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + 
parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_0, paddle.int64) + del data_0 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_2, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_2) + del data_2 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("16"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, subtract_0, full_2] + del subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (16x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_0, paddle.float32 + ) + del full_3, stack_0 + + # pd_op.cast: (16x96x32xf32) <- (16x96x32xf32) + cast_1 = paddle._C_ops.cast(data_1, paddle.float32) + del data_1 + + # pd_op.cast: (16x-1x32xf32) <- (16x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([16x96x32xf32, 16x-1x32xf32]) <- (16x96x32xf32, 16x-1x32xf32) + combine_1 = [cast_1, cast_2] + + # pd_op.concat: (16x-1x32xf32) <- ([16x96x32xf32, 16x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_4) + del combine_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_1, floor_divide_1, cast_0, full_2] + del cast_0, floor_divide_1, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (16x-1x-1x32xf32) <- (16x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del stack_1 + + # pd_op.transpose: (16x32x-1x-1xf32) <- (16x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (16x64x-1x-1xf32) <- (16x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + 
reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (16x64x-1x-1xf32) <- (16x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (16x64x-1x-1xf32) <- (16x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (16x64x-1x-1xf32) <- (16x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (16x64x-1x-1xf32) <- (16x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (16x64x-1x-1xf32) <- (16x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32]) <- (16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (16x64x-1x-1x6xf32) <- ([16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32, 16x64x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (16x64x-1x-1xf32) <- (16x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.gelu: (16x64x-1x-1xf32) <- (16x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (16x32x-1x-1xf32) <- (16x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (16x32x-1x-1xf32) <- (16x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) <- (16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (16x32x-1x-1x6xf32) <- ([16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32, 16x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (16x32x-1x-1xf32) <- (16x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + + # pd_op.transpose: (16x-1x-1x32xf32) <- (16x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [16, -1, 32] + + # pd_op.reshape: (16x-1x32xf32) <- 
(16x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, full_int_array_2) + del full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [96] + + # pd_op.slice: (16x-1x32xf32) <- (16x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_3, full_int_array_4, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + cast_1, + cast_2, + concat_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_4, + full_int_array_1, + full_int_array_3, + full_int_array_4, + full_with_tensor_0, + gelu_0, + mean_0, + multiply_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_2, + stack_3, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/weight_meta.py new file mode 100644 index 000000000..e045b31b2 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.0821478") + max_val = float("0.0853727") + mean = float("2.75789e-05") + std = float("0.0167532") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.101623") + max_val = float("0.0919988") + mean = float("-1.03966e-05") + std = float("0.0203609") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.122233") + max_val = float("0.104412") + mean = float("0.000171616") + std = float("0.026057") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.147488") + max_val = float("0.155319") + mean = float("0.000104671") + std = float("0.0362299") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.261617") + max_val = float("0.271659") + mean = float("0.000722002") + std = float("0.059876") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.551416") + max_val = float("0.659869") + mean = float("0.00739116") + std = float("0.172832") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.116984") + max_val = float("0.108236") + mean = float("-0.00017192") + std = float("0.0232049") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.130613") + max_val = float("0.126211") + mean = float("-0.000376987") + std = float("0.0282008") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.147766") + max_val = float("0.176776") + mean = float("-0.000280078") + std = float("0.0362595") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.19525") + max_val = float("0.209583") + mean = float("-0.000640883") + std = float("0.0508085") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.313806") + max_val = float("0.364535") + mean = float("-0.000638754") + std = float("0.0841542") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.826352") + max_val = float("0.791534") + mean = float("-0.00447392") + std = float("0.252132") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..3ea29a59a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +7096065249a442fe087b073010031bbe4a1c8d6e2707d0000f4d8be72faab72e \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 
1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/input_meta.py new file mode 100644 index 000000000..772c6b011 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/input_meta.py @@ -0,0 +1,12 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [3] + dtype = "int32" + data = [96, 48, 32] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py new file mode 100644 index 000000000..59c3e41f0 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py @@ -0,0 +1,37 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xi32) <- (3xi32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del data_0, full_int_array_0, full_int_array_1 + + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) + + # pd_op.remainder: (xi64) <- (xi64, xi64) + remainder_0 = paddle._C_ops.remainder(data_1, cast_0) + del cast_0, data_1 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_0) + del full_0, remainder_0, slice_0 + + return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..628bed2cb --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +d542d39e895f6c99fcdd793a5d5d813c0497991729db1137c2fc19139a08cc42 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py new file mode 100644 index 000000000..91524202f --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py @@ -0,0 +1,37 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [19] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape 
= [2, 96, 32] + dtype = "float32" + min_val = float("-2.85988") + max_val = float("4.34883") + mean = float("0.0198675") + std = float("1.02037") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py new file mode 100644 index 000000000..c8f6ecea3 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py @@ -0,0 +1,401 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) + del data_4 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_1, subtract_0, full_1] + del data_1, subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del full_2, stack_0 + + # pd_op.cast: (-1x96x32xf32) <- (-1x96x32xf32) + cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 + + # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x96x32xf32, -1x-1x32xf32]) <- (-1x96x32xf32, -1x-1x32xf32) + combine_1 = [cast_1, cast_2] + + # pd_op.concat: (-1x-1x32xf32) <- ([-1x96x32xf32, -1x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [data_0, floor_divide_1, cast_0, full_1] + del cast_0, floor_divide_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) + 
reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del stack_1 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) + stack_2 = 
paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- 
([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [data_0, full_4, full_1] + del data_0, full_1, full_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) + del stack_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [96] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + cast_1, + cast_2, + concat_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_3, + full_int_array_1, + full_int_array_2, + full_int_array_3, + full_with_tensor_0, + gelu_0, + mean_0, + multiply_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_2, + stack_3, + transpose_0, + transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py new file mode 100644 index 000000000..5fd23c522 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.171178") + max_val = float("0.28641") + mean = float("0.000138819") + std = float("0.0208469") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.155314") + max_val = float("0.280429") + mean = float("0.000198439") + std = float("0.0244335") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.167556") + max_val = float("0.280153") + mean = float("0.000259141") + std = float("0.0301798") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + 
min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.169913") + max_val = float("0.283049") + mean = float("0.000434627") + std = float("0.0407218") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.246544") + max_val = float("0.304018") + mean = float("0.00110634") + std = float("0.0640675") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.612299") + max_val = float("0.645148") + mean = float("-0.00132418") + std = float("0.186337") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.397206") + max_val = float("0.384341") + mean = float("0.000531751") + std = float("0.0285274") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.353936") + max_val = float("0.406931") + mean = float("0.000740455") + std = float("0.0337338") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.347076") + max_val = float("0.378609") + mean = float("0.000666987") + std = float("0.0419889") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.398976") + max_val = float("0.349175") + mean = float("0.00116756") + std = float("0.0565579") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.498519") + max_val = float("0.47824") + mean = float("0.00144987") + std = float("0.0900388") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.766112") + max_val = 
float("0.852336") + mean = float("-0.00652652") + std = float("0.263183") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..5b9740b6d --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +c788ee53ec7446be5d13146cc01d890972d8ea81ab50143e201ddeb574745fb3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/input_meta.py similarity index 86% rename from paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py rename to paddle_samples/PaddleX/TimesNet_ad/subgraph_12/input_meta.py index e5ae7474f..36dd0f676 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/input_meta.py @@ -9,11 +9,11 @@ class Program_weight_tensor_data_1: name = "data_1" shape = [3] dtype = "int32" - data = [405, 202, 101] + data = [96, 48, 32] class Program_weight_tensor_data_2: name = "data_2" shape = [] dtype = "int64" - data = [405] + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_7/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/model.py similarity index 100% rename from paddle_samples/PaddleX/TimesNet_cls/subgraph_7/model.py rename to paddle_samples/PaddleX/TimesNet_ad/subgraph_12/model.py diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..0502190db --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +9451eec787ebbf6d81d05cf078e17cb1b8017eefbaeabcf25382ff692dd94b12 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/input_meta.py new file mode 100644 index 000000000..8153d7ca5 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/input_meta.py @@ -0,0 +1,30 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-2.85988") + max_val = float("4.34883") + mean = float("0.0198675") 
+ std = float("1.02037") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [96] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/model.py new file mode 100644 index 000000000..0a54a8c55 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/model.py @@ -0,0 +1,337 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_3, cast_0) + del data_3 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [data_0, floor_divide_0, cast_0, full_0] + del cast_0, floor_divide_0 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x96x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + 
transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + + # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) + combine_1 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + + # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # 
pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_2 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [data_0, full_1, full_0] + del data_0, full_0, full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_3) + del stack_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [96] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del ( + add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, + assign_0, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_int_array_1, + full_int_array_2, + full_int_array_3, + gelu_0, + mean_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_1, + stack_2, + transpose_0, + 
transpose_1, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/weight_meta.py new file mode 100644 index 000000000..5fd23c522 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.171178") + max_val = float("0.28641") + mean = float("0.000138819") + std = float("0.0208469") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.155314") + max_val = float("0.280429") + mean = float("0.000198439") + std = float("0.0244335") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.167556") + max_val = float("0.280153") + mean = float("0.000259141") + std = float("0.0301798") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.169913") + max_val = float("0.283049") + mean = float("0.000434627") + std = float("0.0407218") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.246544") + max_val = float("0.304018") + mean = float("0.00110634") + std = float("0.0640675") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.612299") + max_val = float("0.645148") + mean = float("-0.00132418") + std = float("0.186337") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.397206") + max_val = float("0.384341") + mean = float("0.000531751") + std = float("0.0285274") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.353936") + max_val = float("0.406931") 
+ mean = float("0.000740455") + std = float("0.0337338") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.347076") + max_val = float("0.378609") + mean = float("0.000666987") + std = float("0.0419889") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.398976") + max_val = float("0.349175") + mean = float("0.00116756") + std = float("0.0565579") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.498519") + max_val = float("0.47824") + mean = float("0.00144987") + std = float("0.0900388") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.766112") + max_val = float("0.852336") + mean = float("-0.00652652") + std = float("0.263183") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..59b6a77fb --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +0f8a6b7104d3aca48b5037c599fd68df9cebbccbe6d26742ef5ec762870148e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py new file mode 100644 index 000000000..d10b7338b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 32, 1, 96] + dtype = "float32" + min_val = float("-10.2965") + max_val = float("14.1976") + mean = float("0.342926") + std = float("1.27794") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py new file mode 100644 index 000000000..0dfe809a1 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py @@ -0,0 +1,238 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + 
parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + ): + # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + del conv2d_0, reshape_0 + + # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + data_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_1) + del conv2d_1, reshape_1 + + # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + data_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_2) + del conv2d_2, reshape_2 + + # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + data_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_3) + del conv2d_3, reshape_3 + + # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + data_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_4) + del conv2d_4, reshape_4 + + # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + data_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_5) + del conv2d_5, reshape_5 + + # builtin.combine: ([16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32]) <- (16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32) + combine_0 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, 
add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (16x64x1x96x6xf32) <- ([16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (16x64x1x96xf32) <- (16x64x1x96x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_0, full_int_array_1, False) + del stack_0 + + # pd_op.gelu: (16x64x1x96xf32) <- (16x64x1x96xf32) + gelu_0 = paddle._C_ops.gelu(mean_1, False) + del mean_1 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_6) + del conv2d_6, reshape_6 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_7) + del conv2d_7, reshape_7 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_8) + del conv2d_8, reshape_8 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_9) + del conv2d_9, reshape_9 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_10) + del conv2d_10, reshape_10 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_11) + del conv2d_11, reshape_11 + + # builtin.combine: ([16x32x1x96xf32, 16x32x1x96xf32, 
16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32]) <- (16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32) + combine_1 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (16x32x1x96x6xf32) <- ([16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # pd_op.mean: (16x32x1x96xf32) <- (16x32x1x96x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) + del full_int_array_1, stack_1 + + return mean_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py new file mode 100644 index 000000000..00a1adb0c --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.378676") + max_val = float("0.365137") + mean = float("-0.000197318") + std = float("0.0303417") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.400279") + max_val = float("0.379996") + mean = float("-0.000243122") + std = float("0.0345614") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.411308") + max_val = float("0.37641") + mean = float("-8.07878e-05") + std = float("0.0410653") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.37956") + max_val = float("0.382445") + mean = float("-0.000167806") + std = float("0.0525399") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.43568") + max_val = float("0.373109") + mean = float("0.00107606") + std = float("0.0743243") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.606749") + max_val = float("0.716587") + mean = float("0.00986633") + std = float("0.187626") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.705811") + max_val = float("0.38403") + mean = float("-0.00554413") + std = float("0.0356425") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.771232") + max_val = float("0.366518") + mean = float("-0.0068774") + std = float("0.0408845") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.695085") + max_val = float("0.354321") + mean = float("-0.00856062") + std = float("0.0491656") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.730604") + max_val = float("0.393171") + mean = float("-0.0118981") + std = float("0.0648549") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.728807") + max_val = float("0.600694") + mean = float("-0.0154007") + std = float("0.0968162") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.938968") + max_val = float("0.799272") + mean = float("-0.0267053") + std = float("0.263792") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..19b5c8150 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +06943ae7ebc7532ff1bcd6ca3875dfb2729eb0348f9169988043fc4aa3ce050b \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py new file mode 100644 index 000000000..6db6ec575 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py @@ -0,0 +1,37 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [16] + + +class 
Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [19] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 96, 32] + dtype = "float32" + min_val = float("-3.32402") + max_val = float("4.62089") + mean = float("0.031122") + std = float("1.015") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py new file mode 100644 index 000000000..e9e757f97 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py @@ -0,0 +1,368 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) + del data_4 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_1, subtract_0, full_1] + del data_1, subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del full_2, stack_0 + + # pd_op.cast: (-1x96x32xf32) <- (-1x96x32xf32) + cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 + + # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x96x32xf32, -1x-1x32xf32]) <- (-1x96x32xf32, -1x-1x32xf32) + combine_1 = [cast_1, cast_2] + del cast_1, cast_2 + + # pd_op.concat: (-1x-1x32xf32) <- ([-1x96x32xf32, -1x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1, full_3 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, 
xi64, xi64, xi64) + combine_2 = [data_0, floor_divide_1, cast_0, full_1] + del cast_0, floor_divide_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del concat_0, stack_1 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_23 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + del conv2d_2, reshape_3 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + del conv2d_3, reshape_4 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + del conv2d_4, reshape_5 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13, transpose_0 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + del conv2d_5, 
reshape_6 + + # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + del add_0, add_1, add_2, add_3, add_4, add_5 + + # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + del stack_2 + + # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + del mean_0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + del conv2d_6, reshape_7 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + del conv2d_7, reshape_8 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_7 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + del conv2d_8, reshape_9 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + del conv2d_9, reshape_10 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + del conv2d_10, reshape_11 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_0, parameter_1 + + # 
pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + del conv2d_11, reshape_12 + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + del add_10, add_11, add_6, add_7, add_8, add_9 + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + del full_int_array_1, stack_3 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [data_0, full_4, full_1] + del data_0, full_1, full_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) + del stack_4, transpose_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [96] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del ( + full_int_array_2, + full_int_array_3, + full_with_tensor_0, + multiply_0, + reshape_13, + ) + + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py new file mode 100644 index 000000000..ca5b1d84a --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py @@ -0,0 +1,238 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.264679") + max_val = float("0.376382") + mean = float("0.000111027") + std = float("0.0245485") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.294357") + max_val = float("0.38058") + mean = float("0.000170868") + std = float("0.0282573") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.283553") + max_val = float("0.370125") + mean = 
float("0.000228007") + std = float("0.0342913") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.283169") + max_val = float("0.37302") + mean = float("0.000392404") + std = float("0.0451435") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.335589") + max_val = float("0.381078") + mean = float("0.00115978") + std = float("0.0690689") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.63918") + max_val = float("0.689408") + mean = float("-0.000835581") + std = float("0.190792") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.460614") + max_val = float("0.452903") + mean = float("0.000691717") + std = float("0.0339579") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.417344") + max_val = float("0.475493") + mean = float("0.000917908") + std = float("0.039257") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.429916") + max_val = float("0.447171") + mean = float("0.000883161") + std = float("0.0478161") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.462384") + max_val = float("0.417737") + mean = float("0.00143336") + std = float("0.0626403") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.561929") + max_val = float("0.524655") + mean = float("0.00179128") + std = float("0.0966917") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.732336") + max_val = float("0.817109") + mean = float("-0.00702827") + std = float("0.267835") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..c562da014 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +c4459a986fcd6ab51cf96bcd62d9df977c2f0352291ee922d5ecfb21cda64b80 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py new file mode 100644 index 000000000..0ac057bd6 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py @@ -0,0 +1,49 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-3.98551") + max_val = float("8.82186") + mean = float("0.029929") + std = float("1.18773") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-5.13918") + max_val = float("11.3318") + mean = float("0.0523877") + std = float("1.64322") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-7.82916") + max_val = float("8.93734") + mean = float("0.0112696") + std = float("1.79837") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 3] + dtype = "float32" + data = [46.9227, 32.7293, 20.2913, 30.6332, 29.8448, 21.058] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-6.34525") + max_val = float("9.14017") + mean = float("0.336976") + std = float("1.50118") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py new file mode 100644 index 000000000..50e134d21 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py @@ -0,0 +1,65 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1, data_2, data_3, data_4): + # builtin.combine: ([-1x96x32xf32, -1x96x32xf32, -1x96x32xf32]) <- (-1x96x32xf32, -1x96x32xf32, -1x96x32xf32) + combine_0 = [data_0, data_1, data_2] + del data_0, data_1, data_2 + + # pd_op.stack: (-1x96x32x3xf32) <- ([-1x96x32xf32, -1x96x32xf32, -1x96x32xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.softmax: (-1x3xf32) <- (-1x3xf32) + softmax_0 = paddle._C_ops.softmax(data_3, 1) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.unsqueeze: (-1x1x3xf32) <- (-1x3xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(softmax_0, full_int_array_0) + + # pd_op.unsqueeze: (-1x1x1x3xf32) <- 
(-1x1x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_0) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, 96, 32, 1] + + # pd_op.tile: (-1x96x32x3xf32) <- (-1x1x1x3xf32, 4xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_1, full_int_array_1) + + # pd_op.multiply: (-1x96x32x3xf32) <- (-1x96x32x3xf32, -1x96x32x3xf32) + multiply_0 = paddle._C_ops.multiply(stack_0, tile_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-1] + + # pd_op.sum: (-1x96x32xf32) <- (-1x96x32x3xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_2, None, False) + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, -1x96x32xf32) + add_0 = paddle._C_ops.add(sum_0, data_4) + del ( + assign_0, + data_4, + full_int_array_0, + full_int_array_1, + full_int_array_2, + multiply_0, + softmax_0, + stack_0, + sum_0, + tile_0, + unsqueeze_0, + unsqueeze_1, + ) + + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..44b2ee351 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +b1a67bcc15427ee0367a3a1b199f542e3368882ab0bf73dc2d8cb9fe481a112d \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py new file mode 100644 index 000000000..9992dff58 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py @@ -0,0 +1,89 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 96, 32] + dtype = "float32" + min_val = float("-2.67673") + max_val = float("4.8069") + mean = float("0.0226662") + std = float("1.04036") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [16, 1, 2] + dtype = "float32" + data = [ + 0.0152262, + 0.112992, + 0.0131863, + 0.00941724, + 0.00817011, + 0.0263628, + 0.00638176, + 0.0175078, + 0.0234966, + 0.00656714, + 0.0124417, + 0.0132452, + 0.0246282, + 0.00765128, + 0.0301115, + 0.00777976, + 0.0628782, + 0.0259483, + 0.0631641, + 0.047438, + 0.0094697, + 0.00714918, + 0.0377613, + 0.0259842, + 0.624268, + 1.5689, + 0.00617691, + 0.00916586, + 0.00874541, + 0.00718946, + 0.00659123, + 0.00577362, + ] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [16, 1, 2] + dtype = "float32" + data = [ + 0.304086, + -4.2993, + -0.136159, + -1.50789, + 0.148375, + -1.23287, + 0.424943, + -2.33587, + -0.441654, + -3.51483, + -0.706665, + -2.10642, + -0.297288, + -1.02127, + -0.403002, + -3.52043, + -0.189814, + -0.818702, + -1.26374, + -3.75699, + -0.90199, + -1.11094, + -0.498064, + -2.30995, + -2.3935, + -2.71923, + -0.51015, + -3.49819, + -0.297187, + -1.5618, + 0.158008, + -1.12087, + ] 
diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py new file mode 100644 index 000000000..d28b81f52 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py @@ -0,0 +1,62 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, parameter_1, data_0, data_1, data_2): + # pd_op.matmul: (-1x96x2xf32) <- (-1x96x32xf32, 32x2xf32) + matmul_0 = paddle._C_ops.matmul(data_0, parameter_1, False, False) + del data_0, parameter_1 + + # pd_op.add: (-1x96x2xf32) <- (-1x96x2xf32, 2xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_0) + del matmul_0, parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (-1x2xf32) <- (-1x1x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del data_1 + + # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(slice_0, full_int_array_1) + del slice_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 96, 1] + + # pd_op.tile: (-1x96x2xf32) <- (-1x1x2xf32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_2) + del unsqueeze_0 + + # pd_op.multiply: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32) + multiply_0 = paddle._C_ops.multiply(add_1, tile_0) + del add_1, tile_0 + + # pd_op.slice: (-1x2xf32) <- (-1x1x2xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_2, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del data_2, full_int_array_0 + + # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(slice_1, full_int_array_1) + del full_int_array_1, slice_1 + + # pd_op.tile: (-1x96x2xf32) <- (-1x1x2xf32, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_1, full_int_array_2) + del full_int_array_2, unsqueeze_1 + + # pd_op.add: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32) + add_0 = paddle._C_ops.add(multiply_0, tile_1) + del multiply_0, tile_1 + + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py new file mode 100644 index 000000000..2b35288ca --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [2] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 2] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..f14aa14e3 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +f56463afba6c18c9082b9fd47939fa06137c08c2c8f638602b3eed63df795895 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No 
newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py new file mode 100644 index 000000000..79774a9fe --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-6.34525") + max_val = float("9.14017") + mean = float("0.336976") + std = float("1.50118") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py new file mode 100644 index 000000000..468f49791 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py @@ -0,0 +1,109 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0): + # pd_op.fft_r2c: (-1x49x32xc64) <- (-1x96x32xf32) + fft_r2c_0 = paddle._C_ops.fft_r2c(data_0, [1], "backward", True, True) + + # pd_op.abs: (-1x49x32xf32) <- (-1x49x32xc64) + abs_0 = paddle._C_ops.abs(fft_r2c_0) + + # pd_op.assign: (-1x49x32xf32) <- (-1x49x32xf32) + assign_0 = abs_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.mean: (49x32xf32) <- (-1x49x32xf32, 1xi64) + mean_0 = paddle._C_ops.mean(abs_0, full_int_array_0, False) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.mean: (49xf32) <- (49x32xf32, 1xi64) + mean_1 = paddle._C_ops.mean(mean_0, full_int_array_1, False) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.set_value_: (49xf32) <- (49xf32, 1xi64, 1xi64, 1xi64) + set_value__0 = paddle._C_ops.set_value_( + mean_1, + full_int_array_0, + full_int_array_2, + full_int_array_2, + [0], + [0], + [], + [1], + [float("0")], + ) + del mean_1 + + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (3xf32, 3xi64) <- (49xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(set_value__0, full_0, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_0 + + # pd_op.share_data_: (3xi64) <- (3xi64) + share_data__0 = topk_1.detach() + del topk_1 + + # pd_op.cast: (3xi32) <- (3xi64) + cast_0 = paddle._C_ops.cast(share_data__0, paddle.int32) + del share_data__0 + + # pd_op.shape64: (3xi64) <- (-1x96x32xf32) + shape64_0 = paddle._C_ops.shape64(data_0) + del data_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_1 = paddle._C_ops.cast(slice_0, paddle.int32) + del slice_0 + + # pd_op.floor_divide: (3xi32) <- (xi32, 3xi32) + floor_divide_0 = paddle._C_ops.floor_divide(cast_1, cast_0) + del cast_1 + + # pd_op.mean: (-1x49xf32) <- (-1x49x32xf32, 1xi64) + mean_2 = paddle._C_ops.mean(abs_0, full_int_array_1, False) + + # pd_op.index_select: (-1x3xf32) <- (-1x49xf32, 3xi32) + index_select_0 = paddle._C_ops.index_select(mean_2, cast_0, 1) + del ( + abs_0, + assign_0, + assign_1, + cast_0, + fft_r2c_0, + full_int_array_0, + full_int_array_1, + mean_0, + mean_2, + set_value__0, + ) + + return floor_divide_0, index_select_0 diff --git 
a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..08a9c2299 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +d04dcb16bbb8bceb9a186ee5f5bcd178fc2377f29963a707cc07b94f519ab7ff \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py new file mode 100644 index 000000000..bfa4e6d08 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 96, 2] + dtype = "float32" + min_val = float("-6.37408") + max_val = float("0.439645") + mean = float("-1.35748") + std = float("1.3301") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py new file mode 100644 index 000000000..b62e3ba47 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py @@ -0,0 +1,177 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.mean: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) + + # pd_op.share_data_: (-1x1x2xf32) <- (-1x1x2xf32) + share_data__0 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) + del data_0 + + # pd_op.mean: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) + + # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (-1x96x2xf32) <- (-1x96x2xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del pow_0 + + # pd_op.numel: (xi64) <- (-1x96x2xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (-1x1x2xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- 
(xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (-1x1x2xf32) <- (-1x1x2xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x1x2xf32) <- (-1x1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) + del divide_1, full_0 + + # pd_op.sqrt: (-1x1x2xf32) <- (-1x1x2xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.divide: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) + del subtract_0 + + # pd_op.transpose: (-1x2x96xf32) <- (-1x96x2xf32) + transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.unsqueeze: (-1x2x96x1x1xf32) <- (-1x2x96xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_1) + del transpose_0 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_2 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (-1x2x98x1x1xf32) <- (-1x2x96x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" + ) + del full_int_array_2, unsqueeze_0 + + # pd_op.squeeze: (-1x2x98xf32) <- (-1x2x98x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) + del full_int_array_1, pad3d_0 + + # pd_op.assign: (32x2x3xf32) <- (32x2x3xf32) + assign_0 = parameter_0 + del parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.unsqueeze: (32x2x1x3xf32) <- (32x2x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + del assign_0 + + # pd_op.unsqueeze: (-1x2x1x98xf32) <- (-1x2x98xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) + del squeeze_0 + + # pd_op.conv2d: (-1x32x1x96xf32) <- (-1x2x1x98xf32, 32x2x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_1, unsqueeze_2 + + # pd_op.squeeze: (-1x32x96xf32) <- (-1x32x1x96xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + del conv2d_0, full_int_array_3 + + # pd_op.transpose: (-1x96x32xf32) <- (-1x32x96xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.shape64: (3xi64) <- (-1x96x2xf32) + shape64_0 = paddle._C_ops.shape64(divide_2) + del divide_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_4, full_int_array_0, [1], [0] + ) + del full_int_array_0, shape64_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [96] + + # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1, full_int_array_4, full_int_array_5 + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, 1x96x32xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_1) + del slice_1, transpose_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, 
paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x96x32xf32, -1x96x32xui8) <- (-1x96x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_0, full_1, share_data__0, sqrt_0 + + return dropout_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py new file mode 100644 index 000000000..297640557 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32, 2, 3] + dtype = "float32" + min_val = float("-1.67133") + max_val = float("1.7001") + mean = float("0.0189139") + std = float("0.615287") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..7d5c89e14 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +a80937a58b6502456681671049b582ccfb3dbb036758ae01f1b804819ade0464 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_net.json new file mode 100644 index 000000000..026782ffa --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "TimesNet_ad", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py new file mode 100644 index 000000000..085bf32ce --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py @@ -0,0 +1,20 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 96, 2] + dtype = "float32" + min_val = float("-2.41022") + max_val = float("2.54238") + mean = float("0.061457") + std = float("1.05113") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py new file mode 100644 index 000000000..c44ceee47 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py @@ -0,0 +1,180 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, parameter_0, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.mean: (16x1x2xf32) <- (16x96x2xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) + + # pd_op.share_data_: (16x1x2xf32) <- (16x1x2xf32) + share_data__0 = mean_0.detach() + del mean_0 + + # pd_op.subtract: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) + del data_0 + + # pd_op.mean: (16x1x2xf32) <- (16x96x2xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) + + # pd_op.subtract: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 + + # pd_op.pow: (16x96x2xf32) <- 
(16x96x2xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 + + # pd_op.sum: (16x1x2xf32) <- (16x96x2xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del full_int_array_0, pow_0 + + # pd_op.numel: (xi64) <- (16x96x2xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (16x1x2xf32) + numel_1 = paddle._C_ops.numel(sum_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (16x1x2xf32) <- (16x1x2xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16x1x2xf32) <- (16x1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) + del divide_1, full_0 + + # pd_op.sqrt: (16x1x2xf32) <- (16x1x2xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.divide: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) + divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) + del subtract_0 + + # pd_op.transpose: (16x2x96xf32) <- (16x96x2xf32) + transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + del divide_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.unsqueeze: (16x2x96x1x1xf32) <- (16x2x96xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_1) + del transpose_0 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_2 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (16x2x98x1x1xf32) <- (16x2x96x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" + ) + del full_int_array_2, unsqueeze_0 + + # pd_op.squeeze: (16x2x98xf32) <- (16x2x98x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) + del full_int_array_1, pad3d_0 + + # pd_op.assign: (32x2x3xf32) <- (32x2x3xf32) + assign_0 = parameter_0 + del parameter_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_3 + + # pd_op.unsqueeze: (32x2x1x3xf32) <- (32x2x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + + # pd_op.unsqueeze: (16x2x1x98xf32) <- (16x2x98xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) + del squeeze_0 + + # pd_op.conv2d: (16x32x1x96xf32) <- (16x2x1x98xf32, 32x2x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + + # pd_op.squeeze: (16x32x96xf32) <- (16x32x1x96xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + + # pd_op.transpose: (16x96x32xf32) <- (16x32x96xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [96] + + # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + 
data_1, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1, full_int_array_4, full_int_array_5 + + # pd_op.add: (16x96x32xf32) <- (16x96x32xf32, 1x96x32xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_0) + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (16x96x32xf32, 16x96x32xui8) <- (16x96x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del ( + add_0, + assign_0, + assign_1, + conv2d_0, + full_1, + full_int_array_3, + share_data__0, + slice_0, + sqrt_0, + transpose_1, + unsqueeze_1, + unsqueeze_2, + ) + + return dropout_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py new file mode 100644 index 000000000..aa6d23753 --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32, 2, 3] + dtype = "float32" + min_val = float("-1.76635") + max_val = float("1.67318") + mean = float("0.0120366") + std = float("0.639064") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt new file mode 100644 index 000000000..b869f645c --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt @@ -0,0 +1 @@ +acb18fe3df13e7be558fc9325eb78ade5fdd8f993b06397cc0704de9673ce006 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/input_meta.py new file mode 100644 index 000000000..1a59996a6 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/input_meta.py @@ -0,0 +1,51 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4, 240, 384] + dtype = "float32" + min_val = float("-5.3456") + max_val = float("4.80906") + mean = float("0.0003766") + std = float("0.460151") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/model.py new file mode 100644 index 000000000..14f747e74 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/model.py 
@@ -0,0 +1,867 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + ): + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x-1xf32, 384x1152xf32) + matmul_0 = paddle._C_ops.matmul(data_6, parameter_71, False, False) + del parameter_71 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_70) + del matmul_0, parameter_70 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_0 = [0, -1, 3, 12, 32] + + # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_0, full_int_array_0) + del add_0 + + # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + transpose_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_0 + + # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) + transpose_1 = paddle._C_ops.transpose(slice_1, [0, 1, 3, 2]) + del slice_1 + + # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) + matmul_1 = paddle._C_ops.matmul(slice_0, transpose_1, False, False) + del slice_0, transpose_1 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_1, full_0, float("0"), True) + del matmul_1 + + # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del 
scale_0 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) + matmul_2 = paddle._C_ops.matmul(softmax_0, slice_2, False, False) + del slice_2, softmax_0 + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_2 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [0, -1, 384] + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_2, full_int_array_5) + del transpose_2 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_69, False, False) + del parameter_69, reshape_1 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_1 = paddle._C_ops.add(matmul_3, parameter_68) + del matmul_3, parameter_68 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x-1xf32, -1x-1x384xf32) + add_2 = paddle._C_ops.add(data_6, add_1) + del add_1, data_6 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_2, parameter_67, parameter_66, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_2, parameter_66, parameter_67 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_1, parameter_65, False, False) + del parameter_65 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_3 = paddle._C_ops.add(matmul_4, parameter_64) + del matmul_4, parameter_64 + + # pd_op.gelu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + gelu_0 = paddle._C_ops.gelu(add_3, False) + del add_3 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_5 = paddle._C_ops.matmul(gelu_0, parameter_63, False, False) + del gelu_0, parameter_63 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_4 = paddle._C_ops.add(matmul_5, parameter_62) + del matmul_5, parameter_62 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_5 = paddle._C_ops.add(layer_norm_1, add_4) + del add_4, layer_norm_1 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_61, parameter_60, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_5, parameter_60, parameter_61 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_4, parameter_59, False, False) + del parameter_59 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_6 = paddle._C_ops.add(matmul_6, parameter_58) + del matmul_6, parameter_58 + + # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_2 = paddle._C_ops.reshape(add_6, full_int_array_0) + del add_6 + + # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) + transpose_3 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) + del reshape_2 + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_2, full_int_array_3, [1], [0] + 
) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_3 + + # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) + transpose_4 = paddle._C_ops.transpose(slice_4, [0, 1, 3, 2]) + del slice_4 + + # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) + matmul_7 = paddle._C_ops.matmul(slice_3, transpose_4, False, False) + del slice_3, transpose_4 + + # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_7, full_0, float("0"), True) + del matmul_7 + + # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) + matmul_8 = paddle._C_ops.matmul(softmax_1, slice_5, False, False) + del slice_5, softmax_1 + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_5, full_int_array_5) + del transpose_5 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_57, False, False) + del parameter_57, reshape_3 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_7 = paddle._C_ops.add(matmul_9, parameter_56) + del matmul_9, parameter_56 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_8 = paddle._C_ops.add(layer_norm_4, add_7) + del add_7, layer_norm_4 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_8, parameter_55, parameter_54, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_8, parameter_54, parameter_55 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_7, parameter_53, False, False) + del parameter_53 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_9 = paddle._C_ops.add(matmul_10, parameter_52) + del matmul_10, parameter_52 + + # pd_op.gelu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + gelu_1 = paddle._C_ops.gelu(add_9, False) + del add_9 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_11 = paddle._C_ops.matmul(gelu_1, parameter_51, False, False) + del gelu_1, parameter_51 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_10 = paddle._C_ops.add(matmul_11, parameter_50) + del matmul_11, parameter_50 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_11 = paddle._C_ops.add(layer_norm_7, add_10) + del add_10, layer_norm_7 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, parameter_49, parameter_48, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_11, parameter_48, parameter_49 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_10, parameter_47, False, False) + del parameter_47 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 
1152xf32) + add_12 = paddle._C_ops.add(matmul_12, parameter_46) + del matmul_12, parameter_46 + + # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_12, full_int_array_0) + del add_12, full_int_array_0 + + # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) + transpose_6 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_6, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + transpose_6, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_6 + + # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) + transpose_7 = paddle._C_ops.transpose(slice_7, [0, 1, 3, 2]) + del slice_7 + + # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) + matmul_13 = paddle._C_ops.matmul(slice_6, transpose_7, False, False) + del slice_6, transpose_7 + + # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_0, float("0"), True) + del matmul_13 + + # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) + matmul_14 = paddle._C_ops.matmul(softmax_2, slice_8, False, False) + del slice_8, softmax_2 + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_8, full_int_array_5) + del full_int_array_5, transpose_8 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_15 = paddle._C_ops.matmul(reshape_5, parameter_45, False, False) + del parameter_45, reshape_5 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_13 = paddle._C_ops.add(matmul_15, parameter_44) + del matmul_15, parameter_44 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_14 = paddle._C_ops.add(layer_norm_10, add_13) + del add_13, layer_norm_10 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_13, layer_norm_14, layer_norm_15 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_43, parameter_42, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_14, parameter_42, parameter_43 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_13, parameter_41, False, False) + del parameter_41 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_40) + del matmul_16, parameter_40 + + # pd_op.gelu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + gelu_2 = paddle._C_ops.gelu(add_15, False) + del add_15 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_17 = paddle._C_ops.matmul(gelu_2, parameter_39, False, False) + del gelu_2, parameter_39 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_16 = 
paddle._C_ops.add(matmul_17, parameter_38) + del matmul_17, parameter_38 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_17 = paddle._C_ops.add(layer_norm_13, add_16) + del add_16, layer_norm_13 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_16, layer_norm_17, layer_norm_18 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_17, parameter_37, parameter_36, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_17, parameter_36, parameter_37 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_18 = paddle._C_ops.matmul(layer_norm_16, parameter_35, False, False) + del parameter_35 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_18 = paddle._C_ops.add(matmul_18, parameter_34) + del matmul_18, parameter_34 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_0 = [full_1, full_2, full_3, data_0, full_4] + del data_0 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x3x-1x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_18, stack_0) + del add_18, stack_0 + + # pd_op.transpose: (3x-1x-1x-1x32xf32) <- (-1x-1x3x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + transpose_9, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + transpose_9, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + transpose_9, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_9 + + # pd_op.transpose: (-1x-1x32x-1xf32) <- (-1x-1x-1x32xf32) + transpose_10 = paddle._C_ops.transpose(slice_10, [0, 1, 3, 2]) + del slice_10 + + # pd_op.matmul: (-1x-1x-1x-1xf32) <- (-1x-1x-1x32xf32, -1x-1x32x-1xf32) + matmul_19 = paddle._C_ops.matmul(slice_9, transpose_10, False, False) + del slice_9, transpose_10 + + # pd_op.scale: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_19, full_0, float("0"), True) + del matmul_19 + + # pd_op.softmax: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (-1x-1x-1x32xf32) <- (-1x-1x-1x-1xf32, -1x-1x-1x32xf32) + matmul_20 = paddle._C_ops.matmul(softmax_3, slice_11, False, False) + del slice_11, softmax_3 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x-1x-1x32xf32) + transpose_11 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_1, full_2, data_1] + del data_1 + + # 
pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (-1x-1x-1xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_11, stack_1) + del stack_1, transpose_11 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x-1xf32, 384x384xf32) + matmul_21 = paddle._C_ops.matmul(reshape_7, parameter_33, False, False) + del parameter_33, reshape_7 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_19 = paddle._C_ops.add(matmul_21, parameter_32) + del matmul_21, parameter_32 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_20 = paddle._C_ops.add(layer_norm_16, add_19) + del add_19, layer_norm_16 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_19, layer_norm_20, layer_norm_21 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_31, parameter_30, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_20, parameter_30, parameter_31 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_19, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_21 = paddle._C_ops.add(matmul_22, parameter_28) + del matmul_22, parameter_28 + + # pd_op.gelu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + gelu_3 = paddle._C_ops.gelu(add_21, False) + del add_21 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_23 = paddle._C_ops.matmul(gelu_3, parameter_27, False, False) + del gelu_3, parameter_27 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_26) + del matmul_23, parameter_26 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_23 = paddle._C_ops.add(layer_norm_19, add_22) + del add_22, layer_norm_19 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_22, layer_norm_23, layer_norm_24 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_25, parameter_24, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_23, parameter_24, parameter_25 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_22, parameter_23, False, False) + del parameter_23 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_24 = paddle._C_ops.add(matmul_24, parameter_22) + del matmul_24, parameter_22 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_2 = [full_1, full_2, full_3, data_2, full_4] + del data_2 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x3x-1x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_24, stack_2) + del add_24, stack_2 + + # pd_op.transpose: (3x-1x-1x-1x32xf32) <- (-1x-1x3x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + transpose_12, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + 
transpose_12, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + transpose_12, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_12 + + # pd_op.transpose: (-1x-1x32x-1xf32) <- (-1x-1x-1x32xf32) + transpose_13 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) + del slice_13 + + # pd_op.matmul: (-1x-1x-1x-1xf32) <- (-1x-1x-1x32xf32, -1x-1x32x-1xf32) + matmul_25 = paddle._C_ops.matmul(slice_12, transpose_13, False, False) + del slice_12, transpose_13 + + # pd_op.scale: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_25, full_0, float("0"), True) + del matmul_25 + + # pd_op.softmax: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32) + softmax_4 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.matmul: (-1x-1x-1x32xf32) <- (-1x-1x-1x-1xf32, -1x-1x-1x32xf32) + matmul_26 = paddle._C_ops.matmul(softmax_4, slice_14, False, False) + del slice_14, softmax_4 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x-1x-1x32xf32) + transpose_14 = paddle._C_ops.transpose(matmul_26, [0, 2, 1, 3]) + del matmul_26 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [full_1, full_2, data_3] + del data_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (-1x-1x-1xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_14, stack_3) + del stack_3, transpose_14 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x-1xf32, 384x384xf32) + matmul_27 = paddle._C_ops.matmul(reshape_9, parameter_21, False, False) + del parameter_21, reshape_9 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_25 = paddle._C_ops.add(matmul_27, parameter_20) + del matmul_27, parameter_20 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_26 = paddle._C_ops.add(layer_norm_22, add_25) + del add_25, layer_norm_22 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_25, layer_norm_26, layer_norm_27 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_26, parameter_19, parameter_18, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_26, parameter_18, parameter_19 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_25, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_27 = paddle._C_ops.add(matmul_28, parameter_16) + del matmul_28, parameter_16 + + # pd_op.gelu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + gelu_4 = paddle._C_ops.gelu(add_27, False) + del add_27 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_29 = paddle._C_ops.matmul(gelu_4, parameter_15, False, False) + del gelu_4, parameter_15 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_28 = paddle._C_ops.add(matmul_29, parameter_14) + del matmul_29, parameter_14 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_29 = paddle._C_ops.add(layer_norm_25, add_28) + del add_28, layer_norm_25 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_28, layer_norm_29, layer_norm_30 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_29, parameter_13, parameter_12, float("1e-06"), 2 + ), + lambda 
out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_29, parameter_12, parameter_13 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_30 = paddle._C_ops.matmul(layer_norm_28, parameter_11, False, False) + del parameter_11 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_30 = paddle._C_ops.add(matmul_30, parameter_10) + del matmul_30, parameter_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_4 = [full_1, full_2, full_3, data_4, full_4] + del data_4, full_3, full_4 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x-1x3x-1x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_10 = paddle._C_ops.reshape(add_30, stack_4) + del add_30, stack_4 + + # pd_op.transpose: (3x-1x-1x-1x32xf32) <- (-1x-1x3x-1x32xf32) + transpose_15 = paddle._C_ops.transpose(reshape_10, [2, 0, 3, 1, 4]) + del reshape_10 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + transpose_15, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + transpose_15, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + transpose_15, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4, transpose_15 + + # pd_op.transpose: (-1x-1x32x-1xf32) <- (-1x-1x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(slice_16, [0, 1, 3, 2]) + del slice_16 + + # pd_op.matmul: (-1x-1x-1x-1xf32) <- (-1x-1x-1x32xf32, -1x-1x32x-1xf32) + matmul_31 = paddle._C_ops.matmul(slice_15, transpose_16, False, False) + del slice_15, transpose_16 + + # pd_op.scale: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(matmul_31, full_0, float("0"), True) + del full_0, matmul_31 + + # pd_op.softmax: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32) + softmax_5 = paddle._C_ops.softmax(scale_5, -1) + del scale_5 + + # pd_op.matmul: (-1x-1x-1x32xf32) <- (-1x-1x-1x-1xf32, -1x-1x-1x32xf32) + matmul_32 = paddle._C_ops.matmul(softmax_5, slice_17, False, False) + del slice_17, softmax_5 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x-1x-1x32xf32) + transpose_17 = paddle._C_ops.transpose(matmul_32, [0, 2, 1, 3]) + del matmul_32 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [full_1, full_2, data_5] + del data_5, full_1, full_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x-1xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_17, stack_5) + del stack_5, transpose_17 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x-1xf32, 384x384xf32) + matmul_33 = paddle._C_ops.matmul(reshape_11, parameter_9, False, False) + del parameter_9, reshape_11 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_31 = paddle._C_ops.add(matmul_33, parameter_8) + del matmul_33, parameter_8 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_32 = paddle._C_ops.add(layer_norm_28, add_31) + del add_31, layer_norm_28 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_31, 
layer_norm_32, layer_norm_33 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_32, parameter_7, parameter_6, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_32, parameter_6, parameter_7 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_34 = paddle._C_ops.matmul(layer_norm_31, parameter_5, False, False) + del parameter_5 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_33 = paddle._C_ops.add(matmul_34, parameter_4) + del matmul_34, parameter_4 + + # pd_op.gelu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + gelu_5 = paddle._C_ops.gelu(add_33, False) + del add_33 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_35 = paddle._C_ops.matmul(gelu_5, parameter_3, False, False) + del gelu_5, parameter_3 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_34 = paddle._C_ops.add(matmul_35, parameter_2) + del matmul_35, parameter_2 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_35 = paddle._C_ops.add(layer_norm_31, add_34) + del add_34, layer_norm_31 + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_35, parameter_1, parameter_0, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_35, parameter_0, parameter_1 + + return layer_norm_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/weight_meta.py new file mode 100644 index 000000000..4f187ef7d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/weight_meta.py @@ -0,0 +1,790 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.391402") + max_val = float("2.3367") + mean = float("-0.00598458") + std = float("0.141673") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-0.00565738") + max_val = float("1.27927") + mean = float("0.633123") + std = float("0.503759") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("-6.21411") + max_val = float("3.16239") + mean = float("0.00834756") + std = float("0.559457") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.26191") + max_val = float("0.729362") + mean = float("-0.000228003") + std = float("0.0592983") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1536] + dtype = "float32" + min_val = float("-1.33757") + max_val = float("1.11645") + mean = float("-0.692215") + std = float("0.261334") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.294777") + max_val = float("0.396355") + mean = float("-0.00138533") + std = float("0.0536937") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("-5.66578") + max_val = float("0.772169") + mean = float("-0.00872104") + std = float("0.388662") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = 
[384] + dtype = "float32" + min_val = float("-0.739159") + max_val = float("6.11831") + mean = float("0.678812") + std = float("0.448688") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.82461") + max_val = float("1.50422") + mean = float("-0.0176146") + std = float("0.162813") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.359963") + max_val = float("0.452581") + mean = float("0.000360468") + std = float("0.0463619") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1152] + dtype = "float32" + min_val = float("-2.96068") + max_val = float("3.21107") + mean = float("0.00583689") + std = float("0.351656") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.357374") + max_val = float("0.357679") + mean = float("-9.55063e-05") + std = float("0.0499831") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-0.966682") + max_val = float("0.725959") + mean = float("-0.013408") + std = float("0.255614") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.00823331") + max_val = float("1.93247") + mean = float("0.840199") + std = float("0.330857") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("-1.16823") + max_val = float("1.12459") + mean = float("-0.0174486") + std = float("0.171722") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.645645") + max_val = float("0.574877") + mean = float("7.36735e-05") + std = float("0.0669972") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [1536] + dtype = "float32" + min_val = float("-1.29848") + max_val = float("0.18738") + mean = float("-0.606057") + std = float("0.151069") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.369164") + max_val = float("0.407876") + mean = float("-0.0018972") + std = float("0.0541995") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-3.01246") + max_val = float("1.12016") + mean = float("0.0298529") + std = float("0.365599") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("-0.247378") + max_val = float("2.80921") + mean = float("0.68446") + std = float("0.319864") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-1.02185") + max_val = float("1.04649") + mean = float("-0.0148317") + std = float("0.150738") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.327329") + max_val = float("0.277231") + mean = float("0.000226891") + std = float("0.0473665") + data = None + + +class Program_weight_tensor_parameter_22: + name = 
"parameter_22" + shape = [1152] + dtype = "float32" + min_val = float("-2.91261") + max_val = float("2.62501") + mean = float("-0.00812967") + std = float("0.320421") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.370563") + max_val = float("0.31117") + mean = float("-7.96871e-05") + std = float("0.0498858") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("-0.956347") + max_val = float("1.27525") + mean = float("-0.0171171") + std = float("0.244056") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00384169") + max_val = float("1.45222") + mean = float("0.817353") + std = float("0.306464") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("-1.07632") + max_val = float("1.26615") + mean = float("0.00127065") + std = float("0.189295") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.658729") + max_val = float("0.6201") + mean = float("0.000119644") + std = float("0.0567182") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1536] + dtype = "float32" + min_val = float("-1.18985") + max_val = float("-0.11489") + mean = float("-0.612413") + std = float("0.159647") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.446305") + max_val = float("0.48456") + mean = float("-0.00116597") + std = float("0.0516982") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-2.2494") + max_val = float("1.87015") + mean = float("0.0159406") + std = float("0.363073") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("-1.69658") + max_val = float("2.23941") + mean = float("0.623688") + std = float("0.349048") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.902065") + max_val = float("1.05334") + mean = float("-0.00722261") + std = float("0.163613") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.380073") + max_val = float("0.3403") + mean = float("0.000120366") + std = float("0.045804") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [1152] + dtype = "float32" + min_val = float("-2.90856") + max_val = float("2.61204") + mean = float("0.00604566") + std = float("0.313915") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.427216") + max_val = float("0.444826") + mean = float("-4.41144e-05") + std = float("0.0490547") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("-0.91578") + max_val = float("1.7443") + mean = float("-0.00817724") + std = float("0.287092") + data = None + + +class 
Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-0.722843") + max_val = float("1.49098") + mean = float("0.707506") + std = float("0.338238") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-1.79254") + max_val = float("1.51364") + mean = float("0.0130683") + std = float("0.187277") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.560982") + max_val = float("0.46107") + mean = float("0.000220093") + std = float("0.0556088") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [1536] + dtype = "float32" + min_val = float("-1.17307") + max_val = float("0.110726") + mean = float("-0.589375") + std = float("0.266551") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.753973") + max_val = float("0.532313") + mean = float("-0.000429399") + std = float("0.0502703") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-4.64963") + max_val = float("1.32753") + mean = float("-0.00448569") + std = float("0.498694") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-1.46402") + max_val = float("2.61635") + mean = float("0.625875") + std = float("0.370867") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.815131") + max_val = float("1.47232") + mean = float("-0.00680412") + std = float("0.175158") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.453735") + max_val = float("0.453823") + mean = float("-8.6766e-05") + std = float("0.0477994") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1152] + dtype = "float32" + min_val = float("-3.21735") + max_val = float("3.23225") + mean = float("0.00889441") + std = float("0.346004") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.412519") + max_val = float("0.585026") + mean = float("3.66532e-05") + std = float("0.0490304") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.990367") + max_val = float("1.80312") + mean = float("-0.0109785") + std = float("0.316399") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("-0.0502721") + max_val = float("2.78606") + mean = float("0.796703") + std = float("0.429828") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-1.50918") + max_val = float("1.41042") + mean = float("0.022935") + std = float("0.35455") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.673039") + max_val = float("0.302366") + mean = float("0.00020859") + std = float("0.055987") + data = 
None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [1536] + dtype = "float32" + min_val = float("-0.742302") + max_val = float("0.0126398") + mean = float("-0.503555") + std = float("0.118778") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.296392") + max_val = float("0.458163") + mean = float("-0.00100343") + std = float("0.0430499") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("-5.35713") + max_val = float("1.78377") + mean = float("0.00429743") + std = float("0.727441") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0886793") + max_val = float("3.47094") + mean = float("0.797286") + std = float("0.403355") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("-1.34538") + max_val = float("1.73164") + mean = float("-0.00670597") + std = float("0.242496") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.301494") + max_val = float("0.319887") + mean = float("-0.000225226") + std = float("0.0503969") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [1152] + dtype = "float32" + min_val = float("-2.08303") + max_val = float("3.47333") + mean = float("0.00838089") + std = float("0.344024") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.40595") + max_val = float("0.385531") + mean = float("-5.09912e-05") + std = float("0.049489") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-1.11195") + max_val = float("1.82283") + mean = float("-0.0105979") + std = float("0.347943") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("-0.058447") + max_val = float("2.66544") + mean = float("0.818105") + std = float("0.408355") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("-1.43975") + max_val = float("1.8438") + mean = float("0.0177002") + std = float("0.410767") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.923805") + max_val = float("0.378559") + mean = float("0.000254345") + std = float("0.0562814") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [1536] + dtype = "float32" + min_val = float("-0.689164") + max_val = float("-0.00582935") + mean = float("-0.500339") + std = float("0.12361") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.500607") + max_val = float("0.310346") + mean = float("9.87344e-05") + std = float("0.0412131") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("-3.63276") + max_val = float("1.655") + mean = float("-0.00950253") + std = 
float("0.76394") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("-0.0849895") + max_val = float("3.507") + mean = float("0.779191") + std = float("0.369713") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.672442") + max_val = float("0.951067") + mean = float("0.00423046") + std = float("0.17594") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.361009") + max_val = float("0.395252") + mean = float("0.000257147") + std = float("0.0462705") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [1152] + dtype = "float32" + min_val = float("-2.57961") + max_val = float("2.52776") + mean = float("-0.00793312") + std = float("0.316728") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.306372") + max_val = float("0.394796") + mean = float("4.17725e-05") + std = float("0.0469842") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt new file mode 100644 index 000000000..58336ba55 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt @@ -0,0 +1 @@ +a15eb770dc17564594d30f8f56aac3cf032511a8e6c9feb11c06ac1ded7f7636 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/input_meta.py new file mode 100644 index 000000000..78a13a396 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/input_meta.py @@ -0,0 +1,65 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 256, 6, 80] + dtype = "float32" + min_val = float("-6.03514") + max_val = float("5.63268") + mean = float("0.00374437") + std = float("0.490587") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/model.py new file mode 100644 index 
000000000..0be0567b0 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/model.py @@ -0,0 +1,889 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.conv2d: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 256x32x5x5xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_8, parameter_71, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_71 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_70, full_int_array_0) + del parameter_70 + + # pd_op.add: (8x256x-1x80xf32) <- (8x256x-1x80xf32, 1x256x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + del conv2d_0, reshape_0 + + # pd_op.add: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 8x256x-1x80xf32) + add_1 = paddle._C_ops.add(data_8, add_0) + del add_0, data_8 + + # pd_op.flatten: (8x256x-1xf32) <- (8x256x-1x80xf32) + flatten_0 = paddle._C_ops.flatten(add_1, 2, 3) + del add_1 + + # pd_op.transpose: (8x-1x256xf32) <- (8x256x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_69, parameter_68, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_68, parameter_69, transpose_0 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_1, parameter_67, False, False) + del parameter_67 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_2 = paddle._C_ops.add(matmul_0, parameter_66) + del matmul_0, parameter_66 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_0 = paddle._C_ops.gelu(add_2, False) + del add_2 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_1 = paddle._C_ops.matmul(gelu_0, parameter_65, False, False) + del gelu_0, parameter_65 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_3 = paddle._C_ops.add(matmul_1, parameter_64) + del matmul_1, parameter_64 + + # pd_op.add: 
(8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_4 = paddle._C_ops.add(layer_norm_1, add_3) + del add_3, layer_norm_1 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_4, parameter_63, parameter_62, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_4, parameter_62, parameter_63 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_4, [0, 2, 1]) + del layer_norm_4 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("80"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, data_0, data_1, full_1] + del data_0, data_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(transpose_1, stack_0) + del stack_0, transpose_1 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_0 = paddle._C_ops.shape64(reshape_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.conv2d: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 256x32x5x5xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_1, parameter_61, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_61 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_60, full_int_array_0) + del parameter_60 + + # pd_op.add: (8x256x-1x80xf32) <- (8x256x-1x80xf32, 1x256x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.add: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 8x256x-1x80xf32) + add_6 = paddle._C_ops.add(reshape_1, add_5) + del add_5, reshape_1 + + # pd_op.flatten: (8x256x-1xf32) <- (8x256x-1x80xf32) + flatten_1 = paddle._C_ops.flatten(add_6, 2, 3) + del add_6 + + # pd_op.transpose: (8x-1x256xf32) <- (8x256x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_59, parameter_58, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_58, parameter_59, transpose_2 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_7, parameter_57, False, False) + del parameter_57 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_7 = paddle._C_ops.add(matmul_2, parameter_56) + del matmul_2, parameter_56 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_1 = 
paddle._C_ops.gelu(add_7, False) + del add_7 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_3 = paddle._C_ops.matmul(gelu_1, parameter_55, False, False) + del gelu_1, parameter_55 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_8 = paddle._C_ops.add(matmul_3, parameter_54) + del matmul_3, parameter_54 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_9 = paddle._C_ops.add(layer_norm_7, add_8) + del add_8, layer_norm_7 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_53, parameter_52, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_9, parameter_52, parameter_53 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_10, [0, 2, 1]) + del layer_norm_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_1 = [full_0, slice_0, slice_1, full_1] + del full_1, slice_0, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(transpose_3, stack_1) + del stack_1, transpose_3 + + # pd_op.flatten: (8x-1x-1xf32) <- (8x-1x-1x80xf32) + flatten_2 = paddle._C_ops.flatten(reshape_3, 2, 3) + del reshape_3 + + # pd_op.transpose: (8x-1x-1xf32) <- (8x-1x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x-1xf32, 256x768xf32) + matmul_4 = paddle._C_ops.matmul(transpose_4, parameter_51, False, False) + del parameter_51 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_10 = paddle._C_ops.add(matmul_4, parameter_50) + del matmul_4, parameter_50 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_2 = [full_0, full_2, full_3, data_2, full_4] + del data_2 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (8x-1x3x-1x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_10, stack_2) + del add_10, stack_2 + + # pd_op.transpose: (3x8x-1x-1x32xf32) <- (8x-1x3x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [0] + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_4, full_int_array_1, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del transpose_5 + + # pd_op.transpose: 
(8x-1x32x-1xf32) <- (8x-1x-1x32xf32) + transpose_6 = paddle._C_ops.transpose(slice_3, [0, 1, 3, 2]) + del slice_3 + + # pd_op.matmul: (8x-1x-1x-1xf32) <- (8x-1x-1x32xf32, 8x-1x32x-1xf32) + matmul_5 = paddle._C_ops.matmul(slice_2, transpose_6, False, False) + del slice_2, transpose_6 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_5, full_5, float("0"), True) + del matmul_5 + + # pd_op.softmax: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.matmul: (8x-1x-1x32xf32) <- (8x-1x-1x-1xf32, 8x-1x-1x32xf32) + matmul_6 = paddle._C_ops.matmul(softmax_0, slice_4, False, False) + del slice_4, softmax_0 + + # pd_op.transpose: (8x-1x-1x32xf32) <- (8x-1x-1x32xf32) + transpose_7 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [full_0, full_2, data_3] + del data_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (8x-1x-1xf32) <- (8x-1x-1x32xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_7, stack_3) + del stack_3, transpose_7 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x-1xf32, 256x256xf32) + matmul_7 = paddle._C_ops.matmul(reshape_5, parameter_49, False, False) + del parameter_49, reshape_5 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_11 = paddle._C_ops.add(matmul_7, parameter_48) + del matmul_7, parameter_48 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x-1xf32, 8x-1x256xf32) + add_12 = paddle._C_ops.add(transpose_4, add_11) + del add_11, transpose_4 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_13, layer_norm_14, layer_norm_15 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_47, parameter_46, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_12, parameter_46, parameter_47 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_13, parameter_45, False, False) + del parameter_45 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_13 = paddle._C_ops.add(matmul_8, parameter_44) + del matmul_8, parameter_44 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_2 = paddle._C_ops.gelu(add_13, False) + del add_13 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_9 = paddle._C_ops.matmul(gelu_2, parameter_43, False, False) + del gelu_2, parameter_43 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_14 = paddle._C_ops.add(matmul_9, parameter_42) + del matmul_9, parameter_42 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_15 = paddle._C_ops.add(layer_norm_13, add_14) + del add_14, layer_norm_13 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_16, layer_norm_17, layer_norm_18 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_15, parameter_41, parameter_40, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_15, parameter_40, parameter_41 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x256xf32, 256x768xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_16, 
parameter_39, False, False) + del parameter_39 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_16 = paddle._C_ops.add(matmul_10, parameter_38) + del matmul_10, parameter_38 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_4 = [full_0, full_2, full_3, data_4, full_4] + del data_4 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (8x-1x3x-1x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_16, stack_4) + del add_16, stack_4 + + # pd_op.transpose: (3x8x-1x-1x32xf32) <- (8x-1x3x-1x32xf32) + transpose_8 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_4, full_int_array_1, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del transpose_8 + + # pd_op.transpose: (8x-1x32x-1xf32) <- (8x-1x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(slice_6, [0, 1, 3, 2]) + del slice_6 + + # pd_op.matmul: (8x-1x-1x-1xf32) <- (8x-1x-1x32xf32, 8x-1x32x-1xf32) + matmul_11 = paddle._C_ops.matmul(slice_5, transpose_9, False, False) + del slice_5, transpose_9 + + # pd_op.scale: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_11, full_5, float("0"), True) + del matmul_11 + + # pd_op.softmax: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.matmul: (8x-1x-1x32xf32) <- (8x-1x-1x-1xf32, 8x-1x-1x32xf32) + matmul_12 = paddle._C_ops.matmul(softmax_1, slice_7, False, False) + del slice_7, softmax_1 + + # pd_op.transpose: (8x-1x-1x32xf32) <- (8x-1x-1x32xf32) + transpose_10 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [full_0, full_2, data_5] + del data_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (8x-1x-1xf32) <- (8x-1x-1x32xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_10, stack_5) + del stack_5, transpose_10 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x-1xf32, 256x256xf32) + matmul_13 = paddle._C_ops.matmul(reshape_7, parameter_37, False, False) + del parameter_37, reshape_7 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_17 = paddle._C_ops.add(matmul_13, parameter_36) + del matmul_13, parameter_36 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_18 = paddle._C_ops.add(layer_norm_16, add_17) + del add_17, layer_norm_16 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_19, layer_norm_20, layer_norm_21 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_18, parameter_35, parameter_34, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_18, parameter_34, parameter_35 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_19, parameter_33, False, False) + del 
parameter_33 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_19 = paddle._C_ops.add(matmul_14, parameter_32) + del matmul_14, parameter_32 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_3 = paddle._C_ops.gelu(add_19, False) + del add_19 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_15 = paddle._C_ops.matmul(gelu_3, parameter_31, False, False) + del gelu_3, parameter_31 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_20 = paddle._C_ops.add(matmul_15, parameter_30) + del matmul_15, parameter_30 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_21 = paddle._C_ops.add(layer_norm_19, add_20) + del add_20, layer_norm_19 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_22, layer_norm_23, layer_norm_24 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_21, parameter_29, parameter_28, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_21, parameter_28, parameter_29 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x256xf32, 256x768xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_22, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_22 = paddle._C_ops.add(matmul_16, parameter_26) + del matmul_16, parameter_26 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_6 = [full_0, full_2, full_3, data_6, full_4] + del data_6, full_3, full_4 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (8x-1x3x-1x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_22, stack_6) + del add_22, stack_6 + + # pd_op.transpose: (3x8x-1x-1x32xf32) <- (8x-1x3x-1x32xf32) + transpose_11 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_4, full_int_array_1, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del transpose_11 + + # pd_op.transpose: (8x-1x32x-1xf32) <- (8x-1x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(slice_9, [0, 1, 3, 2]) + del slice_9 + + # pd_op.matmul: (8x-1x-1x-1xf32) <- (8x-1x-1x32xf32, 8x-1x32x-1xf32) + matmul_17 = paddle._C_ops.matmul(slice_8, transpose_12, False, False) + del slice_8, transpose_12 + + # pd_op.scale: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_17, full_5, float("0"), True) + del matmul_17 + + # pd_op.softmax: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.matmul: (8x-1x-1x32xf32) <- (8x-1x-1x-1xf32, 8x-1x-1x32xf32) + matmul_18 = paddle._C_ops.matmul(softmax_2, slice_10, False, False) + del slice_10, softmax_2 + + # pd_op.transpose: (8x-1x-1x32xf32) <- (8x-1x-1x32xf32) + transpose_13 = paddle._C_ops.transpose(matmul_18, [0, 2, 1, 3]) + del matmul_18 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [full_0, full_2, data_7] + del data_7, full_0, 
full_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: (8x-1x-1xf32) <- (8x-1x-1x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_13, stack_7) + del stack_7, transpose_13 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x-1xf32, 256x256xf32) + matmul_19 = paddle._C_ops.matmul(reshape_9, parameter_25, False, False) + del parameter_25, reshape_9 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_23 = paddle._C_ops.add(matmul_19, parameter_24) + del matmul_19, parameter_24 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_24 = paddle._C_ops.add(layer_norm_22, add_23) + del add_23, layer_norm_22 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_25, layer_norm_26, layer_norm_27 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_24, parameter_23, parameter_22, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_24, parameter_22, parameter_23 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_25, parameter_21, False, False) + del parameter_21 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_25 = paddle._C_ops.add(matmul_20, parameter_20) + del matmul_20, parameter_20 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_4 = paddle._C_ops.gelu(add_25, False) + del add_25 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_21 = paddle._C_ops.matmul(gelu_4, parameter_19, False, False) + del gelu_4, parameter_19 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_26 = paddle._C_ops.add(matmul_21, parameter_18) + del matmul_21, parameter_18 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_27 = paddle._C_ops.add(layer_norm_25, add_26) + del add_26, layer_norm_25 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_28, layer_norm_29, layer_norm_30 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_27, parameter_17, parameter_16, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_27, parameter_16, parameter_17 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x256xf32, 256x768xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_28, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_28 = paddle._C_ops.add(matmul_22, parameter_14) + del matmul_22, parameter_14 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_5 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (8x-1x3x8x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_10 = paddle._C_ops.reshape(add_28, full_int_array_5) + del add_28, full_int_array_5 + + # pd_op.transpose: (3x8x8x-1x32xf32) <- (8x-1x3x8x32xf32) + transpose_14 = paddle._C_ops.transpose(reshape_10, [2, 0, 3, 1, 4]) + del reshape_10 + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_4, full_int_array_1, [1], [0] + ) + del full_int_array_4 + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + transpose_14, [0], 
full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_3, transpose_14 + + # pd_op.transpose: (8x8x32x-1xf32) <- (8x8x-1x32xf32) + transpose_15 = paddle._C_ops.transpose(slice_12, [0, 1, 3, 2]) + del slice_12 + + # pd_op.matmul: (8x8x-1x-1xf32) <- (8x8x-1x32xf32, 8x8x32x-1xf32) + matmul_23 = paddle._C_ops.matmul(slice_11, transpose_15, False, False) + del slice_11, transpose_15 + + # pd_op.scale: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_23, full_5, float("0"), True) + del full_5, matmul_23 + + # pd_op.softmax: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (8x8x-1x32xf32) <- (8x8x-1x-1xf32, 8x8x-1x32xf32) + matmul_24 = paddle._C_ops.matmul(softmax_3, slice_13, False, False) + del slice_13, softmax_3 + + # pd_op.transpose: (8x-1x8x32xf32) <- (8x8x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(matmul_24, [0, 2, 1, 3]) + del matmul_24 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, -1, 256] + + # pd_op.reshape: (8x-1x256xf32) <- (8x-1x8x32xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6, transpose_16 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x256xf32, 256x256xf32) + matmul_25 = paddle._C_ops.matmul(reshape_11, parameter_13, False, False) + del parameter_13, reshape_11 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_29 = paddle._C_ops.add(matmul_25, parameter_12) + del matmul_25, parameter_12 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_30 = paddle._C_ops.add(layer_norm_28, add_29) + del add_29, layer_norm_28 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_31, layer_norm_32, layer_norm_33 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_30, parameter_11, parameter_10, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_30, parameter_10, parameter_11 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_31, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_31 = paddle._C_ops.add(matmul_26, parameter_8) + del matmul_26, parameter_8 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_5 = paddle._C_ops.gelu(add_31, False) + del add_31 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_27 = paddle._C_ops.matmul(gelu_5, parameter_7, False, False) + del gelu_5, parameter_7 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_32 = paddle._C_ops.add(matmul_27, parameter_6) + del matmul_27, parameter_6 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_33 = paddle._C_ops.add(layer_norm_31, add_32) + del add_32, layer_norm_31 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_34, layer_norm_35, layer_norm_36 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_33, parameter_5, parameter_4, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_33, parameter_4, parameter_5 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_1 = paddle._C_ops.shape64(layer_norm_34) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del 
full_int_array_1, full_int_array_2, shape64_1 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_34, [0, 2, 1]) + del layer_norm_34 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [0, 256, 6, 80] + + # pd_op.reshape: (8x256x6x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(transpose_17, full_int_array_7) + del full_int_array_7, transpose_17 + + # pd_op.conv2d: (8x384x3x80xf32) <- (8x256x6x80xf32, 384x256x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + reshape_12, parameter_3, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3, reshape_12 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del full_int_array_0, parameter_2 + + # pd_op.add: (8x384x3x80xf32) <- (8x384x3x80xf32, 1x384x1x1xf32) + add_34 = paddle._C_ops.add(conv2d_2, reshape_13) + del conv2d_2, reshape_13 + + # pd_op.flatten: (8x384x240xf32) <- (8x384x3x80xf32) + flatten_3 = paddle._C_ops.flatten(add_34, 2, 3) + del add_34 + + # pd_op.transpose: (8x240x384xf32) <- (8x384x240xf32) + transpose_18 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (8x240x384xf32, 8x240xf32, 8x240xf32) <- (8x240x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_18, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_0, parameter_1, transpose_18 + + return layer_norm_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/weight_meta.py new file mode 100644 index 000000000..ec27cb5b2 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/weight_meta.py @@ -0,0 +1,790 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.606548") + max_val = float("0.572658") + mean = float("0.00375302") + std = float("0.148426") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-0.0136385") + max_val = float("1.10781") + mean = float("0.805448") + std = float("0.318495") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("-2.13381") + max_val = float("3.10468") + mean = float("0.00370893") + std = float("0.481209") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-4.80588") + max_val = float("3.58056") + mean = float("-0.000669643") + std = float("0.434603") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("-1.00377") + max_val = float("1.5866") + mean = float("-0.117704") + std = float("0.210453") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256] + dtype = "float32" + min_val = float("0.227411") + max_val = float("1.68324") + mean = float("1.24738") + std = float("0.144259") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [256] + dtype = "float32" + min_val = float("-8.09277") + max_val = float("0.640735") + mean = float("-0.0152048") + std = float("0.534922") + data = None 
+ + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.43496") + max_val = float("1.05897") + mean = float("1.53746e-05") + std = float("0.0573661") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [1024] + dtype = "float32" + min_val = float("-1.46232") + max_val = float("-0.0231789") + mean = float("-0.687925") + std = float("0.224782") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.346092") + max_val = float("0.299287") + mean = float("-0.00565399") + std = float("0.0564931") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [256] + dtype = "float32" + min_val = float("-3.06533") + max_val = float("1.76981") + mean = float("0.0968756") + std = float("0.355712") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [256] + dtype = "float32" + min_val = float("0.111695") + max_val = float("2.92098") + mean = float("0.694832") + std = float("0.22963") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [256] + dtype = "float32" + min_val = float("-1.23681") + max_val = float("1.0048") + mean = float("-0.00871141") + std = float("0.14359") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.32287") + max_val = float("0.545039") + mean = float("-9.46548e-05") + std = float("0.0445592") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [768] + dtype = "float32" + min_val = float("-3.23849") + max_val = float("3.07741") + mean = float("0.0190537") + std = float("0.323153") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.599278") + max_val = float("0.496422") + mean = float("-3.80634e-05") + std = float("0.052645") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [256] + dtype = "float32" + min_val = float("-1.03015") + max_val = float("1.35403") + mean = float("-0.012842") + std = float("0.214878") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256] + dtype = "float32" + min_val = float("0.291876") + max_val = float("1.5882") + mean = float("0.900833") + std = float("0.111472") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [256] + dtype = "float32" + min_val = float("-2.05991") + max_val = float("0.660192") + mean = float("-0.00212401") + std = float("0.194412") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.563975") + max_val = float("0.813296") + mean = float("2.61305e-05") + std = float("0.0567821") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [1024] + dtype = "float32" + min_val = float("-1.33353") + max_val = float("-0.258815") + mean = float("-0.694224") + std = float("0.209829") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.379618") + max_val = float("0.370024") + mean = float("-0.00381384") + std = 
float("0.0555629") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256] + dtype = "float32" + min_val = float("-2.22821") + max_val = float("1.38206") + mean = float("0.0666401") + std = float("0.302843") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("0.0820868") + max_val = float("2.64053") + mean = float("0.745009") + std = float("0.190088") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("-1.62208") + max_val = float("1.39124") + mean = float("-0.0110466") + std = float("0.1868") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.484073") + max_val = float("0.322057") + mean = float("9.14735e-05") + std = float("0.0479608") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [768] + dtype = "float32" + min_val = float("-2.94499") + max_val = float("3.08614") + mean = float("0.0137281") + std = float("0.347329") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.406168") + max_val = float("0.440281") + mean = float("-2.74141e-05") + std = float("0.0531518") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-1.5211") + max_val = float("1.36009") + mean = float("-0.00825827") + std = float("0.293386") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.261202") + max_val = float("2.43577") + mean = float("0.92451") + std = float("0.190809") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("-0.749751") + max_val = float("0.592661") + mean = float("-0.000329425") + std = float("0.132286") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.505895") + max_val = float("0.472223") + mean = float("-3.80705e-05") + std = float("0.0616234") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [1024] + dtype = "float32" + min_val = float("-1.11978") + max_val = float("-0.0299963") + mean = float("-0.639817") + std = float("0.206774") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.473619") + max_val = float("0.354136") + mean = float("-0.00145026") + std = float("0.0545714") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256] + dtype = "float32" + min_val = float("-2.6692") + max_val = float("2.16447") + mean = float("0.0282511") + std = float("0.485632") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("0.0202418") + max_val = float("2.37993") + mean = float("0.79868") + std = float("0.238688") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-1.95204") + max_val = float("1.32873") + mean = float("-0.0100322") + 
std = float("0.260344") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.331363") + max_val = float("0.302074") + mean = float("6.85545e-05") + std = float("0.0484705") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-3.23988") + max_val = float("3.17341") + mean = float("0.0112835") + std = float("0.360818") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.485666") + max_val = float("0.382872") + mean = float("8.84596e-07") + std = float("0.0527938") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256] + dtype = "float32" + min_val = float("-1.22705") + max_val = float("1.12567") + mean = float("-0.00583161") + std = float("0.31517") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("0.0719479") + max_val = float("2.17936") + mean = float("0.899499") + std = float("0.215358") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = float("-0.593713") + max_val = float("0.880143") + mean = float("0.00452423") + std = float("0.137268") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.44878") + max_val = float("0.40673") + mean = float("5.79501e-05") + std = float("0.0640804") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [1024] + dtype = "float32" + min_val = float("-1.1324") + max_val = float("-0.0597701") + mean = float("-0.618033") + std = float("0.192981") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.333676") + max_val = float("0.370896") + mean = float("0.000545573") + std = float("0.0537775") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-2.27663") + max_val = float("2.47616") + mean = float("-0.00601392") + std = float("0.576123") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("0.0863369") + max_val = float("2.26134") + mean = float("0.787782") + std = float("0.203747") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [256] + dtype = "float32" + min_val = float("-1.86604") + max_val = float("1.88891") + mean = float("-0.00533099") + std = float("0.221605") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.427967") + max_val = float("0.523471") + mean = float("2.46172e-05") + std = float("0.048039") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-2.90043") + max_val = float("2.60659") + mean = float("-0.00467167") + std = float("0.351381") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.303316") + max_val = float("0.421669") + 
mean = float("4.09005e-05") + std = float("0.0527594") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256] + dtype = "float32" + min_val = float("-1.72865") + max_val = float("1.36101") + mean = float("0.000942415") + std = float("0.4173") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [256] + dtype = "float32" + min_val = float("0.301877") + max_val = float("1.3885") + mean = float("0.917692") + std = float("0.182302") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [256] + dtype = "float32" + min_val = float("-0.546974") + max_val = float("0.441399") + mean = float("-0.00034962") + std = float("0.137598") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.438771") + max_val = float("0.348308") + mean = float("0.000119522") + std = float("0.06319") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [1024] + dtype = "float32" + min_val = float("-0.870234") + max_val = float("-0.148508") + mean = float("-0.578987") + std = float("0.148147") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.618111") + max_val = float("0.42523") + mean = float("0.000793249") + std = float("0.0470161") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-1.54587") + max_val = float("2.06195") + mean = float("-0.0245377") + std = float("0.666909") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256] + dtype = "float32" + min_val = float("0.249175") + max_val = float("1.74662") + mean = float("0.92944") + std = float("0.194932") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("-2.28339") + max_val = float("2.10392") + mean = float("0.00488615") + std = float("0.356684") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.271139") + max_val = float("0.519153") + mean = float("0.00138608") + std = float("0.0431195") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256] + dtype = "float32" + min_val = float("-0.633619") + max_val = float("0.740488") + mean = float("-0.0519645") + std = float("0.1882") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [256] + dtype = "float32" + min_val = float("0.0676594") + max_val = float("1.86373") + mean = float("1.3139") + std = float("0.156863") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("-8.93803") + max_val = float("0.458159") + mean = float("-0.0257134") + std = float("0.585232") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.32384") + max_val = float("1.1636") + mean = float("-3.25298e-05") + std = float("0.0559817") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [1024] + dtype = "float32" + min_val = float("-1.46769") + max_val = 
float("-0.128175") + mean = float("-0.6653") + std = float("0.178159") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.325814") + max_val = float("0.365852") + mean = float("0.000794164") + std = float("0.0568346") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [256] + dtype = "float32" + min_val = float("-0.65967") + max_val = float("0.72201") + mean = float("-0.0107726") + std = float("0.19808") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [256] + dtype = "float32" + min_val = float("0.402273") + max_val = float("1.83328") + mean = float("0.686545") + std = float("0.133889") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [256] + dtype = "float32" + min_val = float("-2.1692") + max_val = float("1.48687") + mean = float("-0.00278559") + std = float("0.301189") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.260716") + max_val = float("0.493174") + mean = float("0.00108033") + std = float("0.0415011") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt new file mode 100644 index 000000000..758d3cf6c --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt @@ -0,0 +1 @@ +e2ef048605e2438c1fcbd1ee00942eb3f372ac0ba43100bb502f52e55211e0f0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/input_meta.py new file mode 100644 index 000000000..5c45bb35f --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [5, 384, 1, 40] + dtype = "float32" + min_val = float("-8.03003") + max_val = float("4.27638") + mean = float("0.000711681") + std = float("0.433526") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/model.py new file mode 100644 index 000000000..30fbbe858 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/model.py @@ -0,0 +1,828 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + 
parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + data_0, + ): + # pd_op.assign: (-1x384x1x40xf32) <- (-1x384x1x40xf32) + assign_0 = data_0 + del data_0 + + # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x384x1x40xf32, 48x384x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + assign_0, parameter_52, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_47 + + # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) + shape64_0 = paddle._C_ops.shape64(swish_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_1 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del shape64_0 + + # pd_op.flatten: (-1x256x40xf32) <- (-1x256x1x40xf32) + flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) + + # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_42, parameter_41, 
float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_41, parameter_42 + + # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_39) + del parameter_39 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_2 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_2) + + # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_7 = full_0 + + # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_1, full_0, float("0"), True) + del slice_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_3 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_4 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) + transpose_2 = paddle._C_ops.transpose(slice_2, [0, 1, 3, 2]) + del slice_2 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) + matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(matmul_1, -1) + del matmul_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_15 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_16 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_17 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_18 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_19 = full_1 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_2 = paddle._C_ops.matmul(dropout_0, slice_3, False, False) + + # pd_op.transpose: 
(-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [0, -1, 256] + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) + del parameter_38 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_2 = paddle._C_ops.add(matmul_3, parameter_37) + del parameter_37 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_2, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_2 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) + add_3 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_3, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) + del parameter_34 + + # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) + add_4 = paddle._C_ops.add(matmul_4, parameter_33) + del parameter_33 + + # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) + swish_2 = paddle._C_ops.swish(add_4) + + # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_2, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_2 + + # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) + matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) + del parameter_32 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) + add_5 = paddle._C_ops.add(matmul_5, parameter_31) + del parameter_31 + + # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) + add_6 = paddle._C_ops.add(add_3, dropout_6) + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_30, parameter_29, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_29, parameter_30 + + # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) + del parameter_28 + + # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) + add_7 = paddle._C_ops.add(matmul_6, parameter_27) + del parameter_27 + + # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) + reshape_2 = 
paddle._C_ops.reshape(add_7, full_int_array_2) + del full_int_array_2 + + # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) + transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) + del reshape_2 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + + # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_4, full_0, float("0"), True) + del slice_4 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(slice_5, [0, 1, 3, 2]) + del slice_5 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) + matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(matmul_7, -1) + del matmul_7 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_8 = paddle._C_ops.matmul(dropout_8, slice_6, False, False) + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) + del full_int_array_5 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) + del parameter_26 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_8 = paddle._C_ops.add(matmul_9, parameter_25) + del parameter_25 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) + add_9 = paddle._C_ops.add(add_6, dropout_10) + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_24, parameter_23, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_23, parameter_24 + + # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) + del parameter_22 + + # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_10, parameter_21) + del parameter_21 + + # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) + swish_3 = paddle._C_ops.swish(add_10) + + # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- 
(-1x40x512xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_3, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_3 + + # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) + matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) + del parameter_20 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) + add_11 = paddle._C_ops.add(matmul_11, parameter_19) + del parameter_19 + + # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) + add_12 = paddle._C_ops.add(add_9, dropout_14) + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_18, parameter_17, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_17, parameter_18 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [0, 1, 40, 256] + + # pd_op.reshape: (-1x1x40x256xf32) <- (-1x40x256xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) + del full_int_array_6 + + # pd_op.transpose: (-1x256x1x40xf32) <- (-1x1x40x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) + del reshape_4 + + # pd_op.conv2d: (-1x384x1x40xf32) <- (-1x256x1x40xf32, 384x256x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.swish: (-1x384x1x40xf32) <- (-1x384x1x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x384x1x40xf32, -1x384x1x40xf32]) <- (-1x384x1x40xf32, -1x384x1x40xf32) + combine_0 = [assign_0, swish_4] + + # pd_op.concat: (-1x768x1x40xf32) <- ([-1x384x1x40xf32, -1x384x1x40xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x768x1x40xf32, 48x768x1x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + concat_0, parameter_11, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_3, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) + swish_6 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) + shape64_1 = paddle._C_ops.shape64(swish_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0, full_int_array_1, shape64_1 + + # pd_op.squeeze: (-1x256x40xf32) <- (-1x256x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) + + # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) + transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.matmul: (-1x40x6625xf32) <- (-1x40x256xf32, 256x6625xf32) + matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (-1x40x6625xf32) <- (-1x40x6625xf32, 6625xf32) + add_0 = paddle._C_ops.add(matmul_12, parameter_0) + del ( + add_1, + add_10, + add_12, + add_3, + add_4, + add_6, + add_7, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + concat_0, + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + conv2d_4, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_2, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_1, + full_2, + full_int_array_3, + full_int_array_4, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + 
layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_10, + matmul_11, + matmul_12, + matmul_3, + matmul_4, + matmul_5, + matmul_6, + matmul_9, + parameter_0, + reshape_1, + reshape_3, + scale_0, + scale_1, + slice_3, + slice_6, + softmax_0, + softmax_1, + swish_0, + swish_1, + swish_4, + swish_5, + swish_6, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + ) + + return add_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/weight_meta.py new file mode 100644 index 000000000..a2310ee69 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/weight_meta.py @@ -0,0 +1,565 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6625] + dtype = "float32" + min_val = float("-1.46606") + max_val = float("1.53074") + mean = float("-0.244704") + std = float("0.129444") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256, 6625] + dtype = "float32" + min_val = float("-0.842937") + max_val = float("0.387786") + mean = float("-0.230259") + std = float("0.0941191") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-3.44711") + max_val = float("9.32881") + mean = float("-0.0176235") + std = float("1.09428") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256] + dtype = "float32" + min_val = float("1.46858") + max_val = float("4.88491") + mean = float("2.71251") + std = float("0.50832") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("0.0225336") + max_val = float("0.143379") + mean = float("0.0357366") + std = float("0.0149711") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256] + dtype = "float32" + min_val = float("-1.63944") + max_val = float("1.06631") + mean = float("0.232719") + std = float("0.393577") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [256, 48, 1, 1] + dtype = "float32" + min_val = float("-0.346652") + max_val = float("0.199764") + mean = float("0.00135982") + std = float("0.0454835") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [48, 768, 1, 3] + dtype = "float32" + min_val = float("-0.411824") + max_val = float("0.447639") + mean = float("0.000202113") + std = float("0.0461906") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-2.40135") + 
max_val = float("0.0933204") + mean = float("-1.07436") + std = float("0.346628") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.392586") + max_val = float("2.96008") + mean = float("0.795887") + std = float("0.220098") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.185625") + max_val = float("2.85175") + mean = float("0.414939") + std = float("0.201393") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-1.33882") + max_val = float("2.08143") + mean = float("0.166697") + std = float("0.557162") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 256, 1, 1] + dtype = "float32" + min_val = float("-0.334565") + max_val = float("0.246529") + mean = float("0.000726154") + std = float("0.0450263") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256] + dtype = "float32" + min_val = float("-1.32656") + max_val = float("1.39396") + mean = float("0.00907315") + std = float("0.384049") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [256] + dtype = "float32" + min_val = float("0.24478") + max_val = float("2.11884") + mean = float("1.17906") + std = float("0.297452") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [256] + dtype = "float32" + min_val = float("-2.38568") + max_val = float("2.51877") + mean = float("-0.0175149") + std = float("0.463899") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512, 256] + dtype = "float32" + min_val = float("-0.770555") + max_val = float("0.653487") + mean = float("0.00010081") + std = float("0.0694743") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512] + dtype = "float32" + min_val = float("-1.72071") + max_val = float("-0.0488819") + mean = float("-0.690943") + std = float("0.191943") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256, 512] + dtype = "float32" + min_val = float("-0.486813") + max_val = float("0.349136") + mean = float("-0.0109179") + std = float("0.0585589") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("-0.830129") + max_val = float("3.54568") + mean = float("0.401418") + std = float("0.553377") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("0.515229") + max_val = float("2.65418") + mean = float("1.58716") + std = float("0.339216") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256] + dtype = "float32" + min_val = float("-0.910073") + max_val = float("0.803297") + mean = float("0.013143") + std = float("0.180515") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.336543") + max_val = float("0.384959") + mean = float("3.31584e-05") + std = float("0.0567728") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [768] + dtype = "float32" + min_val = float("-2.12229") + 
max_val = float("2.02694") + mean = float("0.0144932") + std = float("0.382514") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.52327") + max_val = float("0.465244") + mean = float("-3.15509e-05") + std = float("0.0524875") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("-1.36519") + max_val = float("0.608402") + mean = float("0.0183697") + std = float("0.298894") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("0.0883802") + max_val = float("1.58715") + mean = float("0.962013") + std = float("0.21906") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [256] + dtype = "float32" + min_val = float("-1.50972") + max_val = float("0.606858") + mean = float("-0.0166074") + std = float("0.219025") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [512, 256] + dtype = "float32" + min_val = float("-0.57403") + max_val = float("0.528108") + mean = float("-0.000393892") + std = float("0.0602463") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [512] + dtype = "float32" + min_val = float("-1.37099") + max_val = float("0.0959241") + mean = float("-0.599216") + std = float("0.309803") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256, 512] + dtype = "float32" + min_val = float("-0.809903") + max_val = float("0.393094") + mean = float("-0.0150369") + std = float("0.063342") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("-1.37513") + max_val = float("2.50846") + mean = float("0.558993") + std = float("0.614385") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-0.00444758") + max_val = float("2.37752") + mean = float("1.32518") + std = float("0.352321") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256] + dtype = "float32" + min_val = float("-0.889222") + max_val = float("0.34839") + mean = float("-0.000576614") + std = float("0.114881") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.295902") + max_val = float("0.236114") + mean = float("-8.30219e-05") + std = float("0.0511669") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-1.65431") + max_val = float("1.70769") + mean = float("-0.00397458") + std = float("0.42148") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.330457") + max_val = float("0.458014") + mean = float("6.30045e-05") + std = float("0.0509823") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-0.627416") + max_val = float("0.45842") + mean = float("0.0475874") + std = float("0.164939") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = 
float("-0.239407") + max_val = float("1.06255") + mean = float("0.472467") + std = float("0.228607") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [256] + dtype = "float32" + min_val = float("-1.8788") + max_val = float("2.96198") + mean = float("0.0198295") + std = float("0.837533") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256] + dtype = "float32" + min_val = float("0.545192") + max_val = float("5.24688") + mean = float("1.85214") + std = float("0.675565") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = float("0.0283431") + max_val = float("0.933237") + mean = float("0.0941559") + std = float("0.118807") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-2.46669") + max_val = float("2.2176") + mean = float("-0.0694887") + std = float("0.825226") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256, 48, 1, 1] + dtype = "float32" + min_val = float("-0.397741") + max_val = float("0.601989") + mean = float("-0.000414698") + std = float("0.0453336") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [48, 384, 1, 3] + dtype = "float32" + min_val = float("-0.496082") + max_val = float("0.336987") + mean = float("0.000211926") + std = float("0.0403539") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt new file mode 100644 index 000000000..192c53245 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt @@ -0,0 +1 @@ +ea6c0f8c5a7a0f0dda4c8ff3c1fab4d0c43521b6e0eb52f0ce940ddde0178ca3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/input_meta.py new file mode 100644 index 000000000..4493a6925 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/input_meta.py @@ -0,0 +1,30 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [4] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [256] + + +class 
Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [4] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 256, 4, 80] + dtype = "float32" + min_val = float("-5.68937") + max_val = float("6.58558") + mean = float("0.00427492") + std = float("0.459027") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/model.py new file mode 100644 index 000000000..cae5c8ecd --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/model.py @@ -0,0 +1,1846 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.conv2d: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 256x32x5x5xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_3, parameter_71, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_71 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_70, full_int_array_0) + del parameter_70 + + # pd_op.add: (8x256x-1x80xf32) <- (8x256x-1x80xf32, 1x256x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + + # pd_op.full: (xf64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__0 = paddle._C_ops.assign_value_( + full_0, + [], + paddle.float64, + [float("0.964706")], + paddle.framework._current_expected_place(), + ) + del full_0 + + # pd_op.cast: (xf32) <- (xf64) + cast_0 = paddle._C_ops.cast(assign_value__0, paddle.float32) + del assign_value__0 + + # pd_op.shape64: (4xi64) <- (8x256x-1x80xf32) + shape64_0 = paddle._C_ops.shape64(add_0) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = 
full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_2 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [slice_1, full_1, full_1, full_1] + del slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_0 = paddle._C_ops.uniform( + stack_0, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_0 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_1 = paddle._C_ops.add(cast_0, uniform_0) + del uniform_0 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_0 = paddle._C_ops.floor(add_1) + del add_1 + + # pd_op.divide: (8x256x-1x80xf32) <- (8x256x-1x80xf32, xf32) + divide_0 = paddle._C_ops.divide(add_0, cast_0) + + # pd_op.multiply: (8x256x-1x80xf32) <- (8x256x-1x80xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(divide_0, floor_0) + + # pd_op.add: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 8x256x-1x80xf32) + add_2 = paddle._C_ops.add(data_3, multiply_0) + del data_3 + + # pd_op.flatten: (8x256x-1xf32) <- (8x256x-1x80xf32) + flatten_0 = paddle._C_ops.flatten(add_2, 2, 3) + + # pd_op.transpose: (8x-1x256xf32) <- (8x256x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_69, parameter_68, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_68, parameter_69 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_1, parameter_67, False, False) + del parameter_67 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_3 = paddle._C_ops.add(matmul_0, parameter_66) + del parameter_66 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_0 = paddle._C_ops.gelu(add_3, False) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_1 = paddle._C_ops.matmul(gelu_0, parameter_65, False, False) + del parameter_65 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_4 = paddle._C_ops.add(matmul_1, parameter_64) + del parameter_64 + + # pd_op.full: (xf64) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__1 = 
paddle._C_ops.assign_value_( + full_4, + [], + paddle.float64, + [float("0.964706")], + paddle.framework._current_expected_place(), + ) + del full_4 + + # pd_op.cast: (xf32) <- (xf64) + cast_1 = paddle._C_ops.cast(assign_value__1, paddle.float32) + del assign_value__1 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_1 = paddle._C_ops.shape64(add_4) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [slice_2, full_1, full_1] + del slice_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_1 = paddle._C_ops.uniform( + stack_1, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_1 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_5 = paddle._C_ops.add(cast_1, uniform_1) + del uniform_1 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_1 = paddle._C_ops.floor(add_5) + del add_5 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_1 = paddle._C_ops.divide(add_4, cast_1) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(divide_1, floor_1) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_6 = paddle._C_ops.add(layer_norm_1, multiply_1) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_63, parameter_62, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_62, parameter_63 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_4, [0, 2, 1]) + del layer_norm_4 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("80"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_5, data_1, data_2, full_6] + del data_1, data_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(transpose_1, stack_2) + del stack_2 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_2 = paddle._C_ops.shape64(reshape_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_14 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_15 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + 
) + del shape64_2 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_3 = paddle._C_ops.shape64(reshape_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_4 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_3 + + # pd_op.conv2d: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 256x32x5x5xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_1, parameter_61, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_61 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_60, full_int_array_0) + del parameter_60 + + # pd_op.add: (8x256x-1x80xf32) <- (8x256x-1x80xf32, 1x256x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_1, reshape_2) + + # pd_op.full: (xf64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__2 = paddle._C_ops.assign_value_( + full_7, + [], + paddle.float64, + [float("0.958824")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.cast: (xf32) <- (xf64) + cast_2 = paddle._C_ops.cast(assign_value__2, paddle.float32) + del assign_value__2 + + # pd_op.shape64: (4xi64) <- (8x256x-1x80xf32) + shape64_4 = paddle._C_ops.shape64(add_7) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_4 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_3 = [slice_5, full_1, full_1, full_1] + del slice_5 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_2 = paddle._C_ops.uniform( + stack_3, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_3 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_8 = paddle._C_ops.add(cast_2, uniform_2) + del uniform_2 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_2 = paddle._C_ops.floor(add_8) + del add_8 + + # pd_op.divide: (8x256x-1x80xf32) <- (8x256x-1x80xf32, xf32) + divide_2 = paddle._C_ops.divide(add_7, cast_2) + + # pd_op.multiply: (8x256x-1x80xf32) <- (8x256x-1x80xf32, -1x1x1x1xf32) + multiply_2 = paddle._C_ops.multiply(divide_2, floor_2) + + # pd_op.add: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 8x256x-1x80xf32) + add_9 = paddle._C_ops.add(reshape_1, multiply_2) + + # pd_op.flatten: (8x256x-1xf32) <- (8x256x-1x80xf32) + flatten_1 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (8x-1x256xf32) <- (8x256x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_59, parameter_58, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_58, parameter_59 + + # pd_op.matmul: (8x-1x1024xf32) <- 
(8x-1x256xf32, 256x1024xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_7, parameter_57, False, False) + del parameter_57 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_10 = paddle._C_ops.add(matmul_2, parameter_56) + del parameter_56 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_1 = paddle._C_ops.gelu(add_10, False) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_3 = paddle._C_ops.matmul(gelu_1, parameter_55, False, False) + del parameter_55 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_11 = paddle._C_ops.add(matmul_3, parameter_54) + del parameter_54 + + # pd_op.full: (xf64) <- () + full_8 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__3 = paddle._C_ops.assign_value_( + full_8, + [], + paddle.float64, + [float("0.958824")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.cast: (xf32) <- (xf64) + cast_3 = paddle._C_ops.cast(assign_value__3, paddle.float32) + del assign_value__3 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_5 = paddle._C_ops.shape64(add_11) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_4 = [slice_6, full_1, full_1] + del slice_6 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_3 = paddle._C_ops.uniform( + stack_4, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_4 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_12 = paddle._C_ops.add(cast_3, uniform_3) + del uniform_3 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_3 = paddle._C_ops.floor(add_12) + del add_12 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_3 = paddle._C_ops.divide(add_11, cast_3) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_3, floor_3) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_13 = paddle._C_ops.add(layer_norm_7, multiply_3) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_13, parameter_53, parameter_52, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_52, parameter_53 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_10, [0, 2, 1]) + del layer_norm_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_5 = [full_5, slice_3, slice_4, full_6] + del slice_3, slice_4 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(transpose_3, stack_5) + del stack_5 + + # pd_op.flatten: (8x-1x-1xf32) <- (8x-1x-1x80xf32) + flatten_2 = paddle._C_ops.flatten(reshape_3, 2, 3) + + # pd_op.transpose: (8x-1x-1xf32) <- (8x-1x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # 
pd_op.matmul: (8x-1x768xf32) <- (8x-1x-1xf32, 256x768xf32) + matmul_4 = paddle._C_ops.matmul(transpose_4, parameter_51, False, False) + del parameter_51 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_14 = paddle._C_ops.add(matmul_4, parameter_50) + del parameter_50 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_5 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (8x-1x3x8x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_14, full_int_array_5) + + # pd_op.transpose: (3x8x8x-1x32xf32) <- (8x-1x3x8x32xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x8x32x-1xf32) <- (8x8x-1x32xf32) + transpose_6 = paddle._C_ops.transpose(slice_8, [0, 1, 3, 2]) + del slice_8 + + # pd_op.matmul: (8x8x-1x-1xf32) <- (8x8x-1x32xf32, 8x8x32x-1xf32) + matmul_5 = paddle._C_ops.matmul(slice_7, transpose_6, False, False) + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_24 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_25 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_26 = full_9 + + # pd_op.scale: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_5, full_9, float("0"), True) + del matmul_5 + + # pd_op.softmax: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.matmul: (8x8x-1x32xf32) <- (8x8x-1x-1xf32, 8x8x-1x32xf32) + matmul_6 = paddle._C_ops.matmul(softmax_0, slice_9, False, False) + + # pd_op.transpose: (8x-1x8x32xf32) <- (8x8x-1x32xf32) + transpose_7 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, -1, 256] + + # pd_op.reshape: (8x-1x256xf32) <- (8x-1x8x32xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_7, full_int_array_6) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x256xf32, 256x256xf32) + matmul_7 = paddle._C_ops.matmul(reshape_5, parameter_49, False, False) + del parameter_49 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_15 = paddle._C_ops.add(matmul_7, parameter_48) + del parameter_48 + + # pd_op.full: (xf64) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__4 = paddle._C_ops.assign_value_( + full_10, + [], + paddle.float64, + [float("0.952941")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.cast: (xf32) <- (xf64) + cast_4 = paddle._C_ops.cast(assign_value__4, paddle.float32) + del assign_value__4 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_6 = paddle._C_ops.shape64(add_15) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_6 + + # builtin.combine: ([xi64, xi64, xi64]) 
<- (xi64, xi64, xi64) + combine_6 = [slice_10, full_1, full_1] + del slice_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_4 = paddle._C_ops.uniform( + stack_6, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_6 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_16 = paddle._C_ops.add(cast_4, uniform_4) + del uniform_4 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_4 = paddle._C_ops.floor(add_16) + del add_16 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_4 = paddle._C_ops.divide(add_15, cast_4) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_4, floor_4) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x-1xf32, 8x-1x256xf32) + add_17 = paddle._C_ops.add(transpose_4, multiply_4) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_13, layer_norm_14, layer_norm_15 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_17, parameter_47, parameter_46, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_46, parameter_47 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_13, parameter_45, False, False) + del parameter_45 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_18 = paddle._C_ops.add(matmul_8, parameter_44) + del parameter_44 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_2 = paddle._C_ops.gelu(add_18, False) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_9 = paddle._C_ops.matmul(gelu_2, parameter_43, False, False) + del parameter_43 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_19 = paddle._C_ops.add(matmul_9, parameter_42) + del parameter_42 + + # pd_op.full: (xf64) <- () + full_11 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__5 = paddle._C_ops.assign_value_( + full_11, + [], + paddle.float64, + [float("0.952941")], + paddle.framework._current_expected_place(), + ) + del full_11 + + # pd_op.cast: (xf32) <- (xf64) + cast_5 = paddle._C_ops.cast(assign_value__5, paddle.float32) + del assign_value__5 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_7 = paddle._C_ops.shape64(add_19) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_7 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [slice_11, full_1, full_1] + del slice_11 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_5 = paddle._C_ops.uniform( + stack_7, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_7 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_20 = paddle._C_ops.add(cast_5, uniform_5) + del uniform_5 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_5 = paddle._C_ops.floor(add_20) + del add_20 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_5 = paddle._C_ops.divide(add_19, cast_5) + + # 
pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_5, floor_5) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_21 = paddle._C_ops.add(layer_norm_13, multiply_5) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_16, layer_norm_17, layer_norm_18 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_21, parameter_41, parameter_40, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_40, parameter_41 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x256xf32, 256x768xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_16, parameter_39, False, False) + del parameter_39 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_22 = paddle._C_ops.add(matmul_10, parameter_38) + del parameter_38 + + # pd_op.reshape: (8x-1x3x8x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_22, full_int_array_5) + + # pd_op.transpose: (3x8x8x-1x32xf32) <- (8x-1x3x8x32xf32) + transpose_8 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x8x32x-1xf32) <- (8x8x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) + del slice_13 + + # pd_op.matmul: (8x8x-1x-1xf32) <- (8x8x-1x32xf32, 8x8x32x-1xf32) + matmul_11 = paddle._C_ops.matmul(slice_12, transpose_9, False, False) + + # pd_op.scale: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_11, full_9, float("0"), True) + del matmul_11 + + # pd_op.softmax: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.matmul: (8x8x-1x32xf32) <- (8x8x-1x-1xf32, 8x8x-1x32xf32) + matmul_12 = paddle._C_ops.matmul(softmax_1, slice_14, False, False) + + # pd_op.transpose: (8x-1x8x32xf32) <- (8x8x-1x32xf32) + transpose_10 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # pd_op.reshape: (8x-1x256xf32) <- (8x-1x8x32xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_10, full_int_array_6) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x256xf32, 256x256xf32) + matmul_13 = paddle._C_ops.matmul(reshape_7, parameter_37, False, False) + del parameter_37 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_23 = paddle._C_ops.add(matmul_13, parameter_36) + del parameter_36 + + # pd_op.full: (xf64) <- () + full_12 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__6 = paddle._C_ops.assign_value_( + full_12, + [], + paddle.float64, + [float("0.947059")], + paddle.framework._current_expected_place(), + ) + del full_12 + + # pd_op.cast: (xf32) <- (xf64) + cast_6 = paddle._C_ops.cast(assign_value__6, paddle.float32) + del assign_value__6 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_8 = paddle._C_ops.shape64(add_23) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 
1xi64) + slice_15 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_8 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_8 = [slice_15, full_1, full_1] + del slice_15 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_6 = paddle._C_ops.uniform( + stack_8, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_8 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_24 = paddle._C_ops.add(cast_6, uniform_6) + del uniform_6 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_6 = paddle._C_ops.floor(add_24) + del add_24 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_6 = paddle._C_ops.divide(add_23, cast_6) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_6 = paddle._C_ops.multiply(divide_6, floor_6) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_25 = paddle._C_ops.add(layer_norm_16, multiply_6) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_19, layer_norm_20, layer_norm_21 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_25, parameter_35, parameter_34, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_34, parameter_35 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_19, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_26 = paddle._C_ops.add(matmul_14, parameter_32) + del parameter_32 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_3 = paddle._C_ops.gelu(add_26, False) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_15 = paddle._C_ops.matmul(gelu_3, parameter_31, False, False) + del parameter_31 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_27 = paddle._C_ops.add(matmul_15, parameter_30) + del parameter_30 + + # pd_op.full: (xf64) <- () + full_13 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__7 = paddle._C_ops.assign_value_( + full_13, + [], + paddle.float64, + [float("0.947059")], + paddle.framework._current_expected_place(), + ) + del full_13 + + # pd_op.cast: (xf32) <- (xf64) + cast_7 = paddle._C_ops.cast(assign_value__7, paddle.float32) + del assign_value__7 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_9 = paddle._C_ops.shape64(add_27) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_9 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_9 = [slice_16, full_1, full_1] + del slice_16 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_7 = paddle._C_ops.uniform( + stack_9, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_9 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_28 = paddle._C_ops.add(cast_7, uniform_7) + del uniform_7 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + 
floor_7 = paddle._C_ops.floor(add_28) + del add_28 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_7 = paddle._C_ops.divide(add_27, cast_7) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_7 = paddle._C_ops.multiply(divide_7, floor_7) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_29 = paddle._C_ops.add(layer_norm_19, multiply_7) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_22, layer_norm_23, layer_norm_24 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_29, parameter_29, parameter_28, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_28, parameter_29 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x256xf32, 256x768xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_22, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_30 = paddle._C_ops.add(matmul_16, parameter_26) + del parameter_26 + + # pd_op.reshape: (8x-1x3x8x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_30, full_int_array_5) + + # pd_op.transpose: (3x8x8x-1x32xf32) <- (8x-1x3x8x32xf32) + transpose_11 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x8x32x-1xf32) <- (8x8x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(slice_18, [0, 1, 3, 2]) + del slice_18 + + # pd_op.matmul: (8x8x-1x-1xf32) <- (8x8x-1x32xf32, 8x8x32x-1xf32) + matmul_17 = paddle._C_ops.matmul(slice_17, transpose_12, False, False) + + # pd_op.scale: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_17, full_9, float("0"), True) + del matmul_17 + + # pd_op.softmax: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.matmul: (8x8x-1x32xf32) <- (8x8x-1x-1xf32, 8x8x-1x32xf32) + matmul_18 = paddle._C_ops.matmul(softmax_2, slice_19, False, False) + + # pd_op.transpose: (8x-1x8x32xf32) <- (8x8x-1x32xf32) + transpose_13 = paddle._C_ops.transpose(matmul_18, [0, 2, 1, 3]) + del matmul_18 + + # pd_op.reshape: (8x-1x256xf32) <- (8x-1x8x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_13, full_int_array_6) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x256xf32, 256x256xf32) + matmul_19 = paddle._C_ops.matmul(reshape_9, parameter_25, False, False) + del parameter_25 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_31 = paddle._C_ops.add(matmul_19, parameter_24) + del parameter_24 + + # pd_op.full: (xf64) <- () + full_14 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__8 = paddle._C_ops.assign_value_( + full_14, + [], + paddle.float64, + [float("0.941176")], + paddle.framework._current_expected_place(), + ) + del full_14 + + # pd_op.cast: (xf32) <- (xf64) + cast_8 = paddle._C_ops.cast(assign_value__8, 
paddle.float32) + del assign_value__8 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_10 = paddle._C_ops.shape64(add_31) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_10 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_10 = [slice_20, full_1, full_1] + del slice_20 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_8 = paddle._C_ops.uniform( + stack_10, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_10 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_32 = paddle._C_ops.add(cast_8, uniform_8) + del uniform_8 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_8 = paddle._C_ops.floor(add_32) + del add_32 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_8 = paddle._C_ops.divide(add_31, cast_8) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_8 = paddle._C_ops.multiply(divide_8, floor_8) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_33 = paddle._C_ops.add(layer_norm_22, multiply_8) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_25, layer_norm_26, layer_norm_27 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_33, parameter_23, parameter_22, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_22, parameter_23 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_25, parameter_21, False, False) + del parameter_21 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_34 = paddle._C_ops.add(matmul_20, parameter_20) + del parameter_20 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_4 = paddle._C_ops.gelu(add_34, False) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_21 = paddle._C_ops.matmul(gelu_4, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_35 = paddle._C_ops.add(matmul_21, parameter_18) + del parameter_18 + + # pd_op.full: (xf64) <- () + full_15 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__9 = paddle._C_ops.assign_value_( + full_15, + [], + paddle.float64, + [float("0.941176")], + paddle.framework._current_expected_place(), + ) + del full_15 + + # pd_op.cast: (xf32) <- (xf64) + cast_9 = paddle._C_ops.cast(assign_value__9, paddle.float32) + del assign_value__9 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_11 = paddle._C_ops.shape64(add_35) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_11 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_11 = [slice_21, full_1, full_1] + del slice_21 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_11 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_9 = paddle._C_ops.uniform( + stack_11, + paddle.float32, + full_2, + full_3, + 0, + 
paddle.framework._current_expected_place(), + ) + del stack_11 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_36 = paddle._C_ops.add(cast_9, uniform_9) + del uniform_9 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_9 = paddle._C_ops.floor(add_36) + del add_36 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_9 = paddle._C_ops.divide(add_35, cast_9) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_9 = paddle._C_ops.multiply(divide_9, floor_9) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_37 = paddle._C_ops.add(layer_norm_25, multiply_9) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_28, layer_norm_29, layer_norm_30 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_37, parameter_17, parameter_16, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_16, parameter_17 + + # pd_op.matmul: (8x-1x768xf32) <- (8x-1x256xf32, 256x768xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_28, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (8x-1x768xf32) <- (8x-1x768xf32, 768xf32) + add_38 = paddle._C_ops.add(matmul_22, parameter_14) + del parameter_14 + + # pd_op.reshape: (8x-1x3x8x32xf32) <- (8x-1x768xf32, 5xi64) + reshape_10 = paddle._C_ops.reshape(add_38, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (3x8x8x-1x32xf32) <- (8x-1x3x8x32xf32) + transpose_14 = paddle._C_ops.transpose(reshape_10, [2, 0, 3, 1, 4]) + del reshape_10 + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x8x-1x32xf32) <- (3x8x8x-1x32xf32, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x8x32x-1xf32) <- (8x8x-1x32xf32) + transpose_15 = paddle._C_ops.transpose(slice_23, [0, 1, 3, 2]) + del slice_23 + + # pd_op.matmul: (8x8x-1x-1xf32) <- (8x8x-1x32xf32, 8x8x32x-1xf32) + matmul_23 = paddle._C_ops.matmul(slice_22, transpose_15, False, False) + + # pd_op.scale: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_23, full_9, float("0"), True) + del matmul_23 + + # pd_op.softmax: (8x8x-1x-1xf32) <- (8x8x-1x-1xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (8x8x-1x32xf32) <- (8x8x-1x-1xf32, 8x8x-1x32xf32) + matmul_24 = paddle._C_ops.matmul(softmax_3, slice_24, False, False) + + # pd_op.transpose: (8x-1x8x32xf32) <- (8x8x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(matmul_24, [0, 2, 1, 3]) + del matmul_24 + + # pd_op.reshape: (8x-1x256xf32) <- (8x-1x8x32xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6 + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x256xf32, 256x256xf32) + matmul_25 = paddle._C_ops.matmul(reshape_11, parameter_13, False, False) + del parameter_13 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_39 = paddle._C_ops.add(matmul_25, parameter_12) + del parameter_12 + + # pd_op.full: (xf64) <- () + full_16 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # 
pd_op.assign_value_: (xf64) <- (xf64) + assign_value__10 = paddle._C_ops.assign_value_( + full_16, + [], + paddle.float64, + [float("0.935294")], + paddle.framework._current_expected_place(), + ) + del full_16 + + # pd_op.cast: (xf32) <- (xf64) + cast_10 = paddle._C_ops.cast(assign_value__10, paddle.float32) + del assign_value__10 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_12 = paddle._C_ops.shape64(add_39) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_12 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_12 = [slice_25, full_1, full_1] + del slice_25 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_12 = paddle._C_ops.stack(combine_12, 0) + del combine_12 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_10 = paddle._C_ops.uniform( + stack_12, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_12 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_40 = paddle._C_ops.add(cast_10, uniform_10) + del uniform_10 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_10 = paddle._C_ops.floor(add_40) + del add_40 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_10 = paddle._C_ops.divide(add_39, cast_10) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_10 = paddle._C_ops.multiply(divide_10, floor_10) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_41 = paddle._C_ops.add(layer_norm_28, multiply_10) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_31, layer_norm_32, layer_norm_33 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_41, parameter_11, parameter_10, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_10, parameter_11 + + # pd_op.matmul: (8x-1x1024xf32) <- (8x-1x256xf32, 256x1024xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_31, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (8x-1x1024xf32) <- (8x-1x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_26, parameter_8) + del parameter_8 + + # pd_op.gelu: (8x-1x1024xf32) <- (8x-1x1024xf32) + gelu_5 = paddle._C_ops.gelu(add_42, False) + + # pd_op.matmul: (8x-1x256xf32) <- (8x-1x1024xf32, 1024x256xf32) + matmul_27 = paddle._C_ops.matmul(gelu_5, parameter_7, False, False) + del parameter_7 + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 256xf32) + add_43 = paddle._C_ops.add(matmul_27, parameter_6) + del parameter_6 + + # pd_op.full: (xf64) <- () + full_17 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__11 = paddle._C_ops.assign_value_( + full_17, + [], + paddle.float64, + [float("0.935294")], + paddle.framework._current_expected_place(), + ) + del full_17 + + # pd_op.cast: (xf32) <- (xf64) + cast_11 = paddle._C_ops.cast(assign_value__11, paddle.float32) + del assign_value__11 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_13 = paddle._C_ops.shape64(add_43) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + shape64_13, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, shape64_13 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_13 = [slice_26, full_1, full_1] + del 
full_1, slice_26 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_13 = paddle._C_ops.stack(combine_13, 0) + del combine_13 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_11 = paddle._C_ops.uniform( + stack_13, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del full_2, full_3, stack_13 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_44 = paddle._C_ops.add(cast_11, uniform_11) + del uniform_11 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_11 = paddle._C_ops.floor(add_44) + del add_44 + + # pd_op.divide: (8x-1x256xf32) <- (8x-1x256xf32, xf32) + divide_11 = paddle._C_ops.divide(add_43, cast_11) + + # pd_op.multiply: (8x-1x256xf32) <- (8x-1x256xf32, -1x1x1xf32) + multiply_11 = paddle._C_ops.multiply(divide_11, floor_11) + + # pd_op.add: (8x-1x256xf32) <- (8x-1x256xf32, 8x-1x256xf32) + add_45 = paddle._C_ops.add(layer_norm_31, multiply_11) + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_34, layer_norm_35, layer_norm_36 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_5, parameter_4, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_4, parameter_5 + + # pd_op.shape64: (3xi64) <- (8x-1x256xf32) + shape64_14 = paddle._C_ops.shape64(layer_norm_34) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + shape64_14, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, shape64_14 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_34, [0, 2, 1]) + del layer_norm_34 + + # pd_op.full: (xi64) <- () + full_18 = paddle._C_ops.full( + [], float("256"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_5, full_18, data_0, full_6] + del data_0, full_18, full_5, full_6 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_14 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (8x256x-1x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(transpose_17, stack_14) + del stack_14 + + # pd_op.conv2d: (8x384x-1x80xf32) <- (8x256x-1x80xf32, 384x256x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + reshape_12, parameter_3, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del full_int_array_0, parameter_2 + + # pd_op.add: (8x384x-1x80xf32) <- (8x384x-1x80xf32, 1x384x1x1xf32) + add_46 = paddle._C_ops.add(conv2d_2, reshape_13) + + # pd_op.shape64: (4xi64) <- (8x384x-1x80xf32) + shape64_15 = paddle._C_ops.shape64(add_46) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_15, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4, shape64_15 + + # pd_op.flatten: (8x384x-1xf32) <- (8x384x-1x80xf32) + flatten_3 = paddle._C_ops.flatten(add_46, 2, 3) + + # pd_op.transpose: (8x-1x384xf32) <- (8x384x-1xf32) + transpose_18 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_18, parameter_1, parameter_0, 
float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del ( + add_0, + add_10, + add_11, + add_13, + add_14, + add_15, + add_17, + add_18, + add_19, + add_2, + add_21, + add_22, + add_23, + add_25, + add_26, + add_27, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_37, + add_38, + add_39, + add_4, + add_41, + add_42, + add_43, + add_45, + add_46, + add_6, + add_7, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + cast_0, + cast_1, + cast_10, + cast_11, + cast_2, + cast_3, + cast_4, + cast_5, + cast_6, + cast_7, + cast_8, + cast_9, + conv2d_0, + conv2d_1, + conv2d_2, + divide_0, + divide_1, + divide_10, + divide_11, + divide_2, + divide_3, + divide_4, + divide_5, + divide_6, + divide_7, + divide_8, + divide_9, + floor_0, + floor_1, + floor_10, + floor_11, + floor_2, + floor_3, + floor_4, + floor_5, + floor_6, + floor_7, + floor_8, + floor_9, + full_9, + gelu_0, + gelu_1, + gelu_2, + gelu_3, + gelu_4, + gelu_5, + layer_norm_1, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_21, + layer_norm_22, + layer_norm_23, + layer_norm_24, + layer_norm_25, + layer_norm_26, + layer_norm_27, + layer_norm_28, + layer_norm_29, + layer_norm_3, + layer_norm_30, + layer_norm_31, + layer_norm_32, + layer_norm_33, + layer_norm_35, + layer_norm_36, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_1, + matmul_10, + matmul_13, + matmul_14, + matmul_15, + matmul_16, + matmul_19, + matmul_2, + matmul_20, + matmul_21, + matmul_22, + matmul_25, + matmul_26, + matmul_27, + matmul_3, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + parameter_0, + parameter_1, + reshape_0, + reshape_1, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_5, + reshape_7, + reshape_9, + slice_12, + slice_14, + slice_17, + slice_19, + slice_22, + slice_24, + slice_7, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_18, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + ) + + return layer_norm_0, slice_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/weight_meta.py new file mode 100644 index 000000000..2dcb644c9 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/weight_meta.py @@ -0,0 +1,790 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.611573") + max_val = float("0.567798") + mean = float("0.00370712") + std = float("0.148267") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + 
min_val = float("-0.0176942") + max_val = float("1.1113") + mean = float("0.805979") + std = float("0.318479") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("-2.13167") + max_val = float("3.1016") + mean = float("0.0038158") + std = float("0.480805") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-4.81624") + max_val = float("3.58691") + mean = float("-0.000687498") + std = float("0.436287") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("-1.00302") + max_val = float("1.58895") + mean = float("-0.117761") + std = float("0.210592") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256] + dtype = "float32" + min_val = float("0.22568") + max_val = float("1.68552") + mean = float("1.24757") + std = float("0.144412") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [256] + dtype = "float32" + min_val = float("-8.09258") + max_val = float("0.641712") + mean = float("-0.0151932") + std = float("0.534823") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.43969") + max_val = float("1.06018") + mean = float("1.17521e-05") + std = float("0.0575092") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [1024] + dtype = "float32" + min_val = float("-1.46189") + max_val = float("-0.019999") + mean = float("-0.687724") + std = float("0.224643") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.347586") + max_val = float("0.302351") + mean = float("-0.00564142") + std = float("0.0566072") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [256] + dtype = "float32" + min_val = float("-3.06699") + max_val = float("1.76906") + mean = float("0.0968607") + std = float("0.35562") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [256] + dtype = "float32" + min_val = float("0.112453") + max_val = float("2.92041") + mean = float("0.694894") + std = float("0.22916") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [256] + dtype = "float32" + min_val = float("-1.23851") + max_val = float("1.00643") + mean = float("-0.00873763") + std = float("0.143725") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.324324") + max_val = float("0.542329") + mean = float("-9.51245e-05") + std = float("0.0446306") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [768] + dtype = "float32" + min_val = float("-3.23557") + max_val = float("3.07665") + mean = float("0.0189733") + std = float("0.322926") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.604016") + max_val = float("0.497953") + mean = float("-3.84765e-05") + std = float("0.0527454") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [256] + dtype = "float32" + 
min_val = float("-1.03089") + max_val = float("1.35712") + mean = float("-0.012765") + std = float("0.214998") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256] + dtype = "float32" + min_val = float("0.287937") + max_val = float("1.59293") + mean = float("0.900996") + std = float("0.111622") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [256] + dtype = "float32" + min_val = float("-2.05805") + max_val = float("0.660414") + mean = float("-0.00209053") + std = float("0.194515") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.562583") + max_val = float("0.814642") + mean = float("1.22868e-05") + std = float("0.056914") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [1024] + dtype = "float32" + min_val = float("-1.33321") + max_val = float("-0.255077") + mean = float("-0.694005") + std = float("0.209896") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.381275") + max_val = float("0.37081") + mean = float("-0.0038066") + std = float("0.0556894") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256] + dtype = "float32" + min_val = float("-2.22319") + max_val = float("1.37969") + mean = float("0.0664931") + std = float("0.30259") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("0.081885") + max_val = float("2.63842") + mean = float("0.745383") + std = float("0.189694") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("-1.62126") + max_val = float("1.39196") + mean = float("-0.0111102") + std = float("0.186807") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.486573") + max_val = float("0.315077") + mean = float("8.92714e-05") + std = float("0.048068") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [768] + dtype = "float32" + min_val = float("-2.94557") + max_val = float("3.085") + mean = float("0.0137264") + std = float("0.347131") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.415111") + max_val = float("0.44239") + mean = float("-2.87335e-05") + std = float("0.0532726") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-1.52143") + max_val = float("1.36531") + mean = float("-0.0082483") + std = float("0.293583") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.263449") + max_val = float("2.44346") + mean = float("0.924853") + std = float("0.191072") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("-0.74461") + max_val = float("0.594306") + mean = float("-0.000304156") + std = float("0.132218") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [1024, 256] + dtype = 
"float32" + min_val = float("-0.504825") + max_val = float("0.476818") + mean = float("-5.34588e-05") + std = float("0.0617784") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [1024] + dtype = "float32" + min_val = float("-1.1179") + max_val = float("-0.0345218") + mean = float("-0.639639") + std = float("0.206906") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.477723") + max_val = float("0.357132") + mean = float("-0.00145091") + std = float("0.0546795") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256] + dtype = "float32" + min_val = float("-2.66275") + max_val = float("2.16434") + mean = float("0.0282077") + std = float("0.48519") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("0.0184295") + max_val = float("2.3764") + mean = float("0.799007") + std = float("0.238441") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-1.94903") + max_val = float("1.32835") + mean = float("-0.0101001") + std = float("0.260235") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.334037") + max_val = float("0.302804") + mean = float("7.46288e-05") + std = float("0.0485713") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-3.24135") + max_val = float("3.1687") + mean = float("0.0112129") + std = float("0.360574") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.484463") + max_val = float("0.382816") + mean = float("-3.79066e-06") + std = float("0.0528972") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256] + dtype = "float32" + min_val = float("-1.22621") + max_val = float("1.13198") + mean = float("-0.00584792") + std = float("0.315598") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("0.0741299") + max_val = float("2.1798") + mean = float("0.899628") + std = float("0.215349") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = float("-0.594821") + max_val = float("0.878347") + mean = float("0.00450388") + std = float("0.136994") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.448166") + max_val = float("0.410483") + mean = float("5.85686e-05") + std = float("0.0642513") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [1024] + dtype = "float32" + min_val = float("-1.13055") + max_val = float("-0.0592707") + mean = float("-0.617823") + std = float("0.192932") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.338649") + max_val = float("0.369028") + mean = float("0.000543811") + std = float("0.0538612") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + 
shape = [256] + dtype = "float32" + min_val = float("-2.27003") + max_val = float("2.47408") + mean = float("-0.00602015") + std = float("0.575732") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("0.085421") + max_val = float("2.25771") + mean = float("0.788097") + std = float("0.203732") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [256] + dtype = "float32" + min_val = float("-1.86822") + max_val = float("1.88784") + mean = float("-0.00538365") + std = float("0.221636") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.429145") + max_val = float("0.534035") + mean = float("2.21364e-05") + std = float("0.0481658") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-2.90161") + max_val = float("2.60702") + mean = float("-0.00468604") + std = float("0.35132") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.307903") + max_val = float("0.422378") + mean = float("4.29359e-05") + std = float("0.0528653") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256] + dtype = "float32" + min_val = float("-1.72865") + max_val = float("1.36761") + mean = float("0.000939122") + std = float("0.417524") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [256] + dtype = "float32" + min_val = float("0.295198") + max_val = float("1.38916") + mean = float("0.917769") + std = float("0.182554") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [256] + dtype = "float32" + min_val = float("-0.541449") + max_val = float("0.438496") + mean = float("-0.000324496") + std = float("0.137469") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.438234") + max_val = float("0.346777") + mean = float("0.000112901") + std = float("0.0633795") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [1024] + dtype = "float32" + min_val = float("-0.869132") + max_val = float("-0.146016") + mean = float("-0.578405") + std = float("0.148015") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.622617") + max_val = float("0.424336") + mean = float("0.000771501") + std = float("0.0470252") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-1.54389") + max_val = float("2.06104") + mean = float("-0.0245243") + std = float("0.666343") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256] + dtype = "float32" + min_val = float("0.25324") + max_val = float("1.7475") + mean = float("0.929699") + std = float("0.194515") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("-2.27709") + max_val = float("2.10237") + mean = float("0.00489338") + std = float("0.356161") + data = None + + +class Program_weight_tensor_parameter_61: + name = 
"parameter_61" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.271916") + max_val = float("0.52117") + mean = float("0.00141176") + std = float("0.0431897") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256] + dtype = "float32" + min_val = float("-0.633171") + max_val = float("0.74339") + mean = float("-0.0520087") + std = float("0.188559") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [256] + dtype = "float32" + min_val = float("0.0638512") + max_val = float("1.86322") + mean = float("1.31402") + std = float("0.15703") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("-8.93611") + max_val = float("0.457052") + mean = float("-0.0257168") + std = float("0.585021") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.32594") + max_val = float("1.16715") + mean = float("-2.85247e-05") + std = float("0.05611") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [1024] + dtype = "float32" + min_val = float("-1.46672") + max_val = float("-0.131769") + mean = float("-0.664938") + std = float("0.178196") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.324049") + max_val = float("0.367399") + mean = float("0.000789244") + std = float("0.0569423") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [256] + dtype = "float32" + min_val = float("-0.661923") + max_val = float("0.721955") + mean = float("-0.0107626") + std = float("0.197668") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [256] + dtype = "float32" + min_val = float("0.404883") + max_val = float("1.8329") + mean = float("0.68668") + std = float("0.133497") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [256] + dtype = "float32" + min_val = float("-2.16229") + max_val = float("1.48621") + mean = float("-0.00283954") + std = float("0.300709") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.265171") + max_val = float("0.496926") + mean = float("0.00112411") + std = float("0.0415432") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt new file mode 100644 index 000000000..5d15958c6 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt @@ -0,0 +1 @@ +5fa2fb0a4a2f5ac153edf09f6a38cca5e796b27a226fe7a6d6130a84603ae6cc \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/input_meta.py new file mode 100644 
index 000000000..62259f248 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 128, 8, 80] + dtype = "float32" + min_val = float("-0.169971") + max_val = float("76.8014") + mean = float("0.230222") + std = float("1.19983") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/model.py new file mode 100644 index 000000000..2d8122eab --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/model.py @@ -0,0 +1,1332 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + data_0, + ): + # pd_op.conv2d: (8x128x8x80xf32) <- (8x128x8x80xf32, 128x32x5x5xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_63, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_63 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_62, full_int_array_0) + del parameter_62 + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 1x128x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 8x128x8x80xf32) + add_1 = paddle._C_ops.add(data_0, add_0) + del data_0 + + # pd_op.flatten: (8x128x640xf32) <- (8x128x8x80xf32) + flatten_0 = paddle._C_ops.flatten(add_1, 2, 3) + + # pd_op.transpose: (8x640x128xf32) <- (8x128x640xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_61, parameter_60, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_60, parameter_61 + + # pd_op.matmul: (8x640x512xf32) <- (8x640x128xf32, 128x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_59, False, False) + del parameter_59 + + # pd_op.add: (8x640x512xf32) <- (8x640x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_0, parameter_58) + del parameter_58 + + # pd_op.gelu: (8x640x512xf32) <- (8x640x512xf32) + gelu_0 = paddle._C_ops.gelu(add_2, False) + + # pd_op.matmul: (8x640x128xf32) <- (8x640x512xf32, 512x128xf32) + matmul_1 = 
paddle._C_ops.matmul(gelu_0, parameter_57, False, False) + del parameter_57 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 128xf32) + add_3 = paddle._C_ops.add(matmul_1, parameter_56) + del parameter_56 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 8x640x128xf32) + add_4 = paddle._C_ops.add(layer_norm_0, add_3) + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_4, parameter_55, parameter_54, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_54, parameter_55 + + # pd_op.transpose: (8x128x640xf32) <- (8x640x128xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_3, [0, 2, 1]) + del layer_norm_3 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [0, 128, 8, 80] + + # pd_op.reshape: (8x128x8x80xf32) <- (8x128x640xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(transpose_1, full_int_array_1) + + # pd_op.conv2d: (8x128x8x80xf32) <- (8x128x8x80xf32, 128x32x5x5xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_2, parameter_53, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_53 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_52, full_int_array_0) + del parameter_52 + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 1x128x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_1, reshape_3) + + # pd_op.full: (xf64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__0 = paddle._C_ops.assign_value_( + full_0, + [], + paddle.float64, + [float("0.994118")], + paddle.framework._current_expected_place(), + ) + del full_0 + + # pd_op.cast: (xf32) <- (xf64) + cast_0 = paddle._C_ops.cast(assign_value__0, paddle.float32) + del assign_value__0 + + # pd_op.shape64: (4xi64) <- (8x128x8x80xf32) + shape64_0 = paddle._C_ops.shape64(add_5) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [slice_0, full_1, full_1, full_1] + del slice_0 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_0 = paddle._C_ops.uniform( + stack_0, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_0 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_6 = paddle._C_ops.add(cast_0, uniform_0) + del uniform_0 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_0 = paddle._C_ops.floor(add_6) + del add_6 + + # pd_op.divide: (8x128x8x80xf32) <- (8x128x8x80xf32, xf32) + divide_0 = paddle._C_ops.divide(add_5, 
cast_0) + + # pd_op.multiply: (8x128x8x80xf32) <- (8x128x8x80xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(divide_0, floor_0) + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 8x128x8x80xf32) + add_7 = paddle._C_ops.add(reshape_2, multiply_0) + + # pd_op.flatten: (8x128x640xf32) <- (8x128x8x80xf32) + flatten_1 = paddle._C_ops.flatten(add_7, 2, 3) + + # pd_op.transpose: (8x640x128xf32) <- (8x128x640xf32) + transpose_2 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_51, parameter_50, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_50, parameter_51 + + # pd_op.matmul: (8x640x512xf32) <- (8x640x128xf32, 128x512xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_6, parameter_49, False, False) + del parameter_49 + + # pd_op.add: (8x640x512xf32) <- (8x640x512xf32, 512xf32) + add_8 = paddle._C_ops.add(matmul_2, parameter_48) + del parameter_48 + + # pd_op.gelu: (8x640x512xf32) <- (8x640x512xf32) + gelu_1 = paddle._C_ops.gelu(add_8, False) + + # pd_op.matmul: (8x640x128xf32) <- (8x640x512xf32, 512x128xf32) + matmul_3 = paddle._C_ops.matmul(gelu_1, parameter_47, False, False) + del parameter_47 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 128xf32) + add_9 = paddle._C_ops.add(matmul_3, parameter_46) + del parameter_46 + + # pd_op.full: (xf64) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__1 = paddle._C_ops.assign_value_( + full_4, + [], + paddle.float64, + [float("0.994118")], + paddle.framework._current_expected_place(), + ) + del full_4 + + # pd_op.cast: (xf32) <- (xf64) + cast_1 = paddle._C_ops.cast(assign_value__1, paddle.float32) + del assign_value__1 + + # pd_op.shape64: (3xi64) <- (8x640x128xf32) + shape64_1 = paddle._C_ops.shape64(add_9) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [slice_1, full_1, full_1] + del slice_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_1 = paddle._C_ops.uniform( + stack_1, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_1 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_10 = paddle._C_ops.add(cast_1, uniform_1) + del uniform_1 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_1 = paddle._C_ops.floor(add_10) + del add_10 + + # pd_op.divide: (8x640x128xf32) <- (8x640x128xf32, xf32) + divide_1 = paddle._C_ops.divide(add_9, cast_1) + + # pd_op.multiply: (8x640x128xf32) <- (8x640x128xf32, -1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(divide_1, floor_1) + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 8x640x128xf32) + add_11 = paddle._C_ops.add(layer_norm_6, multiply_1) + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, parameter_45, parameter_44, 
float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_44, parameter_45 + + # pd_op.transpose: (8x128x640xf32) <- (8x640x128xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_9, [0, 2, 1]) + del layer_norm_9 + + # pd_op.reshape: (8x128x8x80xf32) <- (8x128x640xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(transpose_3, full_int_array_1) + + # pd_op.conv2d: (8x128x8x80xf32) <- (8x128x8x80xf32, 128x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + reshape_4, parameter_43, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_43 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_42, full_int_array_0) + del parameter_42 + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 1x128x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_2, reshape_5) + + # pd_op.full: (xf64) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__2 = paddle._C_ops.assign_value_( + full_5, + [], + paddle.float64, + [float("0.988235")], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.cast: (xf32) <- (xf64) + cast_2 = paddle._C_ops.cast(assign_value__2, paddle.float32) + del assign_value__2 + + # pd_op.shape64: (4xi64) <- (8x128x8x80xf32) + shape64_2 = paddle._C_ops.shape64(add_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_2 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [slice_2, full_1, full_1, full_1] + del slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_2 = paddle._C_ops.uniform( + stack_2, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_2 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_13 = paddle._C_ops.add(cast_2, uniform_2) + del uniform_2 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_2 = paddle._C_ops.floor(add_13) + del add_13 + + # pd_op.divide: (8x128x8x80xf32) <- (8x128x8x80xf32, xf32) + divide_2 = paddle._C_ops.divide(add_12, cast_2) + + # pd_op.multiply: (8x128x8x80xf32) <- (8x128x8x80xf32, -1x1x1x1xf32) + multiply_2 = paddle._C_ops.multiply(divide_2, floor_2) + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 8x128x8x80xf32) + add_14 = paddle._C_ops.add(reshape_4, multiply_2) + + # pd_op.flatten: (8x128x640xf32) <- (8x128x8x80xf32) + flatten_2 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (8x640x128xf32) <- (8x128x640xf32) + transpose_4 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_4, parameter_41, parameter_40, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_40, parameter_41 + + # pd_op.matmul: (8x640x512xf32) <- (8x640x128xf32, 128x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_12, parameter_39, False, False) + del parameter_39 + + # pd_op.add: (8x640x512xf32) <- (8x640x512xf32, 512xf32) + add_15 = 
paddle._C_ops.add(matmul_4, parameter_38) + del parameter_38 + + # pd_op.gelu: (8x640x512xf32) <- (8x640x512xf32) + gelu_2 = paddle._C_ops.gelu(add_15, False) + + # pd_op.matmul: (8x640x128xf32) <- (8x640x512xf32, 512x128xf32) + matmul_5 = paddle._C_ops.matmul(gelu_2, parameter_37, False, False) + del parameter_37 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 128xf32) + add_16 = paddle._C_ops.add(matmul_5, parameter_36) + del parameter_36 + + # pd_op.full: (xf64) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__3 = paddle._C_ops.assign_value_( + full_6, + [], + paddle.float64, + [float("0.988235")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.cast: (xf32) <- (xf64) + cast_3 = paddle._C_ops.cast(assign_value__3, paddle.float32) + del assign_value__3 + + # pd_op.shape64: (3xi64) <- (8x640x128xf32) + shape64_3 = paddle._C_ops.shape64(add_16) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [slice_3, full_1, full_1] + del slice_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_3 = paddle._C_ops.uniform( + stack_3, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_3 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_17 = paddle._C_ops.add(cast_3, uniform_3) + del uniform_3 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_3 = paddle._C_ops.floor(add_17) + del add_17 + + # pd_op.divide: (8x640x128xf32) <- (8x640x128xf32, xf32) + divide_3 = paddle._C_ops.divide(add_16, cast_3) + + # pd_op.multiply: (8x640x128xf32) <- (8x640x128xf32, -1x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_3, floor_3) + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 8x640x128xf32) + add_18 = paddle._C_ops.add(layer_norm_12, multiply_3) + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_18, parameter_35, parameter_34, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_34, parameter_35 + + # pd_op.transpose: (8x128x640xf32) <- (8x640x128xf32) + transpose_5 = paddle._C_ops.transpose(layer_norm_15, [0, 2, 1]) + del layer_norm_15 + + # pd_op.reshape: (8x128x8x80xf32) <- (8x128x640xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(transpose_5, full_int_array_1) + + # pd_op.conv2d: (8x128x8x80xf32) <- (8x128x8x80xf32, 128x32x5x5xf32) + conv2d_3 = paddle._C_ops.conv2d( + reshape_6, parameter_33, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_33 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_32, full_int_array_0) + del parameter_32 + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 1x128x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_3, reshape_7) + + # pd_op.full: (xf64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__4 = 
paddle._C_ops.assign_value_( + full_7, + [], + paddle.float64, + [float("0.982353")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.cast: (xf32) <- (xf64) + cast_4 = paddle._C_ops.cast(assign_value__4, paddle.float32) + del assign_value__4 + + # pd_op.shape64: (4xi64) <- (8x128x8x80xf32) + shape64_4 = paddle._C_ops.shape64(add_19) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_4 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [slice_4, full_1, full_1, full_1] + del slice_4 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_4 = paddle._C_ops.uniform( + stack_4, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_4 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_20 = paddle._C_ops.add(cast_4, uniform_4) + del uniform_4 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_4 = paddle._C_ops.floor(add_20) + del add_20 + + # pd_op.divide: (8x128x8x80xf32) <- (8x128x8x80xf32, xf32) + divide_4 = paddle._C_ops.divide(add_19, cast_4) + + # pd_op.multiply: (8x128x8x80xf32) <- (8x128x8x80xf32, -1x1x1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_4, floor_4) + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 8x128x8x80xf32) + add_21 = paddle._C_ops.add(reshape_6, multiply_4) + + # pd_op.flatten: (8x128x640xf32) <- (8x128x8x80xf32) + flatten_3 = paddle._C_ops.flatten(add_21, 2, 3) + + # pd_op.transpose: (8x640x128xf32) <- (8x128x640xf32) + transpose_6 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_6, parameter_31, parameter_30, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_30, parameter_31 + + # pd_op.matmul: (8x640x512xf32) <- (8x640x128xf32, 128x512xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_18, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (8x640x512xf32) <- (8x640x512xf32, 512xf32) + add_22 = paddle._C_ops.add(matmul_6, parameter_28) + del parameter_28 + + # pd_op.gelu: (8x640x512xf32) <- (8x640x512xf32) + gelu_3 = paddle._C_ops.gelu(add_22, False) + + # pd_op.matmul: (8x640x128xf32) <- (8x640x512xf32, 512x128xf32) + matmul_7 = paddle._C_ops.matmul(gelu_3, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 128xf32) + add_23 = paddle._C_ops.add(matmul_7, parameter_26) + del parameter_26 + + # pd_op.full: (xf64) <- () + full_8 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__5 = paddle._C_ops.assign_value_( + full_8, + [], + paddle.float64, + [float("0.982353")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.cast: (xf32) <- (xf64) + cast_5 = paddle._C_ops.cast(assign_value__5, paddle.float32) + del assign_value__5 + + # pd_op.shape64: (3xi64) <- (8x640x128xf32) + shape64_5 = paddle._C_ops.shape64(add_23) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + 
shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [slice_5, full_1, full_1] + del slice_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_5 = paddle._C_ops.uniform( + stack_5, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_5 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_24 = paddle._C_ops.add(cast_5, uniform_5) + del uniform_5 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_5 = paddle._C_ops.floor(add_24) + del add_24 + + # pd_op.divide: (8x640x128xf32) <- (8x640x128xf32, xf32) + divide_5 = paddle._C_ops.divide(add_23, cast_5) + + # pd_op.multiply: (8x640x128xf32) <- (8x640x128xf32, -1x1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_5, floor_5) + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 8x640x128xf32) + add_25 = paddle._C_ops.add(layer_norm_18, multiply_5) + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_25, parameter_25, parameter_24, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_24, parameter_25 + + # pd_op.transpose: (8x128x640xf32) <- (8x640x128xf32) + transpose_7 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.reshape: (8x128x8x80xf32) <- (8x128x640xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(transpose_7, full_int_array_1) + + # pd_op.conv2d: (8x128x8x80xf32) <- (8x128x8x80xf32, 128x32x5x5xf32) + conv2d_4 = paddle._C_ops.conv2d( + reshape_8, parameter_23, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_23 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 1x128x1x1xf32) + add_26 = paddle._C_ops.add(conv2d_4, reshape_9) + + # pd_op.full: (xf64) <- () + full_9 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__6 = paddle._C_ops.assign_value_( + full_9, + [], + paddle.float64, + [float("0.976471")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.cast: (xf32) <- (xf64) + cast_6 = paddle._C_ops.cast(assign_value__6, paddle.float32) + del assign_value__6 + + # pd_op.shape64: (4xi64) <- (8x128x8x80xf32) + shape64_6 = paddle._C_ops.shape64(add_26) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_6 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [slice_6, full_1, full_1, full_1] + del slice_6 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_6 = paddle._C_ops.uniform( + stack_6, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_6 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_27 = paddle._C_ops.add(cast_6, uniform_6) + del uniform_6 + + # 
pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_6 = paddle._C_ops.floor(add_27) + del add_27 + + # pd_op.divide: (8x128x8x80xf32) <- (8x128x8x80xf32, xf32) + divide_6 = paddle._C_ops.divide(add_26, cast_6) + + # pd_op.multiply: (8x128x8x80xf32) <- (8x128x8x80xf32, -1x1x1x1xf32) + multiply_6 = paddle._C_ops.multiply(divide_6, floor_6) + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 8x128x8x80xf32) + add_28 = paddle._C_ops.add(reshape_8, multiply_6) + + # pd_op.flatten: (8x128x640xf32) <- (8x128x8x80xf32) + flatten_4 = paddle._C_ops.flatten(add_28, 2, 3) + + # pd_op.transpose: (8x640x128xf32) <- (8x128x640xf32) + transpose_8 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_24, layer_norm_25, layer_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_8, parameter_21, parameter_20, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_20, parameter_21 + + # pd_op.matmul: (8x640x512xf32) <- (8x640x128xf32, 128x512xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_24, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (8x640x512xf32) <- (8x640x512xf32, 512xf32) + add_29 = paddle._C_ops.add(matmul_8, parameter_18) + del parameter_18 + + # pd_op.gelu: (8x640x512xf32) <- (8x640x512xf32) + gelu_4 = paddle._C_ops.gelu(add_29, False) + + # pd_op.matmul: (8x640x128xf32) <- (8x640x512xf32, 512x128xf32) + matmul_9 = paddle._C_ops.matmul(gelu_4, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 128xf32) + add_30 = paddle._C_ops.add(matmul_9, parameter_16) + del parameter_16 + + # pd_op.full: (xf64) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__7 = paddle._C_ops.assign_value_( + full_10, + [], + paddle.float64, + [float("0.976471")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.cast: (xf32) <- (xf64) + cast_7 = paddle._C_ops.cast(assign_value__7, paddle.float32) + del assign_value__7 + + # pd_op.shape64: (3xi64) <- (8x640x128xf32) + shape64_7 = paddle._C_ops.shape64(add_30) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_7 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [slice_7, full_1, full_1] + del slice_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_7 = paddle._C_ops.uniform( + stack_7, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_7 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_31 = paddle._C_ops.add(cast_7, uniform_7) + del uniform_7 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_7 = paddle._C_ops.floor(add_31) + del add_31 + + # pd_op.divide: (8x640x128xf32) <- (8x640x128xf32, xf32) + divide_7 = paddle._C_ops.divide(add_30, cast_7) + + # pd_op.multiply: (8x640x128xf32) <- (8x640x128xf32, -1x1x1xf32) + multiply_7 = paddle._C_ops.multiply(divide_7, floor_7) + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 8x640x128xf32) + add_32 = paddle._C_ops.add(layer_norm_24, multiply_7) + + # pd_op.layer_norm: 
(8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_27, layer_norm_28, layer_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_32, parameter_15, parameter_14, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_14, parameter_15 + + # pd_op.transpose: (8x128x640xf32) <- (8x640x128xf32) + transpose_9 = paddle._C_ops.transpose(layer_norm_27, [0, 2, 1]) + del layer_norm_27 + + # pd_op.reshape: (8x128x8x80xf32) <- (8x128x640xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(transpose_9, full_int_array_1) + + # pd_op.conv2d: (8x128x8x80xf32) <- (8x128x8x80xf32, 128x32x5x5xf32) + conv2d_5 = paddle._C_ops.conv2d( + reshape_10, parameter_13, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 1x128x1x1xf32) + add_33 = paddle._C_ops.add(conv2d_5, reshape_11) + + # pd_op.full: (xf64) <- () + full_11 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__8 = paddle._C_ops.assign_value_( + full_11, + [], + paddle.float64, + [float("0.970588")], + paddle.framework._current_expected_place(), + ) + del full_11 + + # pd_op.cast: (xf32) <- (xf64) + cast_8 = paddle._C_ops.cast(assign_value__8, paddle.float32) + del assign_value__8 + + # pd_op.shape64: (4xi64) <- (8x128x8x80xf32) + shape64_8 = paddle._C_ops.shape64(add_33) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_8 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_8 = [slice_8, full_1, full_1, full_1] + del slice_8 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_8 = paddle._C_ops.uniform( + stack_8, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_8 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_34 = paddle._C_ops.add(cast_8, uniform_8) + del uniform_8 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_8 = paddle._C_ops.floor(add_34) + del add_34 + + # pd_op.divide: (8x128x8x80xf32) <- (8x128x8x80xf32, xf32) + divide_8 = paddle._C_ops.divide(add_33, cast_8) + + # pd_op.multiply: (8x128x8x80xf32) <- (8x128x8x80xf32, -1x1x1x1xf32) + multiply_8 = paddle._C_ops.multiply(divide_8, floor_8) + + # pd_op.add: (8x128x8x80xf32) <- (8x128x8x80xf32, 8x128x8x80xf32) + add_35 = paddle._C_ops.add(reshape_10, multiply_8) + + # pd_op.flatten: (8x128x640xf32) <- (8x128x8x80xf32) + flatten_5 = paddle._C_ops.flatten(add_35, 2, 3) + + # pd_op.transpose: (8x640x128xf32) <- (8x128x640xf32) + transpose_10 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_30, layer_norm_31, layer_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_10, parameter_11, parameter_10, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_10, parameter_11 + + # pd_op.matmul: (8x640x512xf32) 
<- (8x640x128xf32, 128x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_30, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (8x640x512xf32) <- (8x640x512xf32, 512xf32) + add_36 = paddle._C_ops.add(matmul_10, parameter_8) + del parameter_8 + + # pd_op.gelu: (8x640x512xf32) <- (8x640x512xf32) + gelu_5 = paddle._C_ops.gelu(add_36, False) + + # pd_op.matmul: (8x640x128xf32) <- (8x640x512xf32, 512x128xf32) + matmul_11 = paddle._C_ops.matmul(gelu_5, parameter_7, False, False) + del parameter_7 + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 128xf32) + add_37 = paddle._C_ops.add(matmul_11, parameter_6) + del parameter_6 + + # pd_op.full: (xf64) <- () + full_12 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__9 = paddle._C_ops.assign_value_( + full_12, + [], + paddle.float64, + [float("0.970588")], + paddle.framework._current_expected_place(), + ) + del full_12 + + # pd_op.cast: (xf32) <- (xf64) + cast_9 = paddle._C_ops.cast(assign_value__9, paddle.float32) + del assign_value__9 + + # pd_op.shape64: (3xi64) <- (8x640x128xf32) + shape64_9 = paddle._C_ops.shape64(add_37) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_9 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_9 = [slice_9, full_1, full_1] + del full_1, slice_9 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_9 = paddle._C_ops.uniform( + stack_9, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del full_2, full_3, stack_9 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_38 = paddle._C_ops.add(cast_9, uniform_9) + del uniform_9 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_9 = paddle._C_ops.floor(add_38) + del add_38 + + # pd_op.divide: (8x640x128xf32) <- (8x640x128xf32, xf32) + divide_9 = paddle._C_ops.divide(add_37, cast_9) + + # pd_op.multiply: (8x640x128xf32) <- (8x640x128xf32, -1x1x1xf32) + multiply_9 = paddle._C_ops.multiply(divide_9, floor_9) + + # pd_op.add: (8x640x128xf32) <- (8x640x128xf32, 8x640x128xf32) + add_39 = paddle._C_ops.add(layer_norm_30, multiply_9) + + # pd_op.layer_norm: (8x640x128xf32, 8x640xf32, 8x640xf32) <- (8x640x128xf32, 128xf32, 128xf32) + layer_norm_33, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_39, parameter_5, parameter_4, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_4, parameter_5 + + # pd_op.transpose: (8x128x640xf32) <- (8x640x128xf32) + transpose_11 = paddle._C_ops.transpose(layer_norm_33, [0, 2, 1]) + del layer_norm_33 + + # pd_op.reshape: (8x128x8x80xf32) <- (8x128x640xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(transpose_11, full_int_array_1) + del full_int_array_1 + + # pd_op.conv2d: (8x256x4x80xf32) <- (8x128x8x80xf32, 256x128x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + reshape_12, parameter_3, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del full_int_array_0, parameter_2 + + # pd_op.add: (8x256x4x80xf32) <- (8x256x4x80xf32, 
1x256x1x1xf32) + add_40 = paddle._C_ops.add(conv2d_6, reshape_13) + + # pd_op.flatten: (8x256x320xf32) <- (8x256x4x80xf32) + flatten_6 = paddle._C_ops.flatten(add_40, 2, 3) + + # pd_op.transpose: (8x320x256xf32) <- (8x256x320xf32) + transpose_12 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.layer_norm: (8x320x256xf32, 8x320xf32, 8x320xf32) <- (8x320x256xf32, 256xf32, 256xf32) + layer_norm_36, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_12, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_0, parameter_1 + + # pd_op.transpose: (8x256x320xf32) <- (8x320x256xf32) + transpose_13 = paddle._C_ops.transpose(layer_norm_36, [0, 2, 1]) + del layer_norm_36 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_4 = [0, 256, 4, 80] + + # pd_op.reshape: (8x256x4x80xf32) <- (8x256x320xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(transpose_13, full_int_array_4) + del ( + add_0, + add_1, + add_11, + add_12, + add_14, + add_15, + add_16, + add_18, + add_19, + add_2, + add_21, + add_22, + add_23, + add_25, + add_26, + add_28, + add_29, + add_3, + add_30, + add_32, + add_33, + add_35, + add_36, + add_37, + add_39, + add_4, + add_40, + add_5, + add_7, + add_8, + add_9, + cast_0, + cast_1, + cast_2, + cast_3, + cast_4, + cast_5, + cast_6, + cast_7, + cast_8, + cast_9, + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + divide_0, + divide_1, + divide_2, + divide_3, + divide_4, + divide_5, + divide_6, + divide_7, + divide_8, + divide_9, + floor_0, + floor_1, + floor_2, + floor_3, + floor_4, + floor_5, + floor_6, + floor_7, + floor_8, + floor_9, + full_int_array_4, + gelu_0, + gelu_1, + gelu_2, + gelu_3, + gelu_4, + gelu_5, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_24, + layer_norm_25, + layer_norm_26, + layer_norm_28, + layer_norm_29, + layer_norm_30, + layer_norm_31, + layer_norm_32, + layer_norm_34, + layer_norm_35, + layer_norm_37, + layer_norm_38, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + matmul_0, + matmul_1, + matmul_10, + matmul_11, + matmul_2, + matmul_3, + matmul_4, + matmul_5, + matmul_6, + matmul_7, + matmul_8, + matmul_9, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + ) + + return reshape_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/weight_meta.py new file mode 100644 index 000000000..13ef60850 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/weight_meta.py @@ -0,0 +1,702 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [256] + dtype = "float32" + min_val = float("-0.668521") + max_val = float("1.94369") + mean = 
float("-0.0632413") + std = float("0.230014") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256] + dtype = "float32" + min_val = float("0.187715") + max_val = float("2.04484") + mean = float("1.29986") + std = float("0.242333") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-8.25526") + max_val = float("0.841186") + mean = float("-0.0148969") + std = float("0.558246") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256, 128, 3, 3] + dtype = "float32" + min_val = float("-6.30351") + max_val = float("7.58677") + mean = float("-0.00014357") + std = float("0.411001") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [128] + dtype = "float32" + min_val = float("-0.716209") + max_val = float("0.756504") + mean = float("-0.0934846") + std = float("0.22947") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [128] + dtype = "float32" + min_val = float("0.40875") + max_val = float("1.66399") + mean = float("1.24008") + std = float("0.174513") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [128] + dtype = "float32" + min_val = float("-4.08424") + max_val = float("0.55395") + mean = float("-0.00506745") + std = float("0.404986") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.471752") + max_val = float("0.41674") + mean = float("7.42555e-06") + std = float("0.0632707") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-1.89779") + max_val = float("-0.000424536") + mean = float("-0.660915") + std = float("0.276851") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.322236") + max_val = float("0.346725") + mean = float("-0.000322571") + std = float("0.0642253") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [128] + dtype = "float32" + min_val = float("-0.681698") + max_val = float("0.411014") + mean = float("-0.0107907") + std = float("0.165352") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [128] + dtype = "float32" + min_val = float("0.572885") + max_val = float("2.47782") + mean = float("0.914634") + std = float("0.194054") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [128] + dtype = "float32" + min_val = float("-0.7091") + max_val = float("1.03592") + mean = float("0.00280991") + std = float("0.290276") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.399123") + max_val = float("0.347374") + mean = float("0.00131884") + std = float("0.0437688") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [128] + dtype = "float32" + min_val = float("-0.594308") + max_val = float("0.729356") + mean = float("-0.0827838") + std = float("0.190602") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [128] + dtype = "float32" + min_val = float("0.274019") + max_val = float("1.91845") + mean = 
float("1.28924") + std = float("0.253914") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [128] + dtype = "float32" + min_val = float("-5.1138") + max_val = float("0.431996") + mean = float("-0.00639552") + std = float("0.478386") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.818937") + max_val = float("0.75812") + mean = float("4.95905e-05") + std = float("0.069647") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512] + dtype = "float32" + min_val = float("-1.37765") + max_val = float("-0.0641338") + mean = float("-0.726835") + std = float("0.184613") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.405105") + max_val = float("0.322186") + mean = float("-0.00107139") + std = float("0.0676507") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [128] + dtype = "float32" + min_val = float("-0.815685") + max_val = float("0.703344") + mean = float("0.0189572") + std = float("0.202864") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [128] + dtype = "float32" + min_val = float("0.685492") + max_val = float("2.07758") + mean = float("0.965816") + std = float("0.155292") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [128] + dtype = "float32" + min_val = float("-0.885409") + max_val = float("1.11823") + mean = float("-0.00471085") + std = float("0.309713") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.317961") + max_val = float("0.311825") + mean = float("0.00121326") + std = float("0.0447484") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [128] + dtype = "float32" + min_val = float("-0.55219") + max_val = float("0.487629") + mean = float("0.0477595") + std = float("0.164838") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [128] + dtype = "float32" + min_val = float("0.0629769") + max_val = float("1.88076") + mean = float("1.29016") + std = float("0.229317") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [128] + dtype = "float32" + min_val = float("-0.830581") + max_val = float("10.3139") + mean = float("0.0615956") + std = float("0.927675") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.988648") + max_val = float("0.942632") + mean = float("0.000107006") + std = float("0.0724812") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [512] + dtype = "float32" + min_val = float("-1.56353") + max_val = float("-0.150537") + mean = float("-0.720642") + std = float("0.207041") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.414822") + max_val = float("0.380912") + mean = float("-0.00149326") + std = float("0.0678869") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [128] + dtype = "float32" + min_val = float("-0.706417") + max_val = 
float("0.874461") + mean = float("0.0387127") + std = float("0.225457") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [128] + dtype = "float32" + min_val = float("0.782251") + max_val = float("1.93768") + mean = float("1.00745") + std = float("0.134693") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [128] + dtype = "float32" + min_val = float("-0.829148") + max_val = float("1.01714") + mean = float("-0.0103483") + std = float("0.291259") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.276089") + max_val = float("0.369555") + mean = float("0.000770056") + std = float("0.043952") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [128] + dtype = "float32" + min_val = float("-1.43839") + max_val = float("0.954128") + mean = float("-0.041584") + std = float("0.255434") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [128] + dtype = "float32" + min_val = float("0.2699") + max_val = float("2.00705") + mean = float("1.31203") + std = float("0.264854") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [128] + dtype = "float32" + min_val = float("-4.80893") + max_val = float("4.33112") + mean = float("0.0111943") + std = float("0.607984") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.66374") + max_val = float("0.872887") + mean = float("8.08444e-05") + std = float("0.0719889") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [512] + dtype = "float32" + min_val = float("-1.38975") + max_val = float("0.0599212") + mean = float("-0.715143") + std = float("0.202181") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.337775") + max_val = float("0.355338") + mean = float("0.00103362") + std = float("0.0675632") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [128] + dtype = "float32" + min_val = float("-1.21058") + max_val = float("0.779394") + mean = float("-0.0441957") + std = float("0.253557") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [128] + dtype = "float32" + min_val = float("0.698989") + max_val = float("1.37895") + mean = float("1.03219") + std = float("0.134822") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [128] + dtype = "float32" + min_val = float("-1.06") + max_val = float("1.09196") + mean = float("0.0111421") + std = float("0.326226") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.351555") + max_val = float("0.424499") + mean = float("0.00116859") + std = float("0.0431898") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [128] + dtype = "float32" + min_val = float("-0.978141") + max_val = float("0.885814") + mean = float("-0.0673286") + std = float("0.257847") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [128] + dtype = "float32" + min_val = float("0.301306") + max_val 
= float("1.9109") + mean = float("1.29689") + std = float("0.256564") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [128] + dtype = "float32" + min_val = float("-4.93517") + max_val = float("1.00089") + mean = float("-0.000170423") + std = float("0.486882") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.634287") + max_val = float("0.452401") + mean = float("0.000115191") + std = float("0.0689642") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [512] + dtype = "float32" + min_val = float("-1.76981") + max_val = float("-0.239422") + mean = float("-0.739457") + std = float("0.207037") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.369617") + max_val = float("0.377451") + mean = float("-0.000336863") + std = float("0.0663584") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [128] + dtype = "float32" + min_val = float("-0.941485") + max_val = float("0.540083") + mean = float("-0.00308673") + std = float("0.214223") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [128] + dtype = "float32" + min_val = float("0.631475") + max_val = float("1.69504") + mean = float("1.07861") + std = float("0.153476") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [128] + dtype = "float32" + min_val = float("-1.6476") + max_val = float("0.972547") + mean = float("-0.00739113") + std = float("0.381321") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.423287") + max_val = float("0.51164") + mean = float("0.000883394") + std = float("0.0425554") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [128] + dtype = "float32" + min_val = float("-1.06966") + max_val = float("0.668333") + mean = float("0.0116222") + std = float("0.317468") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [128] + dtype = "float32" + min_val = float("0.476229") + max_val = float("1.97542") + mean = float("1.18636") + std = float("0.265356") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [128] + dtype = "float32" + min_val = float("-0.885903") + max_val = float("2.54609") + mean = float("-0.00492146") + std = float("0.347268") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.397077") + max_val = float("0.601582") + mean = float("0.00040258") + std = float("0.0666728") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [512] + dtype = "float32" + min_val = float("-1.34719") + max_val = float("0.109614") + mean = float("-0.665017") + std = float("0.258953") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.549137") + max_val = float("0.626329") + mean = float("-0.00198159") + std = float("0.0662168") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [128] + dtype = "float32" + min_val = 
float("-0.778871") + max_val = float("1.00296") + mean = float("0.0775527") + std = float("0.31264") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [128] + dtype = "float32" + min_val = float("0.533039") + max_val = float("1.87165") + mean = float("1.12727") + std = float("0.232322") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [128] + dtype = "float32" + min_val = float("-0.822867") + max_val = float("2.60621") + mean = float("-0.00422463") + std = float("0.389965") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.486222") + max_val = float("0.596873") + mean = float("0.0015935") + std = float("0.0440849") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt new file mode 100644 index 000000000..76058b79d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt @@ -0,0 +1 @@ +71b45662c8e88195365ea2e8f6a9033fcd2512b09d4c3f88330952d2f4e1ad98 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/input_meta.py new file mode 100644 index 000000000..24f33cd65 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 384, 1, 40] + dtype = "float32" + min_val = float("-6.75135") + max_val = float("3.74564") + mean = float("0.00225273") + std = float("0.411442") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/model.py new file mode 100644 index 000000000..d374899d1 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/model.py @@ -0,0 +1,804 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + data_0, + ): + # pd_op.assign: (4x384x1x40xf32) <- (4x384x1x40xf32) + assign_0 = data_0 + del data_0 + + # pd_op.conv2d: (4x48x1x40xf32) <- 
(4x384x1x40xf32, 48x384x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + assign_0, parameter_52, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (4x48x1x40xf32) <- (4x48x1x40xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (4x256x1x40xf32) <- (4x48x1x40xf32, 256x48x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_47 + + # pd_op.batch_norm_: (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.swish: (4x256x1x40xf32) <- (4x256x1x40xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.flatten: (4x256x40xf32) <- (4x256x1x40xf32) + flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) + + # pd_op.transpose: (4x40x256xf32) <- (4x256x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_42, parameter_41, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_41, parameter_42 + + # pd_op.matmul: (4x40x768xf32) <- (4x40x256xf32, 256x768xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_39) + del parameter_39 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_0 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (4x40x3x8x32xf32) <- (4x40x768xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_0) + + # pd_op.transpose: (3x4x8x40x32xf32) <- (4x40x3x8x32xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_2 + + # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) + slice_0 = 
paddle._C_ops.slice( + transpose_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_0 + + # pd_op.scale: (4x8x40x32xf32) <- (4x8x40x32xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_0, full_0, float("0"), True) + del slice_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_3 + + # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_4 + + # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (4x8x32x40xf32) <- (4x8x40x32xf32) + transpose_2 = paddle._C_ops.transpose(slice_1, [0, 1, 3, 2]) + del slice_1 + + # pd_op.matmul: (4x8x40x40xf32) <- (4x8x40x32xf32, 4x8x32x40xf32) + matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) + + # pd_op.softmax: (4x8x40x40xf32) <- (4x8x40x40xf32) + softmax_0 = paddle._C_ops.softmax(matmul_1, -1) + del matmul_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_11 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_12 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_15 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_16 = full_1 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_17 = full_1 + + # pd_op.dropout: (4x8x40x40xf32, 4x8x40x40xui8) <- (4x8x40x40xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (4x8x40x32xf32) <- (4x8x40x40xf32, 4x8x40x32xf32) + matmul_2 = paddle._C_ops.matmul(dropout_0, slice_2, False, False) + + # pd_op.transpose: (4x40x8x32xf32) <- (4x8x40x32xf32) + transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [0, -1, 256] + + # pd_op.reshape: (4x40x256xf32) <- (4x40x8x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) + + # pd_op.matmul: (4x40x256xf32) <- (4x40x256xf32, 256x256xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) + del parameter_38 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) + add_2 = paddle._C_ops.add(matmul_3, parameter_37) + del parameter_37 + + # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_2, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del 
add_2 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) + add_3 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_3, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.matmul: (4x40x512xf32) <- (4x40x256xf32, 256x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) + del parameter_34 + + # pd_op.add: (4x40x512xf32) <- (4x40x512xf32, 512xf32) + add_4 = paddle._C_ops.add(matmul_4, parameter_33) + del parameter_33 + + # pd_op.swish: (4x40x512xf32) <- (4x40x512xf32) + swish_2 = paddle._C_ops.swish(add_4) + + # pd_op.dropout: (4x40x512xf32, 4x40x512xui8) <- (4x40x512xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_2, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_2 + + # pd_op.matmul: (4x40x256xf32) <- (4x40x512xf32, 512x256xf32) + matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) + del parameter_32 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) + add_5 = paddle._C_ops.add(matmul_5, parameter_31) + del parameter_31 + + # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_5, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_5 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) + add_6 = paddle._C_ops.add(add_3, dropout_6) + + # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_30, parameter_29, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_29, parameter_30 + + # pd_op.matmul: (4x40x768xf32) <- (4x40x256xf32, 256x768xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) + del parameter_28 + + # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) + add_7 = paddle._C_ops.add(matmul_6, parameter_27) + del parameter_27 + + # pd_op.reshape: (4x40x3x8x32xf32) <- (4x40x768xf32, 5xi64) + reshape_2 = paddle._C_ops.reshape(add_7, full_int_array_0) + del full_int_array_0 + + # pd_op.transpose: (3x4x8x40x32xf32) <- (4x40x3x8x32xf32) + transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) + del reshape_2 + + # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.scale: (4x8x40x32xf32) <- (4x8x40x32xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_3, full_0, float("0"), True) + del slice_3 + + # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (4x8x32x40xf32) <- (4x8x40x32xf32) + transpose_5 = 
paddle._C_ops.transpose(slice_4, [0, 1, 3, 2]) + del slice_4 + + # pd_op.matmul: (4x8x40x40xf32) <- (4x8x40x32xf32, 4x8x32x40xf32) + matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) + + # pd_op.softmax: (4x8x40x40xf32) <- (4x8x40x40xf32) + softmax_1 = paddle._C_ops.softmax(matmul_7, -1) + del matmul_7 + + # pd_op.dropout: (4x8x40x40xf32, 4x8x40x40xui8) <- (4x8x40x40xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (4x8x40x32xf32) <- (4x8x40x40xf32, 4x8x40x32xf32) + matmul_8 = paddle._C_ops.matmul(dropout_8, slice_5, False, False) + + # pd_op.transpose: (4x40x8x32xf32) <- (4x8x40x32xf32) + transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (4x40x256xf32) <- (4x40x8x32xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) + del full_int_array_5 + + # pd_op.matmul: (4x40x256xf32) <- (4x40x256xf32, 256x256xf32) + matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) + del parameter_26 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) + add_8 = paddle._C_ops.add(matmul_9, parameter_25) + del parameter_25 + + # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) + add_9 = paddle._C_ops.add(add_6, dropout_10) + + # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_24, parameter_23, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_23, parameter_24 + + # pd_op.matmul: (4x40x512xf32) <- (4x40x256xf32, 256x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) + del parameter_22 + + # pd_op.add: (4x40x512xf32) <- (4x40x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_10, parameter_21) + del parameter_21 + + # pd_op.swish: (4x40x512xf32) <- (4x40x512xf32) + swish_3 = paddle._C_ops.swish(add_10) + + # pd_op.dropout: (4x40x512xf32, 4x40x512xui8) <- (4x40x512xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_3, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_3 + + # pd_op.matmul: (4x40x256xf32) <- (4x40x512xf32, 512x256xf32) + matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) + del parameter_20 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) + add_11 = paddle._C_ops.add(matmul_11, parameter_19) + del parameter_19 + + # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_1, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) + add_12 = paddle._C_ops.add(add_9, dropout_14) + + # pd_op.layer_norm: 
(4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_18, parameter_17, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_17, parameter_18 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [0, 1, 40, 256] + + # pd_op.reshape: (4x1x40x256xf32) <- (4x40x256xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) + del full_int_array_6 + + # pd_op.transpose: (4x256x1x40xf32) <- (4x1x40x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) + del reshape_4 + + # pd_op.conv2d: (4x384x1x40xf32) <- (4x256x1x40xf32, 384x256x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (4x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (4x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.swish: (4x384x1x40xf32) <- (4x384x1x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([4x384x1x40xf32, 4x384x1x40xf32]) <- (4x384x1x40xf32, 4x384x1x40xf32) + combine_0 = [assign_0, swish_4] + + # pd_op.concat: (4x768x1x40xf32) <- ([4x384x1x40xf32, 4x384x1x40xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0 + + # pd_op.conv2d: (4x48x1x40xf32) <- (4x768x1x40xf32, 48x768x1x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + concat_0, parameter_11, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.swish: (4x48x1x40xf32) <- (4x48x1x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (4x256x1x40xf32) <- (4x48x1x40xf32, 256x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, 
+ float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (4x256x1x40xf32) <- (4x256x1x40xf32) + swish_6 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.squeeze: (4x256x40xf32) <- (4x256x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) + + # pd_op.transpose: (4x40x256xf32) <- (4x256x40xf32) + transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.matmul: (4x40x6625xf32) <- (4x40x256xf32, 256x6625xf32) + matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) + del parameter_1 + + # pd_op.add: (4x40x6625xf32) <- (4x40x6625xf32, 6625xf32) + add_0 = paddle._C_ops.add(matmul_12, parameter_0) + del ( + add_1, + add_10, + add_12, + add_3, + add_4, + add_6, + add_7, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + concat_0, + conv2d_0, + conv2d_1, + conv2d_2, + conv2d_3, + conv2d_4, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_2, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_1, + full_2, + full_int_array_1, + full_int_array_2, + full_int_array_3, + full_int_array_4, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_10, + matmul_11, + matmul_12, + matmul_3, + matmul_4, + matmul_5, + matmul_6, + matmul_9, + parameter_0, + reshape_1, + reshape_3, + scale_0, + scale_1, + slice_2, + slice_5, + softmax_0, + softmax_1, + swish_0, + swish_1, + swish_4, + swish_5, + swish_6, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + ) + + return add_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/weight_meta.py new file mode 100644 index 000000000..16a4561a2 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/weight_meta.py @@ -0,0 +1,565 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6625] + dtype = "float32" + min_val = float("-1.46606") + max_val = float("1.53074") + mean = float("-0.244704") + std = float("0.129444") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256, 6625] + dtype = "float32" + min_val = float("-0.842937") + max_val = float("0.387786") + mean = float("-0.230259") + std = float("0.0941191") + data = None + + +class 
Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-3.44711") + max_val = float("9.32881") + mean = float("-0.0176235") + std = float("1.09428") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256] + dtype = "float32" + min_val = float("1.46858") + max_val = float("4.88491") + mean = float("2.71251") + std = float("0.50832") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("0.0231144") + max_val = float("0.139857") + mean = float("0.0357936") + std = float("0.0143259") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256] + dtype = "float32" + min_val = float("-1.63971") + max_val = float("1.06628") + mean = float("0.232705") + std = float("0.393637") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [256, 48, 1, 1] + dtype = "float32" + min_val = float("-0.346652") + max_val = float("0.199764") + mean = float("0.00135982") + std = float("0.0454835") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [48, 768, 1, 3] + dtype = "float32" + min_val = float("-0.411824") + max_val = float("0.447639") + mean = float("0.000202113") + std = float("0.0461906") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-2.40135") + max_val = float("0.0933204") + mean = float("-1.07436") + std = float("0.346628") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.392586") + max_val = float("2.96008") + mean = float("0.795887") + std = float("0.220098") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.186257") + max_val = float("2.85396") + mean = float("0.419038") + std = float("0.201914") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-1.35379") + max_val = float("2.10763") + mean = float("0.168892") + std = float("0.559553") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 256, 1, 1] + dtype = "float32" + min_val = float("-0.334565") + max_val = float("0.246529") + mean = float("0.000726154") + std = float("0.0450263") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256] + dtype = "float32" + min_val = float("-1.32656") + max_val = float("1.39396") + mean = float("0.00907315") + std = float("0.384049") + data = None + + +class Program_weight_tensor_parameter_18: + name = 
"parameter_18" + shape = [256] + dtype = "float32" + min_val = float("0.24478") + max_val = float("2.11884") + mean = float("1.17906") + std = float("0.297452") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [256] + dtype = "float32" + min_val = float("-2.38568") + max_val = float("2.51877") + mean = float("-0.0175149") + std = float("0.463899") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512, 256] + dtype = "float32" + min_val = float("-0.770555") + max_val = float("0.653487") + mean = float("0.00010081") + std = float("0.0694743") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512] + dtype = "float32" + min_val = float("-1.72071") + max_val = float("-0.0488819") + mean = float("-0.690943") + std = float("0.191943") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256, 512] + dtype = "float32" + min_val = float("-0.486813") + max_val = float("0.349136") + mean = float("-0.0109179") + std = float("0.0585589") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("-0.830129") + max_val = float("3.54568") + mean = float("0.401418") + std = float("0.553377") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("0.515229") + max_val = float("2.65418") + mean = float("1.58716") + std = float("0.339216") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256] + dtype = "float32" + min_val = float("-0.910073") + max_val = float("0.803297") + mean = float("0.013143") + std = float("0.180515") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.336543") + max_val = float("0.384959") + mean = float("3.31584e-05") + std = float("0.0567728") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [768] + dtype = "float32" + min_val = float("-2.12229") + max_val = float("2.02694") + mean = float("0.0144932") + std = float("0.382514") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.52327") + max_val = float("0.465244") + mean = float("-3.15509e-05") + std = float("0.0524875") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("-1.36519") + max_val = float("0.608402") + mean = float("0.0183697") + std = float("0.298894") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("0.0883802") + max_val = float("1.58715") + mean = float("0.962013") + std = float("0.21906") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [256] + dtype = "float32" + min_val = float("-1.50972") + max_val = float("0.606858") + mean = float("-0.0166074") + std = float("0.219025") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [512, 256] + dtype = "float32" + min_val = float("-0.57403") + max_val = float("0.528108") + mean = float("-0.000393892") + std = float("0.0602463") + data = None + + +class Program_weight_tensor_parameter_33: + 
name = "parameter_33" + shape = [512] + dtype = "float32" + min_val = float("-1.37099") + max_val = float("0.0959241") + mean = float("-0.599216") + std = float("0.309803") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256, 512] + dtype = "float32" + min_val = float("-0.809903") + max_val = float("0.393094") + mean = float("-0.0150369") + std = float("0.063342") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("-1.37513") + max_val = float("2.50846") + mean = float("0.558993") + std = float("0.614385") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-0.00444758") + max_val = float("2.37752") + mean = float("1.32518") + std = float("0.352321") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256] + dtype = "float32" + min_val = float("-0.889222") + max_val = float("0.34839") + mean = float("-0.000576614") + std = float("0.114881") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.295902") + max_val = float("0.236114") + mean = float("-8.30219e-05") + std = float("0.0511669") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-1.65431") + max_val = float("1.70769") + mean = float("-0.00397458") + std = float("0.42148") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.330457") + max_val = float("0.458014") + mean = float("6.30045e-05") + std = float("0.0509823") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-0.627416") + max_val = float("0.45842") + mean = float("0.0475874") + std = float("0.164939") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = float("-0.239407") + max_val = float("1.06255") + mean = float("0.472467") + std = float("0.228607") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [256] + dtype = "float32" + min_val = float("-1.8788") + max_val = float("2.96198") + mean = float("0.0198295") + std = float("0.837533") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256] + dtype = "float32" + min_val = float("0.545192") + max_val = float("5.24688") + mean = float("1.85214") + std = float("0.675565") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = float("0.0291677") + max_val = float("0.954449") + mean = float("0.0942913") + std = float("0.120471") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-2.4665") + max_val = float("2.21725") + mean = float("-0.069404") + std = float("0.82517") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256, 48, 1, 1] + dtype = "float32" + min_val = float("-0.397741") + max_val = float("0.601989") + mean = float("-0.000414698") + std = float("0.0453336") + data = None + + +class 
Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [48, 384, 1, 3] + dtype = "float32" + min_val = float("-0.496082") + max_val = float("0.336987") + mean = float("0.000211926") + std = float("0.0403539") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt new file mode 100644 index 000000000..3a93b7d59 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt @@ -0,0 +1 @@ +3cf9c726846dfc01838c2dc3b03680c0562c863765d2b18ad1152d57d8b44ce6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/input_meta.py new file mode 100644 index 000000000..6726d5813 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/input_meta.py @@ -0,0 +1,36 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 384, 1, 40] + dtype = "float32" + min_val = float("-6.75135") + max_val = float("3.74564") + mean = float("0.00225273") + std = float("0.411442") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 25] + dtype = "int64" + min_val = 0 + max_val = 6627 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4] + dtype = "int64" + data = [3, 4, 3, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [5000, 1, 384] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.131787") + std = float("0.694717") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/model.py new file mode 100644 index 000000000..b08d01ec8 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/model.py @@ -0,0 +1,1089 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + 
parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.flatten: (4x384x40xf32) <- (4x384x1x40xf32) + flatten_0 = paddle._C_ops.flatten(data_0, 2, 3) + del data_0 + + # pd_op.transpose: (4x40x384xf32) <- (4x384x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.matmul: (4x40x384xf32) <- (4x40x384xf32, 384x384xf32) + matmul_1 = paddle._C_ops.matmul(transpose_0, parameter_42, False, False) + del parameter_42 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.max: (xi64) <- (4xi64, 0xi64) + max_0 = paddle._C_ops.max(data_2, full_int_array_0, False) + del data_2, full_int_array_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_0, float("2"), True) + del full_0, max_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # builtin.combine: ([xi64]) <- (xi64) + combine_0 = [scale_0] + del scale_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.slice: (4x-1xi64) <- (4x25xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice(data_1, [1], full_int_array_1, stack_0, [-1], []) + del data_1, stack_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-1] + + # pd_op.slice: (4x-1xi64) <- (4x-1xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + slice_0, [1], full_int_array_1, full_int_array_2, [1], [] + ) + del full_int_array_2, slice_0 + + # pd_op.embedding: (4x-1x384xf32) <- (4x-1xi64, 6629x384xf32) + embedding_0 = paddle._C_ops.embedding(slice_1, parameter_41, 0, False) + del parameter_41 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("19.5959"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (4x-1x384xf32) <- (4x-1x384xf32, 1xf32) + scale_1 = paddle._C_ops.scale(embedding_0, full_1, float("0"), True) + del embedding_0 + + # pd_op.transpose: (-1x4x384xf32) <- (4x-1x384xf32) + transpose_1 = paddle._C_ops.transpose(scale_1, [1, 0, 2]) + del scale_1 + + # pd_op.shape64: (3xi64) <- (-1x4x384xf32) + shape64_0 = paddle._C_ops.shape64(transpose_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_3 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del shape64_0 
+ + # builtin.combine: ([xi64]) <- (xi64) + combine_1 = [slice_2] + del slice_2 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (-1x1x384xf32) <- (5000x1x384xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice(data_3, [0], full_int_array_1, stack_1, [-1], []) + del data_3, stack_1 + + # pd_op.add: (-1x4x384xf32) <- (-1x4x384xf32, -1x1x384xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_3) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_12 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_13 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_14 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_15 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_16 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_17 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_18 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_19 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_20 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_21 = full_2 + + # pd_op.dropout: (-1x4x384xf32, -1x4x384xui8) <- (-1x4x384xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_0 + + # pd_op.transpose: (4x-1x384xf32) <- (-1x4x384xf32) + transpose_2 = paddle._C_ops.transpose(dropout_0, [1, 0, 2]) + del dropout_0 + + # pd_op.shape64: (3xi64) <- (4x-1x384xf32) + shape64_1 = paddle._C_ops.shape64(transpose_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_2 = [slice_4, slice_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_4, slice_4] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-inf"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_4, stack_3, paddle.float32 + ) + del full_4, stack_3 + + # pd_op.triu: (-1x-1xf32) <- (-1x-1xf32) + triu_0 = paddle._C_ops.triu(full_with_tensor_1, 1) + del full_with_tensor_1 + + # pd_op.add: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) + 
add_1 = paddle._C_ops.add(full_with_tensor_0, triu_0) + del full_with_tensor_0, triu_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(add_1, full_int_array_5) + del add_1, full_int_array_5 + + # pd_op.matmul: (4x-1x1152xf32) <- (4x-1x384xf32, 384x1152xf32) + matmul_2 = paddle._C_ops.matmul(transpose_2, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (4x-1x1152xf32) <- (4x-1x1152xf32, 1152xf32) + add_2 = paddle._C_ops.add(matmul_2, parameter_39) + del parameter_39 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("12"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_4 = [full_5, slice_4, full_6, full_7, full_8] + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (4x-1x3x12x32xf32) <- (4x-1x1152xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_2, stack_4) + del stack_4 + + # pd_op.transpose: (3x4x12x-1x32xf32) <- (4x-1x3x12x32xf32) + transpose_3 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_6 + + # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_4, full_int_array_6, [1], [0] + ) + + # pd_op.transpose: (4x12x32x-1xf32) <- (4x12x-1x32xf32) + transpose_4 = paddle._C_ops.transpose(slice_6, [0, 1, 3, 2]) + del slice_6 + + # pd_op.matmul: (4x12x-1x-1xf32) <- (4x12x-1x32xf32, 4x12x32x-1xf32) + matmul_3 = paddle._C_ops.matmul(slice_5, transpose_4, False, False) + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_29 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_30 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_31 = full_9 + + # pd_op.scale: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_3, full_9, float("0"), True) + del matmul_3 + + # pd_op.add: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1x1x-1x-1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_0) + + # pd_op.softmax: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(add_3, -1) + del add_3 + + # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x-1xf32, 4x12x-1x32xf32) + matmul_4 = paddle._C_ops.matmul(softmax_0, slice_7, False, False) + + # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) + del 
matmul_4 + + # pd_op.full: (xi64) <- () + full_10 = paddle._C_ops.full( + [], float("384"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [full_5, slice_4, full_10] + del slice_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_5, stack_5) + del stack_5 + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) + matmul_5 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) + del parameter_38 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_4 = paddle._C_ops.add(matmul_5, parameter_37) + del parameter_37 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) + add_5 = paddle._C_ops.add(transpose_2, dropout_2) + + # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.shape64: (3xi64) <- (4x-1x384xf32) + shape64_2 = paddle._C_ops.shape64(layer_norm_0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_2 + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_34, False, False) + del parameter_34 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_6 = paddle._C_ops.add(matmul_6, parameter_33) + del parameter_33 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [full_5, slice_8, full_7, full_8] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (4x-1x12x32xf32) <- (4x-1x384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_6, stack_6) + del stack_6 + + # pd_op.transpose: (4x12x-1x32xf32) <- (4x-1x12x32xf32) + transpose_6 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.matmul: (4x40x768xf32) <- (4x40x384xf32, 384x768xf32) + matmul_7 = paddle._C_ops.matmul(matmul_1, parameter_32, False, False) + del parameter_32 + + # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) + add_7 = paddle._C_ops.add(matmul_7, parameter_31) + del parameter_31 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_7 = [0, 40, 2, 12, 32] + + # pd_op.reshape: (4x40x2x12x32xf32) <- (4x40x768xf32, 5xi64) + reshape_3 = paddle._C_ops.reshape(add_7, full_int_array_7) + + # pd_op.transpose: (2x4x12x40x32xf32) <- (4x40x2x12x32xf32) + transpose_7 = paddle._C_ops.transpose(reshape_3, [2, 0, 3, 1, 4]) + del reshape_3 + + # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + transpose_7, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) + slice_10 = 
paddle._C_ops.slice( + transpose_7, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (4x12x32x40xf32) <- (4x12x40x32xf32) + transpose_8 = paddle._C_ops.transpose(slice_9, [0, 1, 3, 2]) + del slice_9 + + # pd_op.matmul: (4x12x-1x40xf32) <- (4x12x-1x32xf32, 4x12x32x40xf32) + matmul_8 = paddle._C_ops.matmul(transpose_6, transpose_8, False, False) + + # pd_op.scale: (4x12x-1x40xf32) <- (4x12x-1x40xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_8, full_9, float("0"), True) + del matmul_8 + + # pd_op.softmax: (4x12x-1x40xf32) <- (4x12x-1x40xf32) + softmax_1 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x40xf32, 4x12x40x32xf32) + matmul_9 = paddle._C_ops.matmul(softmax_1, slice_10, False, False) + + # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(matmul_9, [0, 2, 1, 3]) + del matmul_9 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [full_5, slice_8, full_10] + del slice_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) + reshape_4 = paddle._C_ops.reshape(transpose_9, stack_7) + del stack_7 + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) + matmul_10 = paddle._C_ops.matmul(reshape_4, parameter_30, False, False) + del parameter_30 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_8 = paddle._C_ops.add(matmul_10, parameter_29) + del parameter_29 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) + add_9 = paddle._C_ops.add(layer_norm_0, dropout_4) + + # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_28, parameter_27, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_27, parameter_28 + + # pd_op.matmul: (4x-1x1536xf32) <- (4x-1x384xf32, 384x1536xf32) + matmul_11 = paddle._C_ops.matmul(layer_norm_3, parameter_26, False, False) + del parameter_26 + + # pd_op.add: (4x-1x1536xf32) <- (4x-1x1536xf32, 1536xf32) + add_10 = paddle._C_ops.add(matmul_11, parameter_25) + del parameter_25 + + # pd_op.relu: (4x-1x1536xf32) <- (4x-1x1536xf32) + relu_0 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.dropout: (4x-1x1536xf32, 4x-1x1536xui8) <- (4x-1x1536xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_0, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x1536xf32, 1536x384xf32) + matmul_12 = paddle._C_ops.matmul(dropout_6, parameter_24, False, False) + del parameter_24 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_11 = paddle._C_ops.add(matmul_12, parameter_23) + del parameter_23 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_2, False, "upscale_in_train", 
0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + dropout_8, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del dropout_8 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) + add_12 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_22, parameter_21, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_21, parameter_22 + + # pd_op.shape64: (3xi64) <- (4x-1x384xf32) + shape64_3 = paddle._C_ops.shape64(layer_norm_6) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_3 + + # pd_op.matmul: (4x-1x1152xf32) <- (4x-1x384xf32, 384x1152xf32) + matmul_13 = paddle._C_ops.matmul(layer_norm_6, parameter_20, False, False) + del parameter_20 + + # pd_op.add: (4x-1x1152xf32) <- (4x-1x1152xf32, 1152xf32) + add_13 = paddle._C_ops.add(matmul_13, parameter_19) + del parameter_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_8 = [full_5, slice_11, full_6, full_7, full_8] + del full_6 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.reshape: (4x-1x3x12x32xf32) <- (4x-1x1152xf32, 5xi64) + reshape_5 = paddle._C_ops.reshape(add_13, stack_8) + del stack_8 + + # pd_op.transpose: (3x4x12x-1x32xf32) <- (4x-1x3x12x32xf32) + transpose_10 = paddle._C_ops.transpose(reshape_5, [2, 0, 3, 1, 4]) + del reshape_5 + + # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + transpose_10, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + transpose_10, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + transpose_10, [0], full_int_array_4, full_int_array_6, [1], [0] + ) + + # pd_op.transpose: (4x12x32x-1xf32) <- (4x12x-1x32xf32) + transpose_11 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) + del slice_13 + + # pd_op.matmul: (4x12x-1x-1xf32) <- (4x12x-1x32xf32, 4x12x32x-1xf32) + matmul_14 = paddle._C_ops.matmul(slice_12, transpose_11, False, False) + + # pd_op.scale: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_14, full_9, float("0"), True) + del matmul_14 + + # pd_op.add: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1x1x-1x-1xf32) + add_14 = paddle._C_ops.add(scale_4, unsqueeze_0) + + # pd_op.softmax: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(add_14, -1) + del add_14 + + # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x-1xf32, 4x12x-1x32xf32) + matmul_15 = paddle._C_ops.matmul(softmax_2, slice_14, False, False) + + # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(matmul_15, [0, 2, 1, 3]) + del matmul_15 + + # builtin.combine: ([xi64, xi64, 
xi64]) <- (xi64, xi64, xi64) + combine_9 = [full_5, slice_11, full_10] + del slice_11 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) + reshape_6 = paddle._C_ops.reshape(transpose_12, stack_9) + del stack_9 + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) + matmul_16 = paddle._C_ops.matmul(reshape_6, parameter_18, False, False) + del parameter_18 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_17) + del parameter_17 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_15, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_15 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) + add_16 = paddle._C_ops.add(layer_norm_6, dropout_12) + + # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_16, parameter_16, parameter_15, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_15, parameter_16 + + # pd_op.shape64: (3xi64) <- (4x-1x384xf32) + shape64_4 = paddle._C_ops.shape64(layer_norm_9) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_4 + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) + matmul_17 = paddle._C_ops.matmul(layer_norm_9, parameter_14, False, False) + del parameter_14 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_17 = paddle._C_ops.add(matmul_17, parameter_13) + del parameter_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_10 = [full_5, slice_15, full_7, full_8] + del full_7, full_8 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # pd_op.reshape: (4x-1x12x32xf32) <- (4x-1x384xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(add_17, stack_10) + del stack_10 + + # pd_op.transpose: (4x12x-1x32xf32) <- (4x-1x12x32xf32) + transpose_13 = paddle._C_ops.transpose(reshape_7, [0, 2, 1, 3]) + del reshape_7 + + # pd_op.matmul: (4x40x768xf32) <- (4x40x384xf32, 384x768xf32) + matmul_18 = paddle._C_ops.matmul(matmul_1, parameter_12, False, False) + del parameter_12 + + # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) + add_18 = paddle._C_ops.add(matmul_18, parameter_11) + del parameter_11 + + # pd_op.reshape: (4x40x2x12x32xf32) <- (4x40x768xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_18, full_int_array_7) + del full_int_array_7 + + # pd_op.transpose: (2x4x12x40x32xf32) <- (4x40x2x12x32xf32) + transpose_14 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del full_int_array_1 + + # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4 + + # pd_op.transpose: 
(4x12x32x40xf32) <- (4x12x40x32xf32) + transpose_15 = paddle._C_ops.transpose(slice_16, [0, 1, 3, 2]) + del slice_16 + + # pd_op.matmul: (4x12x-1x40xf32) <- (4x12x-1x32xf32, 4x12x32x40xf32) + matmul_19 = paddle._C_ops.matmul(transpose_13, transpose_15, False, False) + + # pd_op.scale: (4x12x-1x40xf32) <- (4x12x-1x40xf32, 1xf32) + scale_5 = paddle._C_ops.scale(matmul_19, full_9, float("0"), True) + del matmul_19 + + # pd_op.softmax: (4x12x-1x40xf32) <- (4x12x-1x40xf32) + softmax_3 = paddle._C_ops.softmax(scale_5, -1) + del scale_5 + + # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x40xf32, 4x12x40x32xf32) + matmul_20 = paddle._C_ops.matmul(softmax_3, slice_17, False, False) + + # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_11 = [full_5, slice_15, full_10] + del full_10, full_5, slice_15 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_11 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_16, stack_11) + del stack_11 + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) + matmul_21 = paddle._C_ops.matmul(reshape_9, parameter_10, False, False) + del parameter_10 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_19 = paddle._C_ops.add(matmul_21, parameter_9) + del parameter_9 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_19, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_19 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) + add_20 = paddle._C_ops.add(layer_norm_9, dropout_14) + + # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_8, parameter_7, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_7, parameter_8 + + # pd_op.matmul: (4x-1x1536xf32) <- (4x-1x384xf32, 384x1536xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_6, False, False) + del parameter_6 + + # pd_op.add: (4x-1x1536xf32) <- (4x-1x1536xf32, 1536xf32) + add_21 = paddle._C_ops.add(matmul_22, parameter_5) + del parameter_5 + + # pd_op.relu: (4x-1x1536xf32) <- (4x-1x1536xf32) + relu_1 = paddle._C_ops.relu(add_21) + del add_21 + + # pd_op.dropout: (4x-1x1536xf32, 4x-1x1536xui8) <- (4x-1x1536xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_1, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (4x-1x384xf32) <- (4x-1x1536xf32, 1536x384xf32) + matmul_23 = paddle._C_ops.matmul(dropout_16, parameter_4, False, False) + del parameter_4 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_3) + del parameter_3 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_22, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else 
(out, None), + ) + del add_22 + + # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + dropout_18, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del dropout_18 + + # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) + add_23 = paddle._C_ops.add(layer_norm_12, dropout_20) + + # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_2, parameter_1, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_1, parameter_2 + + # pd_op.matmul: (4x-1x6629xf32) <- (4x-1x384xf32, 384x6629xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_15, parameter_0, False, False) + del ( + add_12, + add_13, + add_16, + add_17, + add_18, + add_2, + add_20, + add_23, + add_5, + add_6, + add_7, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_9, + full_1, + full_2, + full_9, + full_int_array_6, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_1, + matmul_10, + matmul_11, + matmul_12, + matmul_13, + matmul_16, + matmul_17, + matmul_18, + matmul_2, + matmul_21, + matmul_22, + matmul_23, + matmul_5, + matmul_6, + matmul_7, + parameter_0, + relu_0, + relu_1, + reshape_1, + reshape_4, + reshape_6, + reshape_9, + scale_2, + scale_4, + slice_1, + slice_10, + slice_12, + slice_14, + slice_17, + slice_3, + slice_5, + slice_7, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_0, + ) + + return matmul_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/weight_meta.py new file mode 100644 index 000000000..81f08147b --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/weight_meta.py @@ -0,0 +1,471 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384, 6629] + dtype = "float32" + min_val = float("-0.342261") + max_val = float("0.948178") + mean = float("0.00919608") + std = float("0.0588392") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-2.01452") + max_val = float("1.49928") + mean = 
float("0.0611141") + std = float("0.42782") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.163422") + max_val = float("8.24113") + mean = float("6.07627") + std = float("1.63106") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.646689") + max_val = float("0.213369") + mean = float("-0.00456252") + std = float("0.0499786") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.60316") + max_val = float("1.60728") + mean = float("5.14812e-05") + std = float("0.118933") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [1536] + dtype = "float32" + min_val = float("-2.77784") + max_val = float("0.305267") + mean = float("-0.701704") + std = float("0.245434") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.471037") + max_val = float("0.399332") + mean = float("-0.016715") + std = float("0.0690504") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-2.65602") + max_val = float("15.7776") + mean = float("0.185458") + std = float("0.82799") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("0.0759988") + max_val = float("2.38861") + mean = float("0.5129") + std = float("0.138242") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384] + dtype = "float32" + min_val = float("-0.648992") + max_val = float("0.794819") + mean = float("0.00363502") + std = float("0.11223") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.506227") + max_val = float("0.487122") + mean = float("-2.28861e-05") + std = float("0.055764") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [768] + dtype = "float32" + min_val = float("-0.815102") + max_val = float("0.8321") + mean = float("0.00773128") + std = float("0.233797") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384, 768] + dtype = "float32" + min_val = float("-0.540834") + max_val = float("0.682762") + mean = float("-0.000107196") + std = float("0.0572062") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-3.03302") + max_val = float("2.79748") + mean = float("-0.00706997") + std = float("0.61245") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.385865") + max_val = float("0.395078") + mean = float("-0.000120641") + std = float("0.0588496") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-2.49454") + max_val = float("1.55107") + mean = float("-0.0496227") + std = float("0.205971") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("-0.106595") + max_val = float("3.05073") + mean = 
float("1.06759") + std = float("0.283875") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("-7.57866") + max_val = float("7.19772") + mean = float("-0.0336126") + std = float("0.758333") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384, 384] + dtype = "float32" + min_val = float("-1.09284") + max_val = float("1.1878") + mean = float("-3.29478e-05") + std = float("0.0636084") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [1152] + dtype = "float32" + min_val = float("-2.83286") + max_val = float("3.12454") + mean = float("-0.0115364") + std = float("0.377304") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.339692") + max_val = float("0.40515") + mean = float("-5.27188e-05") + std = float("0.0561111") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("-0.485227") + max_val = float("0.484989") + mean = float("-0.0342452") + std = float("0.183996") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("-0.00470934") + max_val = float("1.34072") + mean = float("1.06065") + std = float("0.184693") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.324262") + max_val = float("0.421889") + mean = float("0.000407339") + std = float("0.0507274") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.52937") + max_val = float("1.96086") + mean = float("0.000100578") + std = float("0.0423433") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [1536] + dtype = "float32" + min_val = float("-2.05469") + max_val = float("-0.0696607") + mean = float("-0.444252") + std = float("0.32641") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.378399") + max_val = float("0.331921") + mean = float("0.00142004") + std = float("0.0430308") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("-4.4161") + max_val = float("4.29707") + mean = float("-0.0987831") + std = float("0.652449") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("0.596234") + max_val = float("3.95271") + mean = float("0.892283") + std = float("0.286497") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.398408") + max_val = float("0.199058") + mean = float("0.00136061") + std = float("0.0421304") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.70338") + max_val = float("0.63271") + mean = float("3.93469e-05") + std = float("0.0483079") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [768] + dtype = "float32" + min_val = float("-0.907861") + max_val = 
float("1.21747") + mean = float("-0.00558146") + std = float("0.216238") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384, 768] + dtype = "float32" + min_val = float("-0.337297") + max_val = float("0.306511") + mean = float("6.81156e-06") + std = float("0.0484196") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-3.12403") + max_val = float("2.62535") + mean = float("0.0243058") + std = float("0.584006") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.222749") + max_val = float("0.275213") + mean = float("-5.63188e-05") + std = float("0.047597") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-1.38136") + max_val = float("2.5909") + mean = float("0.0208505") + std = float("0.302698") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.114127") + max_val = float("3.84426") + mean = float("1.15762") + std = float("0.36469") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-0.927027") + max_val = float("2.06527") + mean = float("0.00738836") + std = float("0.161") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.301063") + max_val = float("0.380668") + mean = float("2.4739e-05") + std = float("0.0463501") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [1152] + dtype = "float32" + min_val = float("-3.37262") + max_val = float("3.31708") + mean = float("-0.023914") + std = float("0.798284") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.742585") + max_val = float("0.617597") + mean = float("0.000251766") + std = float("0.044023") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [6629, 384] + dtype = "float32" + min_val = float("-0.386902") + max_val = float("0.30948") + mean = float("-0.00467325") + std = float("0.0369254") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.465531") + max_val = float("0.690117") + mean = float("-5.55391e-05") + std = float("0.0506901") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt new file mode 100644 index 000000000..b687ea4b6 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt @@ -0,0 +1 @@ +ea7b35e16430be4ee8516b3678c74d5cb553ebf0a30704fa54ea2196925d8736 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/input_meta.py new file mode 100644 index 000000000..ad5479cbf --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 384, 1, 40] + dtype = "float32" + min_val = float("-5.21401") + max_val = float("3.55319") + mean = float("0.00677272") + std = float("0.368823") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/model.py new file mode 100644 index 000000000..e1dd20b23 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/model.py @@ -0,0 +1,666 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + data_0, + ): + # pd_op.assign: (-1x384x1x40xf32) <- (-1x384x1x40xf32) + assign_0 = data_0 + del data_0 + + # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x384x1x40xf32, 48x384x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + assign_0, parameter_52, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_52 + + # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_47, swish_0 + + # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_46, + parameter_45, + parameter_44, + parameter_43, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, 
None), + ) + del conv2d_1, parameter_43, parameter_44, parameter_45, parameter_46 + + # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) + shape64_0 = paddle._C_ops.shape64(swish_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del shape64_0 + + # pd_op.flatten: (-1x256x40xf32) <- (-1x256x1x40xf32) + flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) + del swish_1 + + # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_42, parameter_41, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_41, parameter_42 + + # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) + del layer_norm_0, parameter_40 + + # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_39) + del matmul_0, parameter_39 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_2 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_0, full_int_array_2) + del add_0 + + # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) + scale_0 = paddle._C_ops.scale(slice_1, full_0, float("0"), True) + del slice_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_1 + + # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) + transpose_2 = paddle._C_ops.transpose(slice_2, [0, 1, 3, 2]) + del slice_2 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) + matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) + del scale_0, transpose_2 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(matmul_1, -1) + del matmul_1 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: 
f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_2 = paddle._C_ops.matmul(dropout_0, slice_3, False, False) + del dropout_0, slice_3 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [0, -1, 256] + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) + del transpose_3 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) + del parameter_38, reshape_1 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_1 = paddle._C_ops.add(matmul_3, parameter_37) + del matmul_3, parameter_37 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_1, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_1 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) + add_2 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_2, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) + del layer_norm_3, parameter_34 + + # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) + add_3 = paddle._C_ops.add(matmul_4, parameter_33) + del matmul_4, parameter_33 + + # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) + swish_2 = paddle._C_ops.swish(add_3) + del add_3 + + # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_2, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_2 + + # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) + matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) + del dropout_4, parameter_32 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) + add_4 = paddle._C_ops.add(matmul_5, parameter_31) + del matmul_5, parameter_31 + + # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) + add_5 = paddle._C_ops.add(add_2, dropout_6) + del add_2, dropout_6 + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_30, 
parameter_29, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_29, parameter_30 + + # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) + del layer_norm_6, parameter_28 + + # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) + add_6 = paddle._C_ops.add(matmul_6, parameter_27) + del matmul_6, parameter_27 + + # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) + reshape_2 = paddle._C_ops.reshape(add_6, full_int_array_2) + del add_6, full_int_array_2 + + # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) + transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) + del reshape_2 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + + # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) + scale_1 = paddle._C_ops.scale(slice_4, full_0, float("0"), True) + del full_0, slice_4 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_4, transpose_4 + + # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(slice_5, [0, 1, 3, 2]) + del slice_5 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) + matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) + del scale_1, transpose_5 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(matmul_7, -1) + del matmul_7 + + # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_8 = paddle._C_ops.matmul(dropout_8, slice_6, False, False) + del dropout_8, slice_6 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) + del full_int_array_5, transpose_6 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) + del parameter_26, reshape_3 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_7 = paddle._C_ops.add(matmul_9, parameter_25) + del matmul_9, parameter_25 + + # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_7, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_7 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) + add_8 = paddle._C_ops.add(add_5, dropout_10) + del add_5, dropout_10 + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 
256xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_8, parameter_24, parameter_23, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_23, parameter_24 + + # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) + del layer_norm_9, parameter_22 + + # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) + add_9 = paddle._C_ops.add(matmul_10, parameter_21) + del matmul_10, parameter_21 + + # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) + swish_3 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + swish_3, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del swish_3 + + # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) + matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) + del dropout_12, parameter_20 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) + add_10 = paddle._C_ops.add(matmul_11, parameter_19) + del matmul_11, parameter_19 + + # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_10, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_10, full_1 + + # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) + add_11 = paddle._C_ops.add(add_8, dropout_14) + del add_8, dropout_14 + + # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, parameter_18, parameter_17, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_11, parameter_17, parameter_18 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_6 = [0, 1, 40, 256] + + # pd_op.reshape: (-1x1x40x256xf32) <- (-1x40x256xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) + del full_int_array_6, layer_norm_12 + + # pd_op.transpose: (-1x256x1x40xf32) <- (-1x1x40x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) + del reshape_4 + + # pd_op.conv2d: (-1x384x1x40xf32) <- (-1x256x1x40xf32, 384x256x1x1xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16, transpose_7 + + # pd_op.batch_norm_: (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.swish: (-1x384x1x40xf32) <- (-1x384x1x40xf32) + swish_4 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # 
pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x384x1x40xf32, -1x384x1x40xf32]) <- (-1x384x1x40xf32, -1x384x1x40xf32) + combine_0 = [assign_0, swish_4] + del assign_0, swish_4 + + # pd_op.concat: (-1x768x1x40xf32) <- ([-1x384x1x40xf32, -1x384x1x40xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_2) + del combine_0, full_2 + + # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x768x1x40xf32, 48x768x1x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + concat_0, parameter_11, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_0, parameter_11 + + # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) + swish_5 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6, swish_5 + + # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) + swish_6 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) + shape64_1 = paddle._C_ops.shape64(swish_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_0, full_int_array_1, [1], [0] + ) + del full_int_array_0, full_int_array_1, shape64_1 + + # pd_op.squeeze: (-1x256x40xf32) <- (-1x256x1x40xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) + del full_int_array_3, swish_6 + + # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) + transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) + del squeeze_0 + + # pd_op.matmul: (-1x40x6625xf32) <- (-1x40x256xf32, 256x6625xf32) + matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) + del parameter_1, transpose_8 + + # pd_op.add: (-1x40x6625xf32) <- (-1x40x6625xf32, 6625xf32) + add_12 = paddle._C_ops.add(matmul_12, parameter_0) + del matmul_12, parameter_0 + + # pd_op.softmax: (-1x40x6625xf32) <- (-1x40x6625xf32) + softmax_0 = paddle._C_ops.softmax(add_12, 2) + del add_12 + + return softmax_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/weight_meta.py 
new file mode 100644 index 000000000..bf36c68d4 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/weight_meta.py @@ -0,0 +1,565 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [6625] + dtype = "float32" + min_val = float("-1.46597") + max_val = float("1.53145") + mean = float("-0.235656") + std = float("0.128753") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256, 6625] + dtype = "float32" + min_val = float("-0.825597") + max_val = float("0.384394") + mean = float("-0.217128") + std = float("0.0879434") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-3.44799") + max_val = float("9.33653") + mean = float("-0.0178818") + std = float("1.09478") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256] + dtype = "float32" + min_val = float("1.4685") + max_val = float("4.88037") + mean = float("2.7111") + std = float("0.50817") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("0.0124112") + max_val = float("0.221146") + mean = float("0.0370696") + std = float("0.0241856") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256] + dtype = "float32" + min_val = float("-1.56002") + max_val = float("1.06439") + mean = float("0.247325") + std = float("0.414336") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [256, 48, 1, 1] + dtype = "float32" + min_val = float("-0.346063") + max_val = float("0.203151") + mean = float("0.00157656") + std = float("0.0454131") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [48, 768, 1, 3] + dtype = "float32" + min_val = float("-0.411515") + max_val = float("0.445292") + mean = float("0.000223959") + std = float("0.0461216") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-2.40199") + max_val = float("0.0944552") + mean = float("-1.07426") + std = float("0.346892") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.39211") + max_val = float("2.96053") + mean = float("0.795937") + std = float("0.2202") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.154412") + max_val = float("2.49152") + mean = float("0.444497") + std = float("0.23015") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-1.22803") + max_val = 
float("1.95997") + mean = float("0.174858") + std = float("0.540679") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 256, 1, 1] + dtype = "float32" + min_val = float("-0.331627") + max_val = float("0.248219") + mean = float("0.000731438") + std = float("0.044942") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256] + dtype = "float32" + min_val = float("-1.32629") + max_val = float("1.39354") + mean = float("0.00891438") + std = float("0.384027") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [256] + dtype = "float32" + min_val = float("0.24556") + max_val = float("2.12018") + mean = float("1.17895") + std = float("0.297276") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [256] + dtype = "float32" + min_val = float("-2.38278") + max_val = float("2.51975") + mean = float("-0.0174621") + std = float("0.463879") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [512, 256] + dtype = "float32" + min_val = float("-0.766091") + max_val = float("0.648558") + mean = float("9.66252e-05") + std = float("0.0692842") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [512] + dtype = "float32" + min_val = float("-1.723") + max_val = float("-0.0499443") + mean = float("-0.691033") + std = float("0.191916") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256, 512] + dtype = "float32" + min_val = float("-0.483874") + max_val = float("0.3465") + mean = float("-0.0108961") + std = float("0.0584003") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("-0.829863") + max_val = float("3.54468") + mean = float("0.401273") + std = float("0.553268") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("0.516836") + max_val = float("2.65586") + mean = float("1.58753") + std = float("0.33914") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256] + dtype = "float32" + min_val = float("-0.912488") + max_val = float("0.805287") + mean = float("0.0130542") + std = float("0.181094") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.334787") + max_val = float("0.384853") + mean = float("3.68162e-05") + std = float("0.0566386") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [768] + dtype = "float32" + min_val = float("-2.12336") + max_val = float("2.02898") + mean = float("0.014515") + std = float("0.38241") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.518178") + max_val = float("0.46375") + mean = float("-2.81217e-05") + std = float("0.0523803") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("-1.36796") + max_val = float("0.605412") + mean = float("0.0180544") + std = float("0.298778") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("0.0894259") + 
max_val = float("1.58745") + mean = float("0.962722") + std = float("0.218984") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [256] + dtype = "float32" + min_val = float("-1.50767") + max_val = float("0.606274") + mean = float("-0.016677") + std = float("0.218997") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [512, 256] + dtype = "float32" + min_val = float("-0.572787") + max_val = float("0.523545") + mean = float("-0.000352103") + std = float("0.0600868") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [512] + dtype = "float32" + min_val = float("-1.37236") + max_val = float("0.0974118") + mean = float("-0.599186") + std = float("0.309987") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256, 512] + dtype = "float32" + min_val = float("-0.803425") + max_val = float("0.390108") + mean = float("-0.0149656") + std = float("0.0631599") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("-1.37596") + max_val = float("2.50826") + mean = float("0.558037") + std = float("0.613565") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-0.00451118") + max_val = float("2.37945") + mean = float("1.32673") + std = float("0.35236") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256] + dtype = "float32" + min_val = float("-0.889473") + max_val = float("0.347882") + mean = float("-0.000677475") + std = float("0.115036") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.295005") + max_val = float("0.237079") + mean = float("-8.20122e-05") + std = float("0.0510661") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-1.65256") + max_val = float("1.70355") + mean = float("-0.00392284") + std = float("0.421056") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.328245") + max_val = float("0.456591") + mean = float("6.15425e-05") + std = float("0.0509074") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [256] + dtype = "float32" + min_val = float("-0.626027") + max_val = float("0.458634") + mean = float("0.0475809") + std = float("0.165043") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = float("-0.235583") + max_val = float("1.06059") + mean = float("0.472906") + std = float("0.228653") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [256] + dtype = "float32" + min_val = float("-1.87845") + max_val = float("2.96015") + mean = float("0.0196129") + std = float("0.837465") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [256] + dtype = "float32" + min_val = float("0.54729") + max_val = float("5.24025") + mean = float("1.85161") + std = float("0.675141") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256] + dtype = "float32" + min_val = 
float("0.0188482") + max_val = float("0.759877") + mean = float("0.0915536") + std = float("0.104839") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-2.47059") + max_val = float("2.24563") + mean = float("-0.0717596") + std = float("0.816748") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256, 48, 1, 1] + dtype = "float32" + min_val = float("-0.397248") + max_val = float("0.601075") + mean = float("-0.00041253") + std = float("0.0452287") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [48, 384, 1, 3] + dtype = "float32" + min_val = float("-0.494518") + max_val = float("0.342539") + mean = float("0.000191539") + std = float("0.0402968") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt new file mode 100644 index 000000000..06c3fb687 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt @@ -0,0 +1 @@ +584a9945a71de056f2fa7b5aee34258b6d4468e900decc0f3fdb80a4beb1db67 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/input_meta.py new file mode 100644 index 000000000..08c4f0044 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 3, 48, 320] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("-0.0236384") + std = float("0.223792") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/model.py new file mode 100644 index 000000000..cda318456 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/model.py @@ -0,0 +1,129 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + data_0, + data_1, + ): + # 
pd_op.conv2d: (-1x64x-1x160xf32) <- (-1x3x-1x320xf32, 64x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_1, parameter_11, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_1, parameter_11 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + del parameter_10 + + # pd_op.add: (-1x64x-1x160xf32) <- (-1x64x-1x160xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + del conv2d_0, reshape_0 + + # pd_op.batch_norm_: (-1x64x-1x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + add_0, + parameter_9, + parameter_8, + parameter_7, + parameter_6, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del add_0, parameter_6, parameter_7, parameter_8, parameter_9 + + # pd_op.gelu: (-1x64x-1x160xf32) <- (-1x64x-1x160xf32) + gelu_1 = paddle._C_ops.gelu(batch_norm__0, False) + del batch_norm__0 + + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x64x-1x160xf32, 128x64x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + gelu_1, parameter_5, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del gelu_1, parameter_5 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del full_int_array_0, parameter_4 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_1) + del conv2d_1, reshape_1 + + # pd_op.batch_norm_: (-1x128x-1x80xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x80xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + add_1, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del add_1, parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.gelu: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32) + gelu_0 = paddle._C_ops.gelu(batch_norm__6, False) + del batch_norm__6 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_0, full_0) + del data_0, full_0 + + return gelu_0, floor_divide_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/weight_meta.py new file mode 100644 index 000000000..8b16b0073 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/weight_meta.py @@ -0,0 +1,120 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [128] + dtype = "float32" + min_val = float("-1.27581") + max_val = float("0.533685") + mean = float("-0.209089") + std = float("0.3565") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [128] + dtype = "float32" + min_val = float("1.05545") + max_val = 
float("5.05056") + mean = float("2.2024") + std = float("0.741249") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [128] + dtype = "float32" + min_val = float("29.5206") + max_val = float("2412.19") + mean = float("341.44") + std = float("329.668") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [128] + dtype = "float32" + min_val = float("-47.1082") + max_val = float("117.919") + mean = float("12.0747") + std = float("27.3301") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [128] + dtype = "float32" + min_val = float("-0.0298378") + max_val = float("0.0201253") + mean = float("0.00175348") + std = float("0.00726275") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [128, 64, 3, 3] + dtype = "float32" + min_val = float("-5.5545") + max_val = float("4.24816") + mean = float("0.00147952") + std = float("0.453711") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [64, 3, 3, 3] + dtype = "float32" + min_val = float("-1.85515") + max_val = float("1.90975") + mean = float("-0.00199541") + std = float("0.457543") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt new file mode 100644 index 000000000..2fe83a635 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt @@ -0,0 +1 @@ +870cef134bc6ea0a9c621405ce6cf9b325c923873385c00ce1cf267d3c8399a9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/input_meta.py new file mode 100644 index 000000000..763e77ebf --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [4, 3, 64, 320] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("-0.139868") + std = float("0.426517") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/model.py new file 
mode 100644 index 000000000..aeee0edfc --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/model.py @@ -0,0 +1,5013 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + 
parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + data_0, + ): + # pd_op.conv2d: (4x64x32x160xf32) <- (4x3x64x320xf32, 64x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_219, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_219 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_218, full_int_array_0) + del parameter_218 + + # pd_op.add: (4x64x32x160xf32) <- (4x64x32x160xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + + # pd_op.batch_norm_: (4x64x32x160xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (4x64x32x160xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + add_0, + parameter_217, + parameter_216, + parameter_215, + parameter_214, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_214, parameter_215, parameter_216, parameter_217 + + # pd_op.gelu: (4x64x32x160xf32) <- (4x64x32x160xf32) + gelu_0 = paddle._C_ops.gelu(batch_norm__0, False) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x64x32x160xf32, 128x64x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + gelu_0, parameter_213, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_213 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_212, full_int_array_0) + del parameter_212 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_1) + + # pd_op.batch_norm_: (4x128x16x80xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (4x128x16x80xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + add_1, + parameter_211, + parameter_210, + parameter_209, + parameter_208, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_208, parameter_209, parameter_210, parameter_211 + + # pd_op.gelu: (4x128x16x80xf32) <- (4x128x16x80xf32) + gelu_1 = paddle._C_ops.gelu(batch_norm__6, False) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x128x16x80xf32, 128x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + gelu_1, parameter_207, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_207 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_206, full_int_array_0) + del parameter_206 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_2) + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 4x128x16x80xf32) + add_3 = paddle._C_ops.add(gelu_1, add_2) + + # pd_op.flatten: (4x128x1280xf32) <- (4x128x16x80xf32) + flatten_0 = paddle._C_ops.flatten(add_3, 2, 3) + + # pd_op.transpose: 
(4x1280x128xf32) <- (4x128x1280xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_205, parameter_204, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_204, parameter_205 + + # pd_op.matmul: (4x1280x512xf32) <- (4x1280x128xf32, 128x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_203, False, False) + del parameter_203 + + # pd_op.add: (4x1280x512xf32) <- (4x1280x512xf32, 512xf32) + add_4 = paddle._C_ops.add(matmul_0, parameter_202) + del parameter_202 + + # pd_op.gelu: (4x1280x512xf32) <- (4x1280x512xf32) + gelu_2 = paddle._C_ops.gelu(add_4, False) + + # pd_op.matmul: (4x1280x128xf32) <- (4x1280x512xf32, 512x128xf32) + matmul_1 = paddle._C_ops.matmul(gelu_2, parameter_201, False, False) + del parameter_201 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 128xf32) + add_5 = paddle._C_ops.add(matmul_1, parameter_200) + del parameter_200 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 4x1280x128xf32) + add_6 = paddle._C_ops.add(layer_norm_0, add_5) + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_6, parameter_199, parameter_198, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_198, parameter_199 + + # pd_op.transpose: (4x128x1280xf32) <- (4x1280x128xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_3, [0, 2, 1]) + del layer_norm_3 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [0, 128, 16, 80] + + # pd_op.reshape: (4x128x16x80xf32) <- (4x128x1280xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(transpose_1, full_int_array_1) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x128x16x80xf32, 128x32x5x5xf32) + conv2d_3 = paddle._C_ops.conv2d( + reshape_3, parameter_197, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_197 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_196, full_int_array_0) + del parameter_196 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.full: (xf64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__0 = paddle._C_ops.assign_value_( + full_0, + [], + paddle.float64, + [float("0.994118")], + paddle.framework._current_expected_place(), + ) + del full_0 + + # pd_op.cast: (xf32) <- (xf64) + cast_0 = paddle._C_ops.cast(assign_value__0, paddle.float32) + del assign_value__0 + + # pd_op.shape64: (4xi64) <- (4x128x16x80xf32) + shape64_0 = paddle._C_ops.shape64(add_7) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = 
full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_2 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_14 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_15 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_3 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [slice_0, full_1, full_1, full_1] + del slice_0 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_0 = paddle._C_ops.uniform( + stack_0, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_0 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_8 = paddle._C_ops.add(cast_0, uniform_0) + del uniform_0 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_0 = paddle._C_ops.floor(add_8) + del add_8 + + # pd_op.divide: (4x128x16x80xf32) <- (4x128x16x80xf32, xf32) + divide_0 = paddle._C_ops.divide(add_7, cast_0) + + # pd_op.multiply: (4x128x16x80xf32) <- (4x128x16x80xf32, -1x1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(divide_0, floor_0) + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 4x128x16x80xf32) + add_9 = paddle._C_ops.add(reshape_3, multiply_0) + + # pd_op.flatten: (4x128x1280xf32) <- (4x128x16x80xf32) + flatten_1 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (4x1280x128xf32) <- (4x128x1280xf32) + transpose_2 = 
paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_195, parameter_194, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_194, parameter_195 + + # pd_op.matmul: (4x1280x512xf32) <- (4x1280x128xf32, 128x512xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_6, parameter_193, False, False) + del parameter_193 + + # pd_op.add: (4x1280x512xf32) <- (4x1280x512xf32, 512xf32) + add_10 = paddle._C_ops.add(matmul_2, parameter_192) + del parameter_192 + + # pd_op.gelu: (4x1280x512xf32) <- (4x1280x512xf32) + gelu_3 = paddle._C_ops.gelu(add_10, False) + + # pd_op.matmul: (4x1280x128xf32) <- (4x1280x512xf32, 512x128xf32) + matmul_3 = paddle._C_ops.matmul(gelu_3, parameter_191, False, False) + del parameter_191 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 128xf32) + add_11 = paddle._C_ops.add(matmul_3, parameter_190) + del parameter_190 + + # pd_op.full: (xf64) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__1 = paddle._C_ops.assign_value_( + full_4, + [], + paddle.float64, + [float("0.994118")], + paddle.framework._current_expected_place(), + ) + del full_4 + + # pd_op.cast: (xf32) <- (xf64) + cast_1 = paddle._C_ops.cast(assign_value__1, paddle.float32) + del assign_value__1 + + # pd_op.shape64: (3xi64) <- (4x1280x128xf32) + shape64_1 = paddle._C_ops.shape64(add_11) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [slice_1, full_1, full_1] + del slice_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_1 = paddle._C_ops.uniform( + stack_1, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_1 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_12 = paddle._C_ops.add(cast_1, uniform_1) + del uniform_1 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_1 = paddle._C_ops.floor(add_12) + del add_12 + + # pd_op.divide: (4x1280x128xf32) <- (4x1280x128xf32, xf32) + divide_1 = paddle._C_ops.divide(add_11, cast_1) + + # pd_op.multiply: (4x1280x128xf32) <- (4x1280x128xf32, -1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(divide_1, floor_1) + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 4x1280x128xf32) + add_13 = paddle._C_ops.add(layer_norm_6, multiply_1) + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_13, parameter_189, parameter_188, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_188, parameter_189 + + # pd_op.transpose: (4x128x1280xf32) <- (4x1280x128xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_9, [0, 2, 1]) + del layer_norm_9 + + # pd_op.reshape: (4x128x16x80xf32) <- (4x128x1280xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(transpose_3, 
full_int_array_1) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x128x16x80xf32, 128x32x5x5xf32) + conv2d_4 = paddle._C_ops.conv2d( + reshape_5, parameter_187, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_187 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_186, full_int_array_0) + del parameter_186 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_4, reshape_6) + + # pd_op.full: (xf64) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__2 = paddle._C_ops.assign_value_( + full_5, + [], + paddle.float64, + [float("0.988235")], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.cast: (xf32) <- (xf64) + cast_2 = paddle._C_ops.cast(assign_value__2, paddle.float32) + del assign_value__2 + + # pd_op.shape64: (4xi64) <- (4x128x16x80xf32) + shape64_2 = paddle._C_ops.shape64(add_14) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_2 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [slice_2, full_1, full_1, full_1] + del slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_2 = paddle._C_ops.uniform( + stack_2, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_2 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_15 = paddle._C_ops.add(cast_2, uniform_2) + del uniform_2 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_2 = paddle._C_ops.floor(add_15) + del add_15 + + # pd_op.divide: (4x128x16x80xf32) <- (4x128x16x80xf32, xf32) + divide_2 = paddle._C_ops.divide(add_14, cast_2) + + # pd_op.multiply: (4x128x16x80xf32) <- (4x128x16x80xf32, -1x1x1x1xf32) + multiply_2 = paddle._C_ops.multiply(divide_2, floor_2) + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 4x128x16x80xf32) + add_16 = paddle._C_ops.add(reshape_5, multiply_2) + + # pd_op.flatten: (4x128x1280xf32) <- (4x128x16x80xf32) + flatten_2 = paddle._C_ops.flatten(add_16, 2, 3) + + # pd_op.transpose: (4x1280x128xf32) <- (4x128x1280xf32) + transpose_4 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_4, parameter_185, parameter_184, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_184, parameter_185 + + # pd_op.matmul: (4x1280x512xf32) <- (4x1280x128xf32, 128x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_12, parameter_183, False, False) + del parameter_183 + + # pd_op.add: (4x1280x512xf32) <- (4x1280x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_4, parameter_182) + del parameter_182 + + # pd_op.gelu: (4x1280x512xf32) <- (4x1280x512xf32) + gelu_4 = paddle._C_ops.gelu(add_17, False) + + # pd_op.matmul: (4x1280x128xf32) <- (4x1280x512xf32, 512x128xf32) + matmul_5 = paddle._C_ops.matmul(gelu_4, parameter_181, False, False) + del parameter_181 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 
128xf32) + add_18 = paddle._C_ops.add(matmul_5, parameter_180) + del parameter_180 + + # pd_op.full: (xf64) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__3 = paddle._C_ops.assign_value_( + full_6, + [], + paddle.float64, + [float("0.988235")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.cast: (xf32) <- (xf64) + cast_3 = paddle._C_ops.cast(assign_value__3, paddle.float32) + del assign_value__3 + + # pd_op.shape64: (3xi64) <- (4x1280x128xf32) + shape64_3 = paddle._C_ops.shape64(add_18) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [slice_3, full_1, full_1] + del slice_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_3 = paddle._C_ops.uniform( + stack_3, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_3 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_19 = paddle._C_ops.add(cast_3, uniform_3) + del uniform_3 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_3 = paddle._C_ops.floor(add_19) + del add_19 + + # pd_op.divide: (4x1280x128xf32) <- (4x1280x128xf32, xf32) + divide_3 = paddle._C_ops.divide(add_18, cast_3) + + # pd_op.multiply: (4x1280x128xf32) <- (4x1280x128xf32, -1x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_3, floor_3) + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 4x1280x128xf32) + add_20 = paddle._C_ops.add(layer_norm_12, multiply_3) + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_179, parameter_178, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_178, parameter_179 + + # pd_op.transpose: (4x128x1280xf32) <- (4x1280x128xf32) + transpose_5 = paddle._C_ops.transpose(layer_norm_15, [0, 2, 1]) + del layer_norm_15 + + # pd_op.reshape: (4x128x16x80xf32) <- (4x128x1280xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(transpose_5, full_int_array_1) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x128x16x80xf32, 128x32x5x5xf32) + conv2d_5 = paddle._C_ops.conv2d( + reshape_7, parameter_177, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_177 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_176, full_int_array_0) + del parameter_176 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_5, reshape_8) + + # pd_op.full: (xf64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__4 = paddle._C_ops.assign_value_( + full_7, + [], + paddle.float64, + [float("0.982353")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.cast: (xf32) <- (xf64) + cast_4 = paddle._C_ops.cast(assign_value__4, paddle.float32) + del assign_value__4 + + # pd_op.shape64: (4xi64) <- (4x128x16x80xf32) + shape64_4 = paddle._C_ops.shape64(add_21) + 
+ # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_4 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [slice_4, full_1, full_1, full_1] + del slice_4 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_4 = paddle._C_ops.uniform( + stack_4, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_4 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_22 = paddle._C_ops.add(cast_4, uniform_4) + del uniform_4 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_4 = paddle._C_ops.floor(add_22) + del add_22 + + # pd_op.divide: (4x128x16x80xf32) <- (4x128x16x80xf32, xf32) + divide_4 = paddle._C_ops.divide(add_21, cast_4) + + # pd_op.multiply: (4x128x16x80xf32) <- (4x128x16x80xf32, -1x1x1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_4, floor_4) + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 4x128x16x80xf32) + add_23 = paddle._C_ops.add(reshape_7, multiply_4) + + # pd_op.flatten: (4x128x1280xf32) <- (4x128x16x80xf32) + flatten_3 = paddle._C_ops.flatten(add_23, 2, 3) + + # pd_op.transpose: (4x1280x128xf32) <- (4x128x1280xf32) + transpose_6 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_6, parameter_175, parameter_174, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_174, parameter_175 + + # pd_op.matmul: (4x1280x512xf32) <- (4x1280x128xf32, 128x512xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_18, parameter_173, False, False) + del parameter_173 + + # pd_op.add: (4x1280x512xf32) <- (4x1280x512xf32, 512xf32) + add_24 = paddle._C_ops.add(matmul_6, parameter_172) + del parameter_172 + + # pd_op.gelu: (4x1280x512xf32) <- (4x1280x512xf32) + gelu_5 = paddle._C_ops.gelu(add_24, False) + + # pd_op.matmul: (4x1280x128xf32) <- (4x1280x512xf32, 512x128xf32) + matmul_7 = paddle._C_ops.matmul(gelu_5, parameter_171, False, False) + del parameter_171 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 128xf32) + add_25 = paddle._C_ops.add(matmul_7, parameter_170) + del parameter_170 + + # pd_op.full: (xf64) <- () + full_8 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__5 = paddle._C_ops.assign_value_( + full_8, + [], + paddle.float64, + [float("0.982353")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.cast: (xf32) <- (xf64) + cast_5 = paddle._C_ops.cast(assign_value__5, paddle.float32) + del assign_value__5 + + # pd_op.shape64: (3xi64) <- (4x1280x128xf32) + shape64_5 = paddle._C_ops.shape64(add_25) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [slice_5, full_1, full_1] + del slice_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # 
pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_5 = paddle._C_ops.uniform( + stack_5, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_5 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_26 = paddle._C_ops.add(cast_5, uniform_5) + del uniform_5 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_5 = paddle._C_ops.floor(add_26) + del add_26 + + # pd_op.divide: (4x1280x128xf32) <- (4x1280x128xf32, xf32) + divide_5 = paddle._C_ops.divide(add_25, cast_5) + + # pd_op.multiply: (4x1280x128xf32) <- (4x1280x128xf32, -1x1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_5, floor_5) + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 4x1280x128xf32) + add_27 = paddle._C_ops.add(layer_norm_18, multiply_5) + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_27, parameter_169, parameter_168, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_168, parameter_169 + + # pd_op.transpose: (4x128x1280xf32) <- (4x1280x128xf32) + transpose_7 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.reshape: (4x128x16x80xf32) <- (4x128x1280xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(transpose_7, full_int_array_1) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x128x16x80xf32, 128x32x5x5xf32) + conv2d_6 = paddle._C_ops.conv2d( + reshape_9, parameter_167, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_167 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_166, full_int_array_0) + del parameter_166 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.full: (xf64) <- () + full_9 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__6 = paddle._C_ops.assign_value_( + full_9, + [], + paddle.float64, + [float("0.976471")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.cast: (xf32) <- (xf64) + cast_6 = paddle._C_ops.cast(assign_value__6, paddle.float32) + del assign_value__6 + + # pd_op.shape64: (4xi64) <- (4x128x16x80xf32) + shape64_6 = paddle._C_ops.shape64(add_28) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_6 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [slice_6, full_1, full_1, full_1] + del slice_6 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_6 = paddle._C_ops.uniform( + stack_6, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_6 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_29 = paddle._C_ops.add(cast_6, uniform_6) + del uniform_6 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_6 = paddle._C_ops.floor(add_29) + del add_29 + + # pd_op.divide: (4x128x16x80xf32) <- (4x128x16x80xf32, xf32) + divide_6 = paddle._C_ops.divide(add_28, cast_6) + + # pd_op.multiply: (4x128x16x80xf32) <- (4x128x16x80xf32, -1x1x1x1xf32) + 
multiply_6 = paddle._C_ops.multiply(divide_6, floor_6) + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 4x128x16x80xf32) + add_30 = paddle._C_ops.add(reshape_9, multiply_6) + + # pd_op.flatten: (4x128x1280xf32) <- (4x128x16x80xf32) + flatten_4 = paddle._C_ops.flatten(add_30, 2, 3) + + # pd_op.transpose: (4x1280x128xf32) <- (4x128x1280xf32) + transpose_8 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_24, layer_norm_25, layer_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_8, parameter_165, parameter_164, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_164, parameter_165 + + # pd_op.matmul: (4x1280x512xf32) <- (4x1280x128xf32, 128x512xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_24, parameter_163, False, False) + del parameter_163 + + # pd_op.add: (4x1280x512xf32) <- (4x1280x512xf32, 512xf32) + add_31 = paddle._C_ops.add(matmul_8, parameter_162) + del parameter_162 + + # pd_op.gelu: (4x1280x512xf32) <- (4x1280x512xf32) + gelu_6 = paddle._C_ops.gelu(add_31, False) + + # pd_op.matmul: (4x1280x128xf32) <- (4x1280x512xf32, 512x128xf32) + matmul_9 = paddle._C_ops.matmul(gelu_6, parameter_161, False, False) + del parameter_161 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 128xf32) + add_32 = paddle._C_ops.add(matmul_9, parameter_160) + del parameter_160 + + # pd_op.full: (xf64) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__7 = paddle._C_ops.assign_value_( + full_10, + [], + paddle.float64, + [float("0.976471")], + paddle.framework._current_expected_place(), + ) + del full_10 + + # pd_op.cast: (xf32) <- (xf64) + cast_7 = paddle._C_ops.cast(assign_value__7, paddle.float32) + del assign_value__7 + + # pd_op.shape64: (3xi64) <- (4x1280x128xf32) + shape64_7 = paddle._C_ops.shape64(add_32) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_7 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [slice_7, full_1, full_1] + del slice_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_7 = paddle._C_ops.uniform( + stack_7, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_7 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_33 = paddle._C_ops.add(cast_7, uniform_7) + del uniform_7 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_7 = paddle._C_ops.floor(add_33) + del add_33 + + # pd_op.divide: (4x1280x128xf32) <- (4x1280x128xf32, xf32) + divide_7 = paddle._C_ops.divide(add_32, cast_7) + + # pd_op.multiply: (4x1280x128xf32) <- (4x1280x128xf32, -1x1x1xf32) + multiply_7 = paddle._C_ops.multiply(divide_7, floor_7) + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 4x1280x128xf32) + add_34 = paddle._C_ops.add(layer_norm_24, multiply_7) + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_27, layer_norm_28, layer_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_34, parameter_159, parameter_158, float("1e-06"), 2 + ), + lambda 
out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_158, parameter_159 + + # pd_op.transpose: (4x128x1280xf32) <- (4x1280x128xf32) + transpose_9 = paddle._C_ops.transpose(layer_norm_27, [0, 2, 1]) + del layer_norm_27 + + # pd_op.reshape: (4x128x16x80xf32) <- (4x128x1280xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(transpose_9, full_int_array_1) + + # pd_op.conv2d: (4x128x16x80xf32) <- (4x128x16x80xf32, 128x32x5x5xf32) + conv2d_7 = paddle._C_ops.conv2d( + reshape_11, parameter_157, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_157 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_156, full_int_array_0) + del parameter_156 + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 1x128x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_7, reshape_12) + + # pd_op.full: (xf64) <- () + full_11 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__8 = paddle._C_ops.assign_value_( + full_11, + [], + paddle.float64, + [float("0.970588")], + paddle.framework._current_expected_place(), + ) + del full_11 + + # pd_op.cast: (xf32) <- (xf64) + cast_8 = paddle._C_ops.cast(assign_value__8, paddle.float32) + del assign_value__8 + + # pd_op.shape64: (4xi64) <- (4x128x16x80xf32) + shape64_8 = paddle._C_ops.shape64(add_35) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_8 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_8 = [slice_8, full_1, full_1, full_1] + del slice_8 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_8 = paddle._C_ops.uniform( + stack_8, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_8 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_36 = paddle._C_ops.add(cast_8, uniform_8) + del uniform_8 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_8 = paddle._C_ops.floor(add_36) + del add_36 + + # pd_op.divide: (4x128x16x80xf32) <- (4x128x16x80xf32, xf32) + divide_8 = paddle._C_ops.divide(add_35, cast_8) + + # pd_op.multiply: (4x128x16x80xf32) <- (4x128x16x80xf32, -1x1x1x1xf32) + multiply_8 = paddle._C_ops.multiply(divide_8, floor_8) + + # pd_op.add: (4x128x16x80xf32) <- (4x128x16x80xf32, 4x128x16x80xf32) + add_37 = paddle._C_ops.add(reshape_11, multiply_8) + + # pd_op.flatten: (4x128x1280xf32) <- (4x128x16x80xf32) + flatten_5 = paddle._C_ops.flatten(add_37, 2, 3) + + # pd_op.transpose: (4x1280x128xf32) <- (4x128x1280xf32) + transpose_10 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_30, layer_norm_31, layer_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_10, parameter_155, parameter_154, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_154, parameter_155 + + # pd_op.matmul: (4x1280x512xf32) <- (4x1280x128xf32, 128x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_30, parameter_153, False, False) + del parameter_153 + + # pd_op.add: (4x1280x512xf32) <- (4x1280x512xf32, 512xf32) + add_38 = 
paddle._C_ops.add(matmul_10, parameter_152) + del parameter_152 + + # pd_op.gelu: (4x1280x512xf32) <- (4x1280x512xf32) + gelu_7 = paddle._C_ops.gelu(add_38, False) + + # pd_op.matmul: (4x1280x128xf32) <- (4x1280x512xf32, 512x128xf32) + matmul_11 = paddle._C_ops.matmul(gelu_7, parameter_151, False, False) + del parameter_151 + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 128xf32) + add_39 = paddle._C_ops.add(matmul_11, parameter_150) + del parameter_150 + + # pd_op.full: (xf64) <- () + full_12 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__9 = paddle._C_ops.assign_value_( + full_12, + [], + paddle.float64, + [float("0.970588")], + paddle.framework._current_expected_place(), + ) + del full_12 + + # pd_op.cast: (xf32) <- (xf64) + cast_9 = paddle._C_ops.cast(assign_value__9, paddle.float32) + del assign_value__9 + + # pd_op.shape64: (3xi64) <- (4x1280x128xf32) + shape64_9 = paddle._C_ops.shape64(add_39) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_9 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_9 = [slice_9, full_1, full_1] + del slice_9 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_9 = paddle._C_ops.uniform( + stack_9, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_9 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_40 = paddle._C_ops.add(cast_9, uniform_9) + del uniform_9 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_9 = paddle._C_ops.floor(add_40) + del add_40 + + # pd_op.divide: (4x1280x128xf32) <- (4x1280x128xf32, xf32) + divide_9 = paddle._C_ops.divide(add_39, cast_9) + + # pd_op.multiply: (4x1280x128xf32) <- (4x1280x128xf32, -1x1x1xf32) + multiply_9 = paddle._C_ops.multiply(divide_9, floor_9) + + # pd_op.add: (4x1280x128xf32) <- (4x1280x128xf32, 4x1280x128xf32) + add_41 = paddle._C_ops.add(layer_norm_30, multiply_9) + + # pd_op.layer_norm: (4x1280x128xf32, 4x1280xf32, 4x1280xf32) <- (4x1280x128xf32, 128xf32, 128xf32) + layer_norm_33, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_41, parameter_149, parameter_148, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_148, parameter_149 + + # pd_op.transpose: (4x128x1280xf32) <- (4x1280x128xf32) + transpose_11 = paddle._C_ops.transpose(layer_norm_33, [0, 2, 1]) + del layer_norm_33 + + # pd_op.reshape: (4x128x16x80xf32) <- (4x128x1280xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(transpose_11, full_int_array_1) + del full_int_array_1 + + # pd_op.conv2d: (4x256x8x80xf32) <- (4x128x16x80xf32, 256x128x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + reshape_13, parameter_147, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_147 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_146, full_int_array_0) + del parameter_146 + + # pd_op.add: (4x256x8x80xf32) <- (4x256x8x80xf32, 1x256x1x1xf32) + add_42 = paddle._C_ops.add(conv2d_8, reshape_14) + + # pd_op.flatten: (4x256x640xf32) <- (4x256x8x80xf32) + flatten_6 = paddle._C_ops.flatten(add_42, 2, 3) + + # pd_op.transpose: (4x640x256xf32) <- 
(4x256x640xf32) + transpose_12 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_36, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_12, parameter_145, parameter_144, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_144, parameter_145 + + # pd_op.transpose: (4x256x640xf32) <- (4x640x256xf32) + transpose_13 = paddle._C_ops.transpose(layer_norm_36, [0, 2, 1]) + del layer_norm_36 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_4 = [0, 256, 8, 80] + + # pd_op.reshape: (4x256x8x80xf32) <- (4x256x640xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(transpose_13, full_int_array_4) + + # pd_op.conv2d: (4x256x8x80xf32) <- (4x256x8x80xf32, 256x32x5x5xf32) + conv2d_9 = paddle._C_ops.conv2d( + reshape_15, parameter_143, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_143 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_142, full_int_array_0) + del parameter_142 + + # pd_op.add: (4x256x8x80xf32) <- (4x256x8x80xf32, 1x256x1x1xf32) + add_43 = paddle._C_ops.add(conv2d_9, reshape_16) + + # pd_op.full: (xf64) <- () + full_13 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__10 = paddle._C_ops.assign_value_( + full_13, + [], + paddle.float64, + [float("0.964706")], + paddle.framework._current_expected_place(), + ) + del full_13 + + # pd_op.cast: (xf32) <- (xf64) + cast_10 = paddle._C_ops.cast(assign_value__10, paddle.float32) + del assign_value__10 + + # pd_op.shape64: (4xi64) <- (4x256x8x80xf32) + shape64_10 = paddle._C_ops.shape64(add_43) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_10 = [slice_10, full_1, full_1, full_1] + del slice_10 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_10 = paddle._C_ops.uniform( + stack_10, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_10 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_44 = paddle._C_ops.add(cast_10, uniform_10) + del uniform_10 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_10 = paddle._C_ops.floor(add_44) + del add_44 + + # pd_op.divide: (4x256x8x80xf32) <- (4x256x8x80xf32, xf32) + divide_10 = paddle._C_ops.divide(add_43, cast_10) + + # pd_op.multiply: (4x256x8x80xf32) <- (4x256x8x80xf32, -1x1x1x1xf32) + multiply_10 = paddle._C_ops.multiply(divide_10, floor_10) + + # pd_op.add: (4x256x8x80xf32) <- (4x256x8x80xf32, 4x256x8x80xf32) + add_45 = paddle._C_ops.add(reshape_15, multiply_10) + + # pd_op.flatten: (4x256x640xf32) <- (4x256x8x80xf32) + flatten_7 = paddle._C_ops.flatten(add_45, 2, 3) + + # pd_op.transpose: (4x640x256xf32) <- (4x256x640xf32) + transpose_14 = paddle._C_ops.transpose(flatten_7, [0, 2, 1]) + del flatten_7 + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_39, layer_norm_40, layer_norm_41 = (lambda x, f: f(x))( + 
paddle._C_ops.layer_norm( + transpose_14, parameter_141, parameter_140, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_140, parameter_141 + + # pd_op.matmul: (4x640x1024xf32) <- (4x640x256xf32, 256x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_39, parameter_139, False, False) + del parameter_139 + + # pd_op.add: (4x640x1024xf32) <- (4x640x1024xf32, 1024xf32) + add_46 = paddle._C_ops.add(matmul_12, parameter_138) + del parameter_138 + + # pd_op.gelu: (4x640x1024xf32) <- (4x640x1024xf32) + gelu_8 = paddle._C_ops.gelu(add_46, False) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x1024xf32, 1024x256xf32) + matmul_13 = paddle._C_ops.matmul(gelu_8, parameter_137, False, False) + del parameter_137 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_47 = paddle._C_ops.add(matmul_13, parameter_136) + del parameter_136 + + # pd_op.full: (xf64) <- () + full_14 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__11 = paddle._C_ops.assign_value_( + full_14, + [], + paddle.float64, + [float("0.964706")], + paddle.framework._current_expected_place(), + ) + del full_14 + + # pd_op.cast: (xf32) <- (xf64) + cast_11 = paddle._C_ops.cast(assign_value__11, paddle.float32) + del assign_value__11 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_11 = paddle._C_ops.shape64(add_47) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_11 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_11 = [slice_11, full_1, full_1] + del slice_11 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_11 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_11 = paddle._C_ops.uniform( + stack_11, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_11 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_48 = paddle._C_ops.add(cast_11, uniform_11) + del uniform_11 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_11 = paddle._C_ops.floor(add_48) + del add_48 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_11 = paddle._C_ops.divide(add_47, cast_11) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_11 = paddle._C_ops.multiply(divide_11, floor_11) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_49 = paddle._C_ops.add(layer_norm_39, multiply_11) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_42, layer_norm_43, layer_norm_44 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_49, parameter_135, parameter_134, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_134, parameter_135 + + # pd_op.transpose: (4x256x640xf32) <- (4x640x256xf32) + transpose_15 = paddle._C_ops.transpose(layer_norm_42, [0, 2, 1]) + del layer_norm_42 + + # pd_op.reshape: (4x256x8x80xf32) <- (4x256x640xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(transpose_15, full_int_array_4) + + # pd_op.conv2d: (4x256x8x80xf32) <- (4x256x8x80xf32, 256x32x5x5xf32) + conv2d_10 = paddle._C_ops.conv2d( + reshape_17, parameter_133, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del 
parameter_133 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_132, full_int_array_0) + del parameter_132 + + # pd_op.add: (4x256x8x80xf32) <- (4x256x8x80xf32, 1x256x1x1xf32) + add_50 = paddle._C_ops.add(conv2d_10, reshape_18) + + # pd_op.full: (xf64) <- () + full_15 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__12 = paddle._C_ops.assign_value_( + full_15, + [], + paddle.float64, + [float("0.958824")], + paddle.framework._current_expected_place(), + ) + del full_15 + + # pd_op.cast: (xf32) <- (xf64) + cast_12 = paddle._C_ops.cast(assign_value__12, paddle.float32) + del assign_value__12 + + # pd_op.shape64: (4xi64) <- (4x256x8x80xf32) + shape64_12 = paddle._C_ops.shape64(add_50) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_12 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_12 = [slice_12, full_1, full_1, full_1] + del slice_12 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_12 = paddle._C_ops.stack(combine_12, 0) + del combine_12 + + # pd_op.uniform: (-1x1x1x1xf32) <- (4xi64, 1xf32, 1xf32) + uniform_12 = paddle._C_ops.uniform( + stack_12, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_12 + + # pd_op.add: (-1x1x1x1xf32) <- (xf32, -1x1x1x1xf32) + add_51 = paddle._C_ops.add(cast_12, uniform_12) + del uniform_12 + + # pd_op.floor: (-1x1x1x1xf32) <- (-1x1x1x1xf32) + floor_12 = paddle._C_ops.floor(add_51) + del add_51 + + # pd_op.divide: (4x256x8x80xf32) <- (4x256x8x80xf32, xf32) + divide_12 = paddle._C_ops.divide(add_50, cast_12) + + # pd_op.multiply: (4x256x8x80xf32) <- (4x256x8x80xf32, -1x1x1x1xf32) + multiply_12 = paddle._C_ops.multiply(divide_12, floor_12) + + # pd_op.add: (4x256x8x80xf32) <- (4x256x8x80xf32, 4x256x8x80xf32) + add_52 = paddle._C_ops.add(reshape_17, multiply_12) + + # pd_op.flatten: (4x256x640xf32) <- (4x256x8x80xf32) + flatten_8 = paddle._C_ops.flatten(add_52, 2, 3) + + # pd_op.transpose: (4x640x256xf32) <- (4x256x640xf32) + transpose_16 = paddle._C_ops.transpose(flatten_8, [0, 2, 1]) + del flatten_8 + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_45, layer_norm_46, layer_norm_47 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_16, parameter_131, parameter_130, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_130, parameter_131 + + # pd_op.matmul: (4x640x1024xf32) <- (4x640x256xf32, 256x1024xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_45, parameter_129, False, False) + del parameter_129 + + # pd_op.add: (4x640x1024xf32) <- (4x640x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_14, parameter_128) + del parameter_128 + + # pd_op.gelu: (4x640x1024xf32) <- (4x640x1024xf32) + gelu_9 = paddle._C_ops.gelu(add_53, False) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x1024xf32, 1024x256xf32) + matmul_15 = paddle._C_ops.matmul(gelu_9, parameter_127, False, False) + del parameter_127 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_54 = paddle._C_ops.add(matmul_15, parameter_126) + del parameter_126 + + # pd_op.full: (xf64) <- () + full_16 = paddle._C_ops.full( + [], float("0"), paddle.float64, 
paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__13 = paddle._C_ops.assign_value_( + full_16, + [], + paddle.float64, + [float("0.958824")], + paddle.framework._current_expected_place(), + ) + del full_16 + + # pd_op.cast: (xf32) <- (xf64) + cast_13 = paddle._C_ops.cast(assign_value__13, paddle.float32) + del assign_value__13 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_13 = paddle._C_ops.shape64(add_54) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_13, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_13 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_13 = [slice_13, full_1, full_1] + del slice_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_13 = paddle._C_ops.stack(combine_13, 0) + del combine_13 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_13 = paddle._C_ops.uniform( + stack_13, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_13 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_55 = paddle._C_ops.add(cast_13, uniform_13) + del uniform_13 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_13 = paddle._C_ops.floor(add_55) + del add_55 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_13 = paddle._C_ops.divide(add_54, cast_13) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_13 = paddle._C_ops.multiply(divide_13, floor_13) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_56 = paddle._C_ops.add(layer_norm_45, multiply_13) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_48, layer_norm_49, layer_norm_50 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_56, parameter_125, parameter_124, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_124, parameter_125 + + # pd_op.transpose: (4x256x640xf32) <- (4x640x256xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_48, [0, 2, 1]) + del layer_norm_48 + + # pd_op.reshape: (4x256x8x80xf32) <- (4x256x640xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(transpose_17, full_int_array_4) + + # pd_op.flatten: (4x256x640xf32) <- (4x256x8x80xf32) + flatten_9 = paddle._C_ops.flatten(reshape_19, 2, 3) + + # pd_op.transpose: (4x640x256xf32) <- (4x256x640xf32) + transpose_18 = paddle._C_ops.transpose(flatten_9, [0, 2, 1]) + del flatten_9 + + # pd_op.matmul: (4x640x768xf32) <- (4x640x256xf32, 256x768xf32) + matmul_16 = paddle._C_ops.matmul(transpose_18, parameter_123, False, False) + del parameter_123 + + # pd_op.add: (4x640x768xf32) <- (4x640x768xf32, 768xf32) + add_57 = paddle._C_ops.add(matmul_16, parameter_122) + del parameter_122 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_5 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (4x640x3x8x32xf32) <- (4x640x768xf32, 5xi64) + reshape_20 = paddle._C_ops.reshape(add_57, full_int_array_5) + + # pd_op.transpose: (3x4x8x640x32xf32) <- (4x640x3x8x32xf32) + transpose_19 = paddle._C_ops.transpose(reshape_20, [2, 0, 3, 1, 4]) + del reshape_20 + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + transpose_19, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = 
full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_6 + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + transpose_19, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_55 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_56 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_57 = full_int_array_7 + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + transpose_19, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x8x32x640xf32) <- (4x8x640x32xf32) + transpose_20 = paddle._C_ops.transpose(slice_15, [0, 1, 3, 2]) + del slice_15 + + # pd_op.matmul: (4x8x640x640xf32) <- (4x8x640x32xf32, 4x8x32x640xf32) + matmul_17 = paddle._C_ops.matmul(slice_14, transpose_20, False, False) + + # pd_op.full: (1xf32) <- () + full_17 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_58 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_59 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_60 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_17 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_17 + + # pd_op.scale: (4x8x640x640xf32) <- (4x8x640x640xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_17, full_17, float("0"), True) + del matmul_17 + + # pd_op.softmax: (4x8x640x640xf32) <- (4x8x640x640xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) 
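+ # NOTE (added commentary, not produced by the graph exporter): the ops above are
+ # one unrolled multi-head self-attention step: the 256 -> 768 matmul is a fused
+ # qkv projection, reshaped to (B, N, 3, heads=8, head_dim=32) and sliced into
+ # q/k/v, and the scale 0.176777 is 1/sqrt(32). A rough high-level sketch of the
+ # same computation (hypothetical q, k, v tensors) would be:
+ #   attn = paddle.nn.functional.softmax(
+ #       paddle.matmul(q, k, transpose_y=True) / 32 ** 0.5, axis=-1)
+ #   out = paddle.matmul(attn, v)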
+ del scale_0 + + # pd_op.matmul: (4x8x640x32xf32) <- (4x8x640x640xf32, 4x8x640x32xf32) + matmul_18 = paddle._C_ops.matmul(softmax_0, slice_16, False, False) + + # pd_op.transpose: (4x640x8x32xf32) <- (4x8x640x32xf32) + transpose_21 = paddle._C_ops.transpose(matmul_18, [0, 2, 1, 3]) + del matmul_18 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, -1, 256] + + # pd_op.reshape: (4x640x256xf32) <- (4x640x8x32xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(transpose_21, full_int_array_8) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x256xf32, 256x256xf32) + matmul_19 = paddle._C_ops.matmul(reshape_21, parameter_121, False, False) + del parameter_121 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_58 = paddle._C_ops.add(matmul_19, parameter_120) + del parameter_120 + + # pd_op.full: (xf64) <- () + full_18 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__14 = paddle._C_ops.assign_value_( + full_18, + [], + paddle.float64, + [float("0.952941")], + paddle.framework._current_expected_place(), + ) + del full_18 + + # pd_op.cast: (xf32) <- (xf64) + cast_14 = paddle._C_ops.cast(assign_value__14, paddle.float32) + del assign_value__14 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_14 = paddle._C_ops.shape64(add_58) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + shape64_14, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_14 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_14 = [slice_17, full_1, full_1] + del slice_17 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_14 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_14 = paddle._C_ops.uniform( + stack_14, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_14 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_59 = paddle._C_ops.add(cast_14, uniform_14) + del uniform_14 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_14 = paddle._C_ops.floor(add_59) + del add_59 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_14 = paddle._C_ops.divide(add_58, cast_14) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_14 = paddle._C_ops.multiply(divide_14, floor_14) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_60 = paddle._C_ops.add(transpose_18, multiply_14) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_51, layer_norm_52, layer_norm_53 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_60, parameter_119, parameter_118, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_118, parameter_119 + + # pd_op.matmul: (4x640x1024xf32) <- (4x640x256xf32, 256x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_51, parameter_117, False, False) + del parameter_117 + + # pd_op.add: (4x640x1024xf32) <- (4x640x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_20, parameter_116) + del parameter_116 + + # pd_op.gelu: (4x640x1024xf32) <- (4x640x1024xf32) + gelu_10 = paddle._C_ops.gelu(add_61, False) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x1024xf32, 1024x256xf32) + matmul_21 = paddle._C_ops.matmul(gelu_10, parameter_115, False, False) + del parameter_115 + + # 
pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_62 = paddle._C_ops.add(matmul_21, parameter_114) + del parameter_114 + + # pd_op.full: (xf64) <- () + full_19 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__15 = paddle._C_ops.assign_value_( + full_19, + [], + paddle.float64, + [float("0.952941")], + paddle.framework._current_expected_place(), + ) + del full_19 + + # pd_op.cast: (xf32) <- (xf64) + cast_15 = paddle._C_ops.cast(assign_value__15, paddle.float32) + del assign_value__15 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_15 = paddle._C_ops.shape64(add_62) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + shape64_15, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_15 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [slice_18, full_1, full_1] + del slice_18 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_15 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_15 = paddle._C_ops.uniform( + stack_15, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_15 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_63 = paddle._C_ops.add(cast_15, uniform_15) + del uniform_15 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_15 = paddle._C_ops.floor(add_63) + del add_63 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_15 = paddle._C_ops.divide(add_62, cast_15) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_15 = paddle._C_ops.multiply(divide_15, floor_15) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_64 = paddle._C_ops.add(layer_norm_51, multiply_15) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_54, layer_norm_55, layer_norm_56 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_64, parameter_113, parameter_112, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_112, parameter_113 + + # pd_op.matmul: (4x640x768xf32) <- (4x640x256xf32, 256x768xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_54, parameter_111, False, False) + del parameter_111 + + # pd_op.add: (4x640x768xf32) <- (4x640x768xf32, 768xf32) + add_65 = paddle._C_ops.add(matmul_22, parameter_110) + del parameter_110 + + # pd_op.reshape: (4x640x3x8x32xf32) <- (4x640x768xf32, 5xi64) + reshape_22 = paddle._C_ops.reshape(add_65, full_int_array_5) + + # pd_op.transpose: (3x4x8x640x32xf32) <- (4x640x3x8x32xf32) + transpose_22 = paddle._C_ops.transpose(reshape_22, [2, 0, 3, 1, 4]) + del reshape_22 + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + transpose_22, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + transpose_22, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + transpose_22, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x8x32x640xf32) <- (4x8x640x32xf32) + transpose_23 = paddle._C_ops.transpose(slice_20, [0, 1, 3, 2]) + del slice_20 + + # 
pd_op.matmul: (4x8x640x640xf32) <- (4x8x640x32xf32, 4x8x32x640xf32) + matmul_23 = paddle._C_ops.matmul(slice_19, transpose_23, False, False) + + # pd_op.scale: (4x8x640x640xf32) <- (4x8x640x640xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_23, full_17, float("0"), True) + del matmul_23 + + # pd_op.softmax: (4x8x640x640xf32) <- (4x8x640x640xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.matmul: (4x8x640x32xf32) <- (4x8x640x640xf32, 4x8x640x32xf32) + matmul_24 = paddle._C_ops.matmul(softmax_1, slice_21, False, False) + + # pd_op.transpose: (4x640x8x32xf32) <- (4x8x640x32xf32) + transpose_24 = paddle._C_ops.transpose(matmul_24, [0, 2, 1, 3]) + del matmul_24 + + # pd_op.reshape: (4x640x256xf32) <- (4x640x8x32xf32, 3xi64) + reshape_23 = paddle._C_ops.reshape(transpose_24, full_int_array_8) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x256xf32, 256x256xf32) + matmul_25 = paddle._C_ops.matmul(reshape_23, parameter_109, False, False) + del parameter_109 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_66 = paddle._C_ops.add(matmul_25, parameter_108) + del parameter_108 + + # pd_op.full: (xf64) <- () + full_20 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__16 = paddle._C_ops.assign_value_( + full_20, + [], + paddle.float64, + [float("0.947059")], + paddle.framework._current_expected_place(), + ) + del full_20 + + # pd_op.cast: (xf32) <- (xf64) + cast_16 = paddle._C_ops.cast(assign_value__16, paddle.float32) + del assign_value__16 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_16 = paddle._C_ops.shape64(add_66) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + shape64_16, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_16 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_16 = [slice_22, full_1, full_1] + del slice_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_16 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_16 = paddle._C_ops.uniform( + stack_16, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_16 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_67 = paddle._C_ops.add(cast_16, uniform_16) + del uniform_16 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_16 = paddle._C_ops.floor(add_67) + del add_67 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_16 = paddle._C_ops.divide(add_66, cast_16) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_16 = paddle._C_ops.multiply(divide_16, floor_16) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_68 = paddle._C_ops.add(layer_norm_54, multiply_16) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_57, layer_norm_58, layer_norm_59 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_68, parameter_107, parameter_106, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_106, parameter_107 + + # pd_op.matmul: (4x640x1024xf32) <- (4x640x256xf32, 256x1024xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_57, parameter_105, False, False) + del parameter_105 + + # pd_op.add: (4x640x1024xf32) <- (4x640x1024xf32, 1024xf32) + add_69 = 
paddle._C_ops.add(matmul_26, parameter_104) + del parameter_104 + + # pd_op.gelu: (4x640x1024xf32) <- (4x640x1024xf32) + gelu_11 = paddle._C_ops.gelu(add_69, False) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x1024xf32, 1024x256xf32) + matmul_27 = paddle._C_ops.matmul(gelu_11, parameter_103, False, False) + del parameter_103 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_70 = paddle._C_ops.add(matmul_27, parameter_102) + del parameter_102 + + # pd_op.full: (xf64) <- () + full_21 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__17 = paddle._C_ops.assign_value_( + full_21, + [], + paddle.float64, + [float("0.947059")], + paddle.framework._current_expected_place(), + ) + del full_21 + + # pd_op.cast: (xf32) <- (xf64) + cast_17 = paddle._C_ops.cast(assign_value__17, paddle.float32) + del assign_value__17 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_17 = paddle._C_ops.shape64(add_70) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + shape64_17, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_17 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [slice_23, full_1, full_1] + del slice_23 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_17 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_17 = paddle._C_ops.uniform( + stack_17, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_17 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_71 = paddle._C_ops.add(cast_17, uniform_17) + del uniform_17 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_17 = paddle._C_ops.floor(add_71) + del add_71 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_17 = paddle._C_ops.divide(add_70, cast_17) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_17 = paddle._C_ops.multiply(divide_17, floor_17) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_72 = paddle._C_ops.add(layer_norm_57, multiply_17) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_60, layer_norm_61, layer_norm_62 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_101, parameter_100, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_100, parameter_101 + + # pd_op.matmul: (4x640x768xf32) <- (4x640x256xf32, 256x768xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_60, parameter_99, False, False) + del parameter_99 + + # pd_op.add: (4x640x768xf32) <- (4x640x768xf32, 768xf32) + add_73 = paddle._C_ops.add(matmul_28, parameter_98) + del parameter_98 + + # pd_op.reshape: (4x640x3x8x32xf32) <- (4x640x768xf32, 5xi64) + reshape_24 = paddle._C_ops.reshape(add_73, full_int_array_5) + + # pd_op.transpose: (3x4x8x640x32xf32) <- (4x640x3x8x32xf32) + transpose_25 = paddle._C_ops.transpose(reshape_24, [2, 0, 3, 1, 4]) + del reshape_24 + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + transpose_25, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + transpose_25, [0], full_int_array_3, full_int_array_6, [1], [0] 
+ ) + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + transpose_25, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x8x32x640xf32) <- (4x8x640x32xf32) + transpose_26 = paddle._C_ops.transpose(slice_25, [0, 1, 3, 2]) + del slice_25 + + # pd_op.matmul: (4x8x640x640xf32) <- (4x8x640x32xf32, 4x8x32x640xf32) + matmul_29 = paddle._C_ops.matmul(slice_24, transpose_26, False, False) + + # pd_op.scale: (4x8x640x640xf32) <- (4x8x640x640xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_29, full_17, float("0"), True) + del matmul_29 + + # pd_op.softmax: (4x8x640x640xf32) <- (4x8x640x640xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.matmul: (4x8x640x32xf32) <- (4x8x640x640xf32, 4x8x640x32xf32) + matmul_30 = paddle._C_ops.matmul(softmax_2, slice_26, False, False) + + # pd_op.transpose: (4x640x8x32xf32) <- (4x8x640x32xf32) + transpose_27 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (4x640x256xf32) <- (4x640x8x32xf32, 3xi64) + reshape_25 = paddle._C_ops.reshape(transpose_27, full_int_array_8) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x256xf32, 256x256xf32) + matmul_31 = paddle._C_ops.matmul(reshape_25, parameter_97, False, False) + del parameter_97 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_74 = paddle._C_ops.add(matmul_31, parameter_96) + del parameter_96 + + # pd_op.full: (xf64) <- () + full_22 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__18 = paddle._C_ops.assign_value_( + full_22, + [], + paddle.float64, + [float("0.941176")], + paddle.framework._current_expected_place(), + ) + del full_22 + + # pd_op.cast: (xf32) <- (xf64) + cast_18 = paddle._C_ops.cast(assign_value__18, paddle.float32) + del assign_value__18 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_18 = paddle._C_ops.shape64(add_74) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + shape64_18, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_18 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [slice_27, full_1, full_1] + del slice_27 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_18 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_18 = paddle._C_ops.uniform( + stack_18, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_18 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_75 = paddle._C_ops.add(cast_18, uniform_18) + del uniform_18 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_18 = paddle._C_ops.floor(add_75) + del add_75 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_18 = paddle._C_ops.divide(add_74, cast_18) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_18 = paddle._C_ops.multiply(divide_18, floor_18) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_76 = paddle._C_ops.add(layer_norm_60, multiply_18) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_63, layer_norm_64, layer_norm_65 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_76, parameter_95, parameter_94, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) 
else (out, None, None), + ) + del parameter_94, parameter_95 + + # pd_op.matmul: (4x640x1024xf32) <- (4x640x256xf32, 256x1024xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_63, parameter_93, False, False) + del parameter_93 + + # pd_op.add: (4x640x1024xf32) <- (4x640x1024xf32, 1024xf32) + add_77 = paddle._C_ops.add(matmul_32, parameter_92) + del parameter_92 + + # pd_op.gelu: (4x640x1024xf32) <- (4x640x1024xf32) + gelu_12 = paddle._C_ops.gelu(add_77, False) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x1024xf32, 1024x256xf32) + matmul_33 = paddle._C_ops.matmul(gelu_12, parameter_91, False, False) + del parameter_91 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_78 = paddle._C_ops.add(matmul_33, parameter_90) + del parameter_90 + + # pd_op.full: (xf64) <- () + full_23 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__19 = paddle._C_ops.assign_value_( + full_23, + [], + paddle.float64, + [float("0.941176")], + paddle.framework._current_expected_place(), + ) + del full_23 + + # pd_op.cast: (xf32) <- (xf64) + cast_19 = paddle._C_ops.cast(assign_value__19, paddle.float32) + del assign_value__19 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_19 = paddle._C_ops.shape64(add_78) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_28 = paddle._C_ops.slice( + shape64_19, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_19 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [slice_28, full_1, full_1] + del slice_28 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_19 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_19 = paddle._C_ops.uniform( + stack_19, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_19 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_79 = paddle._C_ops.add(cast_19, uniform_19) + del uniform_19 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_19 = paddle._C_ops.floor(add_79) + del add_79 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_19 = paddle._C_ops.divide(add_78, cast_19) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_19 = paddle._C_ops.multiply(divide_19, floor_19) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_80 = paddle._C_ops.add(layer_norm_63, multiply_19) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_66, layer_norm_67, layer_norm_68 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_80, parameter_89, parameter_88, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_88, parameter_89 + + # pd_op.matmul: (4x640x768xf32) <- (4x640x256xf32, 256x768xf32) + matmul_34 = paddle._C_ops.matmul(layer_norm_66, parameter_87, False, False) + del parameter_87 + + # pd_op.add: (4x640x768xf32) <- (4x640x768xf32, 768xf32) + add_81 = paddle._C_ops.add(matmul_34, parameter_86) + del parameter_86 + + # pd_op.reshape: (4x640x3x8x32xf32) <- (4x640x768xf32, 5xi64) + reshape_26 = paddle._C_ops.reshape(add_81, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (3x4x8x640x32xf32) <- (4x640x3x8x32xf32) + transpose_28 = paddle._C_ops.transpose(reshape_26, [2, 0, 3, 1, 4]) + del reshape_26 + + # pd_op.slice: 
(4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_29 = paddle._C_ops.slice( + transpose_28, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_30 = paddle._C_ops.slice( + transpose_28, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x8x640x32xf32) <- (3x4x8x640x32xf32, 1xi64, 1xi64) + slice_31 = paddle._C_ops.slice( + transpose_28, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x8x32x640xf32) <- (4x8x640x32xf32) + transpose_29 = paddle._C_ops.transpose(slice_30, [0, 1, 3, 2]) + del slice_30 + + # pd_op.matmul: (4x8x640x640xf32) <- (4x8x640x32xf32, 4x8x32x640xf32) + matmul_35 = paddle._C_ops.matmul(slice_29, transpose_29, False, False) + + # pd_op.scale: (4x8x640x640xf32) <- (4x8x640x640xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_35, full_17, float("0"), True) + del matmul_35 + + # pd_op.softmax: (4x8x640x640xf32) <- (4x8x640x640xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (4x8x640x32xf32) <- (4x8x640x640xf32, 4x8x640x32xf32) + matmul_36 = paddle._C_ops.matmul(softmax_3, slice_31, False, False) + + # pd_op.transpose: (4x640x8x32xf32) <- (4x8x640x32xf32) + transpose_30 = paddle._C_ops.transpose(matmul_36, [0, 2, 1, 3]) + del matmul_36 + + # pd_op.reshape: (4x640x256xf32) <- (4x640x8x32xf32, 3xi64) + reshape_27 = paddle._C_ops.reshape(transpose_30, full_int_array_8) + del full_int_array_8 + + # pd_op.matmul: (4x640x256xf32) <- (4x640x256xf32, 256x256xf32) + matmul_37 = paddle._C_ops.matmul(reshape_27, parameter_85, False, False) + del parameter_85 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_82 = paddle._C_ops.add(matmul_37, parameter_84) + del parameter_84 + + # pd_op.full: (xf64) <- () + full_24 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__20 = paddle._C_ops.assign_value_( + full_24, + [], + paddle.float64, + [float("0.935294")], + paddle.framework._current_expected_place(), + ) + del full_24 + + # pd_op.cast: (xf32) <- (xf64) + cast_20 = paddle._C_ops.cast(assign_value__20, paddle.float32) + del assign_value__20 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_20 = paddle._C_ops.shape64(add_82) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_32 = paddle._C_ops.slice( + shape64_20, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_20 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_20 = [slice_32, full_1, full_1] + del slice_32 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_20 = paddle._C_ops.stack(combine_20, 0) + del combine_20 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_20 = paddle._C_ops.uniform( + stack_20, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_20 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_83 = paddle._C_ops.add(cast_20, uniform_20) + del uniform_20 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_20 = paddle._C_ops.floor(add_83) + del add_83 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_20 = paddle._C_ops.divide(add_82, cast_20) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_20 = paddle._C_ops.multiply(divide_20, floor_20) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_84 = 
paddle._C_ops.add(layer_norm_66, multiply_20) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_69, layer_norm_70, layer_norm_71 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_84, parameter_83, parameter_82, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_82, parameter_83 + + # pd_op.matmul: (4x640x1024xf32) <- (4x640x256xf32, 256x1024xf32) + matmul_38 = paddle._C_ops.matmul(layer_norm_69, parameter_81, False, False) + del parameter_81 + + # pd_op.add: (4x640x1024xf32) <- (4x640x1024xf32, 1024xf32) + add_85 = paddle._C_ops.add(matmul_38, parameter_80) + del parameter_80 + + # pd_op.gelu: (4x640x1024xf32) <- (4x640x1024xf32) + gelu_13 = paddle._C_ops.gelu(add_85, False) + + # pd_op.matmul: (4x640x256xf32) <- (4x640x1024xf32, 1024x256xf32) + matmul_39 = paddle._C_ops.matmul(gelu_13, parameter_79, False, False) + del parameter_79 + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 256xf32) + add_86 = paddle._C_ops.add(matmul_39, parameter_78) + del parameter_78 + + # pd_op.full: (xf64) <- () + full_25 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__21 = paddle._C_ops.assign_value_( + full_25, + [], + paddle.float64, + [float("0.935294")], + paddle.framework._current_expected_place(), + ) + del full_25 + + # pd_op.cast: (xf32) <- (xf64) + cast_21 = paddle._C_ops.cast(assign_value__21, paddle.float32) + del assign_value__21 + + # pd_op.shape64: (3xi64) <- (4x640x256xf32) + shape64_21 = paddle._C_ops.shape64(add_86) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_33 = paddle._C_ops.slice( + shape64_21, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_21 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_21 = [slice_33, full_1, full_1] + del slice_33 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_21 = paddle._C_ops.stack(combine_21, 0) + del combine_21 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_21 = paddle._C_ops.uniform( + stack_21, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_21 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_87 = paddle._C_ops.add(cast_21, uniform_21) + del uniform_21 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_21 = paddle._C_ops.floor(add_87) + del add_87 + + # pd_op.divide: (4x640x256xf32) <- (4x640x256xf32, xf32) + divide_21 = paddle._C_ops.divide(add_86, cast_21) + + # pd_op.multiply: (4x640x256xf32) <- (4x640x256xf32, -1x1x1xf32) + multiply_21 = paddle._C_ops.multiply(divide_21, floor_21) + + # pd_op.add: (4x640x256xf32) <- (4x640x256xf32, 4x640x256xf32) + add_88 = paddle._C_ops.add(layer_norm_69, multiply_21) + + # pd_op.layer_norm: (4x640x256xf32, 4x640xf32, 4x640xf32) <- (4x640x256xf32, 256xf32, 256xf32) + layer_norm_72, layer_norm_73, layer_norm_74 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_88, parameter_77, parameter_76, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_76, parameter_77 + + # pd_op.transpose: (4x256x640xf32) <- (4x640x256xf32) + transpose_31 = paddle._C_ops.transpose(layer_norm_72, [0, 2, 1]) + del layer_norm_72 + + # pd_op.reshape: (4x256x8x80xf32) <- (4x256x640xf32, 4xi64) + reshape_28 = paddle._C_ops.reshape(transpose_31, full_int_array_4) + 
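+ # NOTE (added commentary, not produced by the graph exporter): judging from the
+ # shapes, the conv2d that follows is the between-stage merging/down-sampling step:
+ # a 3x3 conv with stride [2, 1] halves the feature height (8 -> 4), keeps the
+ # width (80), widens channels 256 -> 384, and the result is flattened, transposed
+ # and layer-normalized back into a (B, H*W, C) token sequence. A minimal
+ # functional sketch, assuming hypothetical weight/bias tensors w and b:
+ #   y = paddle.nn.functional.conv2d(x, w, b, stride=[2, 1], padding=[1, 1])
+ #   y = paddle.transpose(paddle.flatten(y, 2, 3), [0, 2, 1])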
del full_int_array_4 + + # pd_op.conv2d: (4x384x4x80xf32) <- (4x256x8x80xf32, 384x256x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + reshape_28, parameter_75, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_75 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_29 = paddle._C_ops.reshape(parameter_74, full_int_array_0) + del full_int_array_0, parameter_74 + + # pd_op.add: (4x384x4x80xf32) <- (4x384x4x80xf32, 1x384x1x1xf32) + add_89 = paddle._C_ops.add(conv2d_11, reshape_29) + + # pd_op.flatten: (4x384x320xf32) <- (4x384x4x80xf32) + flatten_10 = paddle._C_ops.flatten(add_89, 2, 3) + + # pd_op.transpose: (4x320x384xf32) <- (4x384x320xf32) + transpose_32 = paddle._C_ops.transpose(flatten_10, [0, 2, 1]) + del flatten_10 + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_75, layer_norm_76, layer_norm_77 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_32, parameter_73, parameter_72, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_72, parameter_73 + + # pd_op.matmul: (4x320x1152xf32) <- (4x320x384xf32, 384x1152xf32) + matmul_40 = paddle._C_ops.matmul(layer_norm_75, parameter_71, False, False) + del parameter_71 + + # pd_op.add: (4x320x1152xf32) <- (4x320x1152xf32, 1152xf32) + add_90 = paddle._C_ops.add(matmul_40, parameter_70) + del parameter_70 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_9 = [0, -1, 3, 12, 32] + + # pd_op.reshape: (4x320x3x12x32xf32) <- (4x320x1152xf32, 5xi64) + reshape_30 = paddle._C_ops.reshape(add_90, full_int_array_9) + + # pd_op.transpose: (3x4x12x320x32xf32) <- (4x320x3x12x32xf32) + transpose_33 = paddle._C_ops.transpose(reshape_30, [2, 0, 3, 1, 4]) + del reshape_30 + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_34 = paddle._C_ops.slice( + transpose_33, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_35 = paddle._C_ops.slice( + transpose_33, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_36 = paddle._C_ops.slice( + transpose_33, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x12x32x320xf32) <- (4x12x320x32xf32) + transpose_34 = paddle._C_ops.transpose(slice_35, [0, 1, 3, 2]) + del slice_35 + + # pd_op.matmul: (4x12x320x320xf32) <- (4x12x320x32xf32, 4x12x32x320xf32) + matmul_41 = paddle._C_ops.matmul(slice_34, transpose_34, False, False) + + # pd_op.scale: (4x12x320x320xf32) <- (4x12x320x320xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_41, full_17, float("0"), True) + del matmul_41 + + # pd_op.softmax: (4x12x320x320xf32) <- (4x12x320x320xf32) + softmax_4 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.matmul: (4x12x320x32xf32) <- (4x12x320x320xf32, 4x12x320x32xf32) + matmul_42 = paddle._C_ops.matmul(softmax_4, slice_36, False, False) + + # pd_op.transpose: (4x320x12x32xf32) <- (4x12x320x32xf32) + transpose_35 = paddle._C_ops.transpose(matmul_42, [0, 2, 1, 3]) + del matmul_42 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_10 = [0, -1, 384] + + # pd_op.reshape: (4x320x384xf32) <- (4x320x12x32xf32, 3xi64) + reshape_31 = paddle._C_ops.reshape(transpose_35, full_int_array_10) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x384xf32, 384x384xf32) + matmul_43 = paddle._C_ops.matmul(reshape_31, parameter_69, False, False) 
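+ # NOTE (added commentary, not produced by the graph exporter): the recurring
+ # full / assign_value_ / uniform / floor / divide / multiply sequence that follows
+ # (and appears after every residual branch above) is stochastic depth (DropPath)
+ # unrolled by the exporter, with the keep probability baked in as a constant
+ # (0.929412 here, decreasing block by block). With keep probability p it is
+ # roughly equivalent to:
+ #   mask = paddle.floor(p + paddle.rand([x.shape[0], 1, 1]))  # 0/1 per sample
+ #   out = residual + (x / p) * mask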
+ del parameter_69 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_91 = paddle._C_ops.add(matmul_43, parameter_68) + del parameter_68 + + # pd_op.full: (xf64) <- () + full_26 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__22 = paddle._C_ops.assign_value_( + full_26, + [], + paddle.float64, + [float("0.929412")], + paddle.framework._current_expected_place(), + ) + del full_26 + + # pd_op.cast: (xf32) <- (xf64) + cast_22 = paddle._C_ops.cast(assign_value__22, paddle.float32) + del assign_value__22 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_22 = paddle._C_ops.shape64(add_91) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_37 = paddle._C_ops.slice( + shape64_22, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_22 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_22 = [slice_37, full_1, full_1] + del slice_37 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_22 = paddle._C_ops.stack(combine_22, 0) + del combine_22 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_22 = paddle._C_ops.uniform( + stack_22, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_22 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_92 = paddle._C_ops.add(cast_22, uniform_22) + del uniform_22 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_22 = paddle._C_ops.floor(add_92) + del add_92 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_22 = paddle._C_ops.divide(add_91, cast_22) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_22 = paddle._C_ops.multiply(divide_22, floor_22) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_93 = paddle._C_ops.add(layer_norm_75, multiply_22) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_78, layer_norm_79, layer_norm_80 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_93, parameter_67, parameter_66, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_66, parameter_67 + + # pd_op.matmul: (4x320x1536xf32) <- (4x320x384xf32, 384x1536xf32) + matmul_44 = paddle._C_ops.matmul(layer_norm_78, parameter_65, False, False) + del parameter_65 + + # pd_op.add: (4x320x1536xf32) <- (4x320x1536xf32, 1536xf32) + add_94 = paddle._C_ops.add(matmul_44, parameter_64) + del parameter_64 + + # pd_op.gelu: (4x320x1536xf32) <- (4x320x1536xf32) + gelu_14 = paddle._C_ops.gelu(add_94, False) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x1536xf32, 1536x384xf32) + matmul_45 = paddle._C_ops.matmul(gelu_14, parameter_63, False, False) + del parameter_63 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_95 = paddle._C_ops.add(matmul_45, parameter_62) + del parameter_62 + + # pd_op.full: (xf64) <- () + full_27 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__23 = paddle._C_ops.assign_value_( + full_27, + [], + paddle.float64, + [float("0.929412")], + paddle.framework._current_expected_place(), + ) + del full_27 + + # pd_op.cast: (xf32) <- (xf64) + cast_23 = paddle._C_ops.cast(assign_value__23, paddle.float32) + del assign_value__23 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + 
shape64_23 = paddle._C_ops.shape64(add_95) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_38 = paddle._C_ops.slice( + shape64_23, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_23 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_23 = [slice_38, full_1, full_1] + del slice_38 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_23 = paddle._C_ops.stack(combine_23, 0) + del combine_23 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_23 = paddle._C_ops.uniform( + stack_23, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_23 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_96 = paddle._C_ops.add(cast_23, uniform_23) + del uniform_23 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_23 = paddle._C_ops.floor(add_96) + del add_96 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_23 = paddle._C_ops.divide(add_95, cast_23) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_23 = paddle._C_ops.multiply(divide_23, floor_23) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_97 = paddle._C_ops.add(layer_norm_78, multiply_23) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_81, layer_norm_82, layer_norm_83 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_97, parameter_61, parameter_60, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_60, parameter_61 + + # pd_op.matmul: (4x320x1152xf32) <- (4x320x384xf32, 384x1152xf32) + matmul_46 = paddle._C_ops.matmul(layer_norm_81, parameter_59, False, False) + del parameter_59 + + # pd_op.add: (4x320x1152xf32) <- (4x320x1152xf32, 1152xf32) + add_98 = paddle._C_ops.add(matmul_46, parameter_58) + del parameter_58 + + # pd_op.reshape: (4x320x3x12x32xf32) <- (4x320x1152xf32, 5xi64) + reshape_32 = paddle._C_ops.reshape(add_98, full_int_array_9) + + # pd_op.transpose: (3x4x12x320x32xf32) <- (4x320x3x12x32xf32) + transpose_36 = paddle._C_ops.transpose(reshape_32, [2, 0, 3, 1, 4]) + del reshape_32 + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_39 = paddle._C_ops.slice( + transpose_36, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_40 = paddle._C_ops.slice( + transpose_36, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_41 = paddle._C_ops.slice( + transpose_36, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x12x32x320xf32) <- (4x12x320x32xf32) + transpose_37 = paddle._C_ops.transpose(slice_40, [0, 1, 3, 2]) + del slice_40 + + # pd_op.matmul: (4x12x320x320xf32) <- (4x12x320x32xf32, 4x12x32x320xf32) + matmul_47 = paddle._C_ops.matmul(slice_39, transpose_37, False, False) + + # pd_op.scale: (4x12x320x320xf32) <- (4x12x320x320xf32, 1xf32) + scale_5 = paddle._C_ops.scale(matmul_47, full_17, float("0"), True) + del matmul_47 + + # pd_op.softmax: (4x12x320x320xf32) <- (4x12x320x320xf32) + softmax_5 = paddle._C_ops.softmax(scale_5, -1) + del scale_5 + + # pd_op.matmul: (4x12x320x32xf32) <- (4x12x320x320xf32, 4x12x320x32xf32) + matmul_48 = paddle._C_ops.matmul(softmax_5, slice_41, False, False) + + # pd_op.transpose: (4x320x12x32xf32) <- (4x12x320x32xf32) + transpose_38 = 
paddle._C_ops.transpose(matmul_48, [0, 2, 1, 3]) + del matmul_48 + + # pd_op.reshape: (4x320x384xf32) <- (4x320x12x32xf32, 3xi64) + reshape_33 = paddle._C_ops.reshape(transpose_38, full_int_array_10) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x384xf32, 384x384xf32) + matmul_49 = paddle._C_ops.matmul(reshape_33, parameter_57, False, False) + del parameter_57 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_99 = paddle._C_ops.add(matmul_49, parameter_56) + del parameter_56 + + # pd_op.full: (xf64) <- () + full_28 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__24 = paddle._C_ops.assign_value_( + full_28, + [], + paddle.float64, + [float("0.923529")], + paddle.framework._current_expected_place(), + ) + del full_28 + + # pd_op.cast: (xf32) <- (xf64) + cast_24 = paddle._C_ops.cast(assign_value__24, paddle.float32) + del assign_value__24 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_24 = paddle._C_ops.shape64(add_99) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_42 = paddle._C_ops.slice( + shape64_24, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_24 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_24 = [slice_42, full_1, full_1] + del slice_42 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_24 = paddle._C_ops.stack(combine_24, 0) + del combine_24 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_24 = paddle._C_ops.uniform( + stack_24, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_24 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_100 = paddle._C_ops.add(cast_24, uniform_24) + del uniform_24 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_24 = paddle._C_ops.floor(add_100) + del add_100 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_24 = paddle._C_ops.divide(add_99, cast_24) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_24 = paddle._C_ops.multiply(divide_24, floor_24) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_101 = paddle._C_ops.add(layer_norm_81, multiply_24) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_84, layer_norm_85, layer_norm_86 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_101, parameter_55, parameter_54, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_54, parameter_55 + + # pd_op.matmul: (4x320x1536xf32) <- (4x320x384xf32, 384x1536xf32) + matmul_50 = paddle._C_ops.matmul(layer_norm_84, parameter_53, False, False) + del parameter_53 + + # pd_op.add: (4x320x1536xf32) <- (4x320x1536xf32, 1536xf32) + add_102 = paddle._C_ops.add(matmul_50, parameter_52) + del parameter_52 + + # pd_op.gelu: (4x320x1536xf32) <- (4x320x1536xf32) + gelu_15 = paddle._C_ops.gelu(add_102, False) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x1536xf32, 1536x384xf32) + matmul_51 = paddle._C_ops.matmul(gelu_15, parameter_51, False, False) + del parameter_51 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_103 = paddle._C_ops.add(matmul_51, parameter_50) + del parameter_50 + + # pd_op.full: (xf64) <- () + full_29 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + 
assign_value__25 = paddle._C_ops.assign_value_( + full_29, + [], + paddle.float64, + [float("0.923529")], + paddle.framework._current_expected_place(), + ) + del full_29 + + # pd_op.cast: (xf32) <- (xf64) + cast_25 = paddle._C_ops.cast(assign_value__25, paddle.float32) + del assign_value__25 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_25 = paddle._C_ops.shape64(add_103) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_43 = paddle._C_ops.slice( + shape64_25, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_25 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_25 = [slice_43, full_1, full_1] + del slice_43 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_25 = paddle._C_ops.stack(combine_25, 0) + del combine_25 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_25 = paddle._C_ops.uniform( + stack_25, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_25 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_104 = paddle._C_ops.add(cast_25, uniform_25) + del uniform_25 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_25 = paddle._C_ops.floor(add_104) + del add_104 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_25 = paddle._C_ops.divide(add_103, cast_25) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_25 = paddle._C_ops.multiply(divide_25, floor_25) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_105 = paddle._C_ops.add(layer_norm_84, multiply_25) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_87, layer_norm_88, layer_norm_89 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_105, parameter_49, parameter_48, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_48, parameter_49 + + # pd_op.matmul: (4x320x1152xf32) <- (4x320x384xf32, 384x1152xf32) + matmul_52 = paddle._C_ops.matmul(layer_norm_87, parameter_47, False, False) + del parameter_47 + + # pd_op.add: (4x320x1152xf32) <- (4x320x1152xf32, 1152xf32) + add_106 = paddle._C_ops.add(matmul_52, parameter_46) + del parameter_46 + + # pd_op.reshape: (4x320x3x12x32xf32) <- (4x320x1152xf32, 5xi64) + reshape_34 = paddle._C_ops.reshape(add_106, full_int_array_9) + + # pd_op.transpose: (3x4x12x320x32xf32) <- (4x320x3x12x32xf32) + transpose_39 = paddle._C_ops.transpose(reshape_34, [2, 0, 3, 1, 4]) + del reshape_34 + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_44 = paddle._C_ops.slice( + transpose_39, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_45 = paddle._C_ops.slice( + transpose_39, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_46 = paddle._C_ops.slice( + transpose_39, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x12x32x320xf32) <- (4x12x320x32xf32) + transpose_40 = paddle._C_ops.transpose(slice_45, [0, 1, 3, 2]) + del slice_45 + + # pd_op.matmul: (4x12x320x320xf32) <- (4x12x320x32xf32, 4x12x32x320xf32) + matmul_53 = paddle._C_ops.matmul(slice_44, transpose_40, False, False) + + # pd_op.scale: (4x12x320x320xf32) <- (4x12x320x320xf32, 1xf32) + scale_6 = paddle._C_ops.scale(matmul_53, full_17, float("0"), True) + del matmul_53 + + # 
pd_op.softmax: (4x12x320x320xf32) <- (4x12x320x320xf32) + softmax_6 = paddle._C_ops.softmax(scale_6, -1) + del scale_6 + + # pd_op.matmul: (4x12x320x32xf32) <- (4x12x320x320xf32, 4x12x320x32xf32) + matmul_54 = paddle._C_ops.matmul(softmax_6, slice_46, False, False) + + # pd_op.transpose: (4x320x12x32xf32) <- (4x12x320x32xf32) + transpose_41 = paddle._C_ops.transpose(matmul_54, [0, 2, 1, 3]) + del matmul_54 + + # pd_op.reshape: (4x320x384xf32) <- (4x320x12x32xf32, 3xi64) + reshape_35 = paddle._C_ops.reshape(transpose_41, full_int_array_10) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x384xf32, 384x384xf32) + matmul_55 = paddle._C_ops.matmul(reshape_35, parameter_45, False, False) + del parameter_45 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_107 = paddle._C_ops.add(matmul_55, parameter_44) + del parameter_44 + + # pd_op.full: (xf64) <- () + full_30 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__26 = paddle._C_ops.assign_value_( + full_30, + [], + paddle.float64, + [float("0.917647")], + paddle.framework._current_expected_place(), + ) + del full_30 + + # pd_op.cast: (xf32) <- (xf64) + cast_26 = paddle._C_ops.cast(assign_value__26, paddle.float32) + del assign_value__26 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_26 = paddle._C_ops.shape64(add_107) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_47 = paddle._C_ops.slice( + shape64_26, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_26 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_26 = [slice_47, full_1, full_1] + del slice_47 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_26 = paddle._C_ops.stack(combine_26, 0) + del combine_26 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_26 = paddle._C_ops.uniform( + stack_26, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_26 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_108 = paddle._C_ops.add(cast_26, uniform_26) + del uniform_26 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_26 = paddle._C_ops.floor(add_108) + del add_108 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_26 = paddle._C_ops.divide(add_107, cast_26) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_26 = paddle._C_ops.multiply(divide_26, floor_26) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_109 = paddle._C_ops.add(layer_norm_87, multiply_26) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_90, layer_norm_91, layer_norm_92 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_109, parameter_43, parameter_42, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_42, parameter_43 + + # pd_op.matmul: (4x320x1536xf32) <- (4x320x384xf32, 384x1536xf32) + matmul_56 = paddle._C_ops.matmul(layer_norm_90, parameter_41, False, False) + del parameter_41 + + # pd_op.add: (4x320x1536xf32) <- (4x320x1536xf32, 1536xf32) + add_110 = paddle._C_ops.add(matmul_56, parameter_40) + del parameter_40 + + # pd_op.gelu: (4x320x1536xf32) <- (4x320x1536xf32) + gelu_16 = paddle._C_ops.gelu(add_110, False) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x1536xf32, 1536x384xf32) + matmul_57 = paddle._C_ops.matmul(gelu_16, parameter_39, False, 
False) + del parameter_39 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_111 = paddle._C_ops.add(matmul_57, parameter_38) + del parameter_38 + + # pd_op.full: (xf64) <- () + full_31 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__27 = paddle._C_ops.assign_value_( + full_31, + [], + paddle.float64, + [float("0.917647")], + paddle.framework._current_expected_place(), + ) + del full_31 + + # pd_op.cast: (xf32) <- (xf64) + cast_27 = paddle._C_ops.cast(assign_value__27, paddle.float32) + del assign_value__27 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_27 = paddle._C_ops.shape64(add_111) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_48 = paddle._C_ops.slice( + shape64_27, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_27 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_27 = [slice_48, full_1, full_1] + del slice_48 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_27 = paddle._C_ops.stack(combine_27, 0) + del combine_27 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_27 = paddle._C_ops.uniform( + stack_27, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_27 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_112 = paddle._C_ops.add(cast_27, uniform_27) + del uniform_27 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_27 = paddle._C_ops.floor(add_112) + del add_112 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_27 = paddle._C_ops.divide(add_111, cast_27) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_27 = paddle._C_ops.multiply(divide_27, floor_27) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_113 = paddle._C_ops.add(layer_norm_90, multiply_27) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_93, layer_norm_94, layer_norm_95 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_113, parameter_37, parameter_36, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_36, parameter_37 + + # pd_op.matmul: (4x320x1152xf32) <- (4x320x384xf32, 384x1152xf32) + matmul_58 = paddle._C_ops.matmul(layer_norm_93, parameter_35, False, False) + del parameter_35 + + # pd_op.add: (4x320x1152xf32) <- (4x320x1152xf32, 1152xf32) + add_114 = paddle._C_ops.add(matmul_58, parameter_34) + del parameter_34 + + # pd_op.reshape: (4x320x3x12x32xf32) <- (4x320x1152xf32, 5xi64) + reshape_36 = paddle._C_ops.reshape(add_114, full_int_array_9) + + # pd_op.transpose: (3x4x12x320x32xf32) <- (4x320x3x12x32xf32) + transpose_42 = paddle._C_ops.transpose(reshape_36, [2, 0, 3, 1, 4]) + del reshape_36 + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_49 = paddle._C_ops.slice( + transpose_42, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_50 = paddle._C_ops.slice( + transpose_42, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_51 = paddle._C_ops.slice( + transpose_42, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x12x32x320xf32) <- (4x12x320x32xf32) + transpose_43 = 
paddle._C_ops.transpose(slice_50, [0, 1, 3, 2]) + del slice_50 + + # pd_op.matmul: (4x12x320x320xf32) <- (4x12x320x32xf32, 4x12x32x320xf32) + matmul_59 = paddle._C_ops.matmul(slice_49, transpose_43, False, False) + + # pd_op.scale: (4x12x320x320xf32) <- (4x12x320x320xf32, 1xf32) + scale_7 = paddle._C_ops.scale(matmul_59, full_17, float("0"), True) + del matmul_59 + + # pd_op.softmax: (4x12x320x320xf32) <- (4x12x320x320xf32) + softmax_7 = paddle._C_ops.softmax(scale_7, -1) + del scale_7 + + # pd_op.matmul: (4x12x320x32xf32) <- (4x12x320x320xf32, 4x12x320x32xf32) + matmul_60 = paddle._C_ops.matmul(softmax_7, slice_51, False, False) + + # pd_op.transpose: (4x320x12x32xf32) <- (4x12x320x32xf32) + transpose_44 = paddle._C_ops.transpose(matmul_60, [0, 2, 1, 3]) + del matmul_60 + + # pd_op.reshape: (4x320x384xf32) <- (4x320x12x32xf32, 3xi64) + reshape_37 = paddle._C_ops.reshape(transpose_44, full_int_array_10) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x384xf32, 384x384xf32) + matmul_61 = paddle._C_ops.matmul(reshape_37, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_115 = paddle._C_ops.add(matmul_61, parameter_32) + del parameter_32 + + # pd_op.full: (xf64) <- () + full_32 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__28 = paddle._C_ops.assign_value_( + full_32, + [], + paddle.float64, + [float("0.911765")], + paddle.framework._current_expected_place(), + ) + del full_32 + + # pd_op.cast: (xf32) <- (xf64) + cast_28 = paddle._C_ops.cast(assign_value__28, paddle.float32) + del assign_value__28 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_28 = paddle._C_ops.shape64(add_115) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_52 = paddle._C_ops.slice( + shape64_28, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_28 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_28 = [slice_52, full_1, full_1] + del slice_52 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_28 = paddle._C_ops.stack(combine_28, 0) + del combine_28 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_28 = paddle._C_ops.uniform( + stack_28, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_28 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_116 = paddle._C_ops.add(cast_28, uniform_28) + del uniform_28 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_28 = paddle._C_ops.floor(add_116) + del add_116 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_28 = paddle._C_ops.divide(add_115, cast_28) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_28 = paddle._C_ops.multiply(divide_28, floor_28) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_117 = paddle._C_ops.add(layer_norm_93, multiply_28) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_96, layer_norm_97, layer_norm_98 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_117, parameter_31, parameter_30, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_30, parameter_31 + + # pd_op.matmul: (4x320x1536xf32) <- (4x320x384xf32, 384x1536xf32) + matmul_62 = paddle._C_ops.matmul(layer_norm_96, parameter_29, False, False) + del parameter_29 + + # 
pd_op.add: (4x320x1536xf32) <- (4x320x1536xf32, 1536xf32) + add_118 = paddle._C_ops.add(matmul_62, parameter_28) + del parameter_28 + + # pd_op.gelu: (4x320x1536xf32) <- (4x320x1536xf32) + gelu_17 = paddle._C_ops.gelu(add_118, False) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x1536xf32, 1536x384xf32) + matmul_63 = paddle._C_ops.matmul(gelu_17, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_119 = paddle._C_ops.add(matmul_63, parameter_26) + del parameter_26 + + # pd_op.full: (xf64) <- () + full_33 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__29 = paddle._C_ops.assign_value_( + full_33, + [], + paddle.float64, + [float("0.911765")], + paddle.framework._current_expected_place(), + ) + del full_33 + + # pd_op.cast: (xf32) <- (xf64) + cast_29 = paddle._C_ops.cast(assign_value__29, paddle.float32) + del assign_value__29 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_29 = paddle._C_ops.shape64(add_119) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_53 = paddle._C_ops.slice( + shape64_29, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_29 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_29 = [slice_53, full_1, full_1] + del slice_53 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_29 = paddle._C_ops.stack(combine_29, 0) + del combine_29 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_29 = paddle._C_ops.uniform( + stack_29, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_29 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_120 = paddle._C_ops.add(cast_29, uniform_29) + del uniform_29 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_29 = paddle._C_ops.floor(add_120) + del add_120 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_29 = paddle._C_ops.divide(add_119, cast_29) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_29 = paddle._C_ops.multiply(divide_29, floor_29) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_121 = paddle._C_ops.add(layer_norm_96, multiply_29) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_99, layer_norm_100, layer_norm_101 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_121, parameter_25, parameter_24, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_24, parameter_25 + + # pd_op.matmul: (4x320x1152xf32) <- (4x320x384xf32, 384x1152xf32) + matmul_64 = paddle._C_ops.matmul(layer_norm_99, parameter_23, False, False) + del parameter_23 + + # pd_op.add: (4x320x1152xf32) <- (4x320x1152xf32, 1152xf32) + add_122 = paddle._C_ops.add(matmul_64, parameter_22) + del parameter_22 + + # pd_op.reshape: (4x320x3x12x32xf32) <- (4x320x1152xf32, 5xi64) + reshape_38 = paddle._C_ops.reshape(add_122, full_int_array_9) + + # pd_op.transpose: (3x4x12x320x32xf32) <- (4x320x3x12x32xf32) + transpose_45 = paddle._C_ops.transpose(reshape_38, [2, 0, 3, 1, 4]) + del reshape_38 + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_54 = paddle._C_ops.slice( + transpose_45, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_55 = 
paddle._C_ops.slice( + transpose_45, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_56 = paddle._C_ops.slice( + transpose_45, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x12x32x320xf32) <- (4x12x320x32xf32) + transpose_46 = paddle._C_ops.transpose(slice_55, [0, 1, 3, 2]) + del slice_55 + + # pd_op.matmul: (4x12x320x320xf32) <- (4x12x320x32xf32, 4x12x32x320xf32) + matmul_65 = paddle._C_ops.matmul(slice_54, transpose_46, False, False) + + # pd_op.scale: (4x12x320x320xf32) <- (4x12x320x320xf32, 1xf32) + scale_8 = paddle._C_ops.scale(matmul_65, full_17, float("0"), True) + del matmul_65 + + # pd_op.softmax: (4x12x320x320xf32) <- (4x12x320x320xf32) + softmax_8 = paddle._C_ops.softmax(scale_8, -1) + del scale_8 + + # pd_op.matmul: (4x12x320x32xf32) <- (4x12x320x320xf32, 4x12x320x32xf32) + matmul_66 = paddle._C_ops.matmul(softmax_8, slice_56, False, False) + + # pd_op.transpose: (4x320x12x32xf32) <- (4x12x320x32xf32) + transpose_47 = paddle._C_ops.transpose(matmul_66, [0, 2, 1, 3]) + del matmul_66 + + # pd_op.reshape: (4x320x384xf32) <- (4x320x12x32xf32, 3xi64) + reshape_39 = paddle._C_ops.reshape(transpose_47, full_int_array_10) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x384xf32, 384x384xf32) + matmul_67 = paddle._C_ops.matmul(reshape_39, parameter_21, False, False) + del parameter_21 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_123 = paddle._C_ops.add(matmul_67, parameter_20) + del parameter_20 + + # pd_op.full: (xf64) <- () + full_34 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__30 = paddle._C_ops.assign_value_( + full_34, + [], + paddle.float64, + [float("0.905882")], + paddle.framework._current_expected_place(), + ) + del full_34 + + # pd_op.cast: (xf32) <- (xf64) + cast_30 = paddle._C_ops.cast(assign_value__30, paddle.float32) + del assign_value__30 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_30 = paddle._C_ops.shape64(add_123) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_57 = paddle._C_ops.slice( + shape64_30, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_30 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_30 = [slice_57, full_1, full_1] + del slice_57 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_30 = paddle._C_ops.stack(combine_30, 0) + del combine_30 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_30 = paddle._C_ops.uniform( + stack_30, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_30 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_124 = paddle._C_ops.add(cast_30, uniform_30) + del uniform_30 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_30 = paddle._C_ops.floor(add_124) + del add_124 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_30 = paddle._C_ops.divide(add_123, cast_30) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_30 = paddle._C_ops.multiply(divide_30, floor_30) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_125 = paddle._C_ops.add(layer_norm_99, multiply_30) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_102, layer_norm_103, layer_norm_104 = (lambda x, f: f(x))( + 
paddle._C_ops.layer_norm( + add_125, parameter_19, parameter_18, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_18, parameter_19 + + # pd_op.matmul: (4x320x1536xf32) <- (4x320x384xf32, 384x1536xf32) + matmul_68 = paddle._C_ops.matmul(layer_norm_102, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (4x320x1536xf32) <- (4x320x1536xf32, 1536xf32) + add_126 = paddle._C_ops.add(matmul_68, parameter_16) + del parameter_16 + + # pd_op.gelu: (4x320x1536xf32) <- (4x320x1536xf32) + gelu_18 = paddle._C_ops.gelu(add_126, False) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x1536xf32, 1536x384xf32) + matmul_69 = paddle._C_ops.matmul(gelu_18, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_127 = paddle._C_ops.add(matmul_69, parameter_14) + del parameter_14 + + # pd_op.full: (xf64) <- () + full_35 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__31 = paddle._C_ops.assign_value_( + full_35, + [], + paddle.float64, + [float("0.905882")], + paddle.framework._current_expected_place(), + ) + del full_35 + + # pd_op.cast: (xf32) <- (xf64) + cast_31 = paddle._C_ops.cast(assign_value__31, paddle.float32) + del assign_value__31 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_31 = paddle._C_ops.shape64(add_127) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_58 = paddle._C_ops.slice( + shape64_31, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_31 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_31 = [slice_58, full_1, full_1] + del slice_58 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_31 = paddle._C_ops.stack(combine_31, 0) + del combine_31 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_31 = paddle._C_ops.uniform( + stack_31, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_31 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_128 = paddle._C_ops.add(cast_31, uniform_31) + del uniform_31 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_31 = paddle._C_ops.floor(add_128) + del add_128 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_31 = paddle._C_ops.divide(add_127, cast_31) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_31 = paddle._C_ops.multiply(divide_31, floor_31) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_129 = paddle._C_ops.add(layer_norm_102, multiply_31) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_105, layer_norm_106, layer_norm_107 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_129, parameter_13, parameter_12, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_12, parameter_13 + + # pd_op.matmul: (4x320x1152xf32) <- (4x320x384xf32, 384x1152xf32) + matmul_70 = paddle._C_ops.matmul(layer_norm_105, parameter_11, False, False) + del parameter_11 + + # pd_op.add: (4x320x1152xf32) <- (4x320x1152xf32, 1152xf32) + add_130 = paddle._C_ops.add(matmul_70, parameter_10) + del parameter_10 + + # pd_op.reshape: (4x320x3x12x32xf32) <- (4x320x1152xf32, 5xi64) + reshape_40 = paddle._C_ops.reshape(add_130, full_int_array_9) + del full_int_array_9 + + # 
pd_op.transpose: (3x4x12x320x32xf32) <- (4x320x3x12x32xf32) + transpose_48 = paddle._C_ops.transpose(reshape_40, [2, 0, 3, 1, 4]) + del reshape_40 + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_59 = paddle._C_ops.slice( + transpose_48, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_60 = paddle._C_ops.slice( + transpose_48, [0], full_int_array_3, full_int_array_6, [1], [0] + ) + + # pd_op.slice: (4x12x320x32xf32) <- (3x4x12x320x32xf32, 1xi64, 1xi64) + slice_61 = paddle._C_ops.slice( + transpose_48, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + + # pd_op.transpose: (4x12x32x320xf32) <- (4x12x320x32xf32) + transpose_49 = paddle._C_ops.transpose(slice_60, [0, 1, 3, 2]) + del slice_60 + + # pd_op.matmul: (4x12x320x320xf32) <- (4x12x320x32xf32, 4x12x32x320xf32) + matmul_71 = paddle._C_ops.matmul(slice_59, transpose_49, False, False) + + # pd_op.scale: (4x12x320x320xf32) <- (4x12x320x320xf32, 1xf32) + scale_9 = paddle._C_ops.scale(matmul_71, full_17, float("0"), True) + del matmul_71 + + # pd_op.softmax: (4x12x320x320xf32) <- (4x12x320x320xf32) + softmax_9 = paddle._C_ops.softmax(scale_9, -1) + del scale_9 + + # pd_op.matmul: (4x12x320x32xf32) <- (4x12x320x320xf32, 4x12x320x32xf32) + matmul_72 = paddle._C_ops.matmul(softmax_9, slice_61, False, False) + + # pd_op.transpose: (4x320x12x32xf32) <- (4x12x320x32xf32) + transpose_50 = paddle._C_ops.transpose(matmul_72, [0, 2, 1, 3]) + del matmul_72 + + # pd_op.reshape: (4x320x384xf32) <- (4x320x12x32xf32, 3xi64) + reshape_41 = paddle._C_ops.reshape(transpose_50, full_int_array_10) + del full_int_array_10 + + # pd_op.matmul: (4x320x384xf32) <- (4x320x384xf32, 384x384xf32) + matmul_73 = paddle._C_ops.matmul(reshape_41, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_131 = paddle._C_ops.add(matmul_73, parameter_8) + del parameter_8 + + # pd_op.full: (xf64) <- () + full_36 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__32 = paddle._C_ops.assign_value_( + full_36, + [], + paddle.float64, + [float("0.9")], + paddle.framework._current_expected_place(), + ) + del full_36 + + # pd_op.cast: (xf32) <- (xf64) + cast_32 = paddle._C_ops.cast(assign_value__32, paddle.float32) + del assign_value__32 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_32 = paddle._C_ops.shape64(add_131) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_62 = paddle._C_ops.slice( + shape64_32, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_32 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_32 = [slice_62, full_1, full_1] + del slice_62 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_32 = paddle._C_ops.stack(combine_32, 0) + del combine_32 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_32 = paddle._C_ops.uniform( + stack_32, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del stack_32 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_132 = paddle._C_ops.add(cast_32, uniform_32) + del uniform_32 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_32 = paddle._C_ops.floor(add_132) + del add_132 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_32 = paddle._C_ops.divide(add_131, cast_32) + + # pd_op.multiply: 
(4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_32 = paddle._C_ops.multiply(divide_32, floor_32) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_133 = paddle._C_ops.add(layer_norm_105, multiply_32) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_108, layer_norm_109, layer_norm_110 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_133, parameter_7, parameter_6, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_6, parameter_7 + + # pd_op.matmul: (4x320x1536xf32) <- (4x320x384xf32, 384x1536xf32) + matmul_74 = paddle._C_ops.matmul(layer_norm_108, parameter_5, False, False) + del parameter_5 + + # pd_op.add: (4x320x1536xf32) <- (4x320x1536xf32, 1536xf32) + add_134 = paddle._C_ops.add(matmul_74, parameter_4) + del parameter_4 + + # pd_op.gelu: (4x320x1536xf32) <- (4x320x1536xf32) + gelu_19 = paddle._C_ops.gelu(add_134, False) + + # pd_op.matmul: (4x320x384xf32) <- (4x320x1536xf32, 1536x384xf32) + matmul_75 = paddle._C_ops.matmul(gelu_19, parameter_3, False, False) + del parameter_3 + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 384xf32) + add_135 = paddle._C_ops.add(matmul_75, parameter_2) + del parameter_2 + + # pd_op.full: (xf64) <- () + full_37 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__33 = paddle._C_ops.assign_value_( + full_37, + [], + paddle.float64, + [float("0.9")], + paddle.framework._current_expected_place(), + ) + del full_37 + + # pd_op.cast: (xf32) <- (xf64) + cast_33 = paddle._C_ops.cast(assign_value__33, paddle.float32) + del assign_value__33 + + # pd_op.shape64: (3xi64) <- (4x320x384xf32) + shape64_33 = paddle._C_ops.shape64(add_135) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_63 = paddle._C_ops.slice( + shape64_33, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_33 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_33 = [slice_63, full_1, full_1] + del full_1, slice_63 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_33 = paddle._C_ops.stack(combine_33, 0) + del combine_33 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_33 = paddle._C_ops.uniform( + stack_33, + paddle.float32, + full_2, + full_3, + 0, + paddle.framework._current_expected_place(), + ) + del full_2, full_3, stack_33 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_136 = paddle._C_ops.add(cast_33, uniform_33) + del uniform_33 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_33 = paddle._C_ops.floor(add_136) + del add_136 + + # pd_op.divide: (4x320x384xf32) <- (4x320x384xf32, xf32) + divide_33 = paddle._C_ops.divide(add_135, cast_33) + + # pd_op.multiply: (4x320x384xf32) <- (4x320x384xf32, -1x1x1xf32) + multiply_33 = paddle._C_ops.multiply(divide_33, floor_33) + + # pd_op.add: (4x320x384xf32) <- (4x320x384xf32, 4x320x384xf32) + add_137 = paddle._C_ops.add(layer_norm_108, multiply_33) + + # pd_op.layer_norm: (4x320x384xf32, 4x320xf32, 4x320xf32) <- (4x320x384xf32, 384xf32, 384xf32) + layer_norm_111, layer_norm_112, layer_norm_113 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_137, parameter_1, parameter_0, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_0, parameter_1 + + # pd_op.transpose: 
(4x384x320xf32) <- (4x320x384xf32) + transpose_51 = paddle._C_ops.transpose(layer_norm_111, [0, 2, 1]) + del layer_norm_111 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_11 = [0, 384, 4, 80] + + # pd_op.reshape: (4x384x4x80xf32) <- (4x384x320xf32, 4xi64) + reshape_42 = paddle._C_ops.reshape(transpose_51, full_int_array_11) + del full_int_array_11 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [4, 2] + + # pd_op.pool2d: (4x384x1x40xf32) <- (4x384x4x80xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + reshape_42, + full_int_array_12, + [4, 2], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + False, + "EXPLICIT", + ) + del ( + add_0, + add_1, + add_10, + add_101, + add_102, + add_103, + add_105, + add_106, + add_107, + add_109, + add_11, + add_110, + add_111, + add_113, + add_114, + add_115, + add_117, + add_118, + add_119, + add_121, + add_122, + add_123, + add_125, + add_126, + add_127, + add_129, + add_13, + add_130, + add_131, + add_133, + add_134, + add_135, + add_137, + add_14, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_23, + add_24, + add_25, + add_27, + add_28, + add_3, + add_30, + add_31, + add_32, + add_34, + add_35, + add_37, + add_38, + add_39, + add_4, + add_41, + add_42, + add_43, + add_45, + add_46, + add_47, + add_49, + add_5, + add_50, + add_52, + add_53, + add_54, + add_56, + add_57, + add_58, + add_6, + add_60, + add_61, + add_62, + add_64, + add_65, + add_66, + add_68, + add_69, + add_7, + add_70, + add_72, + add_73, + add_74, + add_76, + add_77, + add_78, + add_80, + add_81, + add_82, + add_84, + add_85, + add_86, + add_88, + add_89, + add_9, + add_90, + add_91, + add_93, + add_94, + add_95, + add_97, + add_98, + add_99, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + cast_0, + cast_1, + cast_10, + cast_11, + cast_12, + cast_13, + cast_14, + cast_15, + cast_16, + cast_17, + cast_18, + cast_19, + cast_2, + cast_20, + cast_21, + cast_22, + cast_23, + cast_24, + cast_25, + cast_26, + cast_27, + cast_28, + cast_29, + cast_3, + cast_30, + cast_31, + cast_32, + cast_33, + cast_4, + cast_5, + cast_6, + cast_7, + cast_8, + cast_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + divide_0, + divide_1, + divide_10, + divide_11, + divide_12, + divide_13, + divide_14, + divide_15, + divide_16, + divide_17, + divide_18, + divide_19, + divide_2, + divide_20, + divide_21, + divide_22, + divide_23, + divide_24, + divide_25, + 
divide_26, + divide_27, + divide_28, + divide_29, + divide_3, + divide_30, + divide_31, + divide_32, + divide_33, + divide_4, + divide_5, + divide_6, + divide_7, + divide_8, + divide_9, + floor_0, + floor_1, + floor_10, + floor_11, + floor_12, + floor_13, + floor_14, + floor_15, + floor_16, + floor_17, + floor_18, + floor_19, + floor_2, + floor_20, + floor_21, + floor_22, + floor_23, + floor_24, + floor_25, + floor_26, + floor_27, + floor_28, + floor_29, + floor_3, + floor_30, + floor_31, + floor_32, + floor_33, + floor_4, + floor_5, + floor_6, + floor_7, + floor_8, + floor_9, + full_17, + full_int_array_12, + full_int_array_6, + full_int_array_7, + gelu_0, + gelu_1, + gelu_10, + gelu_11, + gelu_12, + gelu_13, + gelu_14, + gelu_15, + gelu_16, + gelu_17, + gelu_18, + gelu_19, + gelu_2, + gelu_3, + gelu_4, + gelu_5, + gelu_6, + gelu_7, + gelu_8, + gelu_9, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_100, + layer_norm_101, + layer_norm_102, + layer_norm_103, + layer_norm_104, + layer_norm_105, + layer_norm_106, + layer_norm_107, + layer_norm_108, + layer_norm_109, + layer_norm_11, + layer_norm_110, + layer_norm_112, + layer_norm_113, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_24, + layer_norm_25, + layer_norm_26, + layer_norm_28, + layer_norm_29, + layer_norm_30, + layer_norm_31, + layer_norm_32, + layer_norm_34, + layer_norm_35, + layer_norm_37, + layer_norm_38, + layer_norm_39, + layer_norm_4, + layer_norm_40, + layer_norm_41, + layer_norm_43, + layer_norm_44, + layer_norm_45, + layer_norm_46, + layer_norm_47, + layer_norm_49, + layer_norm_5, + layer_norm_50, + layer_norm_51, + layer_norm_52, + layer_norm_53, + layer_norm_54, + layer_norm_55, + layer_norm_56, + layer_norm_57, + layer_norm_58, + layer_norm_59, + layer_norm_6, + layer_norm_60, + layer_norm_61, + layer_norm_62, + layer_norm_63, + layer_norm_64, + layer_norm_65, + layer_norm_66, + layer_norm_67, + layer_norm_68, + layer_norm_69, + layer_norm_7, + layer_norm_70, + layer_norm_71, + layer_norm_73, + layer_norm_74, + layer_norm_75, + layer_norm_76, + layer_norm_77, + layer_norm_78, + layer_norm_79, + layer_norm_8, + layer_norm_80, + layer_norm_81, + layer_norm_82, + layer_norm_83, + layer_norm_84, + layer_norm_85, + layer_norm_86, + layer_norm_87, + layer_norm_88, + layer_norm_89, + layer_norm_90, + layer_norm_91, + layer_norm_92, + layer_norm_93, + layer_norm_94, + layer_norm_95, + layer_norm_96, + layer_norm_97, + layer_norm_98, + layer_norm_99, + matmul_0, + matmul_1, + matmul_10, + matmul_11, + matmul_12, + matmul_13, + matmul_14, + matmul_15, + matmul_16, + matmul_19, + matmul_2, + matmul_20, + matmul_21, + matmul_22, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_34, + matmul_37, + matmul_38, + matmul_39, + matmul_4, + matmul_40, + matmul_43, + matmul_44, + matmul_45, + matmul_46, + matmul_49, + matmul_5, + matmul_50, + matmul_51, + matmul_52, + matmul_55, + matmul_56, + matmul_57, + matmul_58, + matmul_6, + matmul_61, + matmul_62, + matmul_63, + matmul_64, + matmul_67, + matmul_68, + matmul_69, + matmul_7, + matmul_70, + matmul_73, + matmul_74, + matmul_75, + matmul_8, + matmul_9, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + 
multiply_21, + multiply_22, + multiply_23, + multiply_24, + multiply_25, + multiply_26, + multiply_27, + multiply_28, + multiply_29, + multiply_3, + multiply_30, + multiply_31, + multiply_32, + multiply_33, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + reshape_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_18, + reshape_19, + reshape_2, + reshape_21, + reshape_23, + reshape_25, + reshape_27, + reshape_28, + reshape_29, + reshape_3, + reshape_31, + reshape_33, + reshape_35, + reshape_37, + reshape_39, + reshape_4, + reshape_41, + reshape_42, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + slice_14, + slice_16, + slice_19, + slice_21, + slice_24, + slice_26, + slice_29, + slice_31, + slice_34, + slice_36, + slice_39, + slice_41, + slice_44, + slice_46, + slice_49, + slice_51, + slice_54, + slice_56, + slice_59, + slice_61, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + softmax_4, + softmax_5, + softmax_6, + softmax_7, + softmax_8, + softmax_9, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_18, + transpose_19, + transpose_2, + transpose_20, + transpose_21, + transpose_22, + transpose_23, + transpose_24, + transpose_25, + transpose_26, + transpose_27, + transpose_28, + transpose_29, + transpose_3, + transpose_30, + transpose_31, + transpose_32, + transpose_33, + transpose_34, + transpose_35, + transpose_36, + transpose_37, + transpose_38, + transpose_39, + transpose_4, + transpose_40, + transpose_41, + transpose_42, + transpose_43, + transpose_44, + transpose_45, + transpose_46, + transpose_47, + transpose_48, + transpose_49, + transpose_5, + transpose_50, + transpose_51, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + ) + + return pool2d_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/weight_meta.py new file mode 100644 index 000000000..9653d49e8 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/weight_meta.py @@ -0,0 +1,2408 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.391234") + max_val = float("2.33804") + mean = float("-0.00605126") + std = float("0.141709") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-0.00273431") + max_val = float("1.27834") + mean = float("0.633188") + std = float("0.504115") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("-6.21156") + max_val = float("3.16201") + mean = float("0.00811925") + std = float("0.55936") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.26586") + max_val = float("0.731109") + mean = float("-0.000131416") + std = float("0.0594808") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1536] + dtype = "float32" + min_val = float("-1.33895") + max_val = float("1.11383") + mean = float("-0.691732") + std = float("0.261174") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384, 1536] + dtype = "float32" + min_val = 
float("-0.294714") + max_val = float("0.394449") + mean = float("-0.00138281") + std = float("0.05379") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("-5.66332") + max_val = float("0.774539") + mean = float("-0.00881374") + std = float("0.387828") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-0.738694") + max_val = float("6.11451") + mean = float("0.679105") + std = float("0.448438") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.828408") + max_val = float("1.51244") + mean = float("-0.0176604") + std = float("0.163826") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.36161") + max_val = float("0.452343") + mean = float("0.000357919") + std = float("0.0465118") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1152] + dtype = "float32" + min_val = float("-2.96073") + max_val = float("3.21206") + mean = float("0.00577589") + std = float("0.35175") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.359938") + max_val = float("0.355583") + mean = float("-9.0429e-05") + std = float("0.0501151") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-0.966974") + max_val = float("0.727955") + mean = float("-0.0132417") + std = float("0.255698") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.00660634") + max_val = float("1.93532") + mean = float("0.840973") + std = float("0.330999") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("-1.16391") + max_val = float("1.11822") + mean = float("-0.0173586") + std = float("0.170928") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.644514") + max_val = float("0.573222") + mean = float("1.29278e-05") + std = float("0.0672336") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [1536] + dtype = "float32" + min_val = float("-1.29829") + max_val = float("0.188374") + mean = float("-0.60564") + std = float("0.151127") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.371481") + max_val = float("0.412588") + mean = float("-0.00187947") + std = float("0.0543025") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-3.00944") + max_val = float("1.12072") + mean = float("0.02978") + std = float("0.365241") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("-0.252943") + max_val = float("2.80685") + mean = float("0.684697") + std = float("0.319556") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + 
min_val = float("-1.02449") + max_val = float("1.04887") + mean = float("-0.0149435") + std = float("0.151017") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.32742") + max_val = float("0.275816") + mean = float("0.000217811") + std = float("0.0475238") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [1152] + dtype = "float32" + min_val = float("-2.91644") + max_val = float("2.62449") + mean = float("-0.00818085") + std = float("0.32054") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.36594") + max_val = float("0.310166") + mean = float("-8.03291e-05") + std = float("0.0500189") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("-0.957574") + max_val = float("1.27513") + mean = float("-0.0170723") + std = float("0.244341") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00498034") + max_val = float("1.45274") + mean = float("0.817863") + std = float("0.306469") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("-1.07558") + max_val = float("1.26154") + mean = float("0.00131697") + std = float("0.189135") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.659897") + max_val = float("0.62102") + mean = float("8.2435e-05") + std = float("0.056888") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1536] + dtype = "float32" + min_val = float("-1.18677") + max_val = float("-0.114134") + mean = float("-0.611999") + std = float("0.159574") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.445094") + max_val = float("0.48006") + mean = float("-0.00115824") + std = float("0.0517858") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-2.24688") + max_val = float("1.86867") + mean = float("0.0159623") + std = float("0.362233") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("-1.69377") + max_val = float("2.23635") + mean = float("0.624084") + std = float("0.348942") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.90244") + max_val = float("1.05957") + mean = float("-0.00717212") + std = float("0.164042") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.382145") + max_val = float("0.337451") + mean = float("0.000116599") + std = float("0.0459039") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [1152] + dtype = "float32" + min_val = float("-2.91195") + max_val = float("2.61562") + mean = float("0.00594852") + std = float("0.313983") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 
1152] + dtype = "float32" + min_val = float("-0.430739") + max_val = float("0.44533") + mean = float("-4.49102e-05") + std = float("0.0491566") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("-0.916805") + max_val = float("1.74928") + mean = float("-0.00816528") + std = float("0.287587") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-0.726943") + max_val = float("1.49198") + mean = float("0.707845") + std = float("0.338272") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-1.78986") + max_val = float("1.51204") + mean = float("0.0134232") + std = float("0.187376") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.560883") + max_val = float("0.459562") + mean = float("6.25964e-05") + std = float("0.0557709") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [1536] + dtype = "float32" + min_val = float("-1.17196") + max_val = float("0.105926") + mean = float("-0.589018") + std = float("0.266652") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.754343") + max_val = float("0.536855") + mean = float("-0.00042982") + std = float("0.0503471") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-4.64774") + max_val = float("1.3273") + mean = float("-0.0045675") + std = float("0.498174") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-1.46338") + max_val = float("2.61752") + mean = float("0.626304") + std = float("0.370925") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.820121") + max_val = float("1.46921") + mean = float("-0.00686809") + std = float("0.175591") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.453601") + max_val = float("0.455748") + mean = float("-8.5355e-05") + std = float("0.0479006") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1152] + dtype = "float32" + min_val = float("-3.22004") + max_val = float("3.23421") + mean = float("0.0090323") + std = float("0.346161") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.413394") + max_val = float("0.583397") + mean = float("3.22832e-05") + std = float("0.0491461") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.991885") + max_val = float("1.80415") + mean = float("-0.0109873") + std = float("0.316639") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("-0.0522129") + max_val = float("2.78471") + mean = float("0.797124") + std = float("0.429758") + data = None + + +class Program_weight_tensor_parameter_50: + name = 
"parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-1.50586") + max_val = float("1.40754") + mean = float("0.0228976") + std = float("0.354798") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.673087") + max_val = float("0.303375") + mean = float("0.000279213") + std = float("0.0561281") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [1536] + dtype = "float32" + min_val = float("-0.743186") + max_val = float("0.014731") + mean = float("-0.503284") + std = float("0.118682") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.299899") + max_val = float("0.457353") + mean = float("-0.000994882") + std = float("0.0430651") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("-5.35791") + max_val = float("1.78323") + mean = float("0.00427389") + std = float("0.727217") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0899557") + max_val = float("3.46991") + mean = float("0.7977") + std = float("0.403236") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("-1.34634") + max_val = float("1.73571") + mean = float("-0.00690144") + std = float("0.242646") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.301549") + max_val = float("0.319018") + mean = float("-0.000210251") + std = float("0.0505271") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [1152] + dtype = "float32" + min_val = float("-2.09165") + max_val = float("3.47442") + mean = float("0.00849669") + std = float("0.343971") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.403793") + max_val = float("0.385277") + mean = float("-5.15206e-05") + std = float("0.0496014") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-1.11619") + max_val = float("1.82325") + mean = float("-0.0106111") + std = float("0.348095") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("-0.0581639") + max_val = float("2.66693") + mean = float("0.818678") + std = float("0.408545") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("-1.44023") + max_val = float("1.84817") + mean = float("0.0176878") + std = float("0.410639") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.925741") + max_val = float("0.378696") + mean = float("0.000206148") + std = float("0.0564303") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [1536] + dtype = "float32" + min_val = float("-0.688288") + max_val = float("-0.00753526") + mean = float("-0.499897") + std = float("0.12361") + data = None + + +class 
Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.503913") + max_val = float("0.309683") + mean = float("9.58096e-05") + std = float("0.0411907") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("-3.63391") + max_val = float("1.65044") + mean = float("-0.00951912") + std = float("0.763258") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("-0.0887419") + max_val = float("3.50729") + mean = float("0.779768") + std = float("0.369595") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.674133") + max_val = float("0.951668") + mean = float("0.00424016") + std = float("0.176231") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.363554") + max_val = float("0.395794") + mean = float("0.000259539") + std = float("0.0463676") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [1152] + dtype = "float32" + min_val = float("-2.58158") + max_val = float("2.52891") + mean = float("-0.00799768") + std = float("0.316833") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.30861") + max_val = float("0.399761") + mean = float("4.28164e-05") + std = float("0.047099") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("-0.611573") + max_val = float("0.567798") + mean = float("0.00370713") + std = float("0.148267") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-0.0176943") + max_val = float("1.1113") + mean = float("0.805979") + std = float("0.318479") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384] + dtype = "float32" + min_val = float("-2.13167") + max_val = float("3.1016") + mean = float("0.0038158") + std = float("0.480805") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-4.81624") + max_val = float("3.58691") + mean = float("-0.000687498") + std = float("0.436287") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [256] + dtype = "float32" + min_val = float("-1.00302") + max_val = float("1.58895") + mean = float("-0.117761") + std = float("0.210592") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [256] + dtype = "float32" + min_val = float("0.22568") + max_val = float("1.68552") + mean = float("1.24757") + std = float("0.144412") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [256] + dtype = "float32" + min_val = float("-8.09258") + max_val = float("0.641712") + mean = float("-0.0151932") + std = float("0.534823") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.43969") + max_val = float("1.06018") + mean = float("1.17526e-05") + std = float("0.0575092") + data 
= None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [1024] + dtype = "float32" + min_val = float("-1.46189") + max_val = float("-0.0199992") + mean = float("-0.687724") + std = float("0.224643") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.347586") + max_val = float("0.302351") + mean = float("-0.00564142") + std = float("0.0566072") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [256] + dtype = "float32" + min_val = float("-3.06699") + max_val = float("1.76906") + mean = float("0.0968607") + std = float("0.35562") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [256] + dtype = "float32" + min_val = float("0.112453") + max_val = float("2.92041") + mean = float("0.694894") + std = float("0.22916") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [256] + dtype = "float32" + min_val = float("-1.23851") + max_val = float("1.00643") + mean = float("-0.00873763") + std = float("0.143725") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.324324") + max_val = float("0.542329") + mean = float("-9.51249e-05") + std = float("0.0446306") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [768] + dtype = "float32" + min_val = float("-3.23557") + max_val = float("3.07665") + mean = float("0.0189733") + std = float("0.322926") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.604016") + max_val = float("0.497953") + mean = float("-3.8477e-05") + std = float("0.0527454") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [256] + dtype = "float32" + min_val = float("-1.03089") + max_val = float("1.35712") + mean = float("-0.012765") + std = float("0.214998") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [256] + dtype = "float32" + min_val = float("0.287937") + max_val = float("1.59293") + mean = float("0.900996") + std = float("0.111622") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [256] + dtype = "float32" + min_val = float("-2.05805") + max_val = float("0.660414") + mean = float("-0.00209053") + std = float("0.194515") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.562583") + max_val = float("0.814642") + mean = float("1.22862e-05") + std = float("0.056914") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [1024] + dtype = "float32" + min_val = float("-1.33321") + max_val = float("-0.255077") + mean = float("-0.694005") + std = float("0.209896") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.381275") + max_val = float("0.37081") + mean = float("-0.0038066") + std = float("0.0556894") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [256] + dtype = "float32" + min_val = float("-2.22319") + max_val = float("1.37969") + mean = float("0.0664931") + std = 
float("0.30259") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [256] + dtype = "float32" + min_val = float("0.0818852") + max_val = float("2.63842") + mean = float("0.745383") + std = float("0.189694") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [256] + dtype = "float32" + min_val = float("-1.62126") + max_val = float("1.39196") + mean = float("-0.0111102") + std = float("0.186807") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.486574") + max_val = float("0.315077") + mean = float("8.92716e-05") + std = float("0.048068") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [768] + dtype = "float32" + min_val = float("-2.94557") + max_val = float("3.085") + mean = float("0.0137264") + std = float("0.347131") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.415111") + max_val = float("0.44239") + mean = float("-2.87333e-05") + std = float("0.0532726") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [256] + dtype = "float32" + min_val = float("-1.52143") + max_val = float("1.36531") + mean = float("-0.0082483") + std = float("0.293583") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [256] + dtype = "float32" + min_val = float("0.263449") + max_val = float("2.44346") + mean = float("0.924853") + std = float("0.191072") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [256] + dtype = "float32" + min_val = float("-0.74461") + max_val = float("0.594306") + mean = float("-0.000304155") + std = float("0.132218") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.504825") + max_val = float("0.476818") + mean = float("-5.34611e-05") + std = float("0.0617784") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [1024] + dtype = "float32" + min_val = float("-1.1179") + max_val = float("-0.0345217") + mean = float("-0.639639") + std = float("0.206906") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.477724") + max_val = float("0.357132") + mean = float("-0.00145091") + std = float("0.0546795") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [256] + dtype = "float32" + min_val = float("-2.66275") + max_val = float("2.16434") + mean = float("0.0282077") + std = float("0.48519") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [256] + dtype = "float32" + min_val = float("0.0184293") + max_val = float("2.3764") + mean = float("0.799007") + std = float("0.238441") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [256] + dtype = "float32" + min_val = float("-1.94903") + max_val = float("1.32835") + mean = float("-0.0101001") + std = float("0.260235") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.334037") + max_val = float("0.302805") + mean 
= float("7.46293e-05") + std = float("0.0485713") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [768] + dtype = "float32" + min_val = float("-3.24135") + max_val = float("3.1687") + mean = float("0.0112129") + std = float("0.360574") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.484464") + max_val = float("0.382816") + mean = float("-3.79009e-06") + std = float("0.0528972") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [256] + dtype = "float32" + min_val = float("-1.22621") + max_val = float("1.13198") + mean = float("-0.00584791") + std = float("0.315598") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [256] + dtype = "float32" + min_val = float("0.0741301") + max_val = float("2.1798") + mean = float("0.899628") + std = float("0.215349") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [256] + dtype = "float32" + min_val = float("-0.594822") + max_val = float("0.878347") + mean = float("0.00450388") + std = float("0.136994") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.448166") + max_val = float("0.410483") + mean = float("5.85688e-05") + std = float("0.0642513") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [1024] + dtype = "float32" + min_val = float("-1.13055") + max_val = float("-0.0592708") + mean = float("-0.617824") + std = float("0.192932") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.338649") + max_val = float("0.369027") + mean = float("0.000543811") + std = float("0.0538612") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [256] + dtype = "float32" + min_val = float("-2.27003") + max_val = float("2.47408") + mean = float("-0.00602015") + std = float("0.575732") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [256] + dtype = "float32" + min_val = float("0.0854208") + max_val = float("2.25771") + mean = float("0.788097") + std = float("0.203732") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [256] + dtype = "float32" + min_val = float("-1.86822") + max_val = float("1.88784") + mean = float("-0.00538365") + std = float("0.221636") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.429145") + max_val = float("0.534035") + mean = float("2.21363e-05") + std = float("0.0481658") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [768] + dtype = "float32" + min_val = float("-2.90161") + max_val = float("2.60702") + mean = float("-0.00468604") + std = float("0.35132") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.307903") + max_val = float("0.422378") + mean = float("4.29359e-05") + std = float("0.0528653") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [256] + dtype = "float32" + min_val = 
float("-1.72865") + max_val = float("1.36761") + mean = float("0.000939111") + std = float("0.417524") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [256] + dtype = "float32" + min_val = float("0.295198") + max_val = float("1.38915") + mean = float("0.917769") + std = float("0.182554") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [256] + dtype = "float32" + min_val = float("-0.541449") + max_val = float("0.438496") + mean = float("-0.000324507") + std = float("0.137469") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.438235") + max_val = float("0.346777") + mean = float("0.000112901") + std = float("0.0633795") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [1024] + dtype = "float32" + min_val = float("-0.869132") + max_val = float("-0.146015") + mean = float("-0.578405") + std = float("0.148015") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.622617") + max_val = float("0.424336") + mean = float("0.000771501") + std = float("0.0470252") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [256] + dtype = "float32" + min_val = float("-1.54389") + max_val = float("2.06104") + mean = float("-0.0245243") + std = float("0.666343") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [256] + dtype = "float32" + min_val = float("0.253241") + max_val = float("1.7475") + mean = float("0.929699") + std = float("0.194515") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [256] + dtype = "float32" + min_val = float("-2.27709") + max_val = float("2.10237") + mean = float("0.00489337") + std = float("0.356161") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.271916") + max_val = float("0.52117") + mean = float("0.00141176") + std = float("0.0431897") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [256] + dtype = "float32" + min_val = float("-0.633171") + max_val = float("0.74339") + mean = float("-0.0520087") + std = float("0.188559") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [256] + dtype = "float32" + min_val = float("0.0638513") + max_val = float("1.86322") + mean = float("1.31402") + std = float("0.15703") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [256] + dtype = "float32" + min_val = float("-8.93611") + max_val = float("0.457052") + mean = float("-0.0257168") + std = float("0.585021") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.32594") + max_val = float("1.16715") + mean = float("-2.85246e-05") + std = float("0.05611") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [1024] + dtype = "float32" + min_val = float("-1.46672") + max_val = float("-0.131769") + mean = float("-0.664938") + std = float("0.178196") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + 
shape = [256, 1024] + dtype = "float32" + min_val = float("-0.324049") + max_val = float("0.367398") + mean = float("0.000789244") + std = float("0.0569423") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [256] + dtype = "float32" + min_val = float("-0.661923") + max_val = float("0.721955") + mean = float("-0.0107626") + std = float("0.197668") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [256] + dtype = "float32" + min_val = float("0.404883") + max_val = float("1.8329") + mean = float("0.68668") + std = float("0.133497") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [256] + dtype = "float32" + min_val = float("-2.16229") + max_val = float("1.48621") + mean = float("-0.00283954") + std = float("0.300709") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.265172") + max_val = float("0.496926") + mean = float("0.00112411") + std = float("0.0415432") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [256] + dtype = "float32" + min_val = float("-0.668521") + max_val = float("1.94369") + mean = float("-0.0632413") + std = float("0.230014") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [256] + dtype = "float32" + min_val = float("0.187715") + max_val = float("2.04484") + mean = float("1.29986") + std = float("0.242333") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [256] + dtype = "float32" + min_val = float("-8.25526") + max_val = float("0.841186") + mean = float("-0.0148969") + std = float("0.558246") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [256, 128, 3, 3] + dtype = "float32" + min_val = float("-6.30351") + max_val = float("7.58677") + mean = float("-0.00014357") + std = float("0.411001") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [128] + dtype = "float32" + min_val = float("-0.71621") + max_val = float("0.756503") + mean = float("-0.0934846") + std = float("0.22947") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [128] + dtype = "float32" + min_val = float("0.40875") + max_val = float("1.66399") + mean = float("1.24008") + std = float("0.174513") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [128] + dtype = "float32" + min_val = float("-4.08424") + max_val = float("0.55395") + mean = float("-0.00506746") + std = float("0.404986") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.471752") + max_val = float("0.416741") + mean = float("7.42731e-06") + std = float("0.0632707") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [512] + dtype = "float32" + min_val = float("-1.89779") + max_val = float("-0.000424735") + mean = float("-0.660915") + std = float("0.276851") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.322236") + max_val = float("0.346724") + mean = float("-0.000322571") + std = float("0.0642253") + data = None + + +class 
Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [128] + dtype = "float32" + min_val = float("-0.681697") + max_val = float("0.411014") + mean = float("-0.0107907") + std = float("0.165352") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [128] + dtype = "float32" + min_val = float("0.572885") + max_val = float("2.47782") + mean = float("0.914634") + std = float("0.194054") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [128] + dtype = "float32" + min_val = float("-0.7091") + max_val = float("1.03592") + mean = float("0.0028099") + std = float("0.290276") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.399124") + max_val = float("0.347374") + mean = float("0.00131884") + std = float("0.0437688") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [128] + dtype = "float32" + min_val = float("-0.594308") + max_val = float("0.729355") + mean = float("-0.0827838") + std = float("0.190602") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [128] + dtype = "float32" + min_val = float("0.27402") + max_val = float("1.91845") + mean = float("1.28924") + std = float("0.253914") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [128] + dtype = "float32" + min_val = float("-5.1138") + max_val = float("0.431996") + mean = float("-0.00639554") + std = float("0.478386") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.818937") + max_val = float("0.758119") + mean = float("4.95922e-05") + std = float("0.069647") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [512] + dtype = "float32" + min_val = float("-1.37765") + max_val = float("-0.0641336") + mean = float("-0.726835") + std = float("0.184613") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.405105") + max_val = float("0.322186") + mean = float("-0.00107139") + std = float("0.0676507") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [128] + dtype = "float32" + min_val = float("-0.815685") + max_val = float("0.703344") + mean = float("0.0189572") + std = float("0.202864") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [128] + dtype = "float32" + min_val = float("0.685492") + max_val = float("2.07758") + mean = float("0.965816") + std = float("0.155292") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [128] + dtype = "float32" + min_val = float("-0.885409") + max_val = float("1.11823") + mean = float("-0.00471084") + std = float("0.309713") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.317962") + max_val = float("0.311825") + mean = float("0.00121326") + std = float("0.0447484") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [128] + dtype = "float32" + min_val = float("-0.55219") + max_val = float("0.48763") + mean = float("0.0477595") + std = 
float("0.164838") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [128] + dtype = "float32" + min_val = float("0.0629771") + max_val = float("1.88076") + mean = float("1.29016") + std = float("0.229317") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [128] + dtype = "float32" + min_val = float("-0.830581") + max_val = float("10.3139") + mean = float("0.0615956") + std = float("0.927675") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.988649") + max_val = float("0.942632") + mean = float("0.000107008") + std = float("0.0724812") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [512] + dtype = "float32" + min_val = float("-1.56353") + max_val = float("-0.150537") + mean = float("-0.720642") + std = float("0.207041") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.414822") + max_val = float("0.380912") + mean = float("-0.00149326") + std = float("0.0678869") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [128] + dtype = "float32" + min_val = float("-0.706417") + max_val = float("0.874461") + mean = float("0.0387127") + std = float("0.225457") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [128] + dtype = "float32" + min_val = float("0.782251") + max_val = float("1.93768") + mean = float("1.00745") + std = float("0.134693") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [128] + dtype = "float32" + min_val = float("-0.829148") + max_val = float("1.01714") + mean = float("-0.0103484") + std = float("0.291259") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.276089") + max_val = float("0.369554") + mean = float("0.000770057") + std = float("0.043952") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [128] + dtype = "float32" + min_val = float("-1.43839") + max_val = float("0.954129") + mean = float("-0.041584") + std = float("0.255434") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [128] + dtype = "float32" + min_val = float("0.2699") + max_val = float("2.00705") + mean = float("1.31203") + std = float("0.264854") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [128] + dtype = "float32" + min_val = float("-4.80893") + max_val = float("4.33112") + mean = float("0.0111943") + std = float("0.607984") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.663739") + max_val = float("0.872886") + mean = float("8.08476e-05") + std = float("0.0719889") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [512] + dtype = "float32" + min_val = float("-1.38975") + max_val = float("0.0599214") + mean = float("-0.715143") + std = float("0.202181") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.337775") + max_val = 
float("0.355338") + mean = float("0.00103362") + std = float("0.0675632") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [128] + dtype = "float32" + min_val = float("-1.21058") + max_val = float("0.779394") + mean = float("-0.0441957") + std = float("0.253557") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [128] + dtype = "float32" + min_val = float("0.69899") + max_val = float("1.37895") + mean = float("1.03219") + std = float("0.134822") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [128] + dtype = "float32" + min_val = float("-1.06") + max_val = float("1.09196") + mean = float("0.0111421") + std = float("0.326226") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.351555") + max_val = float("0.424499") + mean = float("0.00116859") + std = float("0.0431898") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [128] + dtype = "float32" + min_val = float("-0.978142") + max_val = float("0.885815") + mean = float("-0.0673286") + std = float("0.257847") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [128] + dtype = "float32" + min_val = float("0.301307") + max_val = float("1.9109") + mean = float("1.29689") + std = float("0.256564") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [128] + dtype = "float32" + min_val = float("-4.93517") + max_val = float("1.00089") + mean = float("-0.000170407") + std = float("0.486882") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.634287") + max_val = float("0.452401") + mean = float("0.000115189") + std = float("0.0689642") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [512] + dtype = "float32" + min_val = float("-1.76981") + max_val = float("-0.239422") + mean = float("-0.739457") + std = float("0.207037") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.369617") + max_val = float("0.377452") + mean = float("-0.000336863") + std = float("0.0663584") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [128] + dtype = "float32" + min_val = float("-0.941485") + max_val = float("0.540084") + mean = float("-0.00308671") + std = float("0.214223") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [128] + dtype = "float32" + min_val = float("0.631475") + max_val = float("1.69504") + mean = float("1.07861") + std = float("0.153476") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [128] + dtype = "float32" + min_val = float("-1.6476") + max_val = float("0.972547") + mean = float("-0.00739112") + std = float("0.381321") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.423287") + max_val = float("0.51164") + mean = float("0.000883391") + std = float("0.0425554") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [128] + dtype = "float32" 
+ min_val = float("-1.06966") + max_val = float("0.668333") + mean = float("0.0116222") + std = float("0.317468") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [128] + dtype = "float32" + min_val = float("0.476229") + max_val = float("1.97542") + mean = float("1.18636") + std = float("0.265356") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [128] + dtype = "float32" + min_val = float("-0.885903") + max_val = float("2.54609") + mean = float("-0.00492147") + std = float("0.347268") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.397077") + max_val = float("0.601582") + mean = float("0.00040258") + std = float("0.0666728") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [512] + dtype = "float32" + min_val = float("-1.34719") + max_val = float("0.109614") + mean = float("-0.665017") + std = float("0.258953") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.549137") + max_val = float("0.626329") + mean = float("-0.00198159") + std = float("0.0662168") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [128] + dtype = "float32" + min_val = float("-0.778871") + max_val = float("1.00296") + mean = float("0.0775527") + std = float("0.31264") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [128] + dtype = "float32" + min_val = float("0.533038") + max_val = float("1.87165") + mean = float("1.12727") + std = float("0.232322") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [128] + dtype = "float32" + min_val = float("-0.822867") + max_val = float("2.60621") + mean = float("-0.00422463") + std = float("0.389965") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.486222") + max_val = float("0.596873") + mean = float("0.0015935") + std = float("0.0440849") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [128] + dtype = "float32" + min_val = float("-1.26996") + max_val = float("0.531419") + mean = float("-0.208442") + std = float("0.35605") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [128] + dtype = "float32" + min_val = float("1.05904") + max_val = float("5.05638") + mean = float("2.20278") + std = float("0.741601") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [128] + dtype = "float32" + min_val = float("63.4499") + max_val = float("2395.71") + mean = float("356.285") + std = float("301.744") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [128] + dtype = "float32" + min_val = float("-45.0605") + max_val = float("122.73") + mean = float("12.0335") + std = float("27.5862") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [128] + dtype = "float32" + min_val = float("-0.0297299") + max_val = float("0.0202179") + mean = float("0.00175161") + std = float("0.00725481") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [128, 
64, 3, 3] + dtype = "float32" + min_val = float("-5.5775") + max_val = float("4.26487") + mean = float("0.00154343") + std = float("0.455524") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [64, 3, 3, 3] + dtype = "float32" + min_val = float("-1.86415") + max_val = float("1.9166") + mean = float("-0.0020959") + std = float("0.459356") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt new file mode 100644 index 000000000..526915ab3 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt @@ -0,0 +1 @@ +38a708b8b912dd287035961b4c7bd4277e4779b8bbe59dca7db5523857b64b0a \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/input_meta.py new file mode 100644 index 000000000..5e9db5ea9 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [128] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 128, 12, 80] + dtype = "float32" + min_val = float("-0.169971") + max_val = float("30.5483") + mean = float("0.221281") + std = float("0.83631") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/model.py new file mode 100644 index 000000000..49ab515b4 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/model.py @@ -0,0 +1,736 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + 
parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + data_0, + data_1, + data_2, + ): + # pd_op.conv2d: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_2, parameter_63, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_63 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_62, full_int_array_0) + del parameter_62 + + # pd_op.add: (8x128x-1x80xf32) <- (8x128x-1x80xf32, 1x128x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.add: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 8x128x-1x80xf32) + add_1 = paddle._C_ops.add(data_2, add_0) + del add_0, data_2 + + # pd_op.flatten: (8x128x-1xf32) <- (8x128x-1x80xf32) + flatten_0 = paddle._C_ops.flatten(add_1, 2, 3) + del add_1 + + # pd_op.transpose: (8x-1x128xf32) <- (8x128x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_61, parameter_60, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_60, parameter_61, transpose_0 + + # pd_op.matmul: (8x-1x512xf32) <- (8x-1x128xf32, 128x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_59, False, False) + del parameter_59 + + # pd_op.add: (8x-1x512xf32) <- (8x-1x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_0, parameter_58) + del matmul_0, parameter_58 + + # pd_op.gelu: (8x-1x512xf32) <- (8x-1x512xf32) + gelu_0 = paddle._C_ops.gelu(add_2, False) + del add_2 + + # pd_op.matmul: (8x-1x128xf32) <- (8x-1x512xf32, 512x128xf32) + matmul_1 = paddle._C_ops.matmul(gelu_0, parameter_57, False, False) + del gelu_0, parameter_57 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 128xf32) + add_3 = paddle._C_ops.add(matmul_1, parameter_56) + del matmul_1, parameter_56 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 8x-1x128xf32) + add_4 = paddle._C_ops.add(layer_norm_0, add_3) + del add_3, layer_norm_0 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_4, parameter_55, parameter_54, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_4, parameter_54, parameter_55 + + # pd_op.transpose: (8x128x-1xf32) <- (8x-1x128xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_3, [0, 2, 1]) + del layer_norm_3 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = 
paddle._C_ops.full( + [], float("80"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, data_0, data_1, full_1] + del data_0, data_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x128x-1xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(transpose_1, stack_0) + del stack_0, transpose_1 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_0 = paddle._C_ops.shape64(reshape_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.conv2d: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_2, parameter_53, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_53 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_52, full_int_array_0) + del parameter_52 + + # pd_op.add: (8x128x-1x80xf32) <- (8x128x-1x80xf32, 1x128x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_1, reshape_3) + del conv2d_1, reshape_3 + + # pd_op.add: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 8x128x-1x80xf32) + add_6 = paddle._C_ops.add(reshape_2, add_5) + del add_5, reshape_2 + + # pd_op.flatten: (8x128x-1xf32) <- (8x128x-1x80xf32) + flatten_1 = paddle._C_ops.flatten(add_6, 2, 3) + del add_6 + + # pd_op.transpose: (8x-1x128xf32) <- (8x128x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_51, parameter_50, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_50, parameter_51, transpose_2 + + # pd_op.matmul: (8x-1x512xf32) <- (8x-1x128xf32, 128x512xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_6, parameter_49, False, False) + del parameter_49 + + # pd_op.add: (8x-1x512xf32) <- (8x-1x512xf32, 512xf32) + add_7 = paddle._C_ops.add(matmul_2, parameter_48) + del matmul_2, parameter_48 + + # pd_op.gelu: (8x-1x512xf32) <- (8x-1x512xf32) + gelu_1 = paddle._C_ops.gelu(add_7, False) + del add_7 + + # pd_op.matmul: (8x-1x128xf32) <- (8x-1x512xf32, 512x128xf32) + matmul_3 = paddle._C_ops.matmul(gelu_1, parameter_47, False, False) + del gelu_1, parameter_47 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 128xf32) + add_8 = paddle._C_ops.add(matmul_3, parameter_46) + del matmul_3, parameter_46 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 8x-1x128xf32) + add_9 = paddle._C_ops.add(layer_norm_6, add_8) + del add_8, layer_norm_6 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_45, parameter_44, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), 
+ ) + del add_9, parameter_44, parameter_45 + + # pd_op.transpose: (8x128x-1xf32) <- (8x-1x128xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_9, [0, 2, 1]) + del layer_norm_9 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_1 = [full_0, slice_0, slice_1, full_1] + del slice_0, slice_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x128x-1xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(transpose_3, stack_1) + del stack_1, transpose_3 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_1 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_1 + + # pd_op.conv2d: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + reshape_4, parameter_43, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_43 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_42, full_int_array_0) + del parameter_42 + + # pd_op.add: (8x128x-1x80xf32) <- (8x128x-1x80xf32, 1x128x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_2, reshape_5) + del conv2d_2, reshape_5 + + # pd_op.add: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 8x128x-1x80xf32) + add_11 = paddle._C_ops.add(reshape_4, add_10) + del add_10, reshape_4 + + # pd_op.flatten: (8x128x-1xf32) <- (8x128x-1x80xf32) + flatten_2 = paddle._C_ops.flatten(add_11, 2, 3) + del add_11 + + # pd_op.transpose: (8x-1x128xf32) <- (8x128x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_4, parameter_41, parameter_40, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_40, parameter_41, transpose_4 + + # pd_op.matmul: (8x-1x512xf32) <- (8x-1x128xf32, 128x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_12, parameter_39, False, False) + del parameter_39 + + # pd_op.add: (8x-1x512xf32) <- (8x-1x512xf32, 512xf32) + add_12 = paddle._C_ops.add(matmul_4, parameter_38) + del matmul_4, parameter_38 + + # pd_op.gelu: (8x-1x512xf32) <- (8x-1x512xf32) + gelu_2 = paddle._C_ops.gelu(add_12, False) + del add_12 + + # pd_op.matmul: (8x-1x128xf32) <- (8x-1x512xf32, 512x128xf32) + matmul_5 = paddle._C_ops.matmul(gelu_2, parameter_37, False, False) + del gelu_2, parameter_37 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 128xf32) + add_13 = paddle._C_ops.add(matmul_5, parameter_36) + del matmul_5, parameter_36 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 8x-1x128xf32) + add_14 = paddle._C_ops.add(layer_norm_12, add_13) + del add_13, layer_norm_12 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_35, parameter_34, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_14, parameter_34, parameter_35 + + # pd_op.transpose: 
(8x128x-1xf32) <- (8x-1x128xf32) + transpose_5 = paddle._C_ops.transpose(layer_norm_15, [0, 2, 1]) + del layer_norm_15 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_0, slice_2, slice_3, full_1] + del slice_2, slice_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x128x-1xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(transpose_5, stack_2) + del stack_2, transpose_5 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_2 = paddle._C_ops.shape64(reshape_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_2 + + # pd_op.conv2d: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_3 = paddle._C_ops.conv2d( + reshape_6, parameter_33, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_33 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_32, full_int_array_0) + del parameter_32 + + # pd_op.add: (8x128x-1x80xf32) <- (8x128x-1x80xf32, 1x128x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_3, reshape_7) + del conv2d_3, reshape_7 + + # pd_op.add: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 8x128x-1x80xf32) + add_16 = paddle._C_ops.add(reshape_6, add_15) + del add_15, reshape_6 + + # pd_op.flatten: (8x128x-1xf32) <- (8x128x-1x80xf32) + flatten_3 = paddle._C_ops.flatten(add_16, 2, 3) + del add_16 + + # pd_op.transpose: (8x-1x128xf32) <- (8x128x-1xf32) + transpose_6 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_6, parameter_31, parameter_30, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_30, parameter_31, transpose_6 + + # pd_op.matmul: (8x-1x512xf32) <- (8x-1x128xf32, 128x512xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_18, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (8x-1x512xf32) <- (8x-1x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_6, parameter_28) + del matmul_6, parameter_28 + + # pd_op.gelu: (8x-1x512xf32) <- (8x-1x512xf32) + gelu_3 = paddle._C_ops.gelu(add_17, False) + del add_17 + + # pd_op.matmul: (8x-1x128xf32) <- (8x-1x512xf32, 512x128xf32) + matmul_7 = paddle._C_ops.matmul(gelu_3, parameter_27, False, False) + del gelu_3, parameter_27 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 128xf32) + add_18 = paddle._C_ops.add(matmul_7, parameter_26) + del matmul_7, parameter_26 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 8x-1x128xf32) + add_19 = paddle._C_ops.add(layer_norm_18, add_18) + del add_18, layer_norm_18 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_19, parameter_25, parameter_24, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_19, parameter_24, parameter_25 + + # pd_op.transpose: (8x128x-1xf32) <- (8x-1x128xf32) + transpose_7 = 
paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_3 = [full_0, slice_4, slice_5, full_1] + del slice_4, slice_5 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x128x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(transpose_7, stack_3) + del stack_3, transpose_7 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_3 = paddle._C_ops.shape64(reshape_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # pd_op.conv2d: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_4 = paddle._C_ops.conv2d( + reshape_8, parameter_23, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_23 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (8x128x-1x80xf32) <- (8x128x-1x80xf32, 1x128x1x1xf32) + add_20 = paddle._C_ops.add(conv2d_4, reshape_9) + del conv2d_4, reshape_9 + + # pd_op.add: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 8x128x-1x80xf32) + add_21 = paddle._C_ops.add(reshape_8, add_20) + del add_20, reshape_8 + + # pd_op.flatten: (8x128x-1xf32) <- (8x128x-1x80xf32) + flatten_4 = paddle._C_ops.flatten(add_21, 2, 3) + del add_21 + + # pd_op.transpose: (8x-1x128xf32) <- (8x128x-1xf32) + transpose_8 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_24, layer_norm_25, layer_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_8, parameter_21, parameter_20, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_20, parameter_21, transpose_8 + + # pd_op.matmul: (8x-1x512xf32) <- (8x-1x128xf32, 128x512xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_24, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (8x-1x512xf32) <- (8x-1x512xf32, 512xf32) + add_22 = paddle._C_ops.add(matmul_8, parameter_18) + del matmul_8, parameter_18 + + # pd_op.gelu: (8x-1x512xf32) <- (8x-1x512xf32) + gelu_4 = paddle._C_ops.gelu(add_22, False) + del add_22 + + # pd_op.matmul: (8x-1x128xf32) <- (8x-1x512xf32, 512x128xf32) + matmul_9 = paddle._C_ops.matmul(gelu_4, parameter_17, False, False) + del gelu_4, parameter_17 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 128xf32) + add_23 = paddle._C_ops.add(matmul_9, parameter_16) + del matmul_9, parameter_16 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 8x-1x128xf32) + add_24 = paddle._C_ops.add(layer_norm_24, add_23) + del add_23, layer_norm_24 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_27, layer_norm_28, layer_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_24, parameter_15, parameter_14, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_24, parameter_14, parameter_15 + + # pd_op.transpose: (8x128x-1xf32) <- (8x-1x128xf32) + transpose_9 = paddle._C_ops.transpose(layer_norm_27, [0, 2, 1]) + del layer_norm_27 
+ + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [full_0, slice_6, slice_7, full_1] + del slice_6, slice_7 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x128x-1xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(transpose_9, stack_4) + del stack_4, transpose_9 + + # pd_op.shape64: (4xi64) <- (8x-1x-1x80xf32) + shape64_4 = paddle._C_ops.shape64(reshape_10) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_4 + + # pd_op.conv2d: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_5 = paddle._C_ops.conv2d( + reshape_10, parameter_13, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (8x128x-1x80xf32) <- (8x128x-1x80xf32, 1x128x1x1xf32) + add_25 = paddle._C_ops.add(conv2d_5, reshape_11) + del conv2d_5, reshape_11 + + # pd_op.add: (8x128x-1x80xf32) <- (8x-1x-1x80xf32, 8x128x-1x80xf32) + add_26 = paddle._C_ops.add(reshape_10, add_25) + del add_25, reshape_10 + + # pd_op.flatten: (8x128x-1xf32) <- (8x128x-1x80xf32) + flatten_5 = paddle._C_ops.flatten(add_26, 2, 3) + del add_26 + + # pd_op.transpose: (8x-1x128xf32) <- (8x128x-1xf32) + transpose_10 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_30, layer_norm_31, layer_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_10, parameter_11, parameter_10, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_10, parameter_11, transpose_10 + + # pd_op.matmul: (8x-1x512xf32) <- (8x-1x128xf32, 128x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_30, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (8x-1x512xf32) <- (8x-1x512xf32, 512xf32) + add_27 = paddle._C_ops.add(matmul_10, parameter_8) + del matmul_10, parameter_8 + + # pd_op.gelu: (8x-1x512xf32) <- (8x-1x512xf32) + gelu_5 = paddle._C_ops.gelu(add_27, False) + del add_27 + + # pd_op.matmul: (8x-1x128xf32) <- (8x-1x512xf32, 512x128xf32) + matmul_11 = paddle._C_ops.matmul(gelu_5, parameter_7, False, False) + del gelu_5, parameter_7 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 128xf32) + add_28 = paddle._C_ops.add(matmul_11, parameter_6) + del matmul_11, parameter_6 + + # pd_op.add: (8x-1x128xf32) <- (8x-1x128xf32, 8x-1x128xf32) + add_29 = paddle._C_ops.add(layer_norm_30, add_28) + del add_28, layer_norm_30 + + # pd_op.layer_norm: (8x-1x128xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x128xf32, 128xf32, 128xf32) + layer_norm_33, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_29, parameter_5, parameter_4, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_29, parameter_4, parameter_5 + + # pd_op.transpose: (8x128x-1xf32) <- (8x-1x128xf32) + transpose_11 = paddle._C_ops.transpose(layer_norm_33, [0, 2, 1]) + del layer_norm_33 + + # builtin.combine: ([xi64, xi64, 
xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_5 = [full_0, slice_8, slice_9, full_1] + del slice_8, slice_9 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (8x-1x-1x80xf32) <- (8x128x-1xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(transpose_11, stack_5) + del stack_5, transpose_11 + + # pd_op.conv2d: (8x256x-1x80xf32) <- (8x-1x-1x80xf32, 256x128x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + reshape_12, parameter_3, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3, reshape_12 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del full_int_array_0, parameter_2 + + # pd_op.add: (8x256x-1x80xf32) <- (8x256x-1x80xf32, 1x256x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_6, reshape_13) + del conv2d_6, reshape_13 + + # pd_op.shape64: (4xi64) <- (8x256x-1x80xf32) + shape64_5 = paddle._C_ops.shape64(add_30) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_5 + + # pd_op.flatten: (8x256x-1xf32) <- (8x256x-1x80xf32) + flatten_6 = paddle._C_ops.flatten(add_30, 2, 3) + del add_30 + + # pd_op.transpose: (8x-1x256xf32) <- (8x256x-1xf32) + transpose_12 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.layer_norm: (8x-1x256xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x256xf32, 256xf32, 256xf32) + layer_norm_36, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_12, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_0, parameter_1, transpose_12 + + # pd_op.transpose: (8x256x-1xf32) <- (8x-1x256xf32) + transpose_13 = paddle._C_ops.transpose(layer_norm_36, [0, 2, 1]) + del layer_norm_36 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("256"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [full_0, full_2, slice_10, full_1] + del full_0, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (8x256x-1x80xf32) <- (8x256x-1xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(transpose_13, stack_6) + del slice_10, stack_6, transpose_13 + + return reshape_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/weight_meta.py new file mode 100644 index 000000000..646429dac --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/weight_meta.py @@ -0,0 +1,702 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [256] + dtype = "float32" + min_val = float("-0.666999") + max_val = float("1.94454") + mean = float("-0.0632324") + std = float("0.229855") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256] + dtype = "float32" + min_val = float("0.187662") + max_val = float("2.04423") + mean = float("1.29981") + std = float("0.242534") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-8.25495") + max_val = float("0.841741") + mean = float("-0.0148986") + std = float("0.558256") + data = None + 
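+# Each Program_weight_tensor_parameter_N class describes one parameter of the
+# captured subgraph: its name, shape, dtype and summary statistics
+# (min_val / max_val / mean / std). data = None means the raw values are not
+# stored in this file; presumably the sample runner synthesizes tensors that
+# match these statistics when replaying the graph.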
+ +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256, 128, 3, 3] + dtype = "float32" + min_val = float("-6.28341") + max_val = float("7.55648") + mean = float("-0.000141799") + std = float("0.409401") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [128] + dtype = "float32" + min_val = float("-0.716555") + max_val = float("0.756297") + mean = float("-0.0932256") + std = float("0.229428") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [128] + dtype = "float32" + min_val = float("0.407815") + max_val = float("1.66548") + mean = float("1.24021") + std = float("0.174828") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [128] + dtype = "float32" + min_val = float("-4.0857") + max_val = float("0.55345") + mean = float("-0.00504505") + std = float("0.405061") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.465604") + max_val = float("0.41407") + mean = float("8.15453e-06") + std = float("0.0631297") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-1.89715") + max_val = float("-0.00380283") + mean = float("-0.661344") + std = float("0.276575") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.323395") + max_val = float("0.34724") + mean = float("-0.000334755") + std = float("0.0640659") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [128] + dtype = "float32" + min_val = float("-0.681688") + max_val = float("0.411228") + mean = float("-0.0107829") + std = float("0.165223") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [128] + dtype = "float32" + min_val = float("0.570606") + max_val = float("2.4798") + mean = float("0.914432") + std = float("0.193946") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [128] + dtype = "float32" + min_val = float("-0.711524") + max_val = float("1.03562") + mean = float("0.00274896") + std = float("0.290359") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.396919") + max_val = float("0.347899") + mean = float("0.00133062") + std = float("0.0437229") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [128] + dtype = "float32" + min_val = float("-0.594578") + max_val = float("0.727502") + mean = float("-0.0827572") + std = float("0.190148") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [128] + dtype = "float32" + min_val = float("0.276318") + max_val = float("1.92052") + mean = float("1.2893") + std = float("0.253956") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [128] + dtype = "float32" + min_val = float("-5.11606") + max_val = float("0.435776") + mean = float("-0.00641674") + std = float("0.478598") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.816106") + max_val = float("0.75502") + mean = float("3.2276e-05") + std = float("0.0694773") + data 
= None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512] + dtype = "float32" + min_val = float("-1.37513") + max_val = float("-0.0663068") + mean = float("-0.727074") + std = float("0.184327") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.404452") + max_val = float("0.318467") + mean = float("-0.001074") + std = float("0.0674755") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [128] + dtype = "float32" + min_val = float("-0.816263") + max_val = float("0.705552") + mean = float("0.018937") + std = float("0.203023") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [128] + dtype = "float32" + min_val = float("0.681668") + max_val = float("2.07793") + mean = float("0.965596") + std = float("0.155191") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [128] + dtype = "float32" + min_val = float("-0.884613") + max_val = float("1.11801") + mean = float("-0.0047423") + std = float("0.310113") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.316914") + max_val = float("0.311167") + mean = float("0.00120938") + std = float("0.0446949") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [128] + dtype = "float32" + min_val = float("-0.548556") + max_val = float("0.484334") + mean = float("0.0476606") + std = float("0.164417") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [128] + dtype = "float32" + min_val = float("0.0658702") + max_val = float("1.88199") + mean = float("1.29018") + std = float("0.230061") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [128] + dtype = "float32" + min_val = float("-0.831083") + max_val = float("10.3151") + mean = float("0.0614709") + std = float("0.927821") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.989168") + max_val = float("0.940987") + mean = float("9.70106e-05") + std = float("0.0723179") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [512] + dtype = "float32" + min_val = float("-1.56143") + max_val = float("-0.146009") + mean = float("-0.720624") + std = float("0.207032") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.412412") + max_val = float("0.376367") + mean = float("-0.00148105") + std = float("0.0677546") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [128] + dtype = "float32" + min_val = float("-0.705766") + max_val = float("0.875929") + mean = float("0.0385367") + std = float("0.225602") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [128] + dtype = "float32" + min_val = float("0.782324") + max_val = float("1.9462") + mean = float("1.00761") + std = float("0.135489") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [128] + dtype = "float32" + min_val = float("-0.830154") + max_val = float("1.01877") + mean = float("-0.0104181") + std = float("0.291501") 
+ data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.278542") + max_val = float("0.369799") + mean = float("0.000775973") + std = float("0.0439049") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [128] + dtype = "float32" + min_val = float("-1.43719") + max_val = float("0.951975") + mean = float("-0.0416506") + std = float("0.255307") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [128] + dtype = "float32" + min_val = float("0.27034") + max_val = float("2.00745") + mean = float("1.31189") + std = float("0.265053") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [128] + dtype = "float32" + min_val = float("-4.81183") + max_val = float("4.33146") + mean = float("0.0110147") + std = float("0.608121") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.663282") + max_val = float("0.866291") + mean = float("0.000100857") + std = float("0.0717828") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [512] + dtype = "float32" + min_val = float("-1.38788") + max_val = float("0.0592296") + mean = float("-0.715335") + std = float("0.20229") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.332646") + max_val = float("0.354521") + mean = float("0.00102769") + std = float("0.0673979") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [128] + dtype = "float32" + min_val = float("-1.20981") + max_val = float("0.7807") + mean = float("-0.0443681") + std = float("0.253751") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [128] + dtype = "float32" + min_val = float("0.69911") + max_val = float("1.38123") + mean = float("1.03224") + std = float("0.135013") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [128] + dtype = "float32" + min_val = float("-1.06287") + max_val = float("1.09272") + mean = float("0.0110842") + std = float("0.326265") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.350237") + max_val = float("0.425112") + mean = float("0.0011219") + std = float("0.0431467") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [128] + dtype = "float32" + min_val = float("-0.974087") + max_val = float("0.883122") + mean = float("-0.0672362") + std = float("0.257969") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [128] + dtype = "float32" + min_val = float("0.304824") + max_val = float("1.90846") + mean = float("1.2968") + std = float("0.25675") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [128] + dtype = "float32" + min_val = float("-4.93988") + max_val = float("1.00487") + mean = float("-0.000311874") + std = float("0.48712") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.628975") + max_val = float("0.454042") + mean = float("0.000128257") + std = 
float("0.0687935") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [512] + dtype = "float32" + min_val = float("-1.7681") + max_val = float("-0.236422") + mean = float("-0.739609") + std = float("0.206943") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.369958") + max_val = float("0.376997") + mean = float("-0.000334754") + std = float("0.0662186") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [128] + dtype = "float32" + min_val = float("-0.941021") + max_val = float("0.538957") + mean = float("-0.00319627") + std = float("0.214387") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [128] + dtype = "float32" + min_val = float("0.628951") + max_val = float("1.69866") + mean = float("1.07869") + std = float("0.154158") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [128] + dtype = "float32" + min_val = float("-1.64539") + max_val = float("0.970554") + mean = float("-0.00739898") + std = float("0.381025") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.422591") + max_val = float("0.508687") + mean = float("0.000882156") + std = float("0.0425074") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [128] + dtype = "float32" + min_val = float("-1.0661") + max_val = float("0.668543") + mean = float("0.0115453") + std = float("0.317515") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [128] + dtype = "float32" + min_val = float("0.480466") + max_val = float("1.97896") + mean = float("1.18633") + std = float("0.265363") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [128] + dtype = "float32" + min_val = float("-0.885874") + max_val = float("2.55066") + mean = float("-0.00497777") + std = float("0.347597") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.399023") + max_val = float("0.603667") + mean = float("0.000401173") + std = float("0.0665492") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [512] + dtype = "float32" + min_val = float("-1.34575") + max_val = float("0.110961") + mean = float("-0.665135") + std = float("0.258847") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.54626") + max_val = float("0.616623") + mean = float("-0.00198333") + std = float("0.0660375") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [128] + dtype = "float32" + min_val = float("-0.778003") + max_val = float("1.003") + mean = float("0.0770851") + std = float("0.312652") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [128] + dtype = "float32" + min_val = float("0.535087") + max_val = float("1.87554") + mean = float("1.12663") + std = float("0.232375") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [128] + dtype = "float32" + min_val = float("-0.822469") + max_val = float("2.60999") + mean = 
float("-0.00431402") + std = float("0.390087") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.482774") + max_val = float("0.593399") + mean = float("0.00156795") + std = float("0.0440294") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt new file mode 100644 index 000000000..b0ce44625 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt @@ -0,0 +1 @@ +9d7ab6c820abffef8decdd1fcf438007f183b463b97bbdb1444e070cdd4f95ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/input_meta.py new file mode 100644 index 000000000..ff505c6cb --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/input_meta.py @@ -0,0 +1,16 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [4, 240, 384] + dtype = "float32" + min_val = float("-6.95054") + max_val = float("7.74043") + mean = float("0.00676179") + std = float("0.47165") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/model.py new file mode 100644 index 000000000..37a850498 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/model.py @@ -0,0 +1,59 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, data_0, data_1): + # pd_op.transpose: (-1x-1x-1xf32) <- (-1x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("80"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, data_0, full_1, full_2] + del data_0, full_0, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x3x80xf32) <- (-1x-1x-1xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [3, 2] + + # pd_op.pool2d: (-1x-1x1x40xf32) <- (-1x-1x3x80xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + reshape_0, + full_int_array_0, + [3, 2], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + False, + "EXPLICIT", + ) + del full_int_array_0, reshape_0 + + return pool2d_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/weight_meta.py 
b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/weight_meta.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/weight_meta.py @@ -0,0 +1 @@ + diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt new file mode 100644 index 000000000..fa1eb2a2f --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt @@ -0,0 +1 @@ +6aea37a6646f9874b39ff517b9581e94a17162f998d0c2463c446cf911c23a6e \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/input_meta.py new file mode 100644 index 000000000..6ef527d86 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/input_meta.py @@ -0,0 +1,23 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [128] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [4, 128, 12, 80] + dtype = "float32" + min_val = float("-0.169971") + max_val = float("49.5854") + mean = float("0.248544") + std = float("1.02496") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/model.py new file mode 100644 index 000000000..651a309e9 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/model.py @@ -0,0 +1,769 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + data_0, + data_1, + data_2, + ): + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_2, parameter_63, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_63 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_1 = 
paddle._C_ops.reshape(parameter_62, full_int_array_0) + del parameter_62 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, -1x128x-1x80xf32) + add_1 = paddle._C_ops.add(data_2, add_0) + del add_0, data_2 + + # pd_op.flatten: (-1x128x-1xf32) <- (-1x128x-1x80xf32) + flatten_0 = paddle._C_ops.flatten(add_1, 2, 3) + del add_1 + + # pd_op.transpose: (-1x-1x128xf32) <- (-1x128x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_61, parameter_60, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_60, parameter_61, transpose_0 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x128xf32, 128x512xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_59, False, False) + del parameter_59 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_2 = paddle._C_ops.add(matmul_0, parameter_58) + del matmul_0, parameter_58 + + # pd_op.gelu: (-1x-1x512xf32) <- (-1x-1x512xf32) + gelu_0 = paddle._C_ops.gelu(add_2, False) + del add_2 + + # pd_op.matmul: (-1x-1x128xf32) <- (-1x-1x512xf32, 512x128xf32) + matmul_1 = paddle._C_ops.matmul(gelu_0, parameter_57, False, False) + del gelu_0, parameter_57 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, 128xf32) + add_3 = paddle._C_ops.add(matmul_1, parameter_56) + del matmul_1, parameter_56 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, -1x-1x128xf32) + add_4 = paddle._C_ops.add(layer_norm_0, add_3) + del add_3, layer_norm_0 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_4, parameter_55, parameter_54, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_4, parameter_54, parameter_55 + + # pd_op.transpose: (-1x128x-1xf32) <- (-1x-1x128xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_3, [0, 2, 1]) + del layer_norm_3 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("80"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, data_0, data_1, full_1] + del data_0, data_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x128x-1xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(transpose_1, stack_0) + del stack_0, transpose_1 + + # pd_op.shape64: (4xi64) <- (-1x-1x-1x80xf32) + shape64_0 = paddle._C_ops.shape64(reshape_2) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = 
paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_0 + + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_2, parameter_53, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_53 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_52, full_int_array_0) + del parameter_52 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_1, reshape_3) + del conv2d_1, reshape_3 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, -1x128x-1x80xf32) + add_6 = paddle._C_ops.add(reshape_2, add_5) + del add_5, reshape_2 + + # pd_op.flatten: (-1x128x-1xf32) <- (-1x128x-1x80xf32) + flatten_1 = paddle._C_ops.flatten(add_6, 2, 3) + del add_6 + + # pd_op.transpose: (-1x-1x128xf32) <- (-1x128x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_51, parameter_50, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_50, parameter_51, transpose_2 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x128xf32, 128x512xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_6, parameter_49, False, False) + del parameter_49 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_7 = paddle._C_ops.add(matmul_2, parameter_48) + del matmul_2, parameter_48 + + # pd_op.gelu: (-1x-1x512xf32) <- (-1x-1x512xf32) + gelu_1 = paddle._C_ops.gelu(add_7, False) + del add_7 + + # pd_op.matmul: (-1x-1x128xf32) <- (-1x-1x512xf32, 512x128xf32) + matmul_3 = paddle._C_ops.matmul(gelu_1, parameter_47, False, False) + del gelu_1, parameter_47 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, 128xf32) + add_8 = paddle._C_ops.add(matmul_3, parameter_46) + del matmul_3, parameter_46 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, -1x-1x128xf32) + add_9 = paddle._C_ops.add(layer_norm_6, add_8) + del add_8, layer_norm_6 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_45, parameter_44, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_9, parameter_44, parameter_45 + + # pd_op.transpose: (-1x128x-1xf32) <- (-1x-1x128xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_9, [0, 2, 1]) + del layer_norm_9 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_1 = [full_0, slice_1, slice_2, full_1] + del slice_1, slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x128x-1xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(transpose_3, stack_1) + del stack_1, transpose_3 + + # pd_op.shape64: (4xi64) <- (-1x-1x-1x80xf32) + shape64_1 = paddle._C_ops.shape64(reshape_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + 
slice_3 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_1 + + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + reshape_4, parameter_43, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_43 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_42, full_int_array_0) + del parameter_42 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_2, reshape_5) + del conv2d_2, reshape_5 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, -1x128x-1x80xf32) + add_11 = paddle._C_ops.add(reshape_4, add_10) + del add_10, reshape_4 + + # pd_op.flatten: (-1x128x-1xf32) <- (-1x128x-1x80xf32) + flatten_2 = paddle._C_ops.flatten(add_11, 2, 3) + del add_11 + + # pd_op.transpose: (-1x-1x128xf32) <- (-1x128x-1xf32) + transpose_4 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_4, parameter_41, parameter_40, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_40, parameter_41, transpose_4 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x128xf32, 128x512xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_12, parameter_39, False, False) + del parameter_39 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_12 = paddle._C_ops.add(matmul_4, parameter_38) + del matmul_4, parameter_38 + + # pd_op.gelu: (-1x-1x512xf32) <- (-1x-1x512xf32) + gelu_2 = paddle._C_ops.gelu(add_12, False) + del add_12 + + # pd_op.matmul: (-1x-1x128xf32) <- (-1x-1x512xf32, 512x128xf32) + matmul_5 = paddle._C_ops.matmul(gelu_2, parameter_37, False, False) + del gelu_2, parameter_37 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, 128xf32) + add_13 = paddle._C_ops.add(matmul_5, parameter_36) + del matmul_5, parameter_36 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, -1x-1x128xf32) + add_14 = paddle._C_ops.add(layer_norm_12, add_13) + del add_13, layer_norm_12 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_35, parameter_34, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_14, parameter_34, parameter_35 + + # pd_op.transpose: (-1x128x-1xf32) <- (-1x-1x128xf32) + transpose_5 = paddle._C_ops.transpose(layer_norm_15, [0, 2, 1]) + del layer_norm_15 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [full_0, slice_4, slice_5, full_1] + del slice_4, slice_5 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x128x-1xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(transpose_5, stack_2) + del stack_2, transpose_5 + + # pd_op.shape64: (4xi64) <- 
(-1x-1x-1x80xf32) + shape64_2 = paddle._C_ops.shape64(reshape_6) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_2 + + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_3 = paddle._C_ops.conv2d( + reshape_6, parameter_33, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_33 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_32, full_int_array_0) + del parameter_32 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_3, reshape_7) + del conv2d_3, reshape_7 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, -1x128x-1x80xf32) + add_16 = paddle._C_ops.add(reshape_6, add_15) + del add_15, reshape_6 + + # pd_op.flatten: (-1x128x-1xf32) <- (-1x128x-1x80xf32) + flatten_3 = paddle._C_ops.flatten(add_16, 2, 3) + del add_16 + + # pd_op.transpose: (-1x-1x128xf32) <- (-1x128x-1xf32) + transpose_6 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_6, parameter_31, parameter_30, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_30, parameter_31, transpose_6 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x128xf32, 128x512xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_18, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_17 = paddle._C_ops.add(matmul_6, parameter_28) + del matmul_6, parameter_28 + + # pd_op.gelu: (-1x-1x512xf32) <- (-1x-1x512xf32) + gelu_3 = paddle._C_ops.gelu(add_17, False) + del add_17 + + # pd_op.matmul: (-1x-1x128xf32) <- (-1x-1x512xf32, 512x128xf32) + matmul_7 = paddle._C_ops.matmul(gelu_3, parameter_27, False, False) + del gelu_3, parameter_27 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, 128xf32) + add_18 = paddle._C_ops.add(matmul_7, parameter_26) + del matmul_7, parameter_26 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, -1x-1x128xf32) + add_19 = paddle._C_ops.add(layer_norm_18, add_18) + del add_18, layer_norm_18 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_19, parameter_25, parameter_24, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_19, parameter_24, parameter_25 + + # pd_op.transpose: (-1x128x-1xf32) <- (-1x-1x128xf32) + transpose_7 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_3 = [full_0, slice_7, slice_8, full_1] + del slice_7, slice_8 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x128x-1xf32, 
4xi64) + reshape_8 = paddle._C_ops.reshape(transpose_7, stack_3) + del stack_3, transpose_7 + + # pd_op.shape64: (4xi64) <- (-1x-1x-1x80xf32) + shape64_3 = paddle._C_ops.shape64(reshape_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_3 + + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_4 = paddle._C_ops.conv2d( + reshape_8, parameter_23, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_23 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_20 = paddle._C_ops.add(conv2d_4, reshape_9) + del conv2d_4, reshape_9 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, -1x128x-1x80xf32) + add_21 = paddle._C_ops.add(reshape_8, add_20) + del add_20, reshape_8 + + # pd_op.flatten: (-1x128x-1xf32) <- (-1x128x-1x80xf32) + flatten_4 = paddle._C_ops.flatten(add_21, 2, 3) + del add_21 + + # pd_op.transpose: (-1x-1x128xf32) <- (-1x128x-1xf32) + transpose_8 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_24, layer_norm_25, layer_norm_26 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_8, parameter_21, parameter_20, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_20, parameter_21, transpose_8 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x128xf32, 128x512xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_24, parameter_19, False, False) + del parameter_19 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_22 = paddle._C_ops.add(matmul_8, parameter_18) + del matmul_8, parameter_18 + + # pd_op.gelu: (-1x-1x512xf32) <- (-1x-1x512xf32) + gelu_4 = paddle._C_ops.gelu(add_22, False) + del add_22 + + # pd_op.matmul: (-1x-1x128xf32) <- (-1x-1x512xf32, 512x128xf32) + matmul_9 = paddle._C_ops.matmul(gelu_4, parameter_17, False, False) + del gelu_4, parameter_17 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, 128xf32) + add_23 = paddle._C_ops.add(matmul_9, parameter_16) + del matmul_9, parameter_16 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, -1x-1x128xf32) + add_24 = paddle._C_ops.add(layer_norm_24, add_23) + del add_23, layer_norm_24 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_27, layer_norm_28, layer_norm_29 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_24, parameter_15, parameter_14, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_24, parameter_14, parameter_15 + + # pd_op.transpose: (-1x128x-1xf32) <- (-1x-1x128xf32) + transpose_9 = paddle._C_ops.transpose(layer_norm_27, [0, 2, 1]) + del layer_norm_27 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [full_0, slice_10, slice_11, full_1] + del slice_10, slice_11 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, 
xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x128x-1xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(transpose_9, stack_4) + del stack_4, transpose_9 + + # pd_op.shape64: (4xi64) <- (-1x-1x-1x80xf32) + shape64_4 = paddle._C_ops.shape64(reshape_10) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_4 + + # pd_op.conv2d: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, 128x32x5x5xf32) + conv2d_5 = paddle._C_ops.conv2d( + reshape_10, parameter_13, [1, 1], [2, 2], "EXPLICIT", [1, 1], 4, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x128x1x1xf32) <- (128xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x128x-1x80xf32, 1x128x1x1xf32) + add_25 = paddle._C_ops.add(conv2d_5, reshape_11) + del conv2d_5, reshape_11 + + # pd_op.add: (-1x128x-1x80xf32) <- (-1x-1x-1x80xf32, -1x128x-1x80xf32) + add_26 = paddle._C_ops.add(reshape_10, add_25) + del add_25, reshape_10 + + # pd_op.flatten: (-1x128x-1xf32) <- (-1x128x-1x80xf32) + flatten_5 = paddle._C_ops.flatten(add_26, 2, 3) + del add_26 + + # pd_op.transpose: (-1x-1x128xf32) <- (-1x128x-1xf32) + transpose_10 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_30, layer_norm_31, layer_norm_32 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_10, parameter_11, parameter_10, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_10, parameter_11, transpose_10 + + # pd_op.matmul: (-1x-1x512xf32) <- (-1x-1x128xf32, 128x512xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_30, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (-1x-1x512xf32) <- (-1x-1x512xf32, 512xf32) + add_27 = paddle._C_ops.add(matmul_10, parameter_8) + del matmul_10, parameter_8 + + # pd_op.gelu: (-1x-1x512xf32) <- (-1x-1x512xf32) + gelu_5 = paddle._C_ops.gelu(add_27, False) + del add_27 + + # pd_op.matmul: (-1x-1x128xf32) <- (-1x-1x512xf32, 512x128xf32) + matmul_11 = paddle._C_ops.matmul(gelu_5, parameter_7, False, False) + del gelu_5, parameter_7 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, 128xf32) + add_28 = paddle._C_ops.add(matmul_11, parameter_6) + del matmul_11, parameter_6 + + # pd_op.add: (-1x-1x128xf32) <- (-1x-1x128xf32, -1x-1x128xf32) + add_29 = paddle._C_ops.add(layer_norm_30, add_28) + del add_28, layer_norm_30 + + # pd_op.layer_norm: (-1x-1x128xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x128xf32, 128xf32, 128xf32) + layer_norm_33, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_29, parameter_5, parameter_4, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_29, parameter_4, parameter_5 + + # pd_op.transpose: (-1x128x-1xf32) <- (-1x-1x128xf32) + transpose_11 = paddle._C_ops.transpose(layer_norm_33, [0, 2, 1]) + del layer_norm_33 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, 
xi64) + combine_5 = [full_0, slice_13, slice_14, full_1] + del slice_13, slice_14 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x128x-1xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(transpose_11, stack_5) + del stack_5, transpose_11 + + # pd_op.conv2d: (-1x256x-1x80xf32) <- (-1x-1x-1x80xf32, 256x128x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + reshape_12, parameter_3, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3, reshape_12 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del full_int_array_0, parameter_2 + + # pd_op.add: (-1x256x-1x80xf32) <- (-1x256x-1x80xf32, 1x256x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_6, reshape_13) + del conv2d_6, reshape_13 + + # pd_op.shape64: (4xi64) <- (-1x256x-1x80xf32) + shape64_5 = paddle._C_ops.shape64(add_30) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2 + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4, shape64_5 + + # pd_op.flatten: (-1x256x-1xf32) <- (-1x256x-1x80xf32) + flatten_6 = paddle._C_ops.flatten(add_30, 2, 3) + del add_30 + + # pd_op.transpose: (-1x-1x256xf32) <- (-1x256x-1xf32) + transpose_12 = paddle._C_ops.transpose(flatten_6, [0, 2, 1]) + del flatten_6 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_36, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_12, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_0, parameter_1, transpose_12 + + # pd_op.transpose: (-1x256x-1xf32) <- (-1x-1x256xf32) + transpose_13 = paddle._C_ops.transpose(layer_norm_36, [0, 2, 1]) + del layer_norm_36 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("256"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [full_0, full_2, slice_16, full_1] + del full_0, full_1, full_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (-1x256x-1x80xf32) <- (-1x256x-1xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(transpose_13, stack_6) + del slice_16, stack_6, transpose_13 + + return reshape_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/weight_meta.py new file mode 100644 index 000000000..646429dac --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/weight_meta.py @@ -0,0 +1,702 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [256] + dtype = "float32" + min_val = float("-0.666999") + max_val = float("1.94454") + mean = float("-0.0632324") + std = float("0.229855") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [256] + dtype = "float32" + min_val = float("0.187662") + max_val = float("2.04423") + mean = float("1.29981") + std = float("0.242534") + data = None + + +class Program_weight_tensor_parameter_2: + name = 
"parameter_2" + shape = [256] + dtype = "float32" + min_val = float("-8.25495") + max_val = float("0.841741") + mean = float("-0.0148986") + std = float("0.558256") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [256, 128, 3, 3] + dtype = "float32" + min_val = float("-6.28341") + max_val = float("7.55648") + mean = float("-0.000141799") + std = float("0.409401") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [128] + dtype = "float32" + min_val = float("-0.716555") + max_val = float("0.756297") + mean = float("-0.0932256") + std = float("0.229428") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [128] + dtype = "float32" + min_val = float("0.407815") + max_val = float("1.66548") + mean = float("1.24021") + std = float("0.174828") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [128] + dtype = "float32" + min_val = float("-4.0857") + max_val = float("0.55345") + mean = float("-0.00504505") + std = float("0.405061") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.465604") + max_val = float("0.41407") + mean = float("8.15453e-06") + std = float("0.0631297") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [512] + dtype = "float32" + min_val = float("-1.89715") + max_val = float("-0.00380283") + mean = float("-0.661344") + std = float("0.276575") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.323395") + max_val = float("0.34724") + mean = float("-0.000334755") + std = float("0.0640659") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [128] + dtype = "float32" + min_val = float("-0.681688") + max_val = float("0.411228") + mean = float("-0.0107829") + std = float("0.165223") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [128] + dtype = "float32" + min_val = float("0.570606") + max_val = float("2.4798") + mean = float("0.914432") + std = float("0.193946") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [128] + dtype = "float32" + min_val = float("-0.711524") + max_val = float("1.03562") + mean = float("0.00274896") + std = float("0.290359") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.396919") + max_val = float("0.347899") + mean = float("0.00133062") + std = float("0.0437229") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [128] + dtype = "float32" + min_val = float("-0.594578") + max_val = float("0.727502") + mean = float("-0.0827572") + std = float("0.190148") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [128] + dtype = "float32" + min_val = float("0.276318") + max_val = float("1.92052") + mean = float("1.2893") + std = float("0.253956") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [128] + dtype = "float32" + min_val = float("-5.11606") + max_val = float("0.435776") + mean = float("-0.00641674") + std = float("0.478598") + data = None + + +class Program_weight_tensor_parameter_17: + name 
= "parameter_17" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.816106") + max_val = float("0.75502") + mean = float("3.2276e-05") + std = float("0.0694773") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [512] + dtype = "float32" + min_val = float("-1.37513") + max_val = float("-0.0663068") + mean = float("-0.727074") + std = float("0.184327") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.404452") + max_val = float("0.318467") + mean = float("-0.001074") + std = float("0.0674755") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [128] + dtype = "float32" + min_val = float("-0.816263") + max_val = float("0.705552") + mean = float("0.018937") + std = float("0.203023") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [128] + dtype = "float32" + min_val = float("0.681668") + max_val = float("2.07793") + mean = float("0.965596") + std = float("0.155191") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [128] + dtype = "float32" + min_val = float("-0.884613") + max_val = float("1.11801") + mean = float("-0.0047423") + std = float("0.310113") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.316914") + max_val = float("0.311167") + mean = float("0.00120938") + std = float("0.0446949") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [128] + dtype = "float32" + min_val = float("-0.548556") + max_val = float("0.484334") + mean = float("0.0476606") + std = float("0.164417") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [128] + dtype = "float32" + min_val = float("0.0658702") + max_val = float("1.88199") + mean = float("1.29018") + std = float("0.230061") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [128] + dtype = "float32" + min_val = float("-0.831083") + max_val = float("10.3151") + mean = float("0.0614709") + std = float("0.927821") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.989168") + max_val = float("0.940987") + mean = float("9.70106e-05") + std = float("0.0723179") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [512] + dtype = "float32" + min_val = float("-1.56143") + max_val = float("-0.146009") + mean = float("-0.720624") + std = float("0.207032") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.412412") + max_val = float("0.376367") + mean = float("-0.00148105") + std = float("0.0677546") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [128] + dtype = "float32" + min_val = float("-0.705766") + max_val = float("0.875929") + mean = float("0.0385367") + std = float("0.225602") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [128] + dtype = "float32" + min_val = float("0.782324") + max_val = float("1.9462") + mean = float("1.00761") + std = float("0.135489") + data = None + + +class 
Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [128] + dtype = "float32" + min_val = float("-0.830154") + max_val = float("1.01877") + mean = float("-0.0104181") + std = float("0.291501") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.278542") + max_val = float("0.369799") + mean = float("0.000775973") + std = float("0.0439049") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [128] + dtype = "float32" + min_val = float("-1.43719") + max_val = float("0.951975") + mean = float("-0.0416506") + std = float("0.255307") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [128] + dtype = "float32" + min_val = float("0.27034") + max_val = float("2.00745") + mean = float("1.31189") + std = float("0.265053") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [128] + dtype = "float32" + min_val = float("-4.81183") + max_val = float("4.33146") + mean = float("0.0110147") + std = float("0.608121") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.663282") + max_val = float("0.866291") + mean = float("0.000100857") + std = float("0.0717828") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [512] + dtype = "float32" + min_val = float("-1.38788") + max_val = float("0.0592296") + mean = float("-0.715335") + std = float("0.20229") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.332646") + max_val = float("0.354521") + mean = float("0.00102769") + std = float("0.0673979") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [128] + dtype = "float32" + min_val = float("-1.20981") + max_val = float("0.7807") + mean = float("-0.0443681") + std = float("0.253751") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [128] + dtype = "float32" + min_val = float("0.69911") + max_val = float("1.38123") + mean = float("1.03224") + std = float("0.135013") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [128] + dtype = "float32" + min_val = float("-1.06287") + max_val = float("1.09272") + mean = float("0.0110842") + std = float("0.326265") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.350237") + max_val = float("0.425112") + mean = float("0.0011219") + std = float("0.0431467") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [128] + dtype = "float32" + min_val = float("-0.974087") + max_val = float("0.883122") + mean = float("-0.0672362") + std = float("0.257969") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [128] + dtype = "float32" + min_val = float("0.304824") + max_val = float("1.90846") + mean = float("1.2968") + std = float("0.25675") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [128] + dtype = "float32" + min_val = float("-4.93988") + max_val = float("1.00487") + mean = float("-0.000311874") + std = float("0.48712") + data = None + + 
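# Illustrative note: each Program_weight_tensor_parameter_* class in this file
# records only metadata for one weight tensor -- its name, shape, dtype, and
# summary statistics (min/max/mean/std); `data = None` means the raw values are
# not embedded here. The sketch below shows one way such an entry *could* be
# turned into a placeholder tensor for smoke-testing the subgraph; the helper
# name and the normal-distribution assumption are illustrative only and are
# not part of the GraphNet tooling.
def _synthesize_placeholder(meta):
    """Draw a random tensor matching `meta`'s shape, dtype, and statistics."""
    import numpy as np

    if getattr(meta, "data", None) is not None:
        # Small tensors store their values inline.
        return np.asarray(meta.data, dtype=meta.dtype).reshape(meta.shape)
    # Otherwise sample from N(mean, std) and clip to the recorded range.
    values = np.random.normal(meta.mean, meta.std, size=meta.shape)
    return np.clip(values, meta.min_val, meta.max_val).astype(meta.dtype)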
+class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.628975") + max_val = float("0.454042") + mean = float("0.000128257") + std = float("0.0687935") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [512] + dtype = "float32" + min_val = float("-1.7681") + max_val = float("-0.236422") + mean = float("-0.739609") + std = float("0.206943") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.369958") + max_val = float("0.376997") + mean = float("-0.000334754") + std = float("0.0662186") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [128] + dtype = "float32" + min_val = float("-0.941021") + max_val = float("0.538957") + mean = float("-0.00319627") + std = float("0.214387") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [128] + dtype = "float32" + min_val = float("0.628951") + max_val = float("1.69866") + mean = float("1.07869") + std = float("0.154158") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [128] + dtype = "float32" + min_val = float("-1.64539") + max_val = float("0.970554") + mean = float("-0.00739898") + std = float("0.381025") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.422591") + max_val = float("0.508687") + mean = float("0.000882156") + std = float("0.0425074") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [128] + dtype = "float32" + min_val = float("-1.0661") + max_val = float("0.668543") + mean = float("0.0115453") + std = float("0.317515") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [128] + dtype = "float32" + min_val = float("0.480466") + max_val = float("1.97896") + mean = float("1.18633") + std = float("0.265363") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [128] + dtype = "float32" + min_val = float("-0.885874") + max_val = float("2.55066") + mean = float("-0.00497777") + std = float("0.347597") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [512, 128] + dtype = "float32" + min_val = float("-0.399023") + max_val = float("0.603667") + mean = float("0.000401173") + std = float("0.0665492") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [512] + dtype = "float32" + min_val = float("-1.34575") + max_val = float("0.110961") + mean = float("-0.665135") + std = float("0.258847") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [128, 512] + dtype = "float32" + min_val = float("-0.54626") + max_val = float("0.616623") + mean = float("-0.00198333") + std = float("0.0660375") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [128] + dtype = "float32" + min_val = float("-0.778003") + max_val = float("1.003") + mean = float("0.0770851") + std = float("0.312652") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [128] + dtype = "float32" + min_val = float("0.535087") + max_val = float("1.87554") + mean = float("1.12663") + std = float("0.232375") + 
data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [128] + dtype = "float32" + min_val = float("-0.822469") + max_val = float("2.60999") + mean = float("-0.00431402") + std = float("0.390087") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [128, 32, 5, 5] + dtype = "float32" + min_val = float("-0.482774") + max_val = float("0.593399") + mean = float("0.00156795") + std = float("0.0440294") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt new file mode 100644 index 000000000..9957af478 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt @@ -0,0 +1 @@ +309279f5fdddd40671adb92dc0fe936fd80e7a0bc172cfd322a6f45250640d9a \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_net.json @@ -0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/input_meta.py new file mode 100644 index 000000000..629175ff7 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/input_meta.py @@ -0,0 +1,51 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [12] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [384] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 160, 384] + dtype = "float32" + min_val = float("-5.65986") + max_val = float("4.19296") + mean = float("0.00409935") + std = float("0.428938") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/model.py new file mode 100644 index 000000000..e956ff8e6 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/model.py @@ -0,0 +1,1918 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + 
parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + ): + # pd_op.matmul: (8x-1x1152xf32) <- (8x-1x-1xf32, 384x1152xf32) + matmul_0 = paddle._C_ops.matmul(data_6, parameter_71, False, False) + del parameter_71 + + # pd_op.add: (8x-1x1152xf32) <- (8x-1x1152xf32, 1152xf32) + add_0 = paddle._C_ops.add(matmul_0, parameter_70) + del parameter_70 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_0 = [0, -1, 3, 12, 32] + + # pd_op.reshape: (8x-1x3x12x32xf32) <- (8x-1x1152xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_0, full_int_array_0) + + # pd_op.transpose: (3x8x12x-1x32xf32) <- (8x-1x3x12x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_14 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_15 = full_int_array_2 + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + transpose_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_3 + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + transpose_0, [0], 
full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_4 + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + transpose_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x12x32x-1xf32) <- (8x12x-1x32xf32) + transpose_1 = paddle._C_ops.transpose(slice_1, [0, 1, 3, 2]) + del slice_1 + + # pd_op.matmul: (8x12x-1x-1xf32) <- (8x12x-1x32xf32, 8x12x32x-1xf32) + matmul_1 = paddle._C_ops.matmul(slice_0, transpose_1, False, False) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_32 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_33 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_34 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_35 = full_0 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_36 = full_0 + + # pd_op.scale: (8x12x-1x-1xf32) <- (8x12x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_1, full_0, float("0"), True) + del matmul_1 + + # pd_op.softmax: (8x12x-1x-1xf32) <- (8x12x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.matmul: (8x12x-1x32xf32) <- (8x12x-1x-1xf32, 8x12x-1x32xf32) + matmul_2 = paddle._C_ops.matmul(softmax_0, slice_2, False, False) + + # pd_op.transpose: (8x-1x12x32xf32) <- (8x12x-1x32xf32) + transpose_2 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) + del matmul_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [0, -1, 384] + + # pd_op.reshape: (8x-1x384xf32) <- (8x-1x12x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_2, full_int_array_5) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x384xf32, 384x384xf32) + matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_69, False, False) + del parameter_69 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_1 = paddle._C_ops.add(matmul_3, parameter_68) + del parameter_68 + + # pd_op.full: (xf64) <- () + full_1 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__0 = paddle._C_ops.assign_value_( + full_1, + [], + paddle.float64, + [float("0.929412")], + paddle.framework._current_expected_place(), + ) + del full_1 + + # pd_op.cast: (xf32) <- (xf64) + cast_0 = paddle._C_ops.cast(assign_value__0, paddle.float32) + del assign_value__0 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_0 = paddle._C_ops.shape64(add_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_0 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [slice_3, full_2, full_2] + del slice_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], 
float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_0 = paddle._C_ops.uniform( + stack_0, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_0 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_2 = paddle._C_ops.add(cast_0, uniform_0) + del uniform_0 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_0 = paddle._C_ops.floor(add_2) + del add_2 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_0 = paddle._C_ops.divide(add_1, cast_0) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_0 = paddle._C_ops.multiply(divide_0, floor_0) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x-1xf32, 8x-1x384xf32) + add_3 = paddle._C_ops.add(data_6, multiply_0) + del data_6 + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_3, parameter_67, parameter_66, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_66, parameter_67 + + # pd_op.matmul: (8x-1x1536xf32) <- (8x-1x384xf32, 384x1536xf32) + matmul_4 = paddle._C_ops.matmul(layer_norm_1, parameter_65, False, False) + del parameter_65 + + # pd_op.add: (8x-1x1536xf32) <- (8x-1x1536xf32, 1536xf32) + add_4 = paddle._C_ops.add(matmul_4, parameter_64) + del parameter_64 + + # pd_op.gelu: (8x-1x1536xf32) <- (8x-1x1536xf32) + gelu_0 = paddle._C_ops.gelu(add_4, False) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x1536xf32, 1536x384xf32) + matmul_5 = paddle._C_ops.matmul(gelu_0, parameter_63, False, False) + del parameter_63 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_5 = paddle._C_ops.add(matmul_5, parameter_62) + del parameter_62 + + # pd_op.full: (xf64) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__1 = paddle._C_ops.assign_value_( + full_5, + [], + paddle.float64, + [float("0.929412")], + paddle.framework._current_expected_place(), + ) + del full_5 + + # pd_op.cast: (xf32) <- (xf64) + cast_1 = paddle._C_ops.cast(assign_value__1, paddle.float32) + del assign_value__1 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_1 = paddle._C_ops.shape64(add_5) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_1 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [slice_4, full_2, full_2] + del slice_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_1 = paddle._C_ops.uniform( + stack_1, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_1 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_6 = paddle._C_ops.add(cast_1, uniform_1) + del uniform_1 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_1 = paddle._C_ops.floor(add_6) + del add_6 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_1 = paddle._C_ops.divide(add_5, cast_1) + + # pd_op.multiply: (8x-1x384xf32) <- 
(8x-1x384xf32, -1x1x1xf32) + multiply_1 = paddle._C_ops.multiply(divide_1, floor_1) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_7 = paddle._C_ops.add(layer_norm_1, multiply_1) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_7, parameter_61, parameter_60, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_60, parameter_61 + + # pd_op.matmul: (8x-1x1152xf32) <- (8x-1x384xf32, 384x1152xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_4, parameter_59, False, False) + del parameter_59 + + # pd_op.add: (8x-1x1152xf32) <- (8x-1x1152xf32, 1152xf32) + add_8 = paddle._C_ops.add(matmul_6, parameter_58) + del parameter_58 + + # pd_op.reshape: (8x-1x3x12x32xf32) <- (8x-1x1152xf32, 5xi64) + reshape_2 = paddle._C_ops.reshape(add_8, full_int_array_0) + + # pd_op.transpose: (3x8x12x-1x32xf32) <- (8x-1x3x12x32xf32) + transpose_3 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) + del reshape_2 + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x12x32x-1xf32) <- (8x12x-1x32xf32) + transpose_4 = paddle._C_ops.transpose(slice_6, [0, 1, 3, 2]) + del slice_6 + + # pd_op.matmul: (8x12x-1x-1xf32) <- (8x12x-1x32xf32, 8x12x32x-1xf32) + matmul_7 = paddle._C_ops.matmul(slice_5, transpose_4, False, False) + + # pd_op.scale: (8x12x-1x-1xf32) <- (8x12x-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_7, full_0, float("0"), True) + del matmul_7 + + # pd_op.softmax: (8x12x-1x-1xf32) <- (8x12x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.matmul: (8x12x-1x32xf32) <- (8x12x-1x-1xf32, 8x12x-1x32xf32) + matmul_8 = paddle._C_ops.matmul(softmax_1, slice_7, False, False) + + # pd_op.transpose: (8x-1x12x32xf32) <- (8x12x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) + del matmul_8 + + # pd_op.reshape: (8x-1x384xf32) <- (8x-1x12x32xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_5, full_int_array_5) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x384xf32, 384x384xf32) + matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_57, False, False) + del parameter_57 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_9 = paddle._C_ops.add(matmul_9, parameter_56) + del parameter_56 + + # pd_op.full: (xf64) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__2 = paddle._C_ops.assign_value_( + full_6, + [], + paddle.float64, + [float("0.923529")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.cast: (xf32) <- (xf64) + cast_2 = paddle._C_ops.cast(assign_value__2, paddle.float32) + del assign_value__2 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_2 = paddle._C_ops.shape64(add_9) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + 
shape64_2, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_2 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [slice_8, full_2, full_2] + del slice_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_2 = paddle._C_ops.uniform( + stack_2, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_2 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_10 = paddle._C_ops.add(cast_2, uniform_2) + del uniform_2 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_2 = paddle._C_ops.floor(add_10) + del add_10 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_2 = paddle._C_ops.divide(add_9, cast_2) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_2 = paddle._C_ops.multiply(divide_2, floor_2) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_11 = paddle._C_ops.add(layer_norm_4, multiply_2) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_11, parameter_55, parameter_54, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_54, parameter_55 + + # pd_op.matmul: (8x-1x1536xf32) <- (8x-1x384xf32, 384x1536xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_7, parameter_53, False, False) + del parameter_53 + + # pd_op.add: (8x-1x1536xf32) <- (8x-1x1536xf32, 1536xf32) + add_12 = paddle._C_ops.add(matmul_10, parameter_52) + del parameter_52 + + # pd_op.gelu: (8x-1x1536xf32) <- (8x-1x1536xf32) + gelu_1 = paddle._C_ops.gelu(add_12, False) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x1536xf32, 1536x384xf32) + matmul_11 = paddle._C_ops.matmul(gelu_1, parameter_51, False, False) + del parameter_51 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_13 = paddle._C_ops.add(matmul_11, parameter_50) + del parameter_50 + + # pd_op.full: (xf64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__3 = paddle._C_ops.assign_value_( + full_7, + [], + paddle.float64, + [float("0.923529")], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.cast: (xf32) <- (xf64) + cast_3 = paddle._C_ops.cast(assign_value__3, paddle.float32) + del assign_value__3 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_3 = paddle._C_ops.shape64(add_13) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_3 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [slice_9, full_2, full_2] + del slice_9 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_3 = paddle._C_ops.uniform( + stack_3, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_3 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_14 = paddle._C_ops.add(cast_3, uniform_3) + del uniform_3 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_3 = paddle._C_ops.floor(add_14) + del add_14 + + # 
pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_3 = paddle._C_ops.divide(add_13, cast_3) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_3, floor_3) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_15 = paddle._C_ops.add(layer_norm_7, multiply_3) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_15, parameter_49, parameter_48, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_48, parameter_49 + + # pd_op.matmul: (8x-1x1152xf32) <- (8x-1x384xf32, 384x1152xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_10, parameter_47, False, False) + del parameter_47 + + # pd_op.add: (8x-1x1152xf32) <- (8x-1x1152xf32, 1152xf32) + add_16 = paddle._C_ops.add(matmul_12, parameter_46) + del parameter_46 + + # pd_op.reshape: (8x-1x3x12x32xf32) <- (8x-1x1152xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_16, full_int_array_0) + del full_int_array_0 + + # pd_op.transpose: (3x8x12x-1x32xf32) <- (8x-1x3x12x32xf32) + transpose_6 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + transpose_6, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + transpose_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x12x-1x32xf32) <- (3x8x12x-1x32xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + transpose_6, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x12x32x-1xf32) <- (8x12x-1x32xf32) + transpose_7 = paddle._C_ops.transpose(slice_11, [0, 1, 3, 2]) + del slice_11 + + # pd_op.matmul: (8x12x-1x-1xf32) <- (8x12x-1x32xf32, 8x12x32x-1xf32) + matmul_13 = paddle._C_ops.matmul(slice_10, transpose_7, False, False) + + # pd_op.scale: (8x12x-1x-1xf32) <- (8x12x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_0, float("0"), True) + del matmul_13 + + # pd_op.softmax: (8x12x-1x-1xf32) <- (8x12x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.matmul: (8x12x-1x32xf32) <- (8x12x-1x-1xf32, 8x12x-1x32xf32) + matmul_14 = paddle._C_ops.matmul(softmax_2, slice_12, False, False) + + # pd_op.transpose: (8x-1x12x32xf32) <- (8x12x-1x32xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (8x-1x384xf32) <- (8x-1x12x32xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_8, full_int_array_5) + del full_int_array_5 + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x384xf32, 384x384xf32) + matmul_15 = paddle._C_ops.matmul(reshape_5, parameter_45, False, False) + del parameter_45 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_17 = paddle._C_ops.add(matmul_15, parameter_44) + del parameter_44 + + # pd_op.full: (xf64) <- () + full_8 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__4 = paddle._C_ops.assign_value_( + full_8, + [], + paddle.float64, + [float("0.917647")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.cast: (xf32) <- (xf64) + cast_4 = paddle._C_ops.cast(assign_value__4, 
paddle.float32) + del assign_value__4 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_4 = paddle._C_ops.shape64(add_17) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_4 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_4 = [slice_13, full_2, full_2] + del slice_13 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_4 = paddle._C_ops.uniform( + stack_4, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_4 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_18 = paddle._C_ops.add(cast_4, uniform_4) + del uniform_4 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_4 = paddle._C_ops.floor(add_18) + del add_18 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_4 = paddle._C_ops.divide(add_17, cast_4) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_4, floor_4) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_19 = paddle._C_ops.add(layer_norm_10, multiply_4) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_13, layer_norm_14, layer_norm_15 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_19, parameter_43, parameter_42, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_42, parameter_43 + + # pd_op.matmul: (8x-1x1536xf32) <- (8x-1x384xf32, 384x1536xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_13, parameter_41, False, False) + del parameter_41 + + # pd_op.add: (8x-1x1536xf32) <- (8x-1x1536xf32, 1536xf32) + add_20 = paddle._C_ops.add(matmul_16, parameter_40) + del parameter_40 + + # pd_op.gelu: (8x-1x1536xf32) <- (8x-1x1536xf32) + gelu_2 = paddle._C_ops.gelu(add_20, False) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x1536xf32, 1536x384xf32) + matmul_17 = paddle._C_ops.matmul(gelu_2, parameter_39, False, False) + del parameter_39 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_21 = paddle._C_ops.add(matmul_17, parameter_38) + del parameter_38 + + # pd_op.full: (xf64) <- () + full_9 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__5 = paddle._C_ops.assign_value_( + full_9, + [], + paddle.float64, + [float("0.917647")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.cast: (xf32) <- (xf64) + cast_5 = paddle._C_ops.cast(assign_value__5, paddle.float32) + del assign_value__5 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_5 = paddle._C_ops.shape64(add_21) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_5 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [slice_14, full_2, full_2] + del slice_14 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_5 = paddle._C_ops.uniform( + stack_5, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_5 
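# Note: the pattern below -- add keep_prob to uniform(0, 1) noise, floor() the
# result into a 0/1 per-sample mask, divide the branch output by keep_prob,
# then multiply by the mask -- appears to be a captured stochastic-depth
# (DropPath) step. The keep probability comes from the preceding assign_value_
# constant (0.917647 for this block); dividing by it rescales the surviving
# samples so the residual branch keeps its expected value.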
+ + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_22 = paddle._C_ops.add(cast_5, uniform_5) + del uniform_5 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_5 = paddle._C_ops.floor(add_22) + del add_22 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_5 = paddle._C_ops.divide(add_21, cast_5) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_5, floor_5) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_23 = paddle._C_ops.add(layer_norm_13, multiply_5) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_16, layer_norm_17, layer_norm_18 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_37, parameter_36, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_36, parameter_37 + + # pd_op.matmul: (8x-1x1152xf32) <- (8x-1x384xf32, 384x1152xf32) + matmul_18 = paddle._C_ops.matmul(layer_norm_16, parameter_35, False, False) + del parameter_35 + + # pd_op.add: (8x-1x1152xf32) <- (8x-1x1152xf32, 1152xf32) + add_24 = paddle._C_ops.add(matmul_18, parameter_34) + del parameter_34 + + # pd_op.full: (xi64) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_11 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_12 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_13 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_6 = [full_10, full_11, full_12, data_0, full_13] + del data_0 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (8x-1x3x-1x32xf32) <- (8x-1x1152xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_24, stack_6) + del stack_6 + + # pd_op.transpose: (3x8x-1x-1x32xf32) <- (8x-1x3x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + transpose_9, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + transpose_9, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + transpose_9, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x-1x32x-1xf32) <- (8x-1x-1x32xf32) + transpose_10 = paddle._C_ops.transpose(slice_16, [0, 1, 3, 2]) + del slice_16 + + # pd_op.matmul: (8x-1x-1x-1xf32) <- (8x-1x-1x32xf32, 8x-1x32x-1xf32) + matmul_19 = paddle._C_ops.matmul(slice_15, transpose_10, False, False) + + # pd_op.scale: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_19, full_0, float("0"), True) + del matmul_19 + + # pd_op.softmax: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (8x-1x-1x32xf32) <- (8x-1x-1x-1xf32, 8x-1x-1x32xf32) + matmul_20 = paddle._C_ops.matmul(softmax_3, slice_17, False, False) + + # pd_op.transpose: 
(8x-1x-1x32xf32) <- (8x-1x-1x32xf32) + transpose_11 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [full_10, full_11, data_1] + del data_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: (8x-1x-1xf32) <- (8x-1x-1x32xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_11, stack_7) + del stack_7 + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x-1xf32, 384x384xf32) + matmul_21 = paddle._C_ops.matmul(reshape_7, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_25 = paddle._C_ops.add(matmul_21, parameter_32) + del parameter_32 + + # pd_op.full: (xf64) <- () + full_14 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__6 = paddle._C_ops.assign_value_( + full_14, + [], + paddle.float64, + [float("0.911765")], + paddle.framework._current_expected_place(), + ) + del full_14 + + # pd_op.cast: (xf32) <- (xf64) + cast_6 = paddle._C_ops.cast(assign_value__6, paddle.float32) + del assign_value__6 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_6 = paddle._C_ops.shape64(add_25) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_6 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_8 = [slice_18, full_2, full_2] + del slice_18 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_6 = paddle._C_ops.uniform( + stack_8, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_8 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_26 = paddle._C_ops.add(cast_6, uniform_6) + del uniform_6 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_6 = paddle._C_ops.floor(add_26) + del add_26 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_6 = paddle._C_ops.divide(add_25, cast_6) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_6 = paddle._C_ops.multiply(divide_6, floor_6) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_27 = paddle._C_ops.add(layer_norm_16, multiply_6) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_19, layer_norm_20, layer_norm_21 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_27, parameter_31, parameter_30, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_30, parameter_31 + + # pd_op.matmul: (8x-1x1536xf32) <- (8x-1x384xf32, 384x1536xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_19, parameter_29, False, False) + del parameter_29 + + # pd_op.add: (8x-1x1536xf32) <- (8x-1x1536xf32, 1536xf32) + add_28 = paddle._C_ops.add(matmul_22, parameter_28) + del parameter_28 + + # pd_op.gelu: (8x-1x1536xf32) <- (8x-1x1536xf32) + gelu_3 = paddle._C_ops.gelu(add_28, False) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x1536xf32, 1536x384xf32) + matmul_23 = paddle._C_ops.matmul(gelu_3, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_29 = paddle._C_ops.add(matmul_23, 
parameter_26) + del parameter_26 + + # pd_op.full: (xf64) <- () + full_15 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__7 = paddle._C_ops.assign_value_( + full_15, + [], + paddle.float64, + [float("0.911765")], + paddle.framework._current_expected_place(), + ) + del full_15 + + # pd_op.cast: (xf32) <- (xf64) + cast_7 = paddle._C_ops.cast(assign_value__7, paddle.float32) + del assign_value__7 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_7 = paddle._C_ops.shape64(add_29) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_7 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_9 = [slice_19, full_2, full_2] + del slice_19 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_9, 0) + del combine_9 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_7 = paddle._C_ops.uniform( + stack_9, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_9 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_30 = paddle._C_ops.add(cast_7, uniform_7) + del uniform_7 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_7 = paddle._C_ops.floor(add_30) + del add_30 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_7 = paddle._C_ops.divide(add_29, cast_7) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_7 = paddle._C_ops.multiply(divide_7, floor_7) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_31 = paddle._C_ops.add(layer_norm_19, multiply_7) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_22, layer_norm_23, layer_norm_24 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_31, parameter_25, parameter_24, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_24, parameter_25 + + # pd_op.matmul: (8x-1x1152xf32) <- (8x-1x384xf32, 384x1152xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_22, parameter_23, False, False) + del parameter_23 + + # pd_op.add: (8x-1x1152xf32) <- (8x-1x1152xf32, 1152xf32) + add_32 = paddle._C_ops.add(matmul_24, parameter_22) + del parameter_22 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_10 = [full_10, full_11, full_12, data_2, full_13] + del data_2 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # pd_op.reshape: (8x-1x3x-1x32xf32) <- (8x-1x1152xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_32, stack_10) + del stack_10 + + # pd_op.transpose: (3x8x-1x-1x32xf32) <- (8x-1x3x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + transpose_12, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + transpose_12, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + transpose_12, [0], full_int_array_3, full_int_array_4, [1], [0] 
+ ) + + # pd_op.transpose: (8x-1x32x-1xf32) <- (8x-1x-1x32xf32) + transpose_13 = paddle._C_ops.transpose(slice_21, [0, 1, 3, 2]) + del slice_21 + + # pd_op.matmul: (8x-1x-1x-1xf32) <- (8x-1x-1x32xf32, 8x-1x32x-1xf32) + matmul_25 = paddle._C_ops.matmul(slice_20, transpose_13, False, False) + + # pd_op.scale: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_25, full_0, float("0"), True) + del matmul_25 + + # pd_op.softmax: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32) + softmax_4 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.matmul: (8x-1x-1x32xf32) <- (8x-1x-1x-1xf32, 8x-1x-1x32xf32) + matmul_26 = paddle._C_ops.matmul(softmax_4, slice_22, False, False) + + # pd_op.transpose: (8x-1x-1x32xf32) <- (8x-1x-1x32xf32) + transpose_14 = paddle._C_ops.transpose(matmul_26, [0, 2, 1, 3]) + del matmul_26 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_11 = [full_10, full_11, data_3] + del data_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_11 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.reshape: (8x-1x-1xf32) <- (8x-1x-1x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_14, stack_11) + del stack_11 + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x-1xf32, 384x384xf32) + matmul_27 = paddle._C_ops.matmul(reshape_9, parameter_21, False, False) + del parameter_21 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_33 = paddle._C_ops.add(matmul_27, parameter_20) + del parameter_20 + + # pd_op.full: (xf64) <- () + full_16 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__8 = paddle._C_ops.assign_value_( + full_16, + [], + paddle.float64, + [float("0.905882")], + paddle.framework._current_expected_place(), + ) + del full_16 + + # pd_op.cast: (xf32) <- (xf64) + cast_8 = paddle._C_ops.cast(assign_value__8, paddle.float32) + del assign_value__8 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_8 = paddle._C_ops.shape64(add_33) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_8 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_12 = [slice_23, full_2, full_2] + del slice_23 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_12 = paddle._C_ops.stack(combine_12, 0) + del combine_12 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_8 = paddle._C_ops.uniform( + stack_12, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_12 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_34 = paddle._C_ops.add(cast_8, uniform_8) + del uniform_8 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_8 = paddle._C_ops.floor(add_34) + del add_34 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_8 = paddle._C_ops.divide(add_33, cast_8) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_8 = paddle._C_ops.multiply(divide_8, floor_8) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_35 = paddle._C_ops.add(layer_norm_22, multiply_8) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_25, layer_norm_26, layer_norm_27 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_35, parameter_19, parameter_18, float("1e-06"), 2 + ), + lambda out: out if 
isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_18, parameter_19 + + # pd_op.matmul: (8x-1x1536xf32) <- (8x-1x384xf32, 384x1536xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_25, parameter_17, False, False) + del parameter_17 + + # pd_op.add: (8x-1x1536xf32) <- (8x-1x1536xf32, 1536xf32) + add_36 = paddle._C_ops.add(matmul_28, parameter_16) + del parameter_16 + + # pd_op.gelu: (8x-1x1536xf32) <- (8x-1x1536xf32) + gelu_4 = paddle._C_ops.gelu(add_36, False) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x1536xf32, 1536x384xf32) + matmul_29 = paddle._C_ops.matmul(gelu_4, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_37 = paddle._C_ops.add(matmul_29, parameter_14) + del parameter_14 + + # pd_op.full: (xf64) <- () + full_17 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__9 = paddle._C_ops.assign_value_( + full_17, + [], + paddle.float64, + [float("0.905882")], + paddle.framework._current_expected_place(), + ) + del full_17 + + # pd_op.cast: (xf32) <- (xf64) + cast_9 = paddle._C_ops.cast(assign_value__9, paddle.float32) + del assign_value__9 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_9 = paddle._C_ops.shape64(add_37) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_9 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_13 = [slice_24, full_2, full_2] + del slice_24 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_13 = paddle._C_ops.stack(combine_13, 0) + del combine_13 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_9 = paddle._C_ops.uniform( + stack_13, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_13 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_38 = paddle._C_ops.add(cast_9, uniform_9) + del uniform_9 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_9 = paddle._C_ops.floor(add_38) + del add_38 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_9 = paddle._C_ops.divide(add_37, cast_9) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_9 = paddle._C_ops.multiply(divide_9, floor_9) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_39 = paddle._C_ops.add(layer_norm_25, multiply_9) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_28, layer_norm_29, layer_norm_30 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_39, parameter_13, parameter_12, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_12, parameter_13 + + # pd_op.matmul: (8x-1x1152xf32) <- (8x-1x384xf32, 384x1152xf32) + matmul_30 = paddle._C_ops.matmul(layer_norm_28, parameter_11, False, False) + del parameter_11 + + # pd_op.add: (8x-1x1152xf32) <- (8x-1x1152xf32, 1152xf32) + add_40 = paddle._C_ops.add(matmul_30, parameter_10) + del parameter_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_14 = [full_10, full_11, full_12, data_4, full_13] + del data_4, full_12, full_13 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_14 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: 
(8x-1x3x-1x32xf32) <- (8x-1x1152xf32, 5xi64) + reshape_10 = paddle._C_ops.reshape(add_40, stack_14) + del stack_14 + + # pd_op.transpose: (3x8x-1x-1x32xf32) <- (8x-1x3x-1x32xf32) + transpose_15 = paddle._C_ops.transpose(reshape_10, [2, 0, 3, 1, 4]) + del reshape_10 + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + transpose_15, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + transpose_15, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (8x-1x-1x32xf32) <- (3x8x-1x-1x32xf32, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + transpose_15, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (8x-1x32x-1xf32) <- (8x-1x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(slice_26, [0, 1, 3, 2]) + del slice_26 + + # pd_op.matmul: (8x-1x-1x-1xf32) <- (8x-1x-1x32xf32, 8x-1x32x-1xf32) + matmul_31 = paddle._C_ops.matmul(slice_25, transpose_16, False, False) + + # pd_op.scale: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32, 1xf32) + scale_5 = paddle._C_ops.scale(matmul_31, full_0, float("0"), True) + del matmul_31 + + # pd_op.softmax: (8x-1x-1x-1xf32) <- (8x-1x-1x-1xf32) + softmax_5 = paddle._C_ops.softmax(scale_5, -1) + del scale_5 + + # pd_op.matmul: (8x-1x-1x32xf32) <- (8x-1x-1x-1xf32, 8x-1x-1x32xf32) + matmul_32 = paddle._C_ops.matmul(softmax_5, slice_27, False, False) + + # pd_op.transpose: (8x-1x-1x32xf32) <- (8x-1x-1x32xf32) + transpose_17 = paddle._C_ops.transpose(matmul_32, [0, 2, 1, 3]) + del matmul_32 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_10, full_11, data_5] + del data_5, full_10, full_11 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_15 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (8x-1x-1xf32) <- (8x-1x-1x32xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_17, stack_15) + del stack_15 + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x-1xf32, 384x384xf32) + matmul_33 = paddle._C_ops.matmul(reshape_11, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_41 = paddle._C_ops.add(matmul_33, parameter_8) + del parameter_8 + + # pd_op.full: (xf64) <- () + full_18 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__10 = paddle._C_ops.assign_value_( + full_18, + [], + paddle.float64, + [float("0.9")], + paddle.framework._current_expected_place(), + ) + del full_18 + + # pd_op.cast: (xf32) <- (xf64) + cast_10 = paddle._C_ops.cast(assign_value__10, paddle.float32) + del assign_value__10 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_10 = paddle._C_ops.shape64(add_41) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_28 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_10 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_16 = [slice_28, full_2, full_2] + del slice_28 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_16 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_10 = paddle._C_ops.uniform( + stack_16, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del stack_16 + + # pd_op.add: (-1x1x1xf32) <- (xf32, 
-1x1x1xf32) + add_42 = paddle._C_ops.add(cast_10, uniform_10) + del uniform_10 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_10 = paddle._C_ops.floor(add_42) + del add_42 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_10 = paddle._C_ops.divide(add_41, cast_10) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_10 = paddle._C_ops.multiply(divide_10, floor_10) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_43 = paddle._C_ops.add(layer_norm_28, multiply_10) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_31, layer_norm_32, layer_norm_33 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_43, parameter_7, parameter_6, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_6, parameter_7 + + # pd_op.matmul: (8x-1x1536xf32) <- (8x-1x384xf32, 384x1536xf32) + matmul_34 = paddle._C_ops.matmul(layer_norm_31, parameter_5, False, False) + del parameter_5 + + # pd_op.add: (8x-1x1536xf32) <- (8x-1x1536xf32, 1536xf32) + add_44 = paddle._C_ops.add(matmul_34, parameter_4) + del parameter_4 + + # pd_op.gelu: (8x-1x1536xf32) <- (8x-1x1536xf32) + gelu_5 = paddle._C_ops.gelu(add_44, False) + + # pd_op.matmul: (8x-1x384xf32) <- (8x-1x1536xf32, 1536x384xf32) + matmul_35 = paddle._C_ops.matmul(gelu_5, parameter_3, False, False) + del parameter_3 + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 384xf32) + add_45 = paddle._C_ops.add(matmul_35, parameter_2) + del parameter_2 + + # pd_op.full: (xf64) <- () + full_19 = paddle._C_ops.full( + [], float("0"), paddle.float64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (xf64) <- (xf64) + assign_value__11 = paddle._C_ops.assign_value_( + full_19, + [], + paddle.float64, + [float("0.9")], + paddle.framework._current_expected_place(), + ) + del full_19 + + # pd_op.cast: (xf32) <- (xf64) + cast_11 = paddle._C_ops.cast(assign_value__11, paddle.float32) + del assign_value__11 + + # pd_op.shape64: (3xi64) <- (8x-1x384xf32) + shape64_11 = paddle._C_ops.shape64(add_45) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_29 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del shape64_11 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [slice_29, full_2, full_2] + del full_2, slice_29 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_17 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.uniform: (-1x1x1xf32) <- (3xi64, 1xf32, 1xf32) + uniform_11 = paddle._C_ops.uniform( + stack_17, + paddle.float32, + full_3, + full_4, + 0, + paddle.framework._current_expected_place(), + ) + del full_3, full_4, stack_17 + + # pd_op.add: (-1x1x1xf32) <- (xf32, -1x1x1xf32) + add_46 = paddle._C_ops.add(cast_11, uniform_11) + del uniform_11 + + # pd_op.floor: (-1x1x1xf32) <- (-1x1x1xf32) + floor_11 = paddle._C_ops.floor(add_46) + del add_46 + + # pd_op.divide: (8x-1x384xf32) <- (8x-1x384xf32, xf32) + divide_11 = paddle._C_ops.divide(add_45, cast_11) + + # pd_op.multiply: (8x-1x384xf32) <- (8x-1x384xf32, -1x1x1xf32) + multiply_11 = paddle._C_ops.multiply(divide_11, floor_11) + + # pd_op.add: (8x-1x384xf32) <- (8x-1x384xf32, 8x-1x384xf32) + add_47 = paddle._C_ops.add(layer_norm_31, multiply_11) + + # pd_op.layer_norm: (8x-1x384xf32, 8x-1xf32, 8x-1xf32) <- (8x-1x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_34, layer_norm_35 = (lambda x, f: f(x))( + 
paddle._C_ops.layer_norm( + add_47, parameter_1, parameter_0, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del ( + add_0, + add_1, + add_11, + add_12, + add_13, + add_15, + add_16, + add_17, + add_19, + add_20, + add_21, + add_23, + add_24, + add_25, + add_27, + add_28, + add_29, + add_3, + add_31, + add_32, + add_33, + add_35, + add_36, + add_37, + add_39, + add_4, + add_40, + add_41, + add_43, + add_44, + add_45, + add_47, + add_5, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + cast_0, + cast_1, + cast_10, + cast_11, + cast_2, + cast_3, + cast_4, + cast_5, + cast_6, + cast_7, + cast_8, + cast_9, + divide_0, + divide_1, + divide_10, + divide_11, + divide_2, + divide_3, + divide_4, + divide_5, + divide_6, + divide_7, + divide_8, + divide_9, + floor_0, + floor_1, + floor_10, + floor_11, + floor_2, + floor_3, + floor_4, + floor_5, + floor_6, + floor_7, + floor_8, + floor_9, + full_0, + full_int_array_1, + full_int_array_2, + full_int_array_3, + full_int_array_4, + gelu_0, + gelu_1, + gelu_2, + gelu_3, + gelu_4, + gelu_5, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_21, + layer_norm_22, + layer_norm_23, + layer_norm_24, + layer_norm_25, + layer_norm_26, + layer_norm_27, + layer_norm_28, + layer_norm_29, + layer_norm_3, + layer_norm_30, + layer_norm_31, + layer_norm_32, + layer_norm_33, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_0, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_21, + matmul_22, + matmul_23, + matmul_24, + matmul_27, + matmul_28, + matmul_29, + matmul_3, + matmul_30, + matmul_33, + matmul_34, + matmul_35, + matmul_4, + matmul_5, + matmul_6, + matmul_9, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + parameter_0, + parameter_1, + reshape_1, + reshape_11, + reshape_3, + reshape_5, + reshape_7, + reshape_9, + slice_0, + slice_10, + slice_12, + slice_15, + slice_17, + slice_2, + slice_20, + slice_22, + slice_25, + slice_27, + slice_5, + slice_7, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + softmax_4, + softmax_5, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + ) + + return layer_norm_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/weight_meta.py new file mode 100644 index 000000000..323191e4d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/weight_meta.py @@ -0,0 +1,790 @@ +class Program_weight_tensor_parameter_0: + name = 
"parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.391234") + max_val = float("2.33804") + mean = float("-0.00605127") + std = float("0.141709") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-0.00273413") + max_val = float("1.27834") + mean = float("0.633188") + std = float("0.504115") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("-6.21156") + max_val = float("3.16201") + mean = float("0.00811922") + std = float("0.55936") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.26586") + max_val = float("0.731109") + mean = float("-0.0001314") + std = float("0.0594808") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1536] + dtype = "float32" + min_val = float("-1.33895") + max_val = float("1.11383") + mean = float("-0.691732") + std = float("0.261174") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.294714") + max_val = float("0.394448") + mean = float("-0.00138281") + std = float("0.05379") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("-5.66332") + max_val = float("0.774539") + mean = float("-0.00881374") + std = float("0.387828") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-0.738694") + max_val = float("6.11451") + mean = float("0.679105") + std = float("0.448438") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.828408") + max_val = float("1.51244") + mean = float("-0.0176604") + std = float("0.163826") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.361609") + max_val = float("0.452343") + mean = float("0.000357919") + std = float("0.0465118") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1152] + dtype = "float32" + min_val = float("-2.96073") + max_val = float("3.21206") + mean = float("0.00577588") + std = float("0.35175") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.359938") + max_val = float("0.355583") + mean = float("-9.04293e-05") + std = float("0.0501151") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-0.966974") + max_val = float("0.727955") + mean = float("-0.0132417") + std = float("0.255698") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.00660637") + max_val = float("1.93532") + mean = float("0.840973") + std = float("0.330999") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("-1.16391") + max_val = float("1.11822") + mean = float("-0.0173586") + std = float("0.170928") + data = None + + +class Program_weight_tensor_parameter_15: + name = 
"parameter_15" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.644514") + max_val = float("0.573222") + mean = float("1.29267e-05") + std = float("0.0672336") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [1536] + dtype = "float32" + min_val = float("-1.29829") + max_val = float("0.188374") + mean = float("-0.60564") + std = float("0.151127") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.371481") + max_val = float("0.412588") + mean = float("-0.00187947") + std = float("0.0543025") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-3.00944") + max_val = float("1.12072") + mean = float("0.02978") + std = float("0.365241") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("-0.252943") + max_val = float("2.80685") + mean = float("0.684697") + std = float("0.319556") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-1.02449") + max_val = float("1.04887") + mean = float("-0.0149435") + std = float("0.151017") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.32742") + max_val = float("0.275816") + mean = float("0.000217811") + std = float("0.0475238") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [1152] + dtype = "float32" + min_val = float("-2.91644") + max_val = float("2.62449") + mean = float("-0.00818085") + std = float("0.32054") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.36594") + max_val = float("0.310166") + mean = float("-8.03293e-05") + std = float("0.0500189") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("-0.957574") + max_val = float("1.27513") + mean = float("-0.0170723") + std = float("0.244341") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00498009") + max_val = float("1.45274") + mean = float("0.817863") + std = float("0.306469") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("-1.07558") + max_val = float("1.26154") + mean = float("0.00131696") + std = float("0.189135") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.659897") + max_val = float("0.62102") + mean = float("8.24432e-05") + std = float("0.056888") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1536] + dtype = "float32" + min_val = float("-1.18677") + max_val = float("-0.114134") + mean = float("-0.611999") + std = float("0.159574") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.445094") + max_val = float("0.48006") + mean = float("-0.00115824") + std = float("0.0517858") + data = None + + +class 
Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-2.24688") + max_val = float("1.86867") + mean = float("0.0159623") + std = float("0.362233") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("-1.69377") + max_val = float("2.23635") + mean = float("0.624084") + std = float("0.348942") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.90244") + max_val = float("1.05957") + mean = float("-0.00717212") + std = float("0.164042") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.382146") + max_val = float("0.337451") + mean = float("0.000116599") + std = float("0.0459039") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [1152] + dtype = "float32" + min_val = float("-2.91195") + max_val = float("2.61562") + mean = float("0.00594852") + std = float("0.313983") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.430739") + max_val = float("0.44533") + mean = float("-4.491e-05") + std = float("0.0491566") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("-0.916805") + max_val = float("1.74928") + mean = float("-0.00816528") + std = float("0.287587") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-0.726943") + max_val = float("1.49198") + mean = float("0.707845") + std = float("0.338272") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-1.78986") + max_val = float("1.51204") + mean = float("0.0134232") + std = float("0.187376") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.560883") + max_val = float("0.459562") + mean = float("6.25826e-05") + std = float("0.0557709") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [1536] + dtype = "float32" + min_val = float("-1.17196") + max_val = float("0.105926") + mean = float("-0.589018") + std = float("0.266652") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.754343") + max_val = float("0.536855") + mean = float("-0.00042982") + std = float("0.0503471") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-4.64774") + max_val = float("1.3273") + mean = float("-0.00456749") + std = float("0.498174") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-1.46338") + max_val = float("2.61752") + mean = float("0.626304") + std = float("0.370925") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.820121") + max_val = float("1.46921") + mean = float("-0.00686808") + std = float("0.175591") + data = None + + 
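# Note (added): each class in this weight_meta.py records only summary
# statistics for one checkpoint tensor (shape, dtype, min/max/mean/std) and
# leaves data = None. The helper below is a minimal sketch, not part of the
# GraphNet tooling itself, of how a harness could materialize a placeholder
# tensor from such a record; the name materialize_weight and the
# sample-then-clip strategy are illustrative assumptions.
import paddle


def materialize_weight(meta):
    # Use captured values when they exist (e.g. the small integer tensors in
    # input_meta.py); otherwise draw from the recorded statistics.
    if meta.data is not None:
        return paddle.to_tensor(meta.data, dtype=meta.dtype).reshape(meta.shape)
    sample = paddle.normal(
        mean=float(meta.mean), std=float(meta.std), shape=meta.shape
    )
    # Clamp to the observed range so extreme draws stay inside the envelope
    # of the original weights.
    return paddle.clip(
        sample, float(meta.min_val), float(meta.max_val)
    ).astype(meta.dtype)


# Example (assumption): materialize_weight(Program_weight_tensor_parameter_44)
# would yield a [384] float32 tensor with roughly the statistics listed above.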
+class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.453601") + max_val = float("0.455748") + mean = float("-8.5354e-05") + std = float("0.0479006") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1152] + dtype = "float32" + min_val = float("-3.22004") + max_val = float("3.23421") + mean = float("0.00903229") + std = float("0.346161") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.413393") + max_val = float("0.583397") + mean = float("3.22833e-05") + std = float("0.0491461") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.991885") + max_val = float("1.80415") + mean = float("-0.0109873") + std = float("0.316639") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("-0.0522131") + max_val = float("2.78471") + mean = float("0.797124") + std = float("0.429758") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-1.50586") + max_val = float("1.40754") + mean = float("0.0228976") + std = float("0.354798") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.673087") + max_val = float("0.303375") + mean = float("0.0002792") + std = float("0.0561281") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [1536] + dtype = "float32" + min_val = float("-0.743186") + max_val = float("0.0147312") + mean = float("-0.503284") + std = float("0.118682") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.299899") + max_val = float("0.457353") + mean = float("-0.000994882") + std = float("0.0430651") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("-5.35791") + max_val = float("1.78323") + mean = float("0.0042739") + std = float("0.727217") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0899559") + max_val = float("3.46991") + mean = float("0.7977") + std = float("0.403236") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("-1.34634") + max_val = float("1.73571") + mean = float("-0.00690144") + std = float("0.242646") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.301549") + max_val = float("0.319018") + mean = float("-0.000210251") + std = float("0.0505271") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [1152] + dtype = "float32" + min_val = float("-2.09165") + max_val = float("3.47442") + mean = float("0.00849669") + std = float("0.343971") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.403793") + max_val = float("0.385278") + mean = float("-5.15207e-05") + std = 
float("0.0496014") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-1.11619") + max_val = float("1.82325") + mean = float("-0.0106112") + std = float("0.348095") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("-0.0581641") + max_val = float("2.66693") + mean = float("0.818678") + std = float("0.408545") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("-1.44023") + max_val = float("1.84817") + mean = float("0.0176878") + std = float("0.410639") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [1536, 384] + dtype = "float32" + min_val = float("-0.925741") + max_val = float("0.378696") + mean = float("0.00020614") + std = float("0.0564303") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [1536] + dtype = "float32" + min_val = float("-0.688288") + max_val = float("-0.00753507") + mean = float("-0.499897") + std = float("0.12361") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.503913") + max_val = float("0.309683") + mean = float("9.58097e-05") + std = float("0.0411907") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("-3.63391") + max_val = float("1.65044") + mean = float("-0.00951911") + std = float("0.763258") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("-0.0887417") + max_val = float("3.50729") + mean = float("0.779768") + std = float("0.369595") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.674133") + max_val = float("0.951669") + mean = float("0.00424016") + std = float("0.176231") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.363553") + max_val = float("0.395794") + mean = float("0.000259539") + std = float("0.0463676") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [1152] + dtype = "float32" + min_val = float("-2.58158") + max_val = float("2.52891") + mean = float("-0.00799767") + std = float("0.316833") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.30861") + max_val = float("0.39976") + mean = float("4.28168e-05") + std = float("0.047099") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt new file mode 100644 index 000000000..93295742c --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt @@ -0,0 +1 @@ +72635eaa8cddb1bb3c1593fbda3647b1fd40c21ad3692ffc43e63053353b44e5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_net.json @@ 
-0,0 +1,6 @@ +{ + "framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/input_meta.py new file mode 100644 index 000000000..d9f8e6bc0 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/input_meta.py @@ -0,0 +1,36 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [5, 384, 1, 40] + dtype = "float32" + min_val = float("-8.03003") + max_val = float("4.27638") + mean = float("0.000711681") + std = float("0.433526") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [5, 25] + dtype = "int64" + min_val = 0 + max_val = 4925 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [5] + dtype = "int64" + data = [5, 4, 5, 10, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [5000, 1, 384] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.131787") + std = float("0.694717") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/model.py new file mode 100644 index 000000000..49d8cc24b --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/model.py @@ -0,0 +1,1139 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + data_0, + data_1, + data_2, + data_3, + ): + # pd_op.flatten: (-1x384x40xf32) <- (-1x384x1x40xf32) + flatten_0 = paddle._C_ops.flatten(data_0, 2, 3) + del data_0 + + # pd_op.transpose: (-1x40x384xf32) <- (-1x384x40xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.matmul: (-1x40x384xf32) <- (-1x40x384xf32, 384x384xf32) + matmul_1 = paddle._C_ops.matmul(transpose_0, parameter_42, False, False) + del parameter_42 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_0 = [] + + # pd_op.max: (xi64) <- (-1xi64, 0xi64) + max_0 = paddle._C_ops.max(data_2, full_int_array_0, False) + del data_2, full_int_array_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_0, float("2"), True) + del full_0, max_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # builtin.combine: ([xi64]) <- (xi64) + combine_0 = [scale_0] 
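        # Note (added): combine_0 wraps the scalar end index computed above,
        # the longest label length in data_2 plus 2 (presumably leaving room
        # for the start/stop tokens), so the stack below can turn it into the
        # 1-D end bound for the dynamic slice of the padded label ids data_1.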
+ del scale_0 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.slice: (-1x-1xi64) <- (-1x25xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice(data_1, [1], full_int_array_1, stack_0, [-1], []) + del data_1, stack_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-1] + + # pd_op.slice: (-1x-1xi64) <- (-1x-1xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + slice_0, [1], full_int_array_1, full_int_array_2, [1], [] + ) + del full_int_array_2, slice_0 + + # pd_op.embedding: (-1x-1x384xf32) <- (-1x-1xi64, 6629x384xf32) + embedding_0 = paddle._C_ops.embedding(slice_1, parameter_41, 0, False) + del parameter_41 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("19.5959"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x384xf32) <- (-1x-1x384xf32, 1xf32) + scale_1 = paddle._C_ops.scale(embedding_0, full_1, float("0"), True) + del embedding_0 + + # pd_op.transpose: (-1x-1x384xf32) <- (-1x-1x384xf32) + transpose_1 = paddle._C_ops.transpose(scale_1, [1, 0, 2]) + del scale_1 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_0 = paddle._C_ops.shape64(transpose_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_8 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_9 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_10 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_11 = full_int_array_3 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_12 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_13 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_14 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_15 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_4 + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_0 + + # builtin.combine: ([xi64]) <- (xi64) + combine_1 = [slice_2] + del slice_2 + + # pd_op.stack: (1xi64) <- ([xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.slice: (-1x1x384xf32) <- (5000x1x384xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice(data_3, [0], full_int_array_1, stack_1, [-1], []) + del data_3, stack_1 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x1x384xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_4) + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_18 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_19 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_20 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_21 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_22 = full_2 + + # pd_op.assign: 
(1xf32) <- (1xf32) + assign_23 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_24 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_25 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_26 = full_2 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_27 = full_2 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_0 + + # pd_op.transpose: (-1x-1x384xf32) <- (-1x-1x384xf32) + transpose_2 = paddle._C_ops.transpose(dropout_0, [1, 0, 2]) + del dropout_0 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_1 = paddle._C_ops.shape64(transpose_2) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_2 = paddle._C_ops.shape64(transpose_2) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_2 = [slice_6, slice_6] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_3, stack_2, paddle.float32 + ) + del full_3, stack_2 + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_3 = [slice_6, slice_6] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("-inf"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) + full_with_tensor_1 = paddle._C_ops.full_with_tensor( + full_4, stack_3, paddle.float32 + ) + del full_4, stack_3 + + # pd_op.triu: (-1x-1xf32) <- (-1x-1xf32) + triu_0 = paddle._C_ops.triu(full_with_tensor_1, 1) + del full_with_tensor_1 + + # pd_op.add: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) + add_1 = paddle._C_ops.add(full_with_tensor_0, triu_0) + del full_with_tensor_0, triu_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] + + # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(add_1, full_int_array_5) + del add_1, full_int_array_5 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_2 = paddle._C_ops.matmul(transpose_2, parameter_40, False, False) + del parameter_40 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_2 = paddle._C_ops.add(matmul_2, parameter_39) + del parameter_39 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("12"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, 
xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_4 = [full_5, slice_6, full_6, full_7, full_8] + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_0 = paddle._C_ops.reshape(add_2, stack_4) + del stack_4 + + # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) + transpose_3 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) + del reshape_0 + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_6 + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + transpose_3, [0], full_int_array_4, full_int_array_6, [1], [0] + ) + + # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) + transpose_4 = paddle._C_ops.transpose(slice_8, [0, 1, 3, 2]) + del slice_8 + + # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) + matmul_3 = paddle._C_ops.matmul(slice_7, transpose_4, False, False) + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_29 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_30 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_31 = full_9 + + # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_3, full_9, float("0"), True) + del matmul_3 + + # pd_op.add: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1x1x-1x-1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_0) + + # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(add_3, -1) + del add_3 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) + matmul_4 = paddle._C_ops.matmul(softmax_0, slice_9, False, False) + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) + del matmul_4 + + # pd_op.full: (xi64) <- () + full_10 = paddle._C_ops.full( + [], float("384"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [full_5, slice_6, full_10] + del slice_6 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(transpose_5, stack_5) + del stack_5 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_5 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) + del parameter_38 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_4 = paddle._C_ops.add(matmul_5, parameter_37) + del parameter_37 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # 
pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_5 = paddle._C_ops.add(transpose_2, dropout_2) + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_36, parameter_35, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_35, parameter_36 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_3 = paddle._C_ops.shape64(layer_norm_0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_4 = paddle._C_ops.shape64(layer_norm_0) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (3xi64) <- (-1x40x384xf32) + shape64_5 = paddle._C_ops.shape64(matmul_1) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del shape64_5 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_34, False, False) + del parameter_34 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_6 = paddle._C_ops.add(matmul_6, parameter_33) + del parameter_33 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_6 = [full_5, slice_11, full_7, full_8] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (-1x-1x12x32xf32) <- (-1x-1x384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_6, stack_6) + del stack_6 + + # pd_op.transpose: (-1x12x-1x32xf32) <- (-1x-1x12x32xf32) + transpose_6 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.matmul: (-1x40x768xf32) <- (-1x40x384xf32, 384x768xf32) + matmul_7 = paddle._C_ops.matmul(matmul_1, parameter_32, False, False) + del parameter_32 + + # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) + add_7 = paddle._C_ops.add(matmul_7, parameter_31) + del parameter_31 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_7 = [0, 40, 2, 12, 32] + + # pd_op.reshape: (-1x40x2x12x32xf32) <- (-1x40x768xf32, 5xi64) + reshape_3 = paddle._C_ops.reshape(add_7, full_int_array_7) + + # pd_op.transpose: (2x-1x12x40x32xf32) <- (-1x40x2x12x32xf32) + transpose_7 = paddle._C_ops.transpose(reshape_3, [2, 0, 3, 1, 4]) + del reshape_3 + + # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + transpose_7, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + transpose_7, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.transpose: (-1x12x32x40xf32) <- (-1x12x40x32xf32) + transpose_8 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) + del slice_13 + + # pd_op.matmul: (-1x12x-1x40xf32) <- (-1x12x-1x32xf32, -1x12x32x40xf32) + matmul_8 = paddle._C_ops.matmul(transpose_6, transpose_8, False, False) + + # pd_op.scale: (-1x12x-1x40xf32) <- (-1x12x-1x40xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_8, full_9, float("0"), True) + del matmul_8 + + # pd_op.softmax: 
(-1x12x-1x40xf32) <- (-1x12x-1x40xf32) + softmax_1 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x40xf32, -1x12x40x32xf32) + matmul_9 = paddle._C_ops.matmul(softmax_1, slice_14, False, False) + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(matmul_9, [0, 2, 1, 3]) + del matmul_9 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [full_5, slice_11, full_10] + del slice_11 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_4 = paddle._C_ops.reshape(transpose_9, stack_7) + del stack_7 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_10 = paddle._C_ops.matmul(reshape_4, parameter_30, False, False) + del parameter_30 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_8 = paddle._C_ops.add(matmul_10, parameter_29) + del parameter_29 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_8, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_8 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_9 = paddle._C_ops.add(layer_norm_0, dropout_4) + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_28, parameter_27, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_27, parameter_28 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_11 = paddle._C_ops.matmul(layer_norm_3, parameter_26, False, False) + del parameter_26 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_10 = paddle._C_ops.add(matmul_11, parameter_25) + del parameter_25 + + # pd_op.relu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + relu_0 = paddle._C_ops.relu(add_10) + del add_10 + + # pd_op.dropout: (-1x-1x1536xf32, -1x-1x1536xui8) <- (-1x-1x1536xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_0, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_12 = paddle._C_ops.matmul(dropout_6, parameter_24, False, False) + del parameter_24 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_11 = paddle._C_ops.add(matmul_12, parameter_23) + del parameter_23 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_11, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_11 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + dropout_8, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del dropout_8 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_12 = 
paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_22, parameter_21, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_21, parameter_22 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_6 = paddle._C_ops.shape64(layer_norm_6) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del shape64_6 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_7 = paddle._C_ops.shape64(layer_norm_6) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_7 + + # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) + matmul_13 = paddle._C_ops.matmul(layer_norm_6, parameter_20, False, False) + del parameter_20 + + # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) + add_13 = paddle._C_ops.add(matmul_13, parameter_19) + del parameter_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_8 = [full_5, slice_16, full_6, full_7, full_8] + del full_6 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_8 = paddle._C_ops.stack(combine_8, 0) + del combine_8 + + # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) + reshape_5 = paddle._C_ops.reshape(add_13, stack_8) + del stack_8 + + # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) + transpose_10 = paddle._C_ops.transpose(reshape_5, [2, 0, 3, 1, 4]) + del reshape_5 + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + transpose_10, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + transpose_10, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + + # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + transpose_10, [0], full_int_array_4, full_int_array_6, [1], [0] + ) + + # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) + transpose_11 = paddle._C_ops.transpose(slice_18, [0, 1, 3, 2]) + del slice_18 + + # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) + matmul_14 = paddle._C_ops.matmul(slice_17, transpose_11, False, False) + + # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_14, full_9, float("0"), True) + del matmul_14 + + # pd_op.add: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1x1x-1x-1xf32) + add_14 = paddle._C_ops.add(scale_4, unsqueeze_0) + + # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(add_14, -1) + del add_14 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) + matmul_15 = paddle._C_ops.matmul(softmax_2, slice_19, False, False) + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(matmul_15, [0, 2, 1, 3]) + del matmul_15 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_9 = [full_5, slice_16, full_10] + del slice_16 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_9 = paddle._C_ops.stack(combine_9, 0) + del 
combine_9 + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_6 = paddle._C_ops.reshape(transpose_12, stack_9) + del stack_9 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_16 = paddle._C_ops.matmul(reshape_6, parameter_18, False, False) + del parameter_18 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_17) + del parameter_17 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_15, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_15 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_16 = paddle._C_ops.add(layer_norm_6, dropout_12) + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_16, parameter_16, parameter_15, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_15, parameter_16 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_8 = paddle._C_ops.shape64(layer_norm_9) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del shape64_8 + + # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) + shape64_9 = paddle._C_ops.shape64(layer_norm_9) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_9 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_17 = paddle._C_ops.matmul(layer_norm_9, parameter_14, False, False) + del parameter_14 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_17 = paddle._C_ops.add(matmul_17, parameter_13) + del parameter_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_10 = [full_5, slice_21, full_7, full_8] + del full_7, full_8 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_10 = paddle._C_ops.stack(combine_10, 0) + del combine_10 + + # pd_op.reshape: (-1x-1x12x32xf32) <- (-1x-1x384xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(add_17, stack_10) + del stack_10 + + # pd_op.transpose: (-1x12x-1x32xf32) <- (-1x-1x12x32xf32) + transpose_13 = paddle._C_ops.transpose(reshape_7, [0, 2, 1, 3]) + del reshape_7 + + # pd_op.matmul: (-1x40x768xf32) <- (-1x40x384xf32, 384x768xf32) + matmul_18 = paddle._C_ops.matmul(matmul_1, parameter_12, False, False) + del parameter_12 + + # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) + add_18 = paddle._C_ops.add(matmul_18, parameter_11) + del parameter_11 + + # pd_op.reshape: (-1x40x2x12x32xf32) <- (-1x40x768xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_18, full_int_array_7) + del full_int_array_7 + + # pd_op.transpose: (2x-1x12x40x32xf32) <- (-1x40x2x12x32xf32) + transpose_14 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_1, full_int_array_3, [1], [0] + ) + del full_int_array_1 + + # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + transpose_14, [0], 
full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_3, full_int_array_4 + + # pd_op.transpose: (-1x12x32x40xf32) <- (-1x12x40x32xf32) + transpose_15 = paddle._C_ops.transpose(slice_22, [0, 1, 3, 2]) + del slice_22 + + # pd_op.matmul: (-1x12x-1x40xf32) <- (-1x12x-1x32xf32, -1x12x32x40xf32) + matmul_19 = paddle._C_ops.matmul(transpose_13, transpose_15, False, False) + + # pd_op.scale: (-1x12x-1x40xf32) <- (-1x12x-1x40xf32, 1xf32) + scale_5 = paddle._C_ops.scale(matmul_19, full_9, float("0"), True) + del matmul_19 + + # pd_op.softmax: (-1x12x-1x40xf32) <- (-1x12x-1x40xf32) + softmax_3 = paddle._C_ops.softmax(scale_5, -1) + del scale_5 + + # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x40xf32, -1x12x40x32xf32) + matmul_20 = paddle._C_ops.matmul(softmax_3, slice_23, False, False) + + # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_11 = [full_5, slice_21, full_10] + del full_10, full_5, slice_21 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_11 = paddle._C_ops.stack(combine_11, 0) + del combine_11 + + # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_16, stack_11) + del stack_11 + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) + matmul_21 = paddle._C_ops.matmul(reshape_9, parameter_10, False, False) + del parameter_10 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_19 = paddle._C_ops.add(matmul_21, parameter_9) + del parameter_9 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_19, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_19 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_20 = paddle._C_ops.add(layer_norm_9, dropout_14) + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_20, parameter_8, parameter_7, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_7, parameter_8 + + # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_6, False, False) + del parameter_6 + + # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) + add_21 = paddle._C_ops.add(matmul_22, parameter_5) + del parameter_5 + + # pd_op.relu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) + relu_1 = paddle._C_ops.relu(add_21) + del add_21 + + # pd_op.dropout: (-1x-1x1536xf32, -1x-1x1536xui8) <- (-1x-1x1536xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + relu_1, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) + matmul_23 = paddle._C_ops.matmul(dropout_16, parameter_4, False, False) + del parameter_4 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_3) + del parameter_3 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda 
x, f: f(x))( + paddle._C_ops.dropout( + add_22, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_22 + + # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + dropout_18, None, full_2, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del dropout_18 + + # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) + add_23 = paddle._C_ops.add(layer_norm_12, dropout_20) + + # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_2, parameter_1, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_1, parameter_2 + + # pd_op.matmul: (-1x-1x6629xf32) <- (-1x-1x384xf32, 384x6629xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_15, parameter_0, False, False) + del ( + add_12, + add_13, + add_16, + add_17, + add_18, + add_2, + add_20, + add_23, + add_5, + add_6, + add_7, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_3, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_9, + full_1, + full_2, + full_9, + full_int_array_6, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_2, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_1, + matmul_10, + matmul_11, + matmul_12, + matmul_13, + matmul_16, + matmul_17, + matmul_18, + matmul_2, + matmul_21, + matmul_22, + matmul_23, + matmul_5, + matmul_6, + matmul_7, + parameter_0, + relu_0, + relu_1, + reshape_1, + reshape_4, + reshape_6, + reshape_9, + scale_2, + scale_4, + slice_1, + slice_14, + slice_17, + slice_19, + slice_23, + slice_4, + slice_7, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_0, + ) + + return matmul_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/weight_meta.py new file mode 100644 index 000000000..81f08147b --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/weight_meta.py @@ -0,0 +1,471 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384, 6629] + dtype = "float32" + min_val = float("-0.342261") + max_val = float("0.948178") + mean = float("0.00919608") + std = float("0.0588392") + data = None + + 
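# Note (added): parameter_0 above is the 384 -> 6629 output projection whose
# result (shape [batch, steps, 6629]) is what subgraph_8's forward() returns;
# the 6629 columns presumably index the recognizer's character dictionary plus
# special tokens, matching the 6629x384 embedding table used for the decoder
# input. A minimal greedy readout over such logits might look as follows;
# greedy_decode is an illustrative sketch, not part of this repository.
import paddle


def greedy_decode(logits):
    # logits: float32 tensor [batch, steps, 6629] as produced by the final
    # matmul in model.py; pick the best id and its probability per step.
    probs = paddle.nn.functional.softmax(logits, axis=-1)
    ids = paddle.argmax(probs, axis=-1)
    conf = paddle.max(probs, axis=-1)
    return ids, conf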
+class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-2.01452") + max_val = float("1.49928") + mean = float("0.0611141") + std = float("0.42782") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.163422") + max_val = float("8.24113") + mean = float("6.07627") + std = float("1.63106") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.646689") + max_val = float("0.213369") + mean = float("-0.00456252") + std = float("0.0499786") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.60316") + max_val = float("1.60728") + mean = float("5.14812e-05") + std = float("0.118933") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [1536] + dtype = "float32" + min_val = float("-2.77784") + max_val = float("0.305267") + mean = float("-0.701704") + std = float("0.245434") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.471037") + max_val = float("0.399332") + mean = float("-0.016715") + std = float("0.0690504") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-2.65602") + max_val = float("15.7776") + mean = float("0.185458") + std = float("0.82799") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("0.0759988") + max_val = float("2.38861") + mean = float("0.5129") + std = float("0.138242") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384] + dtype = "float32" + min_val = float("-0.648992") + max_val = float("0.794819") + mean = float("0.00363502") + std = float("0.11223") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.506227") + max_val = float("0.487122") + mean = float("-2.28861e-05") + std = float("0.055764") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [768] + dtype = "float32" + min_val = float("-0.815102") + max_val = float("0.8321") + mean = float("0.00773128") + std = float("0.233797") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384, 768] + dtype = "float32" + min_val = float("-0.540834") + max_val = float("0.682762") + mean = float("-0.000107196") + std = float("0.0572062") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-3.03302") + max_val = float("2.79748") + mean = float("-0.00706997") + std = float("0.61245") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.385865") + max_val = float("0.395078") + mean = float("-0.000120641") + std = float("0.0588496") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-2.49454") + max_val = float("1.55107") + mean = float("-0.0496227") + std = float("0.205971") + data = None + + +class 
Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("-0.106595") + max_val = float("3.05073") + mean = float("1.06759") + std = float("0.283875") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("-7.57866") + max_val = float("7.19772") + mean = float("-0.0336126") + std = float("0.758333") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384, 384] + dtype = "float32" + min_val = float("-1.09284") + max_val = float("1.1878") + mean = float("-3.29478e-05") + std = float("0.0636084") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [1152] + dtype = "float32" + min_val = float("-2.83286") + max_val = float("3.12454") + mean = float("-0.0115364") + std = float("0.377304") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.339692") + max_val = float("0.40515") + mean = float("-5.27188e-05") + std = float("0.0561111") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("-0.485227") + max_val = float("0.484989") + mean = float("-0.0342452") + std = float("0.183996") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("-0.00470934") + max_val = float("1.34072") + mean = float("1.06065") + std = float("0.184693") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.324262") + max_val = float("0.421889") + mean = float("0.000407339") + std = float("0.0507274") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [1536, 384] + dtype = "float32" + min_val = float("-1.52937") + max_val = float("1.96086") + mean = float("0.000100578") + std = float("0.0423433") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [1536] + dtype = "float32" + min_val = float("-2.05469") + max_val = float("-0.0696607") + mean = float("-0.444252") + std = float("0.32641") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 1536] + dtype = "float32" + min_val = float("-0.378399") + max_val = float("0.331921") + mean = float("0.00142004") + std = float("0.0430308") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("-4.4161") + max_val = float("4.29707") + mean = float("-0.0987831") + std = float("0.652449") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("0.596234") + max_val = float("3.95271") + mean = float("0.892283") + std = float("0.286497") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.398408") + max_val = float("0.199058") + mean = float("0.00136061") + std = float("0.0421304") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.70338") + max_val = float("0.63271") + mean = float("3.93469e-05") + std = float("0.0483079") + data 
= None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [768] + dtype = "float32" + min_val = float("-0.907861") + max_val = float("1.21747") + mean = float("-0.00558146") + std = float("0.216238") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384, 768] + dtype = "float32" + min_val = float("-0.337297") + max_val = float("0.306511") + mean = float("6.81156e-06") + std = float("0.0484196") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-3.12403") + max_val = float("2.62535") + mean = float("0.0243058") + std = float("0.584006") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.222749") + max_val = float("0.275213") + mean = float("-5.63188e-05") + std = float("0.047597") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-1.38136") + max_val = float("2.5909") + mean = float("0.0208505") + std = float("0.302698") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.114127") + max_val = float("3.84426") + mean = float("1.15762") + std = float("0.36469") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-0.927027") + max_val = float("2.06527") + mean = float("0.00738836") + std = float("0.161") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.301063") + max_val = float("0.380668") + mean = float("2.4739e-05") + std = float("0.0463501") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [1152] + dtype = "float32" + min_val = float("-3.37262") + max_val = float("3.31708") + mean = float("-0.023914") + std = float("0.798284") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384, 1152] + dtype = "float32" + min_val = float("-0.742585") + max_val = float("0.617597") + mean = float("0.000251766") + std = float("0.044023") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [6629, 384] + dtype = "float32" + min_val = float("-0.386902") + max_val = float("0.30948") + mean = float("-0.00467325") + std = float("0.0369254") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384] + dtype = "float32" + min_val = float("-0.465531") + max_val = float("0.690117") + mean = float("-5.55391e-05") + std = float("0.0506901") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt new file mode 100644 index 000000000..b3652acc6 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt @@ -0,0 +1 @@ +6bc364131227030b344e9dadbd92fc5fcbb6cbd73c96e610d45b18a2aa3a20bf \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_net.json b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_net.json new file mode 100644 index 000000000..4e1cc3c0d --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_net.json @@ -0,0 +1,6 @@ +{ + 
"framework": "paddle", + "model_name": "ch_SVTRv2_rec", + "num_devices_required": 1, + "num_nodes_required": 1 +} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/input_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/input_meta.py new file mode 100644 index 000000000..7b188eabc --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/input_meta.py @@ -0,0 +1,65 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [6] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [] + dtype = "int64" + data = [8] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [] + dtype = "int64" + data = [256] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [4, 256, 6, 80] + dtype = "float32" + min_val = float("-7.6361") + max_val = float("6.25435") + mean = float("0.00417176") + std = float("0.487843") + data = None diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/model.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/model.py new file mode 100644 index 000000000..26f0b0715 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/model.py @@ -0,0 +1,907 @@ +import paddle + + +class GraphModule(paddle.nn.Layer): + def __init__(self): + super().__init__() + + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + ): + # pd_op.conv2d: (-1x256x-1x80xf32) <- (-1x-1x-1x80xf32, 256x32x5x5xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_8, parameter_71, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_71 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_70, full_int_array_0) + del parameter_70 + + # 
pd_op.add: (-1x256x-1x80xf32) <- (-1x256x-1x80xf32, 1x256x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_0) + del conv2d_0, reshape_0 + + # pd_op.add: (-1x256x-1x80xf32) <- (-1x-1x-1x80xf32, -1x256x-1x80xf32) + add_1 = paddle._C_ops.add(data_8, add_0) + del add_0, data_8 + + # pd_op.flatten: (-1x256x-1xf32) <- (-1x256x-1x80xf32) + flatten_0 = paddle._C_ops.flatten(add_1, 2, 3) + del add_1 + + # pd_op.transpose: (-1x-1x256xf32) <- (-1x256x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_1, layer_norm_2, layer_norm_3 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_0, parameter_69, parameter_68, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_68, parameter_69, transpose_0 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x256xf32, 256x1024xf32) + matmul_0 = paddle._C_ops.matmul(layer_norm_1, parameter_67, False, False) + del parameter_67 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_2 = paddle._C_ops.add(matmul_0, parameter_66) + del matmul_0, parameter_66 + + # pd_op.gelu: (-1x-1x1024xf32) <- (-1x-1x1024xf32) + gelu_0 = paddle._C_ops.gelu(add_2, False) + del add_2 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x1024xf32, 1024x256xf32) + matmul_1 = paddle._C_ops.matmul(gelu_0, parameter_65, False, False) + del gelu_0, parameter_65 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_3 = paddle._C_ops.add(matmul_1, parameter_64) + del matmul_1, parameter_64 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_4 = paddle._C_ops.add(layer_norm_1, add_3) + del add_3, layer_norm_1 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_4, layer_norm_5, layer_norm_6 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_4, parameter_63, parameter_62, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_4, parameter_62, parameter_63 + + # pd_op.transpose: (-1x256x-1xf32) <- (-1x-1x256xf32) + transpose_1 = paddle._C_ops.transpose(layer_norm_4, [0, 2, 1]) + del layer_norm_4 + + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("80"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, data_0, data_1, full_1] + del data_0, data_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x256x-1xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(transpose_1, stack_0) + del stack_0, transpose_1 + + # pd_op.shape64: (4xi64) <- (-1x-1x-1x80xf32) + shape64_0 = paddle._C_ops.shape64(reshape_1) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, 
[1], [0] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del shape64_0 + + # pd_op.conv2d: (-1x256x-1x80xf32) <- (-1x-1x-1x80xf32, 256x32x5x5xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_1, parameter_61, [1, 1], [2, 2], "EXPLICIT", [1, 1], 8, "NCHW" + ) + del parameter_61 + + # pd_op.reshape: (1x256x1x1xf32) <- (256xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_60, full_int_array_0) + del parameter_60 + + # pd_op.add: (-1x256x-1x80xf32) <- (-1x256x-1x80xf32, 1x256x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 + + # pd_op.add: (-1x256x-1x80xf32) <- (-1x-1x-1x80xf32, -1x256x-1x80xf32) + add_6 = paddle._C_ops.add(reshape_1, add_5) + del add_5, reshape_1 + + # pd_op.flatten: (-1x256x-1xf32) <- (-1x256x-1x80xf32) + flatten_1 = paddle._C_ops.flatten(add_6, 2, 3) + del add_6 + + # pd_op.transpose: (-1x-1x256xf32) <- (-1x256x-1xf32) + transpose_2 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_7, layer_norm_8, layer_norm_9 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_2, parameter_59, parameter_58, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_58, parameter_59, transpose_2 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x256xf32, 256x1024xf32) + matmul_2 = paddle._C_ops.matmul(layer_norm_7, parameter_57, False, False) + del parameter_57 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_7 = paddle._C_ops.add(matmul_2, parameter_56) + del matmul_2, parameter_56 + + # pd_op.gelu: (-1x-1x1024xf32) <- (-1x-1x1024xf32) + gelu_1 = paddle._C_ops.gelu(add_7, False) + del add_7 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x1024xf32, 1024x256xf32) + matmul_3 = paddle._C_ops.matmul(gelu_1, parameter_55, False, False) + del gelu_1, parameter_55 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_8 = paddle._C_ops.add(matmul_3, parameter_54) + del matmul_3, parameter_54 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_9 = paddle._C_ops.add(layer_norm_7, add_8) + del add_8, layer_norm_7 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_10, layer_norm_11, layer_norm_12 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_9, parameter_53, parameter_52, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_9, parameter_52, parameter_53 + + # pd_op.transpose: (-1x256x-1xf32) <- (-1x-1x256xf32) + transpose_3 = paddle._C_ops.transpose(layer_norm_10, [0, 2, 1]) + del layer_norm_10 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_1 = [full_0, slice_1, slice_2, full_1] + del full_1, slice_1, slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (-1x-1x-1x80xf32) <- (-1x256x-1xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(transpose_3, stack_1) + del stack_1, transpose_3 + + # pd_op.flatten: (-1x-1x-1xf32) <- (-1x-1x-1x80xf32) + flatten_2 = paddle._C_ops.flatten(reshape_3, 2, 3) + del reshape_3 + + # pd_op.transpose: (-1x-1x-1xf32) <- (-1x-1x-1xf32) + transpose_4 = 
paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.matmul: (-1x-1x768xf32) <- (-1x-1x-1xf32, 256x768xf32) + matmul_4 = paddle._C_ops.matmul(transpose_4, parameter_51, False, False) + del parameter_51 + + # pd_op.add: (-1x-1x768xf32) <- (-1x-1x768xf32, 768xf32) + add_10 = paddle._C_ops.add(matmul_4, parameter_50) + del matmul_4, parameter_50 + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("3"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_2 = [full_0, full_2, full_3, data_2, full_4] + del data_2 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x3x-1x32xf32) <- (-1x-1x768xf32, 5xi64) + reshape_4 = paddle._C_ops.reshape(add_10, stack_2) + del add_10, stack_2 + + # pd_op.transpose: (3x-1x-1x-1x32xf32) <- (-1x-1x3x-1x32xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [2, 0, 3, 1, 4]) + del reshape_4 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + transpose_5, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_5 + + # pd_op.transpose: (-1x-1x32x-1xf32) <- (-1x-1x-1x32xf32) + transpose_6 = paddle._C_ops.transpose(slice_4, [0, 1, 3, 2]) + del slice_4 + + # pd_op.matmul: (-1x-1x-1x-1xf32) <- (-1x-1x-1x32xf32, -1x-1x32x-1xf32) + matmul_5 = paddle._C_ops.matmul(slice_3, transpose_6, False, False) + del slice_3, transpose_6 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_5, full_5, float("0"), True) + del matmul_5 + + # pd_op.softmax: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.matmul: (-1x-1x-1x32xf32) <- (-1x-1x-1x-1xf32, -1x-1x-1x32xf32) + matmul_6 = paddle._C_ops.matmul(softmax_0, slice_5, False, False) + del slice_5, softmax_0 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x-1x-1x32xf32) + transpose_7 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_3 = [full_0, full_2, data_3] + del data_3 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_3, 0) + del combine_3 + + # pd_op.reshape: (-1x-1x-1xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_5 = paddle._C_ops.reshape(transpose_7, stack_3) + del stack_3, transpose_7 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x-1xf32, 256x256xf32) + matmul_7 = paddle._C_ops.matmul(reshape_5, parameter_49, False, False) + del parameter_49, reshape_5 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_11 = paddle._C_ops.add(matmul_7, parameter_48) + del 
matmul_7, parameter_48 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x-1xf32, -1x-1x256xf32) + add_12 = paddle._C_ops.add(transpose_4, add_11) + del add_11, transpose_4 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_13, layer_norm_14, layer_norm_15 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_12, parameter_47, parameter_46, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_12, parameter_46, parameter_47 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x256xf32, 256x1024xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_13, parameter_45, False, False) + del parameter_45 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_13 = paddle._C_ops.add(matmul_8, parameter_44) + del matmul_8, parameter_44 + + # pd_op.gelu: (-1x-1x1024xf32) <- (-1x-1x1024xf32) + gelu_2 = paddle._C_ops.gelu(add_13, False) + del add_13 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x1024xf32, 1024x256xf32) + matmul_9 = paddle._C_ops.matmul(gelu_2, parameter_43, False, False) + del gelu_2, parameter_43 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_14 = paddle._C_ops.add(matmul_9, parameter_42) + del matmul_9, parameter_42 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_15 = paddle._C_ops.add(layer_norm_13, add_14) + del add_14, layer_norm_13 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_16, layer_norm_17, layer_norm_18 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_15, parameter_41, parameter_40, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_15, parameter_40, parameter_41 + + # pd_op.matmul: (-1x-1x768xf32) <- (-1x-1x256xf32, 256x768xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_16, parameter_39, False, False) + del parameter_39 + + # pd_op.add: (-1x-1x768xf32) <- (-1x-1x768xf32, 768xf32) + add_16 = paddle._C_ops.add(matmul_10, parameter_38) + del matmul_10, parameter_38 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_4 = [full_0, full_2, full_3, data_4, full_4] + del data_4 + + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x-1x3x-1x32xf32) <- (-1x-1x768xf32, 5xi64) + reshape_6 = paddle._C_ops.reshape(add_16, stack_4) + del add_16, stack_4 + + # pd_op.transpose: (3x-1x-1x-1x32xf32) <- (-1x-1x3x-1x32xf32) + transpose_8 = paddle._C_ops.transpose(reshape_6, [2, 0, 3, 1, 4]) + del reshape_6 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + transpose_8, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_8 + + # pd_op.transpose: (-1x-1x32x-1xf32) <- (-1x-1x-1x32xf32) + transpose_9 = paddle._C_ops.transpose(slice_7, [0, 1, 3, 2]) + del slice_7 + + # pd_op.matmul: (-1x-1x-1x-1xf32) <- (-1x-1x-1x32xf32, -1x-1x32x-1xf32) + matmul_11 = paddle._C_ops.matmul(slice_6, transpose_9, False, False) + del slice_6, transpose_9 + + # 
pd_op.scale: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_11, full_5, float("0"), True) + del matmul_11 + + # pd_op.softmax: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.matmul: (-1x-1x-1x32xf32) <- (-1x-1x-1x-1xf32, -1x-1x-1x32xf32) + matmul_12 = paddle._C_ops.matmul(softmax_1, slice_8, False, False) + del slice_8, softmax_1 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x-1x-1x32xf32) + transpose_10 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [full_0, full_2, data_5] + del data_5 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x-1xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_10, stack_5) + del stack_5, transpose_10 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x-1xf32, 256x256xf32) + matmul_13 = paddle._C_ops.matmul(reshape_7, parameter_37, False, False) + del parameter_37, reshape_7 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_17 = paddle._C_ops.add(matmul_13, parameter_36) + del matmul_13, parameter_36 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_18 = paddle._C_ops.add(layer_norm_16, add_17) + del add_17, layer_norm_16 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_19, layer_norm_20, layer_norm_21 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_18, parameter_35, parameter_34, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_18, parameter_34, parameter_35 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x256xf32, 256x1024xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_19, parameter_33, False, False) + del parameter_33 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_19 = paddle._C_ops.add(matmul_14, parameter_32) + del matmul_14, parameter_32 + + # pd_op.gelu: (-1x-1x1024xf32) <- (-1x-1x1024xf32) + gelu_3 = paddle._C_ops.gelu(add_19, False) + del add_19 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x1024xf32, 1024x256xf32) + matmul_15 = paddle._C_ops.matmul(gelu_3, parameter_31, False, False) + del gelu_3, parameter_31 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_20 = paddle._C_ops.add(matmul_15, parameter_30) + del matmul_15, parameter_30 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_21 = paddle._C_ops.add(layer_norm_19, add_20) + del add_20, layer_norm_19 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_22, layer_norm_23, layer_norm_24 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_21, parameter_29, parameter_28, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_21, parameter_28, parameter_29 + + # pd_op.matmul: (-1x-1x768xf32) <- (-1x-1x256xf32, 256x768xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_22, parameter_27, False, False) + del parameter_27 + + # pd_op.add: (-1x-1x768xf32) <- (-1x-1x768xf32, 768xf32) + add_22 = paddle._C_ops.add(matmul_16, parameter_26) + del matmul_16, parameter_26 + + # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) + combine_6 = [full_0, full_2, full_3, data_6, full_4] + del data_6, full_3, full_4 
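+        # The ops below repeat the multi-head self-attention pattern used
+        # above: the fused qkv projection is reshaped to
+        # [batch, seq_len, 3, num_heads, head_dim=32], split into q/k/v, and
+        # combined via scaled dot-product attention (scale 1/sqrt(32) ~= 0.176777)
+        # before the output projection.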
+ + # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_6, 0) + del combine_6 + + # pd_op.reshape: (-1x-1x3x-1x32xf32) <- (-1x-1x768xf32, 5xi64) + reshape_8 = paddle._C_ops.reshape(add_22, stack_6) + del add_22, stack_6 + + # pd_op.transpose: (3x-1x-1x-1x32xf32) <- (-1x-1x3x-1x32xf32) + transpose_11 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) + del reshape_8 + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x-1x-1x32xf32) <- (3x-1x-1x-1x32xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + transpose_11, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del transpose_11 + + # pd_op.transpose: (-1x-1x32x-1xf32) <- (-1x-1x-1x32xf32) + transpose_12 = paddle._C_ops.transpose(slice_10, [0, 1, 3, 2]) + del slice_10 + + # pd_op.matmul: (-1x-1x-1x-1xf32) <- (-1x-1x-1x32xf32, -1x-1x32x-1xf32) + matmul_17 = paddle._C_ops.matmul(slice_9, transpose_12, False, False) + del slice_9, transpose_12 + + # pd_op.scale: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_17, full_5, float("0"), True) + del matmul_17 + + # pd_op.softmax: (-1x-1x-1x-1xf32) <- (-1x-1x-1x-1xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.matmul: (-1x-1x-1x32xf32) <- (-1x-1x-1x-1xf32, -1x-1x-1x32xf32) + matmul_18 = paddle._C_ops.matmul(softmax_2, slice_11, False, False) + del slice_11, softmax_2 + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x-1x-1x32xf32) + transpose_13 = paddle._C_ops.transpose(matmul_18, [0, 2, 1, 3]) + del matmul_18 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_7 = [full_0, full_2, data_7] + del data_7, full_0, full_2 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_7 = paddle._C_ops.stack(combine_7, 0) + del combine_7 + + # pd_op.reshape: (-1x-1x-1xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(transpose_13, stack_7) + del stack_7, transpose_13 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x-1xf32, 256x256xf32) + matmul_19 = paddle._C_ops.matmul(reshape_9, parameter_25, False, False) + del parameter_25, reshape_9 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_23 = paddle._C_ops.add(matmul_19, parameter_24) + del matmul_19, parameter_24 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_24 = paddle._C_ops.add(layer_norm_22, add_23) + del add_23, layer_norm_22 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_25, layer_norm_26, layer_norm_27 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_24, parameter_23, parameter_22, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_24, parameter_22, parameter_23 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x256xf32, 256x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_25, parameter_21, False, False) + del parameter_21 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_25 = paddle._C_ops.add(matmul_20, parameter_20) + del matmul_20, parameter_20 + + # pd_op.gelu: (-1x-1x1024xf32) <- (-1x-1x1024xf32) + gelu_4 = paddle._C_ops.gelu(add_25, False) + del add_25 + + # 
pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x1024xf32, 1024x256xf32) + matmul_21 = paddle._C_ops.matmul(gelu_4, parameter_19, False, False) + del gelu_4, parameter_19 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_26 = paddle._C_ops.add(matmul_21, parameter_18) + del matmul_21, parameter_18 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_27 = paddle._C_ops.add(layer_norm_25, add_26) + del add_26, layer_norm_25 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_28, layer_norm_29, layer_norm_30 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_27, parameter_17, parameter_16, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_27, parameter_16, parameter_17 + + # pd_op.matmul: (-1x-1x768xf32) <- (-1x-1x256xf32, 256x768xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_28, parameter_15, False, False) + del parameter_15 + + # pd_op.add: (-1x-1x768xf32) <- (-1x-1x768xf32, 768xf32) + add_28 = paddle._C_ops.add(matmul_22, parameter_14) + del matmul_22, parameter_14 + + # pd_op.full_int_array: (5xi64) <- () + full_int_array_5 = [0, -1, 3, 8, 32] + + # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x-1x768xf32, 5xi64) + reshape_10 = paddle._C_ops.reshape(add_28, full_int_array_5) + del add_28, full_int_array_5 + + # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) + transpose_14 = paddle._C_ops.transpose(reshape_10, [2, 0, 3, 1, 4]) + del reshape_10 + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + + # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + transpose_14, [0], full_int_array_3, full_int_array_4, [1], [0] + ) + del full_int_array_4, transpose_14 + + # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) + transpose_15 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) + del slice_13 + + # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) + matmul_23 = paddle._C_ops.matmul(slice_12, transpose_15, False, False) + del slice_12, transpose_15 + + # pd_op.scale: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_23, full_5, float("0"), True) + del full_5, matmul_23 + + # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) + matmul_24 = paddle._C_ops.matmul(softmax_3, slice_14, False, False) + del slice_14, softmax_3 + + # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) + transpose_16 = paddle._C_ops.transpose(matmul_24, [0, 2, 1, 3]) + del matmul_24 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, -1, 256] + + # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6, transpose_16 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) + matmul_25 = paddle._C_ops.matmul(reshape_11, parameter_13, False, False) + del parameter_13, reshape_11 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_29 = paddle._C_ops.add(matmul_25, parameter_12) + del matmul_25, 
parameter_12 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_30 = paddle._C_ops.add(layer_norm_28, add_29) + del add_29, layer_norm_28 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_31, layer_norm_32, layer_norm_33 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_30, parameter_11, parameter_10, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_30, parameter_10, parameter_11 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x256xf32, 256x1024xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_31, parameter_9, False, False) + del parameter_9 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_31 = paddle._C_ops.add(matmul_26, parameter_8) + del matmul_26, parameter_8 + + # pd_op.gelu: (-1x-1x1024xf32) <- (-1x-1x1024xf32) + gelu_5 = paddle._C_ops.gelu(add_31, False) + del add_31 + + # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x1024xf32, 1024x256xf32) + matmul_27 = paddle._C_ops.matmul(gelu_5, parameter_7, False, False) + del gelu_5, parameter_7 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) + add_32 = paddle._C_ops.add(matmul_27, parameter_6) + del matmul_27, parameter_6 + + # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, -1x-1x256xf32) + add_33 = paddle._C_ops.add(layer_norm_31, add_32) + del add_32, layer_norm_31 + + # pd_op.layer_norm: (-1x-1x256xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x256xf32, 256xf32, 256xf32) + layer_norm_34, layer_norm_35, layer_norm_36 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_33, parameter_5, parameter_4, float("1e-06"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_33, parameter_4, parameter_5 + + # pd_op.shape64: (3xi64) <- (-1x-1x256xf32) + shape64_1 = paddle._C_ops.shape64(layer_norm_34) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_3, shape64_1 + + # pd_op.transpose: (-1x256x-1xf32) <- (-1x-1x256xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_34, [0, 2, 1]) + del layer_norm_34 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [0, 256, 6, 80] + + # pd_op.reshape: (-1x256x6x80xf32) <- (-1x256x-1xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(transpose_17, full_int_array_7) + del full_int_array_7, transpose_17 + + # pd_op.conv2d: (-1x384x3x80xf32) <- (-1x256x6x80xf32, 384x256x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + reshape_12, parameter_3, [2, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3, reshape_12 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del full_int_array_0, parameter_2 + + # pd_op.add: (-1x384x3x80xf32) <- (-1x384x3x80xf32, 1x384x1x1xf32) + add_34 = paddle._C_ops.add(conv2d_2, reshape_13) + del conv2d_2, reshape_13 + + # pd_op.shape64: (4xi64) <- (-1x384x3x80xf32) + shape64_2 = paddle._C_ops.shape64(add_34) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_1, full_int_array_2, [1], [0] + ) + del full_int_array_1, full_int_array_2, shape64_2 + + # pd_op.flatten: (-1x384x240xf32) <- (-1x384x3x80xf32) + flatten_3 = paddle._C_ops.flatten(add_34, 
2, 3) + del add_34 + + # pd_op.transpose: (-1x240x384xf32) <- (-1x384x240xf32) + transpose_18 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.layer_norm: (-1x240x384xf32, -1x240xf32, -1x240xf32) <- (-1x240x384xf32, 384xf32, 384xf32) + layer_norm_0, layer_norm_37, layer_norm_38 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + transpose_18, parameter_1, parameter_0, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_0, parameter_1, transpose_18 + + return layer_norm_0 diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/weight_meta.py new file mode 100644 index 000000000..ec27cb5b2 --- /dev/null +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/weight_meta.py @@ -0,0 +1,790 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.606548") + max_val = float("0.572658") + mean = float("0.00375302") + std = float("0.148426") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("-0.0136385") + max_val = float("1.10781") + mean = float("0.805448") + std = float("0.318495") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("-2.13381") + max_val = float("3.10468") + mean = float("0.00370893") + std = float("0.481209") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-4.80588") + max_val = float("3.58056") + mean = float("-0.000669643") + std = float("0.434603") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [256] + dtype = "float32" + min_val = float("-1.00377") + max_val = float("1.5866") + mean = float("-0.117704") + std = float("0.210453") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [256] + dtype = "float32" + min_val = float("0.227411") + max_val = float("1.68324") + mean = float("1.24738") + std = float("0.144259") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [256] + dtype = "float32" + min_val = float("-8.09277") + max_val = float("0.640735") + mean = float("-0.0152048") + std = float("0.534922") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.43496") + max_val = float("1.05897") + mean = float("1.53746e-05") + std = float("0.0573661") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [1024] + dtype = "float32" + min_val = float("-1.46232") + max_val = float("-0.0231789") + mean = float("-0.687925") + std = float("0.224782") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.346092") + max_val = float("0.299287") + mean = float("-0.00565399") + std = float("0.0564931") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [256] + dtype = "float32" + min_val = float("-3.06533") + max_val = float("1.76981") + mean = float("0.0968756") + std = float("0.355712") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [256] + dtype = 
"float32" + min_val = float("0.111695") + max_val = float("2.92098") + mean = float("0.694832") + std = float("0.22963") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [256] + dtype = "float32" + min_val = float("-1.23681") + max_val = float("1.0048") + mean = float("-0.00871141") + std = float("0.14359") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.32287") + max_val = float("0.545039") + mean = float("-9.46548e-05") + std = float("0.0445592") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [768] + dtype = "float32" + min_val = float("-3.23849") + max_val = float("3.07741") + mean = float("0.0190537") + std = float("0.323153") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.599278") + max_val = float("0.496422") + mean = float("-3.80634e-05") + std = float("0.052645") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [256] + dtype = "float32" + min_val = float("-1.03015") + max_val = float("1.35403") + mean = float("-0.012842") + std = float("0.214878") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [256] + dtype = "float32" + min_val = float("0.291876") + max_val = float("1.5882") + mean = float("0.900833") + std = float("0.111472") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [256] + dtype = "float32" + min_val = float("-2.05991") + max_val = float("0.660192") + mean = float("-0.00212401") + std = float("0.194412") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.563975") + max_val = float("0.813296") + mean = float("2.61305e-05") + std = float("0.0567821") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [1024] + dtype = "float32" + min_val = float("-1.33353") + max_val = float("-0.258815") + mean = float("-0.694224") + std = float("0.209829") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.379618") + max_val = float("0.370024") + mean = float("-0.00381384") + std = float("0.0555629") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [256] + dtype = "float32" + min_val = float("-2.22821") + max_val = float("1.38206") + mean = float("0.0666401") + std = float("0.302843") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [256] + dtype = "float32" + min_val = float("0.0820868") + max_val = float("2.64053") + mean = float("0.745009") + std = float("0.190088") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [256] + dtype = "float32" + min_val = float("-1.62208") + max_val = float("1.39124") + mean = float("-0.0110466") + std = float("0.1868") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.484073") + max_val = float("0.322057") + mean = float("9.14735e-05") + std = float("0.0479608") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [768] + 
dtype = "float32" + min_val = float("-2.94499") + max_val = float("3.08614") + mean = float("0.0137281") + std = float("0.347329") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.406168") + max_val = float("0.440281") + mean = float("-2.74141e-05") + std = float("0.0531518") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [256] + dtype = "float32" + min_val = float("-1.5211") + max_val = float("1.36009") + mean = float("-0.00825827") + std = float("0.293386") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [256] + dtype = "float32" + min_val = float("0.261202") + max_val = float("2.43577") + mean = float("0.92451") + std = float("0.190809") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [256] + dtype = "float32" + min_val = float("-0.749751") + max_val = float("0.592661") + mean = float("-0.000329425") + std = float("0.132286") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.505895") + max_val = float("0.472223") + mean = float("-3.80705e-05") + std = float("0.0616234") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [1024] + dtype = "float32" + min_val = float("-1.11978") + max_val = float("-0.0299963") + mean = float("-0.639817") + std = float("0.206774") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.473619") + max_val = float("0.354136") + mean = float("-0.00145026") + std = float("0.0545714") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [256] + dtype = "float32" + min_val = float("-2.6692") + max_val = float("2.16447") + mean = float("0.0282511") + std = float("0.485632") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [256] + dtype = "float32" + min_val = float("0.0202418") + max_val = float("2.37993") + mean = float("0.79868") + std = float("0.238688") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [256] + dtype = "float32" + min_val = float("-1.95204") + max_val = float("1.32873") + mean = float("-0.0100322") + std = float("0.260344") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.331363") + max_val = float("0.302074") + mean = float("6.85545e-05") + std = float("0.0484705") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-3.23988") + max_val = float("3.17341") + mean = float("0.0112835") + std = float("0.360818") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.485666") + max_val = float("0.382872") + mean = float("8.84596e-07") + std = float("0.0527938") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [256] + dtype = "float32" + min_val = float("-1.22705") + max_val = float("1.12567") + mean = float("-0.00583161") + std = float("0.31517") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" 
+ shape = [256] + dtype = "float32" + min_val = float("0.0719479") + max_val = float("2.17936") + mean = float("0.899499") + std = float("0.215358") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [256] + dtype = "float32" + min_val = float("-0.593713") + max_val = float("0.880143") + mean = float("0.00452423") + std = float("0.137268") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.44878") + max_val = float("0.40673") + mean = float("5.79501e-05") + std = float("0.0640804") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [1024] + dtype = "float32" + min_val = float("-1.1324") + max_val = float("-0.0597701") + mean = float("-0.618033") + std = float("0.192981") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.333676") + max_val = float("0.370896") + mean = float("0.000545573") + std = float("0.0537775") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [256] + dtype = "float32" + min_val = float("-2.27663") + max_val = float("2.47616") + mean = float("-0.00601392") + std = float("0.576123") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [256] + dtype = "float32" + min_val = float("0.0863369") + max_val = float("2.26134") + mean = float("0.787782") + std = float("0.203747") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [256] + dtype = "float32" + min_val = float("-1.86604") + max_val = float("1.88891") + mean = float("-0.00533099") + std = float("0.221605") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [256, 256] + dtype = "float32" + min_val = float("-0.427967") + max_val = float("0.523471") + mean = float("2.46172e-05") + std = float("0.048039") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-2.90043") + max_val = float("2.60659") + mean = float("-0.00467167") + std = float("0.351381") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [256, 768] + dtype = "float32" + min_val = float("-0.303316") + max_val = float("0.421669") + mean = float("4.09005e-05") + std = float("0.0527594") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [256] + dtype = "float32" + min_val = float("-1.72865") + max_val = float("1.36101") + mean = float("0.000942415") + std = float("0.4173") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [256] + dtype = "float32" + min_val = float("0.301877") + max_val = float("1.3885") + mean = float("0.917692") + std = float("0.182302") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [256] + dtype = "float32" + min_val = float("-0.546974") + max_val = float("0.441399") + mean = float("-0.00034962") + std = float("0.137598") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [1024, 256] + dtype = "float32" + min_val = float("-0.438771") + max_val = float("0.348308") + mean = float("0.000119522") + std = float("0.06319") + data = None + + +class Program_weight_tensor_parameter_56: + name 
= "parameter_56" + shape = [1024] + dtype = "float32" + min_val = float("-0.870234") + max_val = float("-0.148508") + mean = float("-0.578987") + std = float("0.148147") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.618111") + max_val = float("0.42523") + mean = float("0.000793249") + std = float("0.0470161") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [256] + dtype = "float32" + min_val = float("-1.54587") + max_val = float("2.06195") + mean = float("-0.0245377") + std = float("0.666909") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [256] + dtype = "float32" + min_val = float("0.249175") + max_val = float("1.74662") + mean = float("0.92944") + std = float("0.194932") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [256] + dtype = "float32" + min_val = float("-2.28339") + max_val = float("2.10392") + mean = float("0.00488615") + std = float("0.356684") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.271139") + max_val = float("0.519153") + mean = float("0.00138608") + std = float("0.0431195") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [256] + dtype = "float32" + min_val = float("-0.633619") + max_val = float("0.740488") + mean = float("-0.0519645") + std = float("0.1882") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [256] + dtype = "float32" + min_val = float("0.0676594") + max_val = float("1.86373") + mean = float("1.3139") + std = float("0.156863") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [256] + dtype = "float32" + min_val = float("-8.93803") + max_val = float("0.458159") + mean = float("-0.0257134") + std = float("0.585232") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [1024, 256] + dtype = "float32" + min_val = float("-1.32384") + max_val = float("1.1636") + mean = float("-3.25298e-05") + std = float("0.0559817") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [1024] + dtype = "float32" + min_val = float("-1.46769") + max_val = float("-0.128175") + mean = float("-0.6653") + std = float("0.178159") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [256, 1024] + dtype = "float32" + min_val = float("-0.325814") + max_val = float("0.365852") + mean = float("0.000794164") + std = float("0.0568346") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [256] + dtype = "float32" + min_val = float("-0.65967") + max_val = float("0.72201") + mean = float("-0.0107726") + std = float("0.19808") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [256] + dtype = "float32" + min_val = float("0.402273") + max_val = float("1.83328") + mean = float("0.686545") + std = float("0.133889") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [256] + dtype = "float32" + min_val = float("-2.1692") + max_val = float("1.48687") + mean = float("-0.00278559") + std = float("0.301189") + data = None + + +class Program_weight_tensor_parameter_71: + 
name = "parameter_71" + shape = [256, 32, 5, 5] + dtype = "float32" + min_val = float("-0.260716") + max_val = float("0.493174") + mean = float("0.00108033") + std = float("0.0415011") + data = None From 02131314bff2d17b1df843f6959943823426e261 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Fri, 12 Sep 2025 10:31:24 +0800 Subject: [PATCH 3/5] Update graph_hash. --- paddle_samples/PaddleNLP/bert-base-cased/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/bert-large-cased/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/convbert-base/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/convbert-medium-small/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/convbert-small/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-1.0/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-2.0-base-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-2.0-large-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-3.0-base-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-3.0-medium-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-3.0-micro-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-3.0-mini-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-3.0-nano-zh/graph_hash.txt | 2 +- .../PaddleNLP/ernie-3.0-tiny-base-v2-zh/graph_hash.txt | 2 +- .../PaddleNLP/ernie-3.0-tiny-medium-v2-zh/graph_hash.txt | 2 +- .../PaddleNLP/ernie-3.0-tiny-micro-v2-zh/graph_hash.txt | 2 +- .../PaddleNLP/ernie-3.0-tiny-mini-v2-zh/graph_hash.txt | 2 +- .../PaddleNLP/ernie-3.0-tiny-nano-v2-zh/graph_hash.txt | 2 +- .../PaddleNLP/ernie-3.0-tiny-pico-v2-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-3.0-xbase-zh/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-m-base/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-m-large/graph_hash.txt | 2 +- .../ernie-search-large-cross-encoder-marco-en/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ernie-tiny/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/facebook_llama-7b/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/gpt2-medium-en/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/nezha-base-chinese/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/nezha-large-chinese/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/ppminilm-6l-768h/graph_hash.txt | 2 +- .../PaddleNLP/rocketqa-base-cross-encoder/graph_hash.txt | 2 +- .../PaddleNLP/rocketqa-medium-cross-encoder/graph_hash.txt | 2 +- .../PaddleNLP/rocketqa-micro-cross-encoder/graph_hash.txt | 2 +- .../PaddleNLP/rocketqa-mini-cross-encoder/graph_hash.txt | 2 +- .../PaddleNLP/rocketqa-nano-cross-encoder/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/roformer-chinese-base/graph_hash.txt | 2 +- .../PaddleNLP/roformer-chinese-char-base/graph_hash.txt | 2 +- .../PaddleNLP/roformer-chinese-char-small/graph_hash.txt | 2 +- .../PaddleNLP/roformer-chinese-sim-char-base/graph_hash.txt | 2 +- .../PaddleNLP/roformer-chinese-sim-char-small/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/roformer-chinese-small/graph_hash.txt | 2 +- .../roformer-english-small-discriminator/graph_hash.txt | 2 +- .../PaddleNLP/roformer-english-small-generator/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/skep_ernie_1.0_large_ch/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/skep_ernie_2.0_large_en/graph_hash.txt | 2 +- .../PaddleNLP/uer_chinese-roberta-6l-768h/graph_hash.txt | 2 +- .../PaddleNLP/uer_chinese-roberta-medium/graph_hash.txt | 2 +- .../PaddleNLP/uer_chinese-roberta-mini/graph_hash.txt | 2 +- .../PaddleNLP/uer_chinese-roberta-small/graph_hash.txt | 2 +- .../PaddleNLP/uer_chinese-roberta-tiny/graph_hash.txt | 2 +- 
paddle_samples/PaddleNLP/utc-base/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-large/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-medium/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-micro/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-mini/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-nano/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-pico/graph_hash.txt | 2 +- paddle_samples/PaddleNLP/utc-xbase/graph_hash.txt | 2 +- .../PaddleScience/euler_beam/subgraph_0/graph_hash.txt | 2 +- .../PaddleScience/euler_beam/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/AutoEncoder_ad/graph_hash.txt | 2 +- .../PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/CLIP_vit_base_patch16_224/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/CLIP_vit_base_patch16_224/subgraph_1/graph_hash.txt | 2 +- .../CLIP_vit_base_patch16_448_ML/subgraph_0/graph_hash.txt | 2 +- .../CLIP_vit_base_patch16_448_ML/subgraph_1/graph_hash.txt | 2 +- .../CLIP_vit_base_patch16_448_ML/subgraph_2/graph_hash.txt | 2 +- .../CLIP_vit_large_patch14_224/subgraph_0/graph_hash.txt | 2 +- .../CLIP_vit_large_patch14_224/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/CenterNet-DLA-34/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/CenterNet-DLA-34/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/CenterNet-ResNet50/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/CenterNet-ResNet50/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/ConvNeXt_base_224/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/ConvNeXt_base_224/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ConvNeXt_base_384/graph_hash.txt | 2 +- .../PaddleX/ConvNeXt_large_224/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/ConvNeXt_large_224/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ConvNeXt_large_384/graph_hash.txt | 2 +- paddle_samples/PaddleX/ConvNeXt_small/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/ConvNeXt_small/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/DLinear/graph_hash.txt | 2 +- paddle_samples/PaddleX/DLinear_ad/graph_hash.txt | 2 +- paddle_samples/PaddleX/Deeplabv3-R101/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/Deeplabv3-R101/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/Deeplabv3-R50/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/Deeplabv3-R50/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/Deeplabv3_Plus-R101/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/Deeplabv3_Plus-R101/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/Deeplabv3_Plus-R50/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/Deeplabv3_Plus-R50/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FCOS-ResNet50/subgraph_0/graph_hash.txt | 2 +- 
paddle_samples/PaddleX/FCOS-ResNet50/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FCOS-ResNet50/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/FCOS-ResNet50/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/FCOS-ResNet50/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-L/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-L/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-L/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-M/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-M/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-M/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-S/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-S/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-S/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T0/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T0/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T1/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T1/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T1/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T2/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T2/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/FasterNet-T2/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/MaskFormer_small/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MaskFormer_small/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MaskFormer_small/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/MaskFormer_tiny/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MaskFormer_tiny/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileFaceNet/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileFaceNet/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileFaceNet/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV1_x0_25/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV1_x0_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV1_x0_75/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV2_x0_25/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV2_x0_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV2_x1_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV2_x1_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/MobileNetV2_x2_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x0_35/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x0_35/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x0_5/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x0_5/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x0_75/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x0_75/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x1_0/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x1_0/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x1_25/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_large_x1_25/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x0_35/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x0_35/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x0_5/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x0_5/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x0_75/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x0_75/subgraph_1/graph_hash.txt | 
2 +- .../PaddleX/MobileNetV3_small_x1_0/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x1_0/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x1_25/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV3_small_x1_25/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_large/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_large/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_large/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_medium/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_medium/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_medium/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_small/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_small/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_conv_small/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_hybrid_large/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_hybrid_large/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_hybrid_large/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_hybrid_medium/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_hybrid_medium/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/MobileNetV4_hybrid_medium/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/NLinear/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/NLinear/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/NLinear/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/Nonstationary_ad/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/Nonstationary_ad/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/Nonstationary_ad/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/OCRNet_HRNet-W48/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/OCRNet_HRNet-W48/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt | 2 +- 
paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-FormulaNet-S/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-FormulaNet-S/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B0_ML/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B0_ML/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B0_ML/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B4_ML/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B4_ML/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B4_ML/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B6_ML/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B6_ML/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-HGNetV2-B6_ML/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNet_base/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNet_small/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-HGNet_tiny/graph_hash.txt | 2 +- .../PaddleX/PP-LCNetV2_base/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-LCNetV2_base/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-LCNetV2_large/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-LCNetV2_large/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-LCNetV2_small/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-LCNetV2_small/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x0_25/graph_hash.txt | 2 +- .../PP-LCNet_x0_25_textline_ori/subgraph_0/graph_hash.txt | 2 +- .../PP-LCNet_x0_25_textline_ori/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x0_35/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x0_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x0_75/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x1_0/graph_hash.txt | 2 +- .../PaddleX/PP-LCNet_x1_0_ML/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-LCNet_x1_0_ML/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-LCNet_x1_0_ML/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x1_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x2_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LCNet_x2_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_0/graph_hash.txt | 2 
+- paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_det/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_det/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_server_det/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv3_server_det/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_mobile_det/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_mobile_det/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-OCRv4_mobile_rec/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-OCRv4_mobile_seal_det/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_det/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_det/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_rec/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_rec/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_rec/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_rec/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_rec/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-OCRv4_server_rec/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-OCRv4_server_seal_det/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_1/graph_hash.txt | 2 +- .../PP-ShiTuV2_rec_CLIP_vit_base/subgraph_0/graph_hash.txt | 2 +- .../PP-ShiTuV2_rec_CLIP_vit_base/subgraph_1/graph_hash.txt | 2 +- .../PP-ShiTuV2_rec_CLIP_vit_base/subgraph_2/graph_hash.txt | 2 +- .../PP-ShiTuV2_rec_CLIP_vit_large/subgraph_0/graph_hash.txt | 2 +- .../PP-ShiTuV2_rec_CLIP_vit_large/subgraph_1/graph_hash.txt | 2 +- .../PP-ShiTuV2_rec_CLIP_vit_large/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt | 2 +- 
.../PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt | 2 +- 
.../PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt | 2 +- 
.../PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt | 2 +- .../PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt | 2 +- .../PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt | 2 +- paddle_samples/PaddleX/PatchTST/subgraph_0/graph_hash.txt | 2 +- 
paddle_samples/PaddleX/PatchTST/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PatchTST/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/PatchTST_ad/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/PatchTST_ad/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/PatchTST_ad/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/RLinear/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet101/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet101_vd/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet152/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet152_vd/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet18/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet18_vd/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet200_vd/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet34/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet34_vd/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50_ML/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50_ML/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50_face/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50_face/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50_face/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/ResNet50_vd/graph_hash.txt | 2 +- paddle_samples/PaddleX/SLANet/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SLANet/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SLANet/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt | 2 +- paddle_samples/PaddleX/SeaFormer_base/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SeaFormer_base/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SeaFormer_base/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/SeaFormer_large/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/SeaFormer_large/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/SeaFormer_small/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/SeaFormer_small/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SeaFormer_tiny/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SeaFormer_tiny/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B0/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B0/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B1/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B1/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B2/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B2/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B3/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B3/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B4/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B4/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/SegFormer-B5/subgraph_0/graph_hash.txt | 2 +- 
paddle_samples/PaddleX/SegFormer-B5/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S1/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S2/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S2/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S2/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S3/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S3/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S3/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S4/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S4/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/StarNet-S4/subgraph_2/graph_hash.txt | 2 +- .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt | 2 +- 
paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-DarkNet53/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-DarkNet53/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-DarkNet53/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-DarkNet53/subgraph_3/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-MobileNetV3/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-MobileNetV3/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-MobileNetV3/subgraph_2/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_0/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_1/graph_hash.txt | 2 +- .../PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt | 2 +- paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt | 2 +- tools/ci/check_validate.sh | 1 - 634 files changed, 633 insertions(+), 634 deletions(-) diff --git a/paddle_samples/PaddleNLP/bert-base-cased/graph_hash.txt b/paddle_samples/PaddleNLP/bert-base-cased/graph_hash.txt index 628d62c12..0c116bc18 100644 --- a/paddle_samples/PaddleNLP/bert-base-cased/graph_hash.txt +++ b/paddle_samples/PaddleNLP/bert-base-cased/graph_hash.txt @@ -1 +1 @@ -f2b5a332b1b19703e7ccfb450de96c9c12244144c7b9d305d20587f772fb6672 \ No newline at end of file +517608d4d2699e09c6171648da38a4f924556cf25abd97875599acfdda5807e4 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/bert-large-cased/graph_hash.txt b/paddle_samples/PaddleNLP/bert-large-cased/graph_hash.txt index 824afda4d..fc6adfa90 100644 --- a/paddle_samples/PaddleNLP/bert-large-cased/graph_hash.txt +++ b/paddle_samples/PaddleNLP/bert-large-cased/graph_hash.txt @@ -1 +1 @@ -02fa10efca360c8ba7818c367cdeb9979e2af8c72cf489913396a1f241bbad07 \ No newline at end of file +2a46a550da3ca0bd5aa6157a26aff525a3bc69ff8f67fe35b4424303c12e2820 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/convbert-base/graph_hash.txt 
b/paddle_samples/PaddleNLP/convbert-base/graph_hash.txt index 3bc4a3340..ad6ce0385 100644 --- a/paddle_samples/PaddleNLP/convbert-base/graph_hash.txt +++ b/paddle_samples/PaddleNLP/convbert-base/graph_hash.txt @@ -1 +1 @@ -482fd9e9f201b45c2ce0b22b3037878aa3d139cc203fb35c781fd470140ec962 \ No newline at end of file +e13d4b5e10e7aadcf05e891979bb73813fb3c4c1407b2688fb6ac8f849cdcee0 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/convbert-medium-small/graph_hash.txt b/paddle_samples/PaddleNLP/convbert-medium-small/graph_hash.txt index 3b1bfa97b..f28c06d6b 100644 --- a/paddle_samples/PaddleNLP/convbert-medium-small/graph_hash.txt +++ b/paddle_samples/PaddleNLP/convbert-medium-small/graph_hash.txt @@ -1 +1 @@ -e013c0a1d9173f7db5ed91398ad65fa43154e3bc8ce2e15c2d5a6637ddec61d8 \ No newline at end of file +2511edee7164b3327d5efcce7879c5a19a19aec8a86e74e233ae83db0807ed46 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/convbert-small/graph_hash.txt b/paddle_samples/PaddleNLP/convbert-small/graph_hash.txt index 31d5efd44..fa7c5d89e 100644 --- a/paddle_samples/PaddleNLP/convbert-small/graph_hash.txt +++ b/paddle_samples/PaddleNLP/convbert-small/graph_hash.txt @@ -1 +1 @@ -55b1fcce22aee360f71154396a1f528446cae70ebd991927c0abf6c06016d201 \ No newline at end of file +c1e76a465ae2ac6d1cb568acb5f17db4bca92d6d0239061cd319f2d591ba82b9 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-1.0/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-1.0/graph_hash.txt index 6c06c8ddd..b1f3221de 100644 --- a/paddle_samples/PaddleNLP/ernie-1.0/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-1.0/graph_hash.txt @@ -1 +1 @@ -2ea1f7f9bb52a294ff9fb5fd9876b9e9ed8b4af2fdb6cce93985eedfe50c7a94 \ No newline at end of file +b23ce390b79f214cdbd74ea52c32d6dc141d93b179a7bf75f94bb12e8bd91561 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-2.0-base-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-2.0-base-zh/graph_hash.txt index 0add43bc7..c2d79682d 100644 --- a/paddle_samples/PaddleNLP/ernie-2.0-base-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-2.0-base-zh/graph_hash.txt @@ -1 +1 @@ -40fde6163a995d989050cf8b78b44132b4b62ce218f604dad67aff1f4f5a56f0 \ No newline at end of file +94a3256e834ecd7e836da57b44da751d75ef9e095b04ac00abc37a5e18a01390 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-2.0-large-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-2.0-large-zh/graph_hash.txt index 8990f7296..1d720dfb1 100644 --- a/paddle_samples/PaddleNLP/ernie-2.0-large-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-2.0-large-zh/graph_hash.txt @@ -1 +1 @@ -3a7feb40180804f8f08663997cafa9e489e00e701df32aa996bfb102b8fbd830 \ No newline at end of file +fa8d65ab7f6feea97d152f37822ac8b05f79c08caf95b37ad92f9e2d817e7c72 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-base-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-base-zh/graph_hash.txt index 247040828..c9a64382c 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-base-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-base-zh/graph_hash.txt @@ -1 +1 @@ -dd86f56770fe419ac8c9d7bd8426e60faa05682c72c30cfdfc7d01a1a3da21c4 \ No newline at end of file +548e0e93a810037492943c7eb60592dbd3cdab50b07662af2ccb1ecd723c2e6f \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-medium-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-medium-zh/graph_hash.txt index 482e99776..5621f2978 100644 --- 
a/paddle_samples/PaddleNLP/ernie-3.0-medium-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-medium-zh/graph_hash.txt @@ -1 +1 @@ -48344d251c95a3f7f38838cf0ab9657eac5cb033f7b03132c1934adddde24f5d \ No newline at end of file +db93f51be889fecf20bbd7cab1124b61fd36d550f434c0aa7a70b25c651d09ae \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-micro-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-micro-zh/graph_hash.txt index a1cb28748..9bf8cb337 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-micro-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-micro-zh/graph_hash.txt @@ -1 +1 @@ -245f9c8ca479848f19fd1a5d3ff83eaf92645a86cb53968ece04fe3191bf511a \ No newline at end of file +1bdf89e29eaab83b48f9def1a32e5de256cee5d80cf0d0eee3d69eb0e5fc918a \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-mini-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-mini-zh/graph_hash.txt index 5db0f43b9..50464e222 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-mini-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-mini-zh/graph_hash.txt @@ -1 +1 @@ -58e65c27f593dcea8ddd01589ed498ffa8d2d8640a1e1097cfde261a3fe232c3 \ No newline at end of file +067750b40b2f044a694f9dfb03c8de8b319f8ebee2c3140afe2272bfbbbb6ebf \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-nano-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-nano-zh/graph_hash.txt index fac988831..3a09f453c 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-nano-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-nano-zh/graph_hash.txt @@ -1 +1 @@ -076fbe76fccfe420c82cfec8b7b3a439d5950e5e8968a0d66398c4e26807d3aa \ No newline at end of file +f0c00eac569b366a71e66c603643af45b6f86b465e20e20f512c108667022bfe \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-tiny-base-v2-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-tiny-base-v2-zh/graph_hash.txt index 3c3728ff0..c8de02b0d 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-tiny-base-v2-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-tiny-base-v2-zh/graph_hash.txt @@ -1 +1 @@ -1d1c6e22f774dfa1dcc245efdf6538aa8d111dc9ba366e91de0ad0b1987f28a4 \ No newline at end of file +bf9dbdb2e01c78466b890df7aa1a7e644a09485ff15a62130b8389ca6f06c9e5 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-tiny-medium-v2-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-tiny-medium-v2-zh/graph_hash.txt index 1df0474aa..20767e2bf 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-tiny-medium-v2-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-tiny-medium-v2-zh/graph_hash.txt @@ -1 +1 @@ -6dda1d769139a2cb61941e6f069bd42f5350ea24d70d8d237dcc3fbe95d28824 \ No newline at end of file +ff08b923d3d63a4486793ba49dbf0cedb18eb9dfc6bdaf11e8ca28c1eef44a73 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-tiny-micro-v2-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-tiny-micro-v2-zh/graph_hash.txt index 289e42a87..cf1f38c36 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-tiny-micro-v2-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-tiny-micro-v2-zh/graph_hash.txt @@ -1 +1 @@ -8c7bd4ff0aadb54fd17dcac3b7fc5c530fab0ac7ef931ee435f82836f7f56e1a \ No newline at end of file +17a60317dee8157e2a84af545cd215213c8eebfc62316aa0dfeae61a4ca690a1 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-tiny-mini-v2-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-tiny-mini-v2-zh/graph_hash.txt index 
b5cfadf96..e15a13c8e 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-tiny-mini-v2-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-tiny-mini-v2-zh/graph_hash.txt @@ -1 +1 @@ -dc0063011bb320a7f5931fea39b1ac41c89b18bb48c901cbc49ebbd4f4daf247 \ No newline at end of file +bfa73dbc43cad25217ce2a8100caee4d0e41ed306a073080400eaf2d0ea4b008 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-tiny-nano-v2-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-tiny-nano-v2-zh/graph_hash.txt index 597328193..e0e4ad788 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-tiny-nano-v2-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-tiny-nano-v2-zh/graph_hash.txt @@ -1 +1 @@ -57fc54f7e6af8a83fd3fc5d76dfd850fcb09c0031f50a2b12d9fe305fa8ebb27 \ No newline at end of file +9729781fe87ae53d4015874111e49e1758a0210a60e150a54df2a40d2fc6a0f0 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-tiny-pico-v2-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-tiny-pico-v2-zh/graph_hash.txt index 245519ffa..d376744bb 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-tiny-pico-v2-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-tiny-pico-v2-zh/graph_hash.txt @@ -1 +1 @@ -e7e1a39e89be40667a96c4328ee450e998bcdbfd63561a1124ff6bcda7acddfd \ No newline at end of file +a03f5e293be7ce414125e9869059abbfd40ea4afd600fe014c4fbae62c7ba3e0 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-3.0-xbase-zh/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-3.0-xbase-zh/graph_hash.txt index f09eef313..2d0445e20 100644 --- a/paddle_samples/PaddleNLP/ernie-3.0-xbase-zh/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-3.0-xbase-zh/graph_hash.txt @@ -1 +1 @@ -fab73a89eb89acf2cc5148bcd7dba425ac19d782b823482d72c7b7a896e6c946 \ No newline at end of file +c6f960fc9a50eeb396e6a394b8994fd1ecd1bcb49fc2ca1c3623edbff2cb0eb3 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-m-base/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-m-base/graph_hash.txt index 9cc143863..71d80045c 100644 --- a/paddle_samples/PaddleNLP/ernie-m-base/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-m-base/graph_hash.txt @@ -1 +1 @@ -bc1692f95f97cd0e08a9a8d9d9024df027c12c0a80b8a4980e0916daf7d7a2dc \ No newline at end of file +7d4cc15ba9342580fdc04fdb662b9c0233fb986c7fc57c47be0ce907b493f780 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-m-large/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-m-large/graph_hash.txt index af5217ccb..84471eac9 100644 --- a/paddle_samples/PaddleNLP/ernie-m-large/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-m-large/graph_hash.txt @@ -1 +1 @@ -f5a2b5a3ed88293b1643f5ff48d787655038e15be4e19c51cab914e3f75c74dc \ No newline at end of file +6ec2b7a241cf5af899ead45f59561d8c09fc3198aae4733d77f6365d641a6b5c \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-search-large-cross-encoder-marco-en/graph_hash.txt b/paddle_samples/PaddleNLP/ernie-search-large-cross-encoder-marco-en/graph_hash.txt index 47d436658..6b7168392 100644 --- a/paddle_samples/PaddleNLP/ernie-search-large-cross-encoder-marco-en/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-search-large-cross-encoder-marco-en/graph_hash.txt @@ -1 +1 @@ -a0eb1177220779b3dfce38fa401f5f980dbbc62a121ed740b1e52192a0f9736e \ No newline at end of file +9fd685e22bd085cb63b9491c25d9a42839c343546790add95924e300a1bfc094 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ernie-tiny/graph_hash.txt 
b/paddle_samples/PaddleNLP/ernie-tiny/graph_hash.txt index e1c1925c4..26a639797 100644 --- a/paddle_samples/PaddleNLP/ernie-tiny/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ernie-tiny/graph_hash.txt @@ -1 +1 @@ -6dedb596800536d3d576bd324fe499585d5ea55379abce2325c472fc742e7a74 \ No newline at end of file +1ab1e873d42514b1fca6adc397e45033099ec5966dc03082c2c497203373f824 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/facebook_llama-7b/graph_hash.txt b/paddle_samples/PaddleNLP/facebook_llama-7b/graph_hash.txt index 5f513c068..7896613f2 100644 --- a/paddle_samples/PaddleNLP/facebook_llama-7b/graph_hash.txt +++ b/paddle_samples/PaddleNLP/facebook_llama-7b/graph_hash.txt @@ -1 +1 @@ -d426eda7b3e49ffe4fc7dab17e51c07ac39c7fd99f06d5d7431a7d1b34b98dbc \ No newline at end of file +82cea3cf667b7a9a4b811cec291e1a6936e8ae07300c8120e4bc35f701533496 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/gpt2-medium-en/graph_hash.txt b/paddle_samples/PaddleNLP/gpt2-medium-en/graph_hash.txt index 36c9c827a..6f41efc95 100644 --- a/paddle_samples/PaddleNLP/gpt2-medium-en/graph_hash.txt +++ b/paddle_samples/PaddleNLP/gpt2-medium-en/graph_hash.txt @@ -1 +1 @@ -7f1c8957fca37619a68f2a59e3a8967f7dba7c14dbea4ee0df259e0f2318f143 \ No newline at end of file +bfcff021d1b41227d138304334e6d524b69ed22ac380f1e28a2a7101036cef1f \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/nezha-base-chinese/graph_hash.txt b/paddle_samples/PaddleNLP/nezha-base-chinese/graph_hash.txt index 06643cf7f..4f2b6f279 100644 --- a/paddle_samples/PaddleNLP/nezha-base-chinese/graph_hash.txt +++ b/paddle_samples/PaddleNLP/nezha-base-chinese/graph_hash.txt @@ -1 +1 @@ -222830e57f3442cd04d555c7f785735dc98739f4463f0faf4bb57ca27b361d9a \ No newline at end of file +1e5bb77b8297af59033160eddc24a9974dfa33885567a98cc5322fda0160ef5b \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/nezha-large-chinese/graph_hash.txt b/paddle_samples/PaddleNLP/nezha-large-chinese/graph_hash.txt index 85c4c9cd7..c259f002c 100644 --- a/paddle_samples/PaddleNLP/nezha-large-chinese/graph_hash.txt +++ b/paddle_samples/PaddleNLP/nezha-large-chinese/graph_hash.txt @@ -1 +1 @@ -08ac06b39c04a5a0d4eb4d6d5d4783eb99fc5998dac8e993e01bacdb53b465d7 \ No newline at end of file +50a3aaaeb3bcd8ecec274dd705c354b076896fb43737715e9642cec4118cb9a7 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/ppminilm-6l-768h/graph_hash.txt b/paddle_samples/PaddleNLP/ppminilm-6l-768h/graph_hash.txt index bbef6bff2..eff266d16 100644 --- a/paddle_samples/PaddleNLP/ppminilm-6l-768h/graph_hash.txt +++ b/paddle_samples/PaddleNLP/ppminilm-6l-768h/graph_hash.txt @@ -1 +1 @@ -a0f0737d524f8e4ca4f1e056e556480a1b89c8d7b138c32cd9797d398a04a9ff \ No newline at end of file +91520475abb0fcb88270eac4fd1b785ca926bbd5d6e4fb2364a1539d764620e4 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/rocketqa-base-cross-encoder/graph_hash.txt b/paddle_samples/PaddleNLP/rocketqa-base-cross-encoder/graph_hash.txt index f23445326..aea163ede 100644 --- a/paddle_samples/PaddleNLP/rocketqa-base-cross-encoder/graph_hash.txt +++ b/paddle_samples/PaddleNLP/rocketqa-base-cross-encoder/graph_hash.txt @@ -1 +1 @@ -62cc3d05adaf6e4219e2b653fec24cce7290406e2f80064a1e914ebc82570775 \ No newline at end of file +c8a7a60f61d78d967326f0907d58699e883998e68c9958aca32b46d1828831d5 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/rocketqa-medium-cross-encoder/graph_hash.txt b/paddle_samples/PaddleNLP/rocketqa-medium-cross-encoder/graph_hash.txt 
index d38f31dac..8d56525eb 100644 --- a/paddle_samples/PaddleNLP/rocketqa-medium-cross-encoder/graph_hash.txt +++ b/paddle_samples/PaddleNLP/rocketqa-medium-cross-encoder/graph_hash.txt @@ -1 +1 @@ -9c08a3bb0bd05b2164df44d5548a5c5776eeaa49bd03e9e376c7ee59fdae46d0 \ No newline at end of file +3b6f4591bccb1c69034d3d8ac92a4dc437903c7abcf585efb76e544507e56af3 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/rocketqa-micro-cross-encoder/graph_hash.txt b/paddle_samples/PaddleNLP/rocketqa-micro-cross-encoder/graph_hash.txt index f7916b3b5..4b0686532 100644 --- a/paddle_samples/PaddleNLP/rocketqa-micro-cross-encoder/graph_hash.txt +++ b/paddle_samples/PaddleNLP/rocketqa-micro-cross-encoder/graph_hash.txt @@ -1 +1 @@ -bced5e643225a92c10192e7115db15a90598c668a04537760cf5238a14b450c8 \ No newline at end of file +20053469ce4a136a36346ab08261ae0c4a4e5873e11495e8b38fa134c1d7e5d0 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/rocketqa-mini-cross-encoder/graph_hash.txt b/paddle_samples/PaddleNLP/rocketqa-mini-cross-encoder/graph_hash.txt index 19056c06b..ca2e13c87 100644 --- a/paddle_samples/PaddleNLP/rocketqa-mini-cross-encoder/graph_hash.txt +++ b/paddle_samples/PaddleNLP/rocketqa-mini-cross-encoder/graph_hash.txt @@ -1 +1 @@ -6cdbba0eeaa64240e6ac13d0ab9517defda396e0b3a28912d7e0de857f2ce360 \ No newline at end of file +e478da94c8f176161f7e73ee5ecd9e885afc48a8c86af7554df492d87e5ba9ac \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/rocketqa-nano-cross-encoder/graph_hash.txt b/paddle_samples/PaddleNLP/rocketqa-nano-cross-encoder/graph_hash.txt index 40cc95a2d..9dc47bc27 100644 --- a/paddle_samples/PaddleNLP/rocketqa-nano-cross-encoder/graph_hash.txt +++ b/paddle_samples/PaddleNLP/rocketqa-nano-cross-encoder/graph_hash.txt @@ -1 +1 @@ -7cc6b608e6de4cbdd01a02eec2e0f7b2944ae5c728d6dd4941dc72ff85c75043 \ No newline at end of file +2a355ec5169286cda1d5f087125420badd67453b548ac78b048998bb97c7ac9b \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-chinese-base/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-chinese-base/graph_hash.txt index 37635824d..aea221282 100644 --- a/paddle_samples/PaddleNLP/roformer-chinese-base/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-chinese-base/graph_hash.txt @@ -1 +1 @@ -0f79e348a95fcebb8d8879f8c542c184d72c1ac615c928c17e6699e289b3a914 \ No newline at end of file +5c3428c15276312edd46e0dc16a8fea4e7191209eb1a3f6e3c1b695b1e20cf25 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-chinese-char-base/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-chinese-char-base/graph_hash.txt index aacd33e2c..a4ea1059f 100644 --- a/paddle_samples/PaddleNLP/roformer-chinese-char-base/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-chinese-char-base/graph_hash.txt @@ -1 +1 @@ -effb9e0c852b7bcd7d668832c3b1569603ef15ecce91c6befeabab7e652f3efc \ No newline at end of file +53064f2e24993e2819f7493635251dc7e6dbce0c96774f8cdc3c4c07576aa614 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-chinese-char-small/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-chinese-char-small/graph_hash.txt index ed77f1e3a..000018dcb 100644 --- a/paddle_samples/PaddleNLP/roformer-chinese-char-small/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-chinese-char-small/graph_hash.txt @@ -1 +1 @@ -743dc9654e25ceb1d1af7938d15f50f2516b335e5d85e66d1c44c4376c7443e5 \ No newline at end of file +53a34d9f6c1134452df0690bedf3a09bb9447187acdffc89286cd53af852ff4d \ No newline at end of 
file diff --git a/paddle_samples/PaddleNLP/roformer-chinese-sim-char-base/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-chinese-sim-char-base/graph_hash.txt index 856736a6b..dfa850c77 100644 --- a/paddle_samples/PaddleNLP/roformer-chinese-sim-char-base/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-chinese-sim-char-base/graph_hash.txt @@ -1 +1 @@ -ac7c29f0c10115309e4ac782586e1a5a0fd80293790c956e696119a24cd5dc54 \ No newline at end of file +f201b586d6804064c363c97e1af33cfa95327378563dedd1d52d519a8d44e040 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-chinese-sim-char-small/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-chinese-sim-char-small/graph_hash.txt index b5c8d6425..e807eddd5 100644 --- a/paddle_samples/PaddleNLP/roformer-chinese-sim-char-small/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-chinese-sim-char-small/graph_hash.txt @@ -1 +1 @@ -d5a06418c2f1d40774413b6a84ed47630ccffeea660744a4d750e6345dac1d7d \ No newline at end of file +47eafbb593972c1338c0a81da2fa47ae1759fcbbcb3fc47b002284205e0e5e62 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-chinese-small/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-chinese-small/graph_hash.txt index d9878fe76..8201da156 100644 --- a/paddle_samples/PaddleNLP/roformer-chinese-small/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-chinese-small/graph_hash.txt @@ -1 +1 @@ -a2b91ca0dcfe90f676cebdd8ce70ed25ab4a19a0e5d3d7f6d3afbe824b8f5940 \ No newline at end of file +4721e812ebcf794fa02469f72c66e78fa638bfa14009668336d5cd7aec1d6c1b \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-english-small-discriminator/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-english-small-discriminator/graph_hash.txt index f29ab372e..464ac9bbb 100644 --- a/paddle_samples/PaddleNLP/roformer-english-small-discriminator/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-english-small-discriminator/graph_hash.txt @@ -1 +1 @@ -0337f9ba075f621057b51aa2473d06aa2a767fe5a38a69020fecee4788e01897 \ No newline at end of file +b56bdd7f1a0d1f0194c170cb17dd377c3fd3611e6d527d9ca018e8b0b4241606 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/roformer-english-small-generator/graph_hash.txt b/paddle_samples/PaddleNLP/roformer-english-small-generator/graph_hash.txt index f17b8c657..343c08b9e 100644 --- a/paddle_samples/PaddleNLP/roformer-english-small-generator/graph_hash.txt +++ b/paddle_samples/PaddleNLP/roformer-english-small-generator/graph_hash.txt @@ -1 +1 @@ -ea25b56bfbf5b928d72465c4b78f7fd85dbf465d0d7e561fc82d62dac602051d \ No newline at end of file +ec02a57205a597f670b2e0d70c2e78d86b69f539eee30a79e323ae676304f733 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/skep_ernie_1.0_large_ch/graph_hash.txt b/paddle_samples/PaddleNLP/skep_ernie_1.0_large_ch/graph_hash.txt index 877064484..4b2add4a2 100644 --- a/paddle_samples/PaddleNLP/skep_ernie_1.0_large_ch/graph_hash.txt +++ b/paddle_samples/PaddleNLP/skep_ernie_1.0_large_ch/graph_hash.txt @@ -1 +1 @@ -14b2d1e4140658134efe653f341f7afe466787ae6d6aa309d9669566846ee682 \ No newline at end of file +2a1a38318cef4b3970cd82bd45509354f5a002c3b59fc4c7f6afe215529b412c \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/skep_ernie_2.0_large_en/graph_hash.txt b/paddle_samples/PaddleNLP/skep_ernie_2.0_large_en/graph_hash.txt index 6cf938420..c453e6643 100644 --- a/paddle_samples/PaddleNLP/skep_ernie_2.0_large_en/graph_hash.txt +++ 
b/paddle_samples/PaddleNLP/skep_ernie_2.0_large_en/graph_hash.txt @@ -1 +1 @@ -3f6835de7964f219d2277cfacbe9da8e681a61e90a50d626f4811d49b4366418 \ No newline at end of file +4a787b67f80f202bb4fca25275211cc35035ea9c6f4921e941b53bf591852527 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/uer_chinese-roberta-6l-768h/graph_hash.txt b/paddle_samples/PaddleNLP/uer_chinese-roberta-6l-768h/graph_hash.txt index 91cf30263..4cfc923ff 100644 --- a/paddle_samples/PaddleNLP/uer_chinese-roberta-6l-768h/graph_hash.txt +++ b/paddle_samples/PaddleNLP/uer_chinese-roberta-6l-768h/graph_hash.txt @@ -1 +1 @@ -10743b0d3e0b6bf06a3efa5340af1698b6bd587ee1d20a447186be2c38f8939f \ No newline at end of file +3999786464155ccc74dd73dd605624bf9ff5fb16cb3fcd5429eedfe07cd3ce68 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/uer_chinese-roberta-medium/graph_hash.txt b/paddle_samples/PaddleNLP/uer_chinese-roberta-medium/graph_hash.txt index 66792f1b8..e84726b5e 100644 --- a/paddle_samples/PaddleNLP/uer_chinese-roberta-medium/graph_hash.txt +++ b/paddle_samples/PaddleNLP/uer_chinese-roberta-medium/graph_hash.txt @@ -1 +1 @@ -502c06bb696ef061b972e5bf35280fea35704699d04c7fe9ceb4bb6e4cff329f \ No newline at end of file +bb5004576b179b76d8667d1c19edfacf81c8d4c715c34887e4965f9c7efac6c9 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/uer_chinese-roberta-mini/graph_hash.txt b/paddle_samples/PaddleNLP/uer_chinese-roberta-mini/graph_hash.txt index 1d8da86d2..412047681 100644 --- a/paddle_samples/PaddleNLP/uer_chinese-roberta-mini/graph_hash.txt +++ b/paddle_samples/PaddleNLP/uer_chinese-roberta-mini/graph_hash.txt @@ -1 +1 @@ -a75f85f129c28c95329ab6a86365047d63da6bdb69ea1b1a21ba8a32781a54c3 \ No newline at end of file +c33fec027d46c39a7f3f6c09fc943f06e0cb2c29ccd09ae06c0290d9f4a4f368 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/uer_chinese-roberta-small/graph_hash.txt b/paddle_samples/PaddleNLP/uer_chinese-roberta-small/graph_hash.txt index 8f0bd95cb..ba141e36a 100644 --- a/paddle_samples/PaddleNLP/uer_chinese-roberta-small/graph_hash.txt +++ b/paddle_samples/PaddleNLP/uer_chinese-roberta-small/graph_hash.txt @@ -1 +1 @@ -410fe72f6e34c86109634efcbafa8aa4defe10c122dc61f0ee3030e588a65062 \ No newline at end of file +cc6281320be6103256f47c72bf7edbddbfc5dc70c539f93cf98d625af89ad7d1 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/uer_chinese-roberta-tiny/graph_hash.txt b/paddle_samples/PaddleNLP/uer_chinese-roberta-tiny/graph_hash.txt index 886537cf6..53f48e04a 100644 --- a/paddle_samples/PaddleNLP/uer_chinese-roberta-tiny/graph_hash.txt +++ b/paddle_samples/PaddleNLP/uer_chinese-roberta-tiny/graph_hash.txt @@ -1 +1 @@ -3dcb5ad9952ee31409fc1883741ea0f2c8db2b95ab84e583512e1a348420a7a9 \ No newline at end of file +fad92e6920962b23fc1938d4e759f66587d3811192316043f90e52da16d78b18 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-base/graph_hash.txt b/paddle_samples/PaddleNLP/utc-base/graph_hash.txt index 64f219403..1f6ae33c8 100644 --- a/paddle_samples/PaddleNLP/utc-base/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-base/graph_hash.txt @@ -1 +1 @@ -58f6f8319a02432f55efca078df4c74b372dd0ba432afaa4ce769ca3862fd927 \ No newline at end of file +b9b4dc1bc40bdc166713c2852b1e55764fbfe5fab902636613abab2b6727cef1 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-large/graph_hash.txt b/paddle_samples/PaddleNLP/utc-large/graph_hash.txt index 2326010dd..2f531b703 100644 --- 
a/paddle_samples/PaddleNLP/utc-large/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-large/graph_hash.txt @@ -1 +1 @@ -0a920b69eaadc770e1ae2bafbc230b5d197c88c5003421e77f162e7923d98d2f \ No newline at end of file +67222ab4e269a401723327ed54c55ec3ab6cf90029364ba472cfb69efc9645bb \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-medium/graph_hash.txt b/paddle_samples/PaddleNLP/utc-medium/graph_hash.txt index c17c294df..7c2d7b70a 100644 --- a/paddle_samples/PaddleNLP/utc-medium/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-medium/graph_hash.txt @@ -1 +1 @@ -85b1adf22cbd5287447ba0bcd6a9ce65756fb45e60481fb1c45dc0c953719d3b \ No newline at end of file +ec800d8dcad7772db9a4ddb5dce274b488446b4c7595e164c757ca2ecac2b836 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-micro/graph_hash.txt b/paddle_samples/PaddleNLP/utc-micro/graph_hash.txt index 783280d2b..36c87d578 100644 --- a/paddle_samples/PaddleNLP/utc-micro/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-micro/graph_hash.txt @@ -1 +1 @@ -37a88389b1fd630972c3281b79387e59b38b0fe03e108e17025f8e8848927f40 \ No newline at end of file +858e958489f7ee1e3fecdacb4f0971aca28bce628d2e1b81b112153e464d5990 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-mini/graph_hash.txt b/paddle_samples/PaddleNLP/utc-mini/graph_hash.txt index 1d23e0084..dacc503ea 100644 --- a/paddle_samples/PaddleNLP/utc-mini/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-mini/graph_hash.txt @@ -1 +1 @@ -b41ca68b34d7ffb7543fba0b691f4004da01ba74217bc4397b43c13deed4ba6a \ No newline at end of file +e6c5c5ae7e0d535df8624cd1906487568253d3d3355beed28193cb4876b197d8 \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-nano/graph_hash.txt b/paddle_samples/PaddleNLP/utc-nano/graph_hash.txt index 032225e4e..0b33f8c16 100644 --- a/paddle_samples/PaddleNLP/utc-nano/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-nano/graph_hash.txt @@ -1 +1 @@ -e2fd3647885ed83d3378b86eca0b60e08c8d4024322d4945a67bd41cb1149626 \ No newline at end of file +4975e5cfd2d1ec8a524a426fed00a658c74abe507341628053ecde34b29f291c \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-pico/graph_hash.txt b/paddle_samples/PaddleNLP/utc-pico/graph_hash.txt index aa3aab4df..a718d052e 100644 --- a/paddle_samples/PaddleNLP/utc-pico/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-pico/graph_hash.txt @@ -1 +1 @@ -e020bedf12bc138749ce39b08afe1a0a2a38bdcf27709995cb8c35a461e48a09 \ No newline at end of file +2e13d9e71e4f71b5940d67f75e3b61eb0fc317dfb981686e561786a96983987a \ No newline at end of file diff --git a/paddle_samples/PaddleNLP/utc-xbase/graph_hash.txt b/paddle_samples/PaddleNLP/utc-xbase/graph_hash.txt index b7b340ced..a30635948 100644 --- a/paddle_samples/PaddleNLP/utc-xbase/graph_hash.txt +++ b/paddle_samples/PaddleNLP/utc-xbase/graph_hash.txt @@ -1 +1 @@ -b9687d9e1c6a6a7d67ee0f33fa9986a36cec249c6f1f9cce3fb4e9e3ada9a8d0 \ No newline at end of file +9f21603d1a47f6af22d1f0548310f43165409cb300e1f8026a7164b34125fd43 \ No newline at end of file diff --git a/paddle_samples/PaddleScience/euler_beam/subgraph_0/graph_hash.txt b/paddle_samples/PaddleScience/euler_beam/subgraph_0/graph_hash.txt index 3fc66dd45..08223c446 100644 --- a/paddle_samples/PaddleScience/euler_beam/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleScience/euler_beam/subgraph_0/graph_hash.txt @@ -1 +1 @@ -cc80681add836df428c67ac97235458fb43a7b287cc481c428a59a798f0405e1 \ No newline at end of file 
+83318d710c1f2b5750cbaed392db3fe0e4ef4e95f59594aa33f1e1723881b861 \ No newline at end of file diff --git a/paddle_samples/PaddleScience/euler_beam/subgraph_1/graph_hash.txt b/paddle_samples/PaddleScience/euler_beam/subgraph_1/graph_hash.txt index 338c41b60..fc7398f34 100644 --- a/paddle_samples/PaddleScience/euler_beam/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleScience/euler_beam/subgraph_1/graph_hash.txt @@ -1 +1 @@ -6a200693d12f8fd9ed7f82ed145f098358c6110c9eea5a325db33858fd9a970c \ No newline at end of file +8a514867dc1b61523ef5fd473fd97c54677e7e1e9a6ee47da291a19d22757de2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/AutoEncoder_ad/graph_hash.txt b/paddle_samples/PaddleX/AutoEncoder_ad/graph_hash.txt index 74012a5d2..dfcee22fb 100644 --- a/paddle_samples/PaddleX/AutoEncoder_ad/graph_hash.txt +++ b/paddle_samples/PaddleX/AutoEncoder_ad/graph_hash.txt @@ -1 +1 @@ -2c49ff97f987487128eb08aba6769445168406254145db3293651d1a6063fa63 \ No newline at end of file +c25c2164f4be4b4d88e3814847d3c4a82d5c577ebf24e42d6974580c0f9f5bf4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt index fabb8dbca..c70b73962 100644 --- a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_0/graph_hash.txt @@ -1 +1 @@ -256c18df369e1fd1bf5c66bdf10442132323fb1bfa22679bdb38b0727c49b013 \ No newline at end of file +70a61f9be645777bf0f52e7db2e5a1160ddf2cf000f7fb42b86cec8993154b9a \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt index e288630fc..a2699de9e 100644 --- a/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace-FPN-SSH/subgraph_1/graph_hash.txt @@ -1 +1 @@ -0831bb8355759c3dc255347e6c154e104f75359e5dc89d72b2d2e294743f94bd \ No newline at end of file +0d92690c160d0cf12cb547c1cd3b87d2190c132c9f1dd8897f1e303d8bfab7b9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt index f7247ba79..0d1c94f7a 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_0/graph_hash.txt @@ -1 +1 @@ -d5cf1876de9f3a36be22c33acb7d538a666b27b805b404faa7c878ee1e4e694d \ No newline at end of file +280421433fe13e5eb570f5229e20b0eaabe15e3a6997a9b03e06cb6ac23534a6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt index 5662df894..39b1037fe 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_1/graph_hash.txt @@ -1 +1 @@ -eb0f6c1aa007c19037b62cbd0178fc7740e173fc1d66b77e318024dac8bbc2de \ No newline at end of file +33295a7e24286ff7789bc5366c9810e126bc853188801b46f61e78bd8944763b \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt index 46d8833f6..5d1a0e53a 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_2/graph_hash.txt @@ -1 +1 @@ -d207034db8fcbf509b86ec1be4306b07b3fcbb02483af005c8502051b51779cc \ No newline at end of file 
+0872bd467165d55cf96121f79dd293debe3a102881c8bac7621e91b3419f72eb \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt index 7729c6b56..be1b16ded 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_3/graph_hash.txt @@ -1 +1 @@ -bb6d6c93042dcd4cea89dabceecd42cfcda5d9c8914efd69f24d58bc0e868d8b \ No newline at end of file +124a6c3ef22cbaa6cd0231a44434112241ca972a66ed26ba0f959dee6ef999d9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt index 22be6dce9..439caf7ac 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_4/graph_hash.txt @@ -1 +1 @@ -ddc98aa4517e17f5898f37e27bd110db7146c2e789bf569a980ba4201bf6d8cc \ No newline at end of file +153847a606ca855288d1d811cf36b10daa8a07cf8cab723140fd4206b9321737 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt index 82adea4a9..e3b761388 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_5/graph_hash.txt @@ -1 +1 @@ -721225ab8bed148704d6e4977784e7240bb3daddff88da89c3a29c69ebd43c1f \ No newline at end of file +3b321d157637b24b374186233dc5a06a8fb6f8dadd6469775f8c9518f4eb4839 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt index 0f83386a5..4d144998e 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_6/graph_hash.txt @@ -1 +1 @@ -c4fdad07c14b6fab0ca8ccf5f6cf815117975b9707c7eec9e822c3eaf9a188c0 \ No newline at end of file +e4a96a479148b0052fef2ac498c7512b661b4694ee0aaa465ea33c31d5fcc1a6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt index 1932b0c08..31f8fd314 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_7/graph_hash.txt @@ -1 +1 @@ -47d56d040d883898c99b1d0fb79e6bf402dcb888bc16b18fa453a750a20e0539 \ No newline at end of file +18a852ad880473507191eb54f8a8608f811a784a84fe0bbffea60b8d2672dafc \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt index f5ad73703..6ad994493 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_8/graph_hash.txt @@ -1 +1 @@ -dba6be48cd5bc0a5da59f495e449d3aa5012d07bcd14065493a8f1cb5da2626d \ No newline at end of file +935f5057a77e86856d7abbd9c569fd00446500e956abb6136950f76e44aec89d \ No newline at end of file diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt index 83eb1f3c3..439097187 100644 --- a/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/BlazeFace/subgraph_9/graph_hash.txt @@ -1 +1 @@ -d41355b25bbebfcf073261daf285d4c76047c4e7b04e191d4a36ef1756676b31 \ No newline at end of file +636f6aa713c6e9964ccb8745eb29a42ff24988ffed98203ce9df66397114f620 \ No 
newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_0/graph_hash.txt index 77a714878..180933f01 100644 --- a/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -e159d6fac3da3f8c658a858d28f41008aa74afd44e2857214cdf766a7958b039 \ No newline at end of file +7c8c3c853e70514df75171b0a42ef1061337518a2a840d0c133318af05670d2c \ No newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_1/graph_hash.txt index 1795f0ca4..d975a7357 100644 --- a/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_base_patch16_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -aff76a4cf2a36c43c225380d7630b9086052ae23af4ed74c81fb62fe4974414f \ No newline at end of file +e9c7429644e531c5c2b16743e3e56c2531e7cb465599eada17c6f5de039b24d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_0/graph_hash.txt index 7073f102c..2280356e6 100644 --- a/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_0/graph_hash.txt @@ -1 +1 @@ -94f68a32a189a13a1025d0e8a1d6e6269b722b9018d7342b47c28793031842cc \ No newline at end of file +5ad8ac888e8191b21655b278a211892a845879262e1c4fa6ae91bbb6d7e3b516 \ No newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_1/graph_hash.txt index 2d31ffb13..66a01839a 100644 --- a/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_1/graph_hash.txt @@ -1 +1 @@ -7087678fe9f7b1e6445aa1e0e25dc7d8977116378d51e10070aa13c78bb71c94 \ No newline at end of file +1c4e650f392249c85f37f84834feeafaf360a13a935cc39c11bf9e613dfe17c6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_2/graph_hash.txt index 30ac202a1..b52cc3769 100644 --- a/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_base_patch16_448_ML/subgraph_2/graph_hash.txt @@ -1 +1 @@ -fc3d71e5e44e3346ffdc7abdbf1bd025d7433615bdfa363a0b6b3e1c4a3e1634 \ No newline at end of file +3df00056e6fee0305490d3c405c7792f9e576e70a130175483c8e40f8b0ab997 \ No newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_0/graph_hash.txt index c6057651c..76e5d9537 100644 --- a/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -e41283ec69d76a2b7180fe9dcf5a80f5662fdbbe08fe8404c4f0f9500f9adb24 \ No newline at end of file +018e77964d3bdab3b64ad67f71a35f0ab3a679c5d33b0ce1764442102a25d8d5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_1/graph_hash.txt 
b/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_1/graph_hash.txt index 37e9267c4..d63ffc8ea 100644 --- a/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/CLIP_vit_large_patch14_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -c9c27572e2d56fe42ea8a914d13ff5684b6d518ea64b881a51eda815d5d03573 \ No newline at end of file +4b199343158a8a72256f92d7f5d4d0fd97120a56b4c93fe62f8358f44626788c \ No newline at end of file diff --git a/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_0/graph_hash.txt index 4b0496b4d..acef05540 100644 --- a/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_0/graph_hash.txt @@ -1 +1 @@ -bf069843108c2429d42b7f2c6e762b8bea7b771c97872f1bdb8f4f3a70093645 \ No newline at end of file +a3ea83a74dd4392c10def0f1a184f04fb1e114b35e3eda0afdfb118f58ff66e8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_1/graph_hash.txt index 4d0308a8a..ac5557e1a 100644 --- a/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/CenterNet-DLA-34/subgraph_1/graph_hash.txt @@ -1 +1 @@ -c6138de53bd3a9bda181d4716df6dc76c1a5d162f5044e44b4d1bcc46ae6d178 \ No newline at end of file +5408652b3702fa3247026f1337aaa7fe1494256826e1290d1db56ec1bd58a30e \ No newline at end of file diff --git a/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_0/graph_hash.txt index 518c800aa..fa2a9bf88 100644 --- a/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_0/graph_hash.txt @@ -1 +1 @@ -79611d328ea664041b015ac217e9c5040d75899ecc31367b529bb4d4f76e3978 \ No newline at end of file +8df780cbb572000857d68775aa21689f370217ff2e0fd55030fd007dc7342bec \ No newline at end of file diff --git a/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_1/graph_hash.txt index 576d610cd..95370a0da 100644 --- a/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/CenterNet-ResNet50/subgraph_1/graph_hash.txt @@ -1 +1 @@ -dd50e92a9e7c8fac627e3916196c618eb19eec0ba71507d67569238b8e537f59 \ No newline at end of file +3d91c91c412d88910c0cc2f06a61b39c26b30e2d839d0fb86cbf792ef0911698 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_0/graph_hash.txt index a040e3544..7ff9ee183 100644 --- a/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -f656e992aad6f57c8a3b9b2c9c9a050a06d027c2a4fb855889cc788d30b905a7 \ No newline at end of file +f5634a8c9bbb2b122eb56c53150362c02e8641518b7a747d2f6ffa39d1f960ae \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_1/graph_hash.txt index 126474ff6..d9a8a66b7 100644 --- a/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_base_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -ecb9ef0513798c8f0a3de75d7edec4dec8c9c42035ea2dd8b04c3e9945a5c45d \ No newline at end of 
file +84359dabff6793be26cfed9dfbdcdd4f79e35e54d744b6342ebf42bdf890312b \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_base_384/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_base_384/graph_hash.txt index 76efc62e0..a4c7ff91a 100644 --- a/paddle_samples/PaddleX/ConvNeXt_base_384/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_base_384/graph_hash.txt @@ -1 +1 @@ -151a9e4f6417e38e393113505323e4fa615ab45e8eb5dfcf9ed86e989fe00eff \ No newline at end of file +4663f183e805fba6641d1c195c71c6bd2364e996b570c04e430b9df6f3d586b1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_0/graph_hash.txt index 6348c90c6..8aa475f76 100644 --- a/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -035c732aa371dff510df7ee9123b85ccd3758ff9a682add5b0e4d206c87c3a23 \ No newline at end of file +b7d13273019f1a96cd1835dec92978e17d414b8771d5546db8db937b74d9c57a \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_1/graph_hash.txt index 207926f3d..36ec821a2 100644 --- a/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_large_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -25b391da8d7e7f6afae7aeef2fce6797500a7aeac10da6cb69f2589333a6c794 \ No newline at end of file +4ec5ec179f6e252081fb9f2fe429b6e55779a43248450db542752f8bd456ce14 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_large_384/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_large_384/graph_hash.txt index 1ca4aebda..23f81f8c0 100644 --- a/paddle_samples/PaddleX/ConvNeXt_large_384/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_large_384/graph_hash.txt @@ -1 +1 @@ -6aea928612f94a1517674115cc85a40f2f106193a8bf902dd8ca911a05e4d564 \ No newline at end of file +2f8135dab300dcd9c090a05b02d9e4a13aa53b1682667d22d20f2926fa9feca1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_small/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_small/subgraph_0/graph_hash.txt index c305281fa..23117fde7 100644 --- a/paddle_samples/PaddleX/ConvNeXt_small/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_small/subgraph_0/graph_hash.txt @@ -1 +1 @@ -1da17064603f0385c547d85d8f758e7b89a6680019b0b243361f3740cb28f581 \ No newline at end of file +d7c71362a0051358b77c41d8c6766c5b06db91bb8172b28c92551f5914a1f3b2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_small/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_small/subgraph_1/graph_hash.txt index d0f65dc6c..584b4817b 100644 --- a/paddle_samples/PaddleX/ConvNeXt_small/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_small/subgraph_1/graph_hash.txt @@ -1 +1 @@ -66037bd79690577f16a499d6ef6c2ae4bb52579ad77ece9142e94186b33ff52f \ No newline at end of file +105ee2fe8bb4ef36ac07d672c694a2d6c537a0b9ff70cfabdfe313e06a061b2d \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_0/graph_hash.txt index a628187ba..244c008da 100644 --- a/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_0/graph_hash.txt @@ -1 +1 @@ 
-21cb434adff52a3273f3310b7700a39684f9c7b4c625a5e7ea539219581e2392 \ No newline at end of file +48b9b1739b65f1f41acb119ad60e559aeb2149bb6db8cc6cb27c2fa7c325f9e8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_1/graph_hash.txt index 72607ff71..9c19344cb 100644 --- a/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ConvNeXt_tiny/subgraph_1/graph_hash.txt @@ -1 +1 @@ -ecbf33374a1d420d6799a5c7540287e04c773f11604e9b9ff3e3b59009e2c1e1 \ No newline at end of file +b2ee8f26339e7acb245618c9ca3ed3831aabafde89a2b9d5f5fba448bebe5905 \ No newline at end of file diff --git a/paddle_samples/PaddleX/DLinear/graph_hash.txt b/paddle_samples/PaddleX/DLinear/graph_hash.txt index 92125a56c..ee5cd5daa 100644 --- a/paddle_samples/PaddleX/DLinear/graph_hash.txt +++ b/paddle_samples/PaddleX/DLinear/graph_hash.txt @@ -1 +1 @@ -fcf6c1ed442abf29744c6faf94432ae444fe462944833d5844208cfee776e338 \ No newline at end of file +424874f9ec916f288ae0f703d4117a9ca9d412bb335168440e7cd8fd7e40d09d \ No newline at end of file diff --git a/paddle_samples/PaddleX/DLinear_ad/graph_hash.txt b/paddle_samples/PaddleX/DLinear_ad/graph_hash.txt index 6c451cb61..401ded153 100644 --- a/paddle_samples/PaddleX/DLinear_ad/graph_hash.txt +++ b/paddle_samples/PaddleX/DLinear_ad/graph_hash.txt @@ -1 +1 @@ -dbc13f857b9d2ddfc6be0e804fbf4de16f12eb063f199e9f067a4e13a11d7a66 \ No newline at end of file +956c78d9b474f527559e2ab61c01528cfef5d83aecb164f01a43513cb58fab09 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_0/graph_hash.txt index 5ac76b2f8..b1378ec2d 100644 --- a/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_0/graph_hash.txt @@ -1 +1 @@ -bb7db4a1858d625de07d1f722fdc246a387e545199270b0de906b031c0b3bb98 \ No newline at end of file +e11e5baf5a8fec7dc586cb22a7c9aa8197f805b16bd78458fa49eb03fd77c974 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_1/graph_hash.txt index 1488c32d2..43c925df8 100644 --- a/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3-R101/subgraph_1/graph_hash.txt @@ -1 +1 @@ -39e483aa11e7e82b13541fc3a74ffb331a0ed0ccb9eb45138fb2e8faae31854d \ No newline at end of file +5a7149dbdd2a64286eb8dbf1d6f0cb3df8df6fb1c0f9cf033dd0f645382e18f6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_0/graph_hash.txt index 922e30b5f..1dffa9b24 100644 --- a/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c8c863dc1364f2dcdbe16b36345c9908de752575f406bad5fbee2e4022a61834 \ No newline at end of file +7226dd2cb0283e4a31ac78973519e6c8d1e7b90d49cef8303cd3f7f50f0e51aa \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_1/graph_hash.txt index 569f52c78..d0c356baf 100644 --- a/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3-R50/subgraph_1/graph_hash.txt @@ -1 +1 @@ -40a2fda5af187ccf0bbe2bebc871dba270428aa4488eeae9ab41f38d1a020aa8 \ No newline 
at end of file +71ea0f170bf625ad56b3a44f1dfb7af8103239afa2374d9fac14be1a7ce618ab \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_0/graph_hash.txt index 18234208c..f1421d2a7 100644 --- a/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_0/graph_hash.txt @@ -1 +1 @@ -4dcba8dcb96cb4227a3ac6601308f845a6c2ac96fa09b42e7f78cb02dcd9359d \ No newline at end of file +5df7065a9990cf7404ea35095723c9720ddbd0f04858ac43921bf962c0c217c9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_1/graph_hash.txt index 3d3b34924..91ce3f1ee 100644 --- a/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3_Plus-R101/subgraph_1/graph_hash.txt @@ -1 +1 @@ -2be0ca575ab046ac79978cdd3900e95caa008eda663c7cc4ed9ca4d0f476b4bc \ No newline at end of file +df4370e0ba800c17574bc04e861d18eacd9ab4bca48bbfe666659a193b53d801 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_0/graph_hash.txt index ec47b8148..275564ef1 100644 --- a/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_0/graph_hash.txt @@ -1 +1 @@ -82ac8bac2fc249196c2483e751ff4db2e00ed749fcec1c1c650acaf2c21cd9e7 \ No newline at end of file +bd51565aa57b6fac0d038d1dea894710835bcc80aed5b39b7ba69f24f44f78b5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_1/graph_hash.txt index 9d05aeec0..1365c504a 100644 --- a/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/Deeplabv3_Plus-R50/subgraph_1/graph_hash.txt @@ -1 +1 @@ -d8c049ac30b3f97f61e0336d96e4aea702d40401e36f4444316647a9d3d8e15c \ No newline at end of file +50852b0796e766a03524154c2df11bc8b29458c2c11c9eaf60b8fea910dbfad6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_0/graph_hash.txt index d48d3bd95..0148538e8 100644 --- a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_0/graph_hash.txt @@ -1 +1 @@ -b1e0b6ecf140dfafe8516039778f6e4b103b6ca2959a2dca9c26c8205109ea26 \ No newline at end of file +35727ae52123b64da65ef61f2135476ffa0275710b3f0d49d6c9c6620b4c97b7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_1/graph_hash.txt index c91166920..6abb50868 100644 --- a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_1/graph_hash.txt @@ -1 +1 @@ -8a729c8c22a7d03afcb655e132388b5b4c6b651c699482861457c8e2d80bba48 \ No newline at end of file +789c6f01391a2a14a7297a05e91b5f9c0d8ec767c438cb05f4ccf695ad5f40c7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_2/graph_hash.txt index 519b528c9..a732e88de 100644 --- a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_2/graph_hash.txt +++ 
b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_2/graph_hash.txt @@ -1 +1 @@ -0a39a207d2eeff871a9e6484624a5e533530c68668619d85dab3205721b1e0d3 \ No newline at end of file +ffb791f5e2dd3ba4956ea8c7939e0e7d1aad62c660981ab6a5e26e77bae014b5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_3/graph_hash.txt index 440240485..48ca4ed1c 100644 --- a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_3/graph_hash.txt @@ -1 +1 @@ -f6c205737e30a146b28de4da5749b3e05bb4c0a83a8783d084b51fd12d658106 \ No newline at end of file +de9631d73ed86a1ffa120a52646ce5b4aeac38be5f10eec8f65bc5d420ee5ddb \ No newline at end of file diff --git a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_4/graph_hash.txt index f79bc10f9..d784f6a88 100644 --- a/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/FCOS-ResNet50/subgraph_4/graph_hash.txt @@ -1 +1 @@ -03f2705aaf42b687f36a9ebc53537e0bfae7f2e5c07f51e00a78b3fa1492490c \ No newline at end of file +d2776b81f1e4a8bc3f5ece756c4f4d2f0fd4d35324e2409b44bd42eb4832a52d \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-L/subgraph_0/graph_hash.txt index 3b96c3018..8ad31d4f1 100644 --- a/paddle_samples/PaddleX/FasterNet-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ -fbd93ce7d51534bf1cbcec289a3a8f1c7b02b5cb5f57529ce2fbd86897b5e088 \ No newline at end of file +b855d4373fd34de412adfc58ee6bf0c57c51f4f71dc24fd1280096c086012bbd \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-L/subgraph_1/graph_hash.txt index 2fcd5cd4d..c4fe6cbb6 100644 --- a/paddle_samples/PaddleX/FasterNet-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -8104d422d0fb245dd39ab8fc1e5967662040e05eba90e5efde0ad02474ff9ae7 \ No newline at end of file +4d69771d3816e546ac5a27c06bd8ad0ff130dc3be7e68a081e5959713a42c828 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-L/subgraph_2/graph_hash.txt index cfe9f70b5..46fd53973 100644 --- a/paddle_samples/PaddleX/FasterNet-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -d58792db69eaa8db7787b7539bb5b5189a9ffeb25d42a26b98358e998954a2f4 \ No newline at end of file +a0edd5714867716597b922347a5a7d7428afdd02cf0b143e3b897f79f7fc0948 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-M/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-M/subgraph_0/graph_hash.txt index 66992e13f..810931ea1 100644 --- a/paddle_samples/PaddleX/FasterNet-M/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-M/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6cdd54863e52de87d7a2e3c97afc9c6520e2308eba579fcbc6cecaa6d8e3c0cf \ No newline at end of file +9369a54440e191c3a385f3eed9d6c9fb81e7961ca81559be5245724b0fac5038 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-M/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-M/subgraph_1/graph_hash.txt index 23282344e..101a63042 100644 --- a/paddle_samples/PaddleX/FasterNet-M/subgraph_1/graph_hash.txt +++ 
b/paddle_samples/PaddleX/FasterNet-M/subgraph_1/graph_hash.txt @@ -1 +1 @@ -0da9d6a385fa481527408a86aad8d4bbc4f0a36353378a03d44983155950332c \ No newline at end of file +10d56e710ec28b1b8f4ef76a886732ef5a36ed82a36d33dccbf173a274179739 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-M/subgraph_2/graph_hash.txt index c15e6f7ce..f8e3eec1e 100644 --- a/paddle_samples/PaddleX/FasterNet-M/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-M/subgraph_2/graph_hash.txt @@ -1 +1 @@ -68f494f22f6c93aaf24c8ef2a11942233ada7d5c41bf8b0329b447a50c3b28fd \ No newline at end of file +84454f1bf8379b25eb322e5eb02addad175d6ea5f9de388667e6698661350fb5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-S/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-S/subgraph_0/graph_hash.txt index ab1557cef..6ba04ac46 100644 --- a/paddle_samples/PaddleX/FasterNet-S/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-S/subgraph_0/graph_hash.txt @@ -1 +1 @@ -150c3610f39936550047e80c529acd468940ea031ee4d09053963b76a5980afe \ No newline at end of file +cf18307ceefdac615d658f6796f2d24fa9869a420be4a361854f9c154e08847d \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-S/subgraph_1/graph_hash.txt index 807a7d426..38045b97a 100644 --- a/paddle_samples/PaddleX/FasterNet-S/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-S/subgraph_1/graph_hash.txt @@ -1 +1 @@ -e486a79255785e325546ddb0c8c07171b33344765abe89ae9639a9233d8d48aa \ No newline at end of file +c365f5b67d4e6ed4b4b2febb442c82ba15075dee37588dc5201216b03359bfba \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-S/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-S/subgraph_2/graph_hash.txt index f0af65276..0063e40e7 100644 --- a/paddle_samples/PaddleX/FasterNet-S/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-S/subgraph_2/graph_hash.txt @@ -1 +1 @@ -d2b5c2c49ef1d70cbed149ffce114af068cf821694ef73781e4f7a87eeb93bd4 \ No newline at end of file +90b2ae7338f0208932a31e345cbb49df928ba71adfa2814bea59a78464a1ff8f \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T0/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T0/subgraph_0/graph_hash.txt index 7ea35d3af..e6c9f422f 100644 --- a/paddle_samples/PaddleX/FasterNet-T0/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T0/subgraph_0/graph_hash.txt @@ -1 +1 @@ -61a41afbfc525fda3d1ca862c95a768fde5d77b87faba42323317993a9ec64fa \ No newline at end of file +0139fc3722b83fbb14554842841c240302a0ad9d32f74f6abacdb4ee3089f087 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T0/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T0/subgraph_1/graph_hash.txt index 025698a0a..71cf13072 100644 --- a/paddle_samples/PaddleX/FasterNet-T0/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T0/subgraph_1/graph_hash.txt @@ -1 +1 @@ -acba68f23dd992bf8f9fa13e24bb159026167f2f8c7b2b402e162b9c73898474 \ No newline at end of file +df342a2af1cfcdf4b9d6e9cf7abee5c5bd438c80f85a31a9af689587894044c1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T1/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T1/subgraph_0/graph_hash.txt index 74fe2662a..afb540de3 100644 --- a/paddle_samples/PaddleX/FasterNet-T1/subgraph_0/graph_hash.txt +++ 
b/paddle_samples/PaddleX/FasterNet-T1/subgraph_0/graph_hash.txt @@ -1 +1 @@ -cb8fa8b7aa6cc1ff9df4440c4b5029181fbe0642bc13b36cf26d8ba47793dffd \ No newline at end of file +0a50faa46775601220ec857c84e8a2a82a4fc160227a8a3f33d4a9139e3cf2b9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T1/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T1/subgraph_1/graph_hash.txt index 651ac8c6b..05fdeb89c 100644 --- a/paddle_samples/PaddleX/FasterNet-T1/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T1/subgraph_1/graph_hash.txt @@ -1 +1 @@ -5e3866c81db04e03898fdca53863a2201f42ce66c95375702e11d5bc2e93ec1e \ No newline at end of file +190185b124a0b6475ca9ff57d112b58adce010c5b878fc435e6de7e6e52a0005 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T1/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T1/subgraph_2/graph_hash.txt index 98815374d..1499e734b 100644 --- a/paddle_samples/PaddleX/FasterNet-T1/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T1/subgraph_2/graph_hash.txt @@ -1 +1 @@ -ef385ce68c9cfa84f3cadca7600fd624b7fe8c65b34041a50b9cf382b68dae84 \ No newline at end of file +7b208cf63841898bce817257e52b909c34f2455f049f84348cf30b8cb1ba8445 \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T2/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T2/subgraph_0/graph_hash.txt index 9ea355372..a59321562 100644 --- a/paddle_samples/PaddleX/FasterNet-T2/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T2/subgraph_0/graph_hash.txt @@ -1 +1 @@ -bbd0d6d74c4fb7722f5cda40478ac635cbef8febd0dc196f8f0bc9bfeab4963c \ No newline at end of file +c000a3d13484b950b5e4e199ab98fea9c616e1f5c9f1b38beb32e55a5fafd90f \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T2/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T2/subgraph_1/graph_hash.txt index 3fe56addb..0fde0bbfb 100644 --- a/paddle_samples/PaddleX/FasterNet-T2/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T2/subgraph_1/graph_hash.txt @@ -1 +1 @@ -45c8bd73ee5ab379a107e1bb75440b7ddecda471c81cb0a049ddd5b30a3cd32b \ No newline at end of file +4b6c2d7dd3fd85afabc6e8cb29e92d516e3a33643e2354d835cb20024ed3291f \ No newline at end of file diff --git a/paddle_samples/PaddleX/FasterNet-T2/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/FasterNet-T2/subgraph_2/graph_hash.txt index 82345631b..7a9240650 100644 --- a/paddle_samples/PaddleX/FasterNet-T2/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/FasterNet-T2/subgraph_2/graph_hash.txt @@ -1 +1 @@ -63dbe71104880562fb6176a95f59b13d0c6337efb6a2d0c0f874e6a882ee3191 \ No newline at end of file +7f8d5316e851c296ed2e34746208255fef56e8b032b6bda0fcfde663d24c543b \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskFormer_small/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MaskFormer_small/subgraph_0/graph_hash.txt index a4d85f236..d69f889c9 100644 --- a/paddle_samples/PaddleX/MaskFormer_small/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MaskFormer_small/subgraph_0/graph_hash.txt @@ -1 +1 @@ -13a4eb62f5e30e651c4a9f26570a3a2365e0e416033754a6acacea594e0ba692 \ No newline at end of file +bcdce4d1959b90316fa89cb4a04c8f577b8fcfff4b2d4cbf2e01d935d13c7382 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskFormer_small/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MaskFormer_small/subgraph_1/graph_hash.txt index b9d10f891..a1d6d654d 100644 --- 
a/paddle_samples/PaddleX/MaskFormer_small/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MaskFormer_small/subgraph_1/graph_hash.txt @@ -1 +1 @@ -bb557e7ec9344eb919b3a3ad48a35566a4d4e1a5e5e1012633e5ff0f9811d033 \ No newline at end of file +4fe070469ba3273f7c86a23ea04b748776292f6f6a6807198fbe1ae5f7d2798b \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskFormer_small/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/MaskFormer_small/subgraph_2/graph_hash.txt index 6657cb794..8f9b0d9a6 100644 --- a/paddle_samples/PaddleX/MaskFormer_small/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/MaskFormer_small/subgraph_2/graph_hash.txt @@ -1 +1 @@ -227e06cf239d4821eb71556acea557473e0bf11fcb451f27be954f453abf477f \ No newline at end of file +dffb9f1be61de6e958c2f7636713e3905512ff896c1fb00d16378f1b9f06ed6a \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_0/graph_hash.txt index f731a58cb..d39f81d02 100644 --- a/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_0/graph_hash.txt @@ -1 +1 @@ -a57d6f5263876e72a21f20e4ccd56bbe2c89e43991f037544cb72df387ca64d6 \ No newline at end of file +c3a3477a9c70d8fac1c8eca8f0350fa512c10dd6b46778e0816682e3ef7bd1e8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_1/graph_hash.txt index e06abd179..db59ae6ab 100644 --- a/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MaskFormer_tiny/subgraph_1/graph_hash.txt @@ -1 +1 @@ -cc2adc694e085445d577dc2c3adf73b54097157bd927231fde656fbe72599a99 \ No newline at end of file +e91f584461a117bc9e520d6377ef7559ab9b5b38f6f82c22434b4d5eefcf0805 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileFaceNet/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileFaceNet/subgraph_0/graph_hash.txt index db41a9327..1702bca17 100644 --- a/paddle_samples/PaddleX/MobileFaceNet/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileFaceNet/subgraph_0/graph_hash.txt @@ -1 +1 @@ -29970c467ff3312483e2e97dcd564d0143e29b6ea0dbb34ad17522c1e37bfca4 \ No newline at end of file +9b1d65c0b47f8e5f4402d007c5367b602fea23af92cd42cc883c1c9f0cc1dee4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileFaceNet/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileFaceNet/subgraph_1/graph_hash.txt index 5eaccace9..0ebc3d52e 100644 --- a/paddle_samples/PaddleX/MobileFaceNet/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileFaceNet/subgraph_1/graph_hash.txt @@ -1 +1 @@ -74042d9fea54f54d2f53072d1620aa23220b98e54700eda9ca439e3f19c2a4b2 \ No newline at end of file +ea267aca9aeb0ad2a45f36ba1a35072d1e170a07e48c3f761af112236f594180 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileFaceNet/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/MobileFaceNet/subgraph_2/graph_hash.txt index 491cefd79..b50985c02 100644 --- a/paddle_samples/PaddleX/MobileFaceNet/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileFaceNet/subgraph_2/graph_hash.txt @@ -1 +1 @@ -51083893007de0213b82b35d6e65bc0d0e60e8e6905e3c86485a4d1902c9c3dd \ No newline at end of file +91e2f8239e50078ec9fb981550427ab6b706c801810322113cdf25c6827e5976 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV1_x0_25/graph_hash.txt 
b/paddle_samples/PaddleX/MobileNetV1_x0_25/graph_hash.txt index 9a2376bb1..1f9e5a48c 100644 --- a/paddle_samples/PaddleX/MobileNetV1_x0_25/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV1_x0_25/graph_hash.txt @@ -1 +1 @@ -624005a9e82ec3171598daf14c63b79ad668026c3bc63c756f2a4b83b568374f \ No newline at end of file +553329d1538220d76707294bb46a4fb4554fbb44023c583e72bedd7251a6fc9c \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV1_x0_5/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV1_x0_5/graph_hash.txt index e3e68c21d..3765676e4 100644 --- a/paddle_samples/PaddleX/MobileNetV1_x0_5/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV1_x0_5/graph_hash.txt @@ -1 +1 @@ -029f8d1edcdd84819ac140901aec2b4093e6e3813a3f8f9289a87919cd25362d \ No newline at end of file +70824ac3988f0e6e5fc8112f16cab9a369cd11b336dc44ead82e736b6ce87ede \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV1_x0_75/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV1_x0_75/graph_hash.txt index cb65f6a2c..f91f1805a 100644 --- a/paddle_samples/PaddleX/MobileNetV1_x0_75/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV1_x0_75/graph_hash.txt @@ -1 +1 @@ -5d50927f03d7dcb59ec2779734555d0c58837d424a7ab0b1c1d5108a4925f162 \ No newline at end of file +fa7255dd59d1abb7eacc77ab31ae4605171460c3ba02a105c206ab45c7532e70 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV2_x0_25/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV2_x0_25/graph_hash.txt index 2396d5bb5..3d50ddba7 100644 --- a/paddle_samples/PaddleX/MobileNetV2_x0_25/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV2_x0_25/graph_hash.txt @@ -1 +1 @@ -61465fd5a57db6310ff5d69ce4c483a6c43b506ef7544ed31bee481dc8216aa8 \ No newline at end of file +8c24f6d61bbd4d47c38634756eaa58dc9b5c0c1f8c26cb9e6f83e1f60ef6a536 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV2_x0_5/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV2_x0_5/graph_hash.txt index 979e5985b..dcfc07384 100644 --- a/paddle_samples/PaddleX/MobileNetV2_x0_5/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV2_x0_5/graph_hash.txt @@ -1 +1 @@ -0bd52b0350aec564f31feec8743ea59df1f6fd61937ad1eec7e752aa946bf1e1 \ No newline at end of file +6292b94c30a6d617b9c8ac132e520345a941995292c5a974fffb82c5dd4c6341 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV2_x1_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV2_x1_0/graph_hash.txt index 49ad03277..c5f20b749 100644 --- a/paddle_samples/PaddleX/MobileNetV2_x1_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV2_x1_0/graph_hash.txt @@ -1 +1 @@ -42ca1722e9ee0ed0b7d1e7fe557ce1e20d7e283cf41c36cd57913974e41c9d51 \ No newline at end of file +46195ce68a5a4ce4c87c2b27e833367c482e0b5707fe129a119ace5f356efa3d \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV2_x1_5/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV2_x1_5/graph_hash.txt index 8ee3d18f9..f508087c5 100644 --- a/paddle_samples/PaddleX/MobileNetV2_x1_5/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV2_x1_5/graph_hash.txt @@ -1 +1 @@ -b16bea2ceffa2cfde4a7ad1ff744acfde9bfc72a3740ac4271568a072995f269 \ No newline at end of file +3f14560e0247c806b1747df378e98099645afed87c8fc905eeb31a10bd2e8cc6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV2_x2_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV2_x2_0/graph_hash.txt index 9da8a59ec..3c42b6370 100644 --- a/paddle_samples/PaddleX/MobileNetV2_x2_0/graph_hash.txt +++ 
b/paddle_samples/PaddleX/MobileNetV2_x2_0/graph_hash.txt @@ -1 +1 @@ -e191a168437158e7ccb79aaab144b015fb318265172a533fa5b47c0788b0d6a9 \ No newline at end of file +0b5f4a867079af55eb9e655eda9224441769b71e5456cc6b021a9db0f631eb7b \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_0/graph_hash.txt index 536164288..98135dbab 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c75419c95d841a13d7c7a401edc4d4b14a6b8407e94946160d9c8e41e936590c \ No newline at end of file +aad9b8d510570281fb2addaba3aef86a771719e2b738515b46216a760e038bcc \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_1/graph_hash.txt index 2f5130de3..b5d418cd2 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x0_35/subgraph_1/graph_hash.txt @@ -1 +1 @@ -258683b4d16b19a1f096f86fb2b18dd790d0aab8f789ff59b9f955a156eb3ca2 \ No newline at end of file +057287e16f55e4c141f49ed68e9f66e622f7d3bb16a741ba793d15aeb4e17a75 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_0/graph_hash.txt index ab9cb39c8..81366c4de 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6e1699818981846f68f907a5ee89a1aef004a5a194ca684b50d18dc2c3914e60 \ No newline at end of file +6ec2253266ae10f69687f11330ffebe1b1d7e28abc74900ca1f4dffdfd57fb3f \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_1/graph_hash.txt index ecc829e5b..e574c0877 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x0_5/subgraph_1/graph_hash.txt @@ -1 +1 @@ -12e9029ff792ed78e72236b54234d11c6a5e854f02ad0b11e81306e693143b0b \ No newline at end of file +9181c0abdbb7c0bb11acdcb7482f055271621651c6858a8864acda70c9e79651 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_0/graph_hash.txt index 64e4ab7c7..d44dff62d 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_0/graph_hash.txt @@ -1 +1 @@ -9023c7b23fcac7cb354d825312f3886f881c3088bb836a9a392227b349faa62e \ No newline at end of file +681f78653d396fe64dbb7af7c8d5472a87a750d0efe7a8d93dc3ce9eff7103ea \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_1/graph_hash.txt index 0e6f3fe9d..d7b473997 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x0_75/subgraph_1/graph_hash.txt @@ -1 +1 @@ -fecda482151b94826032d36dbd319433604603ab81aea4533cadbeceb8992435 \ No newline at end of file +ddcf2950943335ea5d7683e4ff8c95f0f851e4bf9c22e97badaeea22b1fae5ca \ No newline at 
end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_0/graph_hash.txt index d6a9b0038..f6195ec9b 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_0/graph_hash.txt @@ -1 +1 @@ -56517318f5427f56b643606da6e66b043dd30ac69e25f6f7f3755192c7eadb6f \ No newline at end of file +988b9f99d9476529537085aadd08069a32c67f9dcdd0f1828e5ee5f074c7288c \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_1/graph_hash.txt index 50248f379..058a02a3e 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x1_0/subgraph_1/graph_hash.txt @@ -1 +1 @@ -b6d49618fad9f3954ef3915bc05d0f2bbc7be19fa5741d726e6cfae1139bdc25 \ No newline at end of file +fc2a8eea7e236ffe4babd2475729c2bef14085e05e33bdea308a04b283316ae5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_0/graph_hash.txt index 6e2758d1a..85001f66b 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_0/graph_hash.txt @@ -1 +1 @@ -65245fe60f576a8a476e52b7c91a2690b8c7f2fcbdce58731293430bb873ac2f \ No newline at end of file +b9df4155bf9f45553a194dfbfa0e8449fbe527ea401fa602d914405531b1a4db \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_1/graph_hash.txt index bbc54bbd2..bc242bd3e 100644 --- a/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_large_x1_25/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a4050d66af8fc9443654d8510d769a0f3ddd1afaeb0866cae1c0d303acce592c \ No newline at end of file +361a1c43e1fae3091627d14cf7e0a5a95d2ad527c25ac7334a370caa81879e7c \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_0/graph_hash.txt index 6b11f3941..86fc89afc 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_0/graph_hash.txt @@ -1 +1 @@ -65a2a1ccf02cd885092ab2ae5934a849c2c5cc25082f9644bc51797ebf16cc79 \ No newline at end of file +5c75fa321a9467bc512a4d7e39ec447a0570ef4c74c3798862aa3ce7ca4678b5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_1/graph_hash.txt index 303f076aa..f1146802e 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x0_35/subgraph_1/graph_hash.txt @@ -1 +1 @@ -6e7596ecc5a23458cf518ef41dac1644b6ab8e5d030df60487e13adfd8afd57a \ No newline at end of file +dc7d5ebac75f68fa43ad64013162eadd5c8293653817da612064f2bf8aa4a4a5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_0/graph_hash.txt index c16c29928..5270bb284 100644 --- 
a/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6808d415d5d978071cca6644f5736c46d00c59b71168cbc35a18babbedfd2730 \ No newline at end of file +8043b4c6b70ca88b9904422c395d21ddb1ae95bc83c702c573ea5405c168a143 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_1/graph_hash.txt index 7ff7e2077..ece3634a6 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x0_5/subgraph_1/graph_hash.txt @@ -1 +1 @@ -6329f1afc2faf15a93d399612264b57127cec5cc29d7c502c99cd9e56e674e10 \ No newline at end of file +e856c40c1034237c0cbb655eff09a9fa0c608e3b25b671ee15b07a138852614f \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_0/graph_hash.txt index 4fc32f20a..214f7c14f 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_0/graph_hash.txt @@ -1 +1 @@ -e93dfcf8c4d4cfa8ee029cee1f0452d270df96ea3b55fd0efe33feab656e94aa \ No newline at end of file +91f4ad0d141999b8e578070466de18948efac4bc6f49e201b7c37b7f560a590c \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_1/graph_hash.txt index 5721ebf69..07d60739e 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x0_75/subgraph_1/graph_hash.txt @@ -1 +1 @@ -816c7d7724621486e2e647199c484f03d4ea2a2fce80b2285f857bff2f598a1e \ No newline at end of file +17e2725912a82a700af27a19ac03e8b073296cee2dbcc0b61ab45d36b887e39b \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_0/graph_hash.txt index d33a9f003..b00b5cc5c 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_0/graph_hash.txt @@ -1 +1 @@ -38fd818db3b62a2cbbe53bf810598c3859dcb2aa0efaaeb32633c639241d9192 \ No newline at end of file +4cf6d217a11c1aef21cd7622348499c6c2ca0f02b401f70037af83f1e7841feb \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_1/graph_hash.txt index 38637d8e3..7800577fa 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x1_0/subgraph_1/graph_hash.txt @@ -1 +1 @@ -4cbd22ad8750fd55a794bac14830e78a714ebd52b0dd791836cd7657d3a27169 \ No newline at end of file +d9d0b4c43b9a79b36b073e335c28fe4b667675be8d525721918c54039c69e581 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_0/graph_hash.txt index 8a1927fc1..5c616a482 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_0/graph_hash.txt @@ -1 +1 @@ -382dc1d215648fa367927b30f68d786c165b32e65d76966da13baaef188f61fc \ No newline at 
end of file +f5d4c1bf049f5ad6edb8fae03ddbb1bdb7e82c2a5748a8148d0a824f9ec3be34 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_1/graph_hash.txt index 7a896f586..f3f9594f4 100644 --- a/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV3_small_x1_25/subgraph_1/graph_hash.txt @@ -1 +1 @@ -cd4fef541ec2b57b5a0a142df710509ef408a84f32b832232a8f10c3f77895cf \ No newline at end of file +c443f6718b70daf0009b38fbf6f4bf54273ad56b83451bd18784071d8e4cf28e \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_0/graph_hash.txt index e26122b37..523a22312 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_0/graph_hash.txt @@ -1 +1 @@ -50c50156c5ce2fa6951e75a1bcc84d0c5001458d8795f89b1d062ad2f84023d1 \ No newline at end of file +68a11a0cd2c46d63ebcc772a78b94d1e9143e8f91aed8b0ca68d04a1978f833c \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_1/graph_hash.txt index a7deb7528..2df13875f 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a43ca771c0b1fcfd8bc122a0c0c96e06b64b44c14e1f645b9328b23200bcda9d \ No newline at end of file +99f51f92bb18429134023884c83cf704c382728793956fa706caf8ecef248c9b \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_2/graph_hash.txt index 9dab362c8..456355af6 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_large/subgraph_2/graph_hash.txt @@ -1 +1 @@ -9fb40baf79ae271f26c63db963522a712757284ef85f67d4db7a8f6a13dee746 \ No newline at end of file +b670d12121f472b962016893d6d0a4075667a6a3d55eb95c8f1167570082fad6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_0/graph_hash.txt index 217e143c0..3c25bfb77 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_0/graph_hash.txt @@ -1 +1 @@ -23fdf5cddfc9f78acb01c0f11fa9f956c18b7478c0b99b41a4236c6cd2244648 \ No newline at end of file +0364a86f49385b2ef08e7896d45f676063b70a4d8d597c3cf92768ab1ee268d7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_1/graph_hash.txt index a3e981c50..13f42ec6d 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_1/graph_hash.txt @@ -1 +1 @@ -0d346506ce2de293dca0749b361be5703c59eb3bc384f8c877fc9812d4782a73 \ No newline at end of file +d6850e7733b4c8a8eb21da25fef3d42f422f8356f351dde0f16360f57f816841 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_2/graph_hash.txt 
b/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_2/graph_hash.txt index 06604f531..215134432 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_medium/subgraph_2/graph_hash.txt @@ -1 +1 @@ -857ba78588edbe74cf0c1a9455837b644208daf1a7da6779a69c075f993c3149 \ No newline at end of file +5a166761650f0d975442b5cbdf9089f8666d3df4d6c514ca6f7a09129d1be42c \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_0/graph_hash.txt index 0a04074b4..33ef8f277 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6904918b8c96485b4fd58487c6ef880d95f7ebd65e08a0bc50f526b0c9d87636 \ No newline at end of file +e000ef2fde6181b4dfbb1b24e78e3182907f5aff453854c7c84162542ab32f78 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_1/graph_hash.txt index ce13ee5ee..635731c9a 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_1/graph_hash.txt @@ -1 +1 @@ -11a826c01892944efa2f6838038c66dda0248864958dac84053fbff9ccbf2155 \ No newline at end of file +b045faaf1b2eeba7eee496a56354782bf124d2e58bd319813b730f30823d351b \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_2/graph_hash.txt index 5efd2b045..40a496955 100644 --- a/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_conv_small/subgraph_2/graph_hash.txt @@ -1 +1 @@ -fbb273bf6acfc68aeb6a6e7d7d51a04542cd064f82ce775a8d8fd9d4e4acf762 \ No newline at end of file +dfc3483b96f5c5329f65ab449d31d5c704651d2e25f9a243ef1003b6c48d0d77 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_0/graph_hash.txt index f7e7c6d20..29bbfaa6a 100644 --- a/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_0/graph_hash.txt @@ -1 +1 @@ -f9423221227a42544f254fd0695efd52491f186e12a03a6dd319ce68d0f75abe \ No newline at end of file +eb41f788a65215bd20990101be464db410f88afd04c17b3091502aae2e72fa47 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_1/graph_hash.txt index a7252b77d..311b0f860 100644 --- a/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_1/graph_hash.txt @@ -1 +1 @@ -4aefc431016e02db79b1fb7faa43bbe21513526637d628493ebf50e550986a92 \ No newline at end of file +33f8ea308b400326ab70033dda6be814537dce5ef6ebd3d1f12619e18138cfa4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_2/graph_hash.txt index 92cc63e3f..0776fd9db 100644 --- a/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_2/graph_hash.txt +++ 
b/paddle_samples/PaddleX/MobileNetV4_hybrid_large/subgraph_2/graph_hash.txt @@ -1 +1 @@ -2f2b67d857533a6a01ba16f8ddf641b52c52244b024ce09a0b40305230a330a6 \ No newline at end of file +694322776960e77136cd108f6d5a38ad05c0123b6da6b2ad1f34e47c25de703a \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_0/graph_hash.txt index dfcec5348..ff589d8cf 100644 --- a/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_0/graph_hash.txt @@ -1 +1 @@ -54c720736e439e2c910704130d69bcdf597a6a6e8e5ee64a981d940c7785921e \ No newline at end of file +0b91e3d7b8638df20c0cc78c9859a102c6e76cac8f18f66f145a5f02e752af22 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_1/graph_hash.txt index 08f04e84e..692b85523 100644 --- a/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_1/graph_hash.txt @@ -1 +1 @@ -2e923d26e5934ff583eaeb98462600386b47a283dcd3a608533b2877cc2bde1f \ No newline at end of file +4cb360485673221ff0f46c36bfa062813eab16e6374ae7237e5681f555f0bd65 \ No newline at end of file diff --git a/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_2/graph_hash.txt index 471125fcd..c632831d9 100644 --- a/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/MobileNetV4_hybrid_medium/subgraph_2/graph_hash.txt @@ -1 +1 @@ -f4d3b5dd0f4800b96e5503924443cefdf6ac9943f269e42d9d532de118390e1e \ No newline at end of file +d3c7ab06470a4a0dcc5b50b49df3fd384230ff639c4acbf04ed459dab849c76c \ No newline at end of file diff --git a/paddle_samples/PaddleX/NLinear/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/NLinear/subgraph_0/graph_hash.txt index 35c718b69..4d8407947 100644 --- a/paddle_samples/PaddleX/NLinear/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/NLinear/subgraph_0/graph_hash.txt @@ -1 +1 @@ -170e70a030cb3b467bceaac30f2a253461ba59310bfb39cf33a96568be9add7e \ No newline at end of file +3c896ba71dcecabc6cd717fd26b0c1a4211624785f5cd4fbc9cefdeba8702d94 \ No newline at end of file diff --git a/paddle_samples/PaddleX/NLinear/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/NLinear/subgraph_1/graph_hash.txt index e4355533a..80c7250ff 100644 --- a/paddle_samples/PaddleX/NLinear/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/NLinear/subgraph_1/graph_hash.txt @@ -1 +1 @@ -c29b0f8f01ab3db4989ee3c5c7702d1620e342b0e334ebb36cdd5ad853509e98 \ No newline at end of file +e05a423ff9c4a8f7294b7aa69fc8d50d0dba2010a6dc8834539f1436a6145ea0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/NLinear/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/NLinear/subgraph_2/graph_hash.txt index 4429911d6..4b2c4a9ab 100644 --- a/paddle_samples/PaddleX/NLinear/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/NLinear/subgraph_2/graph_hash.txt @@ -1 +1 @@ -8ef113bcaa76826edc18c7afe591b2ad1057e687953159d6958f9e7a9e4b5292 \ No newline at end of file +f9ad4d60a1ef3f049b1007ca1e56a640a5f48d0f08448525a1f904f93b588a2c \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt 
b/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt index 0f86f5190..f64bdf1ff 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_0/graph_hash.txt @@ -1 +1 @@ -87e111bb444c08b20429854b567149d52c99bf4d25b122eb5b425f75c7ed8942 \ No newline at end of file +c61c52bd67f662c96dc5043ce97db84f9314fb7f1ed0da204428a79ae07d3e63 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt index bbd12eddf..983ce3961 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_1/graph_hash.txt @@ -1 +1 @@ -f25e2cabc82fb9c0da64a6a26e3df0d65a589fe220cb918d2cedefb5f3b5d9b6 \ No newline at end of file +839183f603cbb7a23e357b4100da3870c924c8f1d0bb0a63c3c23a80962510b0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt index 4753e3668..4a315bd5c 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_10/graph_hash.txt @@ -1 +1 @@ -d7843cd7c0386fde7018814545e8875d82f04f1c2196f7d003d9620e344f8cf9 \ No newline at end of file +7477984bf17e572805cf49c90d389a7d2ba67a2ce69b9d845c93d10e7fcedfca \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt index ff06dca70..af7b55e72 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_11/graph_hash.txt @@ -1 +1 @@ -f42f153332a0a4876d2eb1705722de3e3b5364eba1fc200390141a5e0cd731e9 \ No newline at end of file +b96db943a0797153ca8641206fd5b711e788a1d5ff747a09f59c2988ad37453b \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt index 020c10233..a9810ccaa 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_2/graph_hash.txt @@ -1 +1 @@ -10f78c8c0374cfe39ccb3910595419b7d121f10d0a8acbb7bc2cc6ea8f82338f \ No newline at end of file +98b91c34d21c653f15a9c19ae8886613c919b26240e9a44dbd223185333700ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt index b1842d61a..81d19b0e7 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_3/graph_hash.txt @@ -1 +1 @@ -3142f6ef5efe6dbce5f20592af8b004888e7b7d8a80f99c7f117676ff6e70f50 \ No newline at end of file +f21648082acd6c02d0ac7987adfd2ae7d93b9d2b5501dcaafd508b0c7e1307df \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt index a092416d1..ecbcb4970 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_4/graph_hash.txt @@ -1 +1 @@ -032356cfa273f39af883975a649b9718eaf519bdc3eeb1bc91d47820972ce24b \ No newline at end of file +f700b1c125492ee681391fe3d0e665cf26b2969718d3be0d412c5f8f1361f8b3 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt index 093bdb9f7..e6352f0da 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_5/graph_hash.txt @@ -1 +1 @@ -ba62857f1bb017eaa210bec0adeaa37a6bd0f04662fba6014acc1eda87914649 \ No newline at end of file +d6047808f7e1473f40a92790fd134585abcbeb7b50f4c082c5a8264f3d68bc91 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt index 5275e779f..9d80e1c6f 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_6/graph_hash.txt @@ -1 +1 @@ -2cd6953494751b5fbf78191ecfcda8d8ae0a0e7998d59fff3da7ddf077a77bfa \ No newline at end of file +bf374e2f768d87fe0250e39be5d88c439a8a4ea6034765caf281cb066c54cd80 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt index d3eb17b0a..0498ef7e1 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_7/graph_hash.txt @@ -1 +1 @@ -91f94b67dd9dbcbdd483b58641ca25ee392a56eaf1f957fff7eb9f32b7c3f848 \ No newline at end of file +a84ab81d164f804399b3ad8d4117acab03a999c11f27cb88507084aef573e421 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt index cb55cb03a..b94f7481e 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_8/graph_hash.txt @@ -1 +1 @@ -2cf15df08c3744d30d800212091f1fe0c4981e649a8d64a95a1aa11b2398afec \ No newline at end of file +86150d04cae2d2049423aeb2e587f952e9ab2a2864f4f7e1f0651687c44feb78 \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt index f5ec257f1..05549148c 100644 --- a/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary/subgraph_9/graph_hash.txt @@ -1 +1 @@ -ac26e971dd1a4866631e372e4866262a726d356932ff89fe6c794066f3b1af23 \ No newline at end of file +b3eefd879bf569b820145c7a080523d504bf575b71aa5d20f6785ac2b99efe4d \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary_ad/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary_ad/subgraph_0/graph_hash.txt index 31bb34c18..d95815bd0 100644 --- a/paddle_samples/PaddleX/Nonstationary_ad/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary_ad/subgraph_0/graph_hash.txt @@ -1 +1 @@ -b8f6cf64539e3582a87a876bc0df50e24effa2cffe37eb664d171ac005c52cd5 \ No newline at end of file +1215833de3b98c9113913653d4924c2d5bc29cc341457e2a43d7c9ac7754d61b \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary_ad/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary_ad/subgraph_1/graph_hash.txt index 4f3115da9..62b2a185c 100644 --- a/paddle_samples/PaddleX/Nonstationary_ad/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary_ad/subgraph_1/graph_hash.txt @@ -1 +1 @@ -ca6b3bfd2da3d6c8e48829e8c27590b09e8cde9ec0131a741bc84b89ceb9ade4 \ No newline at end of file 
+1e488e7212f51fffce49201cc541f27734da93dd3255af9aa198d7ceba2c4bbb \ No newline at end of file diff --git a/paddle_samples/PaddleX/Nonstationary_ad/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/Nonstationary_ad/subgraph_2/graph_hash.txt index 07255f990..5dab15562 100644 --- a/paddle_samples/PaddleX/Nonstationary_ad/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/Nonstationary_ad/subgraph_2/graph_hash.txt @@ -1 +1 @@ -846aef18ce4edb3a0b6418b94bf37944e700145d2410bb21fcc073a2cd0f2818 \ No newline at end of file +743b7e1240af52a25c2d8f6e181f1862ad27f85717b8469f09a7fd5c2b6949ec \ No newline at end of file diff --git a/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_0/graph_hash.txt index f91f17b75..2fbe32ccf 100644 --- a/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_0/graph_hash.txt @@ -1 +1 @@ -21035b1e1e4909d592f920162d5ef94ef2b5b25150600b1d9adfc792c94e1ed0 \ No newline at end of file +56bb801890e0682d803b888a73da0a735157947b10fc3fc6d2dfda4cbf150163 \ No newline at end of file diff --git a/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_1/graph_hash.txt index 39c900584..20ae6cd52 100644 --- a/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/OCRNet_HRNet-W48/subgraph_1/graph_hash.txt @@ -1 +1 @@ -617b04009f62c815271bd42cf30c9a938c3fa59a9a57ffe5924f58ba17acef3f \ No newline at end of file +5d852843c032e1e718e8d2debd587c749c02921c2c1bf3569f7ca87cdc7c74be \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt index d5ccc8aac..7a01b374c 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_0/graph_hash.txt @@ -1 +1 @@ -436f3f6f93dfde4c710404815f0db94b8f7b7393560fc52082f361c8700943a5 \ No newline at end of file +d0a70b2af079f265d2496a56acc9315531c5588fc73199e7a0337644f2e13188 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt index 55e3629ce..cf4d9e736 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_1/graph_hash.txt @@ -1 +1 @@ -488b13c55345f609c55fc67aab14fbf0b27ea2cb2c6485aa90a61da490901502 \ No newline at end of file +bbf0dd08259c418e5ae37c713b8dda25239db1fd47c7598e1c11db0c99dd8cb3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt index 12260efa8..8f13dd570 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_10/graph_hash.txt @@ -1 +1 @@ -d2fca6868ae9755bedbb611af234f7404fba9dbd0d2d6b80c2558ec5c1f951b8 \ No newline at end of file +ca3d5271bcdac0dd9f21249324253c1027e1cb8cbdce533410b4ed4f4bfbc4bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt index 5db3fb5d4..8e7c071f2 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_11/graph_hash.txt @@ -1 +1 
@@ -21b997a7ad24aecdd3287e4b13db6d4d7045b699c4f2f7d845f5802c2d5ea602 \ No newline at end of file +51328c45fac4c8d53f5c6ec1fab3ece093437e625655c5fc40cb491fb2583526 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt index 6176437ef..027b79e9d 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_12/graph_hash.txt @@ -1 +1 @@ -ce455ccf6903179637c13dadc820764e7da7aa1812be7d120d5a836a1e851760 \ No newline at end of file +a2a0a39e6333793a7bbd9cb4e36f3281d8da12c6a83ada3b23dbc76b0db11118 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt index f332df0a1..eeb98b3ae 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_13/graph_hash.txt @@ -1 +1 @@ -eba5e6b4ce972cd37ee97da2d7ea1a8c7ec9ff06a3e4294fb8b30fe4172c01d6 \ No newline at end of file +20fa84d387d42a9ecef2b3297b5fcf0e2e8dff3ddba014bd4079f9d4eb07f37b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt index c2014dda9..5eb2d9c53 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_14/graph_hash.txt @@ -1 +1 @@ -83d381d425942c055e716adac75a31fbb9f28b6a94ae9658d2af1c94b3966b54 \ No newline at end of file +685994ba5055506ab45515e898051e995b4ce706e875ee9dbc275d706629a5e1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt index f76ab3c75..c430479dc 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_15/graph_hash.txt @@ -1 +1 @@ -e6b07390ecc264d34159afdf09f3372f66087988a412702a3ff90f50b5d89b97 \ No newline at end of file +a6a0eda7bf5832ef9b476d69381efe654735d7437a657ef81f8292ac3434a5ca \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt index 5af05ebd4..85d9448f9 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_16/graph_hash.txt @@ -1 +1 @@ -09dcaa02c333400bdd1d08ee2b3fdcbe7a2c0d7ee1257834158218dacceb19c1 \ No newline at end of file +6638e7a88c9f31c1f3ba229baa1256349bf19419b86788f91995e596aa893aa7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt index e8a417f38..b091201a6 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_17/graph_hash.txt @@ -1 +1 @@ -179f1deb7156fb59284fad88a911712981fc00ef3d26a1900cca44f20d526583 \ No newline at end of file +002fce987a7597c3f58c14503d0bf12cfb15fdd94f6592b5a7a5788d3a83e602 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt index 7b269a4fc..c590cfa6f 100644 --- 
a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_2/graph_hash.txt @@ -1 +1 @@ -6d6f2c9c9ef3af548cf88e1bf54264b303882c64b6c5a29f24bd261b46da7ed3 \ No newline at end of file +dbe605965e239a28d26b648724fa71cc0e8f8afdd40f94ac4d404d90026d5ed0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt index ba0a1cdaa..88cadd4a1 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_3/graph_hash.txt @@ -1 +1 @@ -aee51f07341e98d452a2561b4036b470ba72ac38e5ff480edfedbf1b34ef856e \ No newline at end of file +cda212a45a12accca3861494f598039558d335419a8ea59a0693de9313c76f4c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt index 479c81f78..92359fdac 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_4/graph_hash.txt @@ -1 +1 @@ -0abdc8831ff78ef2837b9d36e0b70316aaf4acebdffa350163a1da7b8df3099d \ No newline at end of file +ee2317e1c346d192d65165921fe4111bbcca62787f93451da9bf6da17eb08f46 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt index ac7005705..37247bcfc 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_5/graph_hash.txt @@ -1 +1 @@ -7709ba2153f3126dda880c2badf2655d046d7ae517609e5321e99f32f288ce89 \ No newline at end of file +2c064b6a13f4854594bea413eae10aa074794f5d944de3549e53a005e636993a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt index 0a5df33f3..773d3716a 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_6/graph_hash.txt @@ -1 +1 @@ -1aa239f70295a3da27b5ea248ef0f3277774d4ef2c07cab157b623e70787015e \ No newline at end of file +337faa4c6fba6d723e638e41d7f1636fe3b25b2b473168e8e9caade39a9154ff \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt index cdc4ada80..5f0c44ab4 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_7/graph_hash.txt @@ -1 +1 @@ -797441c553dd07c3b490eba8366136b5dbc4ba06ae45da81e2789b745b250db3 \ No newline at end of file +fc49f818034b8040eda537752a083b7ee4546fcdb2f81b8794ae3562e59478f9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt index 0c293ad87..38c58119a 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_8/graph_hash.txt @@ -1 +1 @@ -54f6cde8ce78b1624a8ec310f07a47625f0d46e704ba48d40aa0581e4c58691e \ No newline at end of file +8234c49aee53c338e2177f9d092cf7b77c5fc84df73fa85094c9f1961e5da438 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt 
b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt index ca87e2c42..e49624948 100644 --- a/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-DocLayout-M/subgraph_9/graph_hash.txt @@ -1 +1 @@ -0fa7d323d0622396f6da330ddbf59dc2309bb10b03202a7e2e4eab8d42603e7a \ No newline at end of file +05b63f8ac67e60c7f0a1390eccdb9be86f7178607c9a49aa83291599b1c4c724 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_0/graph_hash.txt index c98261443..87a53eb5a 100644 --- a/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_0/graph_hash.txt @@ -1 +1 @@ -69f5611ba73bc967ded0e0a6fd8b31dae0abc8a9e54eaaae2207b026bc86896e \ No newline at end of file +ce5e937657fdf923d3358a0b2b76370b1f5e8b12bb6e376b79109f02954950de \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_1/graph_hash.txt index 90a8db5d3..8fdfd02f7 100644 --- a/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-FormulaNet-S/subgraph_1/graph_hash.txt @@ -1 +1 @@ -96c837a92ffc1762d45a998f567410dcafef6604ba393c6c0b067cfccc952ba6 \ No newline at end of file +f271e04765ae0bd0b860db7db968d26f81e5db7c295b67c06c09860dc22d34ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_0/graph_hash.txt index 746de70e4..636bb7207 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c32e50d4d8afd8f20410b0432554e997eced3bd3719a34a35b1dee7996fef5a5 \ No newline at end of file +7fbe5be08529d676bfa7053636b68e079e6a8db12047f94c4dcf32d66a13e0b2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_1/graph_hash.txt index 196f079b8..a771ab93f 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B0/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a0cf0cd3e25e9b8fe7f0ea4938994fb32557d9e13830de73b76c27cbd3477f9b \ No newline at end of file +d645ea568487dddc7911afb32062cfb2160a28c978a73e9b80acafa8646a37af \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_0/graph_hash.txt index 567f708d9..11c8ca42a 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_0/graph_hash.txt @@ -1 +1 @@ -56756f056ffcd42a8a9ba614ae6618e55ebb9782bbaf02135d6b3505b081ff0f \ No newline at end of file +ea7d83815e80a618f4a77d05612f63d21e2e367a681e11e2060a33cf5ff27064 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_1/graph_hash.txt index b8b79c00b..cdbda4af2 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_1/graph_hash.txt @@ -1 +1 @@ -bdeb6b8dc7d719adc1360597f19760a46984ca02c54f86a5133eeb81a89871e6 \ No newline at end of file +a4d0f69058e147cd5398df4be1d67f3feb3ba6b3bf07f2119a6110997d649794 \ No newline at end of 
file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_2/graph_hash.txt index faffa43f4..8c5ec9617 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B0_ML/subgraph_2/graph_hash.txt @@ -1 +1 @@ -d256f974900d4f6232adc964b212ac2d8cf67e4b498dd2bba9542f2f9d3a0c8f \ No newline at end of file +5a133236660f8f36d368c4b045e389a55f071fc2030e6e0af782c9e4497e4545 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_0/graph_hash.txt index 0ff7b8fbd..d19bf9876 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_0/graph_hash.txt @@ -1 +1 @@ -9b6ad09d8af49e0e3a95523c646a10c6f3029482574a578482e0ace9eaa8d118 \ No newline at end of file +e7df9698a218c3837f7fd62b5a1dd7657cafa251d3a064554a0a398599231b61 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_1/graph_hash.txt index 1f2a774eb..93b28198b 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B1/subgraph_1/graph_hash.txt @@ -1 +1 @@ -9917b16c9a505f52821ae9f4b8b51f31471ae3a5feb7ec57035478dc3b761cf3 \ No newline at end of file +3c10fee26242dbe9973d17349ea907378f9347ee09a284f5fc62aca1fd4e1777 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_0/graph_hash.txt index fa7aab79d..543ce3142 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2d6910ca4b0fb0c837f48d9bb250cbdc67f6bb0c2d96643ae88d5caef54f17cb \ No newline at end of file +5ea4a6243029ec62a3739db8f349b646278262bf462dc71bc570c379371ab5d2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_1/graph_hash.txt index 4f3e00b21..f47484fa8 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B2/subgraph_1/graph_hash.txt @@ -1 +1 @@ -06d5635b04f3a5cbb383e17b866495e196e1f504fbf91847d069450c1ae2805f \ No newline at end of file +dc2588dc83d38f1cfdf346a1ce9eca75ea5fe60bd86a761d9642ad7f2b77a359 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_0/graph_hash.txt index 9393a1380..cd5f6fe7a 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_0/graph_hash.txt @@ -1 +1 @@ -add38a8995552fe2873c7482b161b435cb10f6587a41160fa7e6bfb08f2b28c2 \ No newline at end of file +5a560109f905a002f261531a7c33462f33ebb323fee4512bb3eb96cc391af64c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_1/graph_hash.txt index e44e01d2c..21ff68846 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B3/subgraph_1/graph_hash.txt @@ -1 +1 @@ -86e391c3260b6e143f3fdb1597c417f6bbae67d03ec53449d82e14636ace7a89 \ No newline at end of file 
+5fdab473d63c4c8917c25cc42fe5ec3029b3b0112cf97d27ab7b186ff2f849e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_0/graph_hash.txt index 3e2dd85d5..466f832d5 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_0/graph_hash.txt @@ -1 +1 @@ -69afb2710b8412412850adc8e9793f6715c267544cf217aa941635db88bc9a26 \ No newline at end of file +08b27009ecf8d48b5a5472953d07475f3380f319cd5d809052f951ae4a8bef78 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_1/graph_hash.txt index 7c29786a0..1e87a77de 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B4/subgraph_1/graph_hash.txt @@ -1 +1 @@ -dad36645d9a28c9c5fb09f3014943c16b2bf6f9dd7895c18e34fbdb9508f128b \ No newline at end of file +a86abbaab71a0cd630815fbb0d4d21da5a085319cc250074c608c0ce4d113b70 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_0/graph_hash.txt index a375711e1..d02c93e0c 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2c358368edcdcaa3fa64261fdf3ef04fd1f82d49177ea814f07b149e178fe4d7 \ No newline at end of file +4b942315bd7e810d30dbfa2c746b20ad23cccc9074556390107641a1316d9ea1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_1/graph_hash.txt index d921c6f6f..dc24104f4 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a319c553a8bb33944e597972c1e15a0ae62ccf4189c51b5d5fac03a3962d76e8 \ No newline at end of file +e44bbfb9e3b2fb88988f59618bda7592a1b6cf46f245508dc14d7e630bc99209 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_2/graph_hash.txt index d5a9ed8b6..da58ae2fd 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B4_ML/subgraph_2/graph_hash.txt @@ -1 +1 @@ -3cc048caf3123db87846bdf47f5b5abc2732f59d1ef8450d870c488ff8edd380 \ No newline at end of file +8b06c7d778b7ec19229c471a08d398a5cf1ed7f18cd6d4ff744303b41801f2a9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_0/graph_hash.txt index 038db2951..df86686d4 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2eaa69ea185e3a2a253ba9abf22621a3f0152e12879a532ff5b896d559f72f31 \ No newline at end of file +4cd265e3cb47dd2e1d34c521e2c4fe806613b4db7f4affbd58d9adf553d654fe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_1/graph_hash.txt index 6906bb1c2..15e6ee795 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B5/subgraph_1/graph_hash.txt @@ -1 +1 @@ 
-e0208e098993a6f51e262fe8a692e51ea8c364a217c1dbdcb3e0f41afc1a20e4 \ No newline at end of file +0f01c69abcbf57a01ca6e3681e304c18f694865c9fe9b0b23d4a72ee81a105ec \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_0/graph_hash.txt index 72ada02e5..42e96c6f5 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2f9043246cb3b4bd1a2b38f6bf662c4fbb42ee89e47e92580adbf58357503944 \ No newline at end of file +aa5d31892438c0b646e4fb085cc450ff144d4a31f976aca597c3764ed3f82ac8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_1/graph_hash.txt index 64e2f1333..924ecec89 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B6/subgraph_1/graph_hash.txt @@ -1 +1 @@ -93ebd080cc1fc5a0880bf50409ad8c1a6b23c0b379083aecfcaae1c05f2244e2 \ No newline at end of file +fbc9b783b4152bfa012319e4eaf28d1ecb4e84c9beec90a558dc3fafc8874898 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_0/graph_hash.txt index 54b911a0d..0d7ac70f7 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_0/graph_hash.txt @@ -1 +1 @@ -a16e850dc564ccb7daf32bcde4878dc75cc5f116a496e85e431dc6cb3f9dcfd8 \ No newline at end of file +714e41346afcdc32fcd643c69b3edf5f99f1211668aed95bd9b9b30372f7e397 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_1/graph_hash.txt index f27b7a65b..49898ab4c 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_1/graph_hash.txt @@ -1 +1 @@ -fdb5a4562cad0f9a29568eee67c7940a0df1e694a4ca1862bcea88fcee536a27 \ No newline at end of file +9cd3ebc3ee39832d0f5b5d3ad4ab6c43ba3bc3983c4f174231a757e393ccfaf9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_2/graph_hash.txt index 125daac57..b39e58ef7 100644 --- a/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNetV2-B6_ML/subgraph_2/graph_hash.txt @@ -1 +1 @@ -dea177db53cc55719c23105f608ad5aa0351c8de78d790dccc939a1dfd863abe \ No newline at end of file +a675d1c6c11d5319969625779efae518f587818250cf7c980a6bf1b09b4e22c7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNet_base/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNet_base/graph_hash.txt index e4effd477..a506a6c05 100644 --- a/paddle_samples/PaddleX/PP-HGNet_base/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNet_base/graph_hash.txt @@ -1 +1 @@ -6deb691d3c1c9c60f62217470037bbda1e2582e597b2fee2fef72bff52d101cd \ No newline at end of file +e6a60154bac0f43dfb3178e2d0368d08f20f22dec4c86aa7920d8e098426d343 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNet_small/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNet_small/graph_hash.txt index 1d8c2e976..0394a1b3d 100644 --- a/paddle_samples/PaddleX/PP-HGNet_small/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNet_small/graph_hash.txt @@ -1 +1 @@ 
-f9a79c2540cabf7019e3d7a04973ce786acb87a429b0e5075a064659a1ccefb1 \ No newline at end of file +36d929d23003549236b91a236d1fcefc6985108c5e0b1968fab2c26fcf72378f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-HGNet_tiny/graph_hash.txt b/paddle_samples/PaddleX/PP-HGNet_tiny/graph_hash.txt index 612dc1fb5..2d5d36824 100644 --- a/paddle_samples/PaddleX/PP-HGNet_tiny/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-HGNet_tiny/graph_hash.txt @@ -1 +1 @@ -dae445adb52abd0b0950652276e86ec4403e21be604ac5ff14b6b487c998b6b7 \ No newline at end of file +111653f0bf0663d3a03ff35fe6af48776a55435d10a9e960b5f2bebd0d4b8a15 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_0/graph_hash.txt index bbc9df446..3dbf4aa07 100644 --- a/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_0/graph_hash.txt @@ -1 +1 @@ -5df02be4591e332dd58f98f7f31fce5970add9843c7074a8f26f5737734203ee \ No newline at end of file +9b5cbb27f9ad6c83f06ad90df2bf3491fa81decbabedb360880555aba8789d90 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_1/graph_hash.txt index 46b275f81..8fb61cfd5 100644 --- a/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNetV2_base/subgraph_1/graph_hash.txt @@ -1 +1 @@ -621ada71a106611e864897bb370a925fe296a566f6b48feb13502da556967cf8 \ No newline at end of file +3d5acd9445da8d576269f64e00966e2ce5b9b95193def1986d649df38aa36054 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_0/graph_hash.txt index 429a9f50f..a2eca4645 100644 --- a/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_0/graph_hash.txt @@ -1 +1 @@ -971b4863318ad74a6869b28df69321b0ed46ce578ae1ea225922b864e2c6f7e4 \ No newline at end of file +f5a4a5fd2a1dc6d09f5f21e309a816134b42b288e63600ec839108de64d5f67b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_1/graph_hash.txt index 0460d5dfb..dd9ac133e 100644 --- a/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNetV2_large/subgraph_1/graph_hash.txt @@ -1 +1 @@ -973329d63bac1983447a8f53ad16428475953cf0047bace2563f4085b6aea6e5 \ No newline at end of file +eed359d0f21f43d41c257ef17829003e9fbc0eacf27796d20f5032d813257960 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_0/graph_hash.txt index c821d2ace..0ff050cc4 100644 --- a/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_0/graph_hash.txt @@ -1 +1 @@ -d19694589933f81524d5ee9a8f53830f63ef29fcfcd6613feee211dfe277ee7f \ No newline at end of file +8dbc49a3f93e78f224b7208effa159ada59888c08a092af1985f81cdb7d36bce \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_1/graph_hash.txt index 23dec9cf2..62715c5d8 100644 --- a/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_1/graph_hash.txt +++ 
b/paddle_samples/PaddleX/PP-LCNetV2_small/subgraph_1/graph_hash.txt @@ -1 +1 @@ -0cd6ec79d8d3432167f0ec52eca267934f44e7558dd9d5c7ed2918218878749d \ No newline at end of file +63ef6437c2413cd8f940871bc9b9608316c1569d95a251879464b2bcfb9e5875 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x0_25/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x0_25/graph_hash.txt index 13c577e5d..9d102ef6f 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x0_25/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x0_25/graph_hash.txt @@ -1 +1 @@ -4aa347b7517edf6dc7a030eb197d70490e5af6cbe485d0818bfc5ad7e1c2b450 \ No newline at end of file +7aff06f1ba1500c93c9bd87efa7c52da18c9cf8c195701c2a61eaeadb33ff3da \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_0/graph_hash.txt index 3b9a0dc5f..dcecd1cf9 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_0/graph_hash.txt @@ -1 +1 @@ -4e6ff989e0ac84f9aa78bd17f960809f973503336bb44243e4dd523e87b21545 \ No newline at end of file +64dccd5c0851e6907ff15f65fababb43d65858d2899aebf51dac032bba9425a5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_1/graph_hash.txt index 6de79c56b..591083d32 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x0_25_textline_ori/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a8e017b73f40453300bc052f1b7f00d358e360d80cd1a2c889dfe5ed772098c2 \ No newline at end of file +7610d5f95f6aae2232f615da8f8c7c260265854a4fa73aff9d70ed329f55a030 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x0_35/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x0_35/graph_hash.txt index 1bd205dc1..e1dff51db 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x0_35/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x0_35/graph_hash.txt @@ -1 +1 @@ -b36cc696d2738f12daa76cf86c572e6fc09245e8d0bc2ddf8263d5ef5592c81e \ No newline at end of file +a4f54d7971c0c1c723444d48a57e026ba869a2fa7b3d83256db1c6ed7d8a3dae \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x0_5/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x0_5/graph_hash.txt index 32b836112..56144e85c 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x0_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x0_5/graph_hash.txt @@ -1 +1 @@ -4a94fb27616bb1b387575f43a2abb08518f94898cd9fb5b9d586748ce02abc5a \ No newline at end of file +d06307f8e6733058ad5995f37bdaa38beee88bee54beaf118dfc8c84a273a619 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x0_75/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x0_75/graph_hash.txt index 6749ad1be..c77bc0554 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x0_75/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x0_75/graph_hash.txt @@ -1 +1 @@ -30ec45a63f4868025202aa9cc610a51a57e0637f26b0b798c3d72c4d495eea51 \ No newline at end of file +7ac73030f7034003033637828743224454dd82610c9fb1e69a6b58bb3bacc785 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_0/graph_hash.txt index b863e1b74..442f73039 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_0/graph_hash.txt +++ 
b/paddle_samples/PaddleX/PP-LCNet_x1_0/graph_hash.txt @@ -1 +1 @@ -48f795410c50b4bcac355f612a3ce94604528217c217063730e7a57ad7ceb200 \ No newline at end of file +36395029680cc6a75802586e003b629a11b6e0e21b16a991cfb14e6241d8cc5d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_0/graph_hash.txt index 88104727a..5185c42d7 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_0/graph_hash.txt @@ -1 +1 @@ -7f97ec7cda468bcb3edc7ef4d749e570435c857bb666267c298e34a4ddd95a9a \ No newline at end of file +f3736957a32b458e9857d6afc54369c02af3eb7e57e9389e57fb6436a4c692e2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_1/graph_hash.txt index 97c91e184..1b507ffec 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_1/graph_hash.txt @@ -1 +1 @@ -e2beb003e279e3a4dedea959dc3eaad752d12fc61afb8b7734e52537b7afe88e \ No newline at end of file +2721b211cd04a82a31d16c873f04d0a162b20fd578a41a06d7c011f00f42693c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_2/graph_hash.txt index a9289cbb2..05b2cbfa9 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x1_0_ML/subgraph_2/graph_hash.txt @@ -1 +1 @@ -23b1ce67e7732f9b84b388830fcfb251d003714ea0751c9d9e745a187b14cd24 \ No newline at end of file +16711fe6946bb9343b2c1510f8b61f4b1db2ed5ddb504cbe76e1e640155a9178 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_0/graph_hash.txt index 1ad3e6af3..402497a02 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_0/graph_hash.txt @@ -1 +1 @@ -4611b39c9c1fee82f2ddaae8f718e84d4bad93860226094fbedb474e97577987 \ No newline at end of file +58c82ad9f2907d511e29bdda26c5920dc6e35d145ba89c4a517010931b599d4b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_1/graph_hash.txt index 58d910357..7963857f8 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x1_0_doc_ori/subgraph_1/graph_hash.txt @@ -1 +1 @@ -44f72a7db4555cf32818cfdbc91e2c08b9b5730cf562d8186e78a3bd8bacaf65 \ No newline at end of file +3846fe79b8f9fc5cf728915588ca59d92eab4bf157a16c35903f5ca101a40bdb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x1_5/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x1_5/graph_hash.txt index 93dea2663..719c9cfc2 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x1_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x1_5/graph_hash.txt @@ -1 +1 @@ -6d39b472196d1523887607281df4205945d479bb14bf30bc8ab44b63888d1991 \ No newline at end of file +de6db3df3096a43a5d1056f225a05f88f8fc4ac881ac80564cee3301ec11da50 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x2_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x2_0/graph_hash.txt index 800234f54..a34452f01 100644 --- 
a/paddle_samples/PaddleX/PP-LCNet_x2_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x2_0/graph_hash.txt @@ -1 +1 @@ -e37aa543ff6ffe01c0b2f9a238e6e7d52b62752edf03ac6ea4fe5f96f43bfd66 \ No newline at end of file +0f19009e89ece89b5c1dee98c2dd1af5d4b8c86c5f8a572f3ab7745d3a24fe0e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LCNet_x2_5/graph_hash.txt b/paddle_samples/PaddleX/PP-LCNet_x2_5/graph_hash.txt index 41f869cb0..fe4cf8f1a 100644 --- a/paddle_samples/PaddleX/PP-LCNet_x2_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LCNet_x2_5/graph_hash.txt @@ -1 +1 @@ -dc9ea8d1d25f97bdc146144e58aa32935367bd96ee8244a812b07e61e51595ee \ No newline at end of file +3d47b0300ff781b6aea85c41ef8c94bc067dbdc97c70c07bf5b3e720b0fdc8b5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_0/graph_hash.txt index e1a3aa3ca..5ba44c574 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_0/graph_hash.txt @@ -1 +1 @@ -17260fdbe06eeae7b448edd28a2f5f501320a383a7b37bd10b3e6f90e72c36d9 \ No newline at end of file +b463698f51ade66826ddc939b8dbb9fdd7543262d2e043151109eed911d1cd3b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_1/graph_hash.txt index 7cfda0be1..971af22fe 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_1/graph_hash.txt @@ -1 +1 @@ -c2c38f47179362c0350f7ca6912530b5d37308a0350468fe2e0cfaa2c35be452 \ No newline at end of file +4cf2ceb1396b5ecca4ba61128987c707ea2e1aac67ec9cd9f99a61b2cb73c1c2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_2/graph_hash.txt index aaa04198e..9f3c6ab20 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_2/graph_hash.txt @@ -1 +1 @@ -22fb900f38d4bd0c75761774cfad8b47748a1b750274aacf7e91695a836e9fb4 \ No newline at end of file +4d9bbe82fc731d2465997ab3e3fa89a095fe6480b186a2c5088ec168ac64b32d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_3/graph_hash.txt index 7b330f807..60ae51325 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_3/graph_hash.txt @@ -1 +1 @@ -859934266a34a0427e9527b4a92e12d39f95df8e8517a3fda910a7c12aa31c47 \ No newline at end of file +07cc572ea0546a2cb2bc44216d0e0918d97520304a8dc516dcb5a282c4527c54 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_4/graph_hash.txt index 8acc26a46..a3b824025 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_4/graph_hash.txt @@ -1 +1 @@ -539f6bf38c4e99043581ffbf90e01409e575442614bcad6144ded2938ce0f021 \ No newline at end of file +da2b6b5464659b5713670419fbf0d42263cf5151f94eb49dbc48805e86deb9a0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_5/graph_hash.txt index 639bd74fa..3cc4629dd 100644 --- 
a/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-B/subgraph_5/graph_hash.txt @@ -1 +1 @@ -d4bea0f58cd7ad427d8da13e58924cbaa31ceaca6415d24490c2a0d1b15b6231 \ No newline at end of file +e4a043bcad835c9f1a7db81265f1a580f7f88da3347c981aaf1760f5fbf89cb1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_0/graph_hash.txt index 6d6c9294a..2b8550a2f 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2afa51f0d89b437d27c908f1bc14631f2c48afc339a4ee229133d7ad16ae9aa3 \ No newline at end of file +93c748805a8afb1b1cedf9fcd900b6cffd917976459a83655594aecdc7caeaf0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_1/graph_hash.txt index beb0ae95f..256217a74 100644 --- a/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-LiteSeg-T/subgraph_1/graph_hash.txt @@ -1 +1 @@ -56504855650fe09557ef71ae948f5528621f0049a83c6ce4b1e9aa2f42c1c7ad \ No newline at end of file +5be0d14d8b21d9d8d830fa0591f237bc9555481537310780e70d125f64cdd2a3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_0/graph_hash.txt index 86f8bbc32..386c166dc 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_0/graph_hash.txt @@ -1 +1 @@ -8e1c5018eef210dccf1b250e685644327203ef21ca89032c827797bfb7a7c843 \ No newline at end of file +e71f17e77402db52bf808793617946f28a6f9632aca4687dfd843a3b2d03543c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_1/graph_hash.txt index 083978e80..b81fb96eb 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_det/subgraph_1/graph_hash.txt @@ -1 +1 @@ -9ff4b35802bf9e19b9120b5f75a4e3a9ebc1015d03deaf1affd08c55d7ddf476 \ No newline at end of file +2215645bbc3f40e6e0afd32b279196e9446a7e5d6bd17f0caf9c8b108a18fb0f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt index 4a6bb8a80..1a0a00813 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6e14247884f18d61a5175ae39e8a13548e42ca059d5bb8dae132d71d507d8d18 \ No newline at end of file +c7e1cb74438ff6c874df496d7e47ad310d446d13798ae47a328d4ba2ca7eef72 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt index 5799af452..4af5c8139 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_1/graph_hash.txt @@ -1 +1 @@ -64e2e08846acb3c913ee71113176ef47ee987044b7c7f694e7569e92bde81ce7 \ No newline at end of file +1d24344228ec7f8bdde5bfeb99d2f13f2e1df0e49e4dd85d77ccc726e2d0c129 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt index 73ab95847..fb7e32e14 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_10/graph_hash.txt @@ -1 +1 @@ -a3e8dc68e40a1e1a56c892531c38bfa9d35e4080ff571434f85919fdc4ec5df2 \ No newline at end of file +b1f58fbb6d984c4e17d8faf88626882ec42d0ec41301ad8020ec36cf768bb084 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt index d05b1513f..e6761e886 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_2/graph_hash.txt @@ -1 +1 @@ -048de6a944fa418cb405e398255ca8f81c765fe973ea3963e1293e80a8956ed7 \ No newline at end of file +9fbb48572762758963849f7a08be5ca34adad5355d1c50e639f4f1fdee68011f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt index 7a360ecf8..fdf606222 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_3/graph_hash.txt @@ -1 +1 @@ -3a97b5d27d67627d5a107385b311cd467dd48e1126e5e0d26d29e7b648cbc6a2 \ No newline at end of file +32666b606e1039622a4406c454a0308116e541e3ee1d951bf07c79f9dbc87a7a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt index 9c8d9277e..7830f3371 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_4/graph_hash.txt @@ -1 +1 @@ -8c102e7dc9047680a4acce979cd377f1163f5202a978bc3717cc7d91e985a3cb \ No newline at end of file +1d0c3d32211cf9da1f8ed44f942de822a62fe64c5eca6d39915add27003fd1f7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt index dba345193..d85f2a01e 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_5/graph_hash.txt @@ -1 +1 @@ -3a4d2102f9f1e0529e1231b28ed12842864d2c11bf66dc7fe8d52582972a2faf \ No newline at end of file +da2e883003b093da5f4baa0694331a041af434a5bef8b485b7dcddcccf1d199f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt index e846e4adb..64f5d4480 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_6/graph_hash.txt @@ -1 +1 @@ -55d8f61d74e448d5127ce01028deb67fb3999d01653aa5f82373f65dceeb71ff \ No newline at end of file +b3beb6a5bb4a6c178692ba75fefc84d2fa5b3a4f41411ddf959be0a8ee21ebfb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt index 8954562a4..88c6b0755 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_7/graph_hash.txt @@ 
-1 +1 @@ -3c0581957e4dc90efeb656504e6c756d99634bfd60a327fb25c4ab52f8ed9494 \ No newline at end of file +ad4733eef7eaedc188adb7c6b7586b93762509f56df528ec6a356f7a135b5912 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt index 556425964..6a58410e6 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_8/graph_hash.txt @@ -1 +1 @@ -191b451ee9ab92acfcb4e6040792558084caccf2504f69966683a93f81fbf80b \ No newline at end of file +841289af03b17fca512c1a6041fde7f5511edc9204de48ae6eb4a4716b511d09 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt index b39968cf6..5b730b5db 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_mobile_rec/subgraph_9/graph_hash.txt @@ -1 +1 @@ -52169c818b1f22c50ffd0e3682973a1a793a24b40927dc1481c9bbc7c7e736a3 \ No newline at end of file +e7b5d226a7046fc647ee07d35f8aad53a3e4bd72f3debf0bf334450ced142222 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_0/graph_hash.txt index 888f1f958..468e8307b 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_0/graph_hash.txt @@ -1 +1 @@ -5b97fd0a9f21146ddb8753245ba8675711b282a1211a12623cb424fb9ad1518e \ No newline at end of file +e7a84778066df316f5327c0fb7e314d16bf5c3bf9f211fdfe53b5fce223f5009 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_1/graph_hash.txt index 9b55b6422..d27687bcc 100644 --- a/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv3_server_det/subgraph_1/graph_hash.txt @@ -1 +1 @@ -f04a1575e10175e2fefbd36cfd446dec369c034c3231a72aa4da077838130714 \ No newline at end of file +546a43b2090924f6bba469c3906fd546725a54569efc38574b91d8a0693a8e0e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_0/graph_hash.txt index 315188324..62a4dea34 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_0/graph_hash.txt @@ -1 +1 @@ -5a0fbc29b0a3a1de7f469d4055ee60b97349c9fb8a693be8fd3b6c72b8bf06c3 \ No newline at end of file +37112410e167ef2a57a03fc1dda068c81586fb806a02261783fe1ec569164f6e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_1/graph_hash.txt index 5d54a7459..3466c8559 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_mobile_det/subgraph_1/graph_hash.txt @@ -1 +1 @@ -bef19741bf0f4db772f50ac43e0a2dd337096fcd40982187cbceed8bd573a83e \ No newline at end of file +919f1fd88fa8c749c874ee586ad1d682571fda307e32276073c205a5d827144e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_mobile_rec/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_mobile_rec/graph_hash.txt 
index f7a3f0c5f..d97112a23 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_mobile_rec/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_mobile_rec/graph_hash.txt @@ -1 +1 @@ -2d1a6032d0055cae6478cfca7821a43b53613c5ef018c01205af403ff7317f85 \ No newline at end of file +590804b4cc1de050e71a2ac8d8964322990d8a2751795c57746ad44803748ac6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_mobile_seal_det/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_mobile_seal_det/graph_hash.txt index afa216190..b838ef4d4 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_mobile_seal_det/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_mobile_seal_det/graph_hash.txt @@ -1 +1 @@ -ad6b415f85e26f927a12ddf00dc2dad5c24322d2af7b5c4198c12f5a241f1ed6 \ No newline at end of file +1cef8ba80261618c7850916b6a7dd73a19b16a0e07c4ee217d043f6d40855f93 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_0/graph_hash.txt index 36d76cc9d..b7ff88a70 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_0/graph_hash.txt @@ -1 +1 @@ -5297168695cb600bee400b573723be5a00bf08d9f6ee0102fc6839d2a516667e \ No newline at end of file +bafff5cd165597ee4c76a5093958d7cf4fd45bba2a0a1ffa79a3c16852da272d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_1/graph_hash.txt index b73a4bfb8..a9f261bce 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_det/subgraph_1/graph_hash.txt @@ -1 +1 @@ -9b7bec44ad1fe5175ad1a1e919111e407addd909a1ba6a1ceceb75a1fc1e1fd2 \ No newline at end of file +0a8253ed92b126a0adc8d227bd2fb02b512010d7260c16e62d11417fb5411787 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_0/graph_hash.txt index a68f1510c..b09b5a8c3 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6f12cec7c062bb684bd00600fc81bde704083ff541f6cd3d6d2d881b0067a0cc \ No newline at end of file +2e032475f29475ea6f5bc6d8a39e390ce8b15c610e3964f1536d2a7c51c60241 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_1/graph_hash.txt index c8a3705f1..992c75986 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_1/graph_hash.txt @@ -1 +1 @@ -2c7547013d21aa29bd59b633810943d816bee35aa047b1e48ffb1d43db17c108 \ No newline at end of file +8212a762ef212cf479be594fe49e22465f30a669fda9a38c7daa6498e2c22926 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_2/graph_hash.txt index 9311bf2ad..fa1c5fa16 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_2/graph_hash.txt @@ -1 +1 @@ -7b736059050d99642689811aec3221ad7294d35c82153889c35fa7388d41714e \ No newline at end of file +8834e22cdcd050b6929d2f58106319eef4ff02da4c8e460b1ef20c1eae257ef6 \ No newline at end of 
file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_3/graph_hash.txt index 63033891b..0866e7df4 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_3/graph_hash.txt @@ -1 +1 @@ -94aa6e6f45a115a50df9861212bf12b4a45907b13d360c3e93ef7036499834c5 \ No newline at end of file +81aea3c03962fe850e0d580a7ed3306830b93cdf4d801335ba8e7bb207dda7d8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_4/graph_hash.txt index c51994cfd..95b159315 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_4/graph_hash.txt @@ -1 +1 @@ -c017509ddc18e26b06d7d0c87fda35091a31c2451a67076b29984a09f121b5fd \ No newline at end of file +048f2328ba5b252a32b6b699b8c4a1c83a9f3390f4111e08e2cf69d9656c94ae \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_5/graph_hash.txt index 9e2128dc5..1c6bc8849 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_rec/subgraph_5/graph_hash.txt @@ -1 +1 @@ -9da03ade39915ff631f895c11acc92200f2488fe365642e852788cfdb6a831ec \ No newline at end of file +59fedf81a92b1ebc103d169a67f719e8169796ca2cfe745222244e04cddfcece \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-OCRv4_server_seal_det/graph_hash.txt b/paddle_samples/PaddleX/PP-OCRv4_server_seal_det/graph_hash.txt index d0ebd236f..4bcd16cb5 100644 --- a/paddle_samples/PaddleX/PP-OCRv4_server_seal_det/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-OCRv4_server_seal_det/graph_hash.txt @@ -1 +1 @@ -f109d32db95895eb39256cba4b59397cce581d95777d562e99d1f38fd29f08b5 \ No newline at end of file +eb85cca489a483a2e4e7cb6dda7975a8676791400a7ba8e787cee0c666832910 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_0/graph_hash.txt index 65b0420be..f6cd2c00a 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_0/graph_hash.txt @@ -1 +1 @@ -5d9f100fceedad9b205cde6f1302c37b4e15f75f7eb749bb52525fc966017c9c \ No newline at end of file +6883f61c4256037b1c93eab2b8e7cd6df2be73e3042794ca10dfe87f27abd6ee \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_1/graph_hash.txt index d9f48ced5..a8ce9ab35 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec/subgraph_1/graph_hash.txt @@ -1 +1 @@ -876f68f4d201db326465dd691f7ae51aa1b7acdf1791c42bc3af0ef1e4b10b2e \ No newline at end of file +3ee5034c8777a156de23755f3e53781fb75c2d8a79604d25a31dcf851208c381 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_0/graph_hash.txt index b5d5c6d76..1894ca077 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_0/graph_hash.txt @@ -1 +1 @@ 
-5c0ad1af661389359decafdf174bc63dfd7f63603872c6456dcb95f7f91b0196 \ No newline at end of file +da9f4875ef41be52c01d3c474e69ce770bcf8da0a68fe8c158c989e2caf2e3d3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_1/graph_hash.txt index b5cabd72b..3612d1093 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_1/graph_hash.txt @@ -1 +1 @@ -b70e9c9fe93412f4ed7ae760bdf3798656692c7764edfeaa12a649159a9868c0 \ No newline at end of file +4962c31b787d19da6459f48a3e70b1f8b55010dffb3580e246b1c24c659683d2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_2/graph_hash.txt index d7a36a048..329f979d1 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_base/subgraph_2/graph_hash.txt @@ -1 +1 @@ -85f90d4c8401a88ffb99ae4e30f282dd49bf1a44ef00e465cb6ecaecb0371a21 \ No newline at end of file +8eff89147126470898ecf195421727c821ba791190ea717db8f6b000051523d8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_0/graph_hash.txt index f5a713b71..e1e2d609e 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_0/graph_hash.txt @@ -1 +1 @@ -b24547ed092033072baa26ff5f80e35cae09511b8b0bcfc04e6912c145441ff3 \ No newline at end of file +2d45619e7d675ffe81a071922e02f15a14db6a699ec595b79800289d450ab3aa \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_1/graph_hash.txt index 47942aae4..b650c9113 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_1/graph_hash.txt @@ -1 +1 @@ -2cc1922d2dd54ff9eeb5390cbe8d43f5c9d6a6a5c4c0f9ecc1c7fc6305c1adec \ No newline at end of file +d2380752bee8512f00a10900750eb6ea8265ed52767915005222c6ea2e78c056 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_2/graph_hash.txt index 1a844dcff..261b1f173 100644 --- a/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-ShiTuV2_rec_CLIP_vit_large/subgraph_2/graph_hash.txt @@ -1 +1 @@ -f6f926d22212df15e9eb42627f830d8e031f62e679fc2e43fefd973109acfa8b \ No newline at end of file +c3f26537be49d15355e9e7273a9d542bcf4358e48316850ebf0035ce70ee7829 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt index 8a3ea7694..3a198bc60 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt @@ -1 +1 @@ -a29b29f75d677bfc4d3d22fe15a38544cc611ed4c5426ad85c9ccd866320ff3a \ No newline at end of file 
+bfcad265ced09de3d41f5c6ed667e3d03526d992a84e6eacb1408af671245a60 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt @@ -1 +1 @@ -4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt index abcd385f1..7b5429dac 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt @@ -1 +1 @@ -eaac049364bed4d6c67fc3935e79acb80e63ba96c2283c6c4364c4ef58871728 \ No newline at end of file +d079f80b47d3627f0f874f2ab967abc6e350f273029f03bcf40572198642b666 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt index d7f7b18a2..eb0adff13 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt @@ -1 +1 @@ -7bb8a2b2502a471463ad03a6babccb8a2db42f0cbace538187cdc42cf672f3d5 \ No newline at end of file +c1b8c019f1768926fb1763d743b6f1af638d4214fa9152321214c544c752c751 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt index c50f83e98..f3ee1e92c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt @@ -1 +1 @@ -167a64ff79f6c85c326535ed7c795e778140d55e3adb01cadae8d8f479d3f197 \ No newline at end of file +ccef89c785301336e92928a91f66ad752e4ce87933446e15f2082e86ac6642d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt index a0a4bdddf..1b08335a3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt @@ -1 +1 @@ -92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file +999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt index 5b6734509..f667cd716 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt @@ -1 +1 @@ -9e1e78a2b9529fe1a568cac2694d2e415d0a0de0add87039ed78bf63f19ac354 \ No newline at end of file +6eae9d07eec352b068f56eeceedb50c3de67909a5597f94efe81153acb758785 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt index 0467b2504..fa10de35c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt +++ 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt @@ -1 +1 @@ -dd67c9b14d6f4a8d4f7743618ef2dc8d751095eadf10338c08c3930ba5bb87f1 \ No newline at end of file +dd4756aee481284a2f105818ca2768c725dcd96b0e5b32c9865f9e346e77ab73 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt index 8644d9e4f..df39cfc7f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt @@ -1 +1 @@ -ab00f78367b398fd627ea53927a42dac99230f6540cf3006c528aebbab4be04a \ No newline at end of file +ae4943d1bf44626f825f88e91e02d5245f0173aa7e1af9f7c50571f314d6f983 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt index 302d9614d..299d8f62a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_4/graph_hash.txt @@ -1 +1 @@ -03f141519fd2680b171e881b52c9f1915c126a7a8316d88046de2da35494a18e \ No newline at end of file +34a4a64a29aed61e73745aff7d436824981e60ec9ca77c8aae3243813273e432 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt index 887348c41..5647b7c51 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_5/graph_hash.txt @@ -1 +1 @@ -1e402c4f0d4d39dfe54d4f81fddd4df61c186704bf895e847864ff205620eab8 \ No newline at end of file +70c25547ac26d27ae7affb4e8c19f6d0d16514dcc4bdfe8654f255166e01813c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt index babd6567f..7fbb8551e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt @@ -1 +1 @@ -3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file +b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt index 665cc1cb7..d3368bec1 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt @@ -1 +1 @@ -4c194f1b47af22d5dbdc2dc8f63cad5abcfa9a3548b3439131bcdfe6c15f25bd \ No newline at end of file +b31d9174479d0938255cd2ec58334899ecf03916288acd4eafc0c43a6b55388d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt index ad26781d8..82cfbe272 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt @@ -1 +1 @@ -99bd1d3460daced0b08a14965df1673734764218c3df6e74b7dd522b3b824b95 \ No newline at end of file +1cf576ba841b87c90f808dd83fc76cf6b31385742f2a6f80953fa0a3f614b06c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file +8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt index e94f1ce5a..065083791 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt @@ -1 +1 @@ -e0ff4c70e120c4ec0e602d727b74e15e6f4e61525ade2ac5f305aac2c206752c \ No newline at end of file +753a38369a6c1cbad2faa292d21d2c22dc88ca45239db771fc9c111eed147698 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt index a93514ba4..a2c6d8b8a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt @@ -1 +1 @@ -be2fb57bd448a9ffeb7401288b396cc0d51942b463c2f34662d7485236768468 \ No newline at end of file +81e1c79881631ba9c8fea543662e0c88108b9a0a23f037c923767f08270a38b3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file 
+8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt index babd6567f..7fbb8551e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt @@ -1 +1 @@ -3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file +b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt index 1fb84b936..353f8f884 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt @@ -1 +1 @@ -7151a27ee13106ef0614d1a21e8ba0cf50c805a756c390f71a03d77fcff10b9f \ No newline at end of file +08a098f21657c7b30d6d76353b09026c37c3d8117fe79f45a73221c13072771d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt index abcd385f1..7b5429dac 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt @@ -1 +1 @@ -eaac049364bed4d6c67fc3935e79acb80e63ba96c2283c6c4364c4ef58871728 \ No newline at end of file +d079f80b47d3627f0f874f2ab967abc6e350f273029f03bcf40572198642b666 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt index a0a4bdddf..1b08335a3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt @@ -1 +1 @@ -92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file +999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt index 326177577..5a5c73cb5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt @@ -1 +1 @@ -a359c7e1d53cf1fb3878706e33bab7ffa5b4304c0589d7e759965f8ab6ff7f98 \ No newline at end of file +e7b7e25c61625d326cacc8dd082d9e064f725c78447257a261d2d0d52c62c7d7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt index e914be348..e1dc0923c 100644 --- 
a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt @@ -1 +1 @@ -9da1bb687362f1446a27742d64e3f86d5a3d7446f117fa51faab14ad46d53591 \ No newline at end of file +96270fd68d8040809a8cc15cb8e3f37952250d7449d9ec83b4c6e3e07777bdb3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt index 8803a71d1..a62e3c346 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt @@ -1 +1 @@ -caff3ce11eecc715bd3f3941781af9e1fe02099d04db57f533f2d44d2d0dd33f \ No newline at end of file +b3076122b18bff71174d70b804480bc7272686491c85cf6a5e3c4b3eba6b39ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt index 712096340..a04aa3401 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c0ac440995621c9e2a73dee011d634dac3ca35eca6918a25e9e939c3d6586f0b \ No newline at end of file +11758ac67a4c9ce4e3983a0d47d3727d7d42e98bb95346836db0af324291e9da \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt index 316d9bf6a..88913eb2a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a7d47f7ed75ec9b55a42cf1c60b5fc2030e2639e9672ffbc87bea34164440ec7 \ No newline at end of file +1660089eab20509d100cb2fca4deb2066f12563dcb21b0459bee34eac3fd59bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt index 68e54c8f3..2c2da0579 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_10/graph_hash.txt @@ -1 +1 @@ -a1c982d6209f1af82607ac615f8d65901a61675e46badb73a97a8a6613374fbc \ No newline at end of file +064044aec83cd2b03dbc5574e21a053b55f8c9296d97fa79c0a8d867110e258a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt index 5ad04b22b..988534b72 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_11/graph_hash.txt @@ -1 +1 @@ -fc6c1013a1c537ed9069d967e1ede3864e15bc7a5dd09d50bf07295e0ee1d545 \ No newline at end of file +213caa3db343502bbe709ae92852197fe1651b11d283baaa9e45c7f0b5b8889a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt index 4d4ae9229..637c3e064 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_12/graph_hash.txt @@ -1 +1 @@ -abb57645d2484441c98fdd426775c0779eb8f171f1d589f9f8c3a56db2cc1026 \ No newline at end of file +a05ee1dd02d9006d81989fcff1f35e9d2947312e0d8822323f9291cebd3e7aa8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt index 0dad282f9..316b84702 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_13/graph_hash.txt @@ -1 +1 @@ -9c2ba69b001c30f35f1fff615eb077d3cc490e07ffb0a0a5110d683d166c9e2e \ No newline at end of file +8ac2a7e06c1b12c8919f3d3ffe45bfc4bc8d60d57ba8f6b1886ede9e5a15eebb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt index dc95f7eb1..195bb0704 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_14/graph_hash.txt @@ -1 +1 @@ -dfc510274da062c371bd6a71687a1b4af2f590fd054d89e9c1d4f310a3f1a66c \ No newline at end of file +627740428144c5c24fe1381cb2b9944a4e5062c1b9c67ff2cf42745c5a541450 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt index bd368ecea..2a648a0fc 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_15/graph_hash.txt @@ -1 +1 @@ -0739535a303e963f75e3d3f35bf46eb7168b11f70d4b9ad32cbdbb7a3b49e344 \ No newline at end of file +9ce4bf27641cec424c76266f8f3dde955f3ed3b34c19bebede2db6063aa5fff6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt index 2a28039c8..bc8a499ca 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -76de3a25bbfdb9c5decc06d06c4e92a9c9aaf027b35f42107faadedc97adb77a \ No newline at end of file +de96bf6fd896421b8952dcaa147cb2f563af27fab1dacc66620c0980bd3af1fb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt index e36947479..dc976669a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_3/graph_hash.txt @@ -1 +1 @@ -933a1da5ea0509dc0825c747220a06198cbaaf34ad1a890eaca225939ed98247 \ No newline at end of file +b921bbfee18428d8610c310bd96f386049a3dd2218bbf714070a45727bf614ff \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt index 78ddeb23d..9965afc04 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_4/graph_hash.txt @@ -1 +1 @@ -2fc04306fb10cd9cb65a1a6c768f4e1be6ed27375ecebadec001a8bd38306733 \ No newline at end of file +e773b34d738b6457ea0e52073a6caa54e4f6fb4537932d1f6da1d63d44c01c90 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt index f2b2e24e8..b6a0f4c93 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -f7702ce22ebd08a9d0227ec828a8615448e94e4ee70413df0be086ee761d1ad7 \ No newline at end of file +9d5cec58743096b628365b2c75496f477a9d001db301cf8b437cfbf4cf652d60 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt index 7ef9d4761..2f18c147a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_6/graph_hash.txt @@ -1 +1 @@ -f57fc80a3f5c6d7b77d51fc6aa07f467eafce95a075d35dcbab196e3c37e5f1e \ No newline at end of file +849c8337294fb8b26c7ba7eb981065ecfca5e6650c2e9dd75291ebeb2e9799b4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt index 1a621aabd..ade792b54 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_7/graph_hash.txt @@ -1 +1 @@ -39d4100483aa4e64e442723fc1c71201bbd35f6b269473df8b274bae6e0cde24 \ No newline at end of file +3836fac6cea1c912765cb5cd7f72d4d7a8d10c3d7f1d4c26a9c90da8fa16edf0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt index e9f7ce787..be1905f12 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_8/graph_hash.txt @@ -1 +1 @@ -03997de5ec3a4cd5ed5db2d1365661435b2432ebcacd906a6be3edec6dc9f223 \ No newline at end of file +d051a16fcde295d41bcbf9851d17feb873c9d5d75574dc5afa5328f56dc60a84 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt index 7d7ecfd98..a099868b0 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-R-L/subgraph_9/graph_hash.txt @@ -1 +1 @@ -d3c0cf999304c69bc5a1b32523a7375e4782a894dfbb9b4bc1e31a8a45349cfe \ No newline at end of file +db298e3ef13ef9045a1d6c124da85db1c50d471fb5d8c8f78e85297a3f27d966 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt index 4e2142c30..9e010ed0a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_0/graph_hash.txt @@ -1 +1 @@ -933a277bd96b5968d6442985553c5c06a3642ca4a7c70d2a5dbb73274c8a9440 \ No newline at end of file +7079a2746f95c95103bab62fbcdce91e663d70fd55597de2100a0b654fbdd66c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt index a0a4bdddf..1b08335a3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/graph_hash.txt @@ -1 +1 @@ -92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file +999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt index 740832943..a21679610 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_10/graph_hash.txt @@ -1 +1 @@ -abe7a59d414d67e8b36ea3baf93dd665033fd681f03833e93a277b4a1096aad3 \ No newline at end of file 
+9950c1f0a716c3b900cccedec1f4dab335765398c1e5ddc0809a2c904750e9f1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt index ad26781d8..82cfbe272 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_11/graph_hash.txt @@ -1 +1 @@ -99bd1d3460daced0b08a14965df1673734764218c3df6e74b7dd522b3b824b95 \ No newline at end of file +1cf576ba841b87c90f808dd83fc76cf6b31385742f2a6f80953fa0a3f614b06c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt index 8644d9e4f..df39cfc7f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_12/graph_hash.txt @@ -1 +1 @@ -ab00f78367b398fd627ea53927a42dac99230f6540cf3006c528aebbab4be04a \ No newline at end of file +ae4943d1bf44626f825f88e91e02d5245f0173aa7e1af9f7c50571f314d6f983 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt index 4e276d96d..315669793 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt @@ -1 +1 @@ -dd9b6f7fec930c53532803207e780b6bed2e5fa78a9c0c93e36150ac8b0d9463 \ No newline at end of file +772280e4e0e6df48fdaa1b3bea8b0dd5cbaf7dad6c90d940fb9d38ade5d09fa6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt index a93514ba4..a2c6d8b8a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt @@ -1 +1 @@ -be2fb57bd448a9ffeb7401288b396cc0d51942b463c2f34662d7485236768468 \ No newline at end of file +81e1c79881631ba9c8fea543662e0c88108b9a0a23f037c923767f08270a38b3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt index 665cc1cb7..d3368bec1 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt @@ -1 +1 @@ -4c194f1b47af22d5dbdc2dc8f63cad5abcfa9a3548b3439131bcdfe6c15f25bd \ No newline at end of file +b31d9174479d0938255cd2ec58334899ecf03916288acd4eafc0c43a6b55388d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt index 658f2bbb3..a74290359 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt +++ 
b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_2/graph_hash.txt @@ -1 +1 @@ -421938027b4b9ee89be16cfe46e23d1f0bea007eb4a694b280e1afd3bfbe8afb \ No newline at end of file +f5e703eaa6eba4edaf2ac46812165256f791df79475b2f48fa7766032b11ca57 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file +8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt index babd6567f..7fbb8551e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/graph_hash.txt @@ -1 +1 @@ -3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file +b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt index d7f7b18a2..eb0adff13 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/graph_hash.txt @@ -1 +1 @@ -7bb8a2b2502a471463ad03a6babccb8a2db42f0cbace538187cdc42cf672f3d5 \ No newline at end of file +c1b8c019f1768926fb1763d743b6f1af638d4214fa9152321214c544c752c751 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt index 35c8b5035..0ccfc3272 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_7/graph_hash.txt @@ -1 +1 @@ -9b97a8be55a85c114a1defeeda79b51bdacb0da2022b5c09e40b3c3e409a91cc \ No newline at end of file +6343e32eeca78539191a9490fdd662b5775659b2a787e2402d27d6f0296824b7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt index 8a3ea7694..3a198bc60 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_8/graph_hash.txt @@ -1 +1 @@ -a29b29f75d677bfc4d3d22fe15a38544cc611ed4c5426ad85c9ccd866320ff3a \ No newline at end of file +bfcad265ced09de3d41f5c6ed667e3d03526d992a84e6eacb1408af671245a60 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt @@ -1 +1 @@ -4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt index df6fb86a6..21b305551 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt @@ -1 +1 @@ -0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file +700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt index ff5ee4421..248541aad 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt @@ -1 +1 @@ -926ad786388dcdbc5641be69f210f8d6d088d56e6f57512e3ca7ee358a2b968b \ No newline at end of file +c94cf7aa14030cc58b016c1f88c7cd23c6ff7ce7be2dce9f0f95c5469f2ac412 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt index babd6567f..7fbb8551e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt @@ -1 +1 @@ -3d13d62f659e1dd50de7ed21b396c52b0de085fbe77030af69ce5dcd93ef5c05 \ No newline at end of file +b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt index 273d2ae5d..e5ec97328 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt @@ -1 +1 @@ -b8f543de30ca185d01a94256b2c02f15007155c859cd8c5c4308a69806d92a45 \ No newline at end of file +d8e2807e0c261d57e00c887aba4b333ffd83562a1bf230d5a26bacf0379fad87 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt index 5677bb473..18426c718 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt @@ -1 +1 @@ -90cbaafb4a2694392a3b8cfac44e3931402031cc7dfec40c50c3d7cbd5f0c41b \ No newline at end of file +c4af36497f7852167288dc3ac1e4b55956d1b6c42ca46e70cd27bb1ccc05b8bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at 
end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt @@ -1 +1 @@ -4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt index 6198709b8..168b53be9 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt @@ -1 +1 @@ -fb732d9fddca2e574feb08da62b81b28bc53bcf9dfe5a1522a8ffb71ef252d85 \ No newline at end of file +519225de24ab7e47b3551471bab23db242df95467d8bc5652b816d75bc2b10ea \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt index 94ae09a2a..6ac9d23ff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt @@ -1 +1 @@ -6b4257ba1ea6147f1b8bb7bffb48a1fb61d1b3eb51b1c340874756e4ffc52693 \ No newline at end of file +4386995a6a00133c7db276591465ce4ec1f82a0512fc13aeb9bb4745fafa593c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt index a966d3244..c00ab6d8b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt @@ -1 +1 @@ -de7af2c23556cc5dc0e9a4dcaa2df9ba4a70606752e4671dd5b9d5a4d76e5de7 \ No newline at end of file +65d3614abbd2ef389b7cf238f14225f701dc8087775ebca72684cad377953c54 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt index 9b79cc15a..4d04a175d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt @@ -1 +1 @@ -b7415b84d8b0e7f854eeb429f2ca3c71394b5140d8727a259198ad3e98435ab5 \ No newline at end of file +6f0f40cbf909627fa867337174f532d3e179ced4784c7fc2c9cb00ae6193ac2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt index 4e276d96d..315669793 100644 --- 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt @@ -1 +1 @@ -dd9b6f7fec930c53532803207e780b6bed2e5fa78a9c0c93e36150ac8b0d9463 \ No newline at end of file +772280e4e0e6df48fdaa1b3bea8b0dd5cbaf7dad6c90d940fb9d38ade5d09fa6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt index d7f7b18a2..eb0adff13 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt @@ -1 +1 @@ -7bb8a2b2502a471463ad03a6babccb8a2db42f0cbace538187cdc42cf672f3d5 \ No newline at end of file +c1b8c019f1768926fb1763d743b6f1af638d4214fa9152321214c544c752c751 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt index a0a4bdddf..1b08335a3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt @@ -1 +1 @@ -92d3ca6357b660bcf1334e1feb5b6b37f7616ff93ec1a53ea5a94dd2dd47ce97 \ No newline at end of file +999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt index f0876b5b3..88f716dff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt @@ -1 +1 @@ -f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file +839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file +8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt index df6fb86a6..21b305551 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ -0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file +700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt index 89a179d30..b8f4ef5e0 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -de57ef5ee73523dbca992bd101b3d1d176ccd0302317b6af8565a53d1e458be9 \ No newline at end of file +682209c0a8e978802d5c42f97ff15adfb194c128c9b10d89f4eae55bdb7aac4f \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt index 4f6800c87..95c7fa710 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt @@ -1 +1 @@ -2110ebb6abb561e779d61c83f0d5c8580b4c167db46c1ce92f6ce0e75e4a0e62 \ No newline at end of file +0ca9776f46c68e9c47d78262b0e7a676b5b201ebb813b00dd6cdd710477953d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt index 06c2e2c81..5a3b9f807 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt @@ -1 +1 @@ -e4523a4b80f6d91bda08e60737d8d55feb6499fbd5650e10a21e653f89b2a688 \ No newline at end of file +844261737f8230902d017b25798482fe873277e37f04757e32206af29ccc9250 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt index 921f13953..51f7ea2f5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt @@ -1 +1 @@ -3fc0f07cdf416f4962400bce4989ce1d8133a51df64f85e39eba593d301c104c \ No newline at end of file +15daaa1fb802e63eb7201d9224130b2d687280387657cf94c22b712a5ddfde9c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt index 22e5f661b..b9524442e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt @@ -1 +1 @@ -e99f0bbda5d49a054d0abd2b59c60c1e02a4fbc2ea84bf54ca2ec398c82ba1ca \ No newline at end of file +1b3e44536900198ab3efc2e63cae09846206db37f08f7b0ac3fb11360f59b845 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end 
of file +8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt index fc6df8003..04a94e06c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -04a47b3e6a28ad406660a0b13a9bb86de942c8058abd14e7f9ffdf52c75c884d \ No newline at end of file +bad9511933297116e3f564919a85809c5c29647d857a25c4a19f4a67e2c0b51c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt index 64e121445..d15259c49 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt @@ -1 +1 @@ -0621664d615e3c9d853112c02ae864f2d69542a45086eee97ddcd3e82bebddd6 \ No newline at end of file +a6658fa2fb342f963e549f08c1b73729d662eba241da80438af7d5534dbd6003 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt index f0876b5b3..88f716dff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt @@ -1 +1 @@ -f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file +839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt index 690b123d1..efa879b36 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt @@ -1 +1 @@ -f74c5f134fbcd6078b4299d3edb74995ec2517b5f16472e409844e6a38edce19 \ No newline at end of file +e98172b16f1b0a5022e17341958c7348d3c38f47fbbd435acea6cb34167725f0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt index e30fb0baf..7c7251b43 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_0/graph_hash.txt @@ -1 +1 @@ -dcd5abc9718b27b6f605484f99e90ce1a8d7ade48c343d11c49b03f022454574 \ No newline at end of file +bd7f2299946fb0d8475e69b23a3d4e9a8cf7570bfb949e741618c8f0f6ab16f9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt index 8c9b2b120..1988a5284 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_1/graph_hash.txt @@ -1 
+1 @@ -905a514146c90ef236c43b3359c0945b67c9f3ddc63042b50bd2aa47acea4ebc \ No newline at end of file +a09afe2116cf927b22c5bc091fbb0d3336e4b685ced53e94db2fd8ab509f7a65 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt index dd073cdbd..0eb7b765d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt @@ -1 +1 @@ -1579552a399f5b61d19d64b39a0d675865b1c5d0f8673f65021f53d8d7e39e2e \ No newline at end of file +87f1c9d15791927678923354dcfc589bd225484f29e377f70153217772641dce \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt @@ -1 +1 @@ -4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt index 64e121445..d15259c49 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt @@ -1 +1 @@ -0621664d615e3c9d853112c02ae864f2d69542a45086eee97ddcd3e82bebddd6 \ No newline at end of file +a6658fa2fb342f963e549f08c1b73729d662eba241da80438af7d5534dbd6003 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt index c1f079b97..bcf85f70d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt @@ -1 +1 @@ -5b39a063673d2a842c85a42f33c248189fce8d0384b21578827b861bc8710041 \ No newline at end of file +79d36f8b3b83773115a0eec5c7e5e317486cffc279d107ab5b11f2f7c791fb82 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt index 90e73cbfa..a631662fb 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt @@ -1 +1 @@ -0a0002dc4306801597d8ccf7195c18d5e87a708a3d9df65495993580fffe3755 \ No newline at end of file +4a4130d505fd79b75ece8660de0a133444eb6d64e011fadfdb7a93853cd87181 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt index 06c2e2c81..5a3b9f807 100644 --- 
a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt @@ -1 +1 @@ -e4523a4b80f6d91bda08e60737d8d55feb6499fbd5650e10a21e653f89b2a688 \ No newline at end of file +844261737f8230902d017b25798482fe873277e37f04757e32206af29ccc9250 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file +8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt index 504c9bb0f..2642b5bd2 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_3/graph_hash.txt @@ -1 +1 @@ -ecf678dd2052f1907293d06bf3fd26d9af9924bd94292392430215512db3f6b4 \ No newline at end of file +a622f168876dc1c97085651103b0da7385b7614edb98f5ff44258705ba2db1d4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt index df6fb86a6..21b305551 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt @@ -1 +1 @@ -0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file +700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt index f48f7fa71..0f1417140 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt @@ -1 +1 @@ -8db43af37301e6c5df30b75f04449dc531bb9453471e6360b201d2b66ed93ff3 \ No newline at end of file +4e12c05fa6e6a5e1e133bd3dbc35b49e9bfa6622e2eadfc18e6c6100e8b158af \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt index f0876b5b3..88f716dff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt @@ -1 +1 @@ -f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file +839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt index b3b436c8b..059a4fd8e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6490cc553bfd8efe25a6bcddb96022c016e6b36651b6c1cd94673f681bb3e5ae \ No newline at end of file +5d14e60cda05dbf85d78d15eb134fb4ddb2b95e3403b970f130085fb0d805e6d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt index f0876b5b3..88f716dff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/graph_hash.txt @@ -1 +1 @@ -f34aa271170a2591bf3e52dc6e22e71b666cb770bb99ef237e369b16b462bfac \ No newline at end of file +839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt index 03a7b6b29..ba562ca44 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_10/graph_hash.txt @@ -1 +1 @@ -576e796dddb1c9463be097477eb9b76fc566d49529154ed8afd0bc3681a6d4f1 \ No newline at end of file +5183b74b324dbafc4e359508b9781f665b284b8f9f5c1c88a2ab1b21f81d4801 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_11/graph_hash.txt @@ -1 +1 @@ -4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt index b543568a7..46e554be1 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_12/graph_hash.txt @@ -1 +1 @@ -42e184615f6f96cecc56992123e65e294dfcbd7742d8a37b4b66c28fff78eae8 \ No newline at end of file +48c8fed96995a8800e769458c3459c34a78e41ed051e581cdf8bf11943aac4cd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt index 48f9cc2af..4e31d2a95 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_13/graph_hash.txt @@ -1 +1 @@ -8c3a658f63f5d96dc089da6403b65de7b000c817cbbdf585fcbddf0b4ead5739 \ No newline at end of file +99b4af063b618d68715794ee5155f75232952db9337134de2e054f8e5cd74929 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt index 19a6a7486..11d9c775b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_14/graph_hash.txt @@ -1 +1 @@ -f00b56f762b48c06ba6bd348567cc70cacaf2c88f5db73f5a23278722ad0441d \ No newline at end of file 
+0a9ec7d1012241a6f30c84cfdb7c5571497d5d1333c9ef40d28b3f975ceb5116 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt index 68a340abd..7c2a2166f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_15/graph_hash.txt @@ -1 +1 @@ -ab6d73d0a64827dfc3d86c89e20b92de656589ccd700aa1d955ce4d12d055477 \ No newline at end of file +800267164f1ddbe2a67c4a61861628fb153501260cec5a441518cfc8acb8a219 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt index 64e121445..d15259c49 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/graph_hash.txt @@ -1 +1 @@ -0621664d615e3c9d853112c02ae864f2d69542a45086eee97ddcd3e82bebddd6 \ No newline at end of file +a6658fa2fb342f963e549f08c1b73729d662eba241da80438af7d5534dbd6003 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt index 47096223e..7d5235887 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_18/graph_hash.txt @@ -1 +1 @@ -34be229c6a80fe6038498c3f804db3a9ddb98cc1739d81aa46402cdec43f7140 \ No newline at end of file +133047a13810bf52476dc78c99770e02c9acbae98f33644c273e8cf924b164e5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt index 24bfcda48..96abe2ee5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_2/graph_hash.txt @@ -1 +1 @@ -6118b74c6cea4582fed887d697e8ba8dc4d4b2ee9d3ce0ac46f01fdaefb305ca \ No newline at end of file +48ed6effb27eea4685225ce59cd9380120ce8bc9ff2442a3377f904b15410e20 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt index df6fb86a6..21b305551 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/graph_hash.txt @@ -1 +1 @@ -0edecae6372779122c0886fc76c53f28d45d32c115180d22e204d37f4f709ce2 \ No newline at end of file +700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt index 6198709b8..168b53be9 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/graph_hash.txt @@ -1 +1 @@ -fb732d9fddca2e574feb08da62b81b28bc53bcf9dfe5a1522a8ffb71ef252d85 \ No newline at end of file +519225de24ab7e47b3551471bab23db242df95467d8bc5652b816d75bc2b10ea \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt index 2f9daab91..27bd82e0e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/graph_hash.txt @@ -1 +1 @@ -237f1666acd796c265f31ddd9bc78ab95e4da70095a07017ec39287d25ff30bd \ No newline at end of file +8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt index 1f67a84ff..f2b2bee68 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_7/graph_hash.txt @@ -1 +1 @@ -a7e683199f714648457015168b4be16a53e004c283c3d7ca5ebc5418fe27ea75 \ No newline at end of file +188e90b170e265edcb85aba0eac97e9e373e306bb64e3b725ae31e635f48088e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt index b50f88ce3..dbf341ae3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_8/graph_hash.txt @@ -1 +1 @@ -d807d9117d9ce029721550bdd5354682553d85490a71e92888f424c4238f045b \ No newline at end of file +4d9c4fdb6f89872cff5a1d7b782191d3432f8c3de1e21cadd795d8358285930d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt index 06c2e2c81..5a3b9f807 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/graph_hash.txt @@ -1 +1 @@ -e4523a4b80f6d91bda08e60737d8d55feb6499fbd5650e10a21e653f89b2a688 \ No newline at end of file +844261737f8230902d017b25798482fe873277e37f04757e32206af29ccc9250 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt index 44b201d32..9f2c25298 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ -4e7e6ee2874f6062a0fb0f9f01605da0630c431ba471c345582d30d102a90ff5 \ No newline at end of file +29275b179bcf74ddc948ceb4a1919f72d6d4d43dd851f10082ba5173cb3c88dc \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt index c22f27fef..d7d509a2f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt @@ -1 +1 @@ -7dca5f9adbbd5ed82b1a226d0df398f3eb50454e9abf47936b65d6ecf998ef9f \ No newline at end of file +2474d5c0140e3ca8b342671eaefb285e6ee8c6c96b775962f701e40c7ee40211 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt index a28abb868..ef13ddd7e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt @@ -1 +1 @@ -f60d5bbaab8b228442ebb3c058e4d6372677794bde1ea09b0e24c99fbe4ce211 \ No newline at end of file +5a0efce50442936c174003f6338fd05f5366aa6cf630787a35d4c3d9fd30bc22 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt index f7e774060..f1aa15364 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt @@ -1 +1 @@ -b3c06a5ff7d63f2fddf054f6dc5423b30986582d3ae6be2b7b5c8465e31f6fac \ No newline at end of file +09f7308dca33192a680fa6e253963eb73d874927eb1eaddbb7fe31eeed376574 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt index a0378c0af..a40003657 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt @@ -1 +1 @@ -232d82e8296e7401dfecdd4d02b443354860807e4c42c73d06bee142cc219425 \ No newline at end of file +ac81b9367304363d1522da93dc3448033678191263a3117ee14573fd719ce66f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt @@ -1 +1 @@ 
-4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt index d7a44c4ca..f33996b4a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -5866f5c88d45db322dda97cc81b9a2145ea211417cab73af1890d9d8753ae5cb \ No newline at end of file +331d31c12329e180f8072f92c095e5fa3ed0d1dbf984c2e334eff6b5b3862c64 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt index 3b4c91958..303d49362 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt @@ -1 +1 @@ -2c26a537c16306bff9e45eef0a8e89a4bfc4ef2474aa9287527ef7c6aae5a086 \ No newline at end of file +0d838ae9c799b8f1ab11dae768474bd1a90cb99967969c129656d57d09380aa2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt index 1dc0e3cd9..6fe7297b8 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt @@ -1 +1 @@ -1ebfa0731cd404fc0d11b70f0637266da6c267aa7f705df9df9bd0ea79785b39 \ No newline at end of file +8b11cddc56e8bf2fc7551237b756b7f8f8a4e9dd2f556be8d328af948ebf41da \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt index a0da828a8..0cf23b344 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -92e7456bc3fe2575d932c96b8342e31711029bd21b23c8a9bf17b02309d4c336 \ No newline at end of file +82564cb272bfe4052ec183285332a6999eb8f2e9097ea2f1faf3c1a1650939fa \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt index 26ca02140..a11a69efc 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt @@ -1 +1 @@ -8a2815233af5ac01ede0f5b0291f7fbde332769a8e8425665222d469c075cedc \ No newline at end of file +e51621c1344d3da65f6a657749e919a558ba3fbedad58e6e8c7e6f48e6194981 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt index 084577211..4813c5fb9 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt @@ -1 +1 @@ -e9163af023b6284e12d8e094164eb61c7fb313e0b3f9a53ed5592562b0ac28a6 \ No newline at end of file +0ade118bf5113a2f63aa1b27c2409d65cdb05050be009516c3881c1dd15b2c66 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt index 98dc188be..c0183384d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2f7e328616242ad3e2a3699e8e0aa947042688f032eb97cd618d4e16392e47fe \ No newline at end of file +2fec9d183180aa423511bfffcc1407e65b5300001d488dcd2a33b75c4617dcce \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt index 67b2ea2b5..aee9da3ec 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_1/graph_hash.txt @@ -1 +1 @@ -1b6cda785ee348e006a4ad4adf240bb805da997f66d069af736c20f1756774c5 \ No newline at end of file +d70c95cb38f1691238207ff482ddeba1213eb8b254421a15420ab67275df40f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt index 550f78f19..12d3113c2 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_10/graph_hash.txt @@ -1 +1 @@ -602c7b60afd4d84c2495c136df21f6a27d2065e01dded183accc3ee4e4ff58af \ No newline at end of file +8638d8db5f2d58476e50d0bc965b2f8c4869ccc0bec1bc235abea448b98e3e16 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt index f7e774060..f1aa15364 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/graph_hash.txt @@ -1 +1 @@ -b3c06a5ff7d63f2fddf054f6dc5423b30986582d3ae6be2b7b5c8465e31f6fac \ No newline at end of file +09f7308dca33192a680fa6e253963eb73d874927eb1eaddbb7fe31eeed376574 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt index 26ca02140..a11a69efc 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/graph_hash.txt @@ -1 +1 @@ -8a2815233af5ac01ede0f5b0291f7fbde332769a8e8425665222d469c075cedc \ No newline at end of file +e51621c1344d3da65f6a657749e919a558ba3fbedad58e6e8c7e6f48e6194981 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt index ef3c2f741..3099ac657 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_13/graph_hash.txt @@ -1 +1 @@ -6d51865e3c32cbca7e4a68c8f2e502f2b509535978ab0d3bf37be09133954c81 \ No newline at end of file +decf014943e9d1a959f5378161bcaf6185708c0146e74a871cb20ce9950a3e6a \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt index 5cab9badf..7b322f770 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_14/graph_hash.txt @@ -1 +1 @@ 
-989b2703fbe877de377ed3188931f22704f691b68378de50cfb501eb2585215a \ No newline at end of file +ddebf9b46c8835e3087454a08d94985a05999f4d3a4b531ddae8751a76335266 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt index a0bcabb82..efee688b7 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_15/graph_hash.txt @@ -1 +1 @@ -ec435c57522cff4aa2e0b88c81cfd985851c951e344bf1f29bad03418a045d50 \ No newline at end of file +47a8dab848c8682860e7f3d01d559b111f25912e5ab79517b35000cc5df6986b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt index 54a609c33..510fac8ff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_16/graph_hash.txt @@ -1 +1 @@ -a999d5921e887e45f1813dad835878d69c80f2ace07b55d5437ad7d4132486cf \ No newline at end of file +355f494a39c48b6df4cbaa2fd5b171f78e285d9d215a0a583ad8f6f31d6c607c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt index 463698320..896fa94fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt @@ -1 +1 @@ -611aaf40802ae728109fb4c99495540bcf765813a1a25b37c13dde4b4b8683ee \ No newline at end of file +66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt index 324872d83..bfa76b594 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt @@ -1 +1 @@ -9a8c4e1b7378010caa6e7b8e301735887344173917cbd1df9b66817f174a100c \ No newline at end of file +fa6eb9b0757bd8e932b3ff14c3ba9c809889599d2cfbb8e5c4dc486ee670a158 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt index 3b4c91958..303d49362 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/graph_hash.txt @@ -1 +1 @@ -2c26a537c16306bff9e45eef0a8e89a4bfc4ef2474aa9287527ef7c6aae5a086 \ No newline at end of file +0d838ae9c799b8f1ab11dae768474bd1a90cb99967969c129656d57d09380aa2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt index 3a428f405..b8661fa05 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_3/graph_hash.txt @@ -1 +1 @@ -b624ace49d286bb7132767299810ec40d3177bfc836e08f1f20bb12437c85436 \ No newline at end of file +8cb0fc6cdb78d86cf6d256242a60b5a0b1b34ddc73b5350534f200e4488d756d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt index 4e14ba7ec..76cbf7d3b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_4/graph_hash.txt @@ -1 +1 @@ -5cbc5021225385505f1108fab63db5c78abc03b8e6dd37e4cd359d0c7f8a7039 \ No newline at end of file +c0fd4c9c946d7a6abe3f2da69dd6c94829ff29410aa4f817db8d26eac5a3877b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt index 4396963bd..41609984f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_6/graph_hash.txt @@ -1 +1 @@ -7e641c3cae870d8e911f0ad7dadcaeff908d67ccf007dae85cd9995bbaf2bbd3 \ No newline at end of file +5153ff0b30f7913379eb0cdf044d207016d3418923cee34fae2a9bf5ba36df9b \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt index 64ef15f84..182442b9d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_7/graph_hash.txt @@ -1 +1 @@ -b28bf90ac95a22521022a0ac3c4a08d766b56e12bda87396cd4425fcb91c1c29 \ No newline at end of file +2485a2b54fab16a901a7ed38b4f7e03b3c5d5ee99316309319a1704e31fd9041 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt index e4d87c73f..1c3bca949 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_8/graph_hash.txt @@ -1 +1 @@ -3c00943b4a99c0dabb02d091e9638b26cae706e7081d61ba73c72e9e733042bf \ No newline at end of file +9768a4faaf5686b3c06cb3af386b092f6fdadf4794765ff5cd285d8602bc2314 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt index 0e41bf3ac..ad81bd8c6 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/graph_hash.txt @@ -1 +1 @@ -56b33ce8013681e5774a98f1aef660acf3db6206cf15004487eb3e77248d98b0 \ No newline at end of file +534511a45272e1a9267697d5f10a3014cf1cc0c049585f9685655501ff68f0b9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt index b08da1263..82d83ca0b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ 
-4d26d0110d82b3e8a1ea0c4059adeb25427870d4527791748b1197dfa32e25ef \ No newline at end of file +2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt index a90272c96..cf9cecf24 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -32cd2923bc0a61a7c5f1bc48378db5c4de25399c2f9388132502f296c7a71760 \ No newline at end of file +b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt index 9b36a0018..7376f9083 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt @@ -1 +1 @@ -82e75ce23c7b46c514fc50a98c682d1cfca4b1645d371c225ab17772ef29a450 \ No newline at end of file +ecf2fbd10676ba1da33488b082d96ec1f1edbf59f2c45107f85006bf675778d2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt index e6d623fb0..30aa43ee8 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt @@ -1 +1 @@ -bf1f631da16bb2dc01a60c71c55618c40ceba9451302c9c325fd97977839b501 \ No newline at end of file +88501beeaec43b439a11d2de2e1804c791b519dccd7cb4a81a7324f69c27681f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt index 496563ea1..7248f3b80 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt @@ -1 +1 @@ -786abe68abca12db6864fd56c948ebc989117954d84f405345ed2db9d4231827 \ No newline at end of file +80c6a3012fae16e53b556d8b6ef2a40e2378ccb66ef0a81269f362d7dab93afe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt index 51cc26f7d..d26369202 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt @@ -1 +1 @@ -0291aa8f0123c85e18600babdb8ed46589822734c20e7a75cb5b99f64ad81067 \ No newline at end of file +c570d43f53acac4f3957a2ca875002b813fd4f0945c65adb5e86d7b2292f59e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt index 889a488e6..18e3bbc11 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt @@ -1 +1 @@ -791525d2a1842eea753f34e744dc5933e8bf52c2061a0c0c9d7772ac9dd0afe6 \ No 
newline at end of file +bbf0a5774c2acf6ee92cd237b35e2c556a5dfa443149969bd012be16856599a9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt index 61ec9b696..3060b8e66 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -f7d2795b3f58b48affda49a5b442b89fd5091c1bc54718fe79a24f6c241dbf36 \ No newline at end of file +cfd03838c2e98747bf694d5196a2c44d4f61ead673e0d40c68b441b98f84ef34 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt index 219233c00..352c4c248 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt @@ -1 +1 @@ -2846bd6e07113ea79b009a65c357a7bcda1ab2bbcb3cd794972513936663d342 \ No newline at end of file +16190cdee49c5a19612aa893cd02068a0d7cda8a3ce3ab04b10db216f5b07563 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt index 0e41bf3ac..ad81bd8c6 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt @@ -1 +1 @@ -56b33ce8013681e5774a98f1aef660acf3db6206cf15004487eb3e77248d98b0 \ No newline at end of file +534511a45272e1a9267697d5f10a3014cf1cc0c049585f9685655501ff68f0b9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt index 65c76e393..7d70180d4 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -8f0135cbbe42b981c8797a64fd4a5ae056bd1fb7b99b39eaa845c7b47dd391d9 \ No newline at end of file +6a8e990486f4d85b4371bedc5cecd294f81764cd153a3e25085e136f63ec707c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt index 67621f064..951232222 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt @@ -1 +1 @@ -17ecceda6c26b3eed13c84f7e0875457f7c0022056dc13a8ed61c7b6e215b286 \ No newline at end of file +7f6edfb359b9bae12be4cef48e9822a33ba3dfa983349265b87f2f8e000679cb \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt index 1dc0e3cd9..6fe7297b8 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt @@ -1 +1 @@ -1ebfa0731cd404fc0d11b70f0637266da6c267aa7f705df9df9bd0ea79785b39 \ No newline at end of file +8b11cddc56e8bf2fc7551237b756b7f8f8a4e9dd2f556be8d328af948ebf41da \ 
No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt index c231ef3a7..1ba099c96 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt @@ -1 +1 @@ -152334c04694bdeb79b77702f66c2b2b0bf6070104e39ed497a16b2b7c9bf19f \ No newline at end of file +a1e871dca6015fd870e153211f3cd48512ab629d616889d648d8f93c88df3e51 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt index f7e774060..f1aa15364 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt @@ -1 +1 @@ -b3c06a5ff7d63f2fddf054f6dc5423b30986582d3ae6be2b7b5c8465e31f6fac \ No newline at end of file +09f7308dca33192a680fa6e253963eb73d874927eb1eaddbb7fe31eeed376574 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PatchTST/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PatchTST/subgraph_0/graph_hash.txt index 464aeeca2..cd9c38b84 100644 --- a/paddle_samples/PaddleX/PatchTST/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PatchTST/subgraph_0/graph_hash.txt @@ -1 +1 @@ -e1e0371195b2a94d158a1a07934b265244eb4ce9cd08c18587a5fb3ce7f74a85 \ No newline at end of file +d22f6e45f20477954b8893f9897e6a73edefa07c03f82daabc3176a17e043c16 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PatchTST/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PatchTST/subgraph_1/graph_hash.txt index 7ff57df72..f04924629 100644 --- a/paddle_samples/PaddleX/PatchTST/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PatchTST/subgraph_1/graph_hash.txt @@ -1 +1 @@ -7aac90d8da748b2edbe77b7967197c36163b379a5bf8b6d41ef58f2fdfea01aa \ No newline at end of file +b5d8f356d03ab71652d28162f048c78b30cfa50fb83f81a7629127cabbf05fd8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PatchTST/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PatchTST/subgraph_2/graph_hash.txt index 154b49b45..9c7eb4ccc 100644 --- a/paddle_samples/PaddleX/PatchTST/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PatchTST/subgraph_2/graph_hash.txt @@ -1 +1 @@ -a2029554f44dc1aee5d4e556e98186dfa3d9ea8e0f4455251a25fe686e852fe9 \ No newline at end of file +8126faf6ed1764908007465a9ad7518174786b9b3d6b22e5d490414047d43b4c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PatchTST_ad/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PatchTST_ad/subgraph_0/graph_hash.txt index c887be3c1..c480fa3bd 100644 --- a/paddle_samples/PaddleX/PatchTST_ad/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PatchTST_ad/subgraph_0/graph_hash.txt @@ -1 +1 @@ -37ca7794364aa4f02a2dc0f4c8c6692d10b3f8b4f536dca0a075231246c26d22 \ No newline at end of file +f875cdca0fcd412df728b051624fbd9cd89c091bc45c4a00fdecad0dd8099832 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PatchTST_ad/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PatchTST_ad/subgraph_1/graph_hash.txt index 1afe342c3..a8b3ac0a3 100644 --- a/paddle_samples/PaddleX/PatchTST_ad/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PatchTST_ad/subgraph_1/graph_hash.txt @@ -1 +1 @@ -7a2368a57e593a87aa49ed083e7949b776ab2239f208e8db9506781c813ba826 \ No 
newline at end of file +1f66d3723c0800305bfab9b3d34300cf3fdd8c6566a778078f29bc6201f1daad \ No newline at end of file diff --git a/paddle_samples/PaddleX/PatchTST_ad/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PatchTST_ad/subgraph_2/graph_hash.txt index 0a64dca4c..26da0d227 100644 --- a/paddle_samples/PaddleX/PatchTST_ad/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PatchTST_ad/subgraph_2/graph_hash.txt @@ -1 +1 @@ -d227b51e80ff54f666749cb008097e3698aeba36b766d220b436ead91a143700 \ No newline at end of file +1bc56c5eafcb3a9455e13e1064cdd089379eb1d5b1ed7fd271a212eee8a79617 \ No newline at end of file diff --git a/paddle_samples/PaddleX/RLinear/graph_hash.txt b/paddle_samples/PaddleX/RLinear/graph_hash.txt index 00309b1b7..50a64ec1b 100644 --- a/paddle_samples/PaddleX/RLinear/graph_hash.txt +++ b/paddle_samples/PaddleX/RLinear/graph_hash.txt @@ -1 +1 @@ -00ae44091843c2a9956b30b205ce3391d40303ff99f31f0474beb42f37a083d1 \ No newline at end of file +c0ebf9b3287cd7fc40e32d0853552d5800ec5d14059de0831a17c28dc82ab39b \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet101/graph_hash.txt b/paddle_samples/PaddleX/ResNet101/graph_hash.txt index e57f7f114..c55532a10 100644 --- a/paddle_samples/PaddleX/ResNet101/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet101/graph_hash.txt @@ -1 +1 @@ -861fcf0fd314d9bb1a29c6ef4547d599e0bc9e683e8c48a56a4c6ad963307ee5 \ No newline at end of file +0a180e097d236cf7b5945267431964cce795157ef5e9061cbfe492f08e103e35 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet101_vd/graph_hash.txt b/paddle_samples/PaddleX/ResNet101_vd/graph_hash.txt index ec24f0d21..dc55e1a7a 100644 --- a/paddle_samples/PaddleX/ResNet101_vd/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet101_vd/graph_hash.txt @@ -1 +1 @@ -8eaf57cd4b855a0c9e066b3be66d80f01782f388309399a74f8bf8925d5644f0 \ No newline at end of file +fa8a895a6b9cb4c64a7378bc53b10c539560b28e7ca9529ee290e254f626881f \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet152/graph_hash.txt b/paddle_samples/PaddleX/ResNet152/graph_hash.txt index 46ce4c5cc..a307ace5e 100644 --- a/paddle_samples/PaddleX/ResNet152/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet152/graph_hash.txt @@ -1 +1 @@ -d97fc8703002b45bd62a3f503c00736b155105228445621f9f70d041e7bf894c \ No newline at end of file +11913d99bd2de706eef048fb18047f49efc806767e791e9b42f909eb55b28230 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet152_vd/graph_hash.txt b/paddle_samples/PaddleX/ResNet152_vd/graph_hash.txt index f141ad2dc..1c72b70dc 100644 --- a/paddle_samples/PaddleX/ResNet152_vd/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet152_vd/graph_hash.txt @@ -1 +1 @@ -df830c493212bc2feafb4980c28a563351a14e993533503ccbbbc1f85ad7c4fb \ No newline at end of file +e8a35930dbf04a41c4c426ff3d613afa69a9efcae1183e12c790181b67badccd \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet18/graph_hash.txt b/paddle_samples/PaddleX/ResNet18/graph_hash.txt index 8c26fb98a..d4256c02e 100644 --- a/paddle_samples/PaddleX/ResNet18/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet18/graph_hash.txt @@ -1 +1 @@ -718a985e00d74c398626c25cb235d95fbd8fded091a7f4940a6052e6b8923c8e \ No newline at end of file +043ae71c1f4c90265b53651a5a68c23877fb34cfd470a85ef9644bd50beb249c \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet18_vd/graph_hash.txt b/paddle_samples/PaddleX/ResNet18_vd/graph_hash.txt index ce10abe5a..560fc1d36 100644 --- 
a/paddle_samples/PaddleX/ResNet18_vd/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet18_vd/graph_hash.txt @@ -1 +1 @@ -16a68c3f7b76d0eb913ce014a2a3be0dac329273d7148230beb55424db40e83a \ No newline at end of file +54720c584c594ee340ae3710142aa98a7a9312b0d888b05305a0c1fd7049f934 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet200_vd/graph_hash.txt b/paddle_samples/PaddleX/ResNet200_vd/graph_hash.txt index 4e087680f..ba21a4941 100644 --- a/paddle_samples/PaddleX/ResNet200_vd/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet200_vd/graph_hash.txt @@ -1 +1 @@ -5e9fe71ff09eb3f20c970fef04f6ea2b7b824d800cbe150498a95636562aa711 \ No newline at end of file +899efb1fce28b9c3c7b26c1723d6dcc1fc6f0d79ca285c93114418833b1638fd \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet34/graph_hash.txt b/paddle_samples/PaddleX/ResNet34/graph_hash.txt index 297e6dd55..7d836e8d1 100644 --- a/paddle_samples/PaddleX/ResNet34/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet34/graph_hash.txt @@ -1 +1 @@ -eb5cdcbfdaadda41aa63bb20da163b569cef0715d8b17da330351de575ee1dd4 \ No newline at end of file +bde227a888748f9d572427136dc3f329659268f50db89d5d01a92d34c91f1647 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet34_vd/graph_hash.txt b/paddle_samples/PaddleX/ResNet34_vd/graph_hash.txt index c2cd5a7c0..ffc58cdda 100644 --- a/paddle_samples/PaddleX/ResNet34_vd/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet34_vd/graph_hash.txt @@ -1 +1 @@ -6845da19452a2cee7d3109f21c4683a11c88415912e404534eb27280f0912de1 \ No newline at end of file +677c3d978c282cc69db2177cab8e7692c68a62dbe36735bc9ad57fb1eb7549c3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50/graph_hash.txt b/paddle_samples/PaddleX/ResNet50/graph_hash.txt index e2f2ae9f7..924772358 100644 --- a/paddle_samples/PaddleX/ResNet50/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50/graph_hash.txt @@ -1 +1 @@ -4f97aba6c36e8edba9fbc83b6a29472b483b8fe5b821ff5a203dbbfc8bcad2dc \ No newline at end of file +6622b9ebdf8fc13340f4e83c19fabf8a63ab4847988dcfd2153f143853f4f62d \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50_ML/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ResNet50_ML/subgraph_0/graph_hash.txt index 84e3cc9d6..12039a34e 100644 --- a/paddle_samples/PaddleX/ResNet50_ML/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50_ML/subgraph_0/graph_hash.txt @@ -1 +1 @@ -0a6d36b8da86093a4bba62659b922d51e4c77ecf5cda0fdd6d20188705c3424a \ No newline at end of file +d71d94f4374d84aca659dcfc110007bfaffaabb3c722665e2bf8140f6376828d \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50_ML/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ResNet50_ML/subgraph_1/graph_hash.txt index ff44604f4..d12ff4288 100644 --- a/paddle_samples/PaddleX/ResNet50_ML/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50_ML/subgraph_1/graph_hash.txt @@ -1 +1 @@ -3cb088ac381c9d3059886468e9ec26bacaac3a0f97ba32e32f862e9f5dcdcb0b \ No newline at end of file +7f05641d0d4a560d1753f8dd02bf7112a72fd46ddfefae110fb287ecdd957140 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50_face/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ResNet50_face/subgraph_0/graph_hash.txt index 7258dad10..108f049c5 100644 --- a/paddle_samples/PaddleX/ResNet50_face/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50_face/subgraph_0/graph_hash.txt @@ -1 +1 @@ -b0ce0a8a07430a2ef3abb2d35f1a11a77f909a966f8e7efdba1bb3ad6226ae00 \ No newline at end 
of file +7cbe8c82ce3f733dc5bda4cb05aae2238a5be04a950122dbff8cdfbb4e13a691 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50_face/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ResNet50_face/subgraph_1/graph_hash.txt index 51e241382..cc3d2b67b 100644 --- a/paddle_samples/PaddleX/ResNet50_face/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50_face/subgraph_1/graph_hash.txt @@ -1 +1 @@ -fe22c5195c53c70abb8f7c3e422ef7016d7a92355ac552477ed17100e9b1ff55 \ No newline at end of file +77f30644af7b2e81874bc6b58d5614f2a616185a8c93328bd7bdad62caa330b1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50_face/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/ResNet50_face/subgraph_2/graph_hash.txt index 403d2b9d3..dc5fe2dfc 100644 --- a/paddle_samples/PaddleX/ResNet50_face/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50_face/subgraph_2/graph_hash.txt @@ -1 +1 @@ -26cb268c001f2b8364d2f8c57f2a655b4a15a40055c0501e73a0ee8dceeaa405 \ No newline at end of file +06f4b4eeb7d3944892ea0c1b991bdce35deb923f7748d21aa16ff01ecdd48289 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ResNet50_vd/graph_hash.txt b/paddle_samples/PaddleX/ResNet50_vd/graph_hash.txt index 621d8e451..b8c94115c 100644 --- a/paddle_samples/PaddleX/ResNet50_vd/graph_hash.txt +++ b/paddle_samples/PaddleX/ResNet50_vd/graph_hash.txt @@ -1 +1 @@ -42ef9b59d8cfff69974dcb01b0938bf9cb614b82cd11efb35e8901e9a834085f \ No newline at end of file +b08fd4436391acb5a4dc1566a4d527ba0e88c6a9f58cf58cd578b700e22a5c9a \ No newline at end of file diff --git a/paddle_samples/PaddleX/SLANet/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SLANet/subgraph_0/graph_hash.txt index a4c380396..7d84964a9 100644 --- a/paddle_samples/PaddleX/SLANet/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SLANet/subgraph_0/graph_hash.txt @@ -1 +1 @@ -4ffe5b40729219374931516c52b52e0c8714a5a418f8b1d1c0c861778521a3fe \ No newline at end of file +d7927565ab066e37161292b524b45d046e874cfce6ceacd755537cdde306f2bf \ No newline at end of file diff --git a/paddle_samples/PaddleX/SLANet/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SLANet/subgraph_1/graph_hash.txt index 5b411d41b..5469a670e 100644 --- a/paddle_samples/PaddleX/SLANet/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SLANet/subgraph_1/graph_hash.txt @@ -1 +1 @@ -c01eafa2193cc7a7f921595f241939ece307a51df8a08752715580b9baffe950 \ No newline at end of file +770c75099b3a9a2fb08f2934dccda1a3d26c2866c986730d9a21cb34a1ffbe10 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SLANet/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SLANet/subgraph_2/graph_hash.txt index 5db56d3c5..dca653fe9 100644 --- a/paddle_samples/PaddleX/SLANet/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SLANet/subgraph_2/graph_hash.txt @@ -1 +1 @@ -a26859fdc73ab9c62449dc1fdf50ab5b4e7bb5b758df8d8c9c3b75a9874b4e75 \ No newline at end of file +d79f732996df8eec8f76cd6b6079521d8a81fd2593c5b4872d3cdbf0f2e2766c \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt index b9ddde7cd..d119e2a48 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6dde9b3999847b971fd8655181659e46538a8e2213a97717d67b09d9fdacdcd0 \ No newline at end of file +72cc96d6940f94ee0d60c4725e0c0d91026fb73ff64cf5dd367c7f45263bce0e \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt index ab134e425..d8543b708 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_1/graph_hash.txt @@ -1 +1 @@ -e5b28d9563e0c8c0b6c97c531d1648c40841384f325ca7d47b87502f67952c8e \ No newline at end of file +e0a9c8f47150c7f95309893d60bda0901d45e6cb361ed0fe67dfd21553a072a1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt index d65f184ba..4c2baddd8 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_2/graph_hash.txt @@ -1 +1 @@ -2e6d0097251e1dd497d96c82ea9bff6c91bfae2fb5d733a187b0334611f12364 \ No newline at end of file +84ba7e1d7c567111c8854836d139b4604bf49c153a61df699935a7264379cbf4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt index 13132b8ae..d773d0cc6 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_3/graph_hash.txt @@ -1 +1 @@ -1ea9f83c910201b9c22d32433c75a7d47d1a11f15bc12245cf2788eeee5d17d5 \ No newline at end of file +142da3bc3470c98073f674f7f2767dfbb0e4bade7acabc0662182afdf6aba707 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt index 3366fc57d..091cabef4 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_4/graph_hash.txt @@ -1 +1 @@ -5f4b39704b823bc0571121283ed212cdeb399e5816fcf0142365b58bbdf77174 \ No newline at end of file +db226e36dcb1536b05ab15b0c103cc9acb829c03c531bc5dc49ccebf03e47cbf \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt index 3d610d869..4ce973be5 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_5/graph_hash.txt @@ -1 +1 @@ -892a21687443236e06b6ff04746d6c0d8d5387b64dd201e8784c838088358399 \ No newline at end of file +1d5b7ab19389dfc9f8d974af13fb2b33369dd88ad51a6a13286b81889c851949 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt index f0746bdb3..2ad639f52 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_6/graph_hash.txt @@ -1 +1 @@ -ceb8e72515b1bcba0a48a8ff9e1105f8f4babee8bb743def723b163543be41f5 \ No newline at end of file +52917e7ee86ee0db45af6f574fc2edf44299fe15e1b1d9881813080214fc3c8c \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt index 08841a320..dafa2b912 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_7/graph_hash.txt @@ -1 +1 @@ -44b0e5a0abd31e56b8f6b2ef348d91bd50489c26e6a903a4aaf04592b7055754 \ No newline at end of file +f5160012fd6e35de357abaa7d8ed12535c586eab67d5af6fd0d934a5adde5c5c \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt index 028cfcf1d..0c5caab00 100644 --- 
a/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_8/graph_hash.txt @@ -1 +1 @@ -7beb3812b29fbb67389359ab98b6d74fea785189036c6a05cc32bf8079176365 \ No newline at end of file +cdca3017a1493c3fd28cd6848923f4e807516da9a692c8520308c750f33b3e9b \ No newline at end of file diff --git a/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt index 5734db492..6f346cf31 100644 --- a/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/SOLOv2/subgraph_9/graph_hash.txt @@ -1 +1 @@ -874de3a15489cde34e66f862555ec31a7e4d9af002f657d235dcbb4266ad0395 \ No newline at end of file +ab6646f86486195826bfff7f1a4669548eb5376fe22a5a3cb92a5b7ca5dcf1de \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_base/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_base/subgraph_0/graph_hash.txt index a77dbbd86..2502479a4 100644 --- a/paddle_samples/PaddleX/SeaFormer_base/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_base/subgraph_0/graph_hash.txt @@ -1 +1 @@ -fd6106f4e7439a497d04ab5f9e73165141ae29638efb12ccbc58c5115513f7bc \ No newline at end of file +1067dabd4938eab7590575b1ecd01611aca7cd6a17325b3c34bab20ed7ef1f71 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_base/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_base/subgraph_1/graph_hash.txt index dc99c422f..9ecbdc254 100644 --- a/paddle_samples/PaddleX/SeaFormer_base/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_base/subgraph_1/graph_hash.txt @@ -1 +1 @@ -9be921a1ba306123d1a6418757644b6c81d41fa2209ee4aee9af5a64a15372b9 \ No newline at end of file +97ac0b19ac9242ecab9f45105a70640599e96f9550b21b69c798a6539eafb9c9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_base/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_base/subgraph_2/graph_hash.txt index 6cc7db39e..087b761a3 100644 --- a/paddle_samples/PaddleX/SeaFormer_base/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_base/subgraph_2/graph_hash.txt @@ -1 +1 @@ -4d4a37f369dd4afb3b2d31545ffe6f6afab7b848ac5508f9957ec00fe0bee0c2 \ No newline at end of file +e719714861d6ea1ff33bf61e4c916096b2c614d627b8a382b0b9f50b388b45f4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_large/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_large/subgraph_0/graph_hash.txt index 30f8978e5..75412a8f0 100644 --- a/paddle_samples/PaddleX/SeaFormer_large/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_large/subgraph_0/graph_hash.txt @@ -1 +1 @@ -93d47441ae8993aa35f99c023d025138bf37a645e95601f0833186558b875149 \ No newline at end of file +a04d73e7ebcec909c7e829553727968e9634e8806e51afb46c45b176608ac3e4 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_large/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_large/subgraph_1/graph_hash.txt index c9abf5a08..8765bb19e 100644 --- a/paddle_samples/PaddleX/SeaFormer_large/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_large/subgraph_1/graph_hash.txt @@ -1 +1 @@ -f4da8c42bb54cf2b405a2ebf9f0cf77df7a14ee680ab45581b7d1dcccbb9445e \ No newline at end of file +a310e7c1a8ffe49816629fd81b5d2ac323d5edd1f0ffb1d6dbd1e70c8f76b024 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_small/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_small/subgraph_0/graph_hash.txt index 
ccbb9a598..cbeb06dab 100644 --- a/paddle_samples/PaddleX/SeaFormer_small/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_small/subgraph_0/graph_hash.txt @@ -1 +1 @@ -f4d9d8cb2f7436e347d20c8482a1a02f00062aee0eda83e799866ae4775941ad \ No newline at end of file +d9588e47542c65e9f908536fb19c6d2d4e188b13d875c4e577481f620a5943e2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_small/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_small/subgraph_1/graph_hash.txt index b43c9dc01..72d2b010a 100644 --- a/paddle_samples/PaddleX/SeaFormer_small/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_small/subgraph_1/graph_hash.txt @@ -1 +1 @@ -f41341bd88a3ca39472f163d3dcf4d7bdf3479d22c42c7325f2e457de8981dd1 \ No newline at end of file +aeb2e0f376dd0f94845a0e25d9a0be1a0f08fda648ce8af9d59f0fe6f521c0c3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_0/graph_hash.txt index 50587876b..07d0cf379 100644 --- a/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_0/graph_hash.txt @@ -1 +1 @@ -e79083edd74367af704b72494cb9b076a4b503f561effb6c622052b95e68014a \ No newline at end of file +ba4d2b5818059ff44837e84f01144cd9e249a44de9b208c9de5d27f2bd7128fb \ No newline at end of file diff --git a/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_1/graph_hash.txt index 9b9aa3beb..28855113d 100644 --- a/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SeaFormer_tiny/subgraph_1/graph_hash.txt @@ -1 +1 @@ -39deea3a098b28cec83f8b135066c04e72ff18bf3e328fbc2086cbac0cd95754 \ No newline at end of file +f598177bb249f2705ace24ec623704e367bc54afb7b40e6d4bca24f1f4786e4d \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B0/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B0/subgraph_0/graph_hash.txt index d794a50c1..db0805e90 100644 --- a/paddle_samples/PaddleX/SegFormer-B0/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B0/subgraph_0/graph_hash.txt @@ -1 +1 @@ -41b61e2b6e61dc26f3750331f3cddfe6ade917d8d878b1f345103e74ad323857 \ No newline at end of file +48923788a20ed8afb315f157543a6935ef36c621728cbd06c4bda15d3501e943 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B0/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B0/subgraph_1/graph_hash.txt index bad06f704..3a504c00b 100644 --- a/paddle_samples/PaddleX/SegFormer-B0/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B0/subgraph_1/graph_hash.txt @@ -1 +1 @@ -bce2f5dc065bc3343af83282700b7898e7d8d05dc280ab0d04ef0c131a25ea25 \ No newline at end of file +b139846d185ce63dae1bc8fe0a9edcda77752eef6b8418150ebc42401f8e8a3a \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B1/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B1/subgraph_0/graph_hash.txt index 00784c5c7..d7bef0d78 100644 --- a/paddle_samples/PaddleX/SegFormer-B1/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B1/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c2f86ff02cd33ee2eefd90f49ed2cde10f2f17cba7b94694c79df2792ddd672e \ No newline at end of file +b72f8d4e1be4dbb9d5a6740338dbf59d3abe8a96ecac12a065d3dae044b706f8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B1/subgraph_1/graph_hash.txt 
b/paddle_samples/PaddleX/SegFormer-B1/subgraph_1/graph_hash.txt index 7b6a855ef..993959350 100644 --- a/paddle_samples/PaddleX/SegFormer-B1/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B1/subgraph_1/graph_hash.txt @@ -1 +1 @@ -5da448310b43c0808f7828ea38d0c93669d2fc109208786bc2c781020d3ef269 \ No newline at end of file +8d8042f2e48243e739fff1b94f673085cc62756189d03a26ee3387da94a39c2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B2/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B2/subgraph_0/graph_hash.txt index 016cb6585..af49bedae 100644 --- a/paddle_samples/PaddleX/SegFormer-B2/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B2/subgraph_0/graph_hash.txt @@ -1 +1 @@ -f238990294174c0c3928f66a9fca90760effde9328d3d3346bd64b20aa16738f \ No newline at end of file +261a1d7c31245c63532541fb32e30d1219b1da58c55ff4cc8d101b86ffffc4e5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B2/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B2/subgraph_1/graph_hash.txt index 2217b268a..97bf11596 100644 --- a/paddle_samples/PaddleX/SegFormer-B2/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B2/subgraph_1/graph_hash.txt @@ -1 +1 @@ -459f95be7068b19cb366cab07b09e437d6415b0cfffe85baa5b83a9c306828fa \ No newline at end of file +3276c5d04e1c97e867a4e560453ef401e176d8ccbf9db99bccb96adf83173ed6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B3/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B3/subgraph_0/graph_hash.txt index ebc30b486..e1c4d0f2a 100644 --- a/paddle_samples/PaddleX/SegFormer-B3/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B3/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c6e8c2e6d19572c82821c13f1c8797f325f65f7b89d3294e5e5133ae4f000281 \ No newline at end of file +2d9993f000babb1fb97642d1ec032efc66dd80ab1fd0f436a532e30bebdaf405 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B3/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B3/subgraph_1/graph_hash.txt index b05f3926c..deda4345d 100644 --- a/paddle_samples/PaddleX/SegFormer-B3/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B3/subgraph_1/graph_hash.txt @@ -1 +1 @@ -b551dd302cc23702ae470c8e8ff337a51e227acd0419513f589ec7a82e4fb0a4 \ No newline at end of file +3d24c05e90996089e7bf6f8dbdc6361956ee7b6e16ecf8d88925a7f791ee7227 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B4/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B4/subgraph_0/graph_hash.txt index 364b59998..a80b4c395 100644 --- a/paddle_samples/PaddleX/SegFormer-B4/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B4/subgraph_0/graph_hash.txt @@ -1 +1 @@ -d9fc7c374080de2c2cd1179ccb44fc51b9935528ded5bcb25a8161d4850ca273 \ No newline at end of file +c7c6d07680453d75c9c678f5de29e78dd3fb50fa79588a137b7820e7c7128c7d \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B4/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B4/subgraph_1/graph_hash.txt index a1780da54..0e28a3998 100644 --- a/paddle_samples/PaddleX/SegFormer-B4/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B4/subgraph_1/graph_hash.txt @@ -1 +1 @@ -b1e803abeb83ebc3595285626343b2445d153441da09ad0ecedb874dacb95e2b \ No newline at end of file +f1f343035956eacfbaf51e8bd348f5546cbec19f2f6d34f98c8626ea42781cb5 \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/SegFormer-B5/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B5/subgraph_0/graph_hash.txt index 86ab7d62f..580d5b4ca 100644 --- a/paddle_samples/PaddleX/SegFormer-B5/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B5/subgraph_0/graph_hash.txt @@ -1 +1 @@ -e287998fdafff137754fc94038a2ef6eea0ba6a0db82ac9aa49db6d1737e2e18 \ No newline at end of file +72bde178f17685288cc916900393ae6581db5bc411467e74b79014c9ad217291 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SegFormer-B5/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SegFormer-B5/subgraph_1/graph_hash.txt index 9eaede4c9..886726eb1 100644 --- a/paddle_samples/PaddleX/SegFormer-B5/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SegFormer-B5/subgraph_1/graph_hash.txt @@ -1 +1 @@ -6300c6382ad66379093f3db2c7e56a27b15467b5c1ab447a7613e5fb5521b507 \ No newline at end of file +549d9247d21b9aec3be635495a71a26d573380753a7384fbca304e977ecf7017 \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S1/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S1/graph_hash.txt index 8a2187bad..f91c3907b 100644 --- a/paddle_samples/PaddleX/StarNet-S1/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S1/graph_hash.txt @@ -1 +1 @@ -7d97afb0237320d98a008103e62624b3b0b3aecebb4b31baaa605a38bde32580 \ No newline at end of file +60586340b7d7c9678c8970f54c84438c03791db2de93a1bba583fb7c53b12bca \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S2/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S2/subgraph_0/graph_hash.txt index 4fd62d32f..f89074a24 100644 --- a/paddle_samples/PaddleX/StarNet-S2/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S2/subgraph_0/graph_hash.txt @@ -1 +1 @@ -435764818fd69b1d324c5d6a8d1acb3e047c4a320e846dec35e43499369e2567 \ No newline at end of file +afe19b9b4581d341936394569c681fbf622f2ad0fed94f63b7192afe029ee3d2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S2/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S2/subgraph_1/graph_hash.txt index 95270473f..9b86f3657 100644 --- a/paddle_samples/PaddleX/StarNet-S2/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S2/subgraph_1/graph_hash.txt @@ -1 +1 @@ -cbf7413df10f6155866852ab9810b23d9d00f17955b9916b13d07809873bceba \ No newline at end of file +ec7b854fc898e2cf308ce551870a21f08fffd9f5f6efda246fba85337ea68169 \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S2/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S2/subgraph_2/graph_hash.txt index af976afab..53db4c023 100644 --- a/paddle_samples/PaddleX/StarNet-S2/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S2/subgraph_2/graph_hash.txt @@ -1 +1 @@ -00b859db9f361676efed0eaa7e2ff4885c1ec381eac48bafcb3afd466cc966db \ No newline at end of file +e98100c3e17f2d267239c8d7591f2d547c4d21365f0811113c2d3589060b5684 \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S3/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S3/subgraph_0/graph_hash.txt index 8fa82c1c8..edeb5f381 100644 --- a/paddle_samples/PaddleX/StarNet-S3/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S3/subgraph_0/graph_hash.txt @@ -1 +1 @@ -54cabb369f01ef8bd7db42597f42a9fa6a354d00d0f8b845cf50f7bda960e6e0 \ No newline at end of file +616309c8cfbbf8c70bee7c5c46b85d2950611ee97124d271c66b57e43dd6d61a \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S3/subgraph_1/graph_hash.txt 
b/paddle_samples/PaddleX/StarNet-S3/subgraph_1/graph_hash.txt index bec76fba3..1b1ff58c8 100644 --- a/paddle_samples/PaddleX/StarNet-S3/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S3/subgraph_1/graph_hash.txt @@ -1 +1 @@ -291f2770a22e5bd163049ad0fe010ab3c7db528577d826a76cbb58b5608578cb \ No newline at end of file +be03b84f544a5bc1eb962e37bcec52724f11a9511ce02cd584e890932195ceed \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S3/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S3/subgraph_2/graph_hash.txt index c4a9bfcac..cd599e869 100644 --- a/paddle_samples/PaddleX/StarNet-S3/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S3/subgraph_2/graph_hash.txt @@ -1 +1 @@ -6dffb85ca98569840254be0b3b0b5e685296e2a732f0c3971f8c3ddc65fbf62b \ No newline at end of file +f17897bfcaaa1c1dc8ce458803edb1e36c5f5a53b60a7cb01ba230936ce1fe15 \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S4/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S4/subgraph_0/graph_hash.txt index cbb97cd82..ba1cfba30 100644 --- a/paddle_samples/PaddleX/StarNet-S4/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S4/subgraph_0/graph_hash.txt @@ -1 +1 @@ -bff6640988300fcf7c13c5520dd49da976dad77f2dedc3bc2236aa7e69f2608a \ No newline at end of file +0d69372b00f79e5a74cadf73facabc82b7ec7981b0b6b3e91cc5da525cac532e \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S4/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S4/subgraph_1/graph_hash.txt index 0c892470e..d9f068215 100644 --- a/paddle_samples/PaddleX/StarNet-S4/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S4/subgraph_1/graph_hash.txt @@ -1 +1 @@ -6e916c650d7d573b8b68325f03a26f49a705899af7b7910a68221316cbfa2017 \ No newline at end of file +8e7dc6783b5e54f01d6bf3c6546a4c21bfdb84c17ec6a6e73dcdf6b894ee8616 \ No newline at end of file diff --git a/paddle_samples/PaddleX/StarNet-S4/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/StarNet-S4/subgraph_2/graph_hash.txt index 9ccc245a6..483209cf5 100644 --- a/paddle_samples/PaddleX/StarNet-S4/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/StarNet-S4/subgraph_2/graph_hash.txt @@ -1 +1 @@ -e23fe8baafa81188aafa422371051603f7d4879a66aa3bcf11cebff7cd14b898 \ No newline at end of file +ca7d25eb412328f2ea36dad988baaa0ffd64e715bcc2026e93c9d86b8c677c1a \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_0/graph_hash.txt index 294c36e2e..df893fea6 100644 --- a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_0/graph_hash.txt @@ -1 +1 @@ -713ae42e028505aa137066826046c857a7ba629e82740de8aff3d1fba19d4833 \ No newline at end of file +6952393c0f1ee089485187037901e0336f7dd38eeca9e679cf2bc8ae2fcab4c7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_1/graph_hash.txt index d629fd861..86bba9559 100644 --- a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window12_384/subgraph_1/graph_hash.txt @@ -1 +1 @@ -f7d8df1d25958a60386f54dd6cff2e27036aba901a23a0a3eb0be36321ddb754 \ 
No newline at end of file +1f6bbae8c83ff38a65599ef25ecbde51a5007f0531edb0ba83ebdc9236a0ee95 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_0/graph_hash.txt index a566034e9..39c5f5b04 100644 --- a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -9ce3dcbdc2cb6c36cf3d0f7e745bbd0d5dffcfba1e45d72de12006ef37b15d70 \ No newline at end of file +21ae1900f88b310641cc2b429911dc637125d04e9b78090650a2f061253f1b63 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_1/graph_hash.txt index dc3bf341c..5d090b5bd 100644 --- a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -6f1cba512207e544d17f92aeffee7036593b9505a965efa4859efece003fe113 \ No newline at end of file +8d0861ecc85977e49e28b7b596cc17ba4e42a68e889d179010794825ebea8b67 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_2/graph_hash.txt index 90ba66d81..2ffe56788 100644 --- a/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_base_patch4_window7_224/subgraph_2/graph_hash.txt @@ -1 +1 @@ -856ee44336c1a3c840297d3f81b53e6aa72fa7d17a5b910f2a193ca9c465cd2b \ No newline at end of file +b4868b697d94210fafe71029a6a4dc9a327d4dfdb332602296732544eafe5e07 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_0/graph_hash.txt index 1d1f8527d..32bc9ec53 100644 --- a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c47efbad491af03ab6fedf67f9dc73ac9c3f4835531ad9a55704703a439e02b6 \ No newline at end of file +1dae4ae2c6e2e5f834cf3ada4cc7a037db4f81f1a80390ce8759b22a6cae82b9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_1/graph_hash.txt index b9aca170a..89018ae6e 100644 --- a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window12_384/subgraph_1/graph_hash.txt @@ -1 +1 @@ -4f59a4316e8ed9c28adc55de37096195c4ab154249c01393dceebc3e8fb6f906 \ No newline at end of file +4c4ee24152c7f1785810ebe7080658ea8799b80babce66c72fd96d7432775f84 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_0/graph_hash.txt index 2d17c6658..2ceefa29f 100644 --- a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_0/graph_hash.txt +++ 
b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -6373dbfae53f9ef9e7b21d9d0eb558ffb2b3840a74d71e16169452ca9d324495 \ No newline at end of file +9435658107c2b3ff4a1d6f64aaeb4d37534619485b9e404f2f7da3def05102aa \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_1/graph_hash.txt index 6e6bcf90e..5eb40d307 100644 --- a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -ee7a520a636631d2d355a298b8ddaf5b4284f66f210b3e324cc927302c8c58d7 \ No newline at end of file +f8cdfb6ff13b1824f2ed33655d9c08717c3927a280d546fccd93d106c33abccb \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_2/graph_hash.txt index e4f379b57..a78ffe80e 100644 --- a/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_large_patch4_window7_224/subgraph_2/graph_hash.txt @@ -1 +1 @@ -ca1b1cdf84a7b1583f58301018a479f6c541373829956890848ef69bd418b17f \ No newline at end of file +8c0b481df4f0fb1126b8bd2260f0f044bd2cc331c520a4e95a3694059810c468 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_0/graph_hash.txt index 36fe70820..b4ba65b34 100644 --- a/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2bc4eaf49016451eec1a9c0bd27554ca0c0f661fc58b7f2d8f4a664a26244650 \ No newline at end of file +829b0fdec37654e7e8ac0f81696d901aabbc9e684a941533142b8b0853548369 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_1/graph_hash.txt index 743072dd3..58a189828 100644 --- a/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -2429501f356acd0cee172d67ec1167c166a5d1f27fdc10b506387cc997a42df6 \ No newline at end of file +33d0de4e9bbb67cc191a293dd632eafb27cdc0726435a9ec954f0aaab0390b8f \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_2/graph_hash.txt index 448e7950b..8d39806c7 100644 --- a/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_small_patch4_window7_224/subgraph_2/graph_hash.txt @@ -1 +1 @@ -7eca3c42966d4c9c3247ea3013d83d247a5e8d76d671a71bf0abadb4570260c6 \ No newline at end of file +33dd8e1768e63445a1a7e2727555a61eee8036d346299e74e62672401b0df37f \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_0/graph_hash.txt 
b/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_0/graph_hash.txt index 809bbfbaf..d41d14d7c 100644 --- a/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_0/graph_hash.txt @@ -1 +1 @@ -411e07459c61f015723854d40512d41f1de04984aed27f0c8e7d58cf15838e32 \ No newline at end of file +49a5cc759645d7fa0626abfe38a63e89fe79928eaec35bd523b1704eac5ea168 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_1/graph_hash.txt index 2d28f9372..4ed83ebfc 100644 --- a/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_1/graph_hash.txt @@ -1 +1 @@ -e5d562f6a7295bcb4ab64f7b6ded5d751df19509d44aeb623942246e6be1caa0 \ No newline at end of file +763aab0d0b72f91674bb2fc2f6a4b5ca88d6b62c73771f18c1a85e0830dac4c5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_2/graph_hash.txt index 4511986d2..5be9eb767 100644 --- a/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/SwinTransformer_tiny_patch4_window7_224/subgraph_2/graph_hash.txt @@ -1 +1 @@ -2a0b9eb48b863d6028c666d1e73a6593cc71e561a163be8b0719ac25a3b424df \ No newline at end of file +d575eda90c54876f4b8a8a18307acc03c3a13c2f7540e15c02d4b7a1cd4bd8c3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt index ee2d352b9..c823602cb 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_0/graph_hash.txt @@ -1 +1 @@ -c1cb87f0287a3fcb24a3b45b530a98d9dec2a44e18da7b06651cfe11d2f53243 \ No newline at end of file +3baf8bff927945de3283400590ccc4fbde81dacb3d0f61b9f6fb44892b20fa09 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt index c00e7783f..5af9e9bde 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_1/graph_hash.txt @@ -1 +1 @@ -407b15c03c74c9b8977ce4970deb83767907c2c418ebb97a894bac7d97061ec5 \ No newline at end of file +e264cb4aa45d429c6ddd9bc6fb98ecbb4602896e64fc021cf3929c027fdd07fc \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt index 59b6a77fb..d278dca7a 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/graph_hash.txt @@ -1 +1 @@ -0f8a6b7104d3aca48b5037c599fd68df9cebbccbe6d26742ef5ec762870148e3 \ No newline at end of file +588d2cbe393fcc4e9b6203711b7f255ed90b5473e89288d65921f4e33ed636d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt index 5b9740b6d..de0a7a5ae 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_11/graph_hash.txt @@ -1 +1 @@ 
-c788ee53ec7446be5d13146cc01d890972d8ea81ab50143e201ddeb574745fb3 \ No newline at end of file +8da99a4a821e3b37fa37693ad690251149c108e684abe721d88d088c4b17168b \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt index fce86584a..b076c2ade 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_12/graph_hash.txt @@ -1 +1 @@ -4f91c3f0b64160211924301dbdf78ec6dc0174a3afa7a61a30995162d1e27b07 \ No newline at end of file +fc89e165ae90ffa63337609f26c10f53d88c1ae5744f99363342d66e3b99cb41 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt index e4f46efd7..e9fa6c7be 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_13/graph_hash.txt @@ -1 +1 @@ -188cf83c0475a855a984c8a226bb1a089806fdcd434d27a2ca33aaee9a4ba9ef \ No newline at end of file +b68e08a2156f2c676820085376bbdc83f1c9cf4d9be583011332893513654c62 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt index c8237f439..03d16627c 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_2/graph_hash.txt @@ -1 +1 @@ -b2f90bd3d39f65b7d05191485f78b9cf3162cbcce2f87f1f22000f97a6635e8f \ No newline at end of file +2a455178b90715f1eb03faec5474d69cf88fd0e74a3cd3ce1f4de4ed1f9f00f5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt index dede65584..7acb63337 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_3/graph_hash.txt @@ -1 +1 @@ -540f1a1c8b15551cdd0f329d60f7fe7cc322a6a3f15298423e5b68b13c4cc43c \ No newline at end of file +a5a67ab11cc75ca3ced458a09c9970a3feae1815c41e04e50889b16531c2e4e2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt index c670f6da8..f669ebca0 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_4/graph_hash.txt @@ -1 +1 @@ -20fef56040727df448eeb1a05f7a18e5785d7ae35839f722fde80d4a22f8f5f4 \ No newline at end of file +a43b89971904cb072317bea52311a4cce20c436d39933cd62f648fd5a1f8b26e \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt index 3ea29a59a..ca2e06a53 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_5/graph_hash.txt @@ -1 +1 @@ -7096065249a442fe087b073010031bbe4a1c8d6e2707d0000f4d8be72faab72e \ No newline at end of file +f6b0e12402c532fc463303fcca490174c52b7807942b1dd2b2476c67c8d00bb1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt index 9c533910c..42eb669de 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_6/graph_hash.txt @@ -1 +1 @@ -9c2bbb3e4b6b3e46cd5a3be27873b919ff3a4707a8afa5460cb8d23f3c81fe4f \ No newline at end of file 
+df975dd006204690618cd6ca90b24c63dceba4d95a5a8977847f7bb228d32f74 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt index 1fa817232..dec6b36a9 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_7/graph_hash.txt @@ -1 +1 @@ -61c25a8642cca6853f0da98a5de760f183321f50fd1db253f0c679f86130d597 \ No newline at end of file +0025976831659bc85bca2ce1218c6fa7f81bd9aa04bd3e05029798180bceabd8 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt index 828b60c10..e01e42e07 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_8/graph_hash.txt @@ -1 +1 @@ -443ea3bb7515bb372aab3c199d2d8cafa28adad053deff1304f18a2ab5126c29 \ No newline at end of file +8ee551fa92caf1d9a545c38c559624de45f47cce69a9dc025830058439033c56 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt index 5b83ce4fe..357fc6e10 100644 --- a/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet/subgraph_9/graph_hash.txt @@ -1 +1 @@ -bd56bcf5d21fc7ae38447b3f591ffe9628bd1b4f2991fc12452cc57baaf25272 \ No newline at end of file +ffe22400d1802b47823ed26cf9fa3ec1b1076e1ce777fbccbab738968001bd9d \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt index f539e935b..e6652f73d 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_0/graph_hash.txt @@ -1 +1 @@ -015f6a3551680938ba72619056276641ee3a7ac6cfdbe40edde396d9f09e7e47 \ No newline at end of file +6f8a904fbd6aba459f7f68c644a03f740c70eda6a4f65d3af70d121516e7c0e6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt index d67a70ea3..19ee6563b 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_1/graph_hash.txt @@ -1 +1 @@ -3fb65681e9f9a0fc473c9d65f70cbfb29bfb92e719d9d0c6562eca4978ec51e5 \ No newline at end of file +6cce8c5a5bb967c8e4742c62a5d75d53f2b889737eb1628e39f038f9475c8866 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt index 3ea29a59a..ca2e06a53 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt @@ -1 +1 @@ -7096065249a442fe087b073010031bbe4a1c8d6e2707d0000f4d8be72faab72e \ No newline at end of file +f6b0e12402c532fc463303fcca490174c52b7807942b1dd2b2476c67c8d00bb1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt index 628bed2cb..a969e5089 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt @@ -1 +1 @@ -d542d39e895f6c99fcdd793a5d5d813c0497991729db1137c2fc19139a08cc42 \ No newline at end of file 
+25c8345fc2b36e75b1aaf987fe048c9788132c20411101c06ace6f50aebe689e \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt index 5b9740b6d..de0a7a5ae 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt @@ -1 +1 @@ -c788ee53ec7446be5d13146cc01d890972d8ea81ab50143e201ddeb574745fb3 \ No newline at end of file +8da99a4a821e3b37fa37693ad690251149c108e684abe721d88d088c4b17168b \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt index 0502190db..b17dd4d2e 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_2/graph_hash.txt @@ -1 +1 @@ -9451eec787ebbf6d81d05cf078e17cb1b8017eefbaeabcf25382ff692dd94b12 \ No newline at end of file +5b5c0c1969352536abf568bf9ba056fdfe00404fb6fef6168ab43a74d08d3ae3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt index 59b6a77fb..d278dca7a 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt @@ -1 +1 @@ -0f8a6b7104d3aca48b5037c599fd68df9cebbccbe6d26742ef5ec762870148e3 \ No newline at end of file +588d2cbe393fcc4e9b6203711b7f255ed90b5473e89288d65921f4e33ed636d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt index 19b5c8150..7bb3cbfdc 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt @@ -1 +1 @@ -06943ae7ebc7532ff1bcd6ca3875dfb2729eb0348f9169988043fc4aa3ce050b \ No newline at end of file +aa5aaf97436574463d1b03785cb5be47dabceded3443c4667332727c0d99c9e5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt index c562da014..fdc3e59a3 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt @@ -1 +1 @@ -c4459a986fcd6ab51cf96bcd62d9df977c2f0352291ee922d5ecfb21cda64b80 \ No newline at end of file +3cd4bc2847c4a2497d037953c86c406ad7c1349e97eb5347ed9bea4bd8958f13 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt index 44b2ee351..e38033243 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt @@ -1 +1 @@ -b1a67bcc15427ee0367a3a1b199f542e3368882ab0bf73dc2d8cb9fe481a112d \ No newline at end of file +9fdac9340430adc480f8c20b6b5eccc3612b6eaf503bd5560b799c4e792bbb48 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt index f14aa14e3..d678b3407 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt @@ -1 +1 @@ -f56463afba6c18c9082b9fd47939fa06137c08c2c8f638602b3eed63df795895 \ No newline at end of file 
+bd83a2199c0d222bbf4499fa080b49261c19ecd9832433da3dacfd6681f71235 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt index 08a9c2299..80a614cb9 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt @@ -1 +1 @@ -d04dcb16bbb8bceb9a186ee5f5bcd178fc2377f29963a707cc07b94f519ab7ff \ No newline at end of file +f20fa2aea49acb9d72c5b16122ab198b770984aba72a223636b5cad5ab17931d \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt index 7d5c89e14..7894d329b 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt @@ -1 +1 @@ -a80937a58b6502456681671049b582ccfb3dbb036758ae01f1b804819ade0464 \ No newline at end of file +f840fc131a017403504e3016d24501e1e6830927f902d749bd588525a36a1980 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt index d50a2c1fc..904b6fa12 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2c54d953e620a4fb6482bb71e87cbc34eaa47c195f93ffa0634b877b4b7579d3 \ No newline at end of file +4651080d1cfd8ac7eb27cc228c4b508a662d8f9650ba4e0476cf9b97908db4e0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt index c2f1152de..37fd5e205 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_1/graph_hash.txt @@ -1 +1 @@ -e92156205e42b2441b82d71a0940d5ef74197298882d280cad587da9cad0bbb8 \ No newline at end of file +c810e2097fa7c27dce4843ac0472169a0c8ad96b37ef95444fe766101257cccb \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt index dbf5d1680..f0270a176 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_2/graph_hash.txt @@ -1 +1 @@ -0d01b53e37ea4d00e7ca170493dcc1a034955a35fe5920d8c49798302d01457b \ No newline at end of file +532577396956d4df69eaac6eb0465c9631ba1c796181b4ffb70a62fa15aeb9ab \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt index 8b0cdf304..888679393 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_3/graph_hash.txt @@ -1 +1 @@ -0bdb4aa40b9c866241dde20d16f12a08dfdef9fcb8d56597fdae08744f673964 \ No newline at end of file +94b42b4884ad8e1fddb99b4175ea8547a05edb9643b4209937df95bd77ce8786 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt index 055ebcca9..06482199c 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_4/graph_hash.txt @@ -1 +1 @@ -244321bc84fa4ab6f485de7a0c7146beabf59c446ac2dca06dfc26ec31964ba7 \ No newline at end of file 
+a648274efe2d585c98ba16fde8c2a40ff1182c79f364d23780445325b149637f \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt index a23694b72..f3b5b6564 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_5/graph_hash.txt @@ -1 +1 @@ -7c2b02b97365bcd245d9aeb7d1470bc140da19e49b4d21fdc1b40a8aaaf7f68f \ No newline at end of file +a4b4c79d12dfde11e6c011bde5cd8960dd6f20c2975d297a2e7fdc5ae96cbb4a \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt index 5a2bec85d..663a577c8 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_6/graph_hash.txt @@ -1 +1 @@ -e5db7af1e6377319ccf8fc48e81b13a7c3e2bbc34b6c3c7da121030977da3abc \ No newline at end of file +420389f6e8309b0ae36aa651ad17b15420438c7e6c387982aa0cb215598e3f00 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt index 920d7b232..24814e148 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_8/graph_hash.txt @@ -1 +1 @@ -e1ecbf15f7c366dbf3fdc8b7ebf2dcad1dacd500344631ae7716add712a41849 \ No newline at end of file +1cf332850b237a2ffc3d6904d2e5060b28769ce55aafb4adae051adb9c663cca \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt index 783ecfcf7..177c40173 100644 --- a/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_cls/subgraph_9/graph_hash.txt @@ -1 +1 @@ -fe721ccf1cd65da315d7193de6608142ca1ddfd9abb684448865f22f76f861b0 \ No newline at end of file +b6ea7dc378d406b2802fd046a7a3cd6f3f6649f8891e211f284a3dc662969d92 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_0/graph_hash.txt index b558d3714..94b7780b3 100644 --- a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_0/graph_hash.txt @@ -1 +1 @@ -8b4e00670a68159e146cc55716d5b6bd7ce57e2be7cae76edc522422bf70771f \ No newline at end of file +d3bb1072a0db0eb825da0b857175f4f3e659808d4a8a84f720a77a936f975d2b \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_1/graph_hash.txt index d17261c01..6d590eca3 100644 --- a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a4acc9f335307b0087902a7ec72d68ffc60aad1ee645e85fa0b1bc34bf8226ce \ No newline at end of file +f1b8395e4f40cf7387dbb85af8e162323909336ee3bafe49fe2b37897a7000b6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_2/graph_hash.txt index bec294e7c..216d6e483 100644 --- a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_2/graph_hash.txt @@ -1 +1 @@ 
-aadd2ef532a10d89b06cfc86e51edaa99b4e8010f7b8936ce3bab67b7e422051 \ No newline at end of file +164da76e028da3d49c9c3bbc8653b14a3aa8acf2c39cd15ba31c0dbc19d98144 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_3/graph_hash.txt index a9e50eabf..cecfafec8 100644 --- a/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-DarkNet53/subgraph_3/graph_hash.txt @@ -1 +1 @@ -c9cfb0a9a8dddf4fc520b51ddb0505c49ceccced7050b439b0cebebeab647eee \ No newline at end of file +40a3c129ca651a2fc8baacd4e2c8b702bbbd1b37c2026577756e0cfc60a22b25 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_0/graph_hash.txt index b09128628..6fc8b0325 100644 --- a/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_0/graph_hash.txt @@ -1 +1 @@ -7797a71310007f10e0f788ff40101b00053b0097e06f2496cf55c2a338ebcf3e \ No newline at end of file +e08db7c66fb414e34dd1fb00c07ab6a97aa77d06d3e07e0d9ebc27d6170c5d02 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_1/graph_hash.txt index d6f6f1116..125816cb3 100644 --- a/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_1/graph_hash.txt @@ -1 +1 @@ -e40cd3555494e42488f74945c3f9e6cbbdb43e3c219fd09726251f94204937d5 \ No newline at end of file +f55cd6bc2c5955d0664cc2e147cde740855776a25c5ffd29b99a2a7707394c1d \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_2/graph_hash.txt index c9a7af83c..4dc51d175 100644 --- a/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-MobileNetV3/subgraph_2/graph_hash.txt @@ -1 +1 @@ -205d83a5dc5a2733cfe59aa6345427ba5237a5c774e17265bd20eae0e8480498 \ No newline at end of file +93c556b2df669dd1dbcf58cbf1627d858f55f1bb808529a702446a39f702eca1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_0/graph_hash.txt index 692eef737..cc6403bad 100644 --- a/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_0/graph_hash.txt @@ -1 +1 @@ -5eb19406fc8a871a2dfa7eaf69d43207450e5c6702c226435d1bf23eedcae1fb \ No newline at end of file +62c35143c920bf6fe66b618f145821c79edd037d87c9947dec0927781e801c70 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_1/graph_hash.txt index e941ee432..6e88c4b67 100644 --- a/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_1/graph_hash.txt @@ -1 +1 @@ -7a38eea5736696b33fe614b162948762e8be510f532e3683957b4e12d32f10ac \ No newline at end of file +1fa49f2810d2d1f75596a23e9cb2c196f4e2fe9fc8130b8e3a4f34739915d991 \ No newline at end of file diff --git a/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_2/graph_hash.txt 
b/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_2/graph_hash.txt index 5ee4d0747..b79dbe501 100644 --- a/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/YOLOv3-ResNet50_vd_DCN/subgraph_2/graph_hash.txt @@ -1 +1 @@ -ca60e77a73e59d5c418647f8b12a5d041a5f633b8262eeb4e9be29fa8255f3e5 \ No newline at end of file +38f3f357cb5004a8f39b73e7a66648b4c367741ebcd5101b90fc2506fd4116b5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_0/graph_hash.txt index e62be3852..cbafcb299 100644 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_0/graph_hash.txt @@ -1 +1 @@ -76baf8657e274f84af2cfffb732987c82476056daa1fa734a2adad01a4bd3c28 \ No newline at end of file +6ba838419659b86bb00954156a27ccbb1beab52ed523847d8841125f53e57353 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt index b687ea4b6..b0c90384c 100644 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt @@ -1 +1 @@ -ea7b35e16430be4ee8516b3678c74d5cb553ebf0a30704fa54ea2196925d8736 \ No newline at end of file +aabc6b43f8cab6a6c906043868317704e5b8a6e3ea491561262c182c67bbe7e1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt index 93295742c..b414d27fc 100644 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt @@ -1 +1 @@ -72635eaa8cddb1bb3c1593fbda3647b1fd40c21ad3692ffc43e63053353b44e5 \ No newline at end of file +bdb2f767798ea246af61da39e7ddb151c49633bb78cf34bcf9f6ed6d5f01c51d \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt index 3a93b7d59..af29ab847 100644 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt @@ -1 +1 @@ -3cf9c726846dfc01838c2dc3b03680c0562c863765d2b18ad1152d57d8b44ce6 \ No newline at end of file +e94dbe68d55ee8767835c55f4344f98e5a19aa1bded3e6e4ba11a1c210231188 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt index 758d3cf6c..b68fe7661 100644 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt @@ -1 +1 @@ -e2ef048605e2438c1fcbd1ee00942eb3f372ac0ba43100bb502f52e55211e0f0 \ No newline at end of file +90f5ad58ff9bcbbf15fcede3279253e0efba1056e86baaeb1bc99ccdedeaa9fe \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt index 76058b79d..e40a69acf 100644 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt @@ -1 +1 @@ -71b45662c8e88195365ea2e8f6a9033fcd2512b09d4c3f88330952d2f4e1ad98 \ No newline at end of file +c8f2494cc64b68cfb415c8edf6408e3d383db93242784a0d215c99e14900e1c5 \ No newline at 
end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt index b869f645c..435cef27f 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_0/graph_hash.txt @@ -1 +1 @@ -acb18fe3df13e7be558fc9325eb78ade5fdd8f993b06397cc0704de9673ce006 \ No newline at end of file +00642bf6c8d6b75aff38f14a837bfb3d30c4bfcae69e59b849f0cbb85bf0fd27 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt index 58336ba55..b47d4eb26 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_1/graph_hash.txt @@ -1 +1 @@ -a15eb770dc17564594d30f8f56aac3cf032511a8e6c9feb11c06ac1ded7f7636 \ No newline at end of file +ac604448a813eab4e7521e5e8153a57e6fceddb82d1caf094ca27341518f20d3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt index 758d3cf6c..b68fe7661 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_10/graph_hash.txt @@ -1 +1 @@ -e2ef048605e2438c1fcbd1ee00942eb3f372ac0ba43100bb502f52e55211e0f0 \ No newline at end of file +90f5ad58ff9bcbbf15fcede3279253e0efba1056e86baaeb1bc99ccdedeaa9fe \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt index 192c53245..110bbe846 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_11/graph_hash.txt @@ -1 +1 @@ -ea6c0f8c5a7a0f0dda4c8ff3c1fab4d0c43521b6e0eb52f0ce940ddde0178ca3 \ No newline at end of file +b05f95a65971b1590f309d359f5259acabf8101a66dc1fe1c0cf2d8a0ff08424 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt index 5d15958c6..321befc72 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_12/graph_hash.txt @@ -1 +1 @@ -5fa2fb0a4a2f5ac153edf09f6a38cca5e796b27a226fe7a6d6130a84603ae6cc \ No newline at end of file +8b58f82fe2f9970b03d63136cf72c163d7717fa757b22d740a635dd581ab1ae7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt index 76058b79d..e40a69acf 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_13/graph_hash.txt @@ -1 +1 @@ -71b45662c8e88195365ea2e8f6a9033fcd2512b09d4c3f88330952d2f4e1ad98 \ No newline at end of file +c8f2494cc64b68cfb415c8edf6408e3d383db93242784a0d215c99e14900e1c5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt index 3a93b7d59..af29ab847 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_14/graph_hash.txt @@ -1 +1 @@ -3cf9c726846dfc01838c2dc3b03680c0562c863765d2b18ad1152d57d8b44ce6 \ No newline at end of file 
+e94dbe68d55ee8767835c55f4344f98e5a19aa1bded3e6e4ba11a1c210231188 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt index b687ea4b6..b0c90384c 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_15/graph_hash.txt @@ -1 +1 @@ -ea7b35e16430be4ee8516b3678c74d5cb553ebf0a30704fa54ea2196925d8736 \ No newline at end of file +aabc6b43f8cab6a6c906043868317704e5b8a6e3ea491561262c182c67bbe7e1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt index 06c3fb687..84db42b46 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_2/graph_hash.txt @@ -1 +1 @@ -584a9945a71de056f2fa7b5aee34258b6d4468e900decc0f3fdb80a4beb1db67 \ No newline at end of file +2cf041b6597c89c4a9d13848bf12c5fc7c910d6df1d9327fa7a5fa5687277bbb \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt index 2fe83a635..0ef9994c7 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_3/graph_hash.txt @@ -1 +1 @@ -870cef134bc6ea0a9c621405ce6cf9b325c923873385c00ce1cf267d3c8399a9 \ No newline at end of file +de437a8d3a1e993e4fc840cb4cb91a31119ee0d1efd2696fbd14e730a7f56d56 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt index 526915ab3..9d9262d6b 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_4/graph_hash.txt @@ -1 +1 @@ -38a708b8b912dd287035961b4c7bd4277e4779b8bbe59dca7db5523857b64b0a \ No newline at end of file +d1f2eb09594b655e174fca6ab4a8a7230d23d213295ec5f6b29dd00a9b1ed220 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt index b0ce44625..8b1b0b6f0 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_5/graph_hash.txt @@ -1 +1 @@ -9d7ab6c820abffef8decdd1fcf438007f183b463b97bbdb1444e070cdd4f95ee \ No newline at end of file +ea2213dc23f9628bd621438c380535c411c74e32a642bd71504784f570e26a2a \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt index fa1eb2a2f..9b2078edc 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_6/graph_hash.txt @@ -1 +1 @@ -6aea37a6646f9874b39ff517b9581e94a17162f998d0c2463c446cf911c23a6e \ No newline at end of file +25c9d6ab7bf15aa3561434322c9b4c76fc1b3cd4db0b465090afa5d1ac43e89c \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt index 9957af478..b3e4a9a3e 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_7/graph_hash.txt @@ -1 +1 @@ 
-309279f5fdddd40671adb92dc0fe936fd80e7a0bc172cfd322a6f45250640d9a \ No newline at end of file +3dc039f24b2d10ae5a080675fecc47e9b3ef7fc53cd61a4e803ad14e10cd7a09 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt index 93295742c..b414d27fc 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_8/graph_hash.txt @@ -1 +1 @@ -72635eaa8cddb1bb3c1593fbda3647b1fd40c21ad3692ffc43e63053353b44e5 \ No newline at end of file +bdb2f767798ea246af61da39e7ddb151c49633bb78cf34bcf9f6ed6d5f01c51d \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt index b3652acc6..bbda0a067 100644 --- a/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/ch_SVTRv2_rec/subgraph_9/graph_hash.txt @@ -1 +1 @@ -6bc364131227030b344e9dadbd92fc5fcbb6cbd73c96e610d45b18a2aa3a20bf \ No newline at end of file +f21fbf084a73e0241c8f91d48c8aab584532c93461b27c7fa05b2f7cd1ac55f3 \ No newline at end of file diff --git a/tools/ci/check_validate.sh b/tools/ci/check_validate.sh index d51f9ed61..f443cd9ea 100644 --- a/tools/ci/check_validate.sh +++ b/tools/ci/check_validate.sh @@ -37,7 +37,6 @@ function prepare_torch_env() { LOG "[INFO] Update pip ..." env http_proxy="" https_proxy="" pip install -U pip > /dev/null [ $? -ne 0 ] && LOG "[FATAL] Update pip failed!" && exit -1 - pip install astor # install torch pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu126 > /dev/null [ $? -ne 0 ] && LOG "[FATAL] Install torch2.9.0 failed!" && exit -1 From ea531b9274a7f0b79851a02aa77212bec7d24681 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Fri, 12 Sep 2025 10:46:07 +0800 Subject: [PATCH 4/5] Remove redundant subgraphs. 
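
Note for reviewers: the deletions and renames in the stat below target subgraph directories whose graph_hash.txt digest is identical to one kept elsewhere (the same digest pairs appear above under both ch_RepSVTR_rec and ch_SVTRv2_rec, for example). As a minimal sketch only, assuming the repository layout paddle_samples/PaddleX/<model>/subgraph_*/graph_hash.txt, duplicates can be listed by grouping directories on that digest; this helper is illustrative and is not the tooling actually used to produce this patch.

    # Hypothetical helper (not part of this patch): group subgraph directories by
    # the digest stored in graph_hash.txt so duplicates become visible.
    from collections import defaultdict
    from pathlib import Path

    def find_duplicate_subgraphs(root="paddle_samples/PaddleX"):
        groups = defaultdict(list)
        for hash_file in Path(root).glob("*/subgraph_*/graph_hash.txt"):
            digest = hash_file.read_text().strip()
            groups[digest].append(hash_file.parent)
        # Keep only digests shared by more than one subgraph directory.
        return {h: dirs for h, dirs in groups.items() if len(dirs) > 1}

    if __name__ == "__main__":
        for digest, dirs in find_duplicate_subgraphs().items():
            print(digest[:12], *(str(d) for d in dirs), sep="\n  ")

Running it from the repository root would print each shared digest followed by the subgraph directories that carry it, which is the pattern the removals and shape_patches_* renames below follow.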
--- .../subgraph_0/graph_hash.txt | 2 +- .../PP-YOLOE-L_human/subgraph_0/input_meta.py | 10 +- .../PP-YOLOE-L_human/subgraph_0/model.py | 7398 +++++++++++++- .../input_meta.py | 8 +- .../weight_meta.py | 5136 +++++----- .../subgraph_0/weight_meta.py | 7563 ++++++++++++++ .../subgraph_1/graph_hash.txt | 2 +- .../PP-YOLOE-L_human/subgraph_1/input_meta.py | 116 +- .../PP-YOLOE-L_human/subgraph_1/model.py | 174 +- .../subgraph_10/graph_hash.txt | 1 - .../subgraph_10/graph_net.json | 6 - .../subgraph_10/input_meta.py | 9 - .../PP-YOLOE-L_human/subgraph_10/model.py | 7396 -------------- .../subgraph_11/graph_hash.txt | 1 - .../subgraph_11/graph_net.json | 6 - .../subgraph_11/input_meta.py | 156 - .../PP-YOLOE-L_human/subgraph_11/model.py | 385 - .../subgraph_12/graph_hash.txt | 1 - .../subgraph_12/graph_net.json | 6 - .../subgraph_12/input_meta.py | 121 - .../PP-YOLOE-L_human/subgraph_12/model.py | 175 - .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../PP-YOLOE-L_human/subgraph_13/model.py | 162 - .../subgraph_14/graph_hash.txt | 1 - .../subgraph_14/graph_net.json | 6 - .../subgraph_14/input_meta.py | 141 - .../PP-YOLOE-L_human/subgraph_14/model.py | 338 - .../subgraph_15/graph_hash.txt | 1 - .../subgraph_15/graph_net.json | 6 - .../subgraph_15/input_meta.py | 105 - .../PP-YOLOE-L_human/subgraph_15/model.py | 223 - .../subgraph_16/graph_hash.txt | 1 - .../subgraph_16/graph_net.json | 6 - .../subgraph_16/input_meta.py | 38 - .../PP-YOLOE-L_human/subgraph_16/model.py | 94 - .../subgraph_2/graph_hash.txt | 2 +- .../PP-YOLOE-L_human/subgraph_2/input_meta.py | 139 +- .../PP-YOLOE-L_human/subgraph_2/model.py | 364 +- .../subgraph_3/graph_hash.txt | 2 +- .../PP-YOLOE-L_human/subgraph_3/input_meta.py | 122 +- .../PP-YOLOE-L_human/subgraph_3/model.py | 1215 +-- .../subgraph_3/weight_meta.py | 585 -- .../subgraph_6/graph_hash.txt | 1 - .../subgraph_6/graph_net.json | 6 - .../PP-YOLOE-L_human/subgraph_6/input_meta.py | 49 - .../PP-YOLOE-L_human/subgraph_6/model.py | 192 - .../subgraph_7/graph_hash.txt | 1 - .../subgraph_7/graph_net.json | 6 - .../PP-YOLOE-L_human/subgraph_7/input_meta.py | 130 - .../PP-YOLOE-L_human/subgraph_7/model.py | 258 - .../subgraph_8/graph_hash.txt | 1 - .../subgraph_8/graph_net.json | 6 - .../PP-YOLOE-L_human/subgraph_8/input_meta.py | 103 - .../PP-YOLOE-L_human/subgraph_8/model.py | 195 - .../subgraph_9/graph_hash.txt | 1 - .../subgraph_9/graph_net.json | 6 - .../PP-YOLOE-L_human/subgraph_9/model.py | 509 - .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_0/input_meta.py | 74 +- .../PP-YOLOE-L_vehicle/subgraph_0/model.py | 7361 +++++++++++-- .../subgraph_0/weight_meta.py | 8005 ++++++++++++++- .../{subgraph_7 => subgraph_1}/graph_hash.txt | 0 .../graph_net.json | 0 .../{subgraph_7 => subgraph_1}/input_meta.py | 0 .../{subgraph_7 => subgraph_1}/model.py | 0 .../subgraph_1}/weight_meta.py | 0 .../subgraph_11/graph_hash.txt | 1 - .../subgraph_11/input_meta.py | 69 - .../PP-YOLOE-L_vehicle/subgraph_11/model.py | 248 - .../subgraph_13/graph_hash.txt | 1 - .../PP-YOLOE-L_vehicle/subgraph_13/model.py | 1050 -- .../subgraph_14/graph_hash.txt | 1 - .../subgraph_14/graph_net.json | 6 - .../PP-YOLOE-L_vehicle/subgraph_14/model.py | 509 - .../subgraph_15/graph_hash.txt | 1 - .../subgraph_15/graph_net.json | 6 - .../subgraph_15/input_meta.py | 48 - .../PP-YOLOE-L_vehicle/subgraph_15/model.py | 192 - .../subgraph_16/graph_hash.txt | 1 - .../subgraph_16/graph_net.json | 6 - .../subgraph_16/input_meta.py | 38 - .../PP-YOLOE-L_vehicle/subgraph_16/model.py | 94 - 
.../subgraph_17/graph_net.json | 6 - .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_2/input_meta.py | 63 +- .../PP-YOLOE-L_vehicle/subgraph_2/model.py | 7475 +------------- .../subgraph_2/weight_meta.py | 7563 -------------- .../subgraph_3/graph_hash.txt | 2 +- .../subgraph_3/input_meta.py | 71 +- .../PP-YOLOE-L_vehicle/subgraph_3/model.py | 308 +- .../subgraph_3/weight_meta.py | 8 +- .../graph_hash.txt | 0 .../graph_net.json | 0 .../{subgraph_17 => subgraph_4}/input_meta.py | 0 .../{subgraph_17 => subgraph_4}/model.py | 0 .../subgraph_4}/weight_meta.py | 0 .../subgraph_5/graph_hash.txt | 1 - .../subgraph_5/graph_net.json | 6 - .../PP-YOLOE-L_vehicle/subgraph_5/model.py | 7159 ------------- .../subgraph_5/weight_meta.py | 8161 --------------- .../subgraph_7/graph_net.json | 6 - .../subgraph_8/graph_hash.txt | 1 - .../subgraph_8/graph_net.json | 6 - .../subgraph_8/input_meta.py | 62 - .../PP-YOLOE-L_vehicle/subgraph_8/model.py | 229 - .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../subgraph_13/input_meta.py | 9 - .../PP-YOLOE-S_human/subgraph_13/model.py | 4048 -------- .../subgraph_14/graph_hash.txt | 1 - .../subgraph_14/graph_net.json | 6 - .../subgraph_14/input_meta.py | 31 - .../PP-YOLOE-S_human/subgraph_14/model.py | 1050 -- .../subgraph_15/graph_hash.txt | 1 - .../subgraph_15/graph_net.json | 6 - .../PP-YOLOE-S_human/subgraph_15/model.py | 1144 --- .../subgraph_16/graph_hash.txt | 1 - .../subgraph_16/graph_net.json | 6 - .../subgraph_16/input_meta.py | 124 - .../PP-YOLOE-S_human/subgraph_16/model.py | 258 - .../subgraph_3/graph_hash.txt | 2 +- .../PP-YOLOE-S_human/subgraph_3/input_meta.py | 125 +- .../PP-YOLOE-S_human/subgraph_3/model.py | 613 +- .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../subgraph_6/graph_hash.txt | 2 +- .../PP-YOLOE-S_human/subgraph_6/input_meta.py | 37 +- .../PP-YOLOE-S_human/subgraph_6/model.py | 1070 +- .../input_meta.py | 0 .../weight_meta.py | 0 .../subgraph_6/weight_meta.py | 585 ++ .../subgraph_9/graph_hash.txt | 2 +- .../PP-YOLOE-S_human/subgraph_9/input_meta.py | 22 +- .../PP-YOLOE-S_human/subgraph_9/model.py | 4053 +++++++- .../input_meta.py | 9 + .../weight_meta.py | 2200 ++-- .../subgraph_9/weight_meta.py | 3859 +++++++ .../subgraph_0/graph_hash.txt | 2 +- .../subgraph_0/input_meta.py | 78 +- .../PP-YOLOE-S_vehicle/subgraph_0/model.py | 235 +- .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_1/input_meta.py | 77 +- .../PP-YOLOE-S_vehicle/subgraph_1/model.py | 4332 +++++++- .../subgraph_1/weight_meta.py | 4456 ++++++++ .../subgraph_10/graph_hash.txt | 1 - .../subgraph_10/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_10/model.py | 192 - .../subgraph_11/graph_hash.txt | 1 - .../subgraph_11/graph_net.json | 6 - .../subgraph_11/input_meta.py | 91 - .../PP-YOLOE-S_vehicle/subgraph_11/model.py | 229 - .../subgraph_12/graph_hash.txt | 1 - .../subgraph_12/graph_net.json | 6 - .../subgraph_12/input_meta.py | 83 - .../PP-YOLOE-S_vehicle/subgraph_12/model.py | 264 - .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_13/model.py | 1144 --- .../subgraph_14/graph_hash.txt | 1 - .../subgraph_14/graph_net.json | 6 - .../subgraph_14/input_meta.py | 19 - .../PP-YOLOE-S_vehicle/subgraph_14/model.py | 43 - .../subgraph_15/graph_hash.txt | 1 - .../subgraph_15/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_15/model.py | 1050 -- 
.../subgraph_16/graph_hash.txt | 1 - .../subgraph_16/graph_net.json | 6 - .../subgraph_16/input_meta.py | 52 - .../PP-YOLOE-S_vehicle/subgraph_16/model.py | 338 - .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_2/input_meta.py | 60 +- .../PP-YOLOE-S_vehicle/subgraph_2/model.py | 4329 +------- .../subgraph_2/weight_meta.py | 4456 -------- .../subgraph_3/graph_hash.txt | 2 +- .../subgraph_3/input_meta.py | 87 +- .../PP-YOLOE-S_vehicle/subgraph_3/model.py | 253 +- .../subgraph_4/graph_hash.txt | 2 +- .../subgraph_4/input_meta.py | 76 +- .../PP-YOLOE-S_vehicle/subgraph_4/model.py | 272 +- .../subgraph_5/graph_hash.txt | 2 +- .../subgraph_5/input_meta.py | 53 +- .../PP-YOLOE-S_vehicle/subgraph_5/model.py | 4174 +------- .../subgraph_5/weight_meta.py | 3859 ------- .../subgraph_6/graph_hash.txt | 1 - .../subgraph_6/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_6/model.py | 385 - .../subgraph_7/graph_hash.txt | 1 - .../subgraph_7/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_7/model.py | 162 - .../subgraph_8/graph_hash.txt | 1 - .../subgraph_8/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_8/model.py | 34 - .../subgraph_9/graph_hash.txt | 1 - .../subgraph_9/graph_net.json | 6 - .../PP-YOLOE-S_vehicle/subgraph_9/model.py | 509 - .../PP-YOLOE_plus-L/subgraph_0/graph_hash.txt | 2 +- .../PP-YOLOE_plus-L/subgraph_0/input_meta.py | 135 +- .../PP-YOLOE_plus-L/subgraph_0/model.py | 7303 ++++++++++++- .../PP-YOLOE_plus-L/subgraph_0/weight_meta.py | 8160 +++++++++++++++ .../subgraph_10/graph_hash.txt | 1 - .../PP-YOLOE_plus-L/subgraph_10/input_meta.py | 76 - .../PP-YOLOE_plus-L/subgraph_10/model.py | 287 - .../subgraph_12/graph_hash.txt | 1 - .../subgraph_12/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_12/model.py | 158 - .../subgraph_12/weight_meta.py | 7 - .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_13/input_meta.py | 49 - .../PP-YOLOE_plus-L/subgraph_13/model.py | 263 - .../subgraph_15/graph_hash.txt | 1 - .../subgraph_15/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_15/model.py | 1144 --- .../subgraph_16/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_2/graph_hash.txt | 2 +- .../PP-YOLOE_plus-L/subgraph_2/input_meta.py | 71 +- .../PP-YOLOE_plus-L/subgraph_2/model.py | 286 +- .../PP-YOLOE_plus-L/subgraph_3/graph_hash.txt | 2 +- .../PP-YOLOE_plus-L/subgraph_3/input_meta.py | 100 +- .../PP-YOLOE_plus-L/subgraph_3/model.py | 602 +- .../graph_hash.txt | 0 .../graph_net.json | 0 .../{subgraph_16 => subgraph_4}/input_meta.py | 0 .../{subgraph_16 => subgraph_4}/model.py | 0 .../weight_meta.py | 0 .../PP-YOLOE_plus-L/subgraph_5/graph_hash.txt | 2 +- .../PP-YOLOE_plus-L/subgraph_5/input_meta.py | 137 +- .../PP-YOLOE_plus-L/subgraph_5/model.py | 7513 ++------------ .../PP-YOLOE_plus-L/subgraph_5/weight_meta.py | 7997 +-------------- .../PP-YOLOE_plus-L/subgraph_6/graph_hash.txt | 1 - .../PP-YOLOE_plus-L/subgraph_6/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_6/model.py | 247 - .../PP-YOLOE_plus-L/subgraph_6/weight_meta.py | 1 - .../PP-YOLOE_plus-L/subgraph_7/graph_hash.txt | 1 - .../PP-YOLOE_plus-L/subgraph_7/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_7/input_meta.py | 38 - .../PP-YOLOE_plus-L/subgraph_7/model.py | 94 - .../PP-YOLOE_plus-L/subgraph_7/weight_meta.py | 1 - .../PP-YOLOE_plus-L/subgraph_8/graph_hash.txt | 1 - .../PP-YOLOE_plus-L/subgraph_8/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_8/model.py | 34 - .../PP-YOLOE_plus-L/subgraph_8/weight_meta.py | 1 - 
.../PP-YOLOE_plus-L/subgraph_9/graph_hash.txt | 1 - .../PP-YOLOE_plus-L/subgraph_9/graph_net.json | 6 - .../PP-YOLOE_plus-L/subgraph_9/input_meta.py | 31 - .../PP-YOLOE_plus-L/subgraph_9/model.py | 1050 -- .../subgraph_10/graph_hash.txt | 1 - .../PP-YOLOE_plus-M/subgraph_10/input_meta.py | 38 - .../PP-YOLOE_plus-M/subgraph_10/model.py | 94 - .../subgraph_10/weight_meta.py | 1 - .../subgraph_11/graph_hash.txt | 1 - .../subgraph_11/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_11/input_meta.py | 76 - .../PP-YOLOE_plus-M/subgraph_11/model.py | 287 - .../subgraph_11/weight_meta.py | 1 - .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_13/input_meta.py | 19 - .../PP-YOLOE_plus-M/subgraph_13/model.py | 43 - .../subgraph_13/weight_meta.py | 1 - .../subgraph_14/graph_hash.txt | 1 - .../subgraph_14/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_14/model.py | 247 - .../subgraph_14/weight_meta.py | 1 - .../subgraph_15/graph_hash.txt | 1 - .../subgraph_15/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_15/model.py | 5806 ----------- .../subgraph_15/weight_meta.py | 6370 ------------ .../subgraph_16/graph_net.json | 6 - .../subgraph_16/weight_meta.py | 1 - .../subgraph_17/graph_hash.txt | 1 - .../subgraph_17/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_17/model.py | 158 - .../subgraph_17/weight_meta.py | 7 - .../PP-YOLOE_plus-M/subgraph_2/graph_hash.txt | 2 +- .../PP-YOLOE_plus-M/subgraph_2/input_meta.py | 96 +- .../PP-YOLOE_plus-M/subgraph_2/model.py | 630 +- .../PP-YOLOE_plus-M/subgraph_4/graph_hash.txt | 2 +- .../PP-YOLOE_plus-M/subgraph_4/input_meta.py | 89 +- .../PP-YOLOE_plus-M/subgraph_4/model.py | 6092 +++++++++-- .../PP-YOLOE_plus-M/subgraph_4/weight_meta.py | 6218 ++++++++++- .../graph_hash.txt | 0 .../graph_net.json | 0 .../{subgraph_16 => subgraph_5}/input_meta.py | 0 .../{subgraph_16 => subgraph_5}/model.py | 0 .../subgraph_5}/weight_meta.py | 0 .../PP-YOLOE_plus-M/subgraph_6/graph_hash.txt | 2 +- .../PP-YOLOE_plus-M/subgraph_6/input_meta.py | 40 +- .../PP-YOLOE_plus-M/subgraph_6/model.py | 208 +- .../PP-YOLOE_plus-M/subgraph_6/weight_meta.py | 8 +- .../PP-YOLOE_plus-M/subgraph_7/graph_hash.txt | 1 - .../PP-YOLOE_plus-M/subgraph_7/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_7/input_meta.py | 38 - .../PP-YOLOE_plus-M/subgraph_7/model.py | 188 - .../PP-YOLOE_plus-M/subgraph_9/graph_hash.txt | 1 - .../PP-YOLOE_plus-M/subgraph_9/graph_net.json | 6 - .../PP-YOLOE_plus-M/subgraph_9/model.py | 34 - .../PP-YOLOE_plus-M/subgraph_9/weight_meta.py | 1 - .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 73 + .../weight_meta.py | 586 ++ .../input_meta.py | 73 + .../weight_meta.py | 396 +- .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 73 + .../weight_meta.py | 586 ++ .../input_meta.py | 73 + .../weight_meta.py | 580 ++ .../input_meta.py | 73 + .../weight_meta.py | 426 +- .../input_meta.py | 73 + .../weight_meta.py | 586 ++ .../input_meta.py | 28 + .../weight_meta.py | 0 .../input_meta.py | 27 + .../weight_meta.py | 0 .../input_meta.py | 28 + .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 49 +- 
.../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 67 + .../weight_meta.py | 0 .../input_meta.py | 68 + .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 0 .../weight_meta.py | 0 .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_1/input_meta.py | 101 +- .../PP-YOLOE_plus_SOD-L/subgraph_1/model.py | 1214 +-- .../subgraph_1/weight_meta.py | 579 -- .../subgraph_10/graph_hash.txt | 1 - .../subgraph_10/input_meta.py | 38 - .../PP-YOLOE_plus_SOD-L/subgraph_10/model.py | 94 - .../subgraph_10/weight_meta.py | 1 - .../subgraph_11/graph_hash.txt | 1 - .../subgraph_11/graph_net.json | 6 - .../subgraph_11/input_meta.py | 64 - .../PP-YOLOE_plus_SOD-L/subgraph_11/model.py | 244 - .../subgraph_11/weight_meta.py | 1 - .../subgraph_12/graph_hash.txt | 1 - .../subgraph_12/graph_net.json | 6 - .../subgraph_12/input_meta.py | 222 - .../PP-YOLOE_plus_SOD-L/subgraph_12/model.py | 8874 ---------------- .../subgraph_12/weight_meta.py | 8004 --------------- .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../PP-YOLOE_plus_SOD-L/subgraph_13/model.py | 110 - .../subgraph_13/weight_meta.py | 1 - .../subgraph_14/graph_net.json | 6 - .../subgraph_18/graph_hash.txt | 1 - .../subgraph_18/graph_net.json | 6 - .../subgraph_18/input_meta.py | 19 - .../PP-YOLOE_plus_SOD-L/subgraph_18/model.py | 43 - .../subgraph_18/weight_meta.py | 1 - .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_2/input_meta.py | 156 +- .../PP-YOLOE_plus_SOD-L/subgraph_2/model.py | 4126 +++++++- .../input_meta.py | 75 +- .../weight_meta.py | 3989 ++++++++ .../subgraph_2/weight_meta.py | 3988 ++++++++ .../subgraph_3/graph_hash.txt | 2 +- .../subgraph_3/input_meta.py | 59 +- .../PP-YOLOE_plus_SOD-L/subgraph_3/model.py | 536 +- .../subgraph_3/weight_meta.py | 8 +- .../subgraph_4/graph_hash.txt | 2 +- .../subgraph_4/input_meta.py | 143 +- .../PP-YOLOE_plus_SOD-L/subgraph_4/model.py | 7687 ++++++++++---- .../subgraph_4/weight_meta.py | 7906 +++++++++++--- .../subgraph_5/graph_hash.txt | 2 +- .../subgraph_5/input_meta.py | 45 +- .../PP-YOLOE_plus_SOD-L/subgraph_5/model.py | 584 +- .../subgraph_6/graph_hash.txt | 2 +- .../subgraph_6/input_meta.py | 223 +- .../PP-YOLOE_plus_SOD-L/subgraph_6/model.py | 9066 ++++++++++++++++- .../subgraph_6/weight_meta.py | 8003 +++++++++++++++ .../graph_hash.txt | 0 .../graph_net.json | 0 .../{subgraph_14 => subgraph_7}/input_meta.py | 0 .../{subgraph_14 => subgraph_7}/model.py | 0 .../weight_meta.py | 0 .../subgraph_8/graph_hash.txt | 1 - .../subgraph_8/graph_net.json | 6 - .../subgraph_8/input_meta.py | 233 - .../PP-YOLOE_plus_SOD-L/subgraph_8/model.py | 8235 --------------- .../subgraph_8/weight_meta.py | 8595 ---------------- .../input_meta.py | 0 .../weight_meta.py | 0 .../input_meta.py | 28 + .../weight_meta.py | 0 .../input_meta.py | 34 +- .../weight_meta.py | 0 .../subgraph_17/graph_hash.txt | 1 - .../subgraph_17/graph_net.json | 6 - .../PP-YOLOE_plus_SOD-S/subgraph_17/model.py | 94 - .../subgraph_17/weight_meta.py | 1 - .../subgraph_18/graph_hash.txt | 1 - .../subgraph_18/graph_net.json | 6 - .../subgraph_18/input_meta.py | 138 - .../PP-YOLOE_plus_SOD-S/subgraph_18/model.py | 212 - .../subgraph_18/weight_meta.py | 1 - .../input_meta.py | 30 +- .../weight_meta.py | 2 +- .../subgraph_5/graph_hash.txt | 2 +- .../subgraph_5/input_meta.py | 125 +- .../PP-YOLOE_plus_SOD-S/subgraph_5/model.py | 1218 +-- .../subgraph_5/weight_meta.py | 579 -- .../input_meta.py | 7 + .../weight_meta.py | 0 .../subgraph_0/graph_hash.txt | 2 +- 
.../subgraph_0/input_meta.py | 112 +- .../subgraph_0/model.py | 4856 ++++++++- .../subgraph_0/weight_meta.py | 4012 ++++++++ .../subgraph_1/graph_hash.txt | 2 +- .../subgraph_1/input_meta.py | 74 +- .../subgraph_1/model.py | 658 +- .../subgraph_1/weight_meta.py | 96 +- .../subgraph_10/graph_hash.txt | 1 - .../subgraph_10/graph_net.json | 6 - .../subgraph_10/model.py | 162 - .../subgraph_10/weight_meta.py | 7 - .../subgraph_11/graph_hash.txt | 1 - .../subgraph_11/graph_net.json | 6 - .../subgraph_11/input_meta.py | 222 - .../subgraph_11/model.py | 8874 ---------------- .../subgraph_11/weight_meta.py | 8004 --------------- .../subgraph_12/graph_hash.txt | 1 - .../subgraph_12/graph_net.json | 6 - .../subgraph_12/input_meta.py | 102 - .../subgraph_12/model.py | 244 - .../subgraph_12/weight_meta.py | 1 - .../subgraph_13/graph_hash.txt | 1 - .../subgraph_13/graph_net.json | 6 - .../subgraph_13/model.py | 514 - .../subgraph_13/weight_meta.py | 1 - .../subgraph_14/graph_hash.txt | 1 - .../subgraph_14/graph_net.json | 6 - .../subgraph_14/input_meta.py | 145 - .../subgraph_14/model.py | 499 - .../subgraph_14/weight_meta.py | 1 - .../subgraph_2/graph_hash.txt | 2 +- .../subgraph_2/input_meta.py | 211 +- .../subgraph_2/model.py | 5069 +-------- .../subgraph_2/weight_meta.py | 4012 -------- .../subgraph_3/graph_hash.txt | 2 +- .../subgraph_3/input_meta.py | 134 +- .../subgraph_3/model.py | 1156 +-- .../subgraph_3/weight_meta.py | 585 -- .../subgraph_4/graph_hash.txt | 2 +- .../subgraph_4/input_meta.py | 107 +- .../subgraph_4/model.py | 194 +- .../subgraph_5/graph_hash.txt | 2 +- .../subgraph_5/input_meta.py | 154 +- .../subgraph_5/model.py | 576 +- .../subgraph_5/weight_meta.py | 8 +- .../subgraph_6/graph_hash.txt | 2 +- .../subgraph_6/input_meta.py | 269 +- .../subgraph_6/model.py | 8976 +++++++++++++++- .../subgraph_6/weight_meta.py | 8003 +++++++++++++++ .../subgraph_7/graph_hash.txt | 2 +- .../subgraph_7/input_meta.py | 173 +- .../subgraph_7/model.py | 4148 +------- .../subgraph_7/weight_meta.py | 3988 -------- .../subgraph_8/graph_hash.txt | 2 +- .../subgraph_8/input_meta.py | 108 +- .../subgraph_8/model.py | 576 +- .../subgraph_9/graph_hash.txt | 2 +- .../subgraph_9/input_meta.py | 143 +- .../subgraph_9/model.py | 519 +- .../shape_patches_TimesNet_ad/input_meta.py | 9 + .../shape_patches_TimesNet_ad}/weight_meta.py | 96 +- .../shape_patches_TimesNet_ad}/input_meta.py | 0 .../shape_patches_TimesNet_ad}/weight_meta.py | 0 .../shape_patches_TimesNet_ad}/input_meta.py | 0 .../shape_patches_TimesNet_ad}/weight_meta.py | 0 .../TimesNet_ad/subgraph_10/graph_hash.txt | 1 - .../TimesNet_ad/subgraph_10/graph_net.json | 6 - .../PaddleX/TimesNet_ad/subgraph_10/model.py | 37 - .../TimesNet_ad/subgraph_10/weight_meta.py | 1 - .../TimesNet_ad/subgraph_11/graph_hash.txt | 1 - .../TimesNet_ad/subgraph_11/graph_net.json | 6 - .../TimesNet_ad/subgraph_11/input_meta.py | 37 - .../PaddleX/TimesNet_ad/subgraph_11/model.py | 401 - .../TimesNet_ad/subgraph_12/graph_hash.txt | 1 - .../TimesNet_ad/subgraph_12/graph_net.json | 6 - .../PaddleX/TimesNet_ad/subgraph_12/model.py | 54 - .../TimesNet_ad/subgraph_12/weight_meta.py | 1 - .../TimesNet_ad/subgraph_3/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_3/input_meta.py | 38 +- .../PaddleX/TimesNet_ad/subgraph_3/model.py | 304 +- .../TimesNet_ad/subgraph_3/weight_meta.py | 96 +- .../TimesNet_ad/subgraph_4/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_4/input_meta.py | 48 +- .../PaddleX/TimesNet_ad/subgraph_4/model.py | 377 +- .../TimesNet_ad/subgraph_4/weight_meta.py | 237 
- .../TimesNet_ad/subgraph_5/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_5/input_meta.py | 110 +- .../PaddleX/TimesNet_ad/subgraph_5/model.py | 87 +- .../TimesNet_ad/subgraph_5/weight_meta.py | 15 + .../TimesNet_ad/subgraph_6/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_6/input_meta.py | 90 +- .../PaddleX/TimesNet_ad/subgraph_6/model.py | 123 +- .../TimesNet_ad/subgraph_6/weight_meta.py | 15 - .../TimesNet_ad/subgraph_7/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_7/input_meta.py | 21 +- .../PaddleX/TimesNet_ad/subgraph_7/model.py | 218 +- .../TimesNet_ad/subgraph_7/weight_meta.py | 10 +- .../TimesNet_ad/subgraph_8/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_8/input_meta.py | 8 +- .../PaddleX/TimesNet_ad/subgraph_8/model.py | 87 +- .../TimesNet_ad/subgraph_8/weight_meta.py | 8 +- .../TimesNet_ad/subgraph_9/graph_hash.txt | 2 +- .../TimesNet_ad/subgraph_9/input_meta.py | 41 +- .../PaddleX/TimesNet_ad/subgraph_9/model.py | 457 +- .../TimesNet_ad/subgraph_9/weight_meta.py | 239 +- .../ch_RepSVTR_rec/subgraph_1/graph_hash.txt | 1 - .../ch_RepSVTR_rec/subgraph_1/graph_net.json | 6 - .../ch_RepSVTR_rec/subgraph_1/input_meta.py | 9 - .../ch_RepSVTR_rec/subgraph_1/model.py | 666 -- .../ch_RepSVTR_rec/subgraph_1/weight_meta.py | 565 - .../ch_RepSVTR_rec/subgraph_2/graph_hash.txt | 1 - .../ch_RepSVTR_rec/subgraph_2/graph_net.json | 6 - .../ch_RepSVTR_rec/subgraph_2/input_meta.py | 36 - .../ch_RepSVTR_rec/subgraph_2/model.py | 1139 --- .../ch_RepSVTR_rec/subgraph_2/weight_meta.py | 471 - .../ch_RepSVTR_rec/subgraph_3/graph_hash.txt | 1 - .../ch_RepSVTR_rec/subgraph_3/graph_net.json | 6 - .../ch_RepSVTR_rec/subgraph_3/input_meta.py | 36 - .../ch_RepSVTR_rec/subgraph_3/model.py | 1089 -- .../ch_RepSVTR_rec/subgraph_3/weight_meta.py | 471 - .../ch_RepSVTR_rec/subgraph_4/graph_hash.txt | 1 - .../ch_RepSVTR_rec/subgraph_4/graph_net.json | 6 - .../ch_RepSVTR_rec/subgraph_4/input_meta.py | 9 - .../ch_RepSVTR_rec/subgraph_4/model.py | 828 -- .../ch_RepSVTR_rec/subgraph_4/weight_meta.py | 565 - .../ch_RepSVTR_rec/subgraph_5/graph_hash.txt | 1 - .../ch_RepSVTR_rec/subgraph_5/graph_net.json | 6 - .../ch_RepSVTR_rec/subgraph_5/input_meta.py | 9 - .../ch_RepSVTR_rec/subgraph_5/model.py | 804 -- .../ch_RepSVTR_rec/subgraph_5/weight_meta.py | 565 - 570 files changed, 159134 insertions(+), 204873 deletions(-) rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_5 => PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle}/input_meta.py (50%) rename paddle_samples/PaddleX/PP-YOLOE-L_human/{subgraph_10 => subgraph_0/shape_patches_PP-YOLOE-L_vehicle}/weight_meta.py (54%) delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py delete mode 
100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_7 => subgraph_1}/graph_hash.txt (100%) rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_11 => subgraph_1}/graph_net.json (100%) rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_7 => subgraph_1}/input_meta.py (100%) rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_7 => subgraph_1}/model.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_11 => PP-YOLOE-L_vehicle/subgraph_1}/weight_meta.py (100%) delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py delete mode 100644 
paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_17 => subgraph_4}/graph_hash.txt (100%) rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_13 => subgraph_4}/graph_net.json (100%) rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_17 => subgraph_4}/input_meta.py (100%) rename paddle_samples/PaddleX/PP-YOLOE-L_vehicle/{subgraph_17 => subgraph_4}/model.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_12 => PP-YOLOE-L_vehicle/subgraph_4}/weight_meta.py (100%) delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_13 => PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-L_human}/input_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_13 => PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-L_human}/weight_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_7 => PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_6 => PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%) delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py delete 
mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_10 => PP-YOLOE-S_human/subgraph_4/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_15 => PP-YOLOE-S_human/subgraph_4/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_6 => PP-YOLOE-S_human/subgraph_5/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_14 => PP-YOLOE-S_human/subgraph_5/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_13 => PP-YOLOE-S_human/subgraph_6/shape_patches_PP-YOLOE-L_vehicle}/input_meta.py (100%) rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_13 => PP-YOLOE-S_human/subgraph_6/shape_patches_PP-YOLOE-L_vehicle}/weight_meta.py (100%) create mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/input_meta.py rename paddle_samples/PaddleX/PP-YOLOE-S_human/{subgraph_13 => subgraph_9/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (60%) delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py delete mode 100644 
paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json rename paddle_samples/PaddleX/PP-YOLOE_plus-L/{subgraph_16 => subgraph_4}/graph_hash.txt (100%) rename paddle_samples/PaddleX/PP-YOLOE_plus-L/{subgraph_10 => subgraph_4}/graph_net.json (100%) rename paddle_samples/PaddleX/PP-YOLOE_plus-L/{subgraph_16 => subgraph_4}/input_meta.py (100%) rename paddle_samples/PaddleX/PP-YOLOE_plus-L/{subgraph_16 => subgraph_4}/model.py (100%) rename paddle_samples/PaddleX/PP-YOLOE_plus-L/{subgraph_16 => subgraph_4}/weight_meta.py (100%) delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt delete mode 100644 
paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py rename paddle_samples/PaddleX/PP-YOLOE_plus-M/{subgraph_16 => subgraph_5}/graph_hash.txt (100%) rename paddle_samples/PaddleX/PP-YOLOE_plus-M/{subgraph_10 => subgraph_5}/graph_net.json (100%) rename 
paddle_samples/PaddleX/PP-YOLOE_plus-M/{subgraph_16 => subgraph_5}/input_meta.py (100%)
 rename paddle_samples/PaddleX/PP-YOLOE_plus-M/{subgraph_16 => subgraph_5}/model.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_15 => PP-YOLOE_plus-M/subgraph_5}/weight_meta.py (100%)
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_8 => PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_16 => PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_8 => PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-L}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_7 => PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-L}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-M/subgraph_9 => PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-M}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_8 => PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-M}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_6 => PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-L}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_9 => PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-L}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-M/subgraph_14 => PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-M}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_11 => PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-M}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus_SOD-S/subgraph_17 => PP-YOLOE_plus-S/subgraph_17/shape_patches_PP-YOLOE_plus_SOD-S}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_14 => PP-YOLOE_plus-S/subgraph_17/shape_patches_PP-YOLOE_plus_SOD-S}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/input_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_9 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle}/weight_meta.py (52%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_human/subgraph_15 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_human}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_human/subgraph_15 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_human}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_13 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_13 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_15 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-L}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_15 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-L}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/input_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-S_human/subgraph_14 => PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S}/weight_meta.py (50%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_16 => PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_17 => PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_7 => PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_15 => PP-YOLOE_plus-S/subgraph_5/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_15 => PP-YOLOE_plus-S/subgraph_5/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_human/subgraph_9 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-L_human}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_8 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-L_human}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus_SOD-largesize-L/subgraph_13 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human}/input_meta.py (50%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_human/subgraph_16 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_9 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_vehicle}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_11 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_vehicle}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_12 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_14 => PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_12 => PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-L}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_10 => PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-L}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-M/subgraph_17 => PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-M}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_7 => PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-M}/weight_meta.py (100%)
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-M/subgraph_15 => PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L}/input_meta.py (52%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py
 rename paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/{subgraph_14 => subgraph_7}/graph_hash.txt (100%)
 rename paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/{subgraph_10 => subgraph_7}/graph_net.json (100%)
 rename paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/{subgraph_14 => subgraph_7}/input_meta.py (100%)
 rename paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/{subgraph_14 => subgraph_7}/model.py (100%)
 rename paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/{subgraph_14 => subgraph_7}/weight_meta.py (100%)
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE_plus_SOD-L/subgraph_13 => PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-L}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_16 => PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-L}/weight_meta.py (100%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_6 => PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE-L_vehicle/subgraph_14 => PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L}/input_meta.py (64%)
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_8 => PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L}/weight_meta.py (100%)
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE_plus_SOD-largesize-L/subgraph_10 => PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L}/input_meta.py (54%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-M/subgraph_7 => PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L}/weight_meta.py (85%)
 create mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py
 rename paddle_samples/PaddleX/{PP-YOLOE-S_vehicle/subgraph_9 => PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L}/weight_meta.py (100%)
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py
 delete mode 100644 paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py
 create mode 100644 paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/input_meta.py
 rename paddle_samples/PaddleX/{TimesNet_ad/subgraph_11 => TimesNet/subgraph_10/shape_patches_TimesNet_ad}/weight_meta.py (71%)
 rename paddle_samples/PaddleX/{TimesNet_ad/subgraph_12 => TimesNet/subgraph_11/shape_patches_TimesNet_ad}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_10 => TimesNet/subgraph_11/shape_patches_TimesNet_ad}/weight_meta.py (100%)
 rename paddle_samples/PaddleX/{TimesNet_ad/subgraph_10 => TimesNet/subgraph_5/shape_patches_TimesNet_ad}/input_meta.py (100%)
 rename paddle_samples/PaddleX/{PP-YOLOE_plus-L/subgraph_13 => TimesNet/subgraph_5/shape_patches_TimesNet_ad}/weight_meta.py (100%)
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/model.py
 delete mode 100644 paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/model.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/model.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/model.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/model.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/weight_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_net.json
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/input_meta.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/model.py
 delete mode 100644 paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/weight_meta.py

diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt
index 3a198bc60..7b5429dac 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt
+++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/graph_hash.txt
@@ -1 +1 @@
-bfcad265ced09de3d41f5c6ed667e3d03526d992a84e6eacb1408af671245a60
\ No newline at end of file
+d079f80b47d3627f0f874f2ab967abc6e350f273029f03bcf40572198642b666
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py
index eca445c75..30a284ba9 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/input_meta.py
@@ -1,7 +1,9 @@
 class Program_weight_tensor_data_0:
     name = "data_0"
-    shape = [2, 3024]
-    dtype = "int32"
-    min_val = 0
-    max_val = 1
+    shape = [2, 3, 384, 384]
+    dtype = "float32"
+    min_val = float("-2.1179")
+    max_val = float("2.64")
+    mean = float("0.147246")
+    std = float("1.17749")
     data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py
index e57fc793b..36853978e 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/model.py
@@ -5,30 +5,7392 @@ class GraphModule(paddle.nn.Layer):
     def __init__(self):
super().__init__() - def forward(self, data_0): - # pd_op.full: (xi32) <- () + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + 
parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + 
parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + 
parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + data_0, + ): + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_697 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_2, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 
48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_661, + parameter_660, + parameter_659, + parameter_658, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_658, parameter_659, parameter_660, parameter_661 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_657 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_656, + parameter_655, + parameter_654, + parameter_653, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_653, parameter_654, parameter_655, parameter_656 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_652 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_651, + parameter_650, + parameter_649, + parameter_648, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_648, parameter_649, parameter_650, parameter_651 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + 
# pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_647 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_646, + parameter_645, + parameter_644, + parameter_643, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_643, parameter_644, parameter_645, parameter_646 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_642 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_641, + parameter_640, + parameter_639, + parameter_638, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_638, parameter_639, parameter_640, parameter_641 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_637 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_636, + parameter_635, + parameter_634, + parameter_633, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_633, parameter_634, parameter_635, parameter_636 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_632 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_631, + parameter_630, + parameter_629, + parameter_628, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_628, parameter_629, parameter_630, parameter_631 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_627 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_626, + parameter_625, + parameter_624, + parameter_623, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_623, parameter_624, parameter_625, parameter_626 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [], float("1"), paddle.int32, paddle.framework._current_expected_place() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_622 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) + del parameter_621 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = 
paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_0, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + 
batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_600 - # pd_op.not_equal: (2x-1xb) <- (2x-1xi32, xi32) - not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) - del data_0, full_0 + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # 
pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_26, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_550, [1, 
1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_549, + parameter_548, + parameter_547, + parameter_546, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_546, parameter_547, parameter_548, parameter_549 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_545 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_544, + parameter_543, + parameter_542, + parameter_541, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_541, parameter_542, parameter_543, parameter_544 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_540 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_539, + parameter_538, + parameter_537, + parameter_536, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_536, parameter_537, parameter_538, parameter_539 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_535 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_534, + parameter_533, + parameter_532, + parameter_531, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else 
(out, None, None, None, None, None), + ) + del parameter_531, parameter_532, parameter_533, parameter_534 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_530 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_529, + parameter_528, + parameter_527, + parameter_526, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_526, parameter_527, parameter_528, parameter_529 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_525 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_37, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - # pd_op.full_int_array: (0xi64) <- () - full_int_array_0 = [] + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) + del parameter_509 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_1, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", 
+ False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) - # pd_op.sum: (xi64) <- (2x-1xb, 0xi64) - sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) - del full_int_array_0 + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 - # pd_op.full: (xi64) <- () - full_1 = 
paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_479, parameter_480, parameter_481, parameter_482 - # pd_op.greater_than: (xb) <- (xi64, xi64) - greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) - del full_1, not_equal_0, sum_0 + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_47, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
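Throughout this generated file, every call to paddle._C_ops.batch_norm is wrapped in the same (lambda x, f: f(x))(...) idiom. It only normalizes the op's return value: a list or tuple of the six batch-norm outputs is passed through unchanged, while a lone tensor is padded with None placeholders so the six-way unpacking above always succeeds. A minimal stand-alone sketch of the same idiom follows; the helper name _as_six_outputs is illustrative and is not part of the generated graph.

    def _as_six_outputs(out):
        # Pass a list/tuple through unchanged; otherwise pad a single tensor
        # with Nones so callers can always unpack
        # (y, mean, variance, saved_mean, saved_variance, reserve_space).
        if isinstance(out, (list, tuple)):
            return out
        return (out, None, None, None, None, None)

    # Equivalent to the generated pattern:
    #   (y, _, _, _, _, _) = (lambda x, f: f(x))(
    #       paddle._C_ops.batch_norm(...), _as_six_outputs)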
del parameter_448 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, 
tuple)) + else (out, None, None, None, None, None), + ) + del parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 
192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_402, + parameter_401, + parameter_400, + parameter_399, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_398 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) + del parameter_397 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_2, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = 
paddle._C_ops.conv2d( + swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_370, + parameter_369, + parameter_368, + parameter_367, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_367, parameter_368, parameter_369, parameter_370 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_366 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_365, + parameter_364, + parameter_363, + parameter_362, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_362, parameter_363, parameter_364, parameter_365 + + # pd_op.add: 
(2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_361 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_346 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_330, 
full_int_array_1) + del full_int_array_1, parameter_330 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_3, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, 
parameter_298 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(add_41) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_284 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, 
batch_norm__504) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_64 = paddle._C_ops.swish(add_42) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_5 = [swish_56, swish_64] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_45] + + # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + 
), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(add_44) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) 
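+ # Note: two recurring patterns in this generated subgraph are worth calling out.
+ # 1) Every paddle._C_ops.batch_norm call is wrapped as
+ #    (lambda x, f: f(x))(batch_norm(...), lambda out: out if isinstance(out, (list, tuple))
+ #    else (out, None, None, None, None, None)); the wrapper only pads a single-tensor
+ #    result to a 6-tuple so the six-way unpacking used throughout always succeeds.
+ # 2) The nearest_interp (scale 2) + concat steps and the stride-[2, 2] 3x3 conv + concat
+ #    steps around here appear to form a top-down / bottom-up (FPN/PAN-style) feature-fusion
+ #    pattern; this is an observation about the generated ops, not an annotation from the
+ #    original exporter.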
+ conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_74 = paddle._C_ops.swish(add_45) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_7 = [swish_67, swish_74] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + 
batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_29] + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + 
parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(add_47) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 
= paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_84 = paddle._C_ops.swish(add_48) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_9 = [swish_77, swish_84] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_10 = [swish_86, swish_75] + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_10, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + 
parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(add_50) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + 
batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_94 = paddle._C_ops.swish(add_51) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_11 = [swish_87, swish_94] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) + combine_12 = [swish_96, swish_65] + + # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_12, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + 
batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 
384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(add_52) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_53 = 
paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(add_53) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_104 = paddle._C_ops.swish(add_54) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_13 = [swish_97, swish_104] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: 
(2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_53, + add_54, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + 
batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + 
batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + 
batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + 
batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + 
conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + ) - return greater_than_0 + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle/input_meta.py similarity index 50% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle/input_meta.py index f58dc071b..ec4e87e7b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/input_meta.py +++ 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle/input_meta.py @@ -1,9 +1,9 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3, 640, 640] + shape = [2, 3, 512, 512] dtype = "float32" - min_val = float("-2.01516") + min_val = float("-1.9517") max_val = float("2.64") - mean = float("0.187747") - std = float("0.681331") + mean = float("-0.133605") + std = float("0.697919") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py similarity index 54% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py index 133b954fb..25c54b0a2 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [768] dtype = "float32" - min_val = float("-0.743033") - max_val = float("0.507361") - mean = float("0.020663") - std = float("0.126347") + min_val = float("-0.241183") + max_val = float("0.339114") + mean = float("0.111688") + std = float("0.0760614") data = None @@ -13,10 +13,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [768] dtype = "float32" - min_val = float("0.898341") - max_val = float("1.48377") - mean = float("0.987983") - std = float("0.0346382") + min_val = float("0.855286") + max_val = float("1.34027") + mean = float("1.09294") + std = float("0.0413897") data = None @@ -24,10 +24,10 @@ class Program_weight_tensor_parameter_2: name = "parameter_2" shape = [768] dtype = "float32" - min_val = float("0.00273834") - max_val = float("0.117552") - mean = float("0.0109298") - std = float("0.0105056") + min_val = float("0.000633402") + max_val = float("0.0240355") + mean = float("0.002005") + std = float("0.00159656") data = None @@ -35,10 +35,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [768] dtype = "float32" - min_val = float("-0.37129") - max_val = float("0.129239") - mean = float("-0.0442188") - std = float("0.0445754") + min_val = float("-0.0975389") + max_val = float("0.0882256") + mean = float("-0.0156178") + std = float("0.0147547") data = None @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0737572") - max_val = float("0.0366322") - mean = float("-0.000187312") - std = float("0.00262376") + min_val = float("-0.0352945") + max_val = float("0.027285") + mean = float("-9.50896e-05") + std = float("0.0014892") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [384] dtype = "float32" - min_val = float("-0.254357") - max_val = float("0.0598301") - mean = float("-0.0304517") - std = float("0.0365455") + min_val = float("-0.220092") + max_val = float("0.0360991") + mean = float("-0.0289701") + std = float("0.0325433") data = None @@ -68,10 +68,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [384] dtype = "float32" - min_val = float("0.944225") - max_val = float("1.06993") - mean = float("0.98764") - std = float("0.0170711") + min_val = float("0.949185") + max_val = float("1.03882") + mean = float("0.984992") + std = float("0.0122396") 
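Note (editorial illustration only, not part of the generated patch): each weight_meta.py record in the hunks above stores per-tensor summary statistics (shape, dtype, min_val, max_val, mean, std) in place of the raw weight values (data = None). The sketch below shows one way such a record could be rebuilt from an in-memory array; the make_weight_meta helper and its use of NumPy are assumptions for illustration and do not appear anywhere in this patch.

# Hypothetical helper (assumption, not from this patch): rebuild a
# weight_meta.py-style record from an array, mirroring the fields above.
import numpy as np


def make_weight_meta(name, array):
    """Return a dict with the same summary fields used by weight_meta.py."""
    arr = np.asarray(array, dtype=np.float32)
    return {
        "name": name,
        "shape": list(arr.shape),
        "dtype": str(arr.dtype),
        "min_val": float(arr.min()),
        "max_val": float(arr.max()),
        "mean": float(arr.mean()),
        "std": float(arr.std()),
        "data": None,  # raw values are intentionally omitted, as in the samples
    }


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    print(make_weight_meta("parameter_0", rng.normal(0.1, 0.08, size=(768,))))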
data = None @@ -79,10 +79,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [384] dtype = "float32" - min_val = float("0.000787555") - max_val = float("0.0292413") - mean = float("0.00503152") - std = float("0.00299112") + min_val = float("0.000285216") + max_val = float("0.00478627") + mean = float("0.00122854") + std = float("0.00061932") data = None @@ -90,10 +90,10 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [384] dtype = "float32" - min_val = float("-0.0613106") - max_val = float("0.0666846") - mean = float("-0.0116626") - std = float("0.0163377") + min_val = float("-0.0285195") + max_val = float("0.0323284") + mean = float("0.00097355") + std = float("0.01139") data = None @@ -101,10 +101,10 @@ class Program_weight_tensor_parameter_9: name = "parameter_9" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0343938") - max_val = float("0.0285101") - mean = float("-0.000206475") - std = float("0.00196258") + min_val = float("-0.021078") + max_val = float("0.0140467") + mean = float("5.46721e-06") + std = float("0.00115269") data = None @@ -112,10 +112,10 @@ class Program_weight_tensor_parameter_10: name = "parameter_10" shape = [384] dtype = "float32" - min_val = float("-0.254357") - max_val = float("0.0598301") - mean = float("-0.0304517") - std = float("0.0365455") + min_val = float("-0.220092") + max_val = float("0.0360991") + mean = float("-0.0289701") + std = float("0.0325433") data = None @@ -123,10 +123,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [384] dtype = "float32" - min_val = float("0.872284") - max_val = float("1.23739") - mean = float("1.03025") - std = float("0.03542") + min_val = float("0.853263") + max_val = float("1.12594") + mean = float("1.01951") + std = float("0.020693") data = None @@ -134,10 +134,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [384] dtype = "float32" - min_val = float("0.00298812") - max_val = float("0.06913") - mean = float("0.0100035") - std = float("0.00526466") + min_val = float("0.000688955") + max_val = float("0.010658") + mean = float("0.00259363") + std = float("0.001307") data = None @@ -145,10 +145,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [384] dtype = "float32" - min_val = float("-0.217798") - max_val = float("0.069544") - mean = float("-0.0277671") - std = float("0.0366238") + min_val = float("-0.0898062") + max_val = float("0.0697168") + mean = float("-0.0204018") + std = float("0.0193672") data = None @@ -156,10 +156,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0371442") - max_val = float("0.0433044") - mean = float("-5.51279e-05") - std = float("0.00132722") + min_val = float("-0.0184926") + max_val = float("0.0231329") + mean = float("-3.89981e-05") + std = float("0.000775433") data = None @@ -167,10 +167,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [384] dtype = "float32" - min_val = float("-0.374067") - max_val = float("0.0523626") - mean = float("-0.0502448") - std = float("0.0490669") + min_val = float("-0.187634") + max_val = float("0.0397745") + mean = float("-0.0496692") + std = float("0.0341143") data = None @@ -178,10 +178,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [384] dtype = "float32" - min_val = float("0.960272") - max_val = float("1.34778") - mean = float("1.02211") - std = float("0.0411657") 
+ min_val = float("0.923384") + max_val = float("1.15592") + mean = float("1.01789") + std = float("0.0319458") data = None @@ -189,10 +189,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [384] dtype = "float32" - min_val = float("0.00848703") - max_val = float("0.127638") - mean = float("0.0290221") - std = float("0.0141029") + min_val = float("0.00169357") + max_val = float("0.0414876") + mean = float("0.00683062") + std = float("0.00397382") data = None @@ -200,10 +200,10 @@ class Program_weight_tensor_parameter_18: name = "parameter_18" shape = [384] dtype = "float32" - min_val = float("-0.267895") - max_val = float("0.386565") - mean = float("-0.0388769") - std = float("0.0578821") + min_val = float("-0.144802") + max_val = float("0.086576") + mean = float("-0.0246949") + std = float("0.0237346") data = None @@ -211,10 +211,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.025652") - max_val = float("0.0582461") - mean = float("-5.85454e-05") - std = float("0.00148034") + min_val = float("-0.0202825") + max_val = float("0.0290433") + mean = float("-4.22358e-05") + std = float("0.000884567") data = None @@ -222,10 +222,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [384] dtype = "float32" - min_val = float("-0.156742") - max_val = float("0.0105973") - mean = float("-0.0499453") - std = float("0.029959") + min_val = float("-0.13611") + max_val = float("0.0209956") + mean = float("-0.0495068") + std = float("0.0271924") data = None @@ -233,10 +233,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [384] dtype = "float32" - min_val = float("0.910836") - max_val = float("1.05523") - mean = float("0.980981") - std = float("0.013256") + min_val = float("0.940917") + max_val = float("1.03841") + mean = float("0.986233") + std = float("0.0130345") data = None @@ -244,10 +244,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [384] dtype = "float32" - min_val = float("0.000818602") - max_val = float("0.015916") - mean = float("0.00425207") - std = float("0.00240053") + min_val = float("0.000300689") + max_val = float("0.00457802") + mean = float("0.00153576") + std = float("0.000801307") data = None @@ -255,10 +255,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [384] dtype = "float32" - min_val = float("-0.0553324") - max_val = float("0.0530611") - mean = float("-0.00573343") - std = float("0.0163695") + min_val = float("-0.037531") + max_val = float("0.034864") + mean = float("0.000281664") + std = float("0.0103515") data = None @@ -266,10 +266,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0380534") - max_val = float("0.0291262") - mean = float("-0.000145551") - std = float("0.00195161") + min_val = float("-0.019341") + max_val = float("0.0169804") + mean = float("-1.66912e-05") + std = float("0.0011822") data = None @@ -277,10 +277,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [384] dtype = "float32" - min_val = float("-0.156742") - max_val = float("0.0105973") - mean = float("-0.0499453") - std = float("0.029959") + min_val = float("-0.13611") + max_val = float("0.0209956") + mean = float("-0.0495068") + std = float("0.0271924") data = None @@ -288,10 +288,10 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [384] dtype = 
"float32" - min_val = float("0.970749") - max_val = float("1.20629") - mean = float("1.02831") - std = float("0.0394471") + min_val = float("0.966625") + max_val = float("1.10386") + mean = float("1.01865") + std = float("0.0185725") data = None @@ -299,10 +299,10 @@ class Program_weight_tensor_parameter_27: name = "parameter_27" shape = [384] dtype = "float32" - min_val = float("0.00291281") - max_val = float("0.0619909") - mean = float("0.0115855") - std = float("0.0064419") + min_val = float("0.00102883") + max_val = float("0.0119425") + mean = float("0.00332718") + std = float("0.00160318") data = None @@ -310,10 +310,10 @@ class Program_weight_tensor_parameter_28: name = "parameter_28" shape = [384] dtype = "float32" - min_val = float("-0.14316") - max_val = float("0.0743786") - mean = float("-0.0336338") - std = float("0.0347498") + min_val = float("-0.087175") + max_val = float("0.0661578") + mean = float("-0.0253732") + std = float("0.0194598") data = None @@ -321,10 +321,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0326419") - max_val = float("0.0530926") - mean = float("-6.71983e-05") - std = float("0.00132282") + min_val = float("-0.0204281") + max_val = float("0.0272238") + mean = float("-4.94366e-05") + std = float("0.000795963") data = None @@ -332,10 +332,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [384] dtype = "float32" - min_val = float("-0.246103") - max_val = float("0.0162123") - mean = float("-0.0546987") - std = float("0.0390461") + min_val = float("-0.148287") + max_val = float("0.0257711") + mean = float("-0.0507511") + std = float("0.0264773") data = None @@ -343,10 +343,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [384] dtype = "float32" - min_val = float("0.935276") - max_val = float("1.24725") - mean = float("1.0196") - std = float("0.0429498") + min_val = float("0.938638") + max_val = float("1.11514") + mean = float("1.0147") + std = float("0.0355173") data = None @@ -354,10 +354,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [384] dtype = "float32" - min_val = float("0.00642887") - max_val = float("0.14985") - mean = float("0.0275346") - std = float("0.0174424") + min_val = float("0.00170241") + max_val = float("0.0180036") + mean = float("0.00504014") + std = float("0.00243654") data = None @@ -365,10 +365,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [384] dtype = "float32" - min_val = float("-0.213687") - max_val = float("0.100992") - mean = float("-0.0487334") - std = float("0.0524185") + min_val = float("-0.0935511") + max_val = float("0.0491545") + mean = float("-0.00834149") + std = float("0.0230017") data = None @@ -376,10 +376,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0313679") - max_val = float("0.0549118") - mean = float("-6.83835e-05") - std = float("0.00148433") + min_val = float("-0.0202815") + max_val = float("0.0279557") + mean = float("-3.32955e-05") + std = float("0.000914897") data = None @@ -387,10 +387,10 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [384] dtype = "float32" - min_val = float("-0.184919") - max_val = float("0.0322844") - mean = float("-0.053517") - std = float("0.0327273") + min_val = float("-0.153278") + max_val = float("0.0451778") + mean = float("-0.0555992") + std = float("0.0277362") 
data = None @@ -398,10 +398,10 @@ class Program_weight_tensor_parameter_36: name = "parameter_36" shape = [384] dtype = "float32" - min_val = float("0.904243") - max_val = float("1.01869") - mean = float("0.979303") - std = float("0.0159215") + min_val = float("0.932352") + max_val = float("1.05292") + mean = float("0.984355") + std = float("0.0159156") data = None @@ -409,10 +409,10 @@ class Program_weight_tensor_parameter_37: name = "parameter_37" shape = [384] dtype = "float32" - min_val = float("0.00121268") - max_val = float("0.0185698") - mean = float("0.0049722") - std = float("0.00233631") + min_val = float("0.000433048") + max_val = float("0.0042084") + mean = float("0.00195875") + std = float("0.000692107") data = None @@ -420,10 +420,10 @@ class Program_weight_tensor_parameter_38: name = "parameter_38" shape = [384] dtype = "float32" - min_val = float("-0.130073") - max_val = float("0.0529869") - mean = float("-0.00273802") - std = float("0.0173354") + min_val = float("-0.0284852") + max_val = float("0.0252557") + mean = float("-0.00507782") + std = float("0.008977") data = None @@ -431,10 +431,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0417716") - max_val = float("0.0284739") - mean = float("-9.60982e-05") - std = float("0.00206623") + min_val = float("-0.0207499") + max_val = float("0.0193701") + mean = float("-0.000100869") + std = float("0.00123961") data = None @@ -442,10 +442,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [384] dtype = "float32" - min_val = float("-0.184919") - max_val = float("0.0322844") - mean = float("-0.053517") - std = float("0.0327273") + min_val = float("-0.153278") + max_val = float("0.0451778") + mean = float("-0.0555992") + std = float("0.0277362") data = None @@ -453,10 +453,10 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [384] dtype = "float32" - min_val = float("0.974999") - max_val = float("1.21351") - mean = float("1.03068") - std = float("0.0413997") + min_val = float("0.963645") + max_val = float("1.1319") + mean = float("1.02215") + std = float("0.0263352") data = None @@ -464,10 +464,10 @@ class Program_weight_tensor_parameter_42: name = "parameter_42" shape = [384] dtype = "float32" - min_val = float("0.00469089") - max_val = float("0.0653512") - mean = float("0.0143986") - std = float("0.00774657") + min_val = float("0.0016078") + max_val = float("0.0222238") + mean = float("0.00525679") + std = float("0.00277651") data = None @@ -475,10 +475,10 @@ class Program_weight_tensor_parameter_43: name = "parameter_43" shape = [384] dtype = "float32" - min_val = float("-0.187123") - max_val = float("0.0984266") - mean = float("-0.0228005") - std = float("0.0323402") + min_val = float("-0.104951") + max_val = float("0.0448978") + mean = float("-0.0148056") + std = float("0.0210608") data = None @@ -486,10 +486,10 @@ class Program_weight_tensor_parameter_44: name = "parameter_44" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0269551") - max_val = float("0.0469683") - mean = float("-4.80811e-05") - std = float("0.00136096") + min_val = float("-0.0196928") + max_val = float("0.0250277") + mean = float("-3.1454e-05") + std = float("0.000855646") data = None @@ -497,10 +497,10 @@ class Program_weight_tensor_parameter_45: name = "parameter_45" shape = [384] dtype = "float32" - min_val = float("-0.213445") - max_val = float("0.026485") - mean = float("-0.0541075") - std = 
float("0.0385587") + min_val = float("-0.161193") + max_val = float("0.0517648") + mean = float("-0.053389") + std = float("0.0280913") data = None @@ -508,10 +508,10 @@ class Program_weight_tensor_parameter_46: name = "parameter_46" shape = [384] dtype = "float32" - min_val = float("0.92487") - max_val = float("1.16824") - mean = float("1.02125") - std = float("0.0372863") + min_val = float("0.917844") + max_val = float("1.15227") + mean = float("1.01538") + std = float("0.0358938") data = None @@ -519,10 +519,10 @@ class Program_weight_tensor_parameter_47: name = "parameter_47" shape = [384] dtype = "float32" - min_val = float("0.00425419") - max_val = float("0.0987383") - mean = float("0.0223376") - std = float("0.0149169") + min_val = float("0.00206138") + max_val = float("0.0284275") + mean = float("0.00546661") + std = float("0.0027318") data = None @@ -530,10 +530,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [384] dtype = "float32" - min_val = float("-0.187104") - max_val = float("0.162072") - mean = float("-0.0516316") - std = float("0.0556028") + min_val = float("-0.0944194") + max_val = float("0.0675621") + mean = float("-0.0218124") + std = float("0.0260175") data = None @@ -541,10 +541,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0191691") - max_val = float("0.0361507") - mean = float("-8.79686e-05") - std = float("0.00151782") + min_val = float("-0.0217502") + max_val = float("0.0223443") + mean = float("-4.13226e-05") + std = float("0.000970614") data = None @@ -552,10 +552,10 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [384] dtype = "float32" - min_val = float("-0.121349") - max_val = float("0.0778676") - mean = float("-0.0298326") - std = float("0.02283") + min_val = float("-0.101781") + max_val = float("0.056876") + mean = float("-0.0401516") + std = float("0.0228938") data = None @@ -563,10 +563,10 @@ class Program_weight_tensor_parameter_51: name = "parameter_51" shape = [384] dtype = "float32" - min_val = float("0.963274") - max_val = float("1.12241") - mean = float("1.01457") - std = float("0.0265398") + min_val = float("0.963822") + max_val = float("1.11711") + mean = float("1.01317") + std = float("0.0243344") data = None @@ -574,10 +574,10 @@ class Program_weight_tensor_parameter_52: name = "parameter_52" shape = [384] dtype = "float32" - min_val = float("0.00391785") - max_val = float("0.053385") - mean = float("0.00936041") - std = float("0.00450165") + min_val = float("0.00110984") + max_val = float("0.00597262") + mean = float("0.0020846") + std = float("0.000693443") data = None @@ -585,10 +585,10 @@ class Program_weight_tensor_parameter_53: name = "parameter_53" shape = [384] dtype = "float32" - min_val = float("-0.108579") - max_val = float("0.125758") - mean = float("-0.0221365") - std = float("0.0301141") + min_val = float("-0.0495382") + max_val = float("0.0540655") + mean = float("-0.0114017") + std = float("0.0130666") data = None @@ -596,10 +596,10 @@ class Program_weight_tensor_parameter_54: name = "parameter_54" shape = [384, 1152, 1, 1] dtype = "float32" - min_val = float("-0.0419679") - max_val = float("0.0632127") - mean = float("-0.000100512") - std = float("0.00237333") + min_val = float("-0.0410642") + max_val = float("0.0455197") + mean = float("-5.58216e-05") + std = float("0.0014784") data = None @@ -607,10 +607,10 @@ class Program_weight_tensor_parameter_55: name = "parameter_55" shape = 
[384] dtype = "float32" - min_val = float("-0.116774") - max_val = float("0.0159135") - mean = float("-0.0176811") - std = float("0.0160398") + min_val = float("-0.0709703") + max_val = float("0.0169244") + mean = float("-0.0176548") + std = float("0.0126642") data = None @@ -618,10 +618,10 @@ class Program_weight_tensor_parameter_56: name = "parameter_56" shape = [384] dtype = "float32" - min_val = float("0.940853") - max_val = float("1.23804") - mean = float("1.0135") - std = float("0.0258691") + min_val = float("0.913666") + max_val = float("1.1021") + mean = float("1.00906") + std = float("0.0166227") data = None @@ -629,10 +629,10 @@ class Program_weight_tensor_parameter_57: name = "parameter_57" shape = [384] dtype = "float32" - min_val = float("0.00285411") - max_val = float("0.0532654") - mean = float("0.00768658") - std = float("0.00402365") + min_val = float("0.000694125") + max_val = float("0.00878753") + mean = float("0.00162036") + std = float("0.000892236") data = None @@ -640,10 +640,10 @@ class Program_weight_tensor_parameter_58: name = "parameter_58" shape = [384] dtype = "float32" - min_val = float("-0.101351") - max_val = float("0.0758357") - mean = float("-0.0327951") - std = float("0.029396") + min_val = float("-0.0470721") + max_val = float("0.0304015") + mean = float("-0.0113723") + std = float("0.0122988") data = None @@ -651,10 +651,10 @@ class Program_weight_tensor_parameter_59: name = "parameter_59" shape = [384, 1152, 1, 1] dtype = "float32" - min_val = float("-0.0323273") - max_val = float("0.0419256") - mean = float("-0.000154794") - std = float("0.00225172") + min_val = float("-0.0379563") + max_val = float("0.0300735") + mean = float("-5.92181e-05") + std = float("0.00129426") data = None @@ -662,10 +662,10 @@ class Program_weight_tensor_parameter_60: name = "parameter_60" shape = [384] dtype = "float32" - min_val = float("-0.0965203") - max_val = float("0.00581689") - mean = float("-0.0225958") - std = float("0.0169398") + min_val = float("-0.0787137") + max_val = float("0.0034001") + mean = float("-0.0252802") + std = float("0.0143999") data = None @@ -673,10 +673,10 @@ class Program_weight_tensor_parameter_61: name = "parameter_61" shape = [384] dtype = "float32" - min_val = float("0.958471") - max_val = float("1.19094") - mean = float("1.0392") - std = float("0.0317908") + min_val = float("0.98146") + max_val = float("1.12391") + mean = float("1.02741") + std = float("0.0212668") data = None @@ -684,10 +684,10 @@ class Program_weight_tensor_parameter_62: name = "parameter_62" shape = [384] dtype = "float32" - min_val = float("0.00448496") - max_val = float("0.0556805") - mean = float("0.0128026") - std = float("0.00693747") + min_val = float("0.00215154") + max_val = float("0.0252834") + mean = float("0.0067307") + std = float("0.00354134") data = None @@ -695,10 +695,10 @@ class Program_weight_tensor_parameter_63: name = "parameter_63" shape = [384] dtype = "float32" - min_val = float("-0.233107") - max_val = float("0.160893") - mean = float("-0.0188617") - std = float("0.0553687") + min_val = float("-0.277707") + max_val = float("0.114193") + mean = float("-0.0247189") + std = float("0.0461422") data = None @@ -706,10 +706,10 @@ class Program_weight_tensor_parameter_64: name = "parameter_64" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0228928") - max_val = float("0.0361422") - mean = float("-2.08226e-05") - std = float("0.00120166") + min_val = float("-0.031519") + max_val = float("0.0263953") + mean = float("-1.72944e-05") + std = 
float("0.000847953") data = None @@ -717,10 +717,10 @@ class Program_weight_tensor_parameter_65: name = "parameter_65" shape = [384] dtype = "float32" - min_val = float("-0.46246") - max_val = float("0.417289") - mean = float("0.0796105") - std = float("0.135843") + min_val = float("-0.413088") + max_val = float("0.667853") + mean = float("0.255716") + std = float("0.158995") data = None @@ -728,10 +728,10 @@ class Program_weight_tensor_parameter_66: name = "parameter_66" shape = [384] dtype = "float32" - min_val = float("0.861824") - max_val = float("1.37925") - mean = float("0.999753") - std = float("0.0519702") + min_val = float("0.924404") + max_val = float("1.6694") + mean = float("1.17434") + std = float("0.0909085") data = None @@ -739,10 +739,10 @@ class Program_weight_tensor_parameter_67: name = "parameter_67" shape = [384] dtype = "float32" - min_val = float("0.005019") - max_val = float("0.151152") - mean = float("0.0195404") - std = float("0.0157973") + min_val = float("0.00152847") + max_val = float("0.0344264") + mean = float("0.00519486") + std = float("0.00331994") data = None @@ -750,10 +750,10 @@ class Program_weight_tensor_parameter_68: name = "parameter_68" shape = [384] dtype = "float32" - min_val = float("-0.230007") - max_val = float("0.114525") - mean = float("-0.0460026") - std = float("0.0471628") + min_val = float("-0.0946215") + max_val = float("0.0705854") + mean = float("-0.019859") + std = float("0.0214886") data = None @@ -761,10 +761,10 @@ class Program_weight_tensor_parameter_69: name = "parameter_69" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.109087") - max_val = float("0.0690511") - mean = float("-0.000406287") - std = float("0.00575579") + min_val = float("-0.066096") + max_val = float("0.062936") + mean = float("-0.000217452") + std = float("0.00361837") data = None @@ -772,10 +772,10 @@ class Program_weight_tensor_parameter_70: name = "parameter_70" shape = [192] dtype = "float32" - min_val = float("-0.165038") - max_val = float("0.0547988") - mean = float("-0.0386347") - std = float("0.0421154") + min_val = float("-0.257401") + max_val = float("0.0756873") + mean = float("-0.0386734") + std = float("0.0605711") data = None @@ -783,10 +783,10 @@ class Program_weight_tensor_parameter_71: name = "parameter_71" shape = [192] dtype = "float32" - min_val = float("0.859852") - max_val = float("1.08355") - mean = float("0.961161") - std = float("0.0320714") + min_val = float("0.913002") + max_val = float("1.05482") + mean = float("0.97016") + std = float("0.0253977") data = None @@ -794,10 +794,10 @@ class Program_weight_tensor_parameter_72: name = "parameter_72" shape = [192] dtype = "float32" - min_val = float("0.00155973") - max_val = float("0.0305334") - mean = float("0.00919111") - std = float("0.00640562") + min_val = float("0.000555623") + max_val = float("0.0115202") + mean = float("0.00332226") + std = float("0.00222795") data = None @@ -805,10 +805,10 @@ class Program_weight_tensor_parameter_73: name = "parameter_73" shape = [192] dtype = "float32" - min_val = float("-0.0680704") - max_val = float("0.083547") - mean = float("-0.0167198") - std = float("0.027735") + min_val = float("-0.0431262") + max_val = float("0.0415504") + mean = float("-0.00850673") + std = float("0.0137204") data = None @@ -816,10 +816,10 @@ class Program_weight_tensor_parameter_74: name = "parameter_74" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0553851") - max_val = float("0.0384638") - mean = float("-0.000583231") - std = 
float("0.00426065") + min_val = float("-0.0403293") + max_val = float("0.0259594") + mean = float("-0.00030975") + std = float("0.00273458") data = None @@ -827,10 +827,10 @@ class Program_weight_tensor_parameter_75: name = "parameter_75" shape = [192] dtype = "float32" - min_val = float("-0.165038") - max_val = float("0.0547988") - mean = float("-0.0386347") - std = float("0.0421154") + min_val = float("-0.257401") + max_val = float("0.0756873") + mean = float("-0.0386734") + std = float("0.0605711") data = None @@ -838,10 +838,10 @@ class Program_weight_tensor_parameter_76: name = "parameter_76" shape = [192] dtype = "float32" - min_val = float("0.909392") - max_val = float("1.22799") - mean = float("1.04913") - std = float("0.0517355") + min_val = float("0.67347") + max_val = float("1.16453") + mean = float("1.02581") + std = float("0.0489") data = None @@ -849,10 +849,10 @@ class Program_weight_tensor_parameter_77: name = "parameter_77" shape = [192] dtype = "float32" - min_val = float("0.00618387") - max_val = float("0.0601428") - mean = float("0.0162134") - std = float("0.00795116") + min_val = float("0.00163203") + max_val = float("0.0223524") + mean = float("0.00610887") + std = float("0.00292424") data = None @@ -860,10 +860,10 @@ class Program_weight_tensor_parameter_78: name = "parameter_78" shape = [192] dtype = "float32" - min_val = float("-0.21459") - max_val = float("0.292505") - mean = float("-0.0268324") - std = float("0.057947") + min_val = float("-0.110504") + max_val = float("0.0737091") + mean = float("-0.0141394") + std = float("0.0249903") data = None @@ -871,10 +871,10 @@ class Program_weight_tensor_parameter_79: name = "parameter_79" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0437504") - max_val = float("0.0604602") - mean = float("-8.54298e-05") - std = float("0.00300512") + min_val = float("-0.0282871") + max_val = float("0.036242") + mean = float("-4.29571e-05") + std = float("0.00185962") data = None @@ -882,10 +882,10 @@ class Program_weight_tensor_parameter_80: name = "parameter_80" shape = [192] dtype = "float32" - min_val = float("-0.30904") - max_val = float("0.0489039") - mean = float("-0.0760544") - std = float("0.0618318") + min_val = float("-0.254657") + max_val = float("0.0957903") + mean = float("-0.080577") + std = float("0.0593186") data = None @@ -893,10 +893,10 @@ class Program_weight_tensor_parameter_81: name = "parameter_81" shape = [192] dtype = "float32" - min_val = float("0.911397") - max_val = float("1.34725") - mean = float("1.02609") - std = float("0.0557122") + min_val = float("0.858086") + max_val = float("1.31453") + mean = float("1.01565") + std = float("0.0626041") data = None @@ -904,10 +904,10 @@ class Program_weight_tensor_parameter_82: name = "parameter_82" shape = [192] dtype = "float32" - min_val = float("0.00973595") - max_val = float("0.152819") - mean = float("0.0448439") - std = float("0.024794") + min_val = float("0.00360845") + max_val = float("0.0470869") + mean = float("0.0123099") + std = float("0.00713766") data = None @@ -915,10 +915,10 @@ class Program_weight_tensor_parameter_83: name = "parameter_83" shape = [192] dtype = "float32" - min_val = float("-0.186501") - max_val = float("0.249142") - mean = float("-0.0277286") - std = float("0.0570363") + min_val = float("-0.112606") + max_val = float("0.159199") + mean = float("-0.0134364") + std = float("0.0271641") data = None @@ -926,10 +926,10 @@ class Program_weight_tensor_parameter_84: name = "parameter_84" shape = [192, 192, 3, 3] dtype = 
"float32" - min_val = float("-0.0495299") - max_val = float("0.0581408") - mean = float("-0.000118023") - std = float("0.00333522") + min_val = float("-0.0377322") + max_val = float("0.0505119") + mean = float("-6.78957e-05") + std = float("0.00209937") data = None @@ -937,10 +937,10 @@ class Program_weight_tensor_parameter_85: name = "parameter_85" shape = [192] dtype = "float32" - min_val = float("-0.27082") - max_val = float("0.0055029") - mean = float("-0.0905993") - std = float("0.0443888") + min_val = float("-0.2187") + max_val = float("0.0411154") + mean = float("-0.100397") + std = float("0.0464196") data = None @@ -948,10 +948,10 @@ class Program_weight_tensor_parameter_86: name = "parameter_86" shape = [192] dtype = "float32" - min_val = float("0.88134") - max_val = float("1.1226") - mean = float("0.962732") - std = float("0.0271064") + min_val = float("0.889894") + max_val = float("1.08345") + mean = float("0.97001") + std = float("0.0267277") data = None @@ -959,10 +959,10 @@ class Program_weight_tensor_parameter_87: name = "parameter_87" shape = [192] dtype = "float32" - min_val = float("0.00249203") - max_val = float("0.0283251") - mean = float("0.00900569") - std = float("0.00424473") + min_val = float("0.00101443") + max_val = float("0.00913514") + mean = float("0.00312222") + std = float("0.00149668") data = None @@ -970,10 +970,10 @@ class Program_weight_tensor_parameter_88: name = "parameter_88" shape = [192] dtype = "float32" - min_val = float("-0.0503456") - max_val = float("0.0503521") - mean = float("-0.0123481") - std = float("0.015716") + min_val = float("-0.0424533") + max_val = float("0.0299395") + mean = float("-0.00723978") + std = float("0.0103044") data = None @@ -981,10 +981,10 @@ class Program_weight_tensor_parameter_89: name = "parameter_89" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0558178") - max_val = float("0.0423444") - mean = float("-0.000674284") - std = float("0.00453073") + min_val = float("-0.0374012") + max_val = float("0.0307031") + mean = float("-0.000380153") + std = float("0.00281281") data = None @@ -992,10 +992,10 @@ class Program_weight_tensor_parameter_90: name = "parameter_90" shape = [192] dtype = "float32" - min_val = float("-0.27082") - max_val = float("0.0055029") - mean = float("-0.0905993") - std = float("0.0443888") + min_val = float("-0.2187") + max_val = float("0.0411154") + mean = float("-0.100397") + std = float("0.0464196") data = None @@ -1003,10 +1003,10 @@ class Program_weight_tensor_parameter_91: name = "parameter_91" shape = [192] dtype = "float32" - min_val = float("0.941929") - max_val = float("1.2385") - mean = float("1.04237") - std = float("0.0442231") + min_val = float("0.930532") + max_val = float("1.13532") + mean = float("1.02386") + std = float("0.0379141") data = None @@ -1014,10 +1014,10 @@ class Program_weight_tensor_parameter_92: name = "parameter_92" shape = [192] dtype = "float32" - min_val = float("0.00587151") - max_val = float("0.0741055") - mean = float("0.0191318") - std = float("0.010681") + min_val = float("0.00223834") + max_val = float("0.0246658") + mean = float("0.00757584") + std = float("0.00405972") data = None @@ -1025,10 +1025,10 @@ class Program_weight_tensor_parameter_93: name = "parameter_93" shape = [192] dtype = "float32" - min_val = float("-0.148072") - max_val = float("0.0938185") - mean = float("-0.0237551") - std = float("0.0412601") + min_val = float("-0.0893976") + max_val = float("0.0617831") + mean = float("-0.0197007") + std = float("0.0216284") data = 
None @@ -1036,10 +1036,10 @@ class Program_weight_tensor_parameter_94: name = "parameter_94" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0454471") - max_val = float("0.0592261") - mean = float("-0.000107954") - std = float("0.00304373") + min_val = float("-0.03875") + max_val = float("0.0477189") + mean = float("-8.51208e-05") + std = float("0.0019455") data = None @@ -1047,10 +1047,10 @@ class Program_weight_tensor_parameter_95: name = "parameter_95" shape = [192] dtype = "float32" - min_val = float("-0.307199") - max_val = float("0.0609234") - mean = float("-0.111087") - std = float("0.0621482") + min_val = float("-0.230434") + max_val = float("0.0106795") + mean = float("-0.106056") + std = float("0.0515807") data = None @@ -1058,10 +1058,10 @@ class Program_weight_tensor_parameter_96: name = "parameter_96" shape = [192] dtype = "float32" - min_val = float("0.918678") - max_val = float("1.21815") - mean = float("1.02816") - std = float("0.0566506") + min_val = float("0.866582") + max_val = float("1.19747") + mean = float("1.01744") + std = float("0.0615714") data = None @@ -1069,10 +1069,10 @@ class Program_weight_tensor_parameter_97: name = "parameter_97" shape = [192] dtype = "float32" - min_val = float("0.0128713") - max_val = float("0.177337") - mean = float("0.0412506") - std = float("0.0235929") + min_val = float("0.00388242") + max_val = float("0.0321491") + mean = float("0.00938872") + std = float("0.00493863") data = None @@ -1080,10 +1080,10 @@ class Program_weight_tensor_parameter_98: name = "parameter_98" shape = [192] dtype = "float32" - min_val = float("-0.172019") - max_val = float("0.0591701") - mean = float("-0.0428226") - std = float("0.0398324") + min_val = float("-0.0911383") + max_val = float("0.0437329") + mean = float("-0.0127117") + std = float("0.0237587") data = None @@ -1091,10 +1091,10 @@ class Program_weight_tensor_parameter_99: name = "parameter_99" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0466526") - max_val = float("0.0663792") - mean = float("-0.000155177") - std = float("0.00347012") + min_val = float("-0.0428683") + max_val = float("0.060501") + mean = float("-7.44884e-05") + std = float("0.00218307") data = None @@ -1102,10 +1102,10 @@ class Program_weight_tensor_parameter_100: name = "parameter_100" shape = [192] dtype = "float32" - min_val = float("-0.366754") - max_val = float("-0.0134222") - mean = float("-0.100255") - std = float("0.0548872") + min_val = float("-0.331729") + max_val = float("0.0596956") + mean = float("-0.122847") + std = float("0.0596309") data = None @@ -1113,10 +1113,10 @@ class Program_weight_tensor_parameter_101: name = "parameter_101" shape = [192] dtype = "float32" - min_val = float("0.868592") - max_val = float("1.06216") - mean = float("0.959936") - std = float("0.0231102") + min_val = float("0.864417") + max_val = float("1.08298") + mean = float("0.967068") + std = float("0.029395") data = None @@ -1124,10 +1124,10 @@ class Program_weight_tensor_parameter_102: name = "parameter_102" shape = [192] dtype = "float32" - min_val = float("0.00298303") - max_val = float("0.0244492") - mean = float("0.00883176") - std = float("0.00373019") + min_val = float("0.00115593") + max_val = float("0.00832892") + mean = float("0.00330322") + std = float("0.0012261") data = None @@ -1135,10 +1135,10 @@ class Program_weight_tensor_parameter_103: name = "parameter_103" shape = [192] dtype = "float32" - min_val = float("-0.072098") - max_val = float("0.042818") - mean = float("-0.0280475") - std 
= float("0.0244836") + min_val = float("-0.029956") + max_val = float("0.0246558") + mean = float("-0.00718649") + std = float("0.0115832") data = None @@ -1146,10 +1146,10 @@ class Program_weight_tensor_parameter_104: name = "parameter_104" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0469252") - max_val = float("0.0444719") - mean = float("-0.00112462") - std = float("0.00488669") + min_val = float("-0.0306946") + max_val = float("0.0748886") + mean = float("-0.000418031") + std = float("0.00305454") data = None @@ -1157,10 +1157,10 @@ class Program_weight_tensor_parameter_105: name = "parameter_105" shape = [192] dtype = "float32" - min_val = float("-0.366754") - max_val = float("-0.0134222") - mean = float("-0.100255") - std = float("0.0548872") + min_val = float("-0.331729") + max_val = float("0.0596956") + mean = float("-0.122847") + std = float("0.0596309") data = None @@ -1168,10 +1168,10 @@ class Program_weight_tensor_parameter_106: name = "parameter_106" shape = [192] dtype = "float32" - min_val = float("0.935173") - max_val = float("1.20371") - mean = float("1.03374") - std = float("0.043616") + min_val = float("0.930062") + max_val = float("1.13715") + mean = float("1.02214") + std = float("0.0316853") data = None @@ -1179,10 +1179,10 @@ class Program_weight_tensor_parameter_107: name = "parameter_107" shape = [192] dtype = "float32" - min_val = float("0.00859096") - max_val = float("0.107539") - mean = float("0.0237503") - std = float("0.0166029") + min_val = float("0.00308637") + max_val = float("0.0441344") + mean = float("0.00890809") + std = float("0.00562068") data = None @@ -1190,10 +1190,10 @@ class Program_weight_tensor_parameter_108: name = "parameter_108" shape = [192] dtype = "float32" - min_val = float("-0.162328") - max_val = float("0.123377") - mean = float("-0.0156432") - std = float("0.0485434") + min_val = float("-0.101372") + max_val = float("0.040673") + mean = float("-0.0117882") + std = float("0.0223792") data = None @@ -1201,10 +1201,10 @@ class Program_weight_tensor_parameter_109: name = "parameter_109" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0520373") - max_val = float("0.0633059") - mean = float("-6.95137e-05") - std = float("0.00322718") + min_val = float("-0.0356841") + max_val = float("0.0579415") + mean = float("-6.02996e-05") + std = float("0.00208653") data = None @@ -1212,10 +1212,10 @@ class Program_weight_tensor_parameter_110: name = "parameter_110" shape = [192] dtype = "float32" - min_val = float("-0.516767") - max_val = float("-0.0203543") - mean = float("-0.126034") - std = float("0.0638195") + min_val = float("-0.348121") + max_val = float("0.134135") + mean = float("-0.132564") + std = float("0.0683482") data = None @@ -1223,10 +1223,10 @@ class Program_weight_tensor_parameter_111: name = "parameter_111" shape = [192] dtype = "float32" - min_val = float("0.849485") - max_val = float("1.2535") - mean = float("1.02927") - std = float("0.0639047") + min_val = float("0.883098") + max_val = float("1.33245") + mean = float("1.01684") + std = float("0.066269") data = None @@ -1234,10 +1234,10 @@ class Program_weight_tensor_parameter_112: name = "parameter_112" shape = [192] dtype = "float32" - min_val = float("0.0148703") - max_val = float("0.241855") - mean = float("0.0348458") - std = float("0.0220686") + min_val = float("0.00419839") + max_val = float("0.0370941") + mean = float("0.0100871") + std = float("0.00543772") data = None @@ -1245,10 +1245,10 @@ class Program_weight_tensor_parameter_113: 
name = "parameter_113" shape = [192] dtype = "float32" - min_val = float("-0.185102") - max_val = float("0.184245") - mean = float("-0.0439409") - std = float("0.0540873") + min_val = float("-0.10148") + max_val = float("0.0669823") + mean = float("-0.0195411") + std = float("0.0241009") data = None @@ -1256,10 +1256,10 @@ class Program_weight_tensor_parameter_114: name = "parameter_114" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0395493") - max_val = float("0.0583323") - mean = float("-0.000157674") - std = float("0.00373071") + min_val = float("-0.0441361") + max_val = float("0.0879773") + mean = float("-7.14624e-05") + std = float("0.00242393") data = None @@ -1267,10 +1267,10 @@ class Program_weight_tensor_parameter_115: name = "parameter_115" shape = [192] dtype = "float32" - min_val = float("-0.2713") - max_val = float("0.0560855") - mean = float("-0.0865079") - std = float("0.0441199") + min_val = float("-0.248584") + max_val = float("0.0643379") + mean = float("-0.0972664") + std = float("0.0449312") data = None @@ -1278,10 +1278,10 @@ class Program_weight_tensor_parameter_116: name = "parameter_116" shape = [192] dtype = "float32" - min_val = float("0.915361") - max_val = float("1.26769") - mean = float("1.02552") - std = float("0.0564528") + min_val = float("0.916261") + max_val = float("1.23422") + mean = float("1.01788") + std = float("0.0458154") data = None @@ -1289,10 +1289,10 @@ class Program_weight_tensor_parameter_117: name = "parameter_117" shape = [192] dtype = "float32" - min_val = float("0.00711924") - max_val = float("0.0896093") - mean = float("0.0157688") - std = float("0.00827259") + min_val = float("0.00242366") + max_val = float("0.0139676") + mean = float("0.00484591") + std = float("0.00171238") data = None @@ -1300,10 +1300,10 @@ class Program_weight_tensor_parameter_118: name = "parameter_118" shape = [192] dtype = "float32" - min_val = float("-0.126929") - max_val = float("0.130507") - mean = float("-0.026764") - std = float("0.0351332") + min_val = float("-0.0716758") + max_val = float("0.0467604") + mean = float("-0.01717") + std = float("0.0186884") data = None @@ -1311,10 +1311,10 @@ class Program_weight_tensor_parameter_119: name = "parameter_119" shape = [192, 576, 1, 1] dtype = "float32" - min_val = float("-0.0617836") - max_val = float("0.0625637") - mean = float("-0.000234352") - std = float("0.00557708") + min_val = float("-0.0539295") + max_val = float("0.0629534") + mean = float("-0.000154038") + std = float("0.00352968") data = None @@ -1322,10 +1322,10 @@ class Program_weight_tensor_parameter_120: name = "parameter_120" shape = [192] dtype = "float32" - min_val = float("-0.162411") - max_val = float("0.0343933") - mean = float("-0.0146927") - std = float("0.026659") + min_val = float("-0.165453") + max_val = float("0.0408002") + mean = float("-0.033107") + std = float("0.0302826") data = None @@ -1333,10 +1333,10 @@ class Program_weight_tensor_parameter_121: name = "parameter_121" shape = [192] dtype = "float32" - min_val = float("0.896164") - max_val = float("1.15119") - mean = float("1.0042") - std = float("0.0388476") + min_val = float("0.914357") + max_val = float("1.29586") + mean = float("1.00068") + std = float("0.0391306") data = None @@ -1344,10 +1344,10 @@ class Program_weight_tensor_parameter_122: name = "parameter_122" shape = [192] dtype = "float32" - min_val = float("0.00389326") - max_val = float("0.0742727") - mean = float("0.0112922") - std = float("0.00784653") + min_val = float("0.00125772") + max_val = 
float("0.0253186") + mean = float("0.0035712") + std = float("0.00260193") data = None @@ -1355,10 +1355,10 @@ class Program_weight_tensor_parameter_123: name = "parameter_123" shape = [192] dtype = "float32" - min_val = float("-0.0913461") - max_val = float("0.0634663") - mean = float("-0.0244524") - std = float("0.0247669") + min_val = float("-0.0483963") + max_val = float("0.0286654") + mean = float("-0.0102664") + std = float("0.0142139") data = None @@ -1366,10 +1366,10 @@ class Program_weight_tensor_parameter_124: name = "parameter_124" shape = [192, 576, 1, 1] dtype = "float32" - min_val = float("-0.0566803") - max_val = float("0.0710687") - mean = float("-0.000235042") - std = float("0.00475399") + min_val = float("-0.052518") + max_val = float("0.0664707") + mean = float("-9.00415e-05") + std = float("0.00298878") data = None @@ -1377,10 +1377,10 @@ class Program_weight_tensor_parameter_125: name = "parameter_125" shape = [192] dtype = "float32" - min_val = float("-0.14785") - max_val = float("0.00919369") - mean = float("-0.043826") - std = float("0.0292536") + min_val = float("-0.156099") + max_val = float("0.0110499") + mean = float("-0.0538026") + std = float("0.030917") data = None @@ -1388,10 +1388,10 @@ class Program_weight_tensor_parameter_126: name = "parameter_126" shape = [192] dtype = "float32" - min_val = float("0.94402") - max_val = float("1.18839") - mean = float("1.03601") - std = float("0.0346975") + min_val = float("0.853169") + max_val = float("1.1744") + mean = float("1.00826") + std = float("0.0376149") data = None @@ -1399,10 +1399,10 @@ class Program_weight_tensor_parameter_127: name = "parameter_127" shape = [192] dtype = "float32" - min_val = float("0.00378721") - max_val = float("0.0744969") - mean = float("0.0185453") - std = float("0.0106259") + min_val = float("0.00348672") + max_val = float("0.0403159") + mean = float("0.0111201") + std = float("0.00590273") data = None @@ -1410,10 +1410,10 @@ class Program_weight_tensor_parameter_128: name = "parameter_128" shape = [192] dtype = "float32" - min_val = float("-0.529234") - max_val = float("0.400014") - mean = float("-0.0329347") - std = float("0.115644") + min_val = float("-0.272761") + max_val = float("0.304479") + mean = float("-0.0311626") + std = float("0.0877005") data = None @@ -1421,10 +1421,10 @@ class Program_weight_tensor_parameter_129: name = "parameter_129" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.033215") - max_val = float("0.0441369") - mean = float("-4.11432e-05") - std = float("0.0028055") + min_val = float("-0.0436434") + max_val = float("0.0390175") + mean = float("-2.75791e-05") + std = float("0.00207461") data = None @@ -1432,10 +1432,10 @@ class Program_weight_tensor_parameter_130: name = "parameter_130" shape = [192] dtype = "float32" - min_val = float("-0.797229") - max_val = float("1.67957") - mean = float("0.205711") - std = float("0.338337") + min_val = float("-0.731877") + max_val = float("1.79262") + mean = float("0.373936") + std = float("0.434119") data = None @@ -1443,10 +1443,10 @@ class Program_weight_tensor_parameter_131: name = "parameter_131" shape = [192] dtype = "float32" - min_val = float("0.583548") - max_val = float("1.53646") - mean = float("0.971373") - std = float("0.108279") + min_val = float("0.625408") + max_val = float("1.70704") + mean = float("1.16439") + std = float("0.199739") data = None @@ -1454,10 +1454,10 @@ class Program_weight_tensor_parameter_132: name = "parameter_132" shape = [192] dtype = "float32" - min_val = 
float("0.00968032") - max_val = float("0.326665") - mean = float("0.0366729") - std = float("0.0364719") + min_val = float("0.00270591") + max_val = float("0.0641962") + mean = float("0.0133792") + std = float("0.0107877") data = None @@ -1465,10 +1465,10 @@ class Program_weight_tensor_parameter_133: name = "parameter_133" shape = [192] dtype = "float32" - min_val = float("-0.384641") - max_val = float("0.143155") - mean = float("-0.0453675") - std = float("0.063487") + min_val = float("-0.201918") + max_val = float("0.126312") + mean = float("-0.02014") + std = float("0.0395955") data = None @@ -1476,10 +1476,10 @@ class Program_weight_tensor_parameter_134: name = "parameter_134" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.162906") - max_val = float("0.103755") - mean = float("-0.000761071") - std = float("0.0121564") + min_val = float("-0.124441") + max_val = float("0.101297") + mean = float("-0.000428392") + std = float("0.00839381") data = None @@ -1487,10 +1487,10 @@ class Program_weight_tensor_parameter_135: name = "parameter_135" shape = [96] dtype = "float32" - min_val = float("-0.282247") - max_val = float("0.281851") - mean = float("0.00710046") - std = float("0.11669") + min_val = float("-0.646764") + max_val = float("0.287418") + mean = float("-0.036847") + std = float("0.203964") data = None @@ -1498,10 +1498,10 @@ class Program_weight_tensor_parameter_136: name = "parameter_136" shape = [96] dtype = "float32" - min_val = float("0.77923") - max_val = float("1.27017") - mean = float("0.926291") - std = float("0.0711138") + min_val = float("0.743727") + max_val = float("1.31816") + mean = float("0.928051") + std = float("0.0891314") data = None @@ -1509,10 +1509,10 @@ class Program_weight_tensor_parameter_137: name = "parameter_137" shape = [96] dtype = "float32" - min_val = float("0.00244381") - max_val = float("0.0367146") - mean = float("0.014305") - std = float("0.0077109") + min_val = float("0.00126457") + max_val = float("0.0283809") + mean = float("0.00872036") + std = float("0.0074912") data = None @@ -1520,10 +1520,10 @@ class Program_weight_tensor_parameter_138: name = "parameter_138" shape = [96] dtype = "float32" - min_val = float("-0.0509503") - max_val = float("0.0931656") - mean = float("-0.0103877") - std = float("0.0225298") + min_val = float("-0.0533211") + max_val = float("0.054261") + mean = float("-0.00777041") + std = float("0.0241502") data = None @@ -1531,10 +1531,10 @@ class Program_weight_tensor_parameter_139: name = "parameter_139" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.10638") - max_val = float("0.0638405") - mean = float("-0.00110258") - std = float("0.00883031") + min_val = float("-0.0662853") + max_val = float("0.0498423") + mean = float("-0.000819471") + std = float("0.00712985") data = None @@ -1542,10 +1542,10 @@ class Program_weight_tensor_parameter_140: name = "parameter_140" shape = [96] dtype = "float32" - min_val = float("-0.282247") - max_val = float("0.281851") - mean = float("0.00710046") - std = float("0.11669") + min_val = float("-0.646764") + max_val = float("0.287418") + mean = float("-0.036847") + std = float("0.203964") data = None @@ -1553,10 +1553,10 @@ class Program_weight_tensor_parameter_141: name = "parameter_141" shape = [96] dtype = "float32" - min_val = float("0.686595") - max_val = float("1.34871") - mean = float("1.04697") - std = float("0.0915013") + min_val = float("0.478759") + max_val = float("1.3967") + mean = float("1.04303") + std = float("0.135316") data = None @@ 
-1564,10 +1564,10 @@ class Program_weight_tensor_parameter_142: name = "parameter_142" shape = [96] dtype = "float32" - min_val = float("0.00849613") - max_val = float("0.0476423") - mean = float("0.0241855") - std = float("0.00965118") + min_val = float("0.00456047") + max_val = float("0.0957772") + mean = float("0.0187912") + std = float("0.0159306") data = None @@ -1575,10 +1575,10 @@ class Program_weight_tensor_parameter_143: name = "parameter_143" shape = [96] dtype = "float32" - min_val = float("-0.176894") - max_val = float("0.124313") - mean = float("-0.0161684") - std = float("0.0511988") + min_val = float("-0.189351") + max_val = float("0.148294") + mean = float("-0.00279725") + std = float("0.0509242") data = None @@ -1586,10 +1586,10 @@ class Program_weight_tensor_parameter_144: name = "parameter_144" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.074123") - max_val = float("0.0992917") - mean = float("-0.000128278") - std = float("0.00660925") + min_val = float("-0.0753107") + max_val = float("0.0647631") + mean = float("-7.46307e-05") + std = float("0.00493989") data = None @@ -1597,10 +1597,10 @@ class Program_weight_tensor_parameter_145: name = "parameter_145" shape = [96] dtype = "float32" - min_val = float("-0.457141") - max_val = float("0.284175") - mean = float("-0.137858") - std = float("0.150133") + min_val = float("-0.807012") + max_val = float("0.639205") + mean = float("-0.135788") + std = float("0.222961") data = None @@ -1608,10 +1608,10 @@ class Program_weight_tensor_parameter_146: name = "parameter_146" shape = [96] dtype = "float32" - min_val = float("0.828432") - max_val = float("1.7552") - mean = float("1.0098") - std = float("0.141163") + min_val = float("0.500992") + max_val = float("1.52738") + mean = float("0.986272") + std = float("0.139979") data = None @@ -1619,10 +1619,10 @@ class Program_weight_tensor_parameter_147: name = "parameter_147" shape = [96] dtype = "float32" - min_val = float("0.0197657") - max_val = float("0.21109") - mean = float("0.0558951") - std = float("0.0328392") + min_val = float("0.00448636") + max_val = float("0.078761") + mean = float("0.0176369") + std = float("0.0131972") data = None @@ -1630,10 +1630,10 @@ class Program_weight_tensor_parameter_148: name = "parameter_148" shape = [96] dtype = "float32" - min_val = float("-0.177509") - max_val = float("0.117139") - mean = float("-0.0272736") - std = float("0.0445487") + min_val = float("-0.232282") + max_val = float("0.112253") + mean = float("0.00311495") + std = float("0.0583791") data = None @@ -1641,10 +1641,10 @@ class Program_weight_tensor_parameter_149: name = "parameter_149" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0848417") - max_val = float("0.0927675") - mean = float("-0.000388559") - std = float("0.00736544") + min_val = float("-0.0632043") + max_val = float("0.0542453") + mean = float("-0.000286044") + std = float("0.00552684") data = None @@ -1652,10 +1652,10 @@ class Program_weight_tensor_parameter_150: name = "parameter_150" shape = [96] dtype = "float32" - min_val = float("-0.394379") - max_val = float("0.00738925") - mean = float("-0.148925") - std = float("0.0838505") + min_val = float("-0.375565") + max_val = float("0.21532") + mean = float("-0.182874") + std = float("0.131104") data = None @@ -1663,10 +1663,10 @@ class Program_weight_tensor_parameter_151: name = "parameter_151" shape = [96] dtype = "float32" - min_val = float("0.717037") - max_val = float("0.995563") - mean = float("0.881515") - std = 
float("0.0561052") + min_val = float("0.660313") + max_val = float("1.1612") + mean = float("0.867514") + std = float("0.0695054") data = None @@ -1674,10 +1674,10 @@ class Program_weight_tensor_parameter_152: name = "parameter_152" shape = [96] dtype = "float32" - min_val = float("0.00228925") - max_val = float("0.0352953") - mean = float("0.0132225") - std = float("0.00553136") + min_val = float("0.00209929") + max_val = float("0.0217899") + mean = float("0.00748623") + std = float("0.00298326") data = None @@ -1685,10 +1685,10 @@ class Program_weight_tensor_parameter_153: name = "parameter_153" shape = [96] dtype = "float32" - min_val = float("-0.0407708") - max_val = float("0.0474609") - mean = float("0.00908712") - std = float("0.0191192") + min_val = float("-0.0465666") + max_val = float("0.039584") + mean = float("-0.0139749") + std = float("0.0171722") data = None @@ -1696,10 +1696,10 @@ class Program_weight_tensor_parameter_154: name = "parameter_154" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0741001") - max_val = float("0.0598664") - mean = float("-0.000306918") - std = float("0.00908011") + min_val = float("-0.063312") + max_val = float("0.0527997") + mean = float("-0.00133358") + std = float("0.00750932") data = None @@ -1707,10 +1707,10 @@ class Program_weight_tensor_parameter_155: name = "parameter_155" shape = [96] dtype = "float32" - min_val = float("-0.394379") - max_val = float("0.00738925") - mean = float("-0.148925") - std = float("0.0838505") + min_val = float("-0.375565") + max_val = float("0.21532") + mean = float("-0.182874") + std = float("0.131104") data = None @@ -1718,10 +1718,10 @@ class Program_weight_tensor_parameter_156: name = "parameter_156" shape = [96] dtype = "float32" - min_val = float("0.805436") - max_val = float("1.22568") - mean = float("1.05162") - std = float("0.0896051") + min_val = float("0.801681") + max_val = float("1.29162") + mean = float("1.01024") + std = float("0.0817165") data = None @@ -1729,10 +1729,10 @@ class Program_weight_tensor_parameter_157: name = "parameter_157" shape = [96] dtype = "float32" - min_val = float("0.00804892") - max_val = float("0.0701516") - mean = float("0.0242923") - std = float("0.0134998") + min_val = float("0.00485304") + max_val = float("0.0746222") + mean = float("0.0201209") + std = float("0.0145062") data = None @@ -1740,10 +1740,10 @@ class Program_weight_tensor_parameter_158: name = "parameter_158" shape = [96] dtype = "float32" - min_val = float("-0.12638") - max_val = float("0.0436454") - mean = float("-0.0376916") - std = float("0.0295355") + min_val = float("-0.102996") + max_val = float("0.0561913") + mean = float("-0.0165577") + std = float("0.0346552") data = None @@ -1751,10 +1751,10 @@ class Program_weight_tensor_parameter_159: name = "parameter_159" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0809044") - max_val = float("0.126721") - mean = float("-0.00048597") - std = float("0.00701167") + min_val = float("-0.0719449") + max_val = float("0.0662486") + mean = float("-0.000336108") + std = float("0.0053214") data = None @@ -1762,10 +1762,10 @@ class Program_weight_tensor_parameter_160: name = "parameter_160" shape = [96] dtype = "float32" - min_val = float("-0.497069") - max_val = float("0.0662031") - mean = float("-0.208094") - std = float("0.125894") + min_val = float("-0.500131") + max_val = float("0.31631") + mean = float("-0.192663") + std = float("0.162844") data = None @@ -1773,10 +1773,10 @@ class Program_weight_tensor_parameter_161: name = 
"parameter_161" shape = [96] dtype = "float32" - min_val = float("0.70186") - max_val = float("1.54155") - mean = float("0.99652") - std = float("0.130955") + min_val = float("0.725877") + max_val = float("1.31136") + mean = float("0.944603") + std = float("0.105848") data = None @@ -1784,10 +1784,10 @@ class Program_weight_tensor_parameter_162: name = "parameter_162" shape = [96] dtype = "float32" - min_val = float("0.0257306") - max_val = float("0.191174") - mean = float("0.0583405") - std = float("0.0285068") + min_val = float("0.00343356") + max_val = float("0.0319398") + mean = float("0.00955323") + std = float("0.00603822") data = None @@ -1795,10 +1795,10 @@ class Program_weight_tensor_parameter_163: name = "parameter_163" shape = [96] dtype = "float32" - min_val = float("-0.124903") - max_val = float("0.0136205") - mean = float("-0.0566505") - std = float("0.0340341") + min_val = float("-0.114596") + max_val = float("0.088809") + mean = float("0.0165881") + std = float("0.0398273") data = None @@ -1806,10 +1806,10 @@ class Program_weight_tensor_parameter_164: name = "parameter_164" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0825468") - max_val = float("0.0998638") - mean = float("-0.000585607") - std = float("0.00815449") + min_val = float("-0.0822766") + max_val = float("0.0772172") + mean = float("-0.000347346") + std = float("0.00616296") data = None @@ -1817,10 +1817,10 @@ class Program_weight_tensor_parameter_165: name = "parameter_165" shape = [96] dtype = "float32" - min_val = float("-0.488308") - max_val = float("0.0969995") - mean = float("-0.1903") - std = float("0.108395") + min_val = float("-0.60397") + max_val = float("0.0882115") + mean = float("-0.234695") + std = float("0.141888") data = None @@ -1828,10 +1828,10 @@ class Program_weight_tensor_parameter_166: name = "parameter_166" shape = [96] dtype = "float32" - min_val = float("0.693424") - max_val = float("1.02499") - mean = float("0.878418") - std = float("0.0578356") + min_val = float("0.705014") + max_val = float("1.02807") + mean = float("0.909017") + std = float("0.0648434") data = None @@ -1839,10 +1839,10 @@ class Program_weight_tensor_parameter_167: name = "parameter_167" shape = [96] dtype = "float32" - min_val = float("0.00563897") - max_val = float("0.0250253") - mean = float("0.0115188") - std = float("0.00399237") + min_val = float("0.00465651") + max_val = float("0.0323499") + mean = float("0.0105922") + std = float("0.00456131") data = None @@ -1850,10 +1850,10 @@ class Program_weight_tensor_parameter_168: name = "parameter_168" shape = [96] dtype = "float32" - min_val = float("-0.0555785") - max_val = float("0.0182289") - mean = float("-0.0150525") - std = float("0.0144486") + min_val = float("-0.0572868") + max_val = float("0.0528458") + mean = float("-0.0101465") + std = float("0.0295067") data = None @@ -1861,10 +1861,10 @@ class Program_weight_tensor_parameter_169: name = "parameter_169" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0682249") - max_val = float("0.0726027") - mean = float("-0.00190267") - std = float("0.0112706") + min_val = float("-0.0723145") + max_val = float("0.0736894") + mean = float("-0.0014416") + std = float("0.00966079") data = None @@ -1872,10 +1872,10 @@ class Program_weight_tensor_parameter_170: name = "parameter_170" shape = [96] dtype = "float32" - min_val = float("-0.488308") - max_val = float("0.0969995") - mean = float("-0.1903") - std = float("0.108395") + min_val = float("-0.60397") + max_val = float("0.0882115") + mean = 
float("-0.234695") + std = float("0.141888") data = None @@ -1883,10 +1883,10 @@ class Program_weight_tensor_parameter_171: name = "parameter_171" shape = [96] dtype = "float32" - min_val = float("0.64497") - max_val = float("1.26041") - mean = float("1.02347") - std = float("0.096841") + min_val = float("0.618521") + max_val = float("1.21767") + mean = float("0.958308") + std = float("0.106787") data = None @@ -1894,10 +1894,10 @@ class Program_weight_tensor_parameter_172: name = "parameter_172" shape = [96] dtype = "float32" - min_val = float("0.0135838") - max_val = float("0.108919") - mean = float("0.0303804") - std = float("0.0148297") + min_val = float("0.00884074") + max_val = float("0.0914745") + mean = float("0.0314621") + std = float("0.0183233") data = None @@ -1905,10 +1905,10 @@ class Program_weight_tensor_parameter_173: name = "parameter_173" shape = [96] dtype = "float32" - min_val = float("-0.0953459") - max_val = float("0.045938") - mean = float("-0.0243831") - std = float("0.0289356") + min_val = float("-0.160652") + max_val = float("0.12782") + mean = float("-0.004871") + std = float("0.0578784") data = None @@ -1916,10 +1916,10 @@ class Program_weight_tensor_parameter_174: name = "parameter_174" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0690141") - max_val = float("0.0907724") - mean = float("-0.000336498") - std = float("0.00782096") + min_val = float("-0.0638916") + max_val = float("0.0818362") + mean = float("-0.000246873") + std = float("0.00600333") data = None @@ -1927,10 +1927,10 @@ class Program_weight_tensor_parameter_175: name = "parameter_175" shape = [96] dtype = "float32" - min_val = float("-0.751265") - max_val = float("0.0462032") - mean = float("-0.246976") - std = float("0.15091") + min_val = float("-0.81651") + max_val = float("0.693971") + mean = float("-0.221258") + std = float("0.247267") data = None @@ -1938,10 +1938,10 @@ class Program_weight_tensor_parameter_176: name = "parameter_176" shape = [96] dtype = "float32" - min_val = float("0.715124") - max_val = float("1.24904") - mean = float("0.989016") - std = float("0.0990378") + min_val = float("0.652024") + max_val = float("1.50005") + mean = float("0.904973") + std = float("0.115969") data = None @@ -1949,10 +1949,10 @@ class Program_weight_tensor_parameter_177: name = "parameter_177" shape = [96] dtype = "float32" - min_val = float("0.019195") - max_val = float("0.100826") - mean = float("0.0410936") - std = float("0.0178574") + min_val = float("0.00583431") + max_val = float("0.0907615") + mean = float("0.0165982") + std = float("0.0160351") data = None @@ -1960,10 +1960,10 @@ class Program_weight_tensor_parameter_178: name = "parameter_178" shape = [96] dtype = "float32" - min_val = float("-0.191045") - max_val = float("0.167209") - mean = float("-0.0509906") - std = float("0.0585368") + min_val = float("-0.201578") + max_val = float("0.211771") + mean = float("-0.00815532") + std = float("0.0732757") data = None @@ -1971,10 +1971,10 @@ class Program_weight_tensor_parameter_179: name = "parameter_179" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.109709") - max_val = float("0.102873") - mean = float("-0.000302796") - std = float("0.00950446") + min_val = float("-0.0998433") + max_val = float("0.159214") + mean = float("-0.000138757") + std = float("0.00702056") data = None @@ -1982,10 +1982,10 @@ class Program_weight_tensor_parameter_180: name = "parameter_180" shape = [96] dtype = "float32" - min_val = float("-0.682344") - max_val = float("0.560615") - 
mean = float("-0.183267") - std = float("0.262805") + min_val = float("-0.737142") + max_val = float("1.03848") + mean = float("-0.0970708") + std = float("0.357165") data = None @@ -1993,10 +1993,10 @@ class Program_weight_tensor_parameter_181: name = "parameter_181" shape = [96] dtype = "float32" - min_val = float("0.650671") - max_val = float("1.28138") - mean = float("0.926136") - std = float("0.127332") + min_val = float("0.485726") + max_val = float("1.17258") + mean = float("0.785471") + std = float("0.141168") data = None @@ -2004,10 +2004,10 @@ class Program_weight_tensor_parameter_182: name = "parameter_182" shape = [96] dtype = "float32" - min_val = float("0.0166935") - max_val = float("0.0740427") - mean = float("0.0351758") - std = float("0.0134938") + min_val = float("0.00479017") + max_val = float("0.0498613") + mean = float("0.0140149") + std = float("0.00706644") data = None @@ -2015,10 +2015,10 @@ class Program_weight_tensor_parameter_183: name = "parameter_183" shape = [96] dtype = "float32" - min_val = float("-0.139363") - max_val = float("0.124573") - mean = float("-0.0021096") - std = float("0.0513469") + min_val = float("-0.140604") + max_val = float("0.0620771") + mean = float("-0.00136434") + std = float("0.0304237") data = None @@ -2026,10 +2026,10 @@ class Program_weight_tensor_parameter_184: name = "parameter_184" shape = [96, 448, 1, 1] dtype = "float32" - min_val = float("-0.176164") - max_val = float("0.183176") - mean = float("-0.000547214") - std = float("0.0128837") + min_val = float("-0.138366") + max_val = float("0.118931") + mean = float("-0.000333914") + std = float("0.00899348") data = None @@ -2037,10 +2037,10 @@ class Program_weight_tensor_parameter_185: name = "parameter_185" shape = [96] dtype = "float32" - min_val = float("-0.163476") - max_val = float("0.175629") - mean = float("0.0382556") - std = float("0.0657315") + min_val = float("-0.100614") + max_val = float("0.279575") + mean = float("0.0624506") + std = float("0.0700954") data = None @@ -2048,10 +2048,10 @@ class Program_weight_tensor_parameter_186: name = "parameter_186" shape = [96] dtype = "float32" - min_val = float("0.668729") - max_val = float("1.38231") - mean = float("0.957819") - std = float("0.134079") + min_val = float("0.726756") + max_val = float("1.15922") + mean = float("0.8968") + std = float("0.0750659") data = None @@ -2059,10 +2059,10 @@ class Program_weight_tensor_parameter_187: name = "parameter_187" shape = [96] dtype = "float32" - min_val = float("0.0053491") - max_val = float("0.0408792") - mean = float("0.0111961") - std = float("0.00625857") + min_val = float("0.00172884") + max_val = float("0.0284937") + mean = float("0.0037772") + std = float("0.00304651") data = None @@ -2070,10 +2070,10 @@ class Program_weight_tensor_parameter_188: name = "parameter_188" shape = [96] dtype = "float32" - min_val = float("-0.0998904") - max_val = float("0.112465") - mean = float("-0.0129474") - std = float("0.0345809") + min_val = float("-0.0587997") + max_val = float("0.0664821") + mean = float("-0.0051813") + std = float("0.0161551") data = None @@ -2081,10 +2081,10 @@ class Program_weight_tensor_parameter_189: name = "parameter_189" shape = [96, 448, 1, 1] dtype = "float32" - min_val = float("-0.10245") - max_val = float("0.143631") - mean = float("-0.000321447") - std = float("0.0083028") + min_val = float("-0.0960914") + max_val = float("0.114711") + mean = float("-0.000131248") + std = float("0.00503197") data = None @@ -2092,10 +2092,10 @@ class 
Program_weight_tensor_parameter_190: name = "parameter_190" shape = [192] dtype = "float32" - min_val = float("-0.339438") - max_val = float("0.0668367") - mean = float("-0.126421") - std = float("0.0576214") + min_val = float("-0.419878") + max_val = float("0.305803") + mean = float("-0.0843783") + std = float("0.0933839") data = None @@ -2103,10 +2103,10 @@ class Program_weight_tensor_parameter_191: name = "parameter_191" shape = [192] dtype = "float32" - min_val = float("0.709181") - max_val = float("1.31558") - mean = float("0.850576") - std = float("0.0685498") + min_val = float("0.660092") + max_val = float("1.60427") + mean = float("0.830813") + std = float("0.0949805") data = None @@ -2114,10 +2114,10 @@ class Program_weight_tensor_parameter_192: name = "parameter_192" shape = [192] dtype = "float32" - min_val = float("0.00982368") - max_val = float("0.120733") - mean = float("0.0222883") - std = float("0.0126702") + min_val = float("0.00393118") + max_val = float("0.106249") + mean = float("0.0110337") + std = float("0.00808965") data = None @@ -2125,10 +2125,10 @@ class Program_weight_tensor_parameter_193: name = "parameter_193" shape = [192] dtype = "float32" - min_val = float("-0.158372") - max_val = float("0.119076") - mean = float("-0.0375286") - std = float("0.0397228") + min_val = float("-0.0959304") + max_val = float("0.0379394") + mean = float("-0.0249515") + std = float("0.0229359") data = None @@ -2136,10 +2136,10 @@ class Program_weight_tensor_parameter_194: name = "parameter_194" shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.0618403") - max_val = float("0.087686") - mean = float("-0.000670588") - std = float("0.00876173") + min_val = float("-0.0812206") + max_val = float("0.0824483") + mean = float("-0.000421448") + std = float("0.00583855") data = None @@ -2147,10 +2147,10 @@ class Program_weight_tensor_parameter_195: name = "parameter_195" shape = [384] dtype = "float32" - min_val = float("-0.246737") - max_val = float("0.0626357") - mean = float("-0.104556") - std = float("0.0398377") + min_val = float("-0.373089") + max_val = float("0.1651") + mean = float("-0.0928755") + std = float("0.059282") data = None @@ -2158,10 +2158,10 @@ class Program_weight_tensor_parameter_196: name = "parameter_196" shape = [384] dtype = "float32" - min_val = float("0.86139") - max_val = float("1.36552") - mean = float("1.03591") - std = float("0.0538437") + min_val = float("0.877299") + max_val = float("1.57429") + mean = float("1.01597") + std = float("0.0852694") data = None @@ -2169,10 +2169,10 @@ class Program_weight_tensor_parameter_197: name = "parameter_197" shape = [384] dtype = "float32" - min_val = float("0.0112363") - max_val = float("0.181727") - mean = float("0.0226795") - std = float("0.0127742") + min_val = float("0.00323605") + max_val = float("0.0532371") + mean = float("0.00771792") + std = float("0.00446179") data = None @@ -2180,10 +2180,10 @@ class Program_weight_tensor_parameter_198: name = "parameter_198" shape = [384] dtype = "float32" - min_val = float("-0.257774") - max_val = float("0.11719") - mean = float("-0.0706271") - std = float("0.0443703") + min_val = float("-0.165965") + max_val = float("0.109289") + mean = float("-0.031791") + std = float("0.0291846") data = None @@ -2191,10 +2191,10 @@ class Program_weight_tensor_parameter_199: name = "parameter_199" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0766884") - max_val = float("0.0811282") - mean = float("-0.000905669") - std = float("0.00825814") + min_val = 
float("-0.130749") + max_val = float("0.0789119") + mean = float("-0.000445351") + std = float("0.00532961") data = None @@ -2202,10 +2202,10 @@ class Program_weight_tensor_parameter_200: name = "parameter_200" shape = [192] dtype = "float32" - min_val = float("-0.163787") - max_val = float("-0.0178854") - mean = float("-0.0640593") - std = float("0.0247121") + min_val = float("-0.256683") + max_val = float("0.0671899") + mean = float("-0.0839381") + std = float("0.0440531") data = None @@ -2213,10 +2213,10 @@ class Program_weight_tensor_parameter_201: name = "parameter_201" shape = [192] dtype = "float32" - min_val = float("0.863464") - max_val = float("1.00313") - mean = float("0.954504") - std = float("0.0217909") + min_val = float("0.819788") + max_val = float("0.987271") + mean = float("0.928926") + std = float("0.0272814") data = None @@ -2224,10 +2224,10 @@ class Program_weight_tensor_parameter_202: name = "parameter_202" shape = [192] dtype = "float32" - min_val = float("0.00426603") - max_val = float("0.0274319") - mean = float("0.00904264") - std = float("0.00343389") + min_val = float("0.00236419") + max_val = float("0.0177991") + mean = float("0.00505779") + std = float("0.00197103") data = None @@ -2235,10 +2235,10 @@ class Program_weight_tensor_parameter_203: name = "parameter_203" shape = [192] dtype = "float32" - min_val = float("-0.128303") - max_val = float("0.0856769") - mean = float("-0.0260934") - std = float("0.0402736") + min_val = float("-0.0602631") + max_val = float("0.0372194") + mean = float("-0.0145273") + std = float("0.0175294") data = None @@ -2246,10 +2246,10 @@ class Program_weight_tensor_parameter_204: name = "parameter_204" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0361114") - max_val = float("0.0350956") - mean = float("-0.000748032") - std = float("0.00552871") + min_val = float("-0.0359811") + max_val = float("0.0341578") + mean = float("-0.000499181") + std = float("0.00410174") data = None @@ -2257,10 +2257,10 @@ class Program_weight_tensor_parameter_205: name = "parameter_205" shape = [192] dtype = "float32" - min_val = float("-0.163787") - max_val = float("-0.0178854") - mean = float("-0.0640593") - std = float("0.0247121") + min_val = float("-0.256683") + max_val = float("0.0671899") + mean = float("-0.0839381") + std = float("0.0440531") data = None @@ -2268,10 +2268,10 @@ class Program_weight_tensor_parameter_206: name = "parameter_206" shape = [192] dtype = "float32" - min_val = float("0.931794") - max_val = float("1.03438") - mean = float("0.986242") - std = float("0.0213097") + min_val = float("0.900374") + max_val = float("1.08407") + mean = float("0.991603") + std = float("0.0256836") data = None @@ -2279,10 +2279,10 @@ class Program_weight_tensor_parameter_207: name = "parameter_207" shape = [192] dtype = "float32" - min_val = float("0.0162213") - max_val = float("0.15521") - mean = float("0.0388931") - std = float("0.0172201") + min_val = float("0.00869434") + max_val = float("0.103153") + mean = float("0.0195607") + std = float("0.00998195") data = None @@ -2290,10 +2290,10 @@ class Program_weight_tensor_parameter_208: name = "parameter_208" shape = [192] dtype = "float32" - min_val = float("-0.27218") - max_val = float("0.153326") - mean = float("-0.0421112") - std = float("0.072068") + min_val = float("-0.148147") + max_val = float("0.083841") + mean = float("-0.0215099") + std = float("0.0357341") data = None @@ -2301,10 +2301,10 @@ class Program_weight_tensor_parameter_209: name = "parameter_209" shape = [192, 
192, 3, 3] dtype = "float32" - min_val = float("-0.0286582") - max_val = float("0.0402364") - mean = float("-0.000141068") - std = float("0.00300371") + min_val = float("-0.0560753") + max_val = float("0.0798715") + mean = float("-6.90239e-05") + std = float("0.00228177") data = None @@ -2312,10 +2312,10 @@ class Program_weight_tensor_parameter_210: name = "parameter_210" shape = [192] dtype = "float32" - min_val = float("-0.26148") - max_val = float("0.0033202") - mean = float("-0.0610375") - std = float("0.0375229") + min_val = float("-0.28296") + max_val = float("0.00624382") + mean = float("-0.108697") + std = float("0.054323") data = None @@ -2323,10 +2323,10 @@ class Program_weight_tensor_parameter_211: name = "parameter_211" shape = [192] dtype = "float32" - min_val = float("0.955773") - max_val = float("1.12848") - mean = float("1.0249") - std = float("0.0296608") + min_val = float("0.935362") + max_val = float("1.19786") + mean = float("1.03662") + std = float("0.044863") data = None @@ -2334,10 +2334,10 @@ class Program_weight_tensor_parameter_212: name = "parameter_212" shape = [192] dtype = "float32" - min_val = float("0.0478933") - max_val = float("0.390037") - mean = float("0.10908") - std = float("0.0501276") + min_val = float("0.0153049") + max_val = float("0.150077") + mean = float("0.0321082") + std = float("0.0130511") data = None @@ -2345,10 +2345,10 @@ class Program_weight_tensor_parameter_213: name = "parameter_213" shape = [192] dtype = "float32" - min_val = float("-0.460456") - max_val = float("0.58138") - mean = float("-0.0865142") - std = float("0.149413") + min_val = float("-0.205475") + max_val = float("0.151818") + mean = float("-0.048646") + std = float("0.0468518") data = None @@ -2356,10 +2356,10 @@ class Program_weight_tensor_parameter_214: name = "parameter_214" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0310591") - max_val = float("0.0529931") - mean = float("-0.000143811") - std = float("0.00358108") + min_val = float("-0.0723395") + max_val = float("0.0757746") + mean = float("-0.000117075") + std = float("0.00276706") data = None @@ -2367,10 +2367,10 @@ class Program_weight_tensor_parameter_215: name = "parameter_215" shape = [192] dtype = "float32" - min_val = float("-0.141458") - max_val = float("-0.0079347") - mean = float("-0.0529687") - std = float("0.0249688") + min_val = float("-0.253549") + max_val = float("-0.0252667") + mean = float("-0.111396") + std = float("0.0515085") data = None @@ -2378,10 +2378,10 @@ class Program_weight_tensor_parameter_216: name = "parameter_216" shape = [192] dtype = "float32" - min_val = float("0.959304") - max_val = float("1.04419") - mean = float("0.9895") - std = float("0.0122273") + min_val = float("0.915269") + max_val = float("1.08382") + mean = float("0.975487") + std = float("0.0197657") data = None @@ -2389,10 +2389,10 @@ class Program_weight_tensor_parameter_217: name = "parameter_217" shape = [192] dtype = "float32" - min_val = float("0.00246453") - max_val = float("0.0136491") - mean = float("0.00528701") - std = float("0.00174133") + min_val = float("0.00131146") + max_val = float("0.00746504") + mean = float("0.00271604") + std = float("0.00086359") data = None @@ -2400,10 +2400,10 @@ class Program_weight_tensor_parameter_218: name = "parameter_218" shape = [192] dtype = "float32" - min_val = float("-0.0640118") - max_val = float("0.0474594") - mean = float("-0.0144983") - std = float("0.0208838") + min_val = float("-0.0571962") + max_val = float("0.0503431") + mean = 
float("-0.0146694") + std = float("0.014805") data = None @@ -2411,10 +2411,10 @@ class Program_weight_tensor_parameter_219: name = "parameter_219" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.031823") - max_val = float("0.0528765") - mean = float("-0.000451702") - std = float("0.00561687") + min_val = float("-0.0237103") + max_val = float("0.0309154") + mean = float("-0.000535383") + std = float("0.00421215") data = None @@ -2422,10 +2422,10 @@ class Program_weight_tensor_parameter_220: name = "parameter_220" shape = [192] dtype = "float32" - min_val = float("-0.141458") - max_val = float("-0.0079347") - mean = float("-0.0529687") - std = float("0.0249688") + min_val = float("-0.253549") + max_val = float("-0.0252667") + mean = float("-0.111396") + std = float("0.0515085") data = None @@ -2433,10 +2433,10 @@ class Program_weight_tensor_parameter_221: name = "parameter_221" shape = [192] dtype = "float32" - min_val = float("0.975086") - max_val = float("1.08421") - mean = float("1.01035") - std = float("0.0198136") + min_val = float("0.93953") + max_val = float("1.1319") + mean = float("1.00446") + std = float("0.0346886") data = None @@ -2444,10 +2444,10 @@ class Program_weight_tensor_parameter_222: name = "parameter_222" shape = [192] dtype = "float32" - min_val = float("0.0102") - max_val = float("0.0531645") - mean = float("0.0191559") - std = float("0.0068919") + min_val = float("0.00555641") + max_val = float("0.0273444") + mean = float("0.0100098") + std = float("0.00301259") data = None @@ -2455,10 +2455,10 @@ class Program_weight_tensor_parameter_223: name = "parameter_223" shape = [192] dtype = "float32" - min_val = float("-0.158448") - max_val = float("0.0748376") - mean = float("-0.0355524") - std = float("0.0377289") + min_val = float("-0.142862") + max_val = float("0.0842238") + mean = float("-0.0289718") + std = float("0.0284312") data = None @@ -2466,10 +2466,10 @@ class Program_weight_tensor_parameter_224: name = "parameter_224" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0272448") - max_val = float("0.0453462") - mean = float("-0.000119593") - std = float("0.00283721") + min_val = float("-0.0463499") + max_val = float("0.0648047") + mean = float("-0.000117258") + std = float("0.00236655") data = None @@ -2477,10 +2477,10 @@ class Program_weight_tensor_parameter_225: name = "parameter_225" shape = [192] dtype = "float32" - min_val = float("-0.176533") - max_val = float("-0.0140918") - mean = float("-0.0627926") - std = float("0.0255225") + min_val = float("-0.397852") + max_val = float("-0.024517") + mean = float("-0.135045") + std = float("0.0579008") data = None @@ -2488,10 +2488,10 @@ class Program_weight_tensor_parameter_226: name = "parameter_226" shape = [192] dtype = "float32" - min_val = float("0.959453") - max_val = float("1.17829") - mean = float("1.01679") - std = float("0.0291818") + min_val = float("0.935578") + max_val = float("1.28416") + mean = float("1.02712") + std = float("0.0581486") data = None @@ -2499,10 +2499,10 @@ class Program_weight_tensor_parameter_227: name = "parameter_227" shape = [192] dtype = "float32" - min_val = float("0.049348") - max_val = float("0.418024") - mean = float("0.115489") - std = float("0.0507256") + min_val = float("0.0152128") + max_val = float("0.0761776") + mean = float("0.0305415") + std = float("0.00974539") data = None @@ -2510,10 +2510,10 @@ class Program_weight_tensor_parameter_228: name = "parameter_228" shape = [192] dtype = "float32" - min_val = float("-0.807392") - 
max_val = float("0.359917") - mean = float("-0.202071") - std = float("0.159679") + min_val = float("-0.258209") + max_val = float("0.302634") + mean = float("-0.0505409") + std = float("0.057312") data = None @@ -2521,10 +2521,10 @@ class Program_weight_tensor_parameter_229: name = "parameter_229" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0318035") - max_val = float("0.0525248") - mean = float("-0.000289932") - std = float("0.00370175") + min_val = float("-0.0333938") + max_val = float("0.0459033") + mean = float("-0.000119163") + std = float("0.00292916") data = None @@ -2532,10 +2532,10 @@ class Program_weight_tensor_parameter_230: name = "parameter_230" shape = [192] dtype = "float32" - min_val = float("-0.101496") - max_val = float("0.00229944") - mean = float("-0.0432151") - std = float("0.017968") + min_val = float("-0.291413") + max_val = float("-0.0234132") + mean = float("-0.113952") + std = float("0.046762") data = None @@ -2543,10 +2543,10 @@ class Program_weight_tensor_parameter_231: name = "parameter_231" shape = [192] dtype = "float32" - min_val = float("0.948209") - max_val = float("1.06033") - mean = float("0.999231") - std = float("0.0183132") + min_val = float("0.906693") + max_val = float("1.13942") + mean = float("0.996984") + std = float("0.036522") data = None @@ -2554,10 +2554,10 @@ class Program_weight_tensor_parameter_232: name = "parameter_232" shape = [192] dtype = "float32" - min_val = float("0.00265606") - max_val = float("0.0159575") - mean = float("0.00530049") - std = float("0.0019803") + min_val = float("0.0014244") + max_val = float("0.00546528") + mean = float("0.00250251") + std = float("0.000770997") data = None @@ -2565,10 +2565,10 @@ class Program_weight_tensor_parameter_233: name = "parameter_233" shape = [192] dtype = "float32" - min_val = float("-0.0698203") - max_val = float("0.09676") - mean = float("-0.0136201") - std = float("0.0227753") + min_val = float("-0.0389427") + max_val = float("0.0867383") + mean = float("-0.00930065") + std = float("0.0135574") data = None @@ -2576,10 +2576,10 @@ class Program_weight_tensor_parameter_234: name = "parameter_234" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0267064") - max_val = float("0.0456144") - mean = float("-0.000432224") - std = float("0.0062217") + min_val = float("-0.0346133") + max_val = float("0.0524164") + mean = float("-0.000326125") + std = float("0.00484124") data = None @@ -2587,10 +2587,10 @@ class Program_weight_tensor_parameter_235: name = "parameter_235" shape = [192] dtype = "float32" - min_val = float("-0.101496") - max_val = float("0.00229944") - mean = float("-0.0432151") - std = float("0.017968") + min_val = float("-0.291413") + max_val = float("-0.0234132") + mean = float("-0.113952") + std = float("0.046762") data = None @@ -2598,10 +2598,10 @@ class Program_weight_tensor_parameter_236: name = "parameter_236" shape = [192] dtype = "float32" - min_val = float("0.965303") - max_val = float("1.08528") - mean = float("1.00108") - std = float("0.0207208") + min_val = float("0.909728") + max_val = float("1.14814") + mean = float("0.986983") + std = float("0.0373625") data = None @@ -2609,10 +2609,10 @@ class Program_weight_tensor_parameter_237: name = "parameter_237" shape = [192] dtype = "float32" - min_val = float("0.00866238") - max_val = float("0.0846476") - mean = float("0.0232237") - std = float("0.0107339") + min_val = float("0.00562666") + max_val = float("0.0219922") + mean = float("0.0109758") + std = float("0.00338237") data = 
None @@ -2620,10 +2620,10 @@ class Program_weight_tensor_parameter_238: name = "parameter_238" shape = [192] dtype = "float32" - min_val = float("-0.172128") - max_val = float("0.121261") - mean = float("-0.0385422") - std = float("0.048641") + min_val = float("-0.21375") + max_val = float("0.0270211") + mean = float("-0.0354808") + std = float("0.0287563") data = None @@ -2631,10 +2631,10 @@ class Program_weight_tensor_parameter_239: name = "parameter_239" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0216114") - max_val = float("0.0354893") - mean = float("-0.00013956") - std = float("0.00290758") + min_val = float("-0.024049") + max_val = float("0.0316937") + mean = float("-0.00014643") + std = float("0.00232213") data = None @@ -2642,10 +2642,10 @@ class Program_weight_tensor_parameter_240: name = "parameter_240" shape = [192] dtype = "float32" - min_val = float("-0.204286") - max_val = float("-0.0144314") - mean = float("-0.0931084") - std = float("0.0313584") + min_val = float("-0.370567") + max_val = float("-0.0105869") + mean = float("-0.161477") + std = float("0.0602506") data = None @@ -2653,10 +2653,10 @@ class Program_weight_tensor_parameter_241: name = "parameter_241" shape = [192] dtype = "float32" - min_val = float("0.924946") - max_val = float("1.13197") - mean = float("1.03248") - std = float("0.033681") + min_val = float("0.905056") + max_val = float("1.22126") + mean = float("1.03042") + std = float("0.0494268") data = None @@ -2664,10 +2664,10 @@ class Program_weight_tensor_parameter_242: name = "parameter_242" shape = [192] dtype = "float32" - min_val = float("0.0171774") - max_val = float("0.151451") - mean = float("0.0404337") - std = float("0.0194303") + min_val = float("0.00753308") + max_val = float("0.0302844") + mean = float("0.0141411") + std = float("0.00443453") data = None @@ -2675,10 +2675,10 @@ class Program_weight_tensor_parameter_243: name = "parameter_243" shape = [192] dtype = "float32" - min_val = float("-0.214493") - max_val = float("0.18665") - mean = float("-0.031976") - std = float("0.0594646") + min_val = float("-0.100325") + max_val = float("0.0739225") + mean = float("-0.0300805") + std = float("0.0307536") data = None @@ -2686,10 +2686,10 @@ class Program_weight_tensor_parameter_244: name = "parameter_244" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0401705") - max_val = float("0.0541615") - mean = float("-0.000152877") - std = float("0.00458084") + min_val = float("-0.0896984") + max_val = float("0.04215") + mean = float("-0.000139982") + std = float("0.00324088") data = None @@ -2697,10 +2697,10 @@ class Program_weight_tensor_parameter_245: name = "parameter_245" shape = [192] dtype = "float32" - min_val = float("-0.283235") - max_val = float("-0.0399838") - mean = float("-0.14066") - std = float("0.0427785") + min_val = float("-0.397771") + max_val = float("0.0863893") + mean = float("-0.165521") + std = float("0.0737187") data = None @@ -2708,10 +2708,10 @@ class Program_weight_tensor_parameter_246: name = "parameter_246" shape = [192] dtype = "float32" - min_val = float("0.926396") - max_val = float("1.23818") - mean = float("1.04777") - std = float("0.044691") + min_val = float("0.878799") + max_val = float("1.17577") + mean = float("1.01746") + std = float("0.0565899") data = None @@ -2719,10 +2719,10 @@ class Program_weight_tensor_parameter_247: name = "parameter_247" shape = [192] dtype = "float32" - min_val = float("0.00760291") - max_val = float("0.0292301") - mean = float("0.0135064") - std = 
float("0.00386928") + min_val = float("0.00278118") + max_val = float("0.0114293") + mean = float("0.00483493") + std = float("0.00146785") data = None @@ -2730,10 +2730,10 @@ class Program_weight_tensor_parameter_248: name = "parameter_248" shape = [192] dtype = "float32" - min_val = float("-0.082307") - max_val = float("0.240184") - mean = float("0.034674") - std = float("0.0405637") + min_val = float("-0.0560004") + max_val = float("0.0581845") + mean = float("0.0102661") + std = float("0.0202822") data = None @@ -2741,10 +2741,10 @@ class Program_weight_tensor_parameter_249: name = "parameter_249" shape = [192, 896, 1, 1] dtype = "float32" - min_val = float("-0.0633244") - max_val = float("0.0929278") - mean = float("-0.000269954") - std = float("0.00689158") + min_val = float("-0.101477") + max_val = float("0.142492") + mean = float("-0.000156828") + std = float("0.00446408") data = None @@ -2752,10 +2752,10 @@ class Program_weight_tensor_parameter_250: name = "parameter_250" shape = [192] dtype = "float32" - min_val = float("-0.259346") - max_val = float("0.089777") - mean = float("-0.101264") - std = float("0.0626651") + min_val = float("-0.151112") + max_val = float("0.501803") + mean = float("-0.00466305") + std = float("0.073986") data = None @@ -2763,10 +2763,10 @@ class Program_weight_tensor_parameter_251: name = "parameter_251" shape = [192] dtype = "float32" - min_val = float("0.942808") - max_val = float("1.43592") - mean = float("1.10635") - std = float("0.0692502") + min_val = float("0.932258") + max_val = float("1.23138") + mean = float("1.04643") + std = float("0.0627369") data = None @@ -2774,10 +2774,10 @@ class Program_weight_tensor_parameter_252: name = "parameter_252" shape = [192] dtype = "float32" - min_val = float("0.00933576") - max_val = float("0.0689556") - mean = float("0.0172674") - std = float("0.00566401") + min_val = float("0.00250556") + max_val = float("0.0461735") + mean = float("0.00561131") + std = float("0.00346797") data = None @@ -2785,10 +2785,10 @@ class Program_weight_tensor_parameter_253: name = "parameter_253" shape = [192] dtype = "float32" - min_val = float("-0.0892463") - max_val = float("0.100943") - mean = float("0.0114547") - std = float("0.0327796") + min_val = float("-0.0548003") + max_val = float("0.0495759") + mean = float("-0.000104753") + std = float("0.0205629") data = None @@ -2796,10 +2796,10 @@ class Program_weight_tensor_parameter_254: name = "parameter_254" shape = [192, 896, 1, 1] dtype = "float32" - min_val = float("-0.0570768") - max_val = float("0.156442") - mean = float("-0.000259374") - std = float("0.0074107") + min_val = float("-0.134457") + max_val = float("0.0882326") + mean = float("-0.000122861") + std = float("0.00449465") data = None @@ -2807,10 +2807,10 @@ class Program_weight_tensor_parameter_255: name = "parameter_255" shape = [384] dtype = "float32" - min_val = float("-0.294876") - max_val = float("-0.0604914") - mean = float("-0.156798") - std = float("0.0456771") + min_val = float("-0.31215") + max_val = float("-0.0444473") + mean = float("-0.171263") + std = float("0.0441121") data = None @@ -2818,10 +2818,10 @@ class Program_weight_tensor_parameter_256: name = "parameter_256" shape = [384] dtype = "float32" - min_val = float("0.724694") - max_val = float("1.03284") - mean = float("0.862991") - std = float("0.0407095") + min_val = float("0.786726") + max_val = float("1.17526") + mean = float("0.885032") + std = float("0.0343599") data = None @@ -2829,10 +2829,10 @@ class 
Program_weight_tensor_parameter_257: name = "parameter_257" shape = [384] dtype = "float32" - min_val = float("0.0113886") - max_val = float("0.0802941") - mean = float("0.0276917") - std = float("0.0102752") + min_val = float("0.00467646") + max_val = float("0.0521003") + mean = float("0.0100483") + std = float("0.00415136") data = None @@ -2840,10 +2840,10 @@ class Program_weight_tensor_parameter_258: name = "parameter_258" shape = [384] dtype = "float32" - min_val = float("-0.162031") - max_val = float("0.204487") - mean = float("-0.0755192") - std = float("0.0459278") + min_val = float("-0.0852832") + max_val = float("0.0645434") + mean = float("-0.0324354") + std = float("0.0213669") data = None @@ -2851,10 +2851,10 @@ class Program_weight_tensor_parameter_259: name = "parameter_259" shape = [384, 768, 1, 1] dtype = "float32" - min_val = float("-0.0292396") - max_val = float("0.0392698") - mean = float("-0.000553119") - std = float("0.00501583") + min_val = float("-0.0336805") + max_val = float("0.0404763") + mean = float("-0.000294351") + std = float("0.00351804") data = None @@ -2862,10 +2862,10 @@ class Program_weight_tensor_parameter_260: name = "parameter_260" shape = [768] dtype = "float32" - min_val = float("-0.119028") - max_val = float("0.0118783") - mean = float("-0.0644734") - std = float("0.0183049") + min_val = float("-0.164743") + max_val = float("0.115333") + mean = float("-0.0875639") + std = float("0.0238268") data = None @@ -2873,10 +2873,10 @@ class Program_weight_tensor_parameter_261: name = "parameter_261" shape = [768] dtype = "float32" - min_val = float("0.943984") - max_val = float("1.15965") - mean = float("1.02714") - std = float("0.0265502") + min_val = float("0.935282") + max_val = float("1.27924") + mean = float("1.02951") + std = float("0.0299064") data = None @@ -2884,10 +2884,10 @@ class Program_weight_tensor_parameter_262: name = "parameter_262" shape = [768] dtype = "float32" - min_val = float("0.00981212") - max_val = float("0.0919141") - mean = float("0.019193") - std = float("0.00623331") + min_val = float("0.00328574") + max_val = float("0.0307873") + mean = float("0.00666914") + std = float("0.00225792") data = None @@ -2895,10 +2895,10 @@ class Program_weight_tensor_parameter_263: name = "parameter_263" shape = [768] dtype = "float32" - min_val = float("-0.163952") - max_val = float("0.165282") - mean = float("-0.0496244") - std = float("0.0363468") + min_val = float("-0.13377") + max_val = float("0.10924") + mean = float("-0.0334799") + std = float("0.0256795") data = None @@ -2906,10 +2906,10 @@ class Program_weight_tensor_parameter_264: name = "parameter_264" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0555945") - max_val = float("0.0656988") - mean = float("-0.000287705") - std = float("0.00431166") + min_val = float("-0.0417249") + max_val = float("0.0674709") + mean = float("-0.000227656") + std = float("0.00301788") data = None @@ -2917,10 +2917,10 @@ class Program_weight_tensor_parameter_265: name = "parameter_265" shape = [384] dtype = "float32" - min_val = float("-0.185724") - max_val = float("0.101576") - mean = float("-0.0493899") - std = float("0.0232288") + min_val = float("-0.175755") + max_val = float("0.114994") + mean = float("-0.059758") + std = float("0.0316004") data = None @@ -2928,10 +2928,10 @@ class Program_weight_tensor_parameter_266: name = "parameter_266" shape = [384] dtype = "float32" - min_val = float("0.895849") - max_val = float("1.01586") - mean = float("0.977906") - std = float("0.0132131") 
+ min_val = float("0.875498") + max_val = float("1.05486") + mean = float("0.97544") + std = float("0.0178167") data = None @@ -2939,10 +2939,10 @@ class Program_weight_tensor_parameter_267: name = "parameter_267" shape = [384] dtype = "float32" - min_val = float("0.00471585") - max_val = float("0.0485148") - mean = float("0.0128933") - std = float("0.00549912") + min_val = float("0.00187857") + max_val = float("0.0214535") + mean = float("0.0049821") + std = float("0.00205769") data = None @@ -2950,10 +2950,10 @@ class Program_weight_tensor_parameter_268: name = "parameter_268" shape = [384] dtype = "float32" - min_val = float("-0.0800717") - max_val = float("0.0775817") - mean = float("-0.0161284") - std = float("0.027053") + min_val = float("-0.048337") + max_val = float("0.0454388") + mean = float("-0.00746495") + std = float("0.0193299") data = None @@ -2961,10 +2961,10 @@ class Program_weight_tensor_parameter_269: name = "parameter_269" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0257666") - max_val = float("0.0430036") - mean = float("-0.000213548") - std = float("0.0035453") + min_val = float("-0.0520415") + max_val = float("0.0516332") + mean = float("-0.000102198") + std = float("0.00275407") data = None @@ -2972,10 +2972,10 @@ class Program_weight_tensor_parameter_270: name = "parameter_270" shape = [384] dtype = "float32" - min_val = float("-0.185724") - max_val = float("0.101576") - mean = float("-0.0493899") - std = float("0.0232288") + min_val = float("-0.175755") + max_val = float("0.114994") + mean = float("-0.059758") + std = float("0.0316004") data = None @@ -2983,10 +2983,10 @@ class Program_weight_tensor_parameter_271: name = "parameter_271" shape = [384] dtype = "float32" - min_val = float("0.894262") - max_val = float("1.09303") - mean = float("0.980357") - std = float("0.0138837") + min_val = float("0.939765") + max_val = float("1.08488") + mean = float("0.993679") + std = float("0.0187977") data = None @@ -2994,10 +2994,10 @@ class Program_weight_tensor_parameter_272: name = "parameter_272" shape = [384] dtype = "float32" - min_val = float("0.0282483") - max_val = float("0.349296") - mean = float("0.0853497") - std = float("0.0378846") + min_val = float("0.0136676") + max_val = float("0.24702") + mean = float("0.0361577") + std = float("0.0178113") data = None @@ -3005,10 +3005,10 @@ class Program_weight_tensor_parameter_273: name = "parameter_273" shape = [384] dtype = "float32" - min_val = float("-0.301014") - max_val = float("0.153614") - mean = float("-0.0944117") - std = float("0.0767887") + min_val = float("-0.193526") + max_val = float("0.128898") + mean = float("-0.0635078") + std = float("0.0592118") data = None @@ -3016,10 +3016,10 @@ class Program_weight_tensor_parameter_274: name = "parameter_274" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0401543") - max_val = float("0.0468426") - mean = float("-0.000144951") - std = float("0.00128804") + min_val = float("-0.0262406") + max_val = float("0.0342993") + mean = float("-0.000104765") + std = float("0.00102972") data = None @@ -3027,10 +3027,10 @@ class Program_weight_tensor_parameter_275: name = "parameter_275" shape = [384] dtype = "float32" - min_val = float("-0.0960581") - max_val = float("0.159253") - mean = float("-0.016533") - std = float("0.0220565") + min_val = float("-0.151307") + max_val = float("0.102932") + mean = float("-0.036051") + std = float("0.0243773") data = None @@ -3038,10 +3038,10 @@ class Program_weight_tensor_parameter_276: name = 
"parameter_276" shape = [384] dtype = "float32" - min_val = float("0.948727") - max_val = float("1.24141") - mean = float("1.01995") - std = float("0.03235") + min_val = float("0.948438") + max_val = float("1.23653") + mean = float("1.02223") + std = float("0.0374692") data = None @@ -3049,10 +3049,10 @@ class Program_weight_tensor_parameter_277: name = "parameter_277" shape = [384] dtype = "float32" - min_val = float("0.0143644") - max_val = float("0.251509") - mean = float("0.0688272") - std = float("0.0333456") + min_val = float("0.00885489") + max_val = float("0.121229") + mean = float("0.0291848") + std = float("0.0116348") data = None @@ -3060,10 +3060,10 @@ class Program_weight_tensor_parameter_278: name = "parameter_278" shape = [384] dtype = "float32" - min_val = float("-0.226292") - max_val = float("0.268143") - mean = float("-0.0368348") - std = float("0.087958") + min_val = float("-0.180611") + max_val = float("0.112007") + mean = float("-0.0377734") + std = float("0.047009") data = None @@ -3071,10 +3071,10 @@ class Program_weight_tensor_parameter_279: name = "parameter_279" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0312562") - max_val = float("0.0430391") - mean = float("-5.96221e-05") - std = float("0.00162276") + min_val = float("-0.0233099") + max_val = float("0.0327733") + mean = float("-6.46831e-05") + std = float("0.00135178") data = None @@ -3082,10 +3082,10 @@ class Program_weight_tensor_parameter_280: name = "parameter_280" shape = [384] dtype = "float32" - min_val = float("-0.1174") - max_val = float("0.0562049") - mean = float("-0.0226758") - std = float("0.0171998") + min_val = float("-0.12739") + max_val = float("0.0293372") + mean = float("-0.0396596") + std = float("0.0204788") data = None @@ -3093,10 +3093,10 @@ class Program_weight_tensor_parameter_281: name = "parameter_281" shape = [384] dtype = "float32" - min_val = float("0.959883") - max_val = float("1.14924") - mean = float("1.02097") - std = float("0.0314417") + min_val = float("0.935426") + max_val = float("1.22654") + mean = float("1.02217") + std = float("0.0409336") data = None @@ -3104,10 +3104,10 @@ class Program_weight_tensor_parameter_282: name = "parameter_282" shape = [384] dtype = "float32" - min_val = float("0.0684558") - max_val = float("0.632888") - mean = float("0.21699") - std = float("0.0818797") + min_val = float("0.026841") + max_val = float("0.276428") + mean = float("0.0890046") + std = float("0.0287197") data = None @@ -3115,10 +3115,10 @@ class Program_weight_tensor_parameter_283: name = "parameter_283" shape = [384] dtype = "float32" - min_val = float("-2.54336") - max_val = float("2.10932") - mean = float("-0.081684") - std = float("0.72804") + min_val = float("-0.988092") + max_val = float("1.24067") + mean = float("-0.0106803") + std = float("0.356169") data = None @@ -3126,10 +3126,10 @@ class Program_weight_tensor_parameter_284: name = "parameter_284" shape = [384, 1536, 1, 1] dtype = "float32" - min_val = float("-0.0302692") - max_val = float("0.0501981") - mean = float("3.98672e-05") - std = float("0.00292002") + min_val = float("-0.0437694") + max_val = float("0.0478719") + mean = float("3.92494e-05") + std = float("0.00226745") data = None @@ -3137,10 +3137,10 @@ class Program_weight_tensor_parameter_285: name = "parameter_285" shape = [384] dtype = "float32" - min_val = float("-0.0227048") - max_val = float("0.0393072") - mean = float("-0.00516438") - std = float("0.00791319") + min_val = float("-0.0351363") + max_val = float("0.0448389") + mean 
= float("-0.00202565") + std = float("0.0112301") data = None @@ -3148,10 +3148,10 @@ class Program_weight_tensor_parameter_286: name = "parameter_286" shape = [384] dtype = "float32" - min_val = float("0.954966") - max_val = float("1.13128") - mean = float("0.992945") - std = float("0.0198847") + min_val = float("0.955166") + max_val = float("1.07949") + mean = float("0.99032") + std = float("0.0160701") data = None @@ -3159,10 +3159,10 @@ class Program_weight_tensor_parameter_287: name = "parameter_287" shape = [384] dtype = "float32" - min_val = float("0.00285281") - max_val = float("0.0151266") - mean = float("0.00653893") - std = float("0.00215647") + min_val = float("0.00173498") + max_val = float("0.00643269") + mean = float("0.00313556") + std = float("0.000799758") data = None @@ -3170,10 +3170,10 @@ class Program_weight_tensor_parameter_288: name = "parameter_288" shape = [384] dtype = "float32" - min_val = float("-0.103457") - max_val = float("0.0644516") - mean = float("-0.0399251") - std = float("0.0284929") + min_val = float("-0.0717004") + max_val = float("0.0408329") + mean = float("-0.0299012") + std = float("0.0176195") data = None @@ -3181,10 +3181,10 @@ class Program_weight_tensor_parameter_289: name = "parameter_289" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0241085") - max_val = float("0.0314178") - mean = float("-0.000494811") - std = float("0.00322186") + min_val = float("-0.0228554") + max_val = float("0.0328209") + mean = float("-0.000375757") + std = float("0.00255745") data = None @@ -3192,10 +3192,10 @@ class Program_weight_tensor_parameter_290: name = "parameter_290" shape = [384] dtype = "float32" - min_val = float("-0.0227048") - max_val = float("0.0393072") - mean = float("-0.00516438") - std = float("0.00791319") + min_val = float("-0.0351365") + max_val = float("0.0448389") + mean = float("-0.00202565") + std = float("0.0112301") data = None @@ -3203,10 +3203,10 @@ class Program_weight_tensor_parameter_291: name = "parameter_291" shape = [384] dtype = "float32" - min_val = float("0.942703") - max_val = float("1.15329") - mean = float("1.00343") - std = float("0.0304117") + min_val = float("0.957996") + max_val = float("1.12454") + mean = float("1.00467") + std = float("0.0255587") data = None @@ -3214,10 +3214,10 @@ class Program_weight_tensor_parameter_292: name = "parameter_292" shape = [384] dtype = "float32" - min_val = float("0.0131764") - max_val = float("0.0914844") - mean = float("0.038932") - std = float("0.0152216") + min_val = float("0.0083014") + max_val = float("0.0352855") + mean = float("0.0175327") + std = float("0.00454233") data = None @@ -3225,10 +3225,10 @@ class Program_weight_tensor_parameter_293: name = "parameter_293" shape = [384] dtype = "float32" - min_val = float("-0.27583") - max_val = float("0.129426") - mean = float("-0.115384") - std = float("0.0654124") + min_val = float("-0.247265") + max_val = float("0.101324") + mean = float("-0.0772083") + std = float("0.0418546") data = None @@ -3236,10 +3236,10 @@ class Program_weight_tensor_parameter_294: name = "parameter_294" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.00991428") - max_val = float("0.0235268") - mean = float("-0.000178382") - std = float("0.00124102") + min_val = float("-0.0259612") + max_val = float("0.0450877") + mean = float("-0.000119198") + std = float("0.00109556") data = None @@ -3247,10 +3247,10 @@ class Program_weight_tensor_parameter_295: name = "parameter_295" shape = [384] dtype = "float32" - min_val = 
float("-0.0484464") - max_val = float("0.0216319") - mean = float("-0.00460097") - std = float("0.00967076") + min_val = float("-0.0757949") + max_val = float("0.0143529") + mean = float("-0.0184104") + std = float("0.0130086") data = None @@ -3258,10 +3258,10 @@ class Program_weight_tensor_parameter_296: name = "parameter_296" shape = [384] dtype = "float32" - min_val = float("0.963084") - max_val = float("1.21833") - mean = float("1.01835") - std = float("0.0242792") + min_val = float("0.948442") + max_val = float("1.1913") + mean = float("1.02005") + std = float("0.0307435") data = None @@ -3269,10 +3269,10 @@ class Program_weight_tensor_parameter_297: name = "parameter_297" shape = [384] dtype = "float32" - min_val = float("0.0846965") - max_val = float("0.477757") - mean = float("0.218773") - std = float("0.0720263") + min_val = float("0.0341549") + max_val = float("0.134905") + mean = float("0.0680685") + std = float("0.0155983") data = None @@ -3280,10 +3280,10 @@ class Program_weight_tensor_parameter_298: name = "parameter_298" shape = [384] dtype = "float32" - min_val = float("-1.09295") - max_val = float("1.28986") - mean = float("-0.341772") - std = float("0.305414") + min_val = float("-0.699112") + max_val = float("0.534627") + mean = float("-0.130478") + std = float("0.14264") data = None @@ -3291,10 +3291,10 @@ class Program_weight_tensor_parameter_299: name = "parameter_299" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0119066") - max_val = float("0.018937") - mean = float("-0.000190711") - std = float("0.00143766") + min_val = float("-0.0199475") + max_val = float("0.0262388") + mean = float("-8.6257e-05") + std = float("0.00132087") data = None @@ -3302,10 +3302,10 @@ class Program_weight_tensor_parameter_300: name = "parameter_300" shape = [384] dtype = "float32" - min_val = float("-0.0328709") - max_val = float("0.0296847") - mean = float("0.00167559") - std = float("0.0101838") + min_val = float("-0.0646182") + max_val = float("0.0263686") + mean = float("-0.0175816") + std = float("0.0121433") data = None @@ -3313,10 +3313,10 @@ class Program_weight_tensor_parameter_301: name = "parameter_301" shape = [384] dtype = "float32" - min_val = float("0.982379") - max_val = float("1.06457") - mean = float("1.00463") - std = float("0.00875427") + min_val = float("0.976895") + max_val = float("1.05226") + mean = float("0.998698") + std = float("0.0104278") data = None @@ -3324,10 +3324,10 @@ class Program_weight_tensor_parameter_302: name = "parameter_302" shape = [384] dtype = "float32" - min_val = float("0.00182393") - max_val = float("0.0117111") - mean = float("0.00446161") - std = float("0.00149495") + min_val = float("0.000942471") + max_val = float("0.00502155") + mean = float("0.00201621") + std = float("0.000551618") data = None @@ -3335,10 +3335,10 @@ class Program_weight_tensor_parameter_303: name = "parameter_303" shape = [384] dtype = "float32" - min_val = float("-0.0690861") - max_val = float("0.16253") - mean = float("-0.0277446") - std = float("0.0287758") + min_val = float("-0.0479207") + max_val = float("0.0928967") + mean = float("-0.0128137") + std = float("0.0168058") data = None @@ -3346,10 +3346,10 @@ class Program_weight_tensor_parameter_304: name = "parameter_304" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0149167") - max_val = float("0.024544") - mean = float("-0.00035583") - std = float("0.00252494") + min_val = float("-0.019998") + max_val = float("0.0336093") + mean = float("-0.0001808") + std = 
float("0.00225336") data = None @@ -3357,10 +3357,10 @@ class Program_weight_tensor_parameter_305: name = "parameter_305" shape = [384] dtype = "float32" - min_val = float("-0.0328709") - max_val = float("0.0296847") - mean = float("0.00167559") - std = float("0.0101838") + min_val = float("-0.0646181") + max_val = float("0.0263686") + mean = float("-0.0175816") + std = float("0.0121433") data = None @@ -3368,10 +3368,10 @@ class Program_weight_tensor_parameter_306: name = "parameter_306" shape = [384] dtype = "float32" - min_val = float("0.972532") - max_val = float("1.06753") - mean = float("1.00429") - std = float("0.0130159") + min_val = float("0.976548") + max_val = float("1.1009") + mean = float("1.00714") + std = float("0.0202515") data = None @@ -3379,10 +3379,10 @@ class Program_weight_tensor_parameter_307: name = "parameter_307" shape = [384] dtype = "float32" - min_val = float("0.00916589") - max_val = float("0.0757382") - mean = float("0.026734") - std = float("0.00932033") + min_val = float("0.00525835") + max_val = float("0.0300494") + mean = float("0.0114248") + std = float("0.0035076") data = None @@ -3390,10 +3390,10 @@ class Program_weight_tensor_parameter_308: name = "parameter_308" shape = [384] dtype = "float32" - min_val = float("-0.212158") - max_val = float("0.398515") - mean = float("-0.0802837") - std = float("0.079264") + min_val = float("-0.129246") + max_val = float("0.173103") + mean = float("-0.0523329") + std = float("0.0390795") data = None @@ -3401,10 +3401,10 @@ class Program_weight_tensor_parameter_309: name = "parameter_309" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0093195") - max_val = float("0.0136616") - mean = float("-0.000127923") - std = float("0.000958249") + min_val = float("-0.0126416") + max_val = float("0.0247245") + mean = float("-9.22132e-05") + std = float("0.000953698") data = None @@ -3412,10 +3412,10 @@ class Program_weight_tensor_parameter_310: name = "parameter_310" shape = [384] dtype = "float32" - min_val = float("-0.0472265") - max_val = float("0.0147404") - mean = float("-0.0123072") - std = float("0.0107235") + min_val = float("-0.0846385") + max_val = float("-0.0012331") + mean = float("-0.0378149") + std = float("0.0147169") data = None @@ -3423,10 +3423,10 @@ class Program_weight_tensor_parameter_311: name = "parameter_311" shape = [384] dtype = "float32" - min_val = float("0.97566") - max_val = float("1.10735") - mean = float("1.01405") - std = float("0.0167737") + min_val = float("0.961605") + max_val = float("1.1164") + mean = float("1.01863") + std = float("0.0258138") data = None @@ -3434,10 +3434,10 @@ class Program_weight_tensor_parameter_312: name = "parameter_312" shape = [384] dtype = "float32" - min_val = float("0.0120215") - max_val = float("0.0818828") - mean = float("0.0302745") - std = float("0.00902368") + min_val = float("0.00543412") + max_val = float("0.034") + mean = float("0.0114679") + std = float("0.00281078") data = None @@ -3445,10 +3445,10 @@ class Program_weight_tensor_parameter_313: name = "parameter_313" shape = [384] dtype = "float32" - min_val = float("-0.144457") - max_val = float("0.215418") - mean = float("-0.0320907") - std = float("0.0550216") + min_val = float("-0.105395") + max_val = float("0.0875829") + mean = float("-0.0276342") + std = float("0.0283604") data = None @@ -3456,10 +3456,10 @@ class Program_weight_tensor_parameter_314: name = "parameter_314" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.00932646") - max_val = float("0.0180928") - 
mean = float("-5.9912e-05") - std = float("0.00138574") + min_val = float("-0.0131868") + max_val = float("0.0207652") + mean = float("-5.34705e-05") + std = float("0.00131228") data = None @@ -3467,10 +3467,10 @@ class Program_weight_tensor_parameter_315: name = "parameter_315" shape = [384] dtype = "float32" - min_val = float("-0.0636266") - max_val = float("0.0504044") - mean = float("-0.0321583") - std = float("0.01479") + min_val = float("-0.107271") + max_val = float("0.0233646") + mean = float("-0.0562337") + std = float("0.0197346") data = None @@ -3478,10 +3478,10 @@ class Program_weight_tensor_parameter_316: name = "parameter_316" shape = [384] dtype = "float32" - min_val = float("0.963145") - max_val = float("1.05901") - mean = float("1.01824") - std = float("0.0133966") + min_val = float("0.981244") + max_val = float("1.07279") + mean = float("1.02152") + std = float("0.0138952") data = None @@ -3489,10 +3489,10 @@ class Program_weight_tensor_parameter_317: name = "parameter_317" shape = [384] dtype = "float32" - min_val = float("0.017764") - max_val = float("0.0645009") - mean = float("0.0305608") - std = float("0.00719559") + min_val = float("0.00227472") + max_val = float("0.00995654") + mean = float("0.00327401") + std = float("0.000751464") data = None @@ -3500,10 +3500,10 @@ class Program_weight_tensor_parameter_318: name = "parameter_318" shape = [384] dtype = "float32" - min_val = float("-0.20154") - max_val = float("0.391038") - mean = float("-0.0613575") - std = float("0.0615632") + min_val = float("-0.0410182") + max_val = float("0.0248085") + mean = float("0.0060818") + std = float("0.00760998") data = None @@ -3511,10 +3511,10 @@ class Program_weight_tensor_parameter_319: name = "parameter_319" shape = [384, 1024, 1, 1] dtype = "float32" - min_val = float("-0.0154521") - max_val = float("0.0405892") - mean = float("-0.000219009") - std = float("0.00320931") + min_val = float("-0.0184221") + max_val = float("0.0482036") + mean = float("-0.000162631") + std = float("0.00257425") data = None @@ -3522,10 +3522,10 @@ class Program_weight_tensor_parameter_320: name = "parameter_320" shape = [384] dtype = "float32" - min_val = float("-0.0181982") - max_val = float("0.0407238") - mean = float("0.0138314") - std = float("0.0107853") + min_val = float("-0.058929") + max_val = float("0.0342356") + mean = float("-0.00850635") + std = float("0.0113521") data = None @@ -3533,10 +3533,10 @@ class Program_weight_tensor_parameter_321: name = "parameter_321" shape = [384] dtype = "float32" - min_val = float("1.02428") - max_val = float("1.13089") - mean = float("1.07731") - std = float("0.018385") + min_val = float("1.00879") + max_val = float("1.21413") + mean = float("1.05118") + std = float("0.0209519") data = None @@ -3544,10 +3544,10 @@ class Program_weight_tensor_parameter_322: name = "parameter_322" shape = [384] dtype = "float32" - min_val = float("0.0297022") - max_val = float("0.0861284") - mean = float("0.0459045") - std = float("0.0083673") + min_val = float("0.00216519") + max_val = float("0.00959873") + mean = float("0.00317997") + std = float("0.000757622") data = None @@ -3555,10 +3555,10 @@ class Program_weight_tensor_parameter_323: name = "parameter_323" shape = [384] dtype = "float32" - min_val = float("-0.214848") - max_val = float("0.119899") - mean = float("-0.107077") - std = float("0.0483314") + min_val = float("-0.0134693") + max_val = float("0.019212") + mean = float("0.00436546") + std = float("0.00543387") data = None @@ -3566,10 +3566,10 @@ class 
Program_weight_tensor_parameter_324: name = "parameter_324" shape = [384, 1024, 1, 1] dtype = "float32" - min_val = float("-0.0202669") - max_val = float("0.0292336") - mean = float("-0.000379697") - std = float("0.00443318") + min_val = float("-0.0415669") + max_val = float("0.0455758") + mean = float("-0.000161607") + std = float("0.00274744") data = None @@ -3577,10 +3577,10 @@ class Program_weight_tensor_parameter_325: name = "parameter_325" shape = [1024] dtype = "float32" - min_val = float("-3.76738") - max_val = float("-0.74061") - mean = float("-2.19149") - std = float("0.429665") + min_val = float("-3.76504") + max_val = float("-0.731072") + mean = float("-2.1942") + std = float("0.428366") data = None @@ -3588,10 +3588,10 @@ class Program_weight_tensor_parameter_326: name = "parameter_326" shape = [1024] dtype = "float32" - min_val = float("1.61677") - max_val = float("4.4298") - mean = float("3.07687") - std = float("0.251634") + min_val = float("1.62418") + max_val = float("4.43435") + mean = float("3.07417") + std = float("0.255202") data = None @@ -3599,10 +3599,10 @@ class Program_weight_tensor_parameter_327: name = "parameter_327" shape = [1024] dtype = "float32" - min_val = float("0.00318616") - max_val = float("0.0156112") - mean = float("0.00619051") - std = float("0.00132122") + min_val = float("0.00545502") + max_val = float("0.0375841") + mean = float("0.0112259") + std = float("0.00369148") data = None @@ -3610,10 +3610,10 @@ class Program_weight_tensor_parameter_328: name = "parameter_328" shape = [1024] dtype = "float32" - min_val = float("-0.151272") - max_val = float("0.141612") - mean = float("-0.0486746") - std = float("0.0293873") + min_val = float("-0.116582") + max_val = float("0.0584592") + mean = float("-0.0421243") + std = float("0.0189653") data = None @@ -3621,10 +3621,10 @@ class Program_weight_tensor_parameter_329: name = "parameter_329" shape = [1024, 768, 1, 1] dtype = "float32" - min_val = float("-0.0373496") - max_val = float("0.0729797") - mean = float("-0.000461198") - std = float("0.00502166") + min_val = float("-0.0682658") + max_val = float("0.08076") + mean = float("-0.00031367") + std = float("0.00305502") data = None @@ -3632,10 +3632,10 @@ class Program_weight_tensor_parameter_330: name = "parameter_330" shape = [768] dtype = "float32" - min_val = float("-0.0138178") - max_val = float("0.000466847") - mean = float("-0.00284092") - std = float("0.00262746") + min_val = float("-0.0121546") + max_val = float("0.00561678") + mean = float("-0.000568828") + std = float("0.00173927") data = None @@ -3643,10 +3643,10 @@ class Program_weight_tensor_parameter_331: name = "parameter_331" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.119554") - max_val = float("0.123314") - mean = float("-0.00126139") - std = float("0.00352046") + min_val = float("-0.104186") + max_val = float("0.108443") + mean = float("-0.000213298") + std = float("0.00130075") data = None @@ -3654,10 +3654,10 @@ class Program_weight_tensor_parameter_332: name = "parameter_332" shape = [384] dtype = "float32" - min_val = float("-1.77338") - max_val = float("0.412596") - mean = float("-0.274223") - std = float("0.294015") + min_val = float("-1.77817") + max_val = float("0.498255") + mean = float("-0.30001") + std = float("0.296857") data = None @@ -3665,10 +3665,10 @@ class Program_weight_tensor_parameter_333: name = "parameter_333" shape = [384] dtype = "float32" - min_val = float("0.235525") - max_val = float("2.08023") - mean = float("0.662181") - std = 
float("0.284497") + min_val = float("0.18984") + max_val = float("1.98158") + mean = float("0.620203") + std = float("0.2786") data = None @@ -3676,10 +3676,10 @@ class Program_weight_tensor_parameter_334: name = "parameter_334" shape = [384] dtype = "float32" - min_val = float("0.000109661") - max_val = float("0.00172742") - mean = float("0.000430241") - std = float("0.000193304") + min_val = float("3.45478e-05") + max_val = float("0.000862679") + mean = float("0.000152348") + std = float("8.47812e-05") data = None @@ -3687,10 +3687,10 @@ class Program_weight_tensor_parameter_335: name = "parameter_335" shape = [384] dtype = "float32" - min_val = float("-0.130554") - max_val = float("0.0981204") - mean = float("0.0202639") - std = float("0.0336592") + min_val = float("-0.0224284") + max_val = float("0.0569889") + mean = float("0.0173439") + std = float("0.0133472") data = None @@ -3698,10 +3698,10 @@ class Program_weight_tensor_parameter_336: name = "parameter_336" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0274341") - max_val = float("0.0353346") - mean = float("-0.000364024") - std = float("0.0038463") + min_val = float("-0.0196332") + max_val = float("0.0252926") + mean = float("-0.000291589") + std = float("0.00211517") data = None @@ -3709,10 +3709,10 @@ class Program_weight_tensor_parameter_337: name = "parameter_337" shape = [384] dtype = "float32" - min_val = float("-1.77338") - max_val = float("0.412596") - mean = float("-0.274223") - std = float("0.294015") + min_val = float("-1.77817") + max_val = float("0.498255") + mean = float("-0.30001") + std = float("0.296857") data = None @@ -3720,10 +3720,10 @@ class Program_weight_tensor_parameter_338: name = "parameter_338" shape = [384] dtype = "float32" - min_val = float("0.350868") - max_val = float("2.87683") - mean = float("1.11168") - std = float("0.332422") + min_val = float("0.36588") + max_val = float("2.77771") + mean = float("1.04742") + std = float("0.308644") data = None @@ -3731,10 +3731,10 @@ class Program_weight_tensor_parameter_339: name = "parameter_339" shape = [384] dtype = "float32" - min_val = float("0.000733881") - max_val = float("0.0108386") - mean = float("0.002852") - std = float("0.0011359") + min_val = float("0.000357258") + max_val = float("0.00471916") + mean = float("0.00106354") + std = float("0.000429049") data = None @@ -3742,10 +3742,10 @@ class Program_weight_tensor_parameter_340: name = "parameter_340" shape = [384] dtype = "float32" - min_val = float("-0.119853") - max_val = float("0.177988") - mean = float("0.00969028") - std = float("0.0360493") + min_val = float("-0.199773") + max_val = float("0.0734005") + mean = float("0.0127227") + std = float("0.0242137") data = None @@ -3753,10 +3753,10 @@ class Program_weight_tensor_parameter_341: name = "parameter_341" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0215559") - max_val = float("0.0430067") - mean = float("-3.73056e-05") - std = float("0.00229731") + min_val = float("-0.0170937") + max_val = float("0.0209283") + mean = float("-2.76977e-05") + std = float("0.0013628") data = None @@ -3764,10 +3764,10 @@ class Program_weight_tensor_parameter_342: name = "parameter_342" shape = [384] dtype = "float32" - min_val = float("-2.6275") - max_val = float("0.015896") - mean = float("-1.59098") - std = float("0.414959") + min_val = float("-2.61203") + max_val = float("0.0551867") + mean = float("-1.58323") + std = float("0.416329") data = None @@ -3775,10 +3775,10 @@ class Program_weight_tensor_parameter_343: 
name = "parameter_343" shape = [384] dtype = "float32" - min_val = float("0.47721") - max_val = float("1.87134") - mean = float("1.1266") - std = float("0.14685") + min_val = float("0.567765") + max_val = float("1.67644") + mean = float("1.1241") + std = float("0.146785") data = None @@ -3786,10 +3786,10 @@ class Program_weight_tensor_parameter_344: name = "parameter_344" shape = [384] dtype = "float32" - min_val = float("0.120369") - max_val = float("0.515457") - mean = float("0.241946") - std = float("0.0512298") + min_val = float("0.0222405") + max_val = float("0.126805") + mean = float("0.0503725") + std = float("0.0157052") data = None @@ -3797,10 +3797,10 @@ class Program_weight_tensor_parameter_345: name = "parameter_345" shape = [384] dtype = "float32" - min_val = float("-1.59625") - max_val = float("1.32189") - mean = float("-0.654999") - std = float("0.281837") + min_val = float("-0.656718") + max_val = float("0.389056") + mean = float("-0.183173") + std = float("0.0983499") data = None @@ -3808,10 +3808,10 @@ class Program_weight_tensor_parameter_346: name = "parameter_346" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0216367") - max_val = float("0.0531612") - mean = float("-0.000286661") - std = float("0.00308802") + min_val = float("-0.0169074") + max_val = float("0.0427054") + mean = float("-0.000129063") + std = float("0.00175192") data = None @@ -3819,10 +3819,10 @@ class Program_weight_tensor_parameter_347: name = "parameter_347" shape = [384] dtype = "float32" - min_val = float("-1.93672") - max_val = float("1.08927") - mean = float("-0.561118") - std = float("0.377358") + min_val = float("-1.93759") + max_val = float("0.733668") + mean = float("-0.570541") + std = float("0.365887") data = None @@ -3830,10 +3830,10 @@ class Program_weight_tensor_parameter_348: name = "parameter_348" shape = [384] dtype = "float32" - min_val = float("0.163384") - max_val = float("2.03243") - mean = float("0.568654") - std = float("0.23058") + min_val = float("0.140501") + max_val = float("2.06358") + mean = float("0.563187") + std = float("0.226696") data = None @@ -3841,10 +3841,10 @@ class Program_weight_tensor_parameter_349: name = "parameter_349" shape = [384] dtype = "float32" - min_val = float("0.000211366") - max_val = float("0.00209824") - mean = float("0.000563789") - std = float("0.000253525") + min_val = float("4.15388e-05") + max_val = float("0.000906682") + mean = float("0.00019597") + std = float("9.97336e-05") data = None @@ -3852,10 +3852,10 @@ class Program_weight_tensor_parameter_350: name = "parameter_350" shape = [384] dtype = "float32" - min_val = float("-0.0508295") - max_val = float("0.116156") - mean = float("0.0281283") - std = float("0.0230871") + min_val = float("-0.0259493") + max_val = float("0.0583295") + mean = float("0.0172821") + std = float("0.0111211") data = None @@ -3863,10 +3863,10 @@ class Program_weight_tensor_parameter_351: name = "parameter_351" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0278888") - max_val = float("0.0403076") - mean = float("-0.000537846") - std = float("0.00370404") + min_val = float("-0.0185543") + max_val = float("0.0208855") + mean = float("-0.000319504") + std = float("0.00201805") data = None @@ -3874,10 +3874,10 @@ class Program_weight_tensor_parameter_352: name = "parameter_352" shape = [384] dtype = "float32" - min_val = float("-1.93672") - max_val = float("1.08927") - mean = float("-0.561118") - std = float("0.377358") + min_val = float("-1.93768") + max_val = float("0.733668") 
+ mean = float("-0.570541") + std = float("0.365888") data = None @@ -3885,10 +3885,10 @@ class Program_weight_tensor_parameter_353: name = "parameter_353" shape = [384] dtype = "float32" - min_val = float("0.579866") - max_val = float("2.2087") - mean = float("1.11942") - std = float("0.260049") + min_val = float("0.57953") + max_val = float("2.10385") + mean = float("1.09247") + std = float("0.254048") data = None @@ -3896,10 +3896,10 @@ class Program_weight_tensor_parameter_354: name = "parameter_354" shape = [384] dtype = "float32" - min_val = float("0.00147811") - max_val = float("0.014418") - mean = float("0.00450143") - std = float("0.00147944") + min_val = float("0.000627586") + max_val = float("0.00452718") + mean = float("0.00162004") + std = float("0.000525088") data = None @@ -3907,10 +3907,10 @@ class Program_weight_tensor_parameter_355: name = "parameter_355" shape = [384] dtype = "float32" - min_val = float("-0.198479") - max_val = float("0.184089") - mean = float("0.0300434") - std = float("0.0432483") + min_val = float("-0.0456402") + max_val = float("0.100243") + mean = float("0.0222313") + std = float("0.0223563") data = None @@ -3918,10 +3918,10 @@ class Program_weight_tensor_parameter_356: name = "parameter_356" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0243065") - max_val = float("0.0531256") - mean = float("-8.65998e-05") - std = float("0.00245086") + min_val = float("-0.017816") + max_val = float("0.0248569") + mean = float("-5.76546e-05") + std = float("0.00141105") data = None @@ -3929,10 +3929,10 @@ class Program_weight_tensor_parameter_357: name = "parameter_357" shape = [384] dtype = "float32" - min_val = float("-2.40101") - max_val = float("0.85896") - mean = float("-1.44185") - std = float("0.356455") + min_val = float("-2.4273") + max_val = float("0.839141") + mean = float("-1.42155") + std = float("0.360806") data = None @@ -3940,10 +3940,10 @@ class Program_weight_tensor_parameter_358: name = "parameter_358" shape = [384] dtype = "float32" - min_val = float("0.363523") - max_val = float("1.90593") - mean = float("1.14998") - std = float("0.141316") + min_val = float("0.438688") + max_val = float("1.84355") + mean = float("1.15674") + std = float("0.142333") data = None @@ -3951,10 +3951,10 @@ class Program_weight_tensor_parameter_359: name = "parameter_359" shape = [384] dtype = "float32" - min_val = float("0.0902803") - max_val = float("0.317551") - mean = float("0.162194") - std = float("0.0353807") + min_val = float("0.0169176") + max_val = float("0.0834605") + mean = float("0.0339828") + std = float("0.0113006") data = None @@ -3962,10 +3962,10 @@ class Program_weight_tensor_parameter_360: name = "parameter_360" shape = [384] dtype = "float32" - min_val = float("-0.96643") - max_val = float("1.13281") - mean = float("-0.366938") - std = float("0.183667") + min_val = float("-0.537141") + max_val = float("0.620008") + mean = float("-0.11866") + std = float("0.0759343") data = None @@ -3973,10 +3973,10 @@ class Program_weight_tensor_parameter_361: name = "parameter_361" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0266427") - max_val = float("0.0728291") - mean = float("-0.000293523") - std = float("0.00309007") + min_val = float("-0.0215698") + max_val = float("0.0331226") + mean = float("-0.000122919") + std = float("0.00173093") data = None @@ -3984,10 +3984,10 @@ class Program_weight_tensor_parameter_362: name = "parameter_362" shape = [384] dtype = "float32" - min_val = float("-1.88671") - max_val = 
float("0.68709") - mean = float("-0.470258") - std = float("0.394922") + min_val = float("-1.88491") + max_val = float("0.489301") + mean = float("-0.478428") + std = float("0.384422") data = None @@ -3995,10 +3995,10 @@ class Program_weight_tensor_parameter_363: name = "parameter_363" shape = [384] dtype = "float32" - min_val = float("0.081144") - max_val = float("2.10167") - mean = float("0.444389") - std = float("0.213461") + min_val = float("0.0963961") + max_val = float("2.12165") + mean = float("0.441644") + std = float("0.215765") data = None @@ -4006,10 +4006,10 @@ class Program_weight_tensor_parameter_364: name = "parameter_364" shape = [384] dtype = "float32" - min_val = float("0.000181328") - max_val = float("0.00238775") - mean = float("0.000598608") - std = float("0.000263232") + min_val = float("4.83567e-05") + max_val = float("0.00122351") + mean = float("0.000214336") + std = float("0.000121244") data = None @@ -4017,10 +4017,10 @@ class Program_weight_tensor_parameter_365: name = "parameter_365" shape = [384] dtype = "float32" - min_val = float("-0.122912") - max_val = float("0.116066") - mean = float("0.0424948") - std = float("0.0250559") + min_val = float("-0.0604177") + max_val = float("0.0648457") + mean = float("0.0205045") + std = float("0.0130396") data = None @@ -4028,10 +4028,10 @@ class Program_weight_tensor_parameter_366: name = "parameter_366" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.035118") - max_val = float("0.0374998") - mean = float("-0.000747404") - std = float("0.00319117") + min_val = float("-0.0178981") + max_val = float("0.0199833") + mean = float("-0.000383584") + std = float("0.00172042") data = None @@ -4039,10 +4039,10 @@ class Program_weight_tensor_parameter_367: name = "parameter_367" shape = [384] dtype = "float32" - min_val = float("-1.88671") - max_val = float("0.68709") - mean = float("-0.470258") - std = float("0.394922") + min_val = float("-1.88491") + max_val = float("0.489301") + mean = float("-0.478428") + std = float("0.384422") data = None @@ -4050,10 +4050,10 @@ class Program_weight_tensor_parameter_368: name = "parameter_368" shape = [384] dtype = "float32" - min_val = float("0.542057") - max_val = float("2.23163") - mean = float("1.09037") - std = float("0.258504") + min_val = float("0.572695") + max_val = float("2.21595") + mean = float("1.06078") + std = float("0.254685") data = None @@ -4061,10 +4061,10 @@ class Program_weight_tensor_parameter_369: name = "parameter_369" shape = [384] dtype = "float32" - min_val = float("0.00171885") - max_val = float("0.0206226") - mean = float("0.00584658") - std = float("0.00207454") + min_val = float("0.000726389") + max_val = float("0.00447183") + mean = float("0.00202693") + std = float("0.000650997") data = None @@ -4072,10 +4072,10 @@ class Program_weight_tensor_parameter_370: name = "parameter_370" shape = [384] dtype = "float32" - min_val = float("-0.160396") - max_val = float("0.169188") - mean = float("0.0475079") - std = float("0.0427583") + min_val = float("-0.161605") + max_val = float("0.0850251") + mean = float("0.0261345") + std = float("0.0250145") data = None @@ -4083,10 +4083,10 @@ class Program_weight_tensor_parameter_371: name = "parameter_371" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0204974") - max_val = float("0.0381036") - mean = float("-0.000108104") - std = float("0.00256466") + min_val = float("-0.016375") + max_val = float("0.0234752") + mean = float("-5.94736e-05") + std = float("0.00147549") data = None @@ -4094,10 
+4094,10 @@ class Program_weight_tensor_parameter_372: name = "parameter_372" shape = [384] dtype = "float32" - min_val = float("-2.18087") - max_val = float("0.356852") - mean = float("-1.40712") - std = float("0.272169") + min_val = float("-2.15512") + max_val = float("0.42913") + mean = float("-1.38221") + std = float("0.277652") data = None @@ -4105,10 +4105,10 @@ class Program_weight_tensor_parameter_373: name = "parameter_373" shape = [384] dtype = "float32" - min_val = float("0.61486") - max_val = float("1.63036") - mean = float("1.12182") - std = float("0.102341") + min_val = float("0.714131") + max_val = float("1.63294") + mean = float("1.13544") + std = float("0.0992085") data = None @@ -4116,10 +4116,10 @@ class Program_weight_tensor_parameter_374: name = "parameter_374" shape = [384] dtype = "float32" - min_val = float("0.0686106") - max_val = float("0.300719") - mean = float("0.120296") - std = float("0.0324319") + min_val = float("0.0110827") + max_val = float("0.0639734") + mean = float("0.0257205") + std = float("0.00835153") data = None @@ -4127,10 +4127,10 @@ class Program_weight_tensor_parameter_375: name = "parameter_375" shape = [384] dtype = "float32" - min_val = float("-1.00958") - max_val = float("0.205067") - mean = float("-0.252895") - std = float("0.152032") + min_val = float("-0.516632") + max_val = float("0.224516") + mean = float("-0.0886359") + std = float("0.0647703") data = None @@ -4138,10 +4138,10 @@ class Program_weight_tensor_parameter_376: name = "parameter_376" shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-0.0201776") - max_val = float("0.0515865") - mean = float("-0.000247363") - std = float("0.00287082") + min_val = float("-0.0257925") + max_val = float("0.0417178") + mean = float("-0.000106541") + std = float("0.00163045") data = None @@ -4149,10 +4149,10 @@ class Program_weight_tensor_parameter_377: name = "parameter_377" shape = [384] dtype = "float32" - min_val = float("-2.9181") - max_val = float("2.39818") - mean = float("-0.744825") - std = float("0.666127") + min_val = float("-2.93083") + max_val = float("1.76129") + mean = float("-0.76492") + std = float("0.654104") data = None @@ -4160,10 +4160,10 @@ class Program_weight_tensor_parameter_378: name = "parameter_378" shape = [384] dtype = "float32" - min_val = float("1.00684") - max_val = float("2.89816") - mean = float("1.92298") - std = float("0.268903") + min_val = float("0.973816") + max_val = float("2.91102") + mean = float("1.85247") + std = float("0.272485") data = None @@ -4171,10 +4171,10 @@ class Program_weight_tensor_parameter_379: name = "parameter_379" shape = [384] dtype = "float32" - min_val = float("0.00323555") - max_val = float("0.0164731") - mean = float("0.00686751") - std = float("0.00157055") + min_val = float("0.00124108") + max_val = float("0.00551094") + mean = float("0.00251181") + std = float("0.000624682") data = None @@ -4182,10 +4182,10 @@ class Program_weight_tensor_parameter_380: name = "parameter_380" shape = [384] dtype = "float32" - min_val = float("-0.29054") - max_val = float("0.150754") - mean = float("0.0807349") - std = float("0.0368912") + min_val = float("-0.154056") + max_val = float("0.0997407") + mean = float("0.0427987") + std = float("0.0198229") data = None @@ -4193,10 +4193,10 @@ class Program_weight_tensor_parameter_381: name = "parameter_381" shape = [384, 768, 1, 1] dtype = "float32" - min_val = float("-0.0488105") - max_val = float("0.0678759") - mean = float("-0.000895552") - std = float("0.00678193") + min_val = 
float("-0.0482202") + max_val = float("0.0395783") + mean = float("-0.000489332") + std = float("0.00371088") data = None @@ -4204,10 +4204,10 @@ class Program_weight_tensor_parameter_382: name = "parameter_382" shape = [384] dtype = "float32" - min_val = float("-2.26258") - max_val = float("0.746331") - mean = float("-0.781383") - std = float("0.476609") + min_val = float("-2.24474") + max_val = float("0.693989") + mean = float("-0.776354") + std = float("0.476138") data = None @@ -4215,10 +4215,10 @@ class Program_weight_tensor_parameter_383: name = "parameter_383" shape = [384] dtype = "float32" - min_val = float("0.922112") - max_val = float("2.88383") - mean = float("2.09535") - std = float("0.309922") + min_val = float("0.973795") + max_val = float("2.89093") + mean = float("2.10265") + std = float("0.302973") data = None @@ -4226,10 +4226,10 @@ class Program_weight_tensor_parameter_384: name = "parameter_384" shape = [384] dtype = "float32" - min_val = float("0.0011051") - max_val = float("0.00520625") - mean = float("0.00250761") - std = float("0.00049518") + min_val = float("0.000459391") + max_val = float("0.00364653") + mean = float("0.00103832") + std = float("0.000354294") data = None @@ -4237,10 +4237,10 @@ class Program_weight_tensor_parameter_385: name = "parameter_385" shape = [384] dtype = "float32" - min_val = float("-0.0778026") - max_val = float("0.0934732") - mean = float("0.049973") - std = float("0.0213988") + min_val = float("-0.036311") + max_val = float("0.0640884") + mean = float("0.0228094") + std = float("0.0124348") data = None @@ -4248,10 +4248,10 @@ class Program_weight_tensor_parameter_386: name = "parameter_386" shape = [384, 768, 1, 1] dtype = "float32" - min_val = float("-0.0427813") - max_val = float("0.0835518") - mean = float("-0.000478385") - std = float("0.00455026") + min_val = float("-0.129421") + max_val = float("0.063091") + mean = float("-0.000254003") + std = float("0.00272646") data = None @@ -4259,10 +4259,10 @@ class Program_weight_tensor_parameter_387: name = "parameter_387" shape = [768] dtype = "float32" - min_val = float("-2.45583") - max_val = float("0.559171") - mean = float("-0.951569") - std = float("0.337772") + min_val = float("-2.41043") + max_val = float("0.654464") + mean = float("-0.915927") + std = float("0.344484") data = None @@ -4270,10 +4270,10 @@ class Program_weight_tensor_parameter_388: name = "parameter_388" shape = [768] dtype = "float32" - min_val = float("0.470726") - max_val = float("1.80127") - mean = float("0.915463") - std = float("0.147887") + min_val = float("0.519256") + max_val = float("1.87666") + mean = float("0.912499") + std = float("0.147147") data = None @@ -4281,10 +4281,10 @@ class Program_weight_tensor_parameter_389: name = "parameter_389" shape = [768] dtype = "float32" - min_val = float("0.0124823") - max_val = float("0.0978023") - mean = float("0.0216824") - std = float("0.00583908") + min_val = float("0.00362407") + max_val = float("0.0346536") + mean = float("0.00806678") + std = float("0.00278567") data = None @@ -4292,10 +4292,10 @@ class Program_weight_tensor_parameter_390: name = "parameter_390" shape = [768] dtype = "float32" - min_val = float("-0.476605") - max_val = float("0.316374") - mean = float("0.0538298") - std = float("0.0770574") + min_val = float("-0.180838") + max_val = float("0.1547") + mean = float("0.0229543") + std = float("0.0362646") data = None @@ -4303,10 +4303,10 @@ class Program_weight_tensor_parameter_391: name = "parameter_391" shape = [768, 512, 3, 3] dtype = 
"float32" - min_val = float("-0.033942") - max_val = float("0.048741") - mean = float("-0.00014743") - std = float("0.00292614") + min_val = float("-0.050247") + max_val = float("0.0427207") + mean = float("-6.42878e-05") + std = float("0.00168004") data = None @@ -4314,10 +4314,10 @@ class Program_weight_tensor_parameter_392: name = "parameter_392" shape = [512] dtype = "float32" - min_val = float("-3.41671") - max_val = float("1.90925") - mean = float("-1.24212") - std = float("0.518646") + min_val = float("-3.38705") + max_val = float("1.66488") + mean = float("-1.17821") + std = float("0.526891") data = None @@ -4325,10 +4325,10 @@ class Program_weight_tensor_parameter_393: name = "parameter_393" shape = [512] dtype = "float32" - min_val = float("0.417792") - max_val = float("1.62807") - mean = float("1.12265") - std = float("0.144739") + min_val = float("0.488671") + max_val = float("1.69914") + mean = float("1.10955") + std = float("0.15027") data = None @@ -4336,10 +4336,10 @@ class Program_weight_tensor_parameter_394: name = "parameter_394" shape = [512] dtype = "float32" - min_val = float("0.00686698") - max_val = float("0.0300204") - mean = float("0.0132355") - std = float("0.00263341") + min_val = float("0.00126485") + max_val = float("0.00820921") + mean = float("0.00340923") + std = float("0.00106595") data = None @@ -4347,10 +4347,10 @@ class Program_weight_tensor_parameter_395: name = "parameter_395" shape = [512] dtype = "float32" - min_val = float("-0.204822") - max_val = float("0.0997006") - mean = float("-0.0635602") - std = float("0.0500124") + min_val = float("-0.11898") + max_val = float("0.0891435") + mean = float("-0.0369126") + std = float("0.0289632") data = None @@ -4358,10 +4358,10 @@ class Program_weight_tensor_parameter_396: name = "parameter_396" shape = [512, 384, 1, 1] dtype = "float32" - min_val = float("-0.0779687") - max_val = float("0.204082") - mean = float("-0.000758003") - std = float("0.00949015") + min_val = float("-0.225953") + max_val = float("0.196097") + mean = float("-0.000408665") + std = float("0.00566642") data = None @@ -4369,10 +4369,10 @@ class Program_weight_tensor_parameter_397: name = "parameter_397" shape = [384] dtype = "float32" - min_val = float("-0.010737") - max_val = float("0.00241083") - mean = float("-0.00328027") - std = float("0.00285957") + min_val = float("-0.00866309") + max_val = float("0.000825011") + mean = float("-0.00232518") + std = float("0.0017594") data = None @@ -4380,10 +4380,10 @@ class Program_weight_tensor_parameter_398: name = "parameter_398" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.214148") - max_val = float("0.108238") - mean = float("-0.00206829") - std = float("0.00564515") + min_val = float("-0.202855") + max_val = float("0.135115") + mean = float("-0.00175899") + std = float("0.00410018") data = None @@ -4391,10 +4391,10 @@ class Program_weight_tensor_parameter_399: name = "parameter_399" shape = [192] dtype = "float32" - min_val = float("-1.9212") - max_val = float("0.337783") - mean = float("-0.37118") - std = float("0.319858") + min_val = float("-1.95281") + max_val = float("0.504414") + mean = float("-0.323169") + std = float("0.341204") data = None @@ -4402,10 +4402,10 @@ class Program_weight_tensor_parameter_400: name = "parameter_400" shape = [192] dtype = "float32" - min_val = float("0.000863766") - max_val = float("2.25766") - mean = float("0.550683") - std = float("0.447335") + min_val = float("0.0702988") + max_val = float("2.23409") + mean = float("0.601912") + std 
= float("0.439789") data = None @@ -4413,10 +4413,10 @@ class Program_weight_tensor_parameter_401: name = "parameter_401" shape = [192] dtype = "float32" - min_val = float("5.24031e-07") - max_val = float("0.00212704") - mean = float("0.000573903") - std = float("0.000337641") + min_val = float("6.06397e-05") + max_val = float("0.000857671") + mean = float("0.000292524") + std = float("0.000171414") data = None @@ -4424,10 +4424,10 @@ class Program_weight_tensor_parameter_402: name = "parameter_402" shape = [192] dtype = "float32" - min_val = float("-0.0361206") - max_val = float("0.0752975") - mean = float("0.0112954") - std = float("0.019226") + min_val = float("-0.0341788") + max_val = float("0.0367911") + mean = float("0.0035144") + std = float("0.0110465") data = None @@ -4435,10 +4435,10 @@ class Program_weight_tensor_parameter_403: name = "parameter_403" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0324994") - max_val = float("0.0613531") - mean = float("-0.000532142") - std = float("0.00461667") + min_val = float("-0.0204711") + max_val = float("0.0537535") + mean = float("-0.000251412") + std = float("0.00321926") data = None @@ -4446,10 +4446,10 @@ class Program_weight_tensor_parameter_404: name = "parameter_404" shape = [192] dtype = "float32" - min_val = float("-1.9212") - max_val = float("0.337783") - mean = float("-0.37118") - std = float("0.319858") + min_val = float("-1.95281") + max_val = float("0.504414") + mean = float("-0.323169") + std = float("0.341204") data = None @@ -4457,10 +4457,10 @@ class Program_weight_tensor_parameter_405: name = "parameter_405" shape = [192] dtype = "float32" - min_val = float("0.331404") - max_val = float("2.85491") - mean = float("1.19184") - std = float("0.517709") + min_val = float("0.384773") + max_val = float("2.87161") + mean = float("1.22954") + std = float("0.52094") data = None @@ -4468,10 +4468,10 @@ class Program_weight_tensor_parameter_406: name = "parameter_406" shape = [192] dtype = "float32" - min_val = float("0.00140744") - max_val = float("0.0176487") - mean = float("0.00594824") - std = float("0.00242133") + min_val = float("0.00047849") + max_val = float("0.0105565") + mean = float("0.00264743") + std = float("0.0011636") data = None @@ -4479,10 +4479,10 @@ class Program_weight_tensor_parameter_407: name = "parameter_407" shape = [192] dtype = "float32" - min_val = float("-0.149957") - max_val = float("0.183059") - mean = float("0.0286224") - std = float("0.0529416") + min_val = float("-0.0760633") + max_val = float("0.0960629") + mean = float("0.0132737") + std = float("0.0293694") data = None @@ -4490,10 +4490,10 @@ class Program_weight_tensor_parameter_408: name = "parameter_408" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0304763") - max_val = float("0.0467537") - mean = float("-0.000182961") - std = float("0.00351809") + min_val = float("-0.0214696") + max_val = float("0.0330379") + mean = float("-0.000105926") + std = float("0.00236944") data = None @@ -4501,10 +4501,10 @@ class Program_weight_tensor_parameter_409: name = "parameter_409" shape = [192] dtype = "float32" - min_val = float("-2.92889") - max_val = float("-0.215128") - mean = float("-1.34775") - std = float("0.401955") + min_val = float("-2.88997") + max_val = float("-0.124639") + mean = float("-1.33303") + std = float("0.398") data = None @@ -4512,10 +4512,10 @@ class Program_weight_tensor_parameter_410: name = "parameter_410" shape = [192] dtype = "float32" - min_val = float("0.691777") - max_val = 
float("2.01593") - mean = float("1.16727") - std = float("0.167346") + min_val = float("0.720106") + max_val = float("2.09477") + mean = float("1.16316") + std = float("0.171513") data = None @@ -4523,10 +4523,10 @@ class Program_weight_tensor_parameter_411: name = "parameter_411" shape = [192] dtype = "float32" - min_val = float("0.12144") - max_val = float("0.513783") - mean = float("0.2084") - std = float("0.0576939") + min_val = float("0.0350146") + max_val = float("0.281849") + mean = float("0.0801621") + std = float("0.0325573") data = None @@ -4534,10 +4534,10 @@ class Program_weight_tensor_parameter_412: name = "parameter_412" shape = [192] dtype = "float32" - min_val = float("-3.52808") - max_val = float("1.58137") - mean = float("-0.298547") - std = float("0.409608") + min_val = float("-2.97017") + max_val = float("2.23437") + mean = float("-0.175713") + std = float("0.385326") data = None @@ -4545,10 +4545,10 @@ class Program_weight_tensor_parameter_413: name = "parameter_413" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0306673") - max_val = float("0.0446454") - mean = float("-0.000270107") - std = float("0.00414217") + min_val = float("-0.0303358") + max_val = float("0.0416184") + mean = float("-0.000150412") + std = float("0.00285456") data = None @@ -4556,10 +4556,10 @@ class Program_weight_tensor_parameter_414: name = "parameter_414" shape = [192] dtype = "float32" - min_val = float("-1.93263") - max_val = float("0.441273") - mean = float("-0.309778") - std = float("0.3114") + min_val = float("-1.92902") + max_val = float("0.596353") + mean = float("-0.261587") + std = float("0.334409") data = None @@ -4567,10 +4567,10 @@ class Program_weight_tensor_parameter_415: name = "parameter_415" shape = [192] dtype = "float32" - min_val = float("1.31901e-05") - max_val = float("1.74196") - mean = float("0.401521") - std = float("0.316685") + min_val = float("0.0489169") + max_val = float("1.76734") + mean = float("0.453534") + std = float("0.302569") data = None @@ -4578,10 +4578,10 @@ class Program_weight_tensor_parameter_416: name = "parameter_416" shape = [192] dtype = "float32" - min_val = float("2.4925e-10") - max_val = float("0.00266824") - mean = float("0.000571331") - std = float("0.000381815") + min_val = float("5.20947e-05") + max_val = float("0.00150981") + mean = float("0.000284881") + std = float("0.00019177") data = None @@ -4589,10 +4589,10 @@ class Program_weight_tensor_parameter_417: name = "parameter_417" shape = [192] dtype = "float32" - min_val = float("-0.0718396") - max_val = float("0.0473328") - mean = float("0.0120177") - std = float("0.0141073") + min_val = float("-0.0223658") + max_val = float("0.0363701") + mean = float("0.00787701") + std = float("0.00970774") data = None @@ -4600,10 +4600,10 @@ class Program_weight_tensor_parameter_418: name = "parameter_418" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0369971") - max_val = float("0.0474525") - mean = float("-0.000537935") - std = float("0.00432741") + min_val = float("-0.0239734") + max_val = float("0.0302964") + mean = float("-0.000361483") + std = float("0.00299226") data = None @@ -4611,10 +4611,10 @@ class Program_weight_tensor_parameter_419: name = "parameter_419" shape = [192] dtype = "float32" - min_val = float("-1.93263") - max_val = float("0.441273") - mean = float("-0.309778") - std = float("0.3114") + min_val = float("-1.92902") + max_val = float("0.596353") + mean = float("-0.261587") + std = float("0.334409") data = None @@ -4622,10 +4622,10 @@ class 
Program_weight_tensor_parameter_420: name = "parameter_420" shape = [192] dtype = "float32" - min_val = float("0.42444") - max_val = float("2.26483") - mean = float("1.12589") - std = float("0.378215") + min_val = float("0.419546") + max_val = float("2.27565") + mean = float("1.1481") + std = float("0.38095") data = None @@ -4633,10 +4633,10 @@ class Program_weight_tensor_parameter_421: name = "parameter_421" shape = [192] dtype = "float32" - min_val = float("0.0024206") - max_val = float("0.0137874") - mean = float("0.00667971") - std = float("0.00197733") + min_val = float("0.00122219") + max_val = float("0.0063277") + mean = float("0.00282241") + std = float("0.000859264") data = None @@ -4644,10 +4644,10 @@ class Program_weight_tensor_parameter_422: name = "parameter_422" shape = [192] dtype = "float32" - min_val = float("-0.156973") - max_val = float("0.138921") - mean = float("0.0395744") - std = float("0.0387458") + min_val = float("-0.0745243") + max_val = float("0.0844685") + mean = float("0.0227952") + std = float("0.0246463") data = None @@ -4655,10 +4655,10 @@ class Program_weight_tensor_parameter_423: name = "parameter_423" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0297468") - max_val = float("0.061426") - mean = float("-0.000214963") - std = float("0.0038424") + min_val = float("-0.0186838") + max_val = float("0.0254977") + mean = float("-0.000124378") + std = float("0.00251097") data = None @@ -4666,10 +4666,10 @@ class Program_weight_tensor_parameter_424: name = "parameter_424" shape = [192] dtype = "float32" - min_val = float("-2.52744") - max_val = float("-0.176752") - mean = float("-1.32827") - std = float("0.439675") + min_val = float("-2.53592") + max_val = float("-0.131672") + mean = float("-1.31612") + std = float("0.443757") data = None @@ -4677,10 +4677,10 @@ class Program_weight_tensor_parameter_425: name = "parameter_425" shape = [192] dtype = "float32" - min_val = float("0.648699") - max_val = float("1.69649") - mean = float("1.1831") - std = float("0.164999") + min_val = float("0.717982") + max_val = float("1.65612") + mean = float("1.17887") + std = float("0.161137") data = None @@ -4688,10 +4688,10 @@ class Program_weight_tensor_parameter_426: name = "parameter_426" shape = [192] dtype = "float32" - min_val = float("0.0811482") - max_val = float("0.262619") - mean = float("0.148698") - std = float("0.0343621") + min_val = float("0.019859") + max_val = float("0.109361") + mean = float("0.053725") + std = float("0.0174112") data = None @@ -4699,10 +4699,10 @@ class Program_weight_tensor_parameter_427: name = "parameter_427" shape = [192] dtype = "float32" - min_val = float("-2.61184") - max_val = float("0.442873") - mean = float("-0.178286") - std = float("0.277058") + min_val = float("-1.6874") + max_val = float("0.715917") + mean = float("-0.0525487") + std = float("0.202935") data = None @@ -4710,10 +4710,10 @@ class Program_weight_tensor_parameter_428: name = "parameter_428" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0353353") - max_val = float("0.0475915") - mean = float("-0.000311195") - std = float("0.00440006") + min_val = float("-0.0424222") + max_val = float("0.0424072") + mean = float("-0.000164971") + std = float("0.00292092") data = None @@ -4721,10 +4721,10 @@ class Program_weight_tensor_parameter_429: name = "parameter_429" shape = [192] dtype = "float32" - min_val = float("-1.76929") - max_val = float("0.366299") - mean = float("-0.296944") - std = float("0.323923") + min_val = float("-1.76329") + 
max_val = float("0.544922") + mean = float("-0.246307") + std = float("0.349367") data = None @@ -4732,10 +4732,10 @@ class Program_weight_tensor_parameter_430: name = "parameter_430" shape = [192] dtype = "float32" - min_val = float("1.65802e-05") - max_val = float("1.6632") - mean = float("0.314337") - std = float("0.259063") + min_val = float("0.00838759") + max_val = float("1.66366") + mean = float("0.357305") + std = float("0.246704") data = None @@ -4743,10 +4743,10 @@ class Program_weight_tensor_parameter_431: name = "parameter_431" shape = [192] dtype = "float32" - min_val = float("1.21154e-10") - max_val = float("0.00374479") - mean = float("0.000521225") - std = float("0.000463093") + min_val = float("2.7519e-06") + max_val = float("0.00136124") + mean = float("0.000270016") + std = float("0.000205115") data = None @@ -4754,10 +4754,10 @@ class Program_weight_tensor_parameter_432: name = "parameter_432" shape = [192] dtype = "float32" - min_val = float("-0.052794") - max_val = float("0.0958051") - mean = float("0.0142202") - std = float("0.0171986") + min_val = float("-0.0246477") + max_val = float("0.0419057") + mean = float("0.00937144") + std = float("0.00982806") data = None @@ -4765,10 +4765,10 @@ class Program_weight_tensor_parameter_433: name = "parameter_433" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0479075") - max_val = float("0.0473163") - mean = float("-0.000576931") - std = float("0.00422766") + min_val = float("-0.0257052") + max_val = float("0.0227599") + mean = float("-0.000402919") + std = float("0.00286132") data = None @@ -4776,10 +4776,10 @@ class Program_weight_tensor_parameter_434: name = "parameter_434" shape = [192] dtype = "float32" - min_val = float("-1.76929") - max_val = float("0.366299") - mean = float("-0.296944") - std = float("0.323923") + min_val = float("-1.76329") + max_val = float("0.544922") + mean = float("-0.246307") + std = float("0.349367") data = None @@ -4787,10 +4787,10 @@ class Program_weight_tensor_parameter_435: name = "parameter_435" shape = [192] dtype = "float32" - min_val = float("0.376374") - max_val = float("1.96514") - mean = float("1.05296") - std = float("0.33661") + min_val = float("0.385511") + max_val = float("1.96748") + mean = float("1.06974") + std = float("0.336052") data = None @@ -4798,10 +4798,10 @@ class Program_weight_tensor_parameter_436: name = "parameter_436" shape = [192] dtype = "float32" - min_val = float("0.00319475") - max_val = float("0.0156312") - mean = float("0.0069418") - std = float("0.00223012") + min_val = float("0.00118411") + max_val = float("0.00608141") + mean = float("0.0031663") + std = float("0.000994709") data = None @@ -4809,10 +4809,10 @@ class Program_weight_tensor_parameter_437: name = "parameter_437" shape = [192] dtype = "float32" - min_val = float("-0.138894") - max_val = float("0.117271") - mean = float("0.0385623") - std = float("0.0372869") + min_val = float("-0.0656616") + max_val = float("0.0775184") + mean = float("0.0234637") + std = float("0.0196456") data = None @@ -4820,10 +4820,10 @@ class Program_weight_tensor_parameter_438: name = "parameter_438" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0312578") - max_val = float("0.0469487") - mean = float("-0.000193498") - std = float("0.00398813") + min_val = float("-0.0234567") + max_val = float("0.031622") + mean = float("-0.000127451") + std = float("0.0025894") data = None @@ -4831,10 +4831,10 @@ class Program_weight_tensor_parameter_439: name = "parameter_439" shape = [192] dtype = 
"float32" - min_val = float("-2.54978") - max_val = float("0.123303") - mean = float("-1.28641") - std = float("0.420062") + min_val = float("-2.51425") + max_val = float("0.158605") + mean = float("-1.26745") + std = float("0.427693") data = None @@ -4842,10 +4842,10 @@ class Program_weight_tensor_parameter_440: name = "parameter_440" shape = [192] dtype = "float32" - min_val = float("0.65748") - max_val = float("1.74041") - mean = float("1.14789") - std = float("0.166573") + min_val = float("0.599436") + max_val = float("1.78145") + mean = float("1.14926") + std = float("0.161525") data = None @@ -4853,10 +4853,10 @@ class Program_weight_tensor_parameter_441: name = "parameter_441" shape = [192] dtype = "float32" - min_val = float("0.0607768") - max_val = float("0.189401") - mean = float("0.103113") - std = float("0.0244967") + min_val = float("0.0150449") + max_val = float("0.0928161") + mean = float("0.0356273") + std = float("0.0112392") data = None @@ -4864,10 +4864,10 @@ class Program_weight_tensor_parameter_442: name = "parameter_442" shape = [192] dtype = "float32" - min_val = float("-1.77358") - max_val = float("0.232054") - mean = float("-0.181866") - std = float("0.205799") + min_val = float("-1.33124") + max_val = float("0.435574") + mean = float("-0.0281949") + std = float("0.145649") data = None @@ -4875,10 +4875,10 @@ class Program_weight_tensor_parameter_443: name = "parameter_443" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.035669") - max_val = float("0.0555704") - mean = float("-0.000362505") - std = float("0.00447118") + min_val = float("-0.0368305") + max_val = float("0.0462592") + mean = float("-0.000170748") + std = float("0.0029624") data = None @@ -4886,10 +4886,10 @@ class Program_weight_tensor_parameter_444: name = "parameter_444" shape = [192] dtype = "float32" - min_val = float("-2.08733") - max_val = float("0.454803") - mean = float("-0.308331") - std = float("0.365117") + min_val = float("-2.0885") + max_val = float("0.649037") + mean = float("-0.259743") + std = float("0.386626") data = None @@ -4897,10 +4897,10 @@ class Program_weight_tensor_parameter_445: name = "parameter_445" shape = [192] dtype = "float32" - min_val = float("4.74169e-06") - max_val = float("0.678347") - mean = float("0.185861") - std = float("0.145635") + min_val = float("0.000296745") + max_val = float("0.72258") + mean = float("0.216526") + std = float("0.135201") data = None @@ -4908,10 +4908,10 @@ class Program_weight_tensor_parameter_446: name = "parameter_446" shape = [192] dtype = "float32" - min_val = float("1.05153e-11") - max_val = float("0.00148836") - mean = float("0.000320402") - std = float("0.000237363") + min_val = float("8.11875e-09") + max_val = float("0.000665964") + mean = float("0.000158357") + std = float("9.40236e-05") data = None @@ -4919,10 +4919,10 @@ class Program_weight_tensor_parameter_447: name = "parameter_447" shape = [192] dtype = "float32" - min_val = float("-0.0594774") - max_val = float("0.0478072") - mean = float("0.00930335") - std = float("0.0146157") + min_val = float("-0.0160817") + max_val = float("0.0306913") + mean = float("0.00616664") + std = float("0.0078993") data = None @@ -4930,10 +4930,10 @@ class Program_weight_tensor_parameter_448: name = "parameter_448" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0234346") - max_val = float("0.0490819") - mean = float("-0.000384362") - std = float("0.00376562") + min_val = float("-0.0133827") + max_val = float("0.0222178") + mean = float("-0.000251321") + std 
= float("0.00253254") data = None @@ -4941,10 +4941,10 @@ class Program_weight_tensor_parameter_449: name = "parameter_449" shape = [192] dtype = "float32" - min_val = float("-2.08733") - max_val = float("0.454803") - mean = float("-0.308331") - std = float("0.365117") + min_val = float("-2.0885") + max_val = float("0.649037") + mean = float("-0.259743") + std = float("0.386626") data = None @@ -4952,10 +4952,10 @@ class Program_weight_tensor_parameter_450: name = "parameter_450" shape = [192] dtype = "float32" - min_val = float("0.39604") - max_val = float("1.9294") - mean = float("0.959521") - std = float("0.305318") + min_val = float("0.394568") + max_val = float("1.95749") + mean = float("0.953795") + std = float("0.30538") data = None @@ -4963,10 +4963,10 @@ class Program_weight_tensor_parameter_451: name = "parameter_451" shape = [192] dtype = "float32" - min_val = float("0.00246386") - max_val = float("0.018286") - mean = float("0.00690414") - std = float("0.00229679") + min_val = float("0.0012967") + max_val = float("0.00756705") + mean = float("0.00306035") + std = float("0.000987711") data = None @@ -4974,10 +4974,10 @@ class Program_weight_tensor_parameter_452: name = "parameter_452" shape = [192] dtype = "float32" - min_val = float("-0.0963721") - max_val = float("0.139837") - mean = float("0.0375609") - std = float("0.0391308") + min_val = float("-0.0322819") + max_val = float("0.0806911") + mean = float("0.0286436") + std = float("0.0227917") data = None @@ -4985,10 +4985,10 @@ class Program_weight_tensor_parameter_453: name = "parameter_453" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0382843") - max_val = float("0.0441675") - mean = float("-0.000189265") - std = float("0.00417807") + min_val = float("-0.0259952") + max_val = float("0.0272212") + mean = float("-0.000147606") + std = float("0.00265528") data = None @@ -4996,10 +4996,10 @@ class Program_weight_tensor_parameter_454: name = "parameter_454" shape = [192] dtype = "float32" - min_val = float("-2.82542") - max_val = float("-0.165072") - mean = float("-1.2819") - std = float("0.428551") + min_val = float("-2.7731") + max_val = float("-0.0376525") + mean = float("-1.25978") + std = float("0.434928") data = None @@ -5007,10 +5007,10 @@ class Program_weight_tensor_parameter_455: name = "parameter_455" shape = [192] dtype = "float32" - min_val = float("0.761717") - max_val = float("1.48005") - mean = float("1.12799") - std = float("0.136186") + min_val = float("0.744519") + max_val = float("1.562") + mean = float("1.13319") + std = float("0.139729") data = None @@ -5018,10 +5018,10 @@ class Program_weight_tensor_parameter_456: name = "parameter_456" shape = [192] dtype = "float32" - min_val = float("0.0437385") - max_val = float("0.134822") - mean = float("0.0772096") - std = float("0.0171041") + min_val = float("0.0113947") + max_val = float("0.0533248") + mean = float("0.0251346") + std = float("0.00756214") data = None @@ -5029,10 +5029,10 @@ class Program_weight_tensor_parameter_457: name = "parameter_457" shape = [192] dtype = "float32" - min_val = float("-1.32712") - max_val = float("0.174196") - mean = float("-0.153249") - std = float("0.161134") + min_val = float("-0.991719") + max_val = float("0.260327") + mean = float("-0.0417286") + std = float("0.119228") data = None @@ -5040,10 +5040,10 @@ class Program_weight_tensor_parameter_458: name = "parameter_458" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0470685") - max_val = float("0.0540297") - mean = 
float("-0.000367007") - std = float("0.00453239") + min_val = float("-0.0456909") + max_val = float("0.0485113") + mean = float("-0.000180492") + std = float("0.00293373") data = None @@ -5051,10 +5051,10 @@ class Program_weight_tensor_parameter_459: name = "parameter_459" shape = [192] dtype = "float32" - min_val = float("-1.21591") - max_val = float("0.408817") - mean = float("-0.268583") - std = float("0.326001") + min_val = float("-1.20653") + max_val = float("0.515872") + mean = float("-0.218122") + std = float("0.352265") data = None @@ -5062,10 +5062,10 @@ class Program_weight_tensor_parameter_460: name = "parameter_460" shape = [192] dtype = "float32" - min_val = float("1.13966e-07") - max_val = float("0.671077") - mean = float("0.16575") - std = float("0.128785") + min_val = float("-3.46641e-06") + max_val = float("0.680648") + mean = float("0.195002") + std = float("0.117181") data = None @@ -5073,10 +5073,10 @@ class Program_weight_tensor_parameter_461: name = "parameter_461" shape = [192] dtype = "float32" - min_val = float("4.36695e-14") - max_val = float("0.00121919") - mean = float("0.000288819") - std = float("0.000228404") + min_val = float("1.40005e-12") + max_val = float("0.000626638") + mean = float("0.000157763") + std = float("9.56391e-05") data = None @@ -5084,10 +5084,10 @@ class Program_weight_tensor_parameter_462: name = "parameter_462" shape = [192] dtype = "float32" - min_val = float("-0.0449358") - max_val = float("0.0625559") - mean = float("0.00941777") - std = float("0.0159613") + min_val = float("-0.0271548") + max_val = float("0.033376") + mean = float("0.0067476") + std = float("0.00878243") data = None @@ -5095,10 +5095,10 @@ class Program_weight_tensor_parameter_463: name = "parameter_463" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0445329") - max_val = float("0.0469969") - mean = float("-0.000357929") - std = float("0.00375663") + min_val = float("-0.0239047") + max_val = float("0.0291998") + mean = float("-0.00027316") + std = float("0.00260504") data = None @@ -5106,10 +5106,10 @@ class Program_weight_tensor_parameter_464: name = "parameter_464" shape = [192] dtype = "float32" - min_val = float("-1.21591") - max_val = float("0.408817") - mean = float("-0.268583") - std = float("0.326001") + min_val = float("-1.20653") + max_val = float("0.515872") + mean = float("-0.218122") + std = float("0.352265") data = None @@ -5117,10 +5117,10 @@ class Program_weight_tensor_parameter_465: name = "parameter_465" shape = [192] dtype = "float32" - min_val = float("0.342519") - max_val = float("1.57267") - mean = float("0.856133") - std = float("0.266894") + min_val = float("0.398504") + max_val = float("1.57172") + mean = float("0.848038") + std = float("0.259251") data = None @@ -5128,10 +5128,10 @@ class Program_weight_tensor_parameter_466: name = "parameter_466" shape = [192] dtype = "float32" - min_val = float("0.00236989") - max_val = float("0.012972") - mean = float("0.0063056") - std = float("0.00193128") + min_val = float("0.00112489") + max_val = float("0.00610025") + mean = float("0.00301719") + std = float("0.000917632") data = None @@ -5139,10 +5139,10 @@ class Program_weight_tensor_parameter_467: name = "parameter_467" shape = [192] dtype = "float32" - min_val = float("-0.108302") - max_val = float("0.156299") - mean = float("0.0414147") - std = float("0.0423383") + min_val = float("-0.0478429") + max_val = float("0.0836548") + mean = float("0.0242454") + std = float("0.0221161") data = None @@ -5150,10 +5150,10 @@ class 
Program_weight_tensor_parameter_468: name = "parameter_468" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0362289") - max_val = float("0.0435947") - mean = float("-0.000180448") - std = float("0.00414403") + min_val = float("-0.0240424") + max_val = float("0.0295695") + mean = float("-0.000120615") + std = float("0.00264505") data = None @@ -5161,10 +5161,10 @@ class Program_weight_tensor_parameter_469: name = "parameter_469" shape = [192] dtype = "float32" - min_val = float("-2.55484") - max_val = float("-0.225384") - mean = float("-1.29607") - std = float("0.412734") + min_val = float("-2.48813") + max_val = float("-0.081453") + mean = float("-1.27066") + std = float("0.42002") data = None @@ -5172,10 +5172,10 @@ class Program_weight_tensor_parameter_470: name = "parameter_470" shape = [192] dtype = "float32" - min_val = float("0.652885") - max_val = float("1.46467") - mean = float("1.10229") - std = float("0.128509") + min_val = float("0.694183") + max_val = float("1.5418") + mean = float("1.10669") + std = float("0.135408") data = None @@ -5183,10 +5183,10 @@ class Program_weight_tensor_parameter_471: name = "parameter_471" shape = [192] dtype = "float32" - min_val = float("0.0350575") - max_val = float("0.115971") - mean = float("0.0597254") - std = float("0.016366") + min_val = float("0.00729749") + max_val = float("0.037708") + mean = float("0.0179887") + std = float("0.00554409") data = None @@ -5194,10 +5194,10 @@ class Program_weight_tensor_parameter_472: name = "parameter_472" shape = [192] dtype = "float32" - min_val = float("-0.464018") - max_val = float("0.240961") - mean = float("-0.124572") - std = float("0.139434") + min_val = float("-0.396006") + max_val = float("0.372514") + mean = float("-0.0446145") + std = float("0.0976403") data = None @@ -5205,10 +5205,10 @@ class Program_weight_tensor_parameter_473: name = "parameter_473" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0705414") - max_val = float("0.0741061") - mean = float("-0.000347681") - std = float("0.00450759") + min_val = float("-0.0442693") + max_val = float("0.0514986") + mean = float("-0.000169164") + std = float("0.00292394") data = None @@ -5216,10 +5216,10 @@ class Program_weight_tensor_parameter_474: name = "parameter_474" shape = [192] dtype = "float32" - min_val = float("-1.25114") - max_val = float("0.496985") - mean = float("-0.195696") - std = float("0.286609") + min_val = float("-1.23276") + max_val = float("0.509269") + mean = float("-0.153678") + std = float("0.304039") data = None @@ -5227,10 +5227,10 @@ class Program_weight_tensor_parameter_475: name = "parameter_475" shape = [192] dtype = "float32" - min_val = float("6.01345e-05") - max_val = float("1.52688") - mean = float("0.227253") - std = float("0.219405") + min_val = float("0.00230822") + max_val = float("1.53088") + mean = float("0.236662") + std = float("0.211142") data = None @@ -5238,10 +5238,10 @@ class Program_weight_tensor_parameter_476: name = "parameter_476" shape = [192] dtype = "float32" - min_val = float("2.92882e-09") - max_val = float("0.00984128") - mean = float("0.00070971") - std = float("0.00095936") + min_val = float("1.5422e-06") + max_val = float("0.00552255") + mean = float("0.000356895") + std = float("0.000492703") data = None @@ -5249,10 +5249,10 @@ class Program_weight_tensor_parameter_477: name = "parameter_477" shape = [192] dtype = "float32" - min_val = float("-0.0592165") - max_val = float("0.107792") - mean = float("0.0157093") - std = float("0.0208809") + min_val = 
float("-0.0329799") + max_val = float("0.071078") + mean = float("0.00817666") + std = float("0.0127163") data = None @@ -5260,10 +5260,10 @@ class Program_weight_tensor_parameter_478: name = "parameter_478" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.055296") - max_val = float("0.0451825") - mean = float("-0.000685397") - std = float("0.0046336") + min_val = float("-0.0505573") + max_val = float("0.0215882") + mean = float("-0.000354289") + std = float("0.00312677") data = None @@ -5271,10 +5271,10 @@ class Program_weight_tensor_parameter_479: name = "parameter_479" shape = [192] dtype = "float32" - min_val = float("-1.25114") - max_val = float("0.496985") - mean = float("-0.195696") - std = float("0.286609") + min_val = float("-1.23276") + max_val = float("0.509269") + mean = float("-0.153678") + std = float("0.304039") data = None @@ -5282,10 +5282,10 @@ class Program_weight_tensor_parameter_480: name = "parameter_480" shape = [192] dtype = "float32" - min_val = float("0.361257") - max_val = float("1.38672") - mean = float("0.77414") - std = float("0.223602") + min_val = float("0.332725") + max_val = float("1.44096") + mean = float("0.751287") + std = float("0.218777") data = None @@ -5293,10 +5293,10 @@ class Program_weight_tensor_parameter_481: name = "parameter_481" shape = [192] dtype = "float32" - min_val = float("0.00343985") - max_val = float("0.024217") - mean = float("0.0103742") - std = float("0.00341779") + min_val = float("0.00203765") + max_val = float("0.010713") + mean = float("0.00478482") + std = float("0.00160695") data = None @@ -5304,10 +5304,10 @@ class Program_weight_tensor_parameter_482: name = "parameter_482" shape = [192] dtype = "float32" - min_val = float("-0.109521") - max_val = float("0.160426") - mean = float("0.0591583") - std = float("0.050798") + min_val = float("-0.0848523") + max_val = float("0.0954295") + mean = float("0.0308101") + std = float("0.029966") data = None @@ -5315,10 +5315,10 @@ class Program_weight_tensor_parameter_483: name = "parameter_483" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.052784") - max_val = float("0.0464242") - mean = float("-0.000289158") - std = float("0.00403919") + min_val = float("-0.0446459") + max_val = float("0.043586") + mean = float("-0.000155276") + std = float("0.00261469") data = None @@ -5326,10 +5326,10 @@ class Program_weight_tensor_parameter_484: name = "parameter_484" shape = [192] dtype = "float32" - min_val = float("-1.93294") - max_val = float("-0.217802") - mean = float("-1.1784") - std = float("0.32417") + min_val = float("-1.86947") + max_val = float("-0.187775") + mean = float("-1.16389") + std = float("0.325632") data = None @@ -5337,10 +5337,10 @@ class Program_weight_tensor_parameter_485: name = "parameter_485" shape = [192] dtype = "float32" - min_val = float("0.740846") - max_val = float("1.58017") - mean = float("1.10678") - std = float("0.141855") + min_val = float("0.751502") + max_val = float("1.6189") + mean = float("1.10951") + std = float("0.131899") data = None @@ -5348,10 +5348,10 @@ class Program_weight_tensor_parameter_486: name = "parameter_486" shape = [192] dtype = "float32" - min_val = float("0.0251568") - max_val = float("0.110892") - mean = float("0.053406") - std = float("0.0180815") + min_val = float("0.00675098") + max_val = float("0.0288212") + mean = float("0.0150362") + std = float("0.00493022") data = None @@ -5359,10 +5359,10 @@ class Program_weight_tensor_parameter_487: name = "parameter_487" shape = [192] dtype = "float32" - 
min_val = float("-1.08676") - max_val = float("0.26685") - mean = float("-0.082947") - std = float("0.142479") + min_val = float("-0.630258") + max_val = float("0.15393") + mean = float("-0.0440278") + std = float("0.0837471") data = None @@ -5370,10 +5370,10 @@ class Program_weight_tensor_parameter_488: name = "parameter_488" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0681332") - max_val = float("0.0849403") - mean = float("-0.000286395") - std = float("0.00425012") + min_val = float("-0.0520183") + max_val = float("0.060427") + mean = float("-0.000134748") + std = float("0.00285085") data = None @@ -5381,10 +5381,10 @@ class Program_weight_tensor_parameter_489: name = "parameter_489" shape = [192] dtype = "float32" - min_val = float("-2.8666") - max_val = float("1.61594") - mean = float("-0.0521098") - std = float("0.754176") + min_val = float("-2.8152") + max_val = float("1.61438") + mean = float("-0.0255439") + std = float("0.761416") data = None @@ -5392,10 +5392,10 @@ class Program_weight_tensor_parameter_490: name = "parameter_490" shape = [192] dtype = "float32" - min_val = float("0.385687") - max_val = float("2.01902") - mean = float("0.957509") - std = float("0.228151") + min_val = float("0.478669") + max_val = float("2.07816") + mean = float("0.879851") + std = float("0.224413") data = None @@ -5403,10 +5403,10 @@ class Program_weight_tensor_parameter_491: name = "parameter_491" shape = [192] dtype = "float32" - min_val = float("0.0120072") - max_val = float("0.108766") - mean = float("0.0349151") - std = float("0.0150818") + min_val = float("0.00476784") + max_val = float("0.0318025") + mean = float("0.0116175") + std = float("0.00470038") data = None @@ -5414,10 +5414,10 @@ class Program_weight_tensor_parameter_492: name = "parameter_492" shape = [192] dtype = "float32" - min_val = float("-0.256396") - max_val = float("0.483456") - mean = float("-0.0666677") - std = float("0.0824818") + min_val = float("-0.131857") + max_val = float("0.222727") + mean = float("-0.026891") + std = float("0.042331") data = None @@ -5425,10 +5425,10 @@ class Program_weight_tensor_parameter_493: name = "parameter_493" shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.115145") - max_val = float("0.117107") - mean = float("-0.000804966") - std = float("0.00946446") + min_val = float("-0.0777914") + max_val = float("0.0808518") + mean = float("-0.000351302") + std = float("0.00605248") data = None @@ -5436,10 +5436,10 @@ class Program_weight_tensor_parameter_494: name = "parameter_494" shape = [192] dtype = "float32" - min_val = float("-2.99057") - max_val = float("1.19128") - mean = float("0.0645638") - std = float("0.651107") + min_val = float("-2.91406") + max_val = float("2.11419") + mean = float("0.103692") + std = float("0.66786") data = None @@ -5447,10 +5447,10 @@ class Program_weight_tensor_parameter_495: name = "parameter_495" shape = [192] dtype = "float32" - min_val = float("0.897146") - max_val = float("5.48142") - mean = float("1.93592") - std = float("0.914038") + min_val = float("0.856639") + max_val = float("5.70993") + mean = float("1.92505") + std = float("0.968671") data = None @@ -5458,10 +5458,10 @@ class Program_weight_tensor_parameter_496: name = "parameter_496" shape = [192] dtype = "float32" - min_val = float("0.00686488") - max_val = float("0.0692764") - mean = float("0.0235044") - std = float("0.0082593") + min_val = float("0.00257245") + max_val = float("0.0283236") + mean = float("0.00901804") + std = float("0.00367608") data = None 
@@ -5469,10 +5469,10 @@ class Program_weight_tensor_parameter_497: name = "parameter_497" shape = [192] dtype = "float32" - min_val = float("-0.189901") - max_val = float("0.13753") - mean = float("-0.032472") - std = float("0.0640675") + min_val = float("-0.109946") + max_val = float("0.115643") + mean = float("-0.0160117") + std = float("0.0391476") data = None @@ -5480,10 +5480,10 @@ class Program_weight_tensor_parameter_498: name = "parameter_498" shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.0766857") - max_val = float("0.113871") - mean = float("-0.00064456") - std = float("0.00873627") + min_val = float("-0.0685027") + max_val = float("0.128425") + mean = float("-0.000355146") + std = float("0.0056262") data = None @@ -5491,10 +5491,10 @@ class Program_weight_tensor_parameter_499: name = "parameter_499" shape = [384] dtype = "float32" - min_val = float("-2.88369") - max_val = float("1.29531") - mean = float("-0.324976") - std = float("0.576845") + min_val = float("-2.9274") + max_val = float("1.33653") + mean = float("-0.313448") + std = float("0.57221") data = None @@ -5502,10 +5502,10 @@ class Program_weight_tensor_parameter_500: name = "parameter_500" shape = [384] dtype = "float32" - min_val = float("0.63927") - max_val = float("2.42129") - mean = float("1.16101") - std = float("0.261198") + min_val = float("0.700648") + max_val = float("2.45294") + mean = float("1.1469") + std = float("0.257954") data = None @@ -5513,10 +5513,10 @@ class Program_weight_tensor_parameter_501: name = "parameter_501" shape = [384] dtype = "float32" - min_val = float("0.0160522") - max_val = float("0.14684") - mean = float("0.035732") - std = float("0.018691") + min_val = float("0.00449195") + max_val = float("0.0530031") + mean = float("0.0124585") + std = float("0.00683387") data = None @@ -5524,10 +5524,10 @@ class Program_weight_tensor_parameter_502: name = "parameter_502" shape = [384] dtype = "float32" - min_val = float("-0.444625") - max_val = float("0.224227") - mean = float("0.0242203") - std = float("0.0770511") + min_val = float("-0.1972") + max_val = float("0.152797") + mean = float("0.0131924") + std = float("0.044096") data = None @@ -5535,10 +5535,10 @@ class Program_weight_tensor_parameter_503: name = "parameter_503" shape = [384, 256, 3, 3] dtype = "float32" - min_val = float("-0.0733418") - max_val = float("0.082808") - mean = float("-0.000120386") - std = float("0.00455694") + min_val = float("-0.0546488") + max_val = float("0.0575398") + mean = float("-6.44059e-05") + std = float("0.00299774") data = None @@ -5546,10 +5546,10 @@ class Program_weight_tensor_parameter_504: name = "parameter_504" shape = [256] dtype = "float32" - min_val = float("-2.17306") - max_val = float("1.35014") - mean = float("-0.983635") - std = float("0.560538") + min_val = float("-2.08086") + max_val = float("1.23876") + mean = float("-0.929492") + std = float("0.560306") data = None @@ -5557,10 +5557,10 @@ class Program_weight_tensor_parameter_505: name = "parameter_505" shape = [256] dtype = "float32" - min_val = float("0.525409") - max_val = float("1.72142") - mean = float("1.09457") - std = float("0.181495") + min_val = float("0.460814") + max_val = float("1.60591") + mean = float("1.03747") + std = float("0.187077") data = None @@ -5568,10 +5568,10 @@ class Program_weight_tensor_parameter_506: name = "parameter_506" shape = [256] dtype = "float32" - min_val = float("0.00327494") - max_val = float("0.0357364") - mean = float("0.00983111") - std = float("0.00444547") + min_val = 
float("0.000450162") + max_val = float("0.0138338") + mean = float("0.00234151") + std = float("0.00124114") data = None @@ -5579,10 +5579,10 @@ class Program_weight_tensor_parameter_507: name = "parameter_507" shape = [256] dtype = "float32" - min_val = float("-0.281973") - max_val = float("0.212193") - mean = float("-0.0584459") - std = float("0.0749526") + min_val = float("-0.15446") + max_val = float("0.117599") + mean = float("-0.0301123") + std = float("0.0510926") data = None @@ -5590,10 +5590,10 @@ class Program_weight_tensor_parameter_508: name = "parameter_508" shape = [256, 192, 1, 1] dtype = "float32" - min_val = float("-0.228144") - max_val = float("0.18937") - mean = float("-0.000934833") - std = float("0.0150912") + min_val = float("-0.215021") + max_val = float("0.160071") + mean = float("-0.000576881") + std = float("0.00999512") data = None @@ -5601,10 +5601,10 @@ class Program_weight_tensor_parameter_509: name = "parameter_509" shape = [192] dtype = "float32" - min_val = float("-0.0160867") - max_val = float("0.00291467") - mean = float("-0.00513708") - std = float("0.00438686") + min_val = float("-0.0144925") + max_val = float("0.00179001") + mean = float("-0.00408812") + std = float("0.00285198") data = None @@ -5612,10 +5612,10 @@ class Program_weight_tensor_parameter_510: name = "parameter_510" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.301238") - max_val = float("0.279827") - mean = float("-0.0036602") - std = float("0.0109363") + min_val = float("-0.536113") + max_val = float("0.170091") + mean = float("-0.00352777") + std = float("0.00872308") data = None @@ -5623,10 +5623,10 @@ class Program_weight_tensor_parameter_511: name = "parameter_511" shape = [96] dtype = "float32" - min_val = float("-1.88507") - max_val = float("0.408619") - mean = float("-0.264055") - std = float("0.417878") + min_val = float("-1.89842") + max_val = float("0.649199") + mean = float("-0.163374") + std = float("0.446007") data = None @@ -5634,10 +5634,10 @@ class Program_weight_tensor_parameter_512: name = "parameter_512" shape = [96] dtype = "float32" - min_val = float("0.075817") - max_val = float("3.31686") - mean = float("0.585907") - std = float("0.696242") + min_val = float("0.118793") + max_val = float("3.45032") + mean = float("0.648909") + std = float("0.709122") data = None @@ -5645,10 +5645,10 @@ class Program_weight_tensor_parameter_513: name = "parameter_513" shape = [96] dtype = "float32" - min_val = float("9.73872e-05") - max_val = float("0.00192041") - mean = float("0.000475576") - std = float("0.00034983") + min_val = float("3.48017e-05") + max_val = float("0.00129092") + mean = float("0.000353523") + std = float("0.000270336") data = None @@ -5656,10 +5656,10 @@ class Program_weight_tensor_parameter_514: name = "parameter_514" shape = [96] dtype = "float32" - min_val = float("-0.0602378") - max_val = float("0.0724714") - mean = float("0.00884973") - std = float("0.025982") + min_val = float("-0.0383633") + max_val = float("0.0410815") + mean = float("0.00326046") + std = float("0.0151102") data = None @@ -5667,10 +5667,10 @@ class Program_weight_tensor_parameter_515: name = "parameter_515" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0575483") - max_val = float("0.0998151") - mean = float("-0.00071542") - std = float("0.00806398") + min_val = float("-0.0396602") + max_val = float("0.0712072") + mean = float("-0.000415922") + std = float("0.00589655") data = None @@ -5678,10 +5678,10 @@ class Program_weight_tensor_parameter_516: 
name = "parameter_516" shape = [96] dtype = "float32" - min_val = float("-1.88507") - max_val = float("0.408619") - mean = float("-0.264055") - std = float("0.417878") + min_val = float("-1.89842") + max_val = float("0.649199") + mean = float("-0.163374") + std = float("0.446007") data = None @@ -5689,10 +5689,10 @@ class Program_weight_tensor_parameter_517: name = "parameter_517" shape = [96] dtype = "float32" - min_val = float("0.328165") - max_val = float("5.49297") - mean = float("1.04367") - std = float("0.907579") + min_val = float("0.27629") + max_val = float("5.76398") + mean = float("1.11237") + std = float("0.941275") data = None @@ -5700,10 +5700,10 @@ class Program_weight_tensor_parameter_518: name = "parameter_518" shape = [96] dtype = "float32" - min_val = float("0.00063499") - max_val = float("0.0106201") - mean = float("0.00347763") - std = float("0.00169617") + min_val = float("0.00035511") + max_val = float("0.00718247") + mean = float("0.0023089") + std = float("0.00133833") data = None @@ -5711,10 +5711,10 @@ class Program_weight_tensor_parameter_519: name = "parameter_519" shape = [96] dtype = "float32" - min_val = float("-0.207875") - max_val = float("0.204035") - mean = float("0.0224973") - std = float("0.0704149") + min_val = float("-0.0920791") + max_val = float("0.0842519") + mean = float("0.0093933") + std = float("0.0381936") data = None @@ -5722,10 +5722,10 @@ class Program_weight_tensor_parameter_520: name = "parameter_520" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0433834") - max_val = float("0.0664612") - mean = float("-0.000206276") - std = float("0.00585214") + min_val = float("-0.0346361") + max_val = float("0.0508237") + mean = float("-0.000158614") + std = float("0.00427877") data = None @@ -5733,10 +5733,10 @@ class Program_weight_tensor_parameter_521: name = "parameter_521" shape = [96] dtype = "float32" - min_val = float("-2.41446") - max_val = float("-0.0351507") - mean = float("-1.27023") - std = float("0.444263") + min_val = float("-2.47268") + max_val = float("-0.0395751") + mean = float("-1.25288") + std = float("0.438506") data = None @@ -5744,10 +5744,10 @@ class Program_weight_tensor_parameter_522: name = "parameter_522" shape = [96] dtype = "float32" - min_val = float("0.478655") - max_val = float("1.73921") - mean = float("0.924149") - std = float("0.17682") + min_val = float("0.484514") + max_val = float("1.73163") + mean = float("0.919464") + std = float("0.175598") data = None @@ -5755,10 +5755,10 @@ class Program_weight_tensor_parameter_523: name = "parameter_523" shape = [96] dtype = "float32" - min_val = float("0.0485854") - max_val = float("0.347665") - mean = float("0.116634") - std = float("0.0397481") + min_val = float("0.0182282") + max_val = float("0.116164") + mean = float("0.0425678") + std = float("0.017739") data = None @@ -5766,10 +5766,10 @@ class Program_weight_tensor_parameter_524: name = "parameter_524" shape = [96] dtype = "float32" - min_val = float("-4.59321") - max_val = float("0.829715") - mean = float("-0.207516") - std = float("0.572839") + min_val = float("-2.55824") + max_val = float("1.13418") + mean = float("-0.120129") + std = float("0.381008") data = None @@ -5777,10 +5777,10 @@ class Program_weight_tensor_parameter_525: name = "parameter_525" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.143913") - max_val = float("0.105902") - mean = float("-0.000391481") - std = float("0.0073659") + min_val = float("-0.127128") + max_val = float("0.0861504") + mean = 
float("-0.000218704") + std = float("0.00529549") data = None @@ -5788,10 +5788,10 @@ class Program_weight_tensor_parameter_526: name = "parameter_526" shape = [96] dtype = "float32" - min_val = float("-1.40027") - max_val = float("0.433022") - mean = float("-0.187531") - std = float("0.338824") + min_val = float("-1.35987") + max_val = float("0.614259") + mean = float("-0.108904") + std = float("0.363421") data = None @@ -5799,10 +5799,10 @@ class Program_weight_tensor_parameter_527: name = "parameter_527" shape = [96] dtype = "float32" - min_val = float("0.00378303") - max_val = float("1.87179") - mean = float("0.411102") - std = float("0.370104") + min_val = float("0.00955654") + max_val = float("1.85224") + mean = float("0.454916") + std = float("0.359478") data = None @@ -5810,10 +5810,10 @@ class Program_weight_tensor_parameter_528: name = "parameter_528" shape = [96] dtype = "float32" - min_val = float("7.53826e-06") - max_val = float("0.00354419") - mean = float("0.000777177") - std = float("0.000707669") + min_val = float("4.00506e-06") + max_val = float("0.00234563") + mean = float("0.000538477") + std = float("0.000453658") data = None @@ -5821,10 +5821,10 @@ class Program_weight_tensor_parameter_529: name = "parameter_529" shape = [96] dtype = "float32" - min_val = float("-0.0550316") - max_val = float("0.0548868") - mean = float("0.00763059") - std = float("0.0197358") + min_val = float("-0.0357937") + max_val = float("0.0432553") + mean = float("0.00761236") + std = float("0.014379") data = None @@ -5832,10 +5832,10 @@ class Program_weight_tensor_parameter_530: name = "parameter_530" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0516744") - max_val = float("0.091015") - mean = float("-0.000705691") - std = float("0.00756381") + min_val = float("-0.0375291") + max_val = float("0.0336522") + mean = float("-0.000679886") + std = float("0.00538009") data = None @@ -5843,10 +5843,10 @@ class Program_weight_tensor_parameter_531: name = "parameter_531" shape = [96] dtype = "float32" - min_val = float("-1.40027") - max_val = float("0.433022") - mean = float("-0.187531") - std = float("0.338824") + min_val = float("-1.35987") + max_val = float("0.614259") + mean = float("-0.108904") + std = float("0.363421") data = None @@ -5854,10 +5854,10 @@ class Program_weight_tensor_parameter_532: name = "parameter_532" shape = [96] dtype = "float32" - min_val = float("0.340683") - max_val = float("2.2745") - mean = float("0.862345") - std = float("0.431657") + min_val = float("0.381916") + max_val = float("2.31118") + mean = float("0.904033") + std = float("0.422742") data = None @@ -5865,10 +5865,10 @@ class Program_weight_tensor_parameter_533: name = "parameter_533" shape = [96] dtype = "float32" - min_val = float("0.00255349") - max_val = float("0.0155132") - mean = float("0.00548608") - std = float("0.00239986") + min_val = float("0.00152583") + max_val = float("0.0130956") + mean = float("0.00360983") + std = float("0.00182045") data = None @@ -5876,10 +5876,10 @@ class Program_weight_tensor_parameter_534: name = "parameter_534" shape = [96] dtype = "float32" - min_val = float("-0.17823") - max_val = float("0.137982") - mean = float("0.0322525") - std = float("0.046662") + min_val = float("-0.0602139") + max_val = float("0.0926286") + mean = float("0.0240804") + std = float("0.0293413") data = None @@ -5887,10 +5887,10 @@ class Program_weight_tensor_parameter_535: name = "parameter_535" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0674981") - max_val = 
float("0.0570277") - mean = float("-0.00032752") - std = float("0.006034") + min_val = float("-0.0526704") + max_val = float("0.0617496") + mean = float("-0.00024044") + std = float("0.00425527") data = None @@ -5898,10 +5898,10 @@ class Program_weight_tensor_parameter_536: name = "parameter_536" shape = [96] dtype = "float32" - min_val = float("-3.35834") - max_val = float("0.28772") - mean = float("-1.21666") - std = float("0.561104") + min_val = float("-3.30872") + max_val = float("0.356571") + mean = float("-1.21876") + std = float("0.556947") data = None @@ -5909,10 +5909,10 @@ class Program_weight_tensor_parameter_537: name = "parameter_537" shape = [96] dtype = "float32" - min_val = float("0.416818") - max_val = float("1.89857") - mean = float("1.01497") - std = float("0.241794") + min_val = float("0.42521") + max_val = float("1.92533") + mean = float("1.00705") + std = float("0.236551") data = None @@ -5920,10 +5920,10 @@ class Program_weight_tensor_parameter_538: name = "parameter_538" shape = [96] dtype = "float32" - min_val = float("0.0432234") - max_val = float("0.174066") - mean = float("0.0777276") - std = float("0.0209529") + min_val = float("0.0150869") + max_val = float("0.0812702") + mean = float("0.0265418") + std = float("0.00908418") data = None @@ -5931,10 +5931,10 @@ class Program_weight_tensor_parameter_539: name = "parameter_539" shape = [96] dtype = "float32" - min_val = float("-1.16964") - max_val = float("0.598494") - mean = float("-0.139571") - std = float("0.296264") + min_val = float("-0.89497") + max_val = float("0.555961") + mean = float("-0.0363605") + std = float("0.206065") data = None @@ -5942,10 +5942,10 @@ class Program_weight_tensor_parameter_540: name = "parameter_540" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.159737") - max_val = float("0.152684") - mean = float("-0.000521222") - std = float("0.00746838") + min_val = float("-0.132978") + max_val = float("0.135745") + mean = float("-0.000279374") + std = float("0.00518152") data = None @@ -5953,10 +5953,10 @@ class Program_weight_tensor_parameter_541: name = "parameter_541" shape = [96] dtype = "float32" - min_val = float("-1.27259") - max_val = float("0.580805") - mean = float("-0.160154") - std = float("0.285669") + min_val = float("-1.22354") + max_val = float("0.655745") + mean = float("-0.0920703") + std = float("0.30607") data = None @@ -5964,10 +5964,10 @@ class Program_weight_tensor_parameter_542: name = "parameter_542" shape = [96] dtype = "float32" - min_val = float("1.78897e-05") - max_val = float("1.24216") - mean = float("0.291835") - std = float("0.195371") + min_val = float("0.0320682") + max_val = float("1.28684") + mean = float("0.312972") + std = float("0.193547") data = None @@ -5975,10 +5975,10 @@ class Program_weight_tensor_parameter_543: name = "parameter_543" shape = [96] dtype = "float32" - min_val = float("9.8845e-11") - max_val = float("0.00345974") - mean = float("0.000742287") - std = float("0.000570769") + min_val = float("1.25242e-05") + max_val = float("0.00304423") + mean = float("0.000494706") + std = float("0.000483804") data = None @@ -5986,10 +5986,10 @@ class Program_weight_tensor_parameter_544: name = "parameter_544" shape = [96] dtype = "float32" - min_val = float("-0.0514278") - max_val = float("0.0447507") - mean = float("0.00739843") - std = float("0.0166169") + min_val = float("-0.0280188") + max_val = float("0.0438614") + mean = float("0.0066925") + std = float("0.0132689") data = None @@ -5997,10 +5997,10 @@ class 
Program_weight_tensor_parameter_545: name = "parameter_545" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0512371") - max_val = float("0.058203") - mean = float("-0.000637488") - std = float("0.00765631") + min_val = float("-0.0359891") + max_val = float("0.037578") + mean = float("-0.000549529") + std = float("0.00543459") data = None @@ -6008,10 +6008,10 @@ class Program_weight_tensor_parameter_546: name = "parameter_546" shape = [96] dtype = "float32" - min_val = float("-1.27259") - max_val = float("0.580805") - mean = float("-0.160154") - std = float("0.285669") + min_val = float("-1.22354") + max_val = float("0.655745") + mean = float("-0.0920703") + std = float("0.30607") data = None @@ -6019,10 +6019,10 @@ class Program_weight_tensor_parameter_547: name = "parameter_547" shape = [96] dtype = "float32" - min_val = float("0.228783") - max_val = float("1.64106") - mean = float("0.722497") - std = float("0.264866") + min_val = float("0.321517") + max_val = float("1.60435") + mean = float("0.742508") + std = float("0.256539") data = None @@ -6030,10 +6030,10 @@ class Program_weight_tensor_parameter_548: name = "parameter_548" shape = [96] dtype = "float32" - min_val = float("0.00213447") - max_val = float("0.0186589") - mean = float("0.00667835") - std = float("0.00312485") + min_val = float("0.0010948") + max_val = float("0.0109667") + mean = float("0.00391925") + std = float("0.00180583") data = None @@ -6041,10 +6041,10 @@ class Program_weight_tensor_parameter_549: name = "parameter_549" shape = [96] dtype = "float32" - min_val = float("-0.0988952") - max_val = float("0.152065") - mean = float("0.0248752") - std = float("0.0430747") + min_val = float("-0.0465221") + max_val = float("0.11619") + mean = float("0.0224584") + std = float("0.0290944") data = None @@ -6052,10 +6052,10 @@ class Program_weight_tensor_parameter_550: name = "parameter_550" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0680949") - max_val = float("0.071738") - mean = float("-0.000264866") - std = float("0.00622902") + min_val = float("-0.0464603") + max_val = float("0.0403376") + mean = float("-0.000230737") + std = float("0.00435378") data = None @@ -6063,10 +6063,10 @@ class Program_weight_tensor_parameter_551: name = "parameter_551" shape = [96] dtype = "float32" - min_val = float("-3.6614") - max_val = float("0.205968") - mean = float("-1.16919") - std = float("0.589988") + min_val = float("-3.56355") + max_val = float("0.31361") + mean = float("-1.16302") + std = float("0.578576") data = None @@ -6074,10 +6074,10 @@ class Program_weight_tensor_parameter_552: name = "parameter_552" shape = [96] dtype = "float32" - min_val = float("0.512145") - max_val = float("2.1309") - mean = float("1.02688") - std = float("0.241984") + min_val = float("0.516248") + max_val = float("2.22549") + mean = float("1.01872") + std = float("0.244167") data = None @@ -6085,10 +6085,10 @@ class Program_weight_tensor_parameter_553: name = "parameter_553" shape = [96] dtype = "float32" - min_val = float("0.0345738") - max_val = float("0.101406") - mean = float("0.0616151") - std = float("0.0129723") + min_val = float("0.00947191") + max_val = float("0.0394704") + mean = float("0.019952") + std = float("0.00531941") data = None @@ -6096,10 +6096,10 @@ class Program_weight_tensor_parameter_554: name = "parameter_554" shape = [96] dtype = "float32" - min_val = float("-1.10731") - max_val = float("0.608096") - mean = float("-0.0544066") - std = float("0.26029") + min_val = float("-0.543762") + max_val = 
float("0.453745") + mean = float("-0.00190825") + std = float("0.159635") data = None @@ -6107,10 +6107,10 @@ class Program_weight_tensor_parameter_555: name = "parameter_555" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.107257") - max_val = float("0.139746") - mean = float("-0.000428188") - std = float("0.00759317") + min_val = float("-0.104936") + max_val = float("0.123929") + mean = float("-0.000230516") + std = float("0.00527145") data = None @@ -6118,10 +6118,10 @@ class Program_weight_tensor_parameter_556: name = "parameter_556" shape = [96] dtype = "float32" - min_val = float("-0.931411") - max_val = float("0.406494") - mean = float("-0.216229") - std = float("0.275567") + min_val = float("-0.914544") + max_val = float("0.549399") + mean = float("-0.147604") + std = float("0.291572") data = None @@ -6129,10 +6129,10 @@ class Program_weight_tensor_parameter_557: name = "parameter_557" shape = [96] dtype = "float32" - min_val = float("2.7851e-05") - max_val = float("1.34666") - mean = float("0.301153") - std = float("0.211126") + min_val = float("0.0335844") + max_val = float("1.37871") + mean = float("0.313952") + std = float("0.205577") data = None @@ -6140,10 +6140,10 @@ class Program_weight_tensor_parameter_558: name = "parameter_558" shape = [96] dtype = "float32" - min_val = float("2.44855e-10") - max_val = float("0.00276123") - mean = float("0.000787333") - std = float("0.000529563") + min_val = float("2.17369e-05") + max_val = float("0.00299857") + mean = float("0.000531722") + std = float("0.000408053") data = None @@ -6151,10 +6151,10 @@ class Program_weight_tensor_parameter_559: name = "parameter_559" shape = [96] dtype = "float32" - min_val = float("-0.0511623") - max_val = float("0.0677894") - mean = float("0.00865361") - std = float("0.0199128") + min_val = float("-0.0202019") + max_val = float("0.0401913") + mean = float("0.00828191") + std = float("0.0127605") data = None @@ -6162,10 +6162,10 @@ class Program_weight_tensor_parameter_560: name = "parameter_560" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0714816") - max_val = float("0.0528174") - mean = float("-0.00080202") - std = float("0.00805684") + min_val = float("-0.0457306") + max_val = float("0.0364814") + mean = float("-0.00067745") + std = float("0.00557391") data = None @@ -6173,10 +6173,10 @@ class Program_weight_tensor_parameter_561: name = "parameter_561" shape = [96] dtype = "float32" - min_val = float("-0.931411") - max_val = float("0.406494") - mean = float("-0.216229") - std = float("0.275567") + min_val = float("-0.914544") + max_val = float("0.549399") + mean = float("-0.147604") + std = float("0.291572") data = None @@ -6184,10 +6184,10 @@ class Program_weight_tensor_parameter_562: name = "parameter_562" shape = [96] dtype = "float32" - min_val = float("0.141158") - max_val = float("1.7851") - mean = float("0.707756") - std = float("0.28548") + min_val = float("0.141171") + max_val = float("1.73846") + mean = float("0.702424") + std = float("0.28575") data = None @@ -6195,10 +6195,10 @@ class Program_weight_tensor_parameter_563: name = "parameter_563" shape = [96] dtype = "float32" - min_val = float("0.0020027") - max_val = float("0.0193217") - mean = float("0.00726263") - std = float("0.00306558") + min_val = float("0.000436992") + max_val = float("0.0116807") + mean = float("0.00437206") + std = float("0.00181053") data = None @@ -6206,10 +6206,10 @@ class Program_weight_tensor_parameter_564: name = "parameter_564" shape = [96] dtype = "float32" - min_val = 
float("-0.118899") - max_val = float("0.18858") - mean = float("0.0361517") - std = float("0.0485395") + min_val = float("-0.0611763") + max_val = float("0.0849162") + mean = float("0.0256109") + std = float("0.0259056") data = None @@ -6217,10 +6217,10 @@ class Program_weight_tensor_parameter_565: name = "parameter_565" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0623542") - max_val = float("0.0588107") - mean = float("-0.000362252") - std = float("0.00634091") + min_val = float("-0.0711506") + max_val = float("0.0585904") + mean = float("-0.000252199") + std = float("0.00442523") data = None @@ -6228,10 +6228,10 @@ class Program_weight_tensor_parameter_566: name = "parameter_566" shape = [96] dtype = "float32" - min_val = float("-3.19799") - max_val = float("0.0441781") - mean = float("-1.1066") - std = float("0.512528") + min_val = float("-2.62308") + max_val = float("0.0475886") + mean = float("-1.09223") + std = float("0.492262") data = None @@ -6239,10 +6239,10 @@ class Program_weight_tensor_parameter_567: name = "parameter_567" shape = [96] dtype = "float32" - min_val = float("0.556846") - max_val = float("1.70748") - mean = float("0.988099") - std = float("0.180513") + min_val = float("0.546141") + max_val = float("1.74737") + mean = float("0.990116") + std = float("0.183616") data = None @@ -6250,10 +6250,10 @@ class Program_weight_tensor_parameter_568: name = "parameter_568" shape = [96] dtype = "float32" - min_val = float("0.0248385") - max_val = float("0.353521") - mean = float("0.0516013") - std = float("0.0336755") + min_val = float("0.00780535") + max_val = float("0.0286571") + mean = float("0.0150858") + std = float("0.00396297") data = None @@ -6261,10 +6261,10 @@ class Program_weight_tensor_parameter_569: name = "parameter_569" shape = [96] dtype = "float32" - min_val = float("-3.41151") - max_val = float("0.407488") - mean = float("-0.0719755") - std = float("0.386288") + min_val = float("-0.397648") + max_val = float("0.372501") + mean = float("-0.0250664") + std = float("0.127281") data = None @@ -6272,10 +6272,10 @@ class Program_weight_tensor_parameter_570: name = "parameter_570" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0620146") - max_val = float("0.0633394") - mean = float("-0.000448888") - std = float("0.00731171") + min_val = float("-0.0589674") + max_val = float("0.10126") + mean = float("-0.000249985") + std = float("0.00515185") data = None @@ -6283,10 +6283,10 @@ class Program_weight_tensor_parameter_571: name = "parameter_571" shape = [96] dtype = "float32" - min_val = float("-0.974533") - max_val = float("0.654029") - mean = float("-0.172382") - std = float("0.268825") + min_val = float("-0.982631") + max_val = float("0.556017") + mean = float("-0.128051") + std = float("0.290117") data = None @@ -6294,10 +6294,10 @@ class Program_weight_tensor_parameter_572: name = "parameter_572" shape = [96] dtype = "float32" - min_val = float("0.0445082") - max_val = float("1.22303") - mean = float("0.290206") - std = float("0.187352") + min_val = float("0.0693376") + max_val = float("1.15446") + mean = float("0.273121") + std = float("0.164903") data = None @@ -6305,10 +6305,10 @@ class Program_weight_tensor_parameter_573: name = "parameter_573" shape = [96] dtype = "float32" - min_val = float("9.51178e-05") - max_val = float("0.00465114") - mean = float("0.00132027") - std = float("0.00087515") + min_val = float("6.83451e-05") + max_val = float("0.00211645") + mean = float("0.000639177") + std = float("0.000387327") data = None 
@@ -6316,10 +6316,10 @@ class Program_weight_tensor_parameter_574: name = "parameter_574" shape = [96] dtype = "float32" - min_val = float("-0.0394857") - max_val = float("0.0556089") - mean = float("0.00847204") - std = float("0.0204605") + min_val = float("-0.0410594") + max_val = float("0.0527878") + mean = float("0.00475022") + std = float("0.0156944") data = None @@ -6327,10 +6327,10 @@ class Program_weight_tensor_parameter_575: name = "parameter_575" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0591107") - max_val = float("0.084198") - mean = float("-0.000990773") - std = float("0.00883492") + min_val = float("-0.0522607") + max_val = float("0.0599666") + mean = float("-0.000547462") + std = float("0.00617448") data = None @@ -6338,10 +6338,10 @@ class Program_weight_tensor_parameter_576: name = "parameter_576" shape = [96] dtype = "float32" - min_val = float("-0.974533") - max_val = float("0.654028") - mean = float("-0.172382") - std = float("0.268825") + min_val = float("-0.982631") + max_val = float("0.556015") + mean = float("-0.128051") + std = float("0.290117") data = None @@ -6349,10 +6349,10 @@ class Program_weight_tensor_parameter_577: name = "parameter_577" shape = [96] dtype = "float32" - min_val = float("0.207034") - max_val = float("1.47417") - mean = float("0.603427") - std = float("0.233027") + min_val = float("0.179543") + max_val = float("1.52891") + mean = float("0.577267") + std = float("0.230524") data = None @@ -6360,10 +6360,10 @@ class Program_weight_tensor_parameter_578: name = "parameter_578" shape = [96] dtype = "float32" - min_val = float("0.00207586") - max_val = float("0.0251574") - mean = float("0.0107863") - std = float("0.00456461") + min_val = float("0.00185316") + max_val = float("0.0151893") + mean = float("0.0053405") + std = float("0.00217932") data = None @@ -6371,10 +6371,10 @@ class Program_weight_tensor_parameter_579: name = "parameter_579" shape = [96] dtype = "float32" - min_val = float("-0.110009") - max_val = float("0.147658") - mean = float("0.0285999") - std = float("0.050264") + min_val = float("-0.062003") + max_val = float("0.105803") + mean = float("0.0194326") + std = float("0.0299898") data = None @@ -6382,10 +6382,10 @@ class Program_weight_tensor_parameter_580: name = "parameter_580" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.070149") - max_val = float("0.0470186") - mean = float("-0.000362618") - std = float("0.00621856") + min_val = float("-0.052588") + max_val = float("0.0401134") + mean = float("-0.000216222") + std = float("0.00433571") data = None @@ -6393,10 +6393,10 @@ class Program_weight_tensor_parameter_581: name = "parameter_581" shape = [96] dtype = "float32" - min_val = float("-3.53234") - max_val = float("0.173836") - mean = float("-1.04711") - std = float("0.571399") + min_val = float("-3.34591") + max_val = float("0.217567") + mean = float("-1.0205") + std = float("0.542044") data = None @@ -6404,10 +6404,10 @@ class Program_weight_tensor_parameter_582: name = "parameter_582" shape = [96] dtype = "float32" - min_val = float("0.597874") - max_val = float("2.39872") - mean = float("1.0552") - std = float("0.205323") + min_val = float("0.541294") + max_val = float("2.73375") + mean = float("1.0434") + std = float("0.234097") data = None @@ -6415,10 +6415,10 @@ class Program_weight_tensor_parameter_583: name = "parameter_583" shape = [96] dtype = "float32" - min_val = float("0.0185202") - max_val = float("0.112167") - mean = float("0.0429341") - std = float("0.0167429") + min_val 
= float("0.00623622") + max_val = float("0.0298161") + mean = float("0.0123004") + std = float("0.00410441") data = None @@ -6426,10 +6426,10 @@ class Program_weight_tensor_parameter_584: name = "parameter_584" shape = [96] dtype = "float32" - min_val = float("-0.443067") - max_val = float("0.447293") - mean = float("-0.0529377") - std = float("0.167364") + min_val = float("-0.316894") + max_val = float("0.213951") + mean = float("-0.0247624") + std = float("0.0998134") data = None @@ -6437,10 +6437,10 @@ class Program_weight_tensor_parameter_585: name = "parameter_585" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.063488") - max_val = float("0.083032") - mean = float("-0.000377604") - std = float("0.00741283") + min_val = float("-0.0787901") + max_val = float("0.0721749") + mean = float("-0.000275213") + std = float("0.00519315") data = None @@ -6448,10 +6448,10 @@ class Program_weight_tensor_parameter_586: name = "parameter_586" shape = [96] dtype = "float32" - min_val = float("-0.693542") - max_val = float("0.601255") - mean = float("-0.0971658") - std = float("0.280506") + min_val = float("-0.603406") + max_val = float("0.46876") + mean = float("-0.0838298") + std = float("0.256426") data = None @@ -6459,10 +6459,10 @@ class Program_weight_tensor_parameter_587: name = "parameter_587" shape = [96] dtype = "float32" - min_val = float("0.0450765") - max_val = float("1.29061") - mean = float("0.303813") - std = float("0.204412") + min_val = float("0.0549309") + max_val = float("1.22997") + mean = float("0.285879") + std = float("0.196767") data = None @@ -6470,10 +6470,10 @@ class Program_weight_tensor_parameter_588: name = "parameter_588" shape = [96] dtype = "float32" - min_val = float("0.000388448") - max_val = float("0.0258625") - mean = float("0.00450419") - std = float("0.00431847") + min_val = float("0.00027077") + max_val = float("0.0173696") + mean = float("0.00296255") + std = float("0.00270399") data = None @@ -6481,10 +6481,10 @@ class Program_weight_tensor_parameter_589: name = "parameter_589" shape = [96] dtype = "float32" - min_val = float("-0.0432957") - max_val = float("0.0269978") - mean = float("-0.00076361") - std = float("0.0138503") + min_val = float("-0.0278518") + max_val = float("0.0236883") + mean = float("0.000353097") + std = float("0.00822438") data = None @@ -6492,10 +6492,10 @@ class Program_weight_tensor_parameter_590: name = "parameter_590" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.106785") - max_val = float("0.0776316") - mean = float("-0.00136769") - std = float("0.0107474") + min_val = float("-0.0745539") + max_val = float("0.0558059") + mean = float("-0.000956811") + std = float("0.00693171") data = None @@ -6503,10 +6503,10 @@ class Program_weight_tensor_parameter_591: name = "parameter_591" shape = [96] dtype = "float32" - min_val = float("-0.693542") - max_val = float("0.601255") - mean = float("-0.0971658") - std = float("0.280506") + min_val = float("-0.603406") + max_val = float("0.468759") + mean = float("-0.0838298") + std = float("0.256426") data = None @@ -6514,10 +6514,10 @@ class Program_weight_tensor_parameter_592: name = "parameter_592" shape = [96] dtype = "float32" - min_val = float("0.117864") - max_val = float("1.42918") - mean = float("0.534336") - std = float("0.282789") + min_val = float("0.184847") + max_val = float("1.32269") + mean = float("0.519013") + std = float("0.258771") data = None @@ -6525,10 +6525,10 @@ class Program_weight_tensor_parameter_593: name = "parameter_593" shape = [96] 
dtype = "float32" - min_val = float("0.00952757") - max_val = float("0.13009") - mean = float("0.03226") - std = float("0.0210741") + min_val = float("0.00359367") + max_val = float("0.0619606") + mean = float("0.0188235") + std = float("0.00993846") data = None @@ -6536,10 +6536,10 @@ class Program_weight_tensor_parameter_594: name = "parameter_594" shape = [96] dtype = "float32" - min_val = float("-0.182934") - max_val = float("0.0815284") - mean = float("0.000193777") - std = float("0.0473769") + min_val = float("-0.0720302") + max_val = float("0.0583683") + mean = float("-0.00652554") + std = float("0.0276525") data = None @@ -6547,10 +6547,10 @@ class Program_weight_tensor_parameter_595: name = "parameter_595" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.09962") - max_val = float("0.0585965") - mean = float("-0.000472593") - std = float("0.0059293") + min_val = float("-0.077949") + max_val = float("0.0416607") + mean = float("-0.000187561") + std = float("0.00433555") data = None @@ -6558,10 +6558,10 @@ class Program_weight_tensor_parameter_596: name = "parameter_596" shape = [96] dtype = "float32" - min_val = float("-2.17807") - max_val = float("0.517143") - mean = float("-0.845295") - std = float("0.487541") + min_val = float("-2.41266") + max_val = float("0.498184") + mean = float("-0.836552") + std = float("0.475182") data = None @@ -6569,10 +6569,10 @@ class Program_weight_tensor_parameter_597: name = "parameter_597" shape = [96] dtype = "float32" - min_val = float("0.772289") - max_val = float("2.3588") - mean = float("1.25972") - std = float("0.22461") + min_val = float("0.828403") + max_val = float("2.26623") + mean = float("1.25294") + std = float("0.215447") data = None @@ -6580,10 +6580,10 @@ class Program_weight_tensor_parameter_598: name = "parameter_598" shape = [96] dtype = "float32" - min_val = float("0.0176127") - max_val = float("0.188596") - mean = float("0.0443549") - std = float("0.0256738") + min_val = float("0.00371984") + max_val = float("0.0266859") + mean = float("0.00901981") + std = float("0.00402208") data = None @@ -6591,10 +6591,10 @@ class Program_weight_tensor_parameter_599: name = "parameter_599" shape = [96] dtype = "float32" - min_val = float("-0.978829") - max_val = float("0.469855") - mean = float("-0.0195263") - std = float("0.204639") + min_val = float("-0.356962") + max_val = float("0.240426") + mean = float("-0.0246764") + std = float("0.102141") data = None @@ -6602,10 +6602,10 @@ class Program_weight_tensor_parameter_600: name = "parameter_600" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.191231") - max_val = float("0.182087") - mean = float("-0.000212875") - std = float("0.00772769") + min_val = float("-0.111863") + max_val = float("0.117189") + mean = float("-0.00012768") + std = float("0.00536188") data = None @@ -6613,10 +6613,10 @@ class Program_weight_tensor_parameter_601: name = "parameter_601" shape = [96] dtype = "float32" - min_val = float("-3.43026") - max_val = float("1.98535") - mean = float("0.505807") - std = float("0.879788") + min_val = float("-3.19124") + max_val = float("1.93359") + mean = float("0.508377") + std = float("0.871805") data = None @@ -6624,10 +6624,10 @@ class Program_weight_tensor_parameter_602: name = "parameter_602" shape = [96] dtype = "float32" - min_val = float("0.234711") - max_val = float("2.355") - mean = float("0.651409") - std = float("0.28777") + min_val = float("0.236398") + max_val = float("2.59722") + mean = float("0.515435") + std = float("0.323564") data = 
None @@ -6635,10 +6635,10 @@ class Program_weight_tensor_parameter_603: name = "parameter_603" shape = [96] dtype = "float32" - min_val = float("0.0114431") - max_val = float("0.213174") - mean = float("0.044291") - std = float("0.0324628") + min_val = float("0.00484403") + max_val = float("0.0706225") + mean = float("0.0146031") + std = float("0.0110259") data = None @@ -6646,10 +6646,10 @@ class Program_weight_tensor_parameter_604: name = "parameter_604" shape = [96] dtype = "float32" - min_val = float("-0.393192") - max_val = float("0.404316") - mean = float("-0.0345236") - std = float("0.111426") + min_val = float("-0.209383") + max_val = float("0.238176") + mean = float("-0.0128806") + std = float("0.0740322") data = None @@ -6657,10 +6657,10 @@ class Program_weight_tensor_parameter_605: name = "parameter_605" shape = [96, 192, 1, 1] dtype = "float32" - min_val = float("-0.192313") - max_val = float("0.22459") - mean = float("-0.000851966") - std = float("0.016247") + min_val = float("-0.169334") + max_val = float("0.152895") + mean = float("-0.000316404") + std = float("0.0110428") data = None @@ -6668,10 +6668,10 @@ class Program_weight_tensor_parameter_606: name = "parameter_606" shape = [96] dtype = "float32" - min_val = float("-4.88256") - max_val = float("1.51768") - mean = float("0.319321") - std = float("1.02555") + min_val = float("-4.89364") + max_val = float("1.73104") + mean = float("0.4216") + std = float("1.0546") data = None @@ -6679,10 +6679,10 @@ class Program_weight_tensor_parameter_607: name = "parameter_607" shape = [96] dtype = "float32" - min_val = float("0.584545") - max_val = float("6.91604") - mean = float("1.75143") - std = float("1.27947") + min_val = float("0.36737") + max_val = float("6.94832") + mean = float("1.69928") + std = float("1.37496") data = None @@ -6690,10 +6690,10 @@ class Program_weight_tensor_parameter_608: name = "parameter_608" shape = [96] dtype = "float32" - min_val = float("0.00474677") - max_val = float("0.161777") - mean = float("0.0349508") - std = float("0.0294242") + min_val = float("0.00220095") + max_val = float("0.0892683") + mean = float("0.0142797") + std = float("0.011982") data = None @@ -6701,10 +6701,10 @@ class Program_weight_tensor_parameter_609: name = "parameter_609" shape = [96] dtype = "float32" - min_val = float("-0.246975") - max_val = float("0.361227") - mean = float("0.00345537") - std = float("0.112749") + min_val = float("-0.162445") + max_val = float("0.294147") + mean = float("0.00715431") + std = float("0.0821284") data = None @@ -6712,10 +6712,10 @@ class Program_weight_tensor_parameter_610: name = "parameter_610" shape = [96, 192, 1, 1] dtype = "float32" - min_val = float("-0.126516") - max_val = float("0.194387") - mean = float("-0.000239444") - std = float("0.0147307") + min_val = float("-0.0897093") + max_val = float("0.18944") + mean = float("9.40014e-05") + std = float("0.0102431") data = None @@ -6723,10 +6723,10 @@ class Program_weight_tensor_parameter_611: name = "parameter_611" shape = [192] dtype = "float32" - min_val = float("-2.32829") - max_val = float("1.72494") - mean = float("-0.148208") - std = float("0.746996") + min_val = float("-2.2631") + max_val = float("1.81642") + mean = float("-0.104199") + std = float("0.765409") data = None @@ -6734,10 +6734,10 @@ class Program_weight_tensor_parameter_612: name = "parameter_612" shape = [192] dtype = "float32" - min_val = float("0.622318") - max_val = float("2.80938") - mean = float("1.10417") - std = float("0.281992") + min_val = 
float("0.55386") + max_val = float("3.06666") + mean = float("1.03418") + std = float("0.295019") data = None @@ -6745,10 +6745,10 @@ class Program_weight_tensor_parameter_613: name = "parameter_613" shape = [192] dtype = "float32" - min_val = float("0.0114845") - max_val = float("0.292646") - mean = float("0.0547784") - std = float("0.0457904") + min_val = float("0.00449313") + max_val = float("0.113476") + mean = float("0.0166471") + std = float("0.0141621") data = None @@ -6756,10 +6756,10 @@ class Program_weight_tensor_parameter_614: name = "parameter_614" shape = [192] dtype = "float32" - min_val = float("-0.529517") - max_val = float("0.374347") - mean = float("-0.0709328") - std = float("0.134011") + min_val = float("-0.370599") + max_val = float("0.182049") + mean = float("-0.036688") + std = float("0.0839067") data = None @@ -6767,10 +6767,10 @@ class Program_weight_tensor_parameter_615: name = "parameter_615" shape = [192, 128, 3, 3] dtype = "float32" - min_val = float("-0.0839254") - max_val = float("0.110947") - mean = float("-0.000137724") - std = float("0.0074586") + min_val = float("-0.0617191") + max_val = float("0.0972102") + mean = float("-0.000107627") + std = float("0.00527157") data = None @@ -6778,10 +6778,10 @@ class Program_weight_tensor_parameter_616: name = "parameter_616" shape = [128] dtype = "float32" - min_val = float("-2.79544") - max_val = float("1.93522") - mean = float("-0.737016") - std = float("0.682776") + min_val = float("-2.77261") + max_val = float("1.9501") + mean = float("-0.747738") + std = float("0.667569") data = None @@ -6789,10 +6789,10 @@ class Program_weight_tensor_parameter_617: name = "parameter_617" shape = [128] dtype = "float32" - min_val = float("0.292294") - max_val = float("2.14222") - mean = float("1.05749") - std = float("0.241285") + min_val = float("0.286557") + max_val = float("2.12971") + mean = float("0.963078") + std = float("0.252374") data = None @@ -6800,10 +6800,10 @@ class Program_weight_tensor_parameter_618: name = "parameter_618" shape = [128] dtype = "float32" - min_val = float("0.00168553") - max_val = float("0.0271169") - mean = float("0.00561304") - std = float("0.00326986") + min_val = float("0.000299552") + max_val = float("0.00717751") + mean = float("0.00205572") + std = float("0.00102826") data = None @@ -6811,10 +6811,10 @@ class Program_weight_tensor_parameter_619: name = "parameter_619" shape = [128] dtype = "float32" - min_val = float("-0.250703") - max_val = float("0.2342") - mean = float("0.00559302") - std = float("0.0940671") + min_val = float("-0.172927") + max_val = float("0.263946") + mean = float("0.00188719") + std = float("0.0703671") data = None @@ -6822,10 +6822,10 @@ class Program_weight_tensor_parameter_620: name = "parameter_620" shape = [128, 96, 1, 1] dtype = "float32" - min_val = float("-0.161201") - max_val = float("0.174641") - mean = float("-0.00154525") - std = float("0.0227048") + min_val = float("-0.15476") + max_val = float("0.142506") + mean = float("-0.00109664") + std = float("0.0155992") data = None @@ -6833,10 +6833,10 @@ class Program_weight_tensor_parameter_621: name = "parameter_621" shape = [96] dtype = "float32" - min_val = float("-0.0177029") - max_val = float("0.000400182") - mean = float("-0.00757502") - std = float("0.00539744") + min_val = float("-0.0126476") + max_val = float("-0.000993819") + mean = float("-0.00607335") + std = float("0.00318226") data = None @@ -6844,10 +6844,10 @@ class Program_weight_tensor_parameter_622: name = "parameter_622" shape = [96, 96, 
1, 1] dtype = "float32" - min_val = float("-0.328403") - max_val = float("0.138399") - mean = float("-0.00747706") - std = float("0.0181007") + min_val = float("-0.201162") + max_val = float("0.136016") + mean = float("-0.00720846") + std = float("0.0140613") data = None @@ -6891,10 +6891,10 @@ class Program_weight_tensor_parameter_627: name = "parameter_627" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0686131") - max_val = float("0.07745") - mean = float("-0.00194833") - std = float("0.0132318") + min_val = float("-0.0505865") + max_val = float("0.0461092") + mean = float("-0.00117516") + std = float("0.00977429") data = None @@ -6938,10 +6938,10 @@ class Program_weight_tensor_parameter_632: name = "parameter_632" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0503947") - max_val = float("0.0591655") - mean = float("-0.000553561") - std = float("0.0104874") + min_val = float("-0.0457659") + max_val = float("0.0697484") + mean = float("-0.00021736") + std = float("0.00778433") data = None @@ -6985,10 +6985,10 @@ class Program_weight_tensor_parameter_637: name = "parameter_637" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0666035") - max_val = float("0.0834523") - mean = float("-0.000534285") - std = float("0.0120052") + min_val = float("-0.0742706") + max_val = float("0.0873867") + mean = float("-0.000456091") + std = float("0.00888736") data = None @@ -7032,10 +7032,10 @@ class Program_weight_tensor_parameter_642: name = "parameter_642" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0891029") - max_val = float("0.0714005") - mean = float("-0.00154811") - std = float("0.0144626") + min_val = float("-0.0664849") + max_val = float("0.0489036") + mean = float("-0.00150188") + std = float("0.0103773") data = None @@ -7079,10 +7079,10 @@ class Program_weight_tensor_parameter_647: name = "parameter_647" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0625883") - max_val = float("0.0562251") - mean = float("-0.000735936") - std = float("0.0107194") + min_val = float("-0.0407101") + max_val = float("0.050469") + mean = float("-0.000484431") + std = float("0.00782941") data = None @@ -7126,10 +7126,10 @@ class Program_weight_tensor_parameter_652: name = "parameter_652" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.103104") - max_val = float("0.0787725") - mean = float("-0.000430431") - std = float("0.0124082") + min_val = float("-0.0793737") + max_val = float("0.0613154") + mean = float("-0.000271202") + std = float("0.00895769") data = None @@ -7173,10 +7173,10 @@ class Program_weight_tensor_parameter_657: name = "parameter_657" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0876833") - max_val = float("0.07366") - mean = float("-0.00206219") - std = float("0.0169761") + min_val = float("-0.0919037") + max_val = float("0.0493862") + mean = float("-0.00106003") + std = float("0.0128276") data = None @@ -7220,10 +7220,10 @@ class Program_weight_tensor_parameter_662: name = "parameter_662" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0801655") - max_val = float("0.0747513") - mean = float("-0.000602095") - std = float("0.0109849") + min_val = float("-0.0804163") + max_val = float("0.0565016") + mean = float("-0.000357967") + std = float("0.00821755") data = None @@ -7267,10 +7267,10 @@ class Program_weight_tensor_parameter_667: name = "parameter_667" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0959795") - max_val = float("0.103953") - mean = 
float("-0.000426331") - std = float("0.0129461") + min_val = float("-0.0770235") + max_val = float("0.067704") + mean = float("-0.000344409") + std = float("0.00960955") data = None @@ -7314,10 +7314,10 @@ class Program_weight_tensor_parameter_672: name = "parameter_672" shape = [48, 96, 1, 1] dtype = "float32" - min_val = float("-0.164541") - max_val = float("0.137911") - mean = float("-0.0025117") - std = float("0.0243799") + min_val = float("-0.118598") + max_val = float("0.0941444") + mean = float("-0.00163172") + std = float("0.0167723") data = None @@ -7361,10 +7361,10 @@ class Program_weight_tensor_parameter_677: name = "parameter_677" shape = [48, 96, 1, 1] dtype = "float32" - min_val = float("-0.160023") - max_val = float("0.155941") - mean = float("-0.00065559") - std = float("0.0231529") + min_val = float("-0.0929333") + max_val = float("0.130766") + mean = float("-0.000650292") + std = float("0.0158564") data = None @@ -7372,10 +7372,10 @@ class Program_weight_tensor_parameter_678: name = "parameter_678" shape = [96] dtype = "float32" - min_val = float("-3.44006") - max_val = float("3.33808") - mean = float("0.314835") - std = float("1.15264") + min_val = float("-3.11018") + max_val = float("3.25616") + mean = float("0.366533") + std = float("1.14177") data = None @@ -7383,10 +7383,10 @@ class Program_weight_tensor_parameter_679: name = "parameter_679" shape = [96] dtype = "float32" - min_val = float("0.89902") - max_val = float("4.778") - mean = float("1.94257") - std = float("0.714568") + min_val = float("0.792014") + max_val = float("4.97259") + mean = float("1.87775") + std = float("0.779521") data = None @@ -7394,10 +7394,10 @@ class Program_weight_tensor_parameter_680: name = "parameter_680" shape = [96] dtype = "float32" - min_val = float("0.835229") - max_val = float("17.8402") - mean = float("2.94892") - std = float("2.46279") + min_val = float("0.315668") + max_val = float("13.6419") + mean = float("1.47445") + std = float("1.58175") data = None @@ -7405,10 +7405,10 @@ class Program_weight_tensor_parameter_681: name = "parameter_681" shape = [96] dtype = "float32" - min_val = float("-2.00554") - max_val = float("2.58848") - mean = float("-0.340428") - std = float("0.699341") + min_val = float("-1.15247") + max_val = float("1.963") + mean = float("-0.208063") + std = float("0.507653") data = None @@ -7416,10 +7416,10 @@ class Program_weight_tensor_parameter_682: name = "parameter_682" shape = [96, 64, 3, 3] dtype = "float32" - min_val = float("-0.120992") - max_val = float("0.107231") - mean = float("-0.000508647") - std = float("0.0129212") + min_val = float("-0.0933882") + max_val = float("0.0936018") + mean = float("-0.000299565") + std = float("0.00912066") data = None @@ -7463,10 +7463,10 @@ class Program_weight_tensor_parameter_687: name = "parameter_687" shape = [64, 32, 3, 3] dtype = "float32" - min_val = float("-0.14708") - max_val = float("0.16461") - mean = float("-0.000709544") - std = float("0.0199361") + min_val = float("-0.111523") + max_val = float("0.120134") + mean = float("-0.000452619") + std = float("0.0141786") data = None @@ -7510,10 +7510,10 @@ class Program_weight_tensor_parameter_692: name = "parameter_692" shape = [32, 32, 3, 3] dtype = "float32" - min_val = float("-0.249357") - max_val = float("0.202774") - mean = float("-0.000201137") - std = float("0.0258896") + min_val = float("-0.225894") + max_val = float("0.134795") + mean = float("-0.0001144") + std = float("0.0181569") data = None @@ -7557,8 +7557,8 @@ class 
Program_weight_tensor_parameter_697: name = "parameter_697" shape = [32, 3, 3, 3] dtype = "float32" - min_val = float("-0.28905") - max_val = float("0.284627") - mean = float("-0.00254561") - std = float("0.0658851") + min_val = float("-0.214027") + max_val = float("0.221186") + mean = float("-0.00133857") + std = float("0.0524068") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py index 8b1378917..133b954fb 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_0/weight_meta.py @@ -1 +1,7564 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.743033") + max_val = float("0.507361") + mean = float("0.020663") + std = float("0.126347") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.898341") + max_val = float("1.48377") + mean = float("0.987983") + std = float("0.0346382") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00273834") + max_val = float("0.117552") + mean = float("0.0109298") + std = float("0.0105056") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.37129") + max_val = float("0.129239") + mean = float("-0.0442188") + std = float("0.0445754") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0737572") + max_val = float("0.0366322") + mean = float("-0.000187312") + std = float("0.00262376") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.254357") + max_val = float("0.0598301") + mean = float("-0.0304517") + std = float("0.0365455") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.944225") + max_val = float("1.06993") + mean = float("0.98764") + std = float("0.0170711") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000787555") + max_val = float("0.0292413") + mean = float("0.00503152") + std = float("0.00299112") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0613106") + max_val = float("0.0666846") + mean = float("-0.0116626") + std = float("0.0163377") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0343938") + max_val = float("0.0285101") + mean = float("-0.000206475") + std = float("0.00196258") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.254357") + max_val = float("0.0598301") + mean = float("-0.0304517") + std = float("0.0365455") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.872284") + max_val = float("1.23739") + mean = float("1.03025") + std = float("0.03542") + data = None + + +class 
Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00298812") + max_val = float("0.06913") + mean = float("0.0100035") + std = float("0.00526466") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.217798") + max_val = float("0.069544") + mean = float("-0.0277671") + std = float("0.0366238") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0371442") + max_val = float("0.0433044") + mean = float("-5.51279e-05") + std = float("0.00132722") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.374067") + max_val = float("0.0523626") + mean = float("-0.0502448") + std = float("0.0490669") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.960272") + max_val = float("1.34778") + mean = float("1.02211") + std = float("0.0411657") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00848703") + max_val = float("0.127638") + mean = float("0.0290221") + std = float("0.0141029") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.267895") + max_val = float("0.386565") + mean = float("-0.0388769") + std = float("0.0578821") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.025652") + max_val = float("0.0582461") + mean = float("-5.85454e-05") + std = float("0.00148034") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.156742") + max_val = float("0.0105973") + mean = float("-0.0499453") + std = float("0.029959") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.910836") + max_val = float("1.05523") + mean = float("0.980981") + std = float("0.013256") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000818602") + max_val = float("0.015916") + mean = float("0.00425207") + std = float("0.00240053") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0553324") + max_val = float("0.0530611") + mean = float("-0.00573343") + std = float("0.0163695") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0380534") + max_val = float("0.0291262") + mean = float("-0.000145551") + std = float("0.00195161") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.156742") + max_val = float("0.0105973") + mean = float("-0.0499453") + std = float("0.029959") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.970749") + max_val = float("1.20629") + mean = 
float("1.02831") + std = float("0.0394471") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00291281") + max_val = float("0.0619909") + mean = float("0.0115855") + std = float("0.0064419") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.14316") + max_val = float("0.0743786") + mean = float("-0.0336338") + std = float("0.0347498") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0326419") + max_val = float("0.0530926") + mean = float("-6.71983e-05") + std = float("0.00132282") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.246103") + max_val = float("0.0162123") + mean = float("-0.0546987") + std = float("0.0390461") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.935276") + max_val = float("1.24725") + mean = float("1.0196") + std = float("0.0429498") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00642887") + max_val = float("0.14985") + mean = float("0.0275346") + std = float("0.0174424") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.213687") + max_val = float("0.100992") + mean = float("-0.0487334") + std = float("0.0524185") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0313679") + max_val = float("0.0549118") + mean = float("-6.83835e-05") + std = float("0.00148433") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.184919") + max_val = float("0.0322844") + mean = float("-0.053517") + std = float("0.0327273") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.904243") + max_val = float("1.01869") + mean = float("0.979303") + std = float("0.0159215") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00121268") + max_val = float("0.0185698") + mean = float("0.0049722") + std = float("0.00233631") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.130073") + max_val = float("0.0529869") + mean = float("-0.00273802") + std = float("0.0173354") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0417716") + max_val = float("0.0284739") + mean = float("-9.60982e-05") + std = float("0.00206623") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.184919") + max_val = float("0.0322844") + mean = float("-0.053517") + std = float("0.0327273") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = 
float("0.974999") + max_val = float("1.21351") + mean = float("1.03068") + std = float("0.0413997") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00469089") + max_val = float("0.0653512") + mean = float("0.0143986") + std = float("0.00774657") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.187123") + max_val = float("0.0984266") + mean = float("-0.0228005") + std = float("0.0323402") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0269551") + max_val = float("0.0469683") + mean = float("-4.80811e-05") + std = float("0.00136096") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.213445") + max_val = float("0.026485") + mean = float("-0.0541075") + std = float("0.0385587") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.92487") + max_val = float("1.16824") + mean = float("1.02125") + std = float("0.0372863") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00425419") + max_val = float("0.0987383") + mean = float("0.0223376") + std = float("0.0149169") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.187104") + max_val = float("0.162072") + mean = float("-0.0516316") + std = float("0.0556028") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0191691") + max_val = float("0.0361507") + mean = float("-8.79686e-05") + std = float("0.00151782") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.121349") + max_val = float("0.0778676") + mean = float("-0.0298326") + std = float("0.02283") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.963274") + max_val = float("1.12241") + mean = float("1.01457") + std = float("0.0265398") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00391785") + max_val = float("0.053385") + mean = float("0.00936041") + std = float("0.00450165") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.108579") + max_val = float("0.125758") + mean = float("-0.0221365") + std = float("0.0301141") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0419679") + max_val = float("0.0632127") + mean = float("-0.000100512") + std = float("0.00237333") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.116774") + max_val = float("0.0159135") + mean = float("-0.0176811") + std = float("0.0160398") + data = None + + +class Program_weight_tensor_parameter_56: + name = 
"parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.940853") + max_val = float("1.23804") + mean = float("1.0135") + std = float("0.0258691") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00285411") + max_val = float("0.0532654") + mean = float("0.00768658") + std = float("0.00402365") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.101351") + max_val = float("0.0758357") + mean = float("-0.0327951") + std = float("0.029396") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0323273") + max_val = float("0.0419256") + mean = float("-0.000154794") + std = float("0.00225172") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0965203") + max_val = float("0.00581689") + mean = float("-0.0225958") + std = float("0.0169398") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.958471") + max_val = float("1.19094") + mean = float("1.0392") + std = float("0.0317908") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00448496") + max_val = float("0.0556805") + mean = float("0.0128026") + std = float("0.00693747") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.233107") + max_val = float("0.160893") + mean = float("-0.0188617") + std = float("0.0553687") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0228928") + max_val = float("0.0361422") + mean = float("-2.08226e-05") + std = float("0.00120166") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.46246") + max_val = float("0.417289") + mean = float("0.0796105") + std = float("0.135843") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.861824") + max_val = float("1.37925") + mean = float("0.999753") + std = float("0.0519702") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.005019") + max_val = float("0.151152") + mean = float("0.0195404") + std = float("0.0157973") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.230007") + max_val = float("0.114525") + mean = float("-0.0460026") + std = float("0.0471628") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109087") + max_val = float("0.0690511") + mean = float("-0.000406287") + std = float("0.00575579") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.165038") + max_val = float("0.0547988") + mean = float("-0.0386347") + std = float("0.0421154") + data = None + + 
+class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.859852") + max_val = float("1.08355") + mean = float("0.961161") + std = float("0.0320714") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00155973") + max_val = float("0.0305334") + mean = float("0.00919111") + std = float("0.00640562") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0680704") + max_val = float("0.083547") + mean = float("-0.0167198") + std = float("0.027735") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0553851") + max_val = float("0.0384638") + mean = float("-0.000583231") + std = float("0.00426065") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.165038") + max_val = float("0.0547988") + mean = float("-0.0386347") + std = float("0.0421154") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.909392") + max_val = float("1.22799") + mean = float("1.04913") + std = float("0.0517355") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00618387") + max_val = float("0.0601428") + mean = float("0.0162134") + std = float("0.00795116") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.21459") + max_val = float("0.292505") + mean = float("-0.0268324") + std = float("0.057947") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0437504") + max_val = float("0.0604602") + mean = float("-8.54298e-05") + std = float("0.00300512") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.30904") + max_val = float("0.0489039") + mean = float("-0.0760544") + std = float("0.0618318") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.911397") + max_val = float("1.34725") + mean = float("1.02609") + std = float("0.0557122") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00973595") + max_val = float("0.152819") + mean = float("0.0448439") + std = float("0.024794") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.186501") + max_val = float("0.249142") + mean = float("-0.0277286") + std = float("0.0570363") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0495299") + max_val = float("0.0581408") + mean = float("-0.000118023") + std = float("0.00333522") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.27082") + max_val = float("0.0055029") + mean = 
float("-0.0905993") + std = float("0.0443888") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.88134") + max_val = float("1.1226") + mean = float("0.962732") + std = float("0.0271064") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00249203") + max_val = float("0.0283251") + mean = float("0.00900569") + std = float("0.00424473") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0503456") + max_val = float("0.0503521") + mean = float("-0.0123481") + std = float("0.015716") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0558178") + max_val = float("0.0423444") + mean = float("-0.000674284") + std = float("0.00453073") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.27082") + max_val = float("0.0055029") + mean = float("-0.0905993") + std = float("0.0443888") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.941929") + max_val = float("1.2385") + mean = float("1.04237") + std = float("0.0442231") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00587151") + max_val = float("0.0741055") + mean = float("0.0191318") + std = float("0.010681") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.148072") + max_val = float("0.0938185") + mean = float("-0.0237551") + std = float("0.0412601") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0454471") + max_val = float("0.0592261") + mean = float("-0.000107954") + std = float("0.00304373") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.307199") + max_val = float("0.0609234") + mean = float("-0.111087") + std = float("0.0621482") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.918678") + max_val = float("1.21815") + mean = float("1.02816") + std = float("0.0566506") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.0128713") + max_val = float("0.177337") + mean = float("0.0412506") + std = float("0.0235929") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.172019") + max_val = float("0.0591701") + mean = float("-0.0428226") + std = float("0.0398324") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0466526") + max_val = float("0.0663792") + mean = float("-0.000155177") + std = float("0.00347012") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = 
float("-0.366754") + max_val = float("-0.0134222") + mean = float("-0.100255") + std = float("0.0548872") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.868592") + max_val = float("1.06216") + mean = float("0.959936") + std = float("0.0231102") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00298303") + max_val = float("0.0244492") + mean = float("0.00883176") + std = float("0.00373019") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.072098") + max_val = float("0.042818") + mean = float("-0.0280475") + std = float("0.0244836") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0469252") + max_val = float("0.0444719") + mean = float("-0.00112462") + std = float("0.00488669") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.366754") + max_val = float("-0.0134222") + mean = float("-0.100255") + std = float("0.0548872") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.935173") + max_val = float("1.20371") + mean = float("1.03374") + std = float("0.043616") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00859096") + max_val = float("0.107539") + mean = float("0.0237503") + std = float("0.0166029") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.162328") + max_val = float("0.123377") + mean = float("-0.0156432") + std = float("0.0485434") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0520373") + max_val = float("0.0633059") + mean = float("-6.95137e-05") + std = float("0.00322718") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.516767") + max_val = float("-0.0203543") + mean = float("-0.126034") + std = float("0.0638195") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.849485") + max_val = float("1.2535") + mean = float("1.02927") + std = float("0.0639047") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.0148703") + max_val = float("0.241855") + mean = float("0.0348458") + std = float("0.0220686") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.185102") + max_val = float("0.184245") + mean = float("-0.0439409") + std = float("0.0540873") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0395493") + max_val = float("0.0583323") + mean = float("-0.000157674") + std = float("0.00373071") + data = None + + +class 
Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.2713") + max_val = float("0.0560855") + mean = float("-0.0865079") + std = float("0.0441199") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.915361") + max_val = float("1.26769") + mean = float("1.02552") + std = float("0.0564528") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00711924") + max_val = float("0.0896093") + mean = float("0.0157688") + std = float("0.00827259") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.126929") + max_val = float("0.130507") + mean = float("-0.026764") + std = float("0.0351332") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0617836") + max_val = float("0.0625637") + mean = float("-0.000234352") + std = float("0.00557708") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.162411") + max_val = float("0.0343933") + mean = float("-0.0146927") + std = float("0.026659") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.896164") + max_val = float("1.15119") + mean = float("1.0042") + std = float("0.0388476") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00389326") + max_val = float("0.0742727") + mean = float("0.0112922") + std = float("0.00784653") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0913461") + max_val = float("0.0634663") + mean = float("-0.0244524") + std = float("0.0247669") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0566803") + max_val = float("0.0710687") + mean = float("-0.000235042") + std = float("0.00475399") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.14785") + max_val = float("0.00919369") + mean = float("-0.043826") + std = float("0.0292536") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.94402") + max_val = float("1.18839") + mean = float("1.03601") + std = float("0.0346975") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00378721") + max_val = float("0.0744969") + mean = float("0.0185453") + std = float("0.0106259") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.529234") + max_val = float("0.400014") + mean = float("-0.0329347") + std = float("0.115644") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.033215") + max_val = float("0.0441369") + 
mean = float("-4.11432e-05") + std = float("0.0028055") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.797229") + max_val = float("1.67957") + mean = float("0.205711") + std = float("0.338337") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.583548") + max_val = float("1.53646") + mean = float("0.971373") + std = float("0.108279") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00968032") + max_val = float("0.326665") + mean = float("0.0366729") + std = float("0.0364719") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.384641") + max_val = float("0.143155") + mean = float("-0.0453675") + std = float("0.063487") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.162906") + max_val = float("0.103755") + mean = float("-0.000761071") + std = float("0.0121564") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.282247") + max_val = float("0.281851") + mean = float("0.00710046") + std = float("0.11669") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.77923") + max_val = float("1.27017") + mean = float("0.926291") + std = float("0.0711138") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00244381") + max_val = float("0.0367146") + mean = float("0.014305") + std = float("0.0077109") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0509503") + max_val = float("0.0931656") + mean = float("-0.0103877") + std = float("0.0225298") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.10638") + max_val = float("0.0638405") + mean = float("-0.00110258") + std = float("0.00883031") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.282247") + max_val = float("0.281851") + mean = float("0.00710046") + std = float("0.11669") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.686595") + max_val = float("1.34871") + mean = float("1.04697") + std = float("0.0915013") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00849613") + max_val = float("0.0476423") + mean = float("0.0241855") + std = float("0.00965118") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.176894") + max_val = float("0.124313") + mean = float("-0.0161684") + std = float("0.0511988") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val 
= float("-0.074123") + max_val = float("0.0992917") + mean = float("-0.000128278") + std = float("0.00660925") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.457141") + max_val = float("0.284175") + mean = float("-0.137858") + std = float("0.150133") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.828432") + max_val = float("1.7552") + mean = float("1.0098") + std = float("0.141163") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.0197657") + max_val = float("0.21109") + mean = float("0.0558951") + std = float("0.0328392") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.177509") + max_val = float("0.117139") + mean = float("-0.0272736") + std = float("0.0445487") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0848417") + max_val = float("0.0927675") + mean = float("-0.000388559") + std = float("0.00736544") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.394379") + max_val = float("0.00738925") + mean = float("-0.148925") + std = float("0.0838505") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.717037") + max_val = float("0.995563") + mean = float("0.881515") + std = float("0.0561052") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00228925") + max_val = float("0.0352953") + mean = float("0.0132225") + std = float("0.00553136") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0407708") + max_val = float("0.0474609") + mean = float("0.00908712") + std = float("0.0191192") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0741001") + max_val = float("0.0598664") + mean = float("-0.000306918") + std = float("0.00908011") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.394379") + max_val = float("0.00738925") + mean = float("-0.148925") + std = float("0.0838505") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.805436") + max_val = float("1.22568") + mean = float("1.05162") + std = float("0.0896051") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00804892") + max_val = float("0.0701516") + mean = float("0.0242923") + std = float("0.0134998") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.12638") + max_val = float("0.0436454") + mean = float("-0.0376916") + std = float("0.0295355") + data = None + + +class Program_weight_tensor_parameter_159: + name = 
"parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0809044") + max_val = float("0.126721") + mean = float("-0.00048597") + std = float("0.00701167") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.497069") + max_val = float("0.0662031") + mean = float("-0.208094") + std = float("0.125894") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.70186") + max_val = float("1.54155") + mean = float("0.99652") + std = float("0.130955") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0257306") + max_val = float("0.191174") + mean = float("0.0583405") + std = float("0.0285068") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.124903") + max_val = float("0.0136205") + mean = float("-0.0566505") + std = float("0.0340341") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0825468") + max_val = float("0.0998638") + mean = float("-0.000585607") + std = float("0.00815449") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.488308") + max_val = float("0.0969995") + mean = float("-0.1903") + std = float("0.108395") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.693424") + max_val = float("1.02499") + mean = float("0.878418") + std = float("0.0578356") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00563897") + max_val = float("0.0250253") + mean = float("0.0115188") + std = float("0.00399237") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0555785") + max_val = float("0.0182289") + mean = float("-0.0150525") + std = float("0.0144486") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0682249") + max_val = float("0.0726027") + mean = float("-0.00190267") + std = float("0.0112706") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.488308") + max_val = float("0.0969995") + mean = float("-0.1903") + std = float("0.108395") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.64497") + max_val = float("1.26041") + mean = float("1.02347") + std = float("0.096841") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0135838") + max_val = float("0.108919") + mean = float("0.0303804") + std = float("0.0148297") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.0953459") + max_val = float("0.045938") + mean = float("-0.0243831") + std = float("0.0289356") + data = None + + 
+class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0690141") + max_val = float("0.0907724") + mean = float("-0.000336498") + std = float("0.00782096") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.751265") + max_val = float("0.0462032") + mean = float("-0.246976") + std = float("0.15091") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.715124") + max_val = float("1.24904") + mean = float("0.989016") + std = float("0.0990378") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.019195") + max_val = float("0.100826") + mean = float("0.0410936") + std = float("0.0178574") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.191045") + max_val = float("0.167209") + mean = float("-0.0509906") + std = float("0.0585368") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.109709") + max_val = float("0.102873") + mean = float("-0.000302796") + std = float("0.00950446") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.682344") + max_val = float("0.560615") + mean = float("-0.183267") + std = float("0.262805") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.650671") + max_val = float("1.28138") + mean = float("0.926136") + std = float("0.127332") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0166935") + max_val = float("0.0740427") + mean = float("0.0351758") + std = float("0.0134938") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.139363") + max_val = float("0.124573") + mean = float("-0.0021096") + std = float("0.0513469") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.176164") + max_val = float("0.183176") + mean = float("-0.000547214") + std = float("0.0128837") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.163476") + max_val = float("0.175629") + mean = float("0.0382556") + std = float("0.0657315") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.668729") + max_val = float("1.38231") + mean = float("0.957819") + std = float("0.134079") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.0053491") + max_val = float("0.0408792") + mean = float("0.0111961") + std = float("0.00625857") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0998904") + max_val = float("0.112465") + mean = 
float("-0.0129474") + std = float("0.0345809") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.10245") + max_val = float("0.143631") + mean = float("-0.000321447") + std = float("0.0083028") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.339438") + max_val = float("0.0668367") + mean = float("-0.126421") + std = float("0.0576214") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.709181") + max_val = float("1.31558") + mean = float("0.850576") + std = float("0.0685498") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00982368") + max_val = float("0.120733") + mean = float("0.0222883") + std = float("0.0126702") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.158372") + max_val = float("0.119076") + mean = float("-0.0375286") + std = float("0.0397228") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0618403") + max_val = float("0.087686") + mean = float("-0.000670588") + std = float("0.00876173") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.246737") + max_val = float("0.0626357") + mean = float("-0.104556") + std = float("0.0398377") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.86139") + max_val = float("1.36552") + mean = float("1.03591") + std = float("0.0538437") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.0112363") + max_val = float("0.181727") + mean = float("0.0226795") + std = float("0.0127742") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.257774") + max_val = float("0.11719") + mean = float("-0.0706271") + std = float("0.0443703") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0766884") + max_val = float("0.0811282") + mean = float("-0.000905669") + std = float("0.00825814") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.163787") + max_val = float("-0.0178854") + mean = float("-0.0640593") + std = float("0.0247121") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.863464") + max_val = float("1.00313") + mean = float("0.954504") + std = float("0.0217909") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00426603") + max_val = float("0.0274319") + mean = float("0.00904264") + std = float("0.00343389") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = 
"float32" + min_val = float("-0.128303") + max_val = float("0.0856769") + mean = float("-0.0260934") + std = float("0.0402736") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0361114") + max_val = float("0.0350956") + mean = float("-0.000748032") + std = float("0.00552871") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.163787") + max_val = float("-0.0178854") + mean = float("-0.0640593") + std = float("0.0247121") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.931794") + max_val = float("1.03438") + mean = float("0.986242") + std = float("0.0213097") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0162213") + max_val = float("0.15521") + mean = float("0.0388931") + std = float("0.0172201") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.27218") + max_val = float("0.153326") + mean = float("-0.0421112") + std = float("0.072068") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0286582") + max_val = float("0.0402364") + mean = float("-0.000141068") + std = float("0.00300371") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.26148") + max_val = float("0.0033202") + mean = float("-0.0610375") + std = float("0.0375229") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.955773") + max_val = float("1.12848") + mean = float("1.0249") + std = float("0.0296608") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0478933") + max_val = float("0.390037") + mean = float("0.10908") + std = float("0.0501276") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.460456") + max_val = float("0.58138") + mean = float("-0.0865142") + std = float("0.149413") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0310591") + max_val = float("0.0529931") + mean = float("-0.000143811") + std = float("0.00358108") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.141458") + max_val = float("-0.0079347") + mean = float("-0.0529687") + std = float("0.0249688") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.959304") + max_val = float("1.04419") + mean = float("0.9895") + std = float("0.0122273") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00246453") + max_val = float("0.0136491") + mean = float("0.00528701") + std = float("0.00174133") + data = None + + +class 
Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0640118") + max_val = float("0.0474594") + mean = float("-0.0144983") + std = float("0.0208838") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.031823") + max_val = float("0.0528765") + mean = float("-0.000451702") + std = float("0.00561687") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.141458") + max_val = float("-0.0079347") + mean = float("-0.0529687") + std = float("0.0249688") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.975086") + max_val = float("1.08421") + mean = float("1.01035") + std = float("0.0198136") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0102") + max_val = float("0.0531645") + mean = float("0.0191559") + std = float("0.0068919") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.158448") + max_val = float("0.0748376") + mean = float("-0.0355524") + std = float("0.0377289") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0272448") + max_val = float("0.0453462") + mean = float("-0.000119593") + std = float("0.00283721") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.176533") + max_val = float("-0.0140918") + mean = float("-0.0627926") + std = float("0.0255225") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.959453") + max_val = float("1.17829") + mean = float("1.01679") + std = float("0.0291818") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.049348") + max_val = float("0.418024") + mean = float("0.115489") + std = float("0.0507256") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.807392") + max_val = float("0.359917") + mean = float("-0.202071") + std = float("0.159679") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0318035") + max_val = float("0.0525248") + mean = float("-0.000289932") + std = float("0.00370175") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.101496") + max_val = float("0.00229944") + mean = float("-0.0432151") + std = float("0.017968") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.948209") + max_val = float("1.06033") + mean = float("0.999231") + std = float("0.0183132") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00265606") + max_val = float("0.0159575") 
+ mean = float("0.00530049") + std = float("0.0019803") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0698203") + max_val = float("0.09676") + mean = float("-0.0136201") + std = float("0.0227753") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0267064") + max_val = float("0.0456144") + mean = float("-0.000432224") + std = float("0.0062217") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.101496") + max_val = float("0.00229944") + mean = float("-0.0432151") + std = float("0.017968") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.965303") + max_val = float("1.08528") + mean = float("1.00108") + std = float("0.0207208") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00866238") + max_val = float("0.0846476") + mean = float("0.0232237") + std = float("0.0107339") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.172128") + max_val = float("0.121261") + mean = float("-0.0385422") + std = float("0.048641") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0216114") + max_val = float("0.0354893") + mean = float("-0.00013956") + std = float("0.00290758") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.204286") + max_val = float("-0.0144314") + mean = float("-0.0931084") + std = float("0.0313584") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.924946") + max_val = float("1.13197") + mean = float("1.03248") + std = float("0.033681") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0171774") + max_val = float("0.151451") + mean = float("0.0404337") + std = float("0.0194303") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.214493") + max_val = float("0.18665") + mean = float("-0.031976") + std = float("0.0594646") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0401705") + max_val = float("0.0541615") + mean = float("-0.000152877") + std = float("0.00458084") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.283235") + max_val = float("-0.0399838") + mean = float("-0.14066") + std = float("0.0427785") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.926396") + max_val = float("1.23818") + mean = float("1.04777") + std = float("0.044691") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + 
dtype = "float32" + min_val = float("0.00760291") + max_val = float("0.0292301") + mean = float("0.0135064") + std = float("0.00386928") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.082307") + max_val = float("0.240184") + mean = float("0.034674") + std = float("0.0405637") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0633244") + max_val = float("0.0929278") + mean = float("-0.000269954") + std = float("0.00689158") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.259346") + max_val = float("0.089777") + mean = float("-0.101264") + std = float("0.0626651") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.942808") + max_val = float("1.43592") + mean = float("1.10635") + std = float("0.0692502") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00933576") + max_val = float("0.0689556") + mean = float("0.0172674") + std = float("0.00566401") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0892463") + max_val = float("0.100943") + mean = float("0.0114547") + std = float("0.0327796") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0570768") + max_val = float("0.156442") + mean = float("-0.000259374") + std = float("0.0074107") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.294876") + max_val = float("-0.0604914") + mean = float("-0.156798") + std = float("0.0456771") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.724694") + max_val = float("1.03284") + mean = float("0.862991") + std = float("0.0407095") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.0113886") + max_val = float("0.0802941") + mean = float("0.0276917") + std = float("0.0102752") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.162031") + max_val = float("0.204487") + mean = float("-0.0755192") + std = float("0.0459278") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0292396") + max_val = float("0.0392698") + mean = float("-0.000553119") + std = float("0.00501583") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.119028") + max_val = float("0.0118783") + mean = float("-0.0644734") + std = float("0.0183049") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.943984") + max_val = float("1.15965") + mean = float("1.02714") + std = float("0.0265502") + data = None + + +class 
Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00981212") + max_val = float("0.0919141") + mean = float("0.019193") + std = float("0.00623331") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.163952") + max_val = float("0.165282") + mean = float("-0.0496244") + std = float("0.0363468") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0555945") + max_val = float("0.0656988") + mean = float("-0.000287705") + std = float("0.00431166") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.185724") + max_val = float("0.101576") + mean = float("-0.0493899") + std = float("0.0232288") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.895849") + max_val = float("1.01586") + mean = float("0.977906") + std = float("0.0132131") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00471585") + max_val = float("0.0485148") + mean = float("0.0128933") + std = float("0.00549912") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0800717") + max_val = float("0.0775817") + mean = float("-0.0161284") + std = float("0.027053") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0257666") + max_val = float("0.0430036") + mean = float("-0.000213548") + std = float("0.0035453") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.185724") + max_val = float("0.101576") + mean = float("-0.0493899") + std = float("0.0232288") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.894262") + max_val = float("1.09303") + mean = float("0.980357") + std = float("0.0138837") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0282483") + max_val = float("0.349296") + mean = float("0.0853497") + std = float("0.0378846") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.301014") + max_val = float("0.153614") + mean = float("-0.0944117") + std = float("0.0767887") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0401543") + max_val = float("0.0468426") + mean = float("-0.000144951") + std = float("0.00128804") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0960581") + max_val = float("0.159253") + mean = float("-0.016533") + std = float("0.0220565") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.948727") + max_val = 
float("1.24141") + mean = float("1.01995") + std = float("0.03235") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0143644") + max_val = float("0.251509") + mean = float("0.0688272") + std = float("0.0333456") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.226292") + max_val = float("0.268143") + mean = float("-0.0368348") + std = float("0.087958") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0312562") + max_val = float("0.0430391") + mean = float("-5.96221e-05") + std = float("0.00162276") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.1174") + max_val = float("0.0562049") + mean = float("-0.0226758") + std = float("0.0171998") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.959883") + max_val = float("1.14924") + mean = float("1.02097") + std = float("0.0314417") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0684558") + max_val = float("0.632888") + mean = float("0.21699") + std = float("0.0818797") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-2.54336") + max_val = float("2.10932") + mean = float("-0.081684") + std = float("0.72804") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0302692") + max_val = float("0.0501981") + mean = float("3.98672e-05") + std = float("0.00292002") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0227048") + max_val = float("0.0393072") + mean = float("-0.00516438") + std = float("0.00791319") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.954966") + max_val = float("1.13128") + mean = float("0.992945") + std = float("0.0198847") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00285281") + max_val = float("0.0151266") + mean = float("0.00653893") + std = float("0.00215647") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.103457") + max_val = float("0.0644516") + mean = float("-0.0399251") + std = float("0.0284929") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0241085") + max_val = float("0.0314178") + mean = float("-0.000494811") + std = float("0.00322186") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0227048") + max_val = float("0.0393072") + mean = float("-0.00516438") + std = float("0.00791319") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" 
+ shape = [384] + dtype = "float32" + min_val = float("0.942703") + max_val = float("1.15329") + mean = float("1.00343") + std = float("0.0304117") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0131764") + max_val = float("0.0914844") + mean = float("0.038932") + std = float("0.0152216") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.27583") + max_val = float("0.129426") + mean = float("-0.115384") + std = float("0.0654124") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00991428") + max_val = float("0.0235268") + mean = float("-0.000178382") + std = float("0.00124102") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0484464") + max_val = float("0.0216319") + mean = float("-0.00460097") + std = float("0.00967076") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.963084") + max_val = float("1.21833") + mean = float("1.01835") + std = float("0.0242792") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0846965") + max_val = float("0.477757") + mean = float("0.218773") + std = float("0.0720263") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.09295") + max_val = float("1.28986") + mean = float("-0.341772") + std = float("0.305414") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0119066") + max_val = float("0.018937") + mean = float("-0.000190711") + std = float("0.00143766") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0328709") + max_val = float("0.0296847") + mean = float("0.00167559") + std = float("0.0101838") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.982379") + max_val = float("1.06457") + mean = float("1.00463") + std = float("0.00875427") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00182393") + max_val = float("0.0117111") + mean = float("0.00446161") + std = float("0.00149495") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0690861") + max_val = float("0.16253") + mean = float("-0.0277446") + std = float("0.0287758") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0149167") + max_val = float("0.024544") + mean = float("-0.00035583") + std = float("0.00252494") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0328709") + max_val = float("0.0296847") + mean = float("0.00167559") + std = float("0.0101838") + data 
= None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.972532") + max_val = float("1.06753") + mean = float("1.00429") + std = float("0.0130159") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00916589") + max_val = float("0.0757382") + mean = float("0.026734") + std = float("0.00932033") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.212158") + max_val = float("0.398515") + mean = float("-0.0802837") + std = float("0.079264") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0093195") + max_val = float("0.0136616") + mean = float("-0.000127923") + std = float("0.000958249") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0472265") + max_val = float("0.0147404") + mean = float("-0.0123072") + std = float("0.0107235") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.97566") + max_val = float("1.10735") + mean = float("1.01405") + std = float("0.0167737") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0120215") + max_val = float("0.0818828") + mean = float("0.0302745") + std = float("0.00902368") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.144457") + max_val = float("0.215418") + mean = float("-0.0320907") + std = float("0.0550216") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00932646") + max_val = float("0.0180928") + mean = float("-5.9912e-05") + std = float("0.00138574") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0636266") + max_val = float("0.0504044") + mean = float("-0.0321583") + std = float("0.01479") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.963145") + max_val = float("1.05901") + mean = float("1.01824") + std = float("0.0133966") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.017764") + max_val = float("0.0645009") + mean = float("0.0305608") + std = float("0.00719559") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.20154") + max_val = float("0.391038") + mean = float("-0.0613575") + std = float("0.0615632") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0154521") + max_val = float("0.0405892") + mean = float("-0.000219009") + std = float("0.00320931") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0181982") + max_val 
= float("0.0407238") + mean = float("0.0138314") + std = float("0.0107853") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("1.02428") + max_val = float("1.13089") + mean = float("1.07731") + std = float("0.018385") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0297022") + max_val = float("0.0861284") + mean = float("0.0459045") + std = float("0.0083673") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.214848") + max_val = float("0.119899") + mean = float("-0.107077") + std = float("0.0483314") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0202669") + max_val = float("0.0292336") + mean = float("-0.000379697") + std = float("0.00443318") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.76738") + max_val = float("-0.74061") + mean = float("-2.19149") + std = float("0.429665") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("1.61677") + max_val = float("4.4298") + mean = float("3.07687") + std = float("0.251634") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("0.00318616") + max_val = float("0.0156112") + mean = float("0.00619051") + std = float("0.00132122") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [1024] + dtype = "float32" + min_val = float("-0.151272") + max_val = float("0.141612") + mean = float("-0.0486746") + std = float("0.0293873") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0373496") + max_val = float("0.0729797") + mean = float("-0.000461198") + std = float("0.00502166") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [768] + dtype = "float32" + min_val = float("-0.0138178") + max_val = float("0.000466847") + mean = float("-0.00284092") + std = float("0.00262746") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.119554") + max_val = float("0.123314") + mean = float("-0.00126139") + std = float("0.00352046") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("-1.77338") + max_val = float("0.412596") + mean = float("-0.274223") + std = float("0.294015") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("0.235525") + max_val = float("2.08023") + mean = float("0.662181") + std = float("0.284497") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384] + dtype = "float32" + min_val = float("0.000109661") + max_val = float("0.00172742") + mean = float("0.000430241") + std = float("0.000193304") + data = None + + +class Program_weight_tensor_parameter_335: + name = 
"parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.130554") + max_val = float("0.0981204") + mean = float("0.0202639") + std = float("0.0336592") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0274341") + max_val = float("0.0353346") + mean = float("-0.000364024") + std = float("0.0038463") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("-1.77338") + max_val = float("0.412596") + mean = float("-0.274223") + std = float("0.294015") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("0.350868") + max_val = float("2.87683") + mean = float("1.11168") + std = float("0.332422") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384] + dtype = "float32" + min_val = float("0.000733881") + max_val = float("0.0108386") + mean = float("0.002852") + std = float("0.0011359") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.119853") + max_val = float("0.177988") + mean = float("0.00969028") + std = float("0.0360493") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0215559") + max_val = float("0.0430067") + mean = float("-3.73056e-05") + std = float("0.00229731") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("-2.6275") + max_val = float("0.015896") + mean = float("-1.59098") + std = float("0.414959") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("0.47721") + max_val = float("1.87134") + mean = float("1.1266") + std = float("0.14685") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384] + dtype = "float32" + min_val = float("0.120369") + max_val = float("0.515457") + mean = float("0.241946") + std = float("0.0512298") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-1.59625") + max_val = float("1.32189") + mean = float("-0.654999") + std = float("0.281837") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0216367") + max_val = float("0.0531612") + mean = float("-0.000286661") + std = float("0.00308802") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("-1.93672") + max_val = float("1.08927") + mean = float("-0.561118") + std = float("0.377358") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("0.163384") + max_val = float("2.03243") + mean = float("0.568654") + std = float("0.23058") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384] + dtype = "float32" + min_val = float("0.000211366") + max_val = float("0.00209824") + mean = float("0.000563789") + std = float("0.000253525") + data = 
None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0508295") + max_val = float("0.116156") + mean = float("0.0281283") + std = float("0.0230871") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0278888") + max_val = float("0.0403076") + mean = float("-0.000537846") + std = float("0.00370404") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("-1.93672") + max_val = float("1.08927") + mean = float("-0.561118") + std = float("0.377358") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("0.579866") + max_val = float("2.2087") + mean = float("1.11942") + std = float("0.260049") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384] + dtype = "float32" + min_val = float("0.00147811") + max_val = float("0.014418") + mean = float("0.00450143") + std = float("0.00147944") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.198479") + max_val = float("0.184089") + mean = float("0.0300434") + std = float("0.0432483") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0243065") + max_val = float("0.0531256") + mean = float("-8.65998e-05") + std = float("0.00245086") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.40101") + max_val = float("0.85896") + mean = float("-1.44185") + std = float("0.356455") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.363523") + max_val = float("1.90593") + mean = float("1.14998") + std = float("0.141316") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("0.0902803") + max_val = float("0.317551") + mean = float("0.162194") + std = float("0.0353807") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.96643") + max_val = float("1.13281") + mean = float("-0.366938") + std = float("0.183667") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0266427") + max_val = float("0.0728291") + mean = float("-0.000293523") + std = float("0.00309007") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("-1.88671") + max_val = float("0.68709") + mean = float("-0.470258") + std = float("0.394922") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("0.081144") + max_val = float("2.10167") + mean = float("0.444389") + std = float("0.213461") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384] + dtype = "float32" + min_val = float("0.000181328") + max_val = float("0.00238775") + 
mean = float("0.000598608") + std = float("0.000263232") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.122912") + max_val = float("0.116066") + mean = float("0.0424948") + std = float("0.0250559") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.035118") + max_val = float("0.0374998") + mean = float("-0.000747404") + std = float("0.00319117") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("-1.88671") + max_val = float("0.68709") + mean = float("-0.470258") + std = float("0.394922") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("0.542057") + max_val = float("2.23163") + mean = float("1.09037") + std = float("0.258504") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384] + dtype = "float32" + min_val = float("0.00171885") + max_val = float("0.0206226") + mean = float("0.00584658") + std = float("0.00207454") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.160396") + max_val = float("0.169188") + mean = float("0.0475079") + std = float("0.0427583") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0204974") + max_val = float("0.0381036") + mean = float("-0.000108104") + std = float("0.00256466") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-2.18087") + max_val = float("0.356852") + mean = float("-1.40712") + std = float("0.272169") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.61486") + max_val = float("1.63036") + mean = float("1.12182") + std = float("0.102341") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("0.0686106") + max_val = float("0.300719") + mean = float("0.120296") + std = float("0.0324319") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-1.00958") + max_val = float("0.205067") + mean = float("-0.252895") + std = float("0.152032") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0201776") + max_val = float("0.0515865") + mean = float("-0.000247363") + std = float("0.00287082") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-2.9181") + max_val = float("2.39818") + mean = float("-0.744825") + std = float("0.666127") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("1.00684") + max_val = float("2.89816") + mean = float("1.92298") + std = float("0.268903") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + 
min_val = float("0.00323555") + max_val = float("0.0164731") + mean = float("0.00686751") + std = float("0.00157055") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.29054") + max_val = float("0.150754") + mean = float("0.0807349") + std = float("0.0368912") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0488105") + max_val = float("0.0678759") + mean = float("-0.000895552") + std = float("0.00678193") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.26258") + max_val = float("0.746331") + mean = float("-0.781383") + std = float("0.476609") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.922112") + max_val = float("2.88383") + mean = float("2.09535") + std = float("0.309922") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.0011051") + max_val = float("0.00520625") + mean = float("0.00250761") + std = float("0.00049518") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.0778026") + max_val = float("0.0934732") + mean = float("0.049973") + std = float("0.0213988") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0427813") + max_val = float("0.0835518") + mean = float("-0.000478385") + std = float("0.00455026") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [768] + dtype = "float32" + min_val = float("-2.45583") + max_val = float("0.559171") + mean = float("-0.951569") + std = float("0.337772") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [768] + dtype = "float32" + min_val = float("0.470726") + max_val = float("1.80127") + mean = float("0.915463") + std = float("0.147887") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [768] + dtype = "float32" + min_val = float("0.0124823") + max_val = float("0.0978023") + mean = float("0.0216824") + std = float("0.00583908") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [768] + dtype = "float32" + min_val = float("-0.476605") + max_val = float("0.316374") + mean = float("0.0538298") + std = float("0.0770574") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.033942") + max_val = float("0.048741") + mean = float("-0.00014743") + std = float("0.00292614") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [512] + dtype = "float32" + min_val = float("-3.41671") + max_val = float("1.90925") + mean = float("-1.24212") + std = float("0.518646") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [512] + dtype = "float32" + min_val = float("0.417792") + max_val = float("1.62807") + mean = float("1.12265") + std = float("0.144739") + data = None + + +class 
Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [512] + dtype = "float32" + min_val = float("0.00686698") + max_val = float("0.0300204") + mean = float("0.0132355") + std = float("0.00263341") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [512] + dtype = "float32" + min_val = float("-0.204822") + max_val = float("0.0997006") + mean = float("-0.0635602") + std = float("0.0500124") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0779687") + max_val = float("0.204082") + mean = float("-0.000758003") + std = float("0.00949015") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-0.010737") + max_val = float("0.00241083") + mean = float("-0.00328027") + std = float("0.00285957") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.214148") + max_val = float("0.108238") + mean = float("-0.00206829") + std = float("0.00564515") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [192] + dtype = "float32" + min_val = float("-1.9212") + max_val = float("0.337783") + mean = float("-0.37118") + std = float("0.319858") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [192] + dtype = "float32" + min_val = float("0.000863766") + max_val = float("2.25766") + mean = float("0.550683") + std = float("0.447335") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [192] + dtype = "float32" + min_val = float("5.24031e-07") + max_val = float("0.00212704") + mean = float("0.000573903") + std = float("0.000337641") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [192] + dtype = "float32" + min_val = float("-0.0361206") + max_val = float("0.0752975") + mean = float("0.0112954") + std = float("0.019226") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0324994") + max_val = float("0.0613531") + mean = float("-0.000532142") + std = float("0.00461667") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [192] + dtype = "float32" + min_val = float("-1.9212") + max_val = float("0.337783") + mean = float("-0.37118") + std = float("0.319858") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [192] + dtype = "float32" + min_val = float("0.331404") + max_val = float("2.85491") + mean = float("1.19184") + std = float("0.517709") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [192] + dtype = "float32" + min_val = float("0.00140744") + max_val = float("0.0176487") + mean = float("0.00594824") + std = float("0.00242133") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [192] + dtype = "float32" + min_val = float("-0.149957") + max_val = float("0.183059") + mean = float("0.0286224") + std = float("0.0529416") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0304763") + max_val = 
float("0.0467537") + mean = float("-0.000182961") + std = float("0.00351809") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [192] + dtype = "float32" + min_val = float("-2.92889") + max_val = float("-0.215128") + mean = float("-1.34775") + std = float("0.401955") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [192] + dtype = "float32" + min_val = float("0.691777") + max_val = float("2.01593") + mean = float("1.16727") + std = float("0.167346") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [192] + dtype = "float32" + min_val = float("0.12144") + max_val = float("0.513783") + mean = float("0.2084") + std = float("0.0576939") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [192] + dtype = "float32" + min_val = float("-3.52808") + max_val = float("1.58137") + mean = float("-0.298547") + std = float("0.409608") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0306673") + max_val = float("0.0446454") + mean = float("-0.000270107") + std = float("0.00414217") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [192] + dtype = "float32" + min_val = float("-1.93263") + max_val = float("0.441273") + mean = float("-0.309778") + std = float("0.3114") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [192] + dtype = "float32" + min_val = float("1.31901e-05") + max_val = float("1.74196") + mean = float("0.401521") + std = float("0.316685") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [192] + dtype = "float32" + min_val = float("2.4925e-10") + max_val = float("0.00266824") + mean = float("0.000571331") + std = float("0.000381815") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [192] + dtype = "float32" + min_val = float("-0.0718396") + max_val = float("0.0473328") + mean = float("0.0120177") + std = float("0.0141073") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0369971") + max_val = float("0.0474525") + mean = float("-0.000537935") + std = float("0.00432741") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [192] + dtype = "float32" + min_val = float("-1.93263") + max_val = float("0.441273") + mean = float("-0.309778") + std = float("0.3114") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [192] + dtype = "float32" + min_val = float("0.42444") + max_val = float("2.26483") + mean = float("1.12589") + std = float("0.378215") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [192] + dtype = "float32" + min_val = float("0.0024206") + max_val = float("0.0137874") + mean = float("0.00667971") + std = float("0.00197733") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [192] + dtype = "float32" + min_val = float("-0.156973") + max_val = float("0.138921") + mean = float("0.0395744") + std = float("0.0387458") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [192, 192, 3, 3] 
+ dtype = "float32" + min_val = float("-0.0297468") + max_val = float("0.061426") + mean = float("-0.000214963") + std = float("0.0038424") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [192] + dtype = "float32" + min_val = float("-2.52744") + max_val = float("-0.176752") + mean = float("-1.32827") + std = float("0.439675") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [192] + dtype = "float32" + min_val = float("0.648699") + max_val = float("1.69649") + mean = float("1.1831") + std = float("0.164999") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [192] + dtype = "float32" + min_val = float("0.0811482") + max_val = float("0.262619") + mean = float("0.148698") + std = float("0.0343621") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [192] + dtype = "float32" + min_val = float("-2.61184") + max_val = float("0.442873") + mean = float("-0.178286") + std = float("0.277058") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0353353") + max_val = float("0.0475915") + mean = float("-0.000311195") + std = float("0.00440006") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [192] + dtype = "float32" + min_val = float("-1.76929") + max_val = float("0.366299") + mean = float("-0.296944") + std = float("0.323923") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [192] + dtype = "float32" + min_val = float("1.65802e-05") + max_val = float("1.6632") + mean = float("0.314337") + std = float("0.259063") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [192] + dtype = "float32" + min_val = float("1.21154e-10") + max_val = float("0.00374479") + mean = float("0.000521225") + std = float("0.000463093") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [192] + dtype = "float32" + min_val = float("-0.052794") + max_val = float("0.0958051") + mean = float("0.0142202") + std = float("0.0171986") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0479075") + max_val = float("0.0473163") + mean = float("-0.000576931") + std = float("0.00422766") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [192] + dtype = "float32" + min_val = float("-1.76929") + max_val = float("0.366299") + mean = float("-0.296944") + std = float("0.323923") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [192] + dtype = "float32" + min_val = float("0.376374") + max_val = float("1.96514") + mean = float("1.05296") + std = float("0.33661") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [192] + dtype = "float32" + min_val = float("0.00319475") + max_val = float("0.0156312") + mean = float("0.0069418") + std = float("0.00223012") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [192] + dtype = "float32" + min_val = float("-0.138894") + max_val = float("0.117271") + mean = float("0.0385623") + std = float("0.0372869") + data = None + + +class 
Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0312578") + max_val = float("0.0469487") + mean = float("-0.000193498") + std = float("0.00398813") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.54978") + max_val = float("0.123303") + mean = float("-1.28641") + std = float("0.420062") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.65748") + max_val = float("1.74041") + mean = float("1.14789") + std = float("0.166573") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("0.0607768") + max_val = float("0.189401") + mean = float("0.103113") + std = float("0.0244967") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-1.77358") + max_val = float("0.232054") + mean = float("-0.181866") + std = float("0.205799") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.035669") + max_val = float("0.0555704") + mean = float("-0.000362505") + std = float("0.00447118") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-2.08733") + max_val = float("0.454803") + mean = float("-0.308331") + std = float("0.365117") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("4.74169e-06") + max_val = float("0.678347") + mean = float("0.185861") + std = float("0.145635") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("1.05153e-11") + max_val = float("0.00148836") + mean = float("0.000320402") + std = float("0.000237363") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.0594774") + max_val = float("0.0478072") + mean = float("0.00930335") + std = float("0.0146157") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0234346") + max_val = float("0.0490819") + mean = float("-0.000384362") + std = float("0.00376562") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.08733") + max_val = float("0.454803") + mean = float("-0.308331") + std = float("0.365117") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.39604") + max_val = float("1.9294") + mean = float("0.959521") + std = float("0.305318") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.00246386") + max_val = float("0.018286") + mean = float("0.00690414") + std = float("0.00229679") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-0.0963721") + max_val = float("0.139837") + mean 
= float("0.0375609") + std = float("0.0391308") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0382843") + max_val = float("0.0441675") + mean = float("-0.000189265") + std = float("0.00417807") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-2.82542") + max_val = float("-0.165072") + mean = float("-1.2819") + std = float("0.428551") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.761717") + max_val = float("1.48005") + mean = float("1.12799") + std = float("0.136186") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("0.0437385") + max_val = float("0.134822") + mean = float("0.0772096") + std = float("0.0171041") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-1.32712") + max_val = float("0.174196") + mean = float("-0.153249") + std = float("0.161134") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0470685") + max_val = float("0.0540297") + mean = float("-0.000367007") + std = float("0.00453239") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.21591") + max_val = float("0.408817") + mean = float("-0.268583") + std = float("0.326001") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("1.13966e-07") + max_val = float("0.671077") + mean = float("0.16575") + std = float("0.128785") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("4.36695e-14") + max_val = float("0.00121919") + mean = float("0.000288819") + std = float("0.000228404") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0449358") + max_val = float("0.0625559") + mean = float("0.00941777") + std = float("0.0159613") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0445329") + max_val = float("0.0469969") + mean = float("-0.000357929") + std = float("0.00375663") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-1.21591") + max_val = float("0.408817") + mean = float("-0.268583") + std = float("0.326001") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.342519") + max_val = float("1.57267") + mean = float("0.856133") + std = float("0.266894") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.00236989") + max_val = float("0.012972") + mean = float("0.0063056") + std = float("0.00193128") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = 
"float32" + min_val = float("-0.108302") + max_val = float("0.156299") + mean = float("0.0414147") + std = float("0.0423383") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0362289") + max_val = float("0.0435947") + mean = float("-0.000180448") + std = float("0.00414403") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-2.55484") + max_val = float("-0.225384") + mean = float("-1.29607") + std = float("0.412734") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.652885") + max_val = float("1.46467") + mean = float("1.10229") + std = float("0.128509") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("0.0350575") + max_val = float("0.115971") + mean = float("0.0597254") + std = float("0.016366") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.464018") + max_val = float("0.240961") + mean = float("-0.124572") + std = float("0.139434") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0705414") + max_val = float("0.0741061") + mean = float("-0.000347681") + std = float("0.00450759") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.25114") + max_val = float("0.496985") + mean = float("-0.195696") + std = float("0.286609") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("6.01345e-05") + max_val = float("1.52688") + mean = float("0.227253") + std = float("0.219405") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("2.92882e-09") + max_val = float("0.00984128") + mean = float("0.00070971") + std = float("0.00095936") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0592165") + max_val = float("0.107792") + mean = float("0.0157093") + std = float("0.0208809") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.055296") + max_val = float("0.0451825") + mean = float("-0.000685397") + std = float("0.0046336") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-1.25114") + max_val = float("0.496985") + mean = float("-0.195696") + std = float("0.286609") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.361257") + max_val = float("1.38672") + mean = float("0.77414") + std = float("0.223602") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.00343985") + max_val = float("0.024217") + mean = float("0.0103742") + std = float("0.00341779") + data = None + + +class 
Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-0.109521") + max_val = float("0.160426") + mean = float("0.0591583") + std = float("0.050798") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.052784") + max_val = float("0.0464242") + mean = float("-0.000289158") + std = float("0.00403919") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.93294") + max_val = float("-0.217802") + mean = float("-1.1784") + std = float("0.32417") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.740846") + max_val = float("1.58017") + mean = float("1.10678") + std = float("0.141855") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("0.0251568") + max_val = float("0.110892") + mean = float("0.053406") + std = float("0.0180815") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-1.08676") + max_val = float("0.26685") + mean = float("-0.082947") + std = float("0.142479") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0681332") + max_val = float("0.0849403") + mean = float("-0.000286395") + std = float("0.00425012") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.8666") + max_val = float("1.61594") + mean = float("-0.0521098") + std = float("0.754176") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.385687") + max_val = float("2.01902") + mean = float("0.957509") + std = float("0.228151") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.0120072") + max_val = float("0.108766") + mean = float("0.0349151") + std = float("0.0150818") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.256396") + max_val = float("0.483456") + mean = float("-0.0666677") + std = float("0.0824818") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.115145") + max_val = float("0.117107") + mean = float("-0.000804966") + std = float("0.00946446") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.99057") + max_val = float("1.19128") + mean = float("0.0645638") + std = float("0.651107") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.897146") + max_val = float("5.48142") + mean = float("1.93592") + std = float("0.914038") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.00686488") + max_val = float("0.0692764") + mean = float("0.0235044") 
+ std = float("0.0082593") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.189901") + max_val = float("0.13753") + mean = float("-0.032472") + std = float("0.0640675") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0766857") + max_val = float("0.113871") + mean = float("-0.00064456") + std = float("0.00873627") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [384] + dtype = "float32" + min_val = float("-2.88369") + max_val = float("1.29531") + mean = float("-0.324976") + std = float("0.576845") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [384] + dtype = "float32" + min_val = float("0.63927") + max_val = float("2.42129") + mean = float("1.16101") + std = float("0.261198") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [384] + dtype = "float32" + min_val = float("0.0160522") + max_val = float("0.14684") + mean = float("0.035732") + std = float("0.018691") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [384] + dtype = "float32" + min_val = float("-0.444625") + max_val = float("0.224227") + mean = float("0.0242203") + std = float("0.0770511") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0733418") + max_val = float("0.082808") + mean = float("-0.000120386") + std = float("0.00455694") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [256] + dtype = "float32" + min_val = float("-2.17306") + max_val = float("1.35014") + mean = float("-0.983635") + std = float("0.560538") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [256] + dtype = "float32" + min_val = float("0.525409") + max_val = float("1.72142") + mean = float("1.09457") + std = float("0.181495") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [256] + dtype = "float32" + min_val = float("0.00327494") + max_val = float("0.0357364") + mean = float("0.00983111") + std = float("0.00444547") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [256] + dtype = "float32" + min_val = float("-0.281973") + max_val = float("0.212193") + mean = float("-0.0584459") + std = float("0.0749526") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.228144") + max_val = float("0.18937") + mean = float("-0.000934833") + std = float("0.0150912") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-0.0160867") + max_val = float("0.00291467") + mean = float("-0.00513708") + std = float("0.00438686") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.301238") + max_val = float("0.279827") + mean = float("-0.0036602") + std = float("0.0109363") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [96] + dtype = "float32" + min_val = 
float("-1.88507") + max_val = float("0.408619") + mean = float("-0.264055") + std = float("0.417878") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [96] + dtype = "float32" + min_val = float("0.075817") + max_val = float("3.31686") + mean = float("0.585907") + std = float("0.696242") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [96] + dtype = "float32" + min_val = float("9.73872e-05") + max_val = float("0.00192041") + mean = float("0.000475576") + std = float("0.00034983") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [96] + dtype = "float32" + min_val = float("-0.0602378") + max_val = float("0.0724714") + mean = float("0.00884973") + std = float("0.025982") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0575483") + max_val = float("0.0998151") + mean = float("-0.00071542") + std = float("0.00806398") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [96] + dtype = "float32" + min_val = float("-1.88507") + max_val = float("0.408619") + mean = float("-0.264055") + std = float("0.417878") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [96] + dtype = "float32" + min_val = float("0.328165") + max_val = float("5.49297") + mean = float("1.04367") + std = float("0.907579") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [96] + dtype = "float32" + min_val = float("0.00063499") + max_val = float("0.0106201") + mean = float("0.00347763") + std = float("0.00169617") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [96] + dtype = "float32" + min_val = float("-0.207875") + max_val = float("0.204035") + mean = float("0.0224973") + std = float("0.0704149") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0433834") + max_val = float("0.0664612") + mean = float("-0.000206276") + std = float("0.00585214") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.41446") + max_val = float("-0.0351507") + mean = float("-1.27023") + std = float("0.444263") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.478655") + max_val = float("1.73921") + mean = float("0.924149") + std = float("0.17682") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("0.0485854") + max_val = float("0.347665") + mean = float("0.116634") + std = float("0.0397481") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-4.59321") + max_val = float("0.829715") + mean = float("-0.207516") + std = float("0.572839") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.143913") + max_val = float("0.105902") + mean = float("-0.000391481") + std = float("0.0073659") + data = None + + +class Program_weight_tensor_parameter_526: + name = 
"parameter_526" + shape = [96] + dtype = "float32" + min_val = float("-1.40027") + max_val = float("0.433022") + mean = float("-0.187531") + std = float("0.338824") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [96] + dtype = "float32" + min_val = float("0.00378303") + max_val = float("1.87179") + mean = float("0.411102") + std = float("0.370104") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [96] + dtype = "float32" + min_val = float("7.53826e-06") + max_val = float("0.00354419") + mean = float("0.000777177") + std = float("0.000707669") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [96] + dtype = "float32" + min_val = float("-0.0550316") + max_val = float("0.0548868") + mean = float("0.00763059") + std = float("0.0197358") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0516744") + max_val = float("0.091015") + mean = float("-0.000705691") + std = float("0.00756381") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [96] + dtype = "float32" + min_val = float("-1.40027") + max_val = float("0.433022") + mean = float("-0.187531") + std = float("0.338824") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [96] + dtype = "float32" + min_val = float("0.340683") + max_val = float("2.2745") + mean = float("0.862345") + std = float("0.431657") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [96] + dtype = "float32" + min_val = float("0.00255349") + max_val = float("0.0155132") + mean = float("0.00548608") + std = float("0.00239986") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [96] + dtype = "float32" + min_val = float("-0.17823") + max_val = float("0.137982") + mean = float("0.0322525") + std = float("0.046662") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0674981") + max_val = float("0.0570277") + mean = float("-0.00032752") + std = float("0.006034") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [96] + dtype = "float32" + min_val = float("-3.35834") + max_val = float("0.28772") + mean = float("-1.21666") + std = float("0.561104") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [96] + dtype = "float32" + min_val = float("0.416818") + max_val = float("1.89857") + mean = float("1.01497") + std = float("0.241794") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [96] + dtype = "float32" + min_val = float("0.0432234") + max_val = float("0.174066") + mean = float("0.0777276") + std = float("0.0209529") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [96] + dtype = "float32" + min_val = float("-1.16964") + max_val = float("0.598494") + mean = float("-0.139571") + std = float("0.296264") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.159737") + max_val = float("0.152684") + mean = float("-0.000521222") + std = float("0.00746838") + data = None + + 
+class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [96] + dtype = "float32" + min_val = float("-1.27259") + max_val = float("0.580805") + mean = float("-0.160154") + std = float("0.285669") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [96] + dtype = "float32" + min_val = float("1.78897e-05") + max_val = float("1.24216") + mean = float("0.291835") + std = float("0.195371") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [96] + dtype = "float32" + min_val = float("9.8845e-11") + max_val = float("0.00345974") + mean = float("0.000742287") + std = float("0.000570769") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [96] + dtype = "float32" + min_val = float("-0.0514278") + max_val = float("0.0447507") + mean = float("0.00739843") + std = float("0.0166169") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0512371") + max_val = float("0.058203") + mean = float("-0.000637488") + std = float("0.00765631") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [96] + dtype = "float32" + min_val = float("-1.27259") + max_val = float("0.580805") + mean = float("-0.160154") + std = float("0.285669") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [96] + dtype = "float32" + min_val = float("0.228783") + max_val = float("1.64106") + mean = float("0.722497") + std = float("0.264866") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [96] + dtype = "float32" + min_val = float("0.00213447") + max_val = float("0.0186589") + mean = float("0.00667835") + std = float("0.00312485") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [96] + dtype = "float32" + min_val = float("-0.0988952") + max_val = float("0.152065") + mean = float("0.0248752") + std = float("0.0430747") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0680949") + max_val = float("0.071738") + mean = float("-0.000264866") + std = float("0.00622902") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-3.6614") + max_val = float("0.205968") + mean = float("-1.16919") + std = float("0.589988") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.512145") + max_val = float("2.1309") + mean = float("1.02688") + std = float("0.241984") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("0.0345738") + max_val = float("0.101406") + mean = float("0.0616151") + std = float("0.0129723") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-1.10731") + max_val = float("0.608096") + mean = float("-0.0544066") + std = float("0.26029") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.107257") + max_val = float("0.139746") + mean = 
float("-0.000428188") + std = float("0.00759317") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-0.931411") + max_val = float("0.406494") + mean = float("-0.216229") + std = float("0.275567") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("2.7851e-05") + max_val = float("1.34666") + mean = float("0.301153") + std = float("0.211126") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("2.44855e-10") + max_val = float("0.00276123") + mean = float("0.000787333") + std = float("0.000529563") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.0511623") + max_val = float("0.0677894") + mean = float("0.00865361") + std = float("0.0199128") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0714816") + max_val = float("0.0528174") + mean = float("-0.00080202") + std = float("0.00805684") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-0.931411") + max_val = float("0.406494") + mean = float("-0.216229") + std = float("0.275567") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.141158") + max_val = float("1.7851") + mean = float("0.707756") + std = float("0.28548") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.0020027") + max_val = float("0.0193217") + mean = float("0.00726263") + std = float("0.00306558") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-0.118899") + max_val = float("0.18858") + mean = float("0.0361517") + std = float("0.0485395") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0623542") + max_val = float("0.0588107") + mean = float("-0.000362252") + std = float("0.00634091") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-3.19799") + max_val = float("0.0441781") + mean = float("-1.1066") + std = float("0.512528") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.556846") + max_val = float("1.70748") + mean = float("0.988099") + std = float("0.180513") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("0.0248385") + max_val = float("0.353521") + mean = float("0.0516013") + std = float("0.0336755") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-3.41151") + max_val = float("0.407488") + mean = float("-0.0719755") + std = float("0.386288") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.0620146") + max_val = float("0.0633394") + mean = float("-0.000448888") + std = float("0.00731171") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-0.974533") + max_val = float("0.654029") + mean = float("-0.172382") + std = float("0.268825") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.0445082") + max_val = float("1.22303") + mean = float("0.290206") + std = float("0.187352") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("9.51178e-05") + max_val = float("0.00465114") + mean = float("0.00132027") + std = float("0.00087515") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0394857") + max_val = float("0.0556089") + mean = float("0.00847204") + std = float("0.0204605") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0591107") + max_val = float("0.084198") + mean = float("-0.000990773") + std = float("0.00883492") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-0.974533") + max_val = float("0.654028") + mean = float("-0.172382") + std = float("0.268825") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.207034") + max_val = float("1.47417") + mean = float("0.603427") + std = float("0.233027") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.00207586") + max_val = float("0.0251574") + mean = float("0.0107863") + std = float("0.00456461") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-0.110009") + max_val = float("0.147658") + mean = float("0.0285999") + std = float("0.050264") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070149") + max_val = float("0.0470186") + mean = float("-0.000362618") + std = float("0.00621856") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-3.53234") + max_val = float("0.173836") + mean = float("-1.04711") + std = float("0.571399") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.597874") + max_val = float("2.39872") + mean = float("1.0552") + std = float("0.205323") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("0.0185202") + max_val = float("0.112167") + mean = float("0.0429341") + std = float("0.0167429") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.443067") + max_val = float("0.447293") + mean = float("-0.0529377") + std = float("0.167364") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + 
shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.063488") + max_val = float("0.083032") + mean = float("-0.000377604") + std = float("0.00741283") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-0.693542") + max_val = float("0.601255") + mean = float("-0.0971658") + std = float("0.280506") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.0450765") + max_val = float("1.29061") + mean = float("0.303813") + std = float("0.204412") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.000388448") + max_val = float("0.0258625") + mean = float("0.00450419") + std = float("0.00431847") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0432957") + max_val = float("0.0269978") + mean = float("-0.00076361") + std = float("0.0138503") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.106785") + max_val = float("0.0776316") + mean = float("-0.00136769") + std = float("0.0107474") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-0.693542") + max_val = float("0.601255") + mean = float("-0.0971658") + std = float("0.280506") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.117864") + max_val = float("1.42918") + mean = float("0.534336") + std = float("0.282789") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.00952757") + max_val = float("0.13009") + mean = float("0.03226") + std = float("0.0210741") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.182934") + max_val = float("0.0815284") + mean = float("0.000193777") + std = float("0.0473769") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.09962") + max_val = float("0.0585965") + mean = float("-0.000472593") + std = float("0.0059293") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-2.17807") + max_val = float("0.517143") + mean = float("-0.845295") + std = float("0.487541") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.772289") + max_val = float("2.3588") + mean = float("1.25972") + std = float("0.22461") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("0.0176127") + max_val = float("0.188596") + mean = float("0.0443549") + std = float("0.0256738") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.978829") + max_val = float("0.469855") + mean = float("-0.0195263") + std = float("0.204639") + data = None + + +class 
Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.191231") + max_val = float("0.182087") + mean = float("-0.000212875") + std = float("0.00772769") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-3.43026") + max_val = float("1.98535") + mean = float("0.505807") + std = float("0.879788") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.234711") + max_val = float("2.355") + mean = float("0.651409") + std = float("0.28777") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.0114431") + max_val = float("0.213174") + mean = float("0.044291") + std = float("0.0324628") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.393192") + max_val = float("0.404316") + mean = float("-0.0345236") + std = float("0.111426") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.192313") + max_val = float("0.22459") + mean = float("-0.000851966") + std = float("0.016247") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-4.88256") + max_val = float("1.51768") + mean = float("0.319321") + std = float("1.02555") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.584545") + max_val = float("6.91604") + mean = float("1.75143") + std = float("1.27947") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.00474677") + max_val = float("0.161777") + mean = float("0.0349508") + std = float("0.0294242") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.246975") + max_val = float("0.361227") + mean = float("0.00345537") + std = float("0.112749") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.126516") + max_val = float("0.194387") + mean = float("-0.000239444") + std = float("0.0147307") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [192] + dtype = "float32" + min_val = float("-2.32829") + max_val = float("1.72494") + mean = float("-0.148208") + std = float("0.746996") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [192] + dtype = "float32" + min_val = float("0.622318") + max_val = float("2.80938") + mean = float("1.10417") + std = float("0.281992") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [192] + dtype = "float32" + min_val = float("0.0114845") + max_val = float("0.292646") + mean = float("0.0547784") + std = float("0.0457904") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [192] + dtype = "float32" + min_val = float("-0.529517") + max_val = float("0.374347") + mean = float("-0.0709328") + std = 
float("0.134011") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0839254") + max_val = float("0.110947") + mean = float("-0.000137724") + std = float("0.0074586") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [128] + dtype = "float32" + min_val = float("-2.79544") + max_val = float("1.93522") + mean = float("-0.737016") + std = float("0.682776") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [128] + dtype = "float32" + min_val = float("0.292294") + max_val = float("2.14222") + mean = float("1.05749") + std = float("0.241285") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [128] + dtype = "float32" + min_val = float("0.00168553") + max_val = float("0.0271169") + mean = float("0.00561304") + std = float("0.00326986") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [128] + dtype = "float32" + min_val = float("-0.250703") + max_val = float("0.2342") + mean = float("0.00559302") + std = float("0.0940671") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.161201") + max_val = float("0.174641") + mean = float("-0.00154525") + std = float("0.0227048") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-0.0177029") + max_val = float("0.000400182") + mean = float("-0.00757502") + std = float("0.00539744") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.328403") + max_val = float("0.138399") + mean = float("-0.00747706") + std = float("0.0181007") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0686131") + max_val = float("0.07745") + mean = float("-0.00194833") + std = float("0.0132318") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [48] + dtype = "float32" + min_val = float("0") 
+ max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0503947") + max_val = float("0.0591655") + mean = float("-0.000553561") + std = float("0.0104874") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0666035") + max_val = float("0.0834523") + mean = float("-0.000534285") + std = float("0.0120052") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0891029") + max_val = float("0.0714005") + mean = float("-0.00154811") + std = float("0.0144626") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0625883") + max_val = float("0.0562251") + mean = float("-0.000735936") + std = float("0.0107194") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [48] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.103104") + max_val = float("0.0787725") + mean = float("-0.000430431") + std = float("0.0124082") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0876833") + max_val = float("0.07366") + mean = float("-0.00206219") + std = float("0.0169761") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0801655") + max_val = float("0.0747513") + mean = float("-0.000602095") + std = float("0.0109849") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0959795") + max_val = float("0.103953") + mean = float("-0.000426331") + std = float("0.0129461") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val 
= float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.164541") + max_val = float("0.137911") + mean = float("-0.0025117") + std = float("0.0243799") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.160023") + max_val = float("0.155941") + mean = float("-0.00065559") + std = float("0.0231529") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("-3.44006") + max_val = float("3.33808") + mean = float("0.314835") + std = float("1.15264") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("0.89902") + max_val = float("4.778") + mean = float("1.94257") + std = float("0.714568") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96] + dtype = "float32" + min_val = float("0.835229") + max_val = float("17.8402") + mean = float("2.94892") + std = float("2.46279") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-2.00554") + max_val = float("2.58848") + mean = float("-0.340428") + std = float("0.699341") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.120992") + max_val = float("0.107231") + mean = float("-0.000508647") + std = float("0.0129212") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [64, 32, 3, 3] + dtype = "float32" + 
min_val = float("-0.14708") + max_val = float("0.16461") + mean = float("-0.000709544") + std = float("0.0199361") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.249357") + max_val = float("0.202774") + mean = float("-0.000201137") + std = float("0.0258896") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.28905") + max_val = float("0.284627") + mean = float("-0.00254561") + std = float("0.0658851") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt index 82d83ca0b..f3ee1e92c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/graph_hash.txt @@ -1 +1 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file +ccef89c785301336e92928a91f66ad752e4ce87933446e15f2082e86ac6642d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py index d890c95a4..38dba74a4 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/input_meta.py @@ -1,19 +1,121 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] + shape = [2, 29, 8400] dtype = "float32" - data = [0.335797] + max_val = float("1.0") + mean = float("0.000933908") + std = float("0.0305456") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "float32" - data = [1.17666] + shape = [2, 1] + dtype = "int32" + data = [0, 1] class Program_weight_tensor_data_2: name = "data_2" - shape = [] + shape = [2, 29, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 8400] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0270833") + std = float("0.162326") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 29, 4] + dtype = "float32" + max_val = float("640.0") + mean = float("225.221") + std = float("222.396") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 29, 8400] + dtype = "float32" + max_val = float("0.332125") + mean = float("6.7312e-05") + std = float("0.00215112") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 29, 8400] dtype = "float32" - data = [0.769235] + max_val = float("0.941182") + mean = float("0.00754317") + std = float("0.0517711") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py index 4cccb2b8e..08f9efbb7 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_1/model.py @@ -5,39 +5,171 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 + # pd_op.argmax: (2x8400xi64) <- (2x29x8400xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 # pd_op.full: (1xf32) <- () full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + [1], float("29"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) - del data_0 + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 - # pd_op.add: (xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # pd_op.full: (1xf32) <- () + # pd_op.add: (2x8400xi64) <- (2x8400xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (58xi32) <- (2x29x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (16800xi64) <- (2x8400xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) - del data_1 + # pd_op.gather: (16800xi32) <- (58xi32, 16800xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 8400] + + # pd_op.reshape: (2x8400xi32) <- (16800xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # 
pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x8400xb) <- (2x8400xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x8400xi32) <- (2x8400xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + + # pd_op.where: (2x8400xi32) <- (2x8400xb, 2x8400xi32, 2x8400xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (58x4xf32) <- (2x29x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (16800x4xf32) <- (58x4xf32, 16800xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 8400, 4] + + # pd_op.reshape: (2x8400x4xf32) <- (16800x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x8400x2xf32) <- (2x8400xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (1xi64) <- () + full_6 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x8400x1xf32) <- (2x8400x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.scale: (2x29x1xf32) <- (2x29x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) + del full_4, max_0 + + # pd_op.divide: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (2x8400xf32) <- (2x29x8400xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (2x8400x1xf32) <- (2x8400xf32, 1xi64) + 
unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + # pd_op.multiply: (2x8400x1xf32) <- (2x8400x1xf32, 2x8400x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 - return add_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt deleted file mode 100644 index 7b5429dac..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -d079f80b47d3627f0f874f2ab967abc6e350f273029f03bcf40572198642b666 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py deleted file mode 100644 index 30a284ba9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/input_meta.py +++ /dev/null @@ -1,9 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 3, 384, 384] - dtype = "float32" - min_val = float("-2.1179") - max_val = float("2.64") - mean = float("0.147246") - std = float("1.17749") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py deleted file mode 100644 index 36853978e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_10/model.py +++ /dev/null @@ -1,7396 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - 
parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - 
parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - 
parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - data_0, - ): - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_0, 
parameter_697 - - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_696, - parameter_695, - parameter_694, - parameter_693, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_693, parameter_694, parameter_695, parameter_696 - - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_692 - - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_691, - parameter_690, - parameter_689, - parameter_688, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_688, parameter_689, parameter_690, parameter_691 - - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_687 - - # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_686, - parameter_685, - parameter_684, - parameter_683, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_683, parameter_684, parameter_685, parameter_686 - - # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_682 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_681, - parameter_680, - parameter_679, - parameter_678, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_678, parameter_679, parameter_680, parameter_681 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: 
(2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_677 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_676, - parameter_675, - parameter_674, - parameter_673, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_673, parameter_674, parameter_675, parameter_676 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_672 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_671, - parameter_670, - parameter_669, - parameter_668, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_668, parameter_669, parameter_670, parameter_671 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_667 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_666, - parameter_665, - parameter_664, - parameter_663, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_663, parameter_664, parameter_665, parameter_666 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_662 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_661, - parameter_660, - parameter_659, - parameter_658, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
parameter_658, parameter_659, parameter_660, parameter_661 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_657 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_656, - parameter_655, - parameter_654, - parameter_653, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_653, parameter_654, parameter_655, parameter_656 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_8 = paddle._C_ops.swish(add_0) - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_652 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_651, - parameter_650, - parameter_649, - parameter_648, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_648, parameter_649, parameter_650, parameter_651 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_647 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_646, - parameter_645, - parameter_644, - parameter_643, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_643, parameter_644, parameter_645, parameter_646 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_642 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_641, - parameter_640, - parameter_639, 
- parameter_638, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_638, parameter_639, parameter_640, parameter_641 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(add_2) - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_10) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_637 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_636, - parameter_635, - parameter_634, - parameter_633, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_633, parameter_634, parameter_635, parameter_636 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_632 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_631, - parameter_630, - parameter_629, - parameter_628, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_628, parameter_629, parameter_630, parameter_631 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_627 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_626, - parameter_625, - parameter_624, - parameter_623, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_623, parameter_624, parameter_625, parameter_626 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_12 = paddle._C_ops.swish(add_4) - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_5 = paddle._C_ops.add(add_3, swish_12) - - # pd_op.full: 
(1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_3 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_4 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_5 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_6 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_7 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_0 - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_0 = [swish_5, add_5] - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_13 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_14 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_15 = full_int_array_0 - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_622 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) - del parameter_621 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_0, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615 - - # 
pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__102) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_605 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_604, - parameter_603, - parameter_602, - parameter_601, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_601, parameter_602, parameter_603, parameter_604 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__108) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_600 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_599, - parameter_598, - parameter_597, - parameter_596, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_596, parameter_597, parameter_598, parameter_599 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_17 = 
paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_17, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_595 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_594, - parameter_593, - parameter_592, - parameter_591, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_591, parameter_592, parameter_593, parameter_594 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_590 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_589, - parameter_588, - parameter_587, - parameter_586, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_586, parameter_587, parameter_588, parameter_589 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(add_7) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_8 = paddle._C_ops.add(swish_16, swish_18) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_585 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_584, - parameter_583, - parameter_582, - parameter_581, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_581, parameter_582, parameter_583, parameter_584 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_580 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_579, - parameter_578, - 
parameter_577, - parameter_576, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_576, parameter_577, parameter_578, parameter_579 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_575 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_574, - parameter_573, - parameter_572, - parameter_571, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_571, parameter_572, parameter_573, parameter_574 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(add_9) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_20) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_570 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_569, - parameter_568, - parameter_567, - parameter_566, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_566, parameter_567, parameter_568, parameter_569 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(batch_norm__150) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_565 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_564, - parameter_563, - parameter_562, - parameter_561, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_561, parameter_562, parameter_563, parameter_564 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_560 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_559, - parameter_558, - parameter_557, - parameter_556, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_556, parameter_557, parameter_558, parameter_559 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(add_11) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_22) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_555 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_554, - parameter_553, - parameter_552, - parameter_551, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_551, parameter_552, parameter_553, parameter_554 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_550 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_549, - parameter_548, - parameter_547, - parameter_546, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_546, parameter_547, parameter_548, parameter_549 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_545 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_544, - parameter_543, - parameter_542, - parameter_541, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_541, parameter_542, parameter_543, parameter_544 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_13 = 
paddle._C_ops.add(batch_norm__174, batch_norm__180) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(add_13) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_14 = paddle._C_ops.add(add_12, swish_24) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_540 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_539, - parameter_538, - parameter_537, - parameter_536, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_536, parameter_537, parameter_538, parameter_539 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_535 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_534, - parameter_533, - parameter_532, - parameter_531, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_531, parameter_532, parameter_533, parameter_534 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_530 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_529, - parameter_528, - parameter_527, - parameter_526, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_526, parameter_527, parameter_528, parameter_529 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_26 = paddle._C_ops.swish(add_15) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_16 = paddle._C_ops.add(add_14, swish_26) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_525 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 
96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_524, - parameter_523, - parameter_522, - parameter_521, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_521, parameter_522, parameter_523, parameter_524 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_27 = paddle._C_ops.swish(batch_norm__204) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_520 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_519, - parameter_518, - parameter_517, - parameter_516, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_516, parameter_517, parameter_518, parameter_519 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_515 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_514, - parameter_513, - parameter_512, - parameter_511, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_511, parameter_512, parameter_513, parameter_514 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_28 = paddle._C_ops.swish(add_17) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_18 = paddle._C_ops.add(add_16, swish_28) - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_1 = [swish_15, add_18] - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_510 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) - del parameter_509 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- 
(2x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_1, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__222) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_493 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - 
batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_492, - parameter_491, - parameter_490, - parameter_489, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_489, parameter_490, parameter_491, parameter_492 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_488 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_487, - parameter_486, - parameter_485, - parameter_484, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_484, parameter_485, parameter_486, parameter_487 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_483 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_482, - parameter_481, - parameter_480, - parameter_479, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_479, parameter_480, parameter_481, parameter_482 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_478 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_477, - parameter_476, - parameter_475, - parameter_474, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_474, parameter_475, parameter_476, parameter_477 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(add_20) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_21 = paddle._C_ops.add(swish_32, 
swish_34) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_473 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_472, - parameter_471, - parameter_470, - parameter_469, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_469, parameter_470, parameter_471, parameter_472 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(batch_norm__264) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_468 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_467, - parameter_466, - parameter_465, - parameter_464, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_464, parameter_465, parameter_466, parameter_467 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_463 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_462, - parameter_461, - parameter_460, - parameter_459, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_459, parameter_460, parameter_461, parameter_462 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(add_22) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_23 = paddle._C_ops.add(add_21, swish_36) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_458 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_49, - parameter_457, - parameter_456, - parameter_455, - parameter_454, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_454, parameter_455, parameter_456, parameter_457 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_453 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_452, - parameter_451, - parameter_450, - parameter_449, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_449, parameter_450, parameter_451, parameter_452 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_448 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_447, - parameter_446, - parameter_445, - parameter_444, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_444, parameter_445, parameter_446, parameter_447 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(add_24) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_25 = paddle._C_ops.add(add_23, swish_38) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_443 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_442, - parameter_441, - parameter_440, - parameter_439, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_439, parameter_440, parameter_441, parameter_442 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_39 = paddle._C_ops.swish(batch_norm__300) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - 
swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_438 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_437, - parameter_436, - parameter_435, - parameter_434, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_434, parameter_435, parameter_436, parameter_437 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_39, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_433 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_432, - parameter_431, - parameter_430, - parameter_429, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_429, parameter_430, parameter_431, parameter_432 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_40 = paddle._C_ops.swish(add_26) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_27 = paddle._C_ops.add(add_25, swish_40) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_428 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_427, - parameter_426, - parameter_425, - parameter_424, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_424, parameter_425, parameter_426, parameter_427 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_41 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_423 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_422, - parameter_421, - parameter_420, - parameter_419, - False, - float("0.9"), - float("1e-05"), - 
"NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_419, parameter_420, parameter_421, parameter_422 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_418 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_417, - parameter_416, - parameter_415, - parameter_414, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_414, parameter_415, parameter_416, parameter_417 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(add_28) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_29 = paddle._C_ops.add(add_27, swish_42) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_413 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_412, - parameter_411, - parameter_410, - parameter_409, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_409, parameter_410, parameter_411, parameter_412 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(batch_norm__336) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_408 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_407, - parameter_406, - parameter_405, - parameter_404, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_404, parameter_405, parameter_406, parameter_407 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_403 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 
192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_402, - parameter_401, - parameter_400, - parameter_399, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_399, parameter_400, parameter_401, parameter_402 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_44 = paddle._C_ops.swish(add_30) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_31 = paddle._C_ops.add(add_29, swish_44) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_2 = [swish_31, add_31] - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_398 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) - del parameter_397 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_2, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_396 - - # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_395, - parameter_394, - parameter_393, - parameter_392, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_392, parameter_393, parameter_394, parameter_395 - - # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__354) - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_391 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - 
parameter_390, - parameter_389, - parameter_388, - parameter_387, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_387, parameter_388, parameter_389, parameter_390 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__360) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_386 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_385, - parameter_384, - parameter_383, - parameter_382, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_382, parameter_383, parameter_384, parameter_385 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_381 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_380, - parameter_379, - parameter_378, - parameter_377, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_377, parameter_378, parameter_379, parameter_380 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__372) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_376 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_375, - parameter_374, - parameter_373, - parameter_372, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_372, parameter_373, parameter_374, parameter_375 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_49 = paddle._C_ops.swish(batch_norm__378) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_371 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 
384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_370, - parameter_369, - parameter_368, - parameter_367, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_367, parameter_368, parameter_369, parameter_370 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_366 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_365, - parameter_364, - parameter_363, - parameter_362, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_362, parameter_363, parameter_364, parameter_365 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_50 = paddle._C_ops.swish(add_33) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_34 = paddle._C_ops.add(swish_48, swish_50) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_361 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_360, - parameter_359, - parameter_358, - parameter_357, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_357, parameter_358, parameter_359, parameter_360 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_51 = paddle._C_ops.swish(batch_norm__396) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_356 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_355, - parameter_354, - parameter_353, - parameter_352, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
parameter_352, parameter_353, parameter_354, parameter_355 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_351 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_350, - parameter_349, - parameter_348, - parameter_347, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_347, parameter_348, parameter_349, parameter_350 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_52 = paddle._C_ops.swish(add_35) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_36 = paddle._C_ops.add(add_34, swish_52) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_346 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_345, - parameter_344, - parameter_343, - parameter_342, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_342, parameter_343, parameter_344, parameter_345 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_53 = paddle._C_ops.swish(batch_norm__414) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_341 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_340, - parameter_339, - parameter_338, - parameter_337, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_337, parameter_338, parameter_339, parameter_340 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_53, parameter_336, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_336 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda 
x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_335, - parameter_334, - parameter_333, - parameter_332, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_332, parameter_333, parameter_334, parameter_335 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_54 = paddle._C_ops.swish(add_37) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_38 = paddle._C_ops.add(add_36, swish_54) - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_3 = [swish_47, add_38] - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_331 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_330, full_int_array_1) - del full_int_array_1, parameter_330 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - - # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) - - # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_3, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_329 - - # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_328, - parameter_327, - parameter_326, - parameter_325, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_325, parameter_326, parameter_327, parameter_328 - - # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) - swish_55 = paddle._C_ops.swish(batch_norm__432) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - False, - float("0.9"), - float("1e-05"), - "NCHW", 
- False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__438) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_319 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_57 = paddle._C_ops.swish(batch_norm__444) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_58 = paddle._C_ops.swish(batch_norm__450) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_304 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, 
f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_59 = paddle._C_ops.swish(add_40) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_60 = paddle._C_ops.swish(batch_norm__468) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_61 = paddle._C_ops.swish(add_41) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = 
[5, 5] - - # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_61, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_61, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_61, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_62 = paddle._C_ops.swish(batch_norm__486) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_63 = paddle._C_ops.swish(batch_norm__492) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - 
batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_64 = paddle._C_ops.swish(add_42) - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_5 = [swish_56, swish_64] - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_65 = paddle._C_ops.swish(batch_norm__510) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, 
None, None), - ) - del parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_66 = paddle._C_ops.swish(batch_norm__516) - - # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_66, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) - combine_6 = [nearest_interp_0, swish_45] - - # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_254 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_67 = paddle._C_ops.swish(batch_norm__522) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_68 = paddle._C_ops.swish(batch_norm__528) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (2x192x-1x-1xf32) <- 
(2x192x-1x-1xf32) - swish_69 = paddle._C_ops.swish(batch_norm__534) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_239 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_70 = paddle._C_ops.swish(add_43) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_71 = paddle._C_ops.swish(batch_norm__552) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_223, - parameter_222, - parameter_221, - 
parameter_220, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_72 = paddle._C_ops.swish(add_44) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_73 = paddle._C_ops.swish(batch_norm__570) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - 
batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_74 = paddle._C_ops.swish(add_45) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_7 = [swish_67, swish_74] - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_75 = paddle._C_ops.swish(batch_norm__588) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_76 = paddle._C_ops.swish(batch_norm__594) - - # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_76, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) - combine_8 = [nearest_interp_1, swish_29] - - # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_104 = 
paddle._C_ops.conv2d( - concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_77 = paddle._C_ops.swish(batch_norm__600) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_78 = paddle._C_ops.swish(batch_norm__606) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_79 = paddle._C_ops.swish(batch_norm__612) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, 
parameter_171, parameter_172, parameter_173 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_80 = paddle._C_ops.swish(add_46) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_81 = paddle._C_ops.swish(batch_norm__630) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - 
False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_82 = paddle._C_ops.swish(add_47) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_83 = paddle._C_ops.swish(batch_norm__648) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_84 = paddle._C_ops.swish(add_48) - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_9 = [swish_77, swish_84] - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # 
pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_85 = paddle._C_ops.swish(batch_norm__666) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_86 = paddle._C_ops.swish(batch_norm__672) - - # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) - combine_10 = [swish_86, swish_75] - - # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_87 = paddle._C_ops.swish(batch_norm__678) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_118 = paddle._C_ops.conv2d( - concat_10, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - 
batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_88 = paddle._C_ops.swish(batch_norm__684) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_89 = paddle._C_ops.swish(batch_norm__690) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_121, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_90 = paddle._C_ops.swish(add_49) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 
192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_91 = paddle._C_ops.swish(batch_norm__708) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_92 = paddle._C_ops.swish(add_50) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__730, - batch_norm__731, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else 
(out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_93 = paddle._C_ops.swish(batch_norm__726) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_94 = paddle._C_ops.swish(add_51) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_11 = [swish_87, swish_94] - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_128, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_95 = paddle._C_ops.swish(batch_norm__744) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_96 = paddle._C_ops.swish(batch_norm__750) - - # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) - combine_12 = [swish_96, swish_65] - - # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_12, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_97 = paddle._C_ops.swish(batch_norm__756) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_98 = paddle._C_ops.swish(batch_norm__762) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_48, - parameter_47, - 
parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, parameter_48 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_99 = paddle._C_ops.swish(batch_norm__768) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_39 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_100 = paddle._C_ops.swish(add_52) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_101 = paddle._C_ops.swish(batch_norm__786) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_28, - parameter_27, - parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_102 = paddle._C_ops.swish(add_53) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_138, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_15, parameter_16, parameter_17, parameter_18 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_103 = paddle._C_ops.swish(batch_norm__804) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_103, 
parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_140, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, parameter_8 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_104 = paddle._C_ops.swish(add_54) - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_13 = [swish_97, swish_104] - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__822) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_13, - add_14, - add_15, - add_16, - add_17, - add_18, - add_2, - add_20, - add_21, - add_22, - add_23, - add_24, - add_25, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_31, - add_33, - add_34, - add_35, - add_36, - add_37, - add_38, - add_4, - add_40, - add_41, - add_42, - add_43, - add_44, - add_45, - add_46, - add_47, - add_48, - add_49, - add_5, - add_50, - add_51, - add_52, - add_53, - add_54, - add_7, - add_8, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - 
batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - 
batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - batch_norm__438, - batch_norm__439, - batch_norm__44, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - batch_norm__45, - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__46, - batch_norm__460, - batch_norm__461, - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - batch_norm__468, - 
batch_norm__469, - batch_norm__47, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - batch_norm__48, - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__49, - batch_norm__490, - batch_norm__491, - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - batch_norm__498, - batch_norm__499, - batch_norm__5, - batch_norm__50, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - batch_norm__51, - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__52, - batch_norm__520, - batch_norm__521, - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - batch_norm__528, - batch_norm__529, - batch_norm__53, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - batch_norm__54, - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__55, - batch_norm__550, - batch_norm__551, - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - batch_norm__558, - batch_norm__559, - batch_norm__56, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - batch_norm__57, - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__58, - batch_norm__580, - batch_norm__581, - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - batch_norm__588, - batch_norm__589, - batch_norm__59, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - batch_norm__6, - batch_norm__60, - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__61, - batch_norm__610, - batch_norm__611, - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - batch_norm__618, - batch_norm__619, - batch_norm__62, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - batch_norm__63, - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - batch_norm__636, - batch_norm__637, - 
batch_norm__638, - batch_norm__639, - batch_norm__64, - batch_norm__640, - batch_norm__641, - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - batch_norm__648, - batch_norm__649, - batch_norm__65, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - batch_norm__66, - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__67, - batch_norm__670, - batch_norm__671, - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - batch_norm__678, - batch_norm__679, - batch_norm__68, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - batch_norm__69, - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__7, - batch_norm__70, - batch_norm__700, - batch_norm__701, - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - batch_norm__708, - batch_norm__709, - batch_norm__71, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - batch_norm__72, - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__73, - batch_norm__730, - batch_norm__731, - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - batch_norm__738, - batch_norm__739, - batch_norm__74, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - batch_norm__75, - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__76, - batch_norm__760, - batch_norm__761, - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - batch_norm__768, - batch_norm__769, - batch_norm__77, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - batch_norm__78, - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__79, - batch_norm__790, - batch_norm__791, - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - batch_norm__798, - batch_norm__799, - batch_norm__8, - batch_norm__80, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - batch_norm__804, - batch_norm__805, - batch_norm__806, - 
batch_norm__807, - batch_norm__808, - batch_norm__809, - batch_norm__81, - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__82, - batch_norm__820, - batch_norm__821, - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_10, - concat_11, - concat_12, - concat_13, - concat_2, - concat_3, - concat_4, - concat_5, - concat_6, - concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_100, - conv2d_101, - conv2d_102, - conv2d_103, - conv2d_104, - conv2d_105, - conv2d_106, - conv2d_107, - conv2d_108, - conv2d_109, - conv2d_11, - conv2d_110, - conv2d_111, - conv2d_112, - conv2d_113, - conv2d_114, - conv2d_115, - conv2d_116, - conv2d_117, - conv2d_118, - conv2d_119, - conv2d_12, - conv2d_120, - conv2d_121, - conv2d_122, - conv2d_123, - conv2d_124, - conv2d_125, - conv2d_126, - conv2d_127, - conv2d_128, - conv2d_129, - conv2d_13, - conv2d_130, - conv2d_131, - conv2d_132, - conv2d_133, - conv2d_134, - conv2d_135, - conv2d_136, - conv2d_137, - conv2d_138, - conv2d_139, - conv2d_14, - conv2d_140, - conv2d_141, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_76, - conv2d_77, - conv2d_78, - conv2d_79, - conv2d_8, - conv2d_80, - conv2d_81, - conv2d_82, - conv2d_83, - conv2d_84, - conv2d_85, - conv2d_86, - conv2d_87, - conv2d_88, - conv2d_89, - conv2d_9, - conv2d_90, - conv2d_91, - conv2d_92, - conv2d_93, - conv2d_94, - conv2d_95, - conv2d_96, - conv2d_97, - conv2d_98, - conv2d_99, - full_0, - full_int_array_0, - full_int_array_2, - full_int_array_3, - full_int_array_4, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_1, - reshape_2, - reshape_3, - swish_1, - swish_10, - swish_100, - swish_101, - swish_102, - swish_103, - swish_104, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - 
swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_55, - swish_56, - swish_57, - swish_58, - swish_59, - swish_6, - swish_60, - swish_61, - swish_62, - swish_63, - swish_64, - swish_65, - swish_66, - swish_67, - swish_68, - swish_69, - swish_7, - swish_70, - swish_71, - swish_72, - swish_73, - swish_74, - swish_75, - swish_76, - swish_77, - swish_78, - swish_79, - swish_8, - swish_80, - swish_81, - swish_82, - swish_83, - swish_84, - swish_85, - swish_86, - swish_87, - swish_88, - swish_89, - swish_9, - swish_90, - swish_91, - swish_92, - swish_93, - swish_94, - swish_95, - swish_96, - swish_97, - swish_98, - swish_99, - ) - - return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt deleted file mode 100644 index eb0adff13..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -c1b8c019f1768926fb1763d743b6f1af638d4214fa9152321214c544c752c751 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py deleted file mode 100644 index 93b3f9bb6..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/input_meta.py +++ /dev/null @@ -1,156 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [23] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 3024, 1] - dtype = "float32" - min_val = float("4.30403e-05") - max_val = float("0.88968") - mean = float("0.054047") - std = float("0.118534") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 3024, 4] - dtype = "float32" - min_val = float("-300.913") - max_val = float("713.889") - mean = float("192.573") - std = float("133.469") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [3024, 2] - dtype = "float32" - min_val = float("4.0") - max_val = float("380.0") - mean = float("192.0") - std = float("110.796") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 23, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 23, 4] - dtype = "float32" - max_val = float("383.232") - mean = float("161.173") - std = float("98.87") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 23, 1] - dtype = "float32" - data = [ - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 
1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py deleted file mode 100644 index c8d4c25a1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/model.py +++ /dev/null @@ -1,385 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_0 = paddle._C_ops.equal(data_0, full_0) - - # pd_op.cast: (xi64) <- (xb) - cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) - del equal_0 - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) - del cast_0 - - # pd_op.cast: (xi64) <- (xb) - cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) - del not_equal_0 - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_1 = paddle._C_ops.equal(cast_1, full_0) - del cast_1, full_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [2] - - # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) - del data_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) - del data_2 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2147483647] - - # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] - ) - - # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 - - # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] - ) - del full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) - del subtract_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: 
(2x-1x-1xf32) <- (2x-1x-1x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) - del subtract_1 - - # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del clip_1 - - # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) - del full_1, full_2, subtract_2 - - # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 - - # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) - del full_3, subtract_3 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (2x1x-1xf32) <- (2x-1x1xf32) - transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) - del data_1 - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int32") - del full_4, full_5, full_6 - - # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_7, data_0] - del data_0, full_7 - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.tile: (2x-1xi32) <- (2x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) - del stack_0 - - # pd_op.squeeze: (2x-1xi32) <- (2x-1x1xi32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) - del data_4 - - # builtin.combine: ([2x-1xi32, 2x-1xi32]) <- (2x-1xi32, 2x-1xi32) - combine_1 = [tile_0, squeeze_0] - del squeeze_0, tile_0 - - # pd_op.stack: (2x-1x2xi32) <- ([2x-1xi32, 2x-1xi32]) - stack_1 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # pd_op.gather_nd: (2x-1x-1xf32) <- (2x1x-1xf32, 2x-1x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) - del stack_1, transpose_0 - - # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) - del gather_nd_0 - - # 
pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [0, 1] - - # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) - del data_3, full_int_array_5 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_8) - del unsqueeze_3 - - # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) <- (2x-1x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_8) - del full_8, unsqueeze_0 - - # builtin.split: (2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32) <- ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - del split_2 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - del split_3 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - del split_0, split_4 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) - del split_1, split_5 - - # pd_op.full: (1xi32) <- () - full_9 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) - combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 - - # pd_op.concat: (2x-1x-1x4xf32) <- ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_2, full_9) - del combine_2, full_9 - - # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0, full_int_array_4 - - # pd_op.full: (xf32) <- () - full_10 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_10) - del full_10, min_0 - - # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) - cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, cast_2) - - # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(multiply_1) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] - ) - del full_int_array_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - 
# pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] - ) - del full_int_array_0, full_int_array_6, shape64_0 - - # pd_op.full: (1xi32) <- () - full_11 = paddle._C_ops.full( - [1], float("13"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.topk: (2x-1x13xf32, 2x-1x13xi64) <- (2x-1x-1xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(multiply_1, full_11, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del full_11, multiply_1 - - # pd_op.one_hot: (2x-1x13x-1xf32) <- (2x-1x13xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 - ) - del slice_5, topk_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] - - # pd_op.sum: (2x-1x-1xf32) <- (2x-1x13x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_2 = paddle._C_ops.multiply(sum_0, data_6) - del sum_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(multiply_2, cast_2) - del cast_2, multiply_2 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(multiply_3, data_6) - del data_6, multiply_3 - - # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) - del full_int_array_7 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] - - # pd_op.max: (xf32) <- (2x-1xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) - del full_int_array_8 - - # pd_op.full: (xf32) <- () - full_12 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) - del divide_0, full_12, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt deleted file mode 100644 index f3ee1e92c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -ccef89c785301336e92928a91f66ad752e4ce87933446e15f2082e86ac6642d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py deleted file mode 100644 index 38dba74a4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/input_meta.py +++ /dev/null @@ -1,121 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 29, 8400] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000933908") - std = float("0.0305456") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 1] - dtype = "int32" - data = 
[0, 1] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 29, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 8400] - dtype = "float32" - max_val = float("1.0") - mean = float("0.0270833") - std = float("0.162326") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 29, 4] - dtype = "float32" - max_val = float("640.0") - mean = float("225.221") - std = float("222.396") - data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 29, 8400] - dtype = "float32" - max_val = float("0.332125") - mean = float("6.7312e-05") - std = float("0.00215112") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 29, 8400] - dtype = "float32" - max_val = float("0.941182") - mean = float("0.00754317") - std = float("0.0517711") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py deleted file mode 100644 index 08f9efbb7..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/model.py +++ /dev/null @@ -1,175 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x8400xi64) <- (2x29x8400xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) - del full_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("29"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) - del data_1, full_1 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (2x8400xi64) <- (2x8400xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_0) - del argmax_0, cast_0 - - # pd_op.flatten: (58xi32) <- (2x29x1xi32) - flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) - del data_2 - - # pd_op.flatten: (16800xi64) <- (2x8400xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (16800xi32) <- (58xi32, 16800xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 8400] - - # pd_op.reshape: (2x8400xi32) <- (16800xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) - del full_int_array_0, gather_0 - - # pd_op.full: (xf32) <- () - full_3 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x8400xb) <- (2x8400xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) - del data_3, full_3 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - 
- # pd_op.full_like: (2x8400xi32) <- (2x8400xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.where: (2x8400xi32) <- (2x8400xb, 2x8400xi32, 2x8400xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 4] - - # pd_op.reshape: (58x4xf32) <- (2x29x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) - del data_4, full_int_array_1 - - # pd_op.gather: (16800x4xf32) <- (58x4xf32, 16800xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) - del flatten_1, full_2, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_2 = [2, 8400, 4] - - # pd_op.reshape: (2x8400x4xf32) <- (16800x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) - del full_int_array_2, gather_1 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x8400x2xf32) <- (2x8400xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_5, where_0.dtype), full_5 - ) - del full_5 - - # pd_op.full: (1xi64) <- () - full_6 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (1xi64) <- (1xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_6, - [1], - paddle.int64, - [float("0")], - paddle.framework._current_expected_place(), - ) - del full_6 - - # pd_op.index_select: (2x8400x1xf32) <- (2x8400x2xf32, 1xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 - - # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) - multiply_1 = paddle._C_ops.multiply(data_5, data_0) - del data_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-1] - - # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) - - # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x8400xf32) - multiply_2 = paddle._C_ops.multiply(data_6, data_0) - del data_0, data_6 - - # pd_op.max: (2x29x1xf32) <- (2x29x8400xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - del multiply_2 - - # pd_op.scale: (2x29x1xf32) <- (2x29x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) - del full_4, max_0 - - # pd_op.divide: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 - - # pd_op.multiply: (2x29x8400xf32) <- (2x29x8400xf32, 2x29x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-2] - - # pd_op.max: (2x8400xf32) <- (2x29x8400xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) - del full_int_array_4, multiply_3 - - # pd_op.unsqueeze: (2x8400x1xf32) <- (2x8400xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) - del full_int_array_3, max_2 - - # pd_op.multiply: (2x8400x1xf32) <- (2x8400x1xf32, 2x8400x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt deleted file mode 100644 index 1b08335a3..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py deleted file mode 100644 index c3a688445..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/model.py +++ /dev/null @@ -1,162 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2, data_3): - # pd_op.divide: (2100x2xf32) <- (2100x2xf32, 2100x1xf32) - divide_0 = paddle._C_ops.divide(data_2, data_3) - del data_2 - - # pd_op.shape64: (3xi64) <- (2x2100x68xf32) - shape64_0 = paddle._C_ops.shape64(data_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (2x2100x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_1, stack_0) - del data_1, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # 
pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x2100x2xf32) <- (-1x-1x2xf32, 2100x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x2100x2xf32) <- (-1x-1x2xf32, 2100x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x2100x2xf32, -1x2100x2xf32]) <- (-1x2100x2xf32, -1x2100x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x2100x4xf32) <- ([-1x2100x2xf32, -1x2100x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.share_data_: (2x2100x1xf32) <- (2x2100x1xf32) - share_data__0 = data_0.detach() - del data_0 - - # pd_op.share_data_: (-1x2100x4xf32) <- (-1x2100x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x2100x4xf32) <- (-1x2100x4xf32, 2100x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_3, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt deleted file mode 100644 index f667cd716..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -6eae9d07eec352b068f56eeceedb50c3de67909a5597f94efe81153acb758785 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py deleted file mode 100644 index d779c49f5..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/input_meta.py +++ /dev/null @@ -1,141 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 2100, 1] - dtype = "float32" - min_val = float("0.000301052") - max_val = float("0.902879") - mean = float("0.0454822") - std = float("0.116377") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 2100, 4] - dtype = "float32" - min_val = float("-292.871") - max_val = float("632.104") 
- mean = float("160.31") - std = float("118.239") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2100, 2] - dtype = "float32" - min_val = float("4.0") - max_val = float("316.0") - mean = float("160.0") - std = float("92.31") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 21, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 21, 4] - dtype = "float32" - max_val = float("320.0") - mean = float("97.1572") - std = float("87.9478") - data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 21, 1] - dtype = "float32" - data = [ - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py deleted file mode 100644 index db89bc12d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/model.py +++ /dev/null @@ -1,338 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [2] - - # pd_op.unsqueeze: (2x21x1x4xf32) <- (2x21x4xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0) - del data_4 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.unsqueeze: (2x1x2100x4xf32) <- (2x2100x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) - del data_1, full_int_array_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.slice: (2x21x1x2xf32) <- (2x21x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2147483647] - - # pd_op.slice: (2x21x1x2xf32) <- (2x21x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] - ) - - # pd_op.slice: (2x1x2100x2xf32) <- (2x1x2100x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 - - # pd_op.slice: (2x1x2100x2xf32) <- (2x1x2100x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] - ) - del full_int_array_0, full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (2x21x2100x2xf32) <- (2x21x1x2xf32, 2x1x2100x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (2x21x2100x2xf32) <- (2x21x1x2xf32, 2x1x2100x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (2x21x2100x2xf32) <- (2x21x2100x2xf32, 2x21x2100x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: 
(1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x21x2100x2xf32) <- (2x21x2100x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) - del subtract_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: (2x21x2100xf32) <- (2x21x2100x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (2x21x1x2xf32) <- (2x21x1x2xf32, 2x21x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (2x21x1x2xf32) <- (2x21x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) - del subtract_1 - - # pd_op.prod: (2x21x1xf32) <- (2x21x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del clip_1 - - # pd_op.subtract: (2x1x2100x2xf32) <- (2x1x2100x2xf32, 2x1x2100x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (2x1x2100x2xf32) <- (2x1x2100x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) - del full_0, full_1, subtract_2 - - # pd_op.prod: (2x1x2100xf32) <- (2x1x2100x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 - - # pd_op.add: (2x21x2100xf32) <- (2x21x1xf32, 2x1x2100xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x21x2100xf32) <- (2x21x2100xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) - del full_2, subtract_3 - - # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (2x1x2100xf32) <- (2x2100x1xf32) - transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) - del data_0 - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("2"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_5 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") - del full_3, full_4, full_5 - - # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [1, 21] - - # pd_op.tile: (2x21xi32) <- (2x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) - del full_int_array_5 - - # pd_op.squeeze: (2x21xi32) <- (2x21x1xi32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) - del data_3 - - # builtin.combine: ([2x21xi32, 2x21xi32]) <- (2x21xi32, 2x21xi32) - combine_0 = [tile_0, squeeze_0] - del squeeze_0, tile_0 - - # pd_op.stack: (2x21x2xi32) <- ([2x21xi32, 2x21xi32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 - - # pd_op.gather_nd: (2x21x2100xf32) <- (2x1x2100xf32, 2x21x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) - 
del stack_0, transpose_0 - - # pd_op.pow: (2x21x2100xf32) <- (2x21x2100xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) - del gather_nd_0 - - # pd_op.pow: (2x21x2100xf32) <- (2x21x2100xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_6 = [0, 1] - - # pd_op.unsqueeze: (1x1x2100x2xf32) <- (2100x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) - del data_2, full_int_array_6 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x2100x1xf32, 1x1x2100x1xf32]) <- (1x1x2100x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) - del unsqueeze_3 - - # builtin.split: (1x1x2100x1xf32, 1x1x2100x1xf32) <- ([1x1x2100x1xf32, 1x1x2100x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32]) <- (2x21x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) - del full_6, unsqueeze_0 - - # builtin.split: (2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32) <- ([2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (2x21x2100x1xf32) <- (1x1x2100x1xf32, 2x21x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - del split_2 - - # pd_op.subtract: (2x21x2100x1xf32) <- (1x1x2100x1xf32, 2x21x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - del split_3 - - # pd_op.subtract: (2x21x2100x1xf32) <- (2x21x1x1xf32, 1x1x2100x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - del split_0, split_4 - - # pd_op.subtract: (2x21x2100x1xf32) <- (2x21x1x1xf32, 1x1x2100x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) - del split_1, split_5 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32]) <- (2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32) - combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 - - # pd_op.concat: (2x21x2100x4xf32) <- ([2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_7) - del combine_1, full_7 - - # pd_op.min: (2x21x2100xf32) <- (2x21x2100x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0, full_int_array_4 - - # pd_op.full: (xf32) <- () - full_8 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (2x21x2100xb) <- (2x21x2100xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) - del full_8, min_0 - - # pd_op.cast: (2x21x2100xf32) <- (2x21x2100xb) - cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) - - # pd_op.full: (1xi32) <- () - full_9 = paddle._C_ops.full( - [1], float("13"), paddle.int32, 
paddle.core.CPUPlace() - ) - - # pd_op.topk: (2x21x13xf32, 2x21x13xi64) <- (2x21x2100xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(multiply_1, full_9, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del full_9, multiply_1 - - # pd_op.full: (1xi32) <- () - full_10 = paddle._C_ops.full( - [1], float("2100"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x21x13x2100xf32) <- (2x21x13xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 - ) - del full_10, topk_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] - - # pd_op.sum: (2x21x2100xf32) <- (2x21x13x2100xf32, 1xi64) - sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) - multiply_2 = paddle._C_ops.multiply(sum_0, data_5) - del sum_0 - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) - del cast_0, multiply_2 - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) - multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) - del data_5, multiply_3 - - # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) - del full_int_array_7 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] - - # pd_op.max: (xf32) <- (2x2100xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) - del full_int_array_8 - - # pd_op.full: (xf32) <- () - full_11 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) - del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt deleted file mode 100644 index fa10de35c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -dd4756aee481284a2f105818ca2768c725dcd96b0e5b32c9865f9e346e77ab73 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py deleted file mode 100644 index 71f1bf11a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/input_meta.py +++ /dev/null @@ -1,105 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 2100] - dtype = "float32" - max_val = float("3.0") - mean = float("0.0607143") - std = float("0.274954") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 21, 2100] - dtype = "float32" - max_val = float("0.912588") - mean = float("0.02087") - std = float("0.0876462") - data = None - - -class Program_weight_tensor_data_2: - name = 
"data_2" - shape = [2, 21, 2100] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00289116") - std = float("0.0536917") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 21, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 21, 4] - dtype = "float32" - max_val = float("320.0") - mean = float("97.1572") - std = float("87.9478") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 21, 2100] - dtype = "float32" - max_val = float("0.241469") - mean = float("0.000188227") - std = float("0.00363226") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py deleted file mode 100644 index 23048d518..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/model.py +++ /dev/null @@ -1,223 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x2100xf32) <- (2x2100xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x2100xb) <- (2x1x2100xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 21, 1] - - # pd_op.tile: (2x21x2100xb) <- (2x1x2100xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) - del full_int_array_1, greater_than_0 - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("21"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x2100x21xf32) <- (2x2100xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 - ) - del argmax_0, full_2 - - # pd_op.transpose: (2x21x2100xf32) <- (2x2100x21xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x21x2100xf32) <- (2x21x2100xb, 2x21x2100xf32, 2x21x2100xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) - del data_2, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - - # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) - del full_1 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("21"), paddle.float32, paddle.core.CPUPlace() - ) - - # 
pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) - del data_3, full_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (2x2100xi64) <- (2x2100xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_0) - del argmax_1, cast_0 - - # pd_op.flatten: (42xi32) <- (2x21x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (4200xi64) <- (2x2100xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (4200xi32) <- (42xi32, 4200xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [2, 2100] - - # pd_op.reshape: (2x2100xi32) <- (4200xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) - del full_int_array_3, gather_0 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x2100xb) <- (2x2100xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x2100xi32) <- (2x2100xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.where: (2x2100xi32) <- (2x2100xb, 2x2100xi32, 2x2100xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [-1, 4] - - # pd_op.reshape: (42x4xf32) <- (2x21x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) - del data_5, full_int_array_4 - - # pd_op.gather: (4200x4xf32) <- (42x4xf32, 4200xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) - del flatten_1, full_4, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [2, 2100, 4] - - # pd_op.reshape: (2x2100x4xf32) <- (4200x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) - del full_int_array_5, gather_1 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x2100x2xf32) <- (2x2100xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_7, where_1.dtype), full_7 - ) - del full_7 - - # pd_op.full: (1xi64) <- () - full_8 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (1xi64) <- (1xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_8, - [1], - paddle.int64, - [float("0")], - paddle.framework._current_expected_place(), - ) - del full_8 - - # pd_op.index_select: (2x2100x1xf32) <- (2x2100x2xf32, 1xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - multiply_1 = paddle._C_ops.multiply(data_6, where_0) - del data_6 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [-1] - - # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) - max_0 = 
paddle._C_ops.max(multiply_1, full_int_array_6, True) - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) - multiply_2 = paddle._C_ops.multiply(data_1, where_0) - del data_1, where_0 - - # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) - del multiply_2 - - # pd_op.scale: (2x21x1xf32) <- (2x21x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) - del full_6, max_0 - - # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 - - # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x2100xf32) <- (2x21x2100xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) - del full_int_array_2, multiply_3 - - # pd_op.unsqueeze: (2x2100x1xf32) <- (2x2100xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) - del full_int_array_6, max_2 - - # pd_op.multiply: (2x2100x1xf32) <- (2x2100x1xf32, 2x2100x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt deleted file mode 100644 index 896fa94fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py deleted file mode 100644 index d8668962d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/input_meta.py +++ /dev/null @@ -1,38 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 8400, 4] - dtype = "float32" - min_val = float("0.170512") - max_val = float("14.8417") - mean = float("5.20202") - std = float("3.42596") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8400, 2] - dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8400, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 2] - dtype = "float32" - data = [1.20075, 0.802005, 1.74863, 1.16364] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py deleted file mode 100644 index 561c0c35b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/model.py +++ /dev/null @@ -1,94 +0,0 @@ -import paddle - - 
-class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del data_0, full_0 - - # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) - del full_1, split_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_0 = paddle._C_ops.add(scale_0, data_1) - del scale_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_1 = paddle._C_ops.add(split_1, data_1) - del data_1, split_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) - combine_0 = [add_0, add_1] - del add_0, add_1 - - # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) - del data_3, full_3 - - # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) - ( - split_2, - split_3, - ) = split_with_num_1 - del split_with_num_1 - - # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] - del split_2, split_3 - - # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] - - # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 - - # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 - - return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt index df39cfc7f..f667cd716 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/graph_hash.txt @@ -1 +1 @@ -ae4943d1bf44626f825f88e91e02d5245f0173aa7e1af9f7c50571f314d6f983 \ No newline at end of file +6eae9d07eec352b068f56eeceedb50c3de67909a5597f94efe81153acb758785 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py index 9392746ec..d779c49f5 100644 --- 
a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/input_meta.py @@ -1,28 +1,141 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3024, 1] + shape = [2, 2100, 1] dtype = "float32" - min_val = float("4.30403e-05") - max_val = float("0.88968") - mean = float("0.054047") - std = float("0.118534") + min_val = float("0.000301052") + max_val = float("0.902879") + mean = float("0.0454822") + std = float("0.116377") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 3024] - dtype = "int32" - min_val = 0 - max_val = 1 + shape = [2, 2100, 4] + dtype = "float32" + min_val = float("-292.871") + max_val = float("632.104") + mean = float("160.31") + std = float("118.239") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 3024, 1] + shape = [2100, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("316.0") + mean = float("160.0") + std = float("92.31") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 21, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 21, 4] dtype = "float32" - max_val = float("0.909315") - mean = float("0.0306404") - std = float("0.125461") + max_val = float("320.0") + mean = float("97.1572") + std = float("87.9478") data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 21, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py index 747380d08..db89bc12d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_2/model.py @@ -5,106 +5,334 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] - # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - data_1 % paddle.cast(full_0, data_1.dtype), full_0 - ) - del data_1, full_0 + # pd_op.unsqueeze: (2x21x1x4xf32) <- (2x21x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0) + del data_4 # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + full_int_array_1 = [1] + + # pd_op.unsqueeze: (2x1x2100x4xf32) <- (2x2100x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + full_int_array_2 = [0] - # pd_op.slice: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64, 1xi64) + # pd_op.slice: (2x21x1x2xf32) <- (2x21x1x4xf32, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - one_hot_0, [2], 
full_int_array_0, full_int_array_1, [1], [] + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] ) - del full_int_array_0, full_int_array_1, one_hot_0 - # pd_op.pow: (2x-1x1xf32) <- (2x-1x1xf32) - pow_0 = paddle._C_ops.pow(data_0, float("2")) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + # pd_op.slice: (2x21x1x2xf32) <- (2x21x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] ) - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) - del pow_0 + # pd_op.slice: (2x1x2100x2xf32) <- (2x1x2100x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (2x1x2100x2xf32) <- (2x1x2100x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x21x2100x2xf32) <- (2x21x1x2xf32, 2x1x2100x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x21x2100x2xf32) <- (2x21x1x2xf32, 2x1x2100x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x21x2100x2xf32) <- (2x21x2100x2xf32, 2x21x2100x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) - del full_2 + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + # pd_op.clip: (2x21x2100x2xf32) <- (2x21x2100x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 - # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_2, slice_0) - del slice_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] - # pd_op.add: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - add_0 = paddle._C_ops.add(multiply_0, multiply_1) + # pd_op.prod: (2x21x2100xf32) <- (2x21x2100x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 - # pd_op.bce_loss: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) - del data_0 + # pd_op.subtract: (2x21x1x2xf32) <- (2x21x1x2xf32, 2x21x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 - # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + # pd_op.clip: (2x21x1x2xf32) <- (2x21x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_2 = [] + # pd_op.prod: (2x21x1xf32) <- (2x21x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 - # pd_op.sum: (xf32) <- (2x-1x1xf32, 0xi64) - sum_0 = 
paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + # pd_op.subtract: (2x1x2100x2xf32) <- (2x1x2100x2xf32, 2x1x2100x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 - # pd_op.sum: (xf32) <- (2x-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) - del data_2 + # pd_op.clip: (2x1x2100x2xf32) <- (2x1x2100x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 + + # pd_op.prod: (2x1x2100xf32) <- (2x1x2100x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x21x2100xf32) <- (2x21x1xf32, 2x1x2100xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( + full_2 = paddle._C_ops.full( [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full: (1xf32) <- () + # pd_op.scale: (2x21x2100xf32) <- (2x21x2100xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del full_2, subtract_3 + + # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (2x1x2100xf32) <- (2x2100x1xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () full_4 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) - del full_3, full_4, sum_1 + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 21] + + # pd_op.tile: (2x21xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x21xi32) <- (2x21x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) + del data_3 + + # builtin.combine: ([2x21xi32, 2x21xi32]) <- (2x21xi32, 2x21xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x21x2xi32) <- ([2x21xi32, 2x21xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x21x2100xf32) <- (2x1x2100xf32, 2x21x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x21x2100xf32) <- (2x21x2100xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_0, clip_0) - del ( - add_0, - bce_loss_0, - clip_0, - full_1, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - scale_0, - scale_1, - sum_0, + # pd_op.pow: (2x21x2100xf32) <- (2x21x2100xf32) + pow_1 = paddle._C_ops.pow(divide_0, 
float("6")) + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x2100x2xf32) <- (2100x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2, full_int_array_6 + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x2100x1xf32, 1x1x2100x1xf32]) <- (1x1x2100x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 + + # builtin.split: (1x1x2100x1xf32, 1x1x2100x1xf32) <- ([1x1x2100x1xf32, 1x1x2100x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32]) <- (2x21x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32) <- ([2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32, 2x21x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x21x2100x1xf32) <- (1x1x2100x1xf32, 2x21x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 + + # pd_op.subtract: (2x21x2100x1xf32) <- (1x1x2100x1xf32, 2x21x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 + + # pd_op.subtract: (2x21x2100x1xf32) <- (2x21x1x1xf32, 1x1x2100x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 + + # pd_op.subtract: (2x21x2100x1xf32) <- (2x21x1x1xf32, 1x1x2100x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32]) <- (2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (2x21x2100x4xf32) <- ([2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32, 2x21x2100x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1, full_7 + + # pd_op.min: (2x21x2100xf32) <- (2x21x2100x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 + + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.greater_than: (2x21x2100xb) <- (2x21x2100xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del full_8, min_0 + + # pd_op.cast: (2x21x2100xf32) <- (2x21x2100xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) + + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x21x13xf32, 2x21x13xi64) <- (2x21x2100xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_9, -1, True, True), + lambda out: out if isinstance(out, (list, 
tuple)) else (out, None), + ) + del full_9, multiply_1 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("2100"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x21x13x2100xf32) <- (2x21x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 + ) + del full_10, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x21x2100xf32) <- (2x21x13x2100xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_5) + del sum_0 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) + del cast_0, multiply_2 + + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) + del data_5, multiply_3 + + # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (2x2100xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - return divide_0 + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) + del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt index cf9cecf24..fa10de35c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/graph_hash.txt @@ -1 +1 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file +dd4756aee481284a2f105818ca2768c725dcd96b0e5b32c9865f9e346e77ab73 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py index 8b3874e61..71f1bf11a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/input_meta.py @@ -1,73 +1,105 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [14] + shape = [2, 2100] + dtype = "float32" + max_val = float("3.0") + mean = float("0.0607143") + std = float("0.274954") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "int64" - data = [14] + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("0.912588") + mean = float("0.02087") + std = float("0.0876462") + data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [] - dtype = "int64" - data = [28] + shape = [2, 21, 2100] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00289116") + std = float("0.0536917") + data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [] - dtype = "int64" - data = [28] + shape = [2, 1] + dtype = "int32" + data = [0, 1] class Program_weight_tensor_data_4: name = "data_4" - shape = [] - dtype = "int64" - data = [56] + 
shape = [2, 21, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] class Program_weight_tensor_data_5: name = "data_5" - shape = [] - dtype = "int64" - data = [56] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 768, 14, 14] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("6.38042") - mean = float("0.198977") - std = float("0.492361") - data = None - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 384, 28, 28] + shape = [2, 21, 4] dtype = "float32" - min_val = float("-0.278465") - max_val = float("8.58115") - mean = float("0.212227") - std = float("0.589868") + max_val = float("320.0") + mean = float("97.1572") + std = float("87.9478") data = None -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 192, 56, 56] +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 21, 2100] dtype = "float32" - min_val = float("-0.278465") - max_val = float("8.27989") - mean = float("0.302059") - std = float("0.556663") + max_val = float("0.241469") + mean = float("0.000188227") + std = float("0.00363226") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py index f7f899d11..23048d518 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/model.py @@ -5,1140 +5,219 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - ): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # 
pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] + full_int_array_0 = [1] - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] + # pd_op.unsqueeze: (2x1x2100xf32) <- (2x2100xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - del shape64_0 - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) + # pd_op.greater_than: (2x1x2100xb) <- (2x1x2100xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, 
unsqueeze_0 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 21, 1] - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 + # pd_op.tile: (2x21x2100xb) <- (2x1x2100xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 + # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("21"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = 
paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.one_hot: (2x2100x21xf32) <- (2x2100xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] + del argmax_0, full_2 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 + # pd_op.transpose: (2x21x2100xf32) <- (2x2100x21xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 - ) - del full_5, stack_5 + # pd_op.where: (2x21x2100xf32) <- (2x21x2100xb, 2x21x2100xf32, 2x21x2100xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 + # pd_op.sum: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 + # pd_op.argmax: (2x2100xi64) <- (2x21x2100xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() + full_3 = paddle._C_ops.full( + [1], float("21"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - 
scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_2 + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - del full_4 + # pd_op.add: (2x2100xi64) <- (2x2100xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 + # pd_op.flatten: (42xi32) <- (2x21x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 + # pd_op.flatten: (4200xi64) <- (2x2100xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( + full_4 = paddle._C_ops.full( [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, 
-1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) - - # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x768x-1x-1xf32, 1x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 - - # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 - - # 
pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + # pd_op.gather: (4200xi32) <- (42xi32, 4200xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) del flatten_0 - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) - del parameter_34 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - 
parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) - - # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x384x-1x-1xf32, 1x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) - del parameter_27 - - # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) - del parameter_25 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 2100] - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) + # pd_op.reshape: (2x2100xi32) <- (4200xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) - del parameter_18 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) - sigmoid_5 = 
paddle._C_ops.sigmoid(add_7) - del add_7 - # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + # pd_op.greater_than: (2x2100xb) <- (2x2100xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 - # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) - del parameter_16 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full_like: (2x2100xi32) <- (2x2100xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() ) - del parameter_15 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 + # pd_op.where: (2x2100xi32) <- (2x2100xb, 2x2100xi32, 2x2100xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) + # pd_op.reshape: (42x4xf32) <- (2x21x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 - # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x192x-1x-1xf32, 1x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 + # pd_op.gather: (4200x4xf32) 
<- (42x4xf32, 4200xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) - del parameter_9 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 2100, 4] - # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + # pd_op.reshape: (2x2100x4xf32) <- (4200x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_8 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) - del parameter_7 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.one_hot: (2x2100x2xf32) <- (2x2100xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 ) - del parameter_6 + del full_7 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xi64) <- () + full_8 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [1], + paddle.int64, + [float("0")], + paddle.framework._current_expected_place(), ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 + del full_8 - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + # pd_op.index_select: (2x2100x1xf32) <- (2x2100x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 - # pd_op.sigmoid: (2x1x-1x-1xf32) 
<- (2x1x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 - # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] - # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 + # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x2100xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 + # pd_op.max: (2x21x1xf32) <- (2x21x2100xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) + # pd_op.scale: (2x21x1xf32) <- (2x21x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 + # pd_op.divide: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 - # builtin.combine: ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32]) <- (2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] + # pd_op.multiply: (2x21x2100xf32) <- (2x21x2100xf32, 2x21x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.concat: (2x-1x1xf32) <- ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 + # pd_op.max: (2x2100xf32) <- (2x21x2100xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 - # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] + # pd_op.unsqueeze: (2x2100x1xf32) <- (2x2100xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 - # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - 
conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) + # pd_op.multiply: (2x2100x1xf32) <- (2x2100x1xf32, 2x2100x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - return concat_0, concat_1, concat_2, concat_3, concat_4 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py index e37ea3f3b..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_3/weight_meta.py @@ -1,586 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [68] - dtype = "float32" - min_val = float("-0.0120063") - max_val = float("0.0335161") - mean = float("1.74565e-07") - std = float("0.00787881") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [68, 192, 3, 3] - dtype = "float32" - min_val = float("-0.168682") - max_val = float("0.167259") - mean = float("7.71688e-08") - std = float("0.00779909") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [192] - dtype = "float32" - min_val = float("-0.128229") - max_val = float("0.242306") - mean = float("0.03158") - std = float("0.0650427") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [192] - dtype = "float32" - min_val = float("0.74995") - max_val = float("1.60753") - mean = float("1.16227") - std = float("0.142938") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [192] - dtype = "float32" - min_val = float("0.000266157") - max_val = float("0.0040105") - mean = float("0.0010684") - std = float("0.000708224") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [192] - dtype = "float32" - min_val = float("-0.0377588") - max_val = float("0.0284686") - mean = float("-0.00327812") - std = float("0.0104515") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.08141") - max_val = float("0.0877749") - mean = float("-0.00031094") - std = float("0.00761982") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [192] - dtype = "float32" - min_val = float("-0.00811822") - max_val = float("0.0146164") - mean = float("-0.000174084") - std = float("0.00426814") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0116131") - max_val = float("0.0197689") - mean = float("-0.000103125") - std = float("0.00169022") - data = None - - -class Program_weight_tensor_parameter_9: - name = 
"parameter_9" - shape = [1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [1, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0678163") - max_val = float("0.0273811") - mean = float("-0.000125764") - std = float("0.00819254") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [192] - dtype = "float32" - min_val = float("-0.461256") - max_val = float("0.536796") - mean = float("0.114829") - std = float("0.141718") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [192] - dtype = "float32" - min_val = float("0.859577") - max_val = float("1.50378") - mean = float("1.09021") - std = float("0.0873597") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [192] - dtype = "float32" - min_val = float("0.000431253") - max_val = float("0.0137074") - mean = float("0.00230506") - std = float("0.00181221") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [192] - dtype = "float32" - min_val = float("-0.15411") - max_val = float("0.0351219") - mean = float("-0.0393248") - std = float("0.0376471") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0575188") - max_val = float("0.0739459") - mean = float("-0.00110288") - std = float("0.0076522") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [192] - dtype = "float32" - min_val = float("-0.00397738") - max_val = float("0.0119464") - mean = float("-0.000221189") - std = float("0.00218333") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0524122") - max_val = float("0.0693195") - mean = float("-9.53701e-05") - std = float("0.00173109") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [68] - dtype = "float32" - min_val = float("-0.00573793") - max_val = float("0.0217784") - mean = float("1.54891e-07") - std = float("0.00541763") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [68, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0981024") - max_val = float("0.124946") - mean = float("4.94838e-08") - std = float("0.00515546") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.0868715") - max_val = float("0.0669849") - mean = float("0.0121538") - std = float("0.0210256") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.90122") - max_val = float("1.2108") - mean = float("1.07141") - std = float("0.0473601") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.000115831") - max_val = float("0.00541574") - mean = float("0.000753911") - std = float("0.000632097") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.0423513") - max_val = float("0.0117177") - mean = float("-0.00494504") - std = float("0.00661737") - data = None - - -class 
Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0495158") - max_val = float("0.0695034") - mean = float("-0.000125359") - std = float("0.00350255") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.00583976") - max_val = float("0.00760424") - mean = float("1.90588e-05") - std = float("0.0021531") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.00305361") - max_val = float("0.00596843") - mean = float("-3.93664e-05") - std = float("0.000601011") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [1, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0633952") - max_val = float("0.0250402") - mean = float("-0.000143176") - std = float("0.00571183") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.167044") - max_val = float("0.241629") - mean = float("0.0667936") - std = float("0.0667169") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("0.949533") - max_val = float("1.31274") - mean = float("1.04702") - std = float("0.0484413") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.000357813") - max_val = float("0.020638") - mean = float("0.00383275") - std = float("0.00340836") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("-0.0843473") - max_val = float("0.02507") - mean = float("-0.027169") - std = float("0.0211633") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0580925") - max_val = float("0.0555854") - mean = float("-0.000603587") - std = float("0.00353173") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384] - dtype = "float32" - min_val = float("-0.00347224") - max_val = float("0.014402") - mean = float("-4.23883e-05") - std = float("0.0014551") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0163252") - max_val = float("0.0101556") - mean = float("-4.84765e-05") - std = float("0.000584142") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [68] - dtype = "float32" - min_val = float("-0.00369389") - max_val = float("0.00674372") - mean = float("1.39786e-07") - std = float("0.00287983") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [68, 768, 3, 3] - dtype = "float32" - min_val = float("-0.0366324") - max_val = float("0.0462164") - mean = float("-3.25235e-09") - std = float("0.00290201") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [768] - dtype = "float32" - min_val = float("-0.0688838") - max_val = float("0.0553698") - mean = 
float("-0.000947437") - std = float("0.0161684") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("0.964207") - max_val = float("1.26838") - mean = float("1.05105") - std = float("0.0346631") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [768] - dtype = "float32" - min_val = float("3.12347e-05") - max_val = float("0.00203003") - mean = float("0.000380075") - std = float("0.00026984") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [768] - dtype = "float32" - min_val = float("-0.0153051") - max_val = float("0.0154829") - mean = float("-0.00229315") - std = float("0.00345522") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0373301") - max_val = float("0.0386579") - mean = float("-3.68871e-05") - std = float("0.00154547") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [768] - dtype = "float32" - min_val = float("-0.00311382") - max_val = float("0.00471731") - mean = float("1.99403e-05") - std = float("0.00111448") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.003459") - max_val = float("0.00512325") - mean = float("-2.05348e-05") - std = float("0.000302259") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [1, 768, 3, 3] - dtype = "float32" - min_val = float("-0.0278575") - max_val = float("0.0206799") - mean = float("0.000226758") - std = float("0.00242893") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [768] - dtype = "float32" - min_val = float("-0.270176") - max_val = float("0.251616") - mean = float("0.00962775") - std = float("0.0522785") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [768] - dtype = "float32" - min_val = float("0.914105") - max_val = float("1.28201") - mean = float("1.03121") - std = float("0.0519879") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [768] - dtype = "float32" - min_val = float("6.12283e-05") - max_val = float("0.00858465") - mean = float("0.00112119") - std = float("0.000817386") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [768] - dtype = "float32" - min_val = float("-0.0881042") - max_val = float("0.0255774") - mean = float("-0.0239762") - std = float("0.0175392") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0500488") - max_val = float("0.0289173") - mean = float("-0.000294642") - std = float("0.0016182") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [768] - dtype = "float32" - min_val = float("-0.0131155") - max_val = float("0.00722008") - mean = float("-3.7653e-05") - std = float("0.000847186") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.026325") 
- max_val = float("0.0506588") - mean = float("7.82541e-06") - std = float("0.000366702") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt deleted file mode 100644 index 7fbb8551e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py deleted file mode 100644 index b762e8b43..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/input_meta.py +++ /dev/null @@ -1,49 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [1] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 4116, 1] - dtype = "float32" - min_val = float("4.23157e-09") - max_val = float("0.54831") - mean = float("0.0392222") - std = float("0.0590982") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 4116, 68] - dtype = "float32" - min_val = float("-7.32965") - max_val = float("11.6819") - mean = float("3.41484e-05") - std = float("1.50283") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [4116, 2] - dtype = "float32" - min_val = float("4.0") - max_val = float("444.0") - mean = float("224.0") - std = float("129.279") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [4116, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py deleted file mode 100644 index 4bd73f18f..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/model.py +++ /dev/null @@ -1,192 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2, data_3, data_4): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_3, data_4) - del data_3 - - # pd_op.shape64: (3xi64) <- (2x-1x68xf32) - shape64_0 = paddle._C_ops.shape64(data_2) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - 
full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_2, stack_0) - del data_2, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.less_than: (xb) <- (xi64, xi64) - less_than_0 = paddle._C_ops.less_than(data_0, full_6) - del data_0, full_6 - - # pd_op.cast: (xi64) <- (xb) - cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) - del less_than_0 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) - del cast_0 - - # pd_op.cast: (xi64) 
<- (xb) - cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) - del not_equal_0 - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_0 = paddle._C_ops.equal(cast_1, full_7) - del cast_1, full_7 - - # pd_op.share_data_: (2x-1x1xf32) <- (2x-1x1xf32) - share_data__0 = data_1.detach() - del data_1 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_4, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt deleted file mode 100644 index d3368bec1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b31d9174479d0938255cd2ec58334899ecf03916288acd4eafc0c43a6b55388d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py deleted file mode 100644 index cf0c3a83e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/input_meta.py +++ /dev/null @@ -1,130 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [23] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [3024] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "int64" - data = [23] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 3024] - dtype = "float32" - max_val = float("2.0") - mean = float("0.0825066") - std = float("0.285748") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 23, 3024] - dtype = "float32" - max_val = float("0.909315") - mean = float("0.0172789") - std = float("0.0769687") - data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 23, 3024] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00358724") - std = float("0.0597861") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 23, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 23, 4] - dtype = "float32" - max_val = float("383.232") - mean = float("161.173") - std = float("98.87") - data = None - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [2, 23, 3024] - dtype = "float32" - max_val = float("0.356873") 
- mean = float("0.000194909") - std = float("0.00436737") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py deleted file mode 100644 index 0eefa8f8c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/model.py +++ /dev/null @@ -1,258 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - ): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) - del data_3, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_0 = [full_1, data_0, full_1] - del full_1 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) - del greater_than_0, stack_0 - - # pd_op.full: (1xi64) <- () - full_2 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) - - # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 - ) - del argmax_0, data_2 - - # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) - del data_5, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-2] - - # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) - del full_2 - - # pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 - - # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) - multiply_1 = paddle._C_ops.multiply(data_6, cast_0) - del cast_0, data_6 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) - del multiply_1 - - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_1) - del argmax_1, cast_1 - - # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) - del data_7 - - # pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, 
flatten_1, full_3) - del flatten_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_1 = [full_4, data_1] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) - del gather_0, stack_1 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) - del data_8, full_int_array_2 - - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) - del flatten_1, full_3, reshape_2 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_2 = [full_4, data_1, full_7] - del data_1, full_4, full_7 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) - del gather_1, stack_2 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_8, where_1.dtype), full_8 - ) - del full_8 - - # pd_op.full: (1xi64) <- () - full_9 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (1xi64) <- (1xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_9, - [1], - paddle.int64, - [float("0")], - paddle.framework._current_expected_place(), - ) - del full_9 - - # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_9, where_0) - del data_9 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-1] - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_4, where_0) - del data_4, where_0 - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) - del multiply_3 - - # pd_op.scale: 
(2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) - del full_6, max_0 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_0) - del multiply_2, scale_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) - del full_int_array_1, multiply_4 - - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) - del full_int_array_3, max_2 - - # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt deleted file mode 100644 index 82cfbe272..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -1cf576ba841b87c90f808dd83fc76cf6b31385742f2a6f80953fa0a3f614b06c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py deleted file mode 100644 index 9285c370d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/input_meta.py +++ /dev/null @@ -1,103 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [13] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [6069] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 13, 6069] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00205965") - std = float("0.0453366") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 13, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 6069] - dtype = "float32" - max_val = float("1.0") - mean = float("0.0267754") - std = float("0.161426") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 13, 4] - dtype = "float32" - max_val = float("544.0") - mean = float("318.798") - std = float("154.54") - data = None - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 13, 6069] - dtype = "float32" - max_val = float("0.254782") - mean = float("0.000128415") - std = float("0.00278824") - data = None - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 13, 6069] - dtype = "float32" - max_val = float("0.906178") - mean = 
float("0.0215299") - std = float("0.0809394") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py deleted file mode 100644 index 2c208d4ad..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/model.py +++ /dev/null @@ -1,195 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 - ): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) - del full_0 - - # pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 - - # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) - multiply_1 = paddle._C_ops.multiply(data_3, cast_0) - del cast_0, data_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) - del multiply_1 - - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_1) - del argmax_0, cast_1 - - # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) - del flatten_0 - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_2, data_1] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) - del gather_0, stack_0 - - # pd_op.full: (xf32) <- () - full_3 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) - del data_5, full_3 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) - del data_6, full_int_array_0 - - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) - del flatten_1, full_1, reshape_2 - - # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_1 = [full_2, data_1, 
full_5] - del data_1, full_2, full_5 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) - del gather_1, stack_1 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_6, where_0.dtype), full_6 - ) - del full_6 - - # pd_op.full: (1xi64) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (1xi64) <- (1xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_7, - [1], - paddle.int64, - [float("0")], - paddle.framework._current_expected_place(), - ) - del full_7 - - # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, data_2) - del data_7 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_8, data_2) - del data_2, data_8 - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) - del multiply_3 - - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_4, float("1e-09"), True) - del full_4, max_0 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_0) - del multiply_2, scale_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) - del full_int_array_2, multiply_4 - - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) - del full_int_array_1, max_2 - - # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt deleted file mode 100644 index 27bd82e0e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json deleted file mode 100644 index cf4d2108b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py deleted file mode 100644 index 7adfad7cf..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/model.py +++ /dev/null @@ -1,509 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (2x2100xi32) <- (2x2100xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.unsqueeze: (2x2100x1xi32) <- (2x2100xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] - - # pd_op.tile: (2x2100x4xi32) <- (2x2100x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 - - # pd_op.cast: (2x2100x4xb) <- (2x2100x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 - - # pd_op.masked_select: (-1xf32) <- (2x2100x4xf32, 2x2100x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) - - # pd_op.masked_select: (-1xf32) <- (2x2100x4xf32, 2x2100x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 - - # pd_op.sum: (2x2100xf32) <- (2x2100x1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 - - # pd_op.masked_select: (-1xf32) <- (2x2100xf32, 2x2100xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 - - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) - - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) - - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = 
paddle._C_ops.maximum(split_0, split_4) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 - - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) - - # 
pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] - - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) - - # pd_op.unsqueeze: (2x2100x1xb) <- (2x2100xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 - - # pd_op.cast: (2x2100x1xi32) <- (2x2100x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 68] - - # pd_op.tile: (2x2100x68xi32) <- (2x2100x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 - - # pd_op.cast: (2x2100x68xb) <- (2x2100x68xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 - - # pd_op.masked_select: (-1xf32) <- (2x2100x68xf32, 2x2100x68xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 17] - - # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x2100x2xf32, 2x2100x2xf32]) <- (2x2100x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 - - # builtin.split: (2x2100x2xf32, 2x2100x2xf32) <- ([2x2100x2xf32, 2x2100x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 - - # pd_op.subtract: (2x2100x2xf32) <- (2100x2xf32, 2x2100x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 - - # pd_op.subtract: (2x2100x2xf32) <- (2x2100x2xf32, 2100x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x2100x2xf32, 2x2100x2xf32]) <- (2x2100x2xf32, 2x2100x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 - - # pd_op.concat: (2x2100x4xf32) <- ([2x2100x2xf32, 2x2100x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("15.99"), 
paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x2100x4xf32) <- (2x2100x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) - del concat_0, full_7 - - # pd_op.masked_select: (-1xf32) <- (2x2100x4xf32, 2x2100x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 - - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) - - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) - - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 - - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) - del cast_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( - abs_0, - add_0, 
- add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) - - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt index cf9cecf24..5a5c73cb5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/graph_hash.txt @@ -1 +1 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file +e7b7e25c61625d326cacc8dd082d9e064f725c78447257a261d2d0d52c62c7d7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py index 7bd7807fe..f58dc071b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/input_meta.py @@ -1,73 +1,9 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [14] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [14] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "int64" - data = [28] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [] - dtype = "int64" - data = [28] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [] - dtype = "int64" - data = [56] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [] - dtype = "int64" - data = [56] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 768, 14, 14] + shape = [2, 3, 640, 640] dtype = "float32" - min_val = float("-0.278465") - max_val = float("11.1806") - mean = float("0.192012") - std = float("0.569649") - data = None - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 384, 28, 28] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("25.4887") - mean = float("0.232567") - std = float("0.845376") - data = None - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 192, 56, 56] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("28.7635") - mean = float("0.345133") - std = float("0.91854") + min_val = float("-2.01516") + max_val = float("2.64") + mean = float("0.187747") + std = float("0.681331") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py index bdcca8e53..6bdb7ea24 
100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/model.py @@ -61,422 +61,6384 @@ def forward( parameter_51, parameter_52, parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + 
parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + 
parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + 
parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, ): - # pd_op.full: (1xi64) <- () + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_752 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_747, swish_0 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( 
+ swish_1, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_742, swish_1 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_741, + parameter_740, + parameter_739, + parameter_738, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_737, swish_2 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727, swish_3 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, 
None), + ) + del conv2d_5, parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712, swish_6 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + del batch_norm__42, batch_norm__48 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- 
(2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697, swish_8 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + del batch_norm__60, batch_norm__66 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, 
parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682, swish_10 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + del batch_norm__78, batch_norm__84 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_677 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) + del parameter_676 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: 
(2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_0, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_675 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_670, swish_12 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_665 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660, swish_13 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, 
-1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645, swish_16 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + del batch_norm__120, batch_norm__126 + + # 
pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630, swish_18 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + del batch_norm__138, batch_norm__144 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # 
pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615, swish_20 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) + del batch_norm__156, batch_norm__162 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600, swish_22 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) + del batch_norm__174, batch_norm__180 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_590, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585, swish_24 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) + del batch_norm__192, batch_norm__198 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570, swish_26 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) + del batch_norm__210, batch_norm__216 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_565 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) + del parameter_564 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_1, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_563 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_558 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_553 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548, swish_29 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 
192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533, swish_32 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) + del batch_norm__252, batch_norm__258 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + 
else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518, swish_34 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + del batch_norm__270, batch_norm__276 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_508, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503, swish_36 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_492, + 
parameter_491, + parameter_490, + parameter_489, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488, swish_38 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + del batch_norm__306, batch_norm__312 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = 
paddle._C_ops.conv2d( + swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473, swish_40 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) + del batch_norm__324, batch_norm__330 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458, swish_42 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, 
f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) + del batch_norm__342, batch_norm__348 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_453 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_452, full_int_array_1) + del parameter_452 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_2, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_451 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436, swish_45 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 
384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421, swish_48 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) + del batch_norm__384, batch_norm__390 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + 
batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406, swish_50 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) + del batch_norm__402, batch_norm__408 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 + + 
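A brief aside on the motif the exported ops above keep repeating: a ConvBN followed by swish, a residual "basic block" whose second convolution is a RepVGG-style sum of a 3x3 and a 1x1 ConvBN branch (added before the activation), and a channel gate built from a global mean, a 1x1 conv with bias, a hardsigmoid, and an elementwise multiply applied to the concatenated CSP branches. The sketch below is an illustrative, hand-written equivalent using public paddle.nn layers only; the class names (ConvBNAct, BasicBlock, EffectiveSE) and the toy shapes are assumptions for illustration and are not identifiers from the exported graph or this patch.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class ConvBNAct(nn.Layer):
    """Conv2D + BatchNorm2D, optionally followed by swish."""

    def __init__(self, in_ch, out_ch, k=3, stride=1, act=True):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, k, stride, padding=k // 2, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch)
        self.act = act

    def forward(self, x):
        x = self.bn(self.conv(x))
        return F.swish(x) if self.act else x


class BasicBlock(nn.Layer):
    """y = x + swish(conv3x3_bn(h) + conv1x1_bn(h)), h = swish(conv3x3_bn(x))."""

    def __init__(self, ch):
        super().__init__()
        self.conv1 = ConvBNAct(ch, ch, k=3, act=True)
        self.conv2_3x3 = ConvBNAct(ch, ch, k=3, act=False)  # RepVGG-style pair of
        self.conv2_1x1 = ConvBNAct(ch, ch, k=1, act=False)  # branches summed, then swish

    def forward(self, x):
        h = self.conv1(x)
        return x + F.swish(self.conv2_3x3(h) + self.conv2_1x1(h))


class EffectiveSE(nn.Layer):
    """Channel gate: global mean -> 1x1 conv (with bias) -> hardsigmoid -> scale."""

    def __init__(self, ch):
        super().__init__()
        self.fc = nn.Conv2D(ch, ch, 1)

    def forward(self, x):
        gate = F.hardsigmoid(self.fc(x.mean(axis=[2, 3], keepdim=True)))
        return x * gate


if __name__ == "__main__":
    # Toy run with assumed shapes: one CSP branch through three blocks,
    # concatenated with the bypass branch, then gated channel-wise.
    feat = paddle.rand([2, 384, 20, 20])
    blocks = nn.Sequential(*[BasicBlock(384) for _ in range(3)])
    gated = EffectiveSE(768)(paddle.concat([feat, blocks(feat)], axis=1))
    print(gated.shape)  # [2, 768, 20, 20]
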
# pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391, swish_52 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) + del batch_norm__420, batch_norm__426 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_386 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) + del parameter_385 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_3, parameter_384, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_384 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_383, + parameter_382, + parameter_381, + parameter_380, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_380, parameter_381, parameter_382, parameter_383 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_54 = 
paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, swish_54 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_359, swish_57 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_40) + del add_40 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_41) + del add_41 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_42) + del add_42 + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_5 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + 
batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 
192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, 
batch_norm__546 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(add_43) + del add_43 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_44) + del add_44 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + 
batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_45) + del add_45 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_7 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + 
float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_238, + parameter_237, + parameter_236, + 
parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234, swish_77 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_46) + del add_46 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + 
) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_47) + del add_47 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_48) + del add_48 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_9 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x192x-1x-1xf32) <- 
(2x192x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_10 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_169, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_49) + del add_49 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, 
+ ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_50) + del add_50 - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_139, swish_91 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, 
+ float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, 
full_2, float("80"), True) + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_51) + del add_51 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_11 = [swish_86, swish_93] + del swish_86, swish_93 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = 
paddle._C_ops.shape64(reshape_0) + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] + # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) + combine_12 = [swish_95, swish_64] + del swish_64, swish_95 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] + # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del shape64_0 + del parameter_114 - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 + del concat_14, parameter_109 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_131, parameter_105, parameter_106, 
parameter_107, parameter_108 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104, swish_97 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, 
full_2, float("40"), True) + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_52) + del add_52 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), 
+ float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_100 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) - del shape64_1 + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_53) + del add_53 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_5, stack_5 + del parameter_74, swish_101 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 - # pd_op.full: 
(1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_102 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_54) + del add_54 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_13 = [swish_96, swish_103] + del 
swish_103, swish_96 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 + # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_104) - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2] - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] ) - del full_int_array_2, full_int_array_3, shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - del full_4 + del shape64_0 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 + # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_104) - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] ) + 
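# ---------------------------------------------------------------------------
# Illustration (not part of the generated subgraph): the hunk above repeats one
# conv2d -> batch_norm -> swish unit many times, and each batch_norm call is
# wrapped in "(lambda x, f: f(x))(...)" only so its return value is padded to a
# fixed six-element tuple before unpacking. A minimal sketch of that unit using
# Paddle's public API; the helper names (as_six_tuple, conv_bn_swish) and the
# tensors in the usage note are illustrative, not taken from the model.
import paddle
import paddle.nn.functional as F


def as_six_tuple(out):
    # Same normalization the generated lambda performs: a bare tensor becomes
    # (tensor, None, None, None, None, None) so six names can always be unpacked.
    return out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None)


def conv_bn_swish(x, conv_w, bn_scale, bn_bias, bn_mean, bn_var, stride=1, padding=0):
    # Convolution in the same NCHW layout the graph uses, with no conv bias.
    y = F.conv2d(x, conv_w, stride=stride, padding=padding)
    # Batch norm with the graph's momentum/epsilon, shown here in inference mode.
    y = F.batch_norm(y, bn_mean, bn_var, weight=bn_scale, bias=bn_bias,
                     training=False, momentum=0.9, epsilon=1e-5, data_format="NCHW")
    # Swish activation, y * sigmoid(y), matching pd_op.swish.
    return y * F.sigmoid(y)


# Usage (shapes mirror the 96-channel blocks above):
#   x = paddle.rand([2, 96, 32, 32]); w = paddle.rand([96, 96, 3, 3])
#   s = paddle.ones([96]); b = paddle.zeros([96])
#   m = paddle.zeros([96]); v = paddle.ones([96])
#   y = conv_bn_swish(x, w, s, b, m, v, padding=1)   # -> [2, 96, 32, 32]
# ---------------------------------------------------------------------------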
del shape64_1 - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) + del slice_0, slice_1 # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 + full_int_array_8 = [1, 1] # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_8, [1, 1], [0, 0], False, @@ -489,174 +6451,246 @@ def forward( ) # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] + del parameter_54 # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + add_55 = paddle._C_ops.add(conv2d_142, reshape_4) + del conv2d_142, reshape_4 # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 + sigmoid_0 = paddle._C_ops.sigmoid(add_55) + del add_55 # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + multiply_5 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_143 = paddle._C_ops.conv2d( + multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_51 + del multiply_5, parameter_52 # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, ) = (lambda 
x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_1, + conv2d_143, + parameter_51, parameter_50, parameter_49, parameter_48, - parameter_47, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_47, parameter_48, parameter_49, parameter_50 + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) + add_56 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x768x-1x-1xf32, 4x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_144 = paddle._C_ops.conv2d( + add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_46 + del add_56, parameter_47 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + add_57 = paddle._C_ops.add(conv2d_144, reshape_5) + del conv2d_144, reshape_5 # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_44 + del parameter_45, pool2d_3 # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + add_58 = paddle._C_ops.add(conv2d_145, reshape_6) + del conv2d_145, reshape_6 # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 + sigmoid_1 = paddle._C_ops.sigmoid(add_58) + del add_58 # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 + multiply_6 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_146 = paddle._C_ops.conv2d( + multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_42 + del multiply_6, parameter_43 # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_4, + conv2d_146, + parameter_42, parameter_41, parameter_40, parameter_39, - parameter_38, - False, 
+ True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_38, parameter_39, parameter_40, parameter_41 + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_37 + del parameter_38, swish_106 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + add_59 = paddle._C_ops.add(conv2d_147, reshape_7) + del conv2d_147, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_4] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_59, stack_0) + del add_59, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1] + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_9) + del conv2d_148 # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 + sigmoid_2 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_4] + del multiply_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 - # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], 
[0] + ) + del shape64_2 - # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_94) - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del shape64_3 - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) + del slice_2, slice_3 # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_8, [1, 1], [0, 0], False, @@ -669,171 +6703,228 @@ def forward( ) # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_35 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) del parameter_34 # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + add_60 = paddle._C_ops.add(conv2d_149, reshape_10) + del conv2d_149, reshape_10 # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 + sigmoid_3 = paddle._C_ops.sigmoid(add_60) + del add_60 # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + multiply_8 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_150 = paddle._C_ops.conv2d( + multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_33 + del multiply_8, parameter_33 # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_7, + conv2d_150, parameter_32, parameter_31, parameter_30, parameter_29, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_29, parameter_30, parameter_31, parameter_32 + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) + add_61 = 
paddle._C_ops.add(swish_107, swish_94) + del swish_107 # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_151 = paddle._C_ops.conv2d( + add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_28 + del add_61, parameter_28 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) del parameter_27 # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + add_62 = paddle._C_ops.add(conv2d_151, reshape_11) + del conv2d_151, reshape_11 # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_26 + del parameter_26, pool2d_4 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) del parameter_25 # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + add_63 = paddle._C_ops.add(conv2d_152, reshape_12) + del conv2d_152, reshape_12 # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 + sigmoid_4 = paddle._C_ops.sigmoid(add_63) + del add_63 # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 + multiply_9 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_153 = paddle._C_ops.conv2d( + multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_24 + del multiply_9, parameter_24 # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_10, + conv2d_153, parameter_23, parameter_22, parameter_21, parameter_20, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_20, parameter_21, parameter_22, parameter_23 + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_19 + del 
parameter_19, swish_108 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) del parameter_18 # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + add_64 = paddle._C_ops.add(conv2d_154, reshape_13) + del conv2d_154, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_7] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_64, stack_2) + del add_64, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_9) + del conv2d_155 # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 + sigmoid_5 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_7] + del multiply_7 - # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_4 - # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_84) - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_5 - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_8, [1, 1], [0, 0], False, @@ -844,301 +6935,225 @@ def forward( True, "EXPLICIT", ) + del full_int_array_8 # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - 
pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_17 # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) del parameter_16 # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + add_65 = paddle._C_ops.add(conv2d_156, reshape_16) + del conv2d_156, reshape_16 # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 + sigmoid_6 = paddle._C_ops.sigmoid(add_65) + del add_65 # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + multiply_11 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_157 = paddle._C_ops.conv2d( + multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_15 + del multiply_11, parameter_15 # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_13, + conv2d_157, parameter_14, parameter_13, parameter_12, parameter_11, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_11, parameter_12, parameter_13, parameter_14 + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) + add_66 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_158 = paddle._C_ops.conv2d( + add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_10 + del add_66, parameter_10 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) del parameter_9 # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + add_67 = paddle._C_ops.add(conv2d_158, reshape_17) + del conv2d_158, reshape_17 # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_8 + del parameter_8, pool2d_5 # 
pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) del parameter_7 # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + add_68 = paddle._C_ops.add(conv2d_159, reshape_18) + del conv2d_159, reshape_18 # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 + sigmoid_7 = paddle._C_ops.sigmoid(add_68) + del add_68 # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 + multiply_12 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_160 = paddle._C_ops.conv2d( + multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_6 + del multiply_12, parameter_6 # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_16, + conv2d_160, parameter_5, parameter_4, parameter_3, parameter_2, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_2, parameter_3, parameter_4, parameter_5 + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_1 + del parameter_1, swish_110 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + add_69 = paddle._C_ops.add(conv2d_161, reshape_19) + del conv2d_161, reshape_19 - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_10] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 - # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_69, stack_4) + del add_69, stack_4 - # pd_op.transpose: (2x-1x4xf32) <- 
(2x4x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_9) + del conv2d_162, full_int_array_9 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_10] + del full_1, full_2, multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 - # builtin.combine: ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32]) <- (2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 - # pd_op.concat: (2x-1x4xf32) <- ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 - # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - 
batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py index c7e0aa45d..bf4cf55ae 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_0/weight_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [68] dtype = "float32" - min_val = float("-0.00925576") - max_val = float("0.0373948") - mean = float("8.72824e-08") - std = float("0.00862349") + min_val = float("-0.00892735") + max_val = float("0.0359164") + mean = float("8.74861e-08") + std = float("0.00810027") data = None @@ -13,10 +13,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [68, 192, 3, 3] dtype = "float32" - min_val = float("-0.143827") - max_val = float("0.168304") + min_val = float("-0.14403") + max_val = float("0.16853") mean = float("5.68543e-08") - std = float("0.00711119") + std = float("0.00707932") data = None @@ -24,10 +24,10 @@ class Program_weight_tensor_parameter_2: name = "parameter_2" shape = [192] dtype = "float32" - min_val = float("-0.0740156") - max_val = float("0.254426") - mean = float("0.0633438") - std = float("0.0574767") + min_val = float("-0.0740081") + max_val = float("0.252888") + mean = float("0.0629457") + std = float("0.0572415") data = None @@ -35,10 +35,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [192] dtype = "float32" - min_val = float("0.838238") - max_val = float("1.78552") - mean = float("1.2918") - std = float("0.191195") + min_val = float("0.838381") + max_val = float("1.78564") + mean = float("1.29191") + std = float("0.191219") data = None @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [192] dtype = "float32" - min_val = float("0.000822901") - max_val = float("0.063123") - mean = float("0.0112077") - std = float("0.00842708") + min_val = float("0.000988996") + max_val = float("0.0390925") + mean = float("0.00961489") + std = float("0.00656614") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [192] dtype = "float32" - min_val = float("-0.115789") - max_val = float("0.122016") - mean = float("-0.00147854") - std = 
float("0.0384486") + min_val = float("-0.11659") + max_val = float("0.132114") + mean = float("0.00156193") + std = float("0.0387838") data = None @@ -68,10 +68,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0602355") - max_val = float("0.0853349") - mean = float("-0.000554639") - std = float("0.00702224") + min_val = float("-0.060318") + max_val = float("0.0853128") + mean = float("-0.000471342") + std = float("0.00698795") data = None @@ -79,10 +79,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [192] dtype = "float32" - min_val = float("-0.00462773") - max_val = float("0.0060156") - mean = float("4.24223e-05") - std = float("0.0019278") + min_val = float("-0.00462196") + max_val = float("0.00603283") + mean = float("4.25491e-05") + std = float("0.00192614") data = None @@ -90,10 +90,10 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0100771") - max_val = float("0.01135") - mean = float("-7.39524e-06") - std = float("0.00128718") + min_val = float("-0.0100888") + max_val = float("0.0113319") + mean = float("-9.49891e-06") + std = float("0.00128433") data = None @@ -110,10 +110,10 @@ class Program_weight_tensor_parameter_10: name = "parameter_10" shape = [4, 192, 3, 3] dtype = "float32" - min_val = float("-0.344983") - max_val = float("0.0528926") - mean = float("-0.0184469") - std = float("0.0468232") + min_val = float("-0.336461") + max_val = float("0.0527773") + mean = float("-0.0165997") + std = float("0.0458695") data = None @@ -121,10 +121,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [192] dtype = "float32" - min_val = float("-0.445875") - max_val = float("1.50051") - mean = float("0.404315") - std = float("0.335424") + min_val = float("-0.446317") + max_val = float("1.5052") + mean = float("0.404532") + std = float("0.335901") data = None @@ -132,10 +132,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [192] dtype = "float32" - min_val = float("0.959458") - max_val = float("2.22908") - mean = float("1.3733") - std = float("0.176915") + min_val = float("0.959521") + max_val = float("2.22919") + mean = float("1.3736") + std = float("0.176912") data = None @@ -143,10 +143,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [192] dtype = "float32" - min_val = float("0.00434037") - max_val = float("26.8554") - mean = float("0.524987") - std = float("2.35391") + min_val = float("0.00279508") + max_val = float("23.2612") + mean = float("0.479151") + std = float("2.07157") data = None @@ -154,10 +154,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [192] dtype = "float32" - min_val = float("-0.548344") - max_val = float("2.67378") - mean = float("0.0899683") - std = float("0.32715") + min_val = float("-0.559818") + max_val = float("2.59203") + mean = float("0.0883731") + std = float("0.322225") data = None @@ -165,10 +165,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.725619") - max_val = float("0.454136") - mean = float("0.00160697") - std = float("0.0255239") + min_val = float("-0.70809") + max_val = float("0.443594") + mean = float("0.00156292") + std = float("0.0251271") data = None @@ -176,10 +176,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [192] dtype = 
"float32" - min_val = float("-0.0117797") - max_val = float("0.0160663") - mean = float("-8.90954e-05") - std = float("0.00280426") + min_val = float("-0.0114725") + max_val = float("0.0157792") + mean = float("-8.72761e-05") + std = float("0.00276262") data = None @@ -187,10 +187,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0350776") - max_val = float("0.0312972") - mean = float("2.94677e-05") - std = float("0.00183097") + min_val = float("-0.0348732") + max_val = float("0.0307751") + mean = float("2.79732e-05") + std = float("0.00181061") data = None @@ -198,10 +198,10 @@ class Program_weight_tensor_parameter_18: name = "parameter_18" shape = [68] dtype = "float32" - min_val = float("-0.00469375") - max_val = float("0.0194489") - mean = float("6.58692e-08") - std = float("0.00452283") + min_val = float("-0.00445757") + max_val = float("0.0190964") + mean = float("6.58329e-08") + std = float("0.00445574") data = None @@ -209,10 +209,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [68, 384, 3, 3] dtype = "float32" - min_val = float("-0.082678") - max_val = float("0.109438") - mean = float("3.68454e-08") - std = float("0.00410717") + min_val = float("-0.0827377") + max_val = float("0.109436") + mean = float("3.68891e-08") + std = float("0.00410645") data = None @@ -220,10 +220,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [384] dtype = "float32" - min_val = float("-0.0150875") + min_val = float("-0.0151169") max_val = float("0.107487") - mean = float("0.0329244") - std = float("0.0178576") + mean = float("0.032917") + std = float("0.0178601") data = None @@ -231,10 +231,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [384] dtype = "float32" - min_val = float("1.00854") - max_val = float("1.29001") - mean = float("1.14543") - std = float("0.0510595") + min_val = float("1.00868") + max_val = float("1.29019") + mean = float("1.1456") + std = float("0.0510674") data = None @@ -242,10 +242,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [384] dtype = "float32" - min_val = float("0.000224402") - max_val = float("0.554167") - mean = float("0.0237249") - std = float("0.0506673") + min_val = float("0.000383663") + max_val = float("0.550054") + mean = float("0.0230272") + std = float("0.0486734") data = None @@ -253,10 +253,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [384] dtype = "float32" - min_val = float("-0.10498") - max_val = float("0.125234") - mean = float("-0.0096529") - std = float("0.0315707") + min_val = float("-0.106161") + max_val = float("0.123469") + mean = float("-0.010298") + std = float("0.0313141") data = None @@ -264,10 +264,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.046203") - max_val = float("0.0533854") - mean = float("-0.00024935") - std = float("0.00299432") + min_val = float("-0.0462353") + max_val = float("0.0532962") + mean = float("-0.000249771") + std = float("0.00297506") data = None @@ -275,10 +275,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [384] dtype = "float32" - min_val = float("-0.00255311") - max_val = float("0.00302266") - mean = float("9.24001e-05") - std = float("0.00102546") + min_val = float("-0.00255366") + max_val = float("0.00300366") + mean = float("9.24283e-05") + std = 
float("0.00102595") data = None @@ -286,10 +286,10 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.00218781") - max_val = float("0.00368412") - mean = float("2.14576e-05") - std = float("0.000454436") + min_val = float("-0.00218811") + max_val = float("0.00368407") + mean = float("2.14446e-05") + std = float("0.000454602") data = None @@ -306,10 +306,10 @@ class Program_weight_tensor_parameter_28: name = "parameter_28" shape = [4, 384, 3, 3] dtype = "float32" - min_val = float("-0.408866") - max_val = float("0.0192085") - mean = float("-0.0556575") - std = float("0.0692171") + min_val = float("-0.404012") + max_val = float("0.018674") + mean = float("-0.0535936") + std = float("0.0678225") data = None @@ -317,10 +317,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [384] dtype = "float32" - min_val = float("-0.232909") - max_val = float("0.549512") - mean = float("0.280237") - std = float("0.126665") + min_val = float("-0.23292") + max_val = float("0.549526") + mean = float("0.280285") + std = float("0.126685") data = None @@ -328,10 +328,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [384] dtype = "float32" - min_val = float("0.99194") - max_val = float("1.51118") - mean = float("1.23481") - std = float("0.0724215") + min_val = float("0.992106") + max_val = float("1.5115") + mean = float("1.23501") + std = float("0.0724327") data = None @@ -339,10 +339,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [384] dtype = "float32" - min_val = float("0.00162732") - max_val = float("157.489") - mean = float("2.37771") - std = float("10.0765") + min_val = float("0.00193308") + max_val = float("158.649") + mean = float("2.35102") + std = float("10.1328") data = None @@ -350,10 +350,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [384] dtype = "float32" - min_val = float("-3.4158") - max_val = float("2.26802") - mean = float("0.0219363") - std = float("0.412554") + min_val = float("-3.49971") + max_val = float("2.32388") + mean = float("0.0203273") + std = float("0.419635") data = None @@ -361,10 +361,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.303292") - max_val = float("0.19556") - mean = float("0.000605287") - std = float("0.0144313") + min_val = float("-0.299716") + max_val = float("0.19324") + mean = float("0.000565759") + std = float("0.0141589") data = None @@ -372,10 +372,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [384] dtype = "float32" - min_val = float("-0.00209029") - max_val = float("0.00798783") - mean = float("1.7015e-06") - std = float("0.000935296") + min_val = float("-0.00201965") + max_val = float("0.00793739") + mean = float("1.85378e-06") + std = float("0.000924825") data = None @@ -383,43 +383,41 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.00535335") - max_val = float("0.0106302") - mean = float("3.37412e-06") - std = float("0.000515341") + min_val = float("-0.0053256") + max_val = float("0.0106358") + mean = float("3.65302e-06") + std = float("0.000513084") data = None class Program_weight_tensor_parameter_36: name = "parameter_36" - shape = [68] + shape = [1, 17, 1, 1] dtype = "float32" - min_val = float("-0.00525521") - max_val = float("0.0145446") - mean = 
float("1.87138e-08") - std = float("0.00475") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_37: name = "parameter_37" - shape = [68, 768, 3, 3] + shape = [68] dtype = "float32" - min_val = float("-0.0328719") - max_val = float("0.0658116") - mean = float("1.06011e-08") - std = float("0.00223125") + min_val = float("-0.00483921") + max_val = float("0.0132204") + mean = float("1.85537e-08") + std = float("0.00434121") data = None class Program_weight_tensor_parameter_38: name = "parameter_38" - shape = [768] + shape = [68, 768, 3, 3] dtype = "float32" - min_val = float("-0.0159444") - max_val = float("0.0716664") - mean = float("0.0156827") - std = float("0.0141851") + min_val = float("-0.033019") + max_val = float("0.0657063") + mean = float("1.06083e-08") + std = float("0.00221797") data = None @@ -427,10 +425,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [768] dtype = "float32" - min_val = float("1.02746") - max_val = float("1.22316") - mean = float("1.09258") - std = float("0.0262997") + min_val = float("-0.0161648") + max_val = float("0.0716208") + mean = float("0.0156062") + std = float("0.014137") data = None @@ -438,10 +436,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [768] dtype = "float32" - min_val = float("9.27059e-05") - max_val = float("0.0491799") - mean = float("0.00131356") - std = float("0.00220594") + min_val = float("1.02758") + max_val = float("1.22328") + mean = float("1.09273") + std = float("0.0263012") data = None @@ -449,74 +447,74 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [768] dtype = "float32" - min_val = float("-0.118335") - max_val = float("0.0502021") - mean = float("-0.00579341") - std = float("0.0133184") + min_val = float("9.07716e-05") + max_val = float("0.044646") + mean = float("0.00124304") + std = float("0.0020634") data = None class Program_weight_tensor_parameter_42: name = "parameter_42" - shape = [768, 768, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.0245855") - max_val = float("0.0259771") - mean = float("-9.1685e-05") - std = float("0.00109901") + min_val = float("-0.110096") + max_val = float("0.0490828") + mean = float("-0.00556926") + std = float("0.0125766") data = None class Program_weight_tensor_parameter_43: name = "parameter_43" - shape = [768] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.00228367") - max_val = float("0.0017984") - mean = float("9.02642e-05") - std = float("0.000464238") + min_val = float("-0.0245815") + max_val = float("0.0259975") + mean = float("-8.56304e-05") + std = float("0.00108959") data = None class Program_weight_tensor_parameter_44: name = "parameter_44" - shape = [768, 768, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.00190114") - max_val = float("0.00166296") - mean = float("2.82487e-05") - std = float("0.000154769") + min_val = float("-0.00228552") + max_val = float("0.00179437") + mean = float("9.01848e-05") + std = float("0.000464165") data = None class Program_weight_tensor_parameter_45: name = "parameter_45" - shape = [4] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-0.00190183") + max_val = float("0.00166425") + mean = float("2.80024e-05") + std = float("0.000154694") data = None class Program_weight_tensor_parameter_46: name = "parameter_46" - shape = [4, 768, 3, 3] + shape = [4] dtype = "float32" - min_val = float("-0.398112") - 
max_val = float("0.0341335") - mean = float("-0.0183825") - std = float("0.0458581") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_47: name = "parameter_47" - shape = [768] + shape = [4, 768, 3, 3] dtype = "float32" - min_val = float("-0.149591") - max_val = float("0.25563") - mean = float("0.127271") - std = float("0.0546892") + min_val = float("-0.360602") + max_val = float("0.0321948") + mean = float("-0.017021") + std = float("0.0416566") data = None @@ -524,10 +522,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [768] dtype = "float32" - min_val = float("1.01586") - max_val = float("1.35031") - mean = float("1.10996") - std = float("0.0353314") + min_val = float("-0.149733") + max_val = float("0.255597") + mean = float("0.127259") + std = float("0.0547028") data = None @@ -535,10 +533,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [768] dtype = "float32" - min_val = float("7.61599e-05") - max_val = float("7.24991") - mean = float("0.129052") - std = float("0.546235") + min_val = float("1.01591") + max_val = float("1.35046") + mean = float("1.11013") + std = float("0.0353362") data = None @@ -546,41 +544,7618 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [768] dtype = "float32" - min_val = float("-0.787154") - max_val = float("0.440341") - mean = float("-0.0324434") - std = float("0.112186") + min_val = float("0.000190586") + max_val = float("5.78833") + mean = float("0.106439") + std = float("0.43971") data = None class Program_weight_tensor_parameter_51: name = "parameter_51" - shape = [768, 768, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.0434972") - max_val = float("0.028497") - mean = float("-0.00050544") - std = float("0.0033397") + min_val = float("-0.732685") + max_val = float("0.378772") + mean = float("-0.0300328") + std = float("0.0991444") data = None class Program_weight_tensor_parameter_52: name = "parameter_52" - shape = [768] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0036321") - max_val = float("0.00249458") - mean = float("1.83682e-05") - std = float("0.000350058") + min_val = float("-0.0393304") + max_val = float("0.0266957") + mean = float("-0.000475184") + std = float("0.00312029") data = None class Program_weight_tensor_parameter_53: name = "parameter_53" + shape = [768] + dtype = "float32" + min_val = float("-0.00365788") + max_val = float("0.00249573") + mean = float("1.83923e-05") + std = float("0.00034543") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0129388") - max_val = float("0.040846") - mean = float("7.13241e-06") - std = float("0.000222922") + min_val = float("-0.0129539") + max_val = float("0.0408516") + mean = float("7.14053e-06") + std = float("0.000222292") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [768] + dtype = "float32" + min_val = float("-0.239693") + max_val = float("0.342023") + mean = float("0.111992") + std = float("0.075942") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + min_val = float("0.85597") + max_val = float("1.34121") + mean = float("1.0926") + std = float("0.041353") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("0.00747408") + 
max_val = float("91.142") + mean = float("1.28785") + std = float("4.31139") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("-0.940587") + max_val = float("0.533912") + mean = float("-0.0459939") + std = float("0.109863") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0494613") + max_val = float("0.0342687") + mean = float("-0.00053839") + std = float("0.00305259") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.220176") + max_val = float("0.0362122") + mean = float("-0.0290593") + std = float("0.0325532") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.949198") + max_val = float("1.03905") + mean = float("0.985069") + std = float("0.0122127") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.000362336") + max_val = float("5.86749") + mean = float("0.0824461") + std = float("0.456655") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.0999696") + max_val = float("0.203239") + mean = float("0.00744766") + std = float("0.024564") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0202515") + max_val = float("0.0297371") + mean = float("0.000259053") + std = float("0.00196863") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.220176") + max_val = float("0.0362122") + mean = float("-0.0290593") + std = float("0.0325532") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.853608") + max_val = float("1.12594") + mean = float("1.01957") + std = float("0.0206557") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00676097") + max_val = float("25.3593") + mean = float("0.532137") + std = float("1.58471") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.311548") + max_val = float("0.516814") + mean = float("-0.00365134") + std = float("0.0774418") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.018036") + max_val = float("0.0229965") + mean = float("-1.03648e-05") + std = float("0.00100784") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [384] + dtype = "float32" + min_val = float("-0.187836") + max_val = float("0.0393232") + mean = float("-0.0497104") + std = float("0.0341181") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384] + dtype = "float32" + min_val = float("0.92338") + max_val = float("1.15649") + mean = float("1.01802") + std = float("0.0319672") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = 
"float32" + min_val = float("0.0260268") + max_val = float("9.39199") + mean = float("0.909698") + std = float("1.08737") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-0.352188") + max_val = float("0.490548") + mean = float("0.0139095") + std = float("0.119918") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0214424") + max_val = float("0.0293422") + mean = float("1.62345e-05") + std = float("0.00113367") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384] + dtype = "float32" + min_val = float("-0.136108") + max_val = float("0.0210026") + mean = float("-0.0495464") + std = float("0.0271927") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [384] + dtype = "float32" + min_val = float("0.94096") + max_val = float("1.03866") + mean = float("0.98639") + std = float("0.0130591") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [384] + dtype = "float32" + min_val = float("0.00109296") + max_val = float("0.435956") + mean = float("0.0262465") + std = float("0.047939") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [384] + dtype = "float32" + min_val = float("-0.0984515") + max_val = float("0.0774922") + mean = float("0.00451097") + std = float("0.020161") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0187966") + max_val = float("0.015677") + mean = float("0.000104981") + std = float("0.00175477") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [384] + dtype = "float32" + min_val = float("-0.136108") + max_val = float("0.0210026") + mean = float("-0.0495464") + std = float("0.0271927") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [384] + dtype = "float32" + min_val = float("0.966771") + max_val = float("1.104") + mean = float("1.01877") + std = float("0.0185739") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [384] + dtype = "float32" + min_val = float("0.006631") + max_val = float("4.5025") + mean = float("0.219574") + std = float("0.441772") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [384] + dtype = "float32" + min_val = float("-0.29354") + max_val = float("0.27892") + mean = float("-0.00342736") + std = float("0.0628133") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0202651") + max_val = float("0.0277646") + mean = float("-7.89595e-06") + std = float("0.000982729") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [384] + dtype = "float32" + min_val = float("-0.148346") + max_val = float("0.0258169") + mean = float("-0.0508039") + std = float("0.0264955") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [384] + dtype = "float32" + min_val = float("0.938732") + max_val = float("1.11539") + mean = float("1.01484") + std = float("0.0355445") + data = None + + +class Program_weight_tensor_parameter_87: + name = 
"parameter_87" + shape = [384] + dtype = "float32" + min_val = float("0.00583479") + max_val = float("7.27592") + mean = float("0.403401") + std = float("0.610221") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("-0.416572") + max_val = float("0.519537") + mean = float("0.0156239") + std = float("0.100455") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0194111") + max_val = float("0.027243") + mean = float("1.68141e-05") + std = float("0.00118306") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-0.153224") + max_val = float("0.0447664") + mean = float("-0.0556235") + std = float("0.0277297") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384] + dtype = "float32" + min_val = float("0.932483") + max_val = float("1.05427") + mean = float("0.984506") + std = float("0.0159277") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("0.000534063") + max_val = float("0.217156") + mean = float("0.0168761") + std = float("0.0268172") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("-0.0656613") + max_val = float("0.0732472") + mean = float("-0.00097402") + std = float("0.0152174") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0201466") + max_val = float("0.0213735") + mean = float("-4.74338e-05") + std = float("0.00168772") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-0.153224") + max_val = float("0.0447664") + mean = float("-0.0556235") + std = float("0.0277297") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384] + dtype = "float32" + min_val = float("0.963627") + max_val = float("1.13194") + mean = float("1.02226") + std = float("0.0263426") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("0.0039872") + max_val = float("2.61582") + mean = float("0.128734") + std = float("0.223227") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("-0.158487") + max_val = float("0.16455") + mean = float("-0.00243845") + std = float("0.0415247") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0197083") + max_val = float("0.0251654") + mean = float("-8.82344e-06") + std = float("0.00104335") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-0.161053") + max_val = float("0.0517202") + mean = float("-0.0534173") + std = float("0.0280919") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384] + dtype = "float32" + min_val = float("0.918049") + max_val = float("1.15232") + mean = float("1.01552") + std = float("0.0359069") + data = None + + 
+class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("0.00751172") + max_val = float("4.27019") + mean = float("0.219163") + std = float("0.336004") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("-0.172824") + max_val = float("0.238297") + mean = float("0.000740969") + std = float("0.0528198") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0223627") + max_val = float("0.0225632") + mean = float("4.76008e-06") + std = float("0.00121998") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-0.10183") + max_val = float("0.0571021") + mean = float("-0.0401848") + std = float("0.0228972") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384] + dtype = "float32" + min_val = float("0.964009") + max_val = float("1.11744") + mean = float("1.01332") + std = float("0.0243487") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("0.0068207") + max_val = float("0.57439") + mean = float("0.0795897") + std = float("0.0829282") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("-0.0720079") + max_val = float("0.133674") + mean = float("0.00403881") + std = float("0.0281469") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.040986") + max_val = float("0.0442725") + mean = float("2.35921e-05") + std = float("0.00180591") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-0.0710209") + max_val = float("0.0171363") + mean = float("-0.0176361") + std = float("0.0126887") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384] + dtype = "float32" + min_val = float("0.915116") + max_val = float("1.10226") + mean = float("1.00939") + std = float("0.0165555") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("0.00101995") + max_val = float("1.60525") + mean = float("0.0940799") + std = float("0.172003") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("-0.106481") + max_val = float("0.109302") + mean = float("0.00224785") + std = float("0.0268252") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0370576") + max_val = float("0.0304661") + mean = float("3.8777e-05") + std = float("0.00152873") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-0.0787177") + max_val = float("0.00338451") + mean = float("-0.025285") + std = float("0.0143999") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [384] + dtype = "float32" + min_val = float("0.98141") + max_val = 
float("1.12417") + mean = float("1.02758") + std = float("0.0212745") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("0.00212422") + max_val = float("3.61176") + mean = float("0.161326") + std = float("0.310472") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("-0.339877") + max_val = float("0.322401") + mean = float("-0.00463237") + std = float("0.0920204") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0316566") + max_val = float("0.0261297") + mean = float("-3.55618e-06") + std = float("0.000915228") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.413396") + max_val = float("0.666571") + mean = float("0.255151") + std = float("0.15886") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384] + dtype = "float32" + min_val = float("0.924159") + max_val = float("1.66721") + mean = float("1.17398") + std = float("0.0907099") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] + dtype = "float32" + min_val = float("0.0144254") + max_val = float("122.174") + mean = float("3.89996") + std = float("13.8836") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("-0.852841") + max_val = float("0.566699") + mean = float("-0.0134303") + std = float("0.139115") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.136465") + max_val = float("0.10202") + mean = float("-0.000553339") + std = float("0.00889327") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.257665") + max_val = float("0.0752437") + mean = float("-0.0387492") + std = float("0.0605554") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.912244") + max_val = float("1.05536") + mean = float("0.97033") + std = float("0.0254714") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.000393611") + max_val = float("2.87493") + mean = float("0.111455") + std = float("0.279032") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.0556163") + max_val = float("0.0292508") + mean = float("-0.00194405") + std = float("0.0112362") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0439425") + max_val = float("0.0410791") + mean = float("-0.000352772") + std = float("0.00463785") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.257665") + max_val = float("0.0752437") + mean = float("-0.0387492") + std = float("0.0605554") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = 
[192] + dtype = "float32" + min_val = float("0.673684") + max_val = float("1.16438") + mean = float("1.02596") + std = float("0.0488779") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.0133915") + max_val = float("25.98") + mean = float("1.07065") + std = float("2.9016") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.180444") + max_val = float("0.125675") + mean = float("-0.00551328") + std = float("0.0357889") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.031421") + max_val = float("0.0394767") + mean = float("-0.000121284") + std = float("0.0024869") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.255521") + max_val = float("0.0954508") + mean = float("-0.0805929") + std = float("0.0593622") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.856802") + max_val = float("1.31575") + mean = float("1.0158") + std = float("0.0628707") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.0279533") + max_val = float("27.6276") + mean = float("1.05945") + std = float("2.31666") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.134847") + max_val = float("0.218809") + mean = float("0.00283619") + std = float("0.0445478") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0375382") + max_val = float("0.0517003") + mean = float("-3.45177e-05") + std = float("0.0026056") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("-0.21883") + max_val = float("0.0413199") + mean = float("-0.100393") + std = float("0.0464424") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.8901") + max_val = float("1.08415") + mean = float("0.970219") + std = float("0.0268324") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("0.000612437") + max_val = float("0.233253") + mean = float("0.0124722") + std = float("0.0250989") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192] + dtype = "float32" + min_val = float("-0.0253723") + max_val = float("0.00984091") + mean = float("-0.0014133") + std = float("0.00517924") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0375517") + max_val = float("0.033268") + mean = float("-0.000569872") + std = float("0.00362479") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.21883") + max_val = float("0.0413199") + mean = float("-0.100393") + std = float("0.0464424") + data = None + + +class 
Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.930194") + max_val = float("1.13524") + mean = float("1.02396") + std = float("0.0379187") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.00801575") + max_val = float("1.06604") + mean = float("0.0984606") + std = float("0.143442") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.0705495") + max_val = float("0.024401") + mean = float("-0.00514406") + std = float("0.0149741") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0383657") + max_val = float("0.0467087") + mean = float("-0.000148064") + std = float("0.00219098") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.230449") + max_val = float("0.0107375") + mean = float("-0.106032") + std = float("0.0515695") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.866584") + max_val = float("1.19769") + mean = float("1.0176") + std = float("0.0616183") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.0189783") + max_val = float("2.75021") + mean = float("0.20213") + std = float("0.25366") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.0862401") + max_val = float("0.0391613") + mean = float("-0.00692022") + std = float("0.0188185") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0440031") + max_val = float("0.0615391") + mean = float("-0.000144339") + std = float("0.00244733") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.331751") + max_val = float("0.0593176") + mean = float("-0.122839") + std = float("0.0596387") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.864681") + max_val = float("1.08356") + mean = float("0.967237") + std = float("0.0294572") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.000340173") + max_val = float("0.0775148") + mean = float("0.00814505") + std = float("0.0093693") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.0142046") + max_val = float("0.0108901") + mean = float("-0.000354694") + std = float("0.00406659") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0335535") + max_val = float("0.0763086") + mean = float("-0.000457842") + std = float("0.00358968") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.331751") + max_val = 
float("0.0593176") + mean = float("-0.122839") + std = float("0.0596387") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.930627") + max_val = float("1.13713") + mean = float("1.02228") + std = float("0.0316434") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.00494727") + max_val = float("1.02902") + mean = float("0.085425") + std = float("0.136729") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.0599128") + max_val = float("0.0420675") + mean = float("-0.00217438") + std = float("0.0152992") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0370746") + max_val = float("0.0560801") + mean = float("-0.000137326") + std = float("0.00230186") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.348258") + max_val = float("0.134414") + mean = float("-0.132516") + std = float("0.0683607") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.882276") + max_val = float("1.33254") + mean = float("1.01699") + std = float("0.066298") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.00563114") + max_val = float("2.12348") + mean = float("0.105603") + std = float("0.186094") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.0852457") + max_val = float("0.107334") + mean = float("-0.00720781") + std = float("0.0193172") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0441365") + max_val = float("0.0884734") + mean = float("-8.53865e-05") + std = float("0.00277581") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.248486") + max_val = float("0.0638204") + mean = float("-0.0972066") + std = float("0.0449139") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.916404") + max_val = float("1.2336") + mean = float("1.01803") + std = float("0.0458357") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.00285938") + max_val = float("1.25761") + mean = float("0.0895332") + std = float("0.162617") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.096256") + max_val = float("0.113259") + mean = float("-0.00855981") + std = float("0.0239588") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.054491") + max_val = float("0.0593654") + mean = float("-0.000210848") + std = float("0.00429447") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + 
shape = [192] + dtype = "float32" + min_val = float("-0.165888") + max_val = float("0.0407389") + mean = float("-0.0331057") + std = float("0.0303175") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192] + dtype = "float32" + min_val = float("0.914858") + max_val = float("1.29697") + mean = float("1.00082") + std = float("0.0391908") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("0.00305941") + max_val = float("1.43751") + mean = float("0.124248") + std = float("0.190205") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("-0.100544") + max_val = float("0.127463") + mean = float("0.00148309") + std = float("0.0308809") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0567269") + max_val = float("0.0677656") + mean = float("6.1846e-05") + std = float("0.00367772") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-0.15598") + max_val = float("0.0109365") + mean = float("-0.0537461") + std = float("0.0309099") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192] + dtype = "float32" + min_val = float("0.854102") + max_val = float("1.17499") + mean = float("1.00855") + std = float("0.0375922") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("0.0177937") + max_val = float("2.32847") + mean = float("0.20173") + std = float("0.23775") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("-0.582113") + max_val = float("0.599466") + mean = float("-0.0228734") + std = float("0.194881") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0460401") + max_val = float("0.0411278") + mean = float("-2.83696e-05") + std = float("0.00233677") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.727151") + max_val = float("1.77126") + mean = float("0.372279") + std = float("0.431884") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192] + dtype = "float32" + min_val = float("0.643738") + max_val = float("1.70973") + mean = float("1.1633") + std = float("0.19941") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("0.0175227") + max_val = float("24.367") + mean = float("0.996291") + std = float("2.34855") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("-0.649941") + max_val = float("0.526551") + mean = float("0.0190609") + std = float("0.138321") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.222576") + max_val = float("0.181966") + mean = float("0.000145706") + std = float("0.0205848") + data = None + + +class 
Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96] + dtype = "float32" + min_val = float("-0.646041") + max_val = float("0.287621") + mean = float("-0.0371052") + std = float("0.203701") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("0.729587") + max_val = float("1.32793") + mean = float("0.928081") + std = float("0.0901068") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.00961121") + max_val = float("5.95736") + mean = float("0.233741") + std = float("0.717086") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("-0.0755287") + max_val = float("0.0554683") + mean = float("0.00113738") + std = float("0.0240865") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.538395") + max_val = float("0.204203") + mean = float("-0.00127837") + std = float("0.021114") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96] + dtype = "float32" + min_val = float("-0.646041") + max_val = float("0.287621") + mean = float("-0.0371052") + std = float("0.203701") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("0.482104") + max_val = float("1.39863") + mean = float("1.04313") + std = float("0.135699") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.0457488") + max_val = float("30.6283") + mean = float("1.69124") + std = float("4.42624") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("-0.384559") + max_val = float("0.181016") + mean = float("-0.0210207") + std = float("0.0873941") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.247075") + max_val = float("0.10887") + mean = float("-0.000316918") + std = float("0.00915013") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96] + dtype = "float32" + min_val = float("-0.806157") + max_val = float("0.642647") + mean = float("-0.135704") + std = float("0.223383") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("0.498225") + max_val = float("1.52038") + mean = float("0.986561") + std = float("0.14034") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.133909") + max_val = float("27.5573") + mean = float("1.63725") + std = float("2.96894") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("-0.14425") + max_val = float("0.110106") + mean = float("0.00161153") + std = float("0.0553599") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.217565") + max_val = float("0.114085") + mean = float("-0.000489408") + std = 
float("0.00910554") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96] + dtype = "float32" + min_val = float("-0.376161") + max_val = float("0.21441") + mean = float("-0.182742") + std = float("0.131052") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("0.657588") + max_val = float("1.16753") + mean = float("0.867648") + std = float("0.0697505") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.00284137") + max_val = float("1.11982") + mean = float("0.0593209") + std = float("0.123554") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("-0.0657001") + max_val = float("0.0660294") + mean = float("-0.0085817") + std = float("0.0166625") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.112441") + max_val = float("0.145551") + mean = float("-0.00185134") + std = float("0.0132855") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96] + dtype = "float32" + min_val = float("-0.376161") + max_val = float("0.21441") + mean = float("-0.182742") + std = float("0.131052") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("0.794636") + max_val = float("1.29933") + mean = float("1.01044") + std = float("0.0818406") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.0372437") + max_val = float("9.07593") + mean = float("0.686368") + std = float("1.14539") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("-0.192429") + max_val = float("0.242172") + mean = float("-0.0113233") + std = float("0.0649532") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0724886") + max_val = float("0.0676065") + mean = float("-0.000591651") + std = float("0.00719272") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96] + dtype = "float32" + min_val = float("-0.504724") + max_val = float("0.310511") + mean = float("-0.192469") + std = float("0.162887") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("0.729078") + max_val = float("1.32546") + mean = float("0.944974") + std = float("0.106132") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0420698") + max_val = float("6.71555") + mean = float("1.0677") + std = float("1.20063") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("-0.126845") + max_val = float("0.134215") + mean = float("0.0195531") + std = float("0.0565244") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.086667") + max_val = float("0.0891793") + 
mean = float("-0.000463108") + std = float("0.00828012") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.603289") + max_val = float("0.0867803") + mean = float("-0.234451") + std = float("0.142047") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("0.704426") + max_val = float("1.02604") + mean = float("0.909068") + std = float("0.0649952") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.001266") + max_val = float("1.28419") + mean = float("0.0888219") + std = float("0.188276") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("-0.0485395") + max_val = float("0.0643204") + mean = float("-0.00403548") + std = float("0.0232515") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0943453") + max_val = float("0.128335") + mean = float("-0.00194288") + std = float("0.0140302") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-0.603289") + max_val = float("0.0867803") + mean = float("-0.234451") + std = float("0.142047") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.622021") + max_val = float("1.21586") + mean = float("0.958698") + std = float("0.106219") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.00636808") + max_val = float("6.96019") + mean = float("0.706011") + std = float("1.27379") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("-0.265289") + max_val = float("0.2344") + mean = float("6.01998e-05") + std = float("0.078052") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0724794") + max_val = float("0.0855183") + mean = float("-0.000671817") + std = float("0.00725462") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-0.81596") + max_val = float("0.694539") + mean = float("-0.220603") + std = float("0.24757") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.653873") + max_val = float("1.50477") + mean = float("0.905064") + std = float("0.115987") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0138713") + max_val = float("5.6047") + mean = float("0.607674") + std = float("0.872345") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("-0.18801") + max_val = float("0.361114") + mean = float("0.00822761") + std = float("0.104183") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.112173") + max_val = float("0.152093") + mean = float("-0.000219951") + std = float("0.00849275") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.737721") + max_val = float("1.03803") + mean = float("-0.0965003") + std = float("0.357534") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("0.486178") + max_val = float("1.16474") + mean = float("0.785591") + std = float("0.140337") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.0220315") + max_val = float("1.90785") + mean = float("0.302731") + std = float("0.386768") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("-0.265742") + max_val = float("0.176522") + mean = float("0.0019327") + std = float("0.0895195") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.141003") + max_val = float("0.12526") + mean = float("-0.000255411") + std = float("0.0113464") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-0.101234") + max_val = float("0.280761") + mean = float("0.0626191") + std = float("0.0701217") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.728134") + max_val = float("1.1619") + mean = float("0.897257") + std = float("0.075542") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.00554226") + max_val = float("3.43284") + mean = float("0.265583") + std = float("0.559653") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("-0.350261") + max_val = float("0.171463") + mean = float("-0.00933972") + std = float("0.0751828") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0930981") + max_val = float("0.109019") + mean = float("-1.41361e-05") + std = float("0.00738557") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.420169") + max_val = float("0.304077") + mean = float("-0.0846625") + std = float("0.0933085") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.660034") + max_val = float("1.60045") + mean = float("0.830394") + std = float("0.0946997") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00802662") + max_val = float("1.1227") + mean = float("0.141375") + std = float("0.138512") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.0902271") + max_val = float("0.0555908") + mean = float("-0.00784275") + std = float("0.0256276") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape 
= [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0796293") + max_val = float("0.0883018") + mean = float("-0.000481953") + std = float("0.00665654") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-0.373051") + max_val = float("0.163758") + mean = float("-0.0929626") + std = float("0.0592494") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384] + dtype = "float32" + min_val = float("0.87726") + max_val = float("1.57409") + mean = float("1.01609") + std = float("0.085249") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [384] + dtype = "float32" + min_val = float("0.00707231") + max_val = float("0.602354") + mean = float("0.080235") + std = float("0.0743166") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [384] + dtype = "float32" + min_val = float("-0.136249") + max_val = float("0.144427") + mean = float("-0.0149049") + std = float("0.0284274") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.129077") + max_val = float("0.0807547") + mean = float("-0.000487049") + std = float("0.00603461") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.256959") + max_val = float("0.0668543") + mean = float("-0.0840696") + std = float("0.0440415") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192] + dtype = "float32" + min_val = float("0.819568") + max_val = float("0.986962") + mean = float("0.929009") + std = float("0.0272595") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("0.000592244") + max_val = float("0.300787") + mean = float("0.0342886") + std = float("0.0384523") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("-0.0318971") + max_val = float("0.0175305") + mean = float("-0.00483448") + std = float("0.00913169") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.035315") + max_val = float("0.0383402") + mean = float("-0.000675809") + std = float("0.0047554") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.256959") + max_val = float("0.0668543") + mean = float("-0.0840696") + std = float("0.0440415") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.900377") + max_val = float("1.08413") + mean = float("0.991669") + std = float("0.0256929") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("0.0159625") + max_val = float("1.75258") + mean = float("0.264178") + std = float("0.327029") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192] + dtype = "float32" + min_val = float("-0.0846459") + max_val = float("0.0435916") + mean = float("-0.01252") + std = float("0.0267311") + data = None + 
+ +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0581955") + max_val = float("0.0815388") + mean = float("-0.000159628") + std = float("0.00251421") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [192] + dtype = "float32" + min_val = float("-0.283023") + max_val = float("0.00609804") + mean = float("-0.108732") + std = float("0.0543265") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [192] + dtype = "float32" + min_val = float("0.93521") + max_val = float("1.19749") + mean = float("1.03676") + std = float("0.0449099") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [192] + dtype = "float32" + min_val = float("0.0382762") + max_val = float("5.57442") + mean = float("0.725692") + std = float("0.823782") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [192] + dtype = "float32" + min_val = float("-0.237511") + max_val = float("0.165957") + mean = float("-0.0152484") + std = float("0.0789669") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0710582") + max_val = float("0.0757265") + mean = float("-0.000116839") + std = float("0.00304633") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [192] + dtype = "float32" + min_val = float("-0.253576") + max_val = float("-0.0258551") + mean = float("-0.111423") + std = float("0.0514888") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [192] + dtype = "float32" + min_val = float("0.915196") + max_val = float("1.08387") + mean = float("0.975653") + std = float("0.0197955") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("0.000784494") + max_val = float("0.170735") + mean = float("0.0158213") + std = float("0.0237635") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("-0.0457468") + max_val = float("0.0201233") + mean = float("-0.00730915") + std = float("0.0101963") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0337886") + max_val = float("0.0331928") + mean = float("-0.000817084") + std = float("0.00496809") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-0.253576") + max_val = float("-0.0258551") + mean = float("-0.111423") + std = float("0.0514888") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192] + dtype = "float32" + min_val = float("0.940652") + max_val = float("1.13061") + mean = float("1.00459") + std = float("0.0346428") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("0.00912887") + max_val = float("1.74385") + mean = float("0.136466") + std = float("0.22376") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + dtype = "float32" + min_val = float("-0.104047") + max_val = 
float("0.047509") + mean = float("-0.0196895") + std = float("0.0277402") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0458249") + max_val = float("0.0663524") + mean = float("-0.000235445") + std = float("0.00258708") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-0.397813") + max_val = float("-0.0235897") + mean = float("-0.135031") + std = float("0.0579049") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192] + dtype = "float32" + min_val = float("0.935628") + max_val = float("1.28762") + mean = float("1.02726") + std = float("0.0582587") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("0.0350071") + max_val = float("3.67144") + mean = float("0.444898") + std = float("0.548341") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("-0.456827") + max_val = float("0.250861") + mean = float("-0.0445605") + std = float("0.110845") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.032414") + max_val = float("0.0474969") + mean = float("-0.000226297") + std = float("0.00328822") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-0.290864") + max_val = float("-0.0230562") + mean = float("-0.11391") + std = float("0.04676") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("0.906862") + max_val = float("1.13958") + mean = float("0.997132") + std = float("0.0365506") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.000630271") + max_val = float("0.126275") + mean = float("0.0156972") + std = float("0.0212032") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("-0.032696") + max_val = float("0.0205933") + mean = float("-0.00387507") + std = float("0.00785712") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348327") + max_val = float("0.0582176") + mean = float("-0.000388586") + std = float("0.0055446") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-0.290864") + max_val = float("-0.0230562") + mean = float("-0.11391") + std = float("0.04676") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192] + dtype = "float32" + min_val = float("0.90984") + max_val = float("1.14776") + mean = float("0.987138") + std = float("0.0373269") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("0.0068577") + max_val = float("3.15529") + mean = float("0.115454") + std = float("0.251449") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = 
[192] + dtype = "float32" + min_val = float("-0.157661") + max_val = float("0.0523813") + mean = float("-0.0154723") + std = float("0.0237568") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0229903") + max_val = float("0.0319766") + mean = float("-0.000196356") + std = float("0.00258864") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-0.370486") + max_val = float("-0.0106218") + mean = float("-0.161455") + std = float("0.0602386") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192] + dtype = "float32" + min_val = float("0.9051") + max_val = float("1.22165") + mean = float("1.03058") + std = float("0.049462") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("0.0103088") + max_val = float("3.73392") + mean = float("0.208286") + std = float("0.402475") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("-0.0463211") + max_val = float("0.0315376") + mean = float("-0.00913568") + std = float("0.0128318") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0895417") + max_val = float("0.0442348") + mean = float("-0.000211153") + std = float("0.0035822") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.397496") + max_val = float("0.0864231") + mean = float("-0.165482") + std = float("0.0737238") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("0.879897") + max_val = float("1.17541") + mean = float("1.01759") + std = float("0.0566345") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("0.00621631") + max_val = float("1.89483") + mean = float("0.0866904") + std = float("0.204768") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("-0.10704") + max_val = float("0.250321") + mean = float("0.0243905") + std = float("0.0576948") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.102693") + max_val = float("0.140514") + mean = float("-0.000271279") + std = float("0.00498004") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-0.150926") + max_val = float("0.502386") + mean = float("-0.00458928") + std = float("0.0740063") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192] + dtype = "float32" + min_val = float("0.932811") + max_val = float("1.23138") + mean = float("1.04668") + std = float("0.0628145") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [192] + dtype = "float32" + min_val = float("0.00500346") + max_val = float("0.658867") + mean = float("0.071691") + std = float("0.0871382") + data = None + + +class 
Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("-0.179438") + max_val = float("0.127582") + mean = float("0.00911666") + std = float("0.0540771") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.135587") + max_val = float("0.0877653") + mean = float("-0.000157689") + std = float("0.00488948") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.312093") + max_val = float("-0.0453597") + mean = float("-0.171326") + std = float("0.0441224") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.787891") + max_val = float("1.17398") + mean = float("0.88519") + std = float("0.0343002") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00518361") + max_val = float("0.717927") + mean = float("0.0818779") + std = float("0.074952") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.112216") + max_val = float("0.0645384") + mean = float("-0.00977235") + std = float("0.0242007") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0335371") + max_val = float("0.0385938") + mean = float("-0.000215569") + std = float("0.00376159") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [768] + dtype = "float32" + min_val = float("-0.16492") + max_val = float("0.116104") + mean = float("-0.0876142") + std = float("0.0238545") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [768] + dtype = "float32" + min_val = float("0.935583") + max_val = float("1.28041") + mean = float("1.02965") + std = float("0.0299369") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [768] + dtype = "float32" + min_val = float("0.00577983") + max_val = float("0.559026") + mean = float("0.0765748") + std = float("0.0652625") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [768] + dtype = "float32" + min_val = float("-0.154632") + max_val = float("0.121681") + mean = float("-0.00883901") + std = float("0.0433664") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0409931") + max_val = float("0.0668") + mean = float("-0.000149374") + std = float("0.00323186") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.175711") + max_val = float("0.114937") + mean = float("-0.0598235") + std = float("0.031613") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.876243") + max_val = float("1.05485") + mean = float("0.975582") + std = float("0.0178294") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.000782373") + max_val = float("0.603868") + 
mean = float("0.0317629") + std = float("0.0542092") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.0928944") + max_val = float("0.0888735") + mean = float("0.00210265") + std = float("0.0327669") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0523449") + max_val = float("0.052802") + mean = float("4.73102e-05") + std = float("0.00322689") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.175711") + max_val = float("0.114937") + mean = float("-0.0598235") + std = float("0.031613") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.939826") + max_val = float("1.08521") + mean = float("0.993811") + std = float("0.0188195") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + min_val = float("0.0100212") + max_val = float("3.79981") + mean = float("0.240529") + std = float("0.361857") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-0.247484") + max_val = float("0.338793") + mean = float("-0.0375155") + std = float("0.0861982") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0258772") + max_val = float("0.0360759") + mean = float("-7.49101e-05") + std = float("0.001213") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.151775") + max_val = float("0.102834") + mean = float("-0.0361192") + std = float("0.0243711") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.94838") + max_val = float("1.23657") + mean = float("1.02234") + std = float("0.0374976") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("0.0447625") + max_val = float("8.11219") + mean = float("0.62398") + std = float("1.02319") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-0.712325") + max_val = float("0.573616") + mean = float("0.00533148") + std = float("0.162416") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0241385") + max_val = float("0.0318638") + mean = float("1.18507e-05") + std = float("0.00161292") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.127682") + max_val = float("0.0290178") + mean = float("-0.0397355") + std = float("0.020477") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("0.935742") + max_val = float("1.22687") + mean = float("1.02229") + std = float("0.040962") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + 
min_val = float("0.255072") + max_val = float("120.906") + mean = float("9.80837") + std = float("15.5408") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-4.13105") + max_val = float("6.0128") + mean = float("0.0669194") + std = float("1.73557") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0444577") + max_val = float("0.0503126") + mean = float("0.0001148") + std = float("0.00260266") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0351402") + max_val = float("0.0448126") + mean = float("-0.00203657") + std = float("0.0112334") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.955294") + max_val = float("1.07985") + mean = float("0.990475") + std = float("0.01608") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.00198763") + max_val = float("0.0635025") + mean = float("0.0138523") + std = float("0.00926475") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-0.0391059") + max_val = float("0.0393871") + mean = float("-0.0105951") + std = float("0.0131426") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0224049") + max_val = float("0.0332456") + mean = float("-0.000297944") + std = float("0.00282752") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0351403") + max_val = float("0.0448126") + mean = float("-0.00203657") + std = float("0.0112334") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("0.958435") + max_val = float("1.12455") + mean = float("1.0048") + std = float("0.0255586") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("0.0140512") + max_val = float("0.800076") + mean = float("0.108902") + std = float("0.0948212") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-0.115193") + max_val = float("0.079648") + mean = float("-0.0321172") + std = float("0.0341768") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0255902") + max_val = float("0.0443317") + mean = float("-0.00011048") + std = float("0.00120834") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0758443") + max_val = float("0.0143564") + mean = float("-0.018454") + std = float("0.013002") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384] + dtype = "float32" + min_val = float("0.948675") + max_val = float("1.19194") + mean = float("1.02021") + std = float("0.0307393") + data = None + + +class 
Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("0.0278921") + max_val = float("3.5996") + mean = float("0.368141") + std = float("0.390009") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-0.273124") + max_val = float("0.208821") + mean = float("-0.0380423") + std = float("0.077889") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0198387") + max_val = float("0.0263313") + mean = float("-7.66766e-05") + std = float("0.00143257") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.0645948") + max_val = float("0.0263716") + mean = float("-0.0175864") + std = float("0.0121472") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.977031") + max_val = float("1.05231") + mean = float("0.998866") + std = float("0.0104315") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.000423634") + max_val = float("0.110283") + mean = float("0.0116174") + std = float("0.0134231") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-0.0211042") + max_val = float("0.0345802") + mean = float("-0.0023165") + std = float("0.00961282") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0201647") + max_val = float("0.0351393") + mean = float("-8.36299e-05") + std = float("0.00242893") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.0645947") + max_val = float("0.0263716") + mean = float("-0.0175864") + std = float("0.0121472") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.976794") + max_val = float("1.10103") + mean = float("1.00726") + std = float("0.0202551") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("0.00768908") + max_val = float("1.55127") + mean = float("0.109916") + std = float("0.159092") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-0.103719") + max_val = float("0.0862114") + mean = float("-0.0208973") + std = float("0.0271219") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0127177") + max_val = float("0.0247748") + mean = float("-9.78059e-05") + std = float("0.00103805") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.084671") + max_val = float("-0.001335") + mean = float("-0.0378383") + std = float("0.0147188") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype = "float32" + min_val = float("0.961734") + max_val = 
float("1.11654") + mean = float("1.01877") + std = float("0.0258236") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("0.00767187") + max_val = float("0.454337") + mean = float("0.0877939") + std = float("0.0760314") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-0.0506767") + max_val = float("0.0420926") + mean = float("-0.0059652") + std = float("0.018851") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0131907") + max_val = float("0.0210535") + mean = float("-3.41569e-05") + std = float("0.00141085") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.10718") + max_val = float("0.0233714") + mean = float("-0.0562477") + std = float("0.019742") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.981369") + max_val = float("1.07286") + mean = float("1.02166") + std = float("0.0138993") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("0.00628651") + max_val = float("2.59218") + mean = float("0.256612") + std = float("0.377235") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-0.10037") + max_val = float("0.159741") + mean = float("0.0326345") + std = float("0.0438385") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.017557") + max_val = float("0.0459387") + mean = float("-0.000156264") + std = float("0.00273104") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0589739") + max_val = float("0.0343006") + mean = float("-0.00850519") + std = float("0.0113599") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("1.00897") + max_val = float("1.21432") + mean = float("1.05133") + std = float("0.0209679") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.0034229") + max_val = float("0.869117") + mean = float("0.100891") + std = float("0.0914111") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-0.0875187") + max_val = float("0.134152") + mean = float("0.0286449") + std = float("0.0311893") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0404324") + max_val = float("0.0486178") + mean = float("-0.000170107") + std = float("0.00283689") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("-3.76561") + max_val = float("-0.731173") + mean = float("-2.19452") + std = float("0.428428") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + 
shape = [1024] + dtype = "float32" + min_val = float("1.62445") + max_val = float("4.43497") + mean = float("3.07461") + std = float("0.255239") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("0.00300033") + max_val = float("1.03235") + mean = float("0.0732599") + std = float("0.0918229") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024] + dtype = "float32" + min_val = float("-0.102441") + max_val = float("0.0886517") + mean = float("0.0176384") + std = float("0.0286918") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0683686") + max_val = float("0.079499") + mean = float("-0.000295847") + std = float("0.00320057") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [768] + dtype = "float32" + min_val = float("-0.0121386") + max_val = float("0.00527683") + mean = float("-0.00056885") + std = float("0.00174363") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.104283") + max_val = float("0.108395") + mean = float("-0.000212643") + std = float("0.00130148") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.77842") + max_val = float("0.498407") + mean = float("-0.300061") + std = float("0.296898") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.189938") + max_val = float("1.98187") + mean = float("0.620289") + std = float("0.278642") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("9.91892e-05") + max_val = float("0.0270208") + mean = float("0.00199739") + std = float("0.00264807") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0508396") + max_val = float("0.0837222") + mean = float("0.0276176") + std = float("0.0223824") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.019634") + max_val = float("0.026249") + mean = float("-0.000305565") + std = float("0.00221217") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.77842") + max_val = float("0.498407") + mean = float("-0.300061") + std = float("0.296898") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.365979") + max_val = float("2.77813") + mean = float("1.04754") + std = float("0.30869") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.000885633") + max_val = float("0.640413") + mean = float("0.0266485") + std = float("0.0528161") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.2607") + max_val = float("0.252739") + mean = float("0.0237469") + std = float("0.0710539") + data = None 
+ + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0161625") + max_val = float("0.0237353") + mean = float("-2.77227e-05") + std = float("0.00143234") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.61241") + max_val = float("0.0552853") + mean = float("-1.58347") + std = float("0.416394") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.56794") + max_val = float("1.67647") + mean = float("1.12425") + std = float("0.146808") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0425016") + max_val = float("8.60413") + mean = float("0.715833") + std = float("0.95826") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-0.579806") + max_val = float("0.450828") + mean = float("0.0787798") + std = float("0.153293") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0172878") + max_val = float("0.0422016") + mean = float("-0.000115855") + std = float("0.00184295") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.93787") + max_val = float("0.733813") + mean = float("-0.570636") + std = float("0.36594") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.140643") + max_val = float("2.06386") + mean = float("0.563265") + std = float("0.226726") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("7.24306e-05") + max_val = float("0.0212711") + mean = float("0.00249773") + std = float("0.00263818") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0480177") + max_val = float("0.0906403") + mean = float("0.0290943") + std = float("0.0218768") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0187865") + max_val = float("0.0197971") + mean = float("-0.000317036") + std = float("0.00213102") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.93797") + max_val = float("0.733813") + mean = float("-0.570636") + std = float("0.365941") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.579568") + max_val = float("2.10408") + mean = float("1.0926") + std = float("0.254088") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00121035") + max_val = float("0.722758") + mean = float("0.0326783") + std = float("0.0604562") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.233337") + max_val = float("0.235021") + mean 
= float("0.0433769") + std = float("0.0724029") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017636") + max_val = float("0.0242342") + mean = float("-5.98118e-05") + std = float("0.00148823") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.42769") + max_val = float("0.839301") + mean = float("-1.42177") + std = float("0.360858") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.438544") + max_val = float("1.84376") + mean = float("1.15689") + std = float("0.142353") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0203352") + max_val = float("3.181") + mean = float("0.368327") + std = float("0.465673") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.205922") + max_val = float("0.327803") + mean = float("0.0418561") + std = float("0.0756754") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0216487") + max_val = float("0.0329662") + mean = float("-0.000111566") + std = float("0.00182337") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-1.88519") + max_val = float("0.489282") + mean = float("-0.478509") + std = float("0.384475") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.0964864") + max_val = float("2.12186") + mean = float("0.441698") + std = float("0.215794") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.000190261") + max_val = float("0.0374628") + mean = float("0.00504513") + std = float("0.00528967") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.0833438") + max_val = float("0.106805") + mean = float("0.0319307") + std = float("0.0228581") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0196403") + max_val = float("0.0181013") + mean = float("-0.000367563") + std = float("0.00183188") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-1.88519") + max_val = float("0.489282") + mean = float("-0.478509") + std = float("0.384475") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.572754") + max_val = float("2.21625") + mean = float("1.0609") + std = float("0.254729") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.00314815") + max_val = float("0.609992") + mean = float("0.063887") + std = float("0.0854036") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + 
min_val = float("-0.309743") + max_val = float("0.213002") + mean = float("0.0357878") + std = float("0.0857551") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0167874") + max_val = float("0.0235595") + mean = float("-4.10985e-05") + std = float("0.0015578") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-2.15541") + max_val = float("0.429211") + mean = float("-1.38242") + std = float("0.277701") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.714293") + max_val = float("1.63322") + mean = float("1.13559") + std = float("0.0992203") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("0.00874361") + max_val = float("1.63267") + mean = float("0.172849") + std = float("0.199136") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.262866") + max_val = float("0.171872") + mean = float("0.00667807") + std = float("0.0549583") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0255212") + max_val = float("0.0449234") + mean = float("-9.38033e-05") + std = float("0.00171679") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-2.9313") + max_val = float("1.76163") + mean = float("-0.765032") + std = float("0.654203") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("0.974162") + max_val = float("2.91141") + mean = float("1.85277") + std = float("0.272517") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00181718") + max_val = float("0.234308") + mean = float("0.0412232") + std = float("0.0430638") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("-0.234178") + max_val = float("0.245307") + mean = float("0.0695519") + std = float("0.0618743") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0469498") + max_val = float("0.0418139") + mean = float("-0.000433104") + std = float("0.00390566") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.24508") + max_val = float("0.69413") + mean = float("-0.776468") + std = float("0.476207") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.973977") + max_val = float("2.89139") + mean = float("2.10296") + std = float("0.303008") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.000334173") + max_val = float("0.779074") + mean = float("0.0234264") + std = float("0.0613748") + data = None + + +class Program_weight_tensor_parameter_440: + 
name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-0.246706") + max_val = float("0.197941") + mean = float("0.032747") + std = float("0.0469715") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.128826") + max_val = float("0.0634304") + mean = float("-0.000209218") + std = float("0.0029129") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [768] + dtype = "float32" + min_val = float("-2.41087") + max_val = float("0.654592") + mean = float("-0.916074") + std = float("0.344533") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [768] + dtype = "float32" + min_val = float("0.51965") + max_val = float("1.8768") + mean = float("0.91262") + std = float("0.147168") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [768] + dtype = "float32" + min_val = float("0.00391866") + max_val = float("6.24599") + mean = float("0.161767") + std = float("0.35048") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [768] + dtype = "float32" + min_val = float("-0.309254") + max_val = float("0.670901") + mean = float("0.0400058") + std = float("0.130768") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.049509") + max_val = float("0.0452558") + mean = float("-4.74502e-05") + std = float("0.00178091") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.38771") + max_val = float("1.66524") + mean = float("-1.17835") + std = float("0.526979") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.490699") + max_val = float("1.69897") + mean = float("1.10972") + std = float("0.150271") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.00174229") + max_val = float("1.5935") + mean = float("0.0661371") + std = float("0.125469") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-0.179287") + max_val = float("0.13044") + mean = float("-0.00872415") + std = float("0.0427454") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.228408") + max_val = float("0.195018") + mean = float("-0.000416299") + std = float("0.00607886") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-0.00934079") + max_val = float("0.000912873") + mean = float("-0.00232942") + std = float("0.00179213") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202973") + max_val = float("0.134442") + mean = float("-0.00176474") + std = float("0.00412056") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.95309") + max_val = float("0.504365") + mean = float("-0.323219") + std = 
float("0.341256") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0702496") + max_val = float("2.23431") + mean = float("0.601975") + std = float("0.439856") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("4.76391e-05") + max_val = float("0.0303151") + mean = float("0.00419476") + std = float("0.00513906") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0732794") + max_val = float("0.0514422") + mean = float("0.00968913") + std = float("0.0191895") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0205319") + max_val = float("0.0540422") + mean = float("-0.000288173") + std = float("0.00337745") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.95309") + max_val = float("0.504365") + mean = float("-0.323219") + std = float("0.341256") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.384813") + max_val = float("2.87188") + mean = float("1.22967") + std = float("0.52103") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.00334316") + max_val = float("0.649378") + mean = float("0.0680944") + std = float("0.0920336") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.149357") + max_val = float("0.210071") + mean = float("0.0335995") + std = float("0.0682833") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0206109") + max_val = float("0.0328944") + mean = float("-0.000105819") + std = float("0.00247448") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.89041") + max_val = float("-0.124973") + mean = float("-1.33321") + std = float("0.398058") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.719268") + max_val = float("2.09501") + mean = float("1.16332") + std = float("0.171594") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0195207") + max_val = float("8.32209") + mean = float("1.10631") + std = float("1.4008") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-1.98123") + max_val = float("1.21603") + mean = float("0.0516891") + std = float("0.330024") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0302761") + max_val = float("0.0413314") + mean = float("-9.04468e-05") + std = float("0.00300886") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.92933") + 
max_val = float("0.596271") + mean = float("-0.261624") + std = float("0.334454") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.0488552") + max_val = float("1.76755") + mean = float("0.453568") + std = float("0.30262") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("2.24294e-05") + max_val = float("0.164315") + mean = float("0.00587699") + std = float("0.0161003") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0490808") + max_val = float("0.0669156") + mean = float("0.0149264") + std = float("0.0217292") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0242882") + max_val = float("0.0299433") + mean = float("-0.000340263") + std = float("0.00326035") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.92933") + max_val = float("0.596271") + mean = float("-0.261624") + std = float("0.334454") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.419712") + max_val = float("2.27591") + mean = float("1.14823") + std = float("0.381008") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.00492381") + max_val = float("1.92887") + mean = float("0.107118") + std = float("0.20391") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.242602") + max_val = float("0.295724") + mean = float("0.0375102") + std = float("0.0955899") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0188034") + max_val = float("0.0253279") + mean = float("-8.278e-05") + std = float("0.00266932") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.53631") + max_val = float("-0.131674") + mean = float("-1.31632") + std = float("0.443813") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.718027") + max_val = float("1.65396") + mean = float("1.17902") + std = float("0.161151") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0311619") + max_val = float("12.4415") + mean = float("1.08409") + std = float("1.81585") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.09829") + max_val = float("0.629046") + mean = float("0.123942") + std = float("0.213918") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.042754") + max_val = float("0.0429227") + mean = float("-0.000109615") + std = float("0.00313016") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] 
+ dtype = "float32" + min_val = float("-1.76349") + max_val = float("0.544904") + mean = float("-0.246355") + std = float("0.349411") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.00801797") + max_val = float("1.66398") + mean = float("0.357353") + std = float("0.246738") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("7.15677e-05") + max_val = float("0.105247") + mean = float("0.00410654") + std = float("0.0106252") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0643907") + max_val = float("0.0951448") + mean = float("0.0170309") + std = float("0.0236018") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.020302") + max_val = float("0.0261232") + mean = float("-0.00036246") + std = float("0.00323036") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-1.76349") + max_val = float("0.544905") + mean = float("-0.246355") + std = float("0.349411") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.385673") + max_val = float("1.96785") + mean = float("1.06985") + std = float("0.336108") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00226675") + max_val = float("1.91593") + mean = float("0.101595") + std = float("0.227642") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.261671") + max_val = float("0.282769") + mean = float("0.0457371") + std = float("0.101536") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0263202") + max_val = float("0.031635") + mean = float("-0.000104025") + std = float("0.00284282") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.51462") + max_val = float("0.158257") + mean = float("-1.26766") + std = float("0.427753") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.598242") + max_val = float("1.78157") + mean = float("1.1494") + std = float("0.161575") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0222714") + max_val = float("37.7966") + mean = float("1.49691") + std = float("3.97204") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-0.873886") + max_val = float("0.81748") + mean = float("0.0710219") + std = float("0.213003") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0380726") + max_val = float("0.047001") + mean = float("-0.000179735") + std = float("0.00344855") + data = None + + +class 
Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-2.08881") + max_val = float("0.648999") + mean = float("-0.259801") + std = float("0.386671") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("0.000298623") + max_val = float("0.722711") + mean = float("0.216526") + std = float("0.135243") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("4.96868e-08") + max_val = float("0.107631") + mean = float("0.00291956") + std = float("0.00896913") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0826544") + max_val = float("0.0880952") + mean = float("0.0119081") + std = float("0.0222372") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0383726") + max_val = float("0.0426544") + mean = float("-0.000299896") + std = float("0.00305223") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.08881") + max_val = float("0.648999") + mean = float("-0.259801") + std = float("0.386671") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.394983") + max_val = float("1.95775") + mean = float("0.953923") + std = float("0.305397") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00262196") + max_val = float("7.469") + mean = float("0.131262") + std = float("0.559292") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-1.11459") + max_val = float("0.413148") + mean = float("0.0497411") + std = float("0.149456") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0273404") + max_val = float("0.0675831") + mean = float("-0.000146946") + std = float("0.00314256") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.77342") + max_val = float("-0.0376017") + mean = float("-1.25998") + std = float("0.434996") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.744961") + max_val = float("1.56236") + mean = float("1.13335") + std = float("0.139709") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.00886955") + max_val = float("38.6321") + mean = float("1.16498") + std = float("4.2895") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.81896") + max_val = float("0.818165") + mean = float("0.0290114") + std = float("0.197181") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0472119") + max_val = float("0.0488221") + mean = 
float("-0.000229853") + std = float("0.00339264") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.20653") + max_val = float("0.515824") + mean = float("-0.218129") + std = float("0.352315") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("-3.41948e-06") + max_val = float("0.680384") + mean = float("0.195099") + std = float("0.117152") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("6.06307e-12") + max_val = float("0.398718") + mean = float("0.00738152") + std = float("0.0311542") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.129674") + max_val = float("0.071682") + mean = float("0.0112656") + std = float("0.0230303") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0270501") + max_val = float("0.0345417") + mean = float("-0.000283263") + std = float("0.00318819") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.20653") + max_val = float("0.515824") + mean = float("-0.218129") + std = float("0.352315") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.398135") + max_val = float("1.5715") + mean = float("0.848265") + std = float("0.259232") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00433799") + max_val = float("10.4863") + mean = float("0.263417") + std = float("0.977392") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.947376") + max_val = float("0.293492") + mean = float("0.0244436") + std = float("0.157364") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0235823") + max_val = float("0.0383498") + mean = float("-6.68298e-05") + std = float("0.00312995") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-2.48859") + max_val = float("-0.0816794") + mean = float("-1.27085") + std = float("0.420072") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.694565") + max_val = float("1.54206") + mean = float("1.10687") + std = float("0.135381") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.00545689") + max_val = float("4.02717") + mean = float("0.403302") + std = float("0.646807") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.588504") + max_val = float("0.746211") + mean = float("0.00482585") + std = float("0.188335") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + 
min_val = float("-0.0454542") + max_val = float("0.0510382") + mean = float("-0.00015484") + std = float("0.00337127") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.23302") + max_val = float("0.509381") + mean = float("-0.153645") + std = float("0.30407") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.00227296") + max_val = float("1.53114") + mean = float("0.236795") + std = float("0.211228") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("2.14271e-05") + max_val = float("0.127867") + mean = float("0.00908259") + std = float("0.0177169") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.0935318") + max_val = float("0.137813") + mean = float("0.0162608") + std = float("0.0290513") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0512067") + max_val = float("0.0280295") + mean = float("-0.000438388") + std = float("0.00368151") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-1.23302") + max_val = float("0.509381") + mean = float("-0.153645") + std = float("0.30407") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.332594") + max_val = float("1.44107") + mean = float("0.751491") + std = float("0.218724") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00580545") + max_val = float("3.585") + mean = float("0.245801") + std = float("0.410758") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.654561") + max_val = float("0.520224") + mean = float("0.0637138") + std = float("0.132489") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0460525") + max_val = float("0.0463877") + mean = float("-0.000214011") + std = float("0.0030628") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-1.86975") + max_val = float("-0.187693") + mean = float("-1.16402") + std = float("0.325704") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("0.751831") + max_val = float("1.61753") + mean = float("1.10973") + std = float("0.131817") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("0.00623295") + max_val = float("7.47422") + mean = float("0.274923") + std = float("0.674046") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-0.566295") + max_val = float("0.711085") + mean = float("-0.0315813") + std = float("0.130281") + data = None + + +class Program_weight_tensor_parameter_543: + name = 
"parameter_543" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0494038") + max_val = float("0.0562226") + mean = float("-0.000159684") + std = float("0.00334459") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.81555") + max_val = float("1.61423") + mean = float("-0.0254536") + std = float("0.761522") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.476452") + max_val = float("2.07853") + mean = float("0.880028") + std = float("0.224402") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.00413651") + max_val = float("6.56568") + mean = float("0.232782") + std = float("0.576315") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-0.211511") + max_val = float("0.111527") + mean = float("-0.00608796") + std = float("0.0521257") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0748107") + max_val = float("0.0760846") + mean = float("-0.000563234") + std = float("0.00701903") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.91449") + max_val = float("2.11472") + mean = float("0.103721") + std = float("0.667968") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.856446") + max_val = float("5.71069") + mean = float("1.92533") + std = float("0.968824") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.00176998") + max_val = float("0.489107") + mean = float("0.0413273") + std = float("0.0662281") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-0.089043") + max_val = float("0.117142") + mean = float("0.00920289") + std = float("0.0350583") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0673953") + max_val = float("0.128402") + mean = float("-0.000430991") + std = float("0.00603374") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [384] + dtype = "float32" + min_val = float("-2.92773") + max_val = float("1.33693") + mean = float("-0.313455") + std = float("0.572295") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [384] + dtype = "float32" + min_val = float("0.699301") + max_val = float("2.45328") + mean = float("1.14709") + std = float("0.257994") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [384] + dtype = "float32" + min_val = float("0.00380316") + max_val = float("33.1796") + mean = float("0.637666") + std = float("1.97202") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [384] + dtype = "float32" + min_val = float("-0.920201") + max_val = float("0.383577") + mean = float("0.0242312") + std = float("0.154423") + data = None + 
+ +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0550514") + max_val = float("0.0583738") + mean = float("-6.14001e-05") + std = float("0.00339831") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [256] + dtype = "float32" + min_val = float("-2.08101") + max_val = float("1.23907") + mean = float("-0.929071") + std = float("0.560762") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [256] + dtype = "float32" + min_val = float("0.460852") + max_val = float("1.60647") + mean = float("1.03822") + std = float("0.186473") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [256] + dtype = "float32" + min_val = float("0.00288698") + max_val = float("2.85435") + mean = float("0.124524") + std = float("0.278458") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [256] + dtype = "float32" + min_val = float("-0.513886") + max_val = float("0.320015") + mean = float("-0.0168315") + std = float("0.107427") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.22133") + max_val = float("0.170826") + mean = float("-0.000439134") + std = float("0.0126658") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-0.0149414") + max_val = float("0.00274012") + mean = float("-0.0039491") + std = float("0.00303571") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.518434") + max_val = float("0.174661") + mean = float("-0.00334445") + std = float("0.00882037") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.89866") + max_val = float("0.648435") + mean = float("-0.163531") + std = float("0.445964") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.118827") + max_val = float("3.45075") + mean = float("0.648866") + std = float("0.709221") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("5.72145e-05") + max_val = float("0.0305947") + mean = float("0.00350272") + std = float("0.00465155") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0537825") + max_val = float("0.0541921") + mean = float("0.010285") + std = float("0.0254635") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0435225") + max_val = float("0.0719533") + mean = float("-0.000638006") + std = float("0.00649986") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.89866") + max_val = float("0.648435") + mean = float("-0.163531") + std = float("0.445964") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.274848") + max_val = 
float("5.76456") + mean = float("1.11235") + std = float("0.941375") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.00262636") + max_val = float("0.731286") + mean = float("0.0545356") + std = float("0.080615") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.403692") + max_val = float("0.191464") + mean = float("0.0207246") + std = float("0.102896") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.034156") + max_val = float("0.0485352") + mean = float("-0.000167324") + std = float("0.00457433") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-2.47237") + max_val = float("-0.039721") + mean = float("-1.25291") + std = float("0.438553") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.484422") + max_val = float("1.73309") + mean = float("0.919688") + std = float("0.175697") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0913526") + max_val = float("5.78304") + mean = float("1.20582") + std = float("1.11379") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-2.67749") + max_val = float("1.89478") + mean = float("-0.0280634") + std = float("0.649795") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.125626") + max_val = float("0.0867783") + mean = float("-0.000262143") + std = float("0.00565539") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.36009") + max_val = float("0.613418") + mean = float("-0.109119") + std = float("0.363358") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.00951907") + max_val = float("1.85237") + mean = float("0.454878") + std = float("0.359562") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("1.34543e-05") + max_val = float("0.100978") + mean = float("0.00659523") + std = float("0.0127025") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0733295") + max_val = float("0.0665844") + mean = float("0.0164669") + std = float("0.0261863") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0616569") + max_val = float("0.0533414") + mean = float("-0.000915536") + std = float("0.0063832") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.36009") + max_val = float("0.613418") + mean = float("-0.109119") + std = float("0.363358") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + 
min_val = float("0.381804") + max_val = float("2.31148") + mean = float("0.90401") + std = float("0.422733") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.0028765") + max_val = float("1.96506") + mean = float("0.163957") + std = float("0.29453") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.169521") + max_val = float("0.382588") + mean = float("0.0552") + std = float("0.100991") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0539882") + max_val = float("0.0612803") + mean = float("-0.000352692") + std = float("0.0047621") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.30884") + max_val = float("0.356165") + mean = float("-1.21887") + std = float("0.556975") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.424695") + max_val = float("1.92592") + mean = float("1.00728") + std = float("0.236573") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.0237573") + max_val = float("6.63037") + mean = float("1.04207") + std = float("1.2277") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-1.1716") + max_val = float("1.33307") + mean = float("0.029565") + std = float("0.439463") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.134962") + max_val = float("0.136054") + mean = float("-0.000198739") + std = float("0.0057204") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-1.22361") + max_val = float("0.654799") + mean = float("-0.0922317") + std = float("0.305958") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0316927") + max_val = float("1.28685") + mean = float("0.312909") + std = float("0.193627") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("9.22096e-05") + max_val = float("0.267661") + mean = float("0.00780572") + std = float("0.0277819") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.127424") + max_val = float("0.0779931") + mean = float("0.00867301") + std = float("0.0281039") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0357805") + max_val = float("0.04937") + mean = float("-0.000468989") + std = float("0.00648871") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-1.22361") + max_val = float("0.654799") + mean = float("-0.0922317") + std = float("0.305958") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = 
[96] + dtype = "float32" + min_val = float("0.321215") + max_val = float("1.60468") + mean = float("0.742453") + std = float("0.256594") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00594866") + max_val = float("13.1468") + mean = float("0.280576") + std = float("1.35706") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.512798") + max_val = float("0.360902") + mean = float("0.024901") + std = float("0.131996") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0457283") + max_val = float("0.0529573") + mean = float("-0.000146386") + std = float("0.00507802") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-3.56417") + max_val = float("0.313374") + mean = float("-1.16309") + std = float("0.578663") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.515839") + max_val = float("2.22553") + mean = float("1.01898") + std = float("0.244192") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0566049") + max_val = float("8.50527") + mean = float("0.821018") + std = float("1.25589") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-1.3259") + max_val = float("0.986206") + mean = float("0.0226801") + std = float("0.384225") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.106683") + max_val = float("0.124295") + mean = float("-0.000180527") + std = float("0.00585965") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.914024") + max_val = float("0.548448") + mean = float("-0.147808") + std = float("0.291506") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0339392") + max_val = float("1.38054") + mean = float("0.313981") + std = float("0.205844") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("2.21066e-05") + max_val = float("0.136525") + mean = float("0.00723304") + std = float("0.0189755") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0806456") + max_val = float("0.0638613") + mean = float("0.013262") + std = float("0.0276148") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0590545") + max_val = float("0.0449412") + mean = float("-0.00069183") + std = float("0.00674782") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.914024") + max_val = float("0.548448") + mean = float("-0.147808") + std = float("0.291506") + data = None + + +class Program_weight_tensor_parameter_617: 
+ name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.142128") + max_val = float("1.73988") + mean = float("0.702206") + std = float("0.285683") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00387817") + max_val = float("1.69108") + mean = float("0.132084") + std = float("0.265544") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.365992") + max_val = float("0.29138") + mean = float("0.0334006") + std = float("0.11377") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0709519") + max_val = float("0.0589113") + mean = float("-0.000196033") + std = float("0.00498706") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-2.6239") + max_val = float("0.0469523") + mean = float("-1.09235") + std = float("0.492344") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.546345") + max_val = float("1.74702") + mean = float("0.990286") + std = float("0.183688") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.036742") + max_val = float("4.90946") + mean = float("0.515044") + std = float("0.805591") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.8935") + max_val = float("0.923151") + mean = float("-0.0110131") + std = float("0.335925") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0557588") + max_val = float("0.0977312") + mean = float("-0.000280993") + std = float("0.00584596") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.982144") + max_val = float("0.555605") + mean = float("-0.128127") + std = float("0.289988") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0646262") + max_val = float("1.15632") + mean = float("0.273046") + std = float("0.165216") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000135618") + max_val = float("0.187931") + mean = float("0.0150782") + std = float("0.0310702") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0878502") + max_val = float("0.0810483") + mean = float("0.00536994") + std = float("0.0324391") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0693269") + max_val = float("0.0585016") + mean = float("-7.57148e-05") + std = float("0.00796075") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.982144") + max_val = float("0.555603") + mean = float("-0.128127") + std = float("0.289988") + data = None + + 
+class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.178989") + max_val = float("1.52642") + mean = float("0.577049") + std = float("0.230661") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.00908185") + max_val = float("6.25082") + mean = float("0.308875") + std = float("0.731709") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.503088") + max_val = float("0.306438") + mean = float("0.0144393") + std = float("0.128309") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0559299") + max_val = float("0.0452462") + mean = float("5.04339e-05") + std = float("0.0052732") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-3.34611") + max_val = float("0.216964") + mean = float("-1.02056") + std = float("0.542199") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.541273") + max_val = float("2.73475") + mean = float("1.04359") + std = float("0.234238") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.00839167") + max_val = float("10.3218") + mean = float("0.36125") + std = float("1.13641") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.676206") + max_val = float("2.05745") + mean = float("-0.000203486") + std = float("0.390477") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0823601") + max_val = float("0.0816342") + mean = float("-0.000177511") + std = float("0.00638281") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-0.60359") + max_val = float("0.468132") + mean = float("-0.0840719") + std = float("0.256343") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.0544126") + max_val = float("1.22927") + mean = float("0.286115") + std = float("0.196735") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.000143085") + max_val = float("0.359897") + mean = float("0.0363701") + std = float("0.0574546") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.032617") + max_val = float("0.0498112") + mean = float("0.0055907") + std = float("0.016775") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0773345") + max_val = float("0.0606068") + mean = float("-0.000857754") + std = float("0.00880052") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-0.60359") + max_val = float("0.468132") + mean = float("-0.0840719") + 
std = float("0.256343") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.182862") + max_val = float("1.3226") + mean = float("0.518428") + std = float("0.258918") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00892751") + max_val = float("44.0063") + mean = float("0.942949") + std = float("4.46794") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.205703") + max_val = float("0.154926") + mean = float("0.000264955") + std = float("0.061708") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0785968") + max_val = float("0.0587748") + mean = float("6.78683e-05") + std = float("0.00555531") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-2.41369") + max_val = float("0.496388") + mean = float("-0.836608") + std = float("0.475164") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.830242") + max_val = float("2.26639") + mean = float("1.25319") + std = float("0.21549") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.00554491") + max_val = float("3.92354") + mean = float("0.225363") + std = float("0.491203") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-0.682011") + max_val = float("0.863938") + mean = float("-0.0559418") + std = float("0.30762") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.12871") + max_val = float("0.120943") + mean = float("-7.30879e-05") + std = float("0.00678624") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-3.19003") + max_val = float("1.93308") + mean = float("0.508604") + std = float("0.871957") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("0.229244") + max_val = float("2.60033") + mean = float("0.516748") + std = float("0.323392") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.00224007") + max_val = float("3.81285") + mean = float("0.214005") + std = float("0.462532") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-0.449601") + max_val = float("0.320966") + mean = float("-0.0199257") + std = float("0.135458") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.149955") + max_val = float("0.139515") + mean = float("-0.000577616") + std = float("0.0141809") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-4.89436") + max_val = float("1.73118") + 
mean = float("0.421666") + std = float("1.05477") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.368666") + max_val = float("6.94933") + mean = float("1.70017") + std = float("1.37461") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("0.00483646") + max_val = float("0.772254") + mean = float("0.109227") + std = float("0.134964") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-0.28843") + max_val = float("0.305353") + mean = float("0.017579") + std = float("0.109216") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.085332") + max_val = float("0.209102") + mean = float("0.000131895") + std = float("0.0127058") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [192] + dtype = "float32" + min_val = float("-2.26416") + max_val = float("1.81781") + mean = float("-0.104368") + std = float("0.765541") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [192] + dtype = "float32" + min_val = float("0.557485") + max_val = float("3.06726") + mean = float("1.03433") + std = float("0.294829") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [192] + dtype = "float32" + min_val = float("0.0122193") + max_val = float("31.99") + mean = float("0.691469") + std = float("2.78351") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [192] + dtype = "float32" + min_val = float("-0.716405") + max_val = float("0.543476") + mean = float("-0.0205713") + std = float("0.180835") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.088438") + max_val = float("0.0989281") + mean = float("-0.000264084") + std = float("0.00696874") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [128] + dtype = "float32" + min_val = float("-2.77349") + max_val = float("1.94915") + mean = float("-0.748001") + std = float("0.667633") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [128] + dtype = "float32" + min_val = float("0.282119") + max_val = float("2.11163") + mean = float("0.963232") + std = float("0.25164") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [128] + dtype = "float32" + min_val = float("0.00348181") + max_val = float("0.641549") + mean = float("0.0466075") + std = float("0.0727225") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [128] + dtype = "float32" + min_val = float("-0.799433") + max_val = float("0.769186") + mean = float("-0.000783121") + std = float("0.234277") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.237331") + max_val = float("0.234872") + mean = float("-0.00058892") + std = float("0.0227538") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = 
float("-0.0196388") + max_val = float("0.00429926") + mean = float("-0.00604744") + std = float("0.00423631") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.202175") + max_val = float("0.139733") + mean = float("-0.00708148") + std = float("0.015418") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.113449") + max_val = float("0.116382") + mean = float("-8.18214e-05") + std = float("0.0174881") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.110314") + max_val = float("0.0737469") + mean = float("-6.02772e-05") + std = float("0.0116185") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.114734") + max_val = float("0.10347") + mean = float("-0.000467215") + std = float("0.0140058") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: 
+ name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.195911") + max_val = float("0.178967") + mean = float("-0.00335025") + std = float("0.0224713") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.100652") + max_val = float("0.124988") + mean = float("-0.00151496") + std = float("0.0136224") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.177811") + max_val = float("0.145591") + mean = float("-0.00141726") + std = float("0.0186424") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.374308") + max_val = float("0.181152") + mean = float("-0.000464201") + std = float("0.0312118") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + 
name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.310804") + max_val = float("0.0760952") + mean = float("-0.000399181") + std = float("0.0158955") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.156863") + max_val = float("0.102575") + mean = float("-0.000657703") + std = float("0.0180323") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.231057") + max_val = float("0.209028") + mean = float("-0.00477756") + std = float("0.0398542") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.096208") + max_val = float("0.142834") + mean = float("-0.000778624") + std = float("0.0186653") + data = None + + +class Program_weight_tensor_parameter_733: + 
name = "parameter_733" + shape = [96] + dtype = "float32" + min_val = float("-3.10984") + max_val = float("3.25719") + mean = float("0.366741") + std = float("1.14155") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [96] + dtype = "float32" + min_val = float("0.799139") + max_val = float("4.98464") + mean = float("1.87865") + std = float("0.779024") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [96] + dtype = "float32" + min_val = float("1.76406") + max_val = float("1001.9") + mean = float("80.1803") + std = float("148.327") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [96] + dtype = "float32" + min_val = float("-12.59") + max_val = float("13.3036") + mean = float("-0.396251") + std = float("4.14618") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.0895351") + max_val = float("0.0977452") + mean = float("-0.000531484") + std = float("0.0157853") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.15482") + max_val = float("0.145416") + mean = float("-0.000966986") + std = float("0.0231232") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.27982") + max_val = float("0.160959") + mean = float("0.00175241") + std = float("0.0302711") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name 
= "parameter_751" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.231799") + max_val = float("0.266845") + mean = float("0.00703356") + std = float("0.0617717") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/graph_hash.txt similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_hash.txt rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/graph_hash.txt diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/graph_net.json similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_net.json rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/graph_net.json diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/model.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/model.py rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/model.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_11/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_1/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt deleted file mode 100644 index 065083791..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -753a38369a6c1cbad2faa292d21d2c22dc88ca45239db771fc9c111eed147698 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py deleted file mode 100644 index c0dc17231..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/input_meta.py +++ /dev/null @@ -1,69 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [7581] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 7581] - dtype = "float32" - max_val = float("2.0") - mean = float("0.00995911") - std = float("0.10127") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 11, 7581] - dtype = "float32" - max_val = float("0.971142") - mean = float("0.00550478") - std = float("0.0532604") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 11, 7581] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000905374") - std = float("0.0300758") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_5: - name = 
"data_5" - shape = [2, 11, 1] - dtype = "int32" - data = [0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 11, 4] - dtype = "float32" - max_val = float("608.0") - mean = float("218.815") - std = float("214.701") - data = None - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 11, 7581] - dtype = "float32" - max_val = float("0.00885437") - mean = float("1.14459e-05") - std = float("0.000226499") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py deleted file mode 100644 index d6c3a388b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/model.py +++ /dev/null @@ -1,248 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_1, full_int_array_0) - del data_1, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 11, 1] - - # pd_op.tile: (2x11x-1xb) <- (2x1x-1xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) - del full_int_array_1, greater_than_0 - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_2, full_1, False, False, paddle.int64) - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 - ) - del argmax_0, full_2 - - # pd_op.transpose: (2x11x-1xf32) <- (2x-1x11xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x11x-1xf32) <- (2x11x-1xb, 2x11x-1xf32, 2x11x-1xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_3) - del data_3, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.sum: (2x-1xf32) <- (2x11x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - - # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) - del full_1 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("11"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_4, full_3, float("0"), True) - del data_4, full_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_0) - del argmax_1, cast_0 - - # pd_op.flatten: (22xi32) <- (2x11x1xi32) - flatten_0 = paddle._C_ops.flatten(data_5, 0, 2) - del data_5 - - # 
pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (-1xi32) <- (22xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) - del flatten_0 - - # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_5, data_0] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) - del gather_0, stack_0 - - # pd_op.full: (xf32) <- () - full_6 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_6) - del full_6, sum_0 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() - ) - del full_7 - - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [-1, 4] - - # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_3) - del data_6, full_int_array_3 - - # pd_op.gather: (-1x4xf32) <- (22x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) - del flatten_1, full_4, reshape_2 - - # pd_op.full: (xi64) <- () - full_8 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_1 = [full_5, data_0, full_8] - del data_0, full_5, full_8 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) - del gather_1, stack_1 - - # pd_op.full: (1xi32) <- () - full_9 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_9, where_1.dtype), full_9 - ) - del full_9 - - # pd_op.full: (4xi64) <- () - full_10 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_10, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), - ) - del full_10 - - # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) - multiply_1 = paddle._C_ops.multiply(data_7, where_0) - del data_7 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 
1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_4, True) - - # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_2, where_0) - del data_2, where_0 - - # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_4, True) - del multiply_2 - - # pd_op.full: (1xf32) <- () - full_11 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_11, float("1e-09"), True) - del full_11, max_0 - - # pd_op.divide: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 - - # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x-1xf32) <- (2x11x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) - del full_int_array_2, multiply_3 - - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_4) - del full_int_array_4, max_2 - - # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt deleted file mode 100644 index a2c6d8b8a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -81e1c79881631ba9c8fea543662e0c88108b9a0a23f037c923767f08270a38b3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py deleted file mode 100644 index 0a0635ece..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/model.py +++ /dev/null @@ -1,1050 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - ): - # pd_op.full: (1xf64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("16"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (16xi64) <- (1xf64, 1xf64, 1xf64) 
- arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") - del full_1 - - # pd_op.cast: (16xf32) <- (16xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (16xf32) <- (16xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (16xf32) <- (16xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) - del full_4, scale_0 - - # builtin.combine: ([16xf32, 16xf32]) <- (16xf32, 16xf32) - combine_0 = [scale_1, scale_1] - del scale_1 - - # pd_op.meshgrid: ([16x16xf32, 16x16xf32]) <- ([16xf32, 16xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (16x16xf32, 16x16xf32) <- ([16x16xf32, 16x16xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - - # builtin.combine: ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32) - combine_1 = [scale_2, scale_3, scale_4, scale_5] - del scale_2, scale_3, scale_4, scale_5 - - # pd_op.stack: (16x16x4xf32) <- ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (16x16x2xf32) <- ([16x16xf32, 16x16xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (256x4xf32) <- (16x16x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (256x2xf32) <- (16x16x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.full: (256x1xf32) <- () - full_5 = paddle._C_ops.full( - [256, 1], - float("32"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("32"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") - del full_6 - - # pd_op.cast: (32xf32) <- (32xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (32xf32) <- (32xf32, 1xf32) - scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) - del cast_1 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (32xf32) <- (32xf32, 1xf32) - scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) - del full_7, scale_6 - - # builtin.combine: ([32xf32, 32xf32]) <- 
(32xf32, 32xf32) - combine_3 = [scale_7, scale_7] - del scale_7 - - # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_3) - del combine_3 - - # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) - - # builtin.combine: ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32) - combine_4 = [scale_8, scale_9, scale_10, scale_11] - del scale_10, scale_11, scale_8, scale_9 - - # pd_op.stack: (32x32x4xf32) <- ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) - stack_2 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) - combine_5 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # pd_op.reshape: (1024x4xf32) <- (32x32x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) - del stack_2 - - # pd_op.reshape: (1024x2xf32) <- (32x32x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) - del stack_3 - - # pd_op.full: (1024x1xf32) <- () - full_8 = paddle._C_ops.full( - [1024, 1], - float("16"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_9 = paddle._C_ops.full( - [1], float("64"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) - arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") - del full_0, full_2, full_9 - - # pd_op.cast: (64xf32) <- (64xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (64xf32) <- (64xf32, 1xf32) - scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (64xf32) <- (64xf32, 1xf32) - scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) - del full_10, scale_12 - - # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) - combine_6 = [scale_13, scale_13] - del scale_13 - - # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_6) - del combine_6 - - # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) - del full_3 - - # builtin.combine: 
([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32) - combine_7 = [scale_14, scale_15, scale_16, scale_17] - del scale_14, scale_15, scale_16, scale_17 - - # pd_op.stack: (64x64x4xf32) <- ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) - stack_4 = paddle._C_ops.stack(combine_7, -1) - del combine_7 - - # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) - combine_8 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) - stack_5 = paddle._C_ops.stack(combine_8, -1) - del combine_8 - - # pd_op.reshape: (4096x4xf32) <- (64x64x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) - del full_int_array_0, stack_4 - - # pd_op.reshape: (4096x2xf32) <- (64x64x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) - del full_int_array_1, stack_5 - - # pd_op.full: (4096x1xf32) <- () - full_11 = paddle._C_ops.full( - [4096, 1], - float("8"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([256x4xf32, 1024x4xf32, 4096x4xf32]) <- (256x4xf32, 1024x4xf32, 4096x4xf32) - combine_9 = [reshape_0, reshape_2, reshape_4] - - # pd_op.concat: (5376x4xf32) <- ([256x4xf32, 1024x4xf32, 4096x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_9, full_12) - del combine_9 - - # builtin.combine: ([256x2xf32, 1024x2xf32, 4096x2xf32]) <- (256x2xf32, 1024x2xf32, 4096x2xf32) - combine_10 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (5376x2xf32) <- ([256x2xf32, 1024x2xf32, 4096x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_10, full_12) - del combine_10 - - # builtin.combine: ([256x1xf32, 1024x1xf32, 4096x1xf32]) <- (256x1xf32, 1024x1xf32, 4096x1xf32) - combine_11 = [full_5, full_8, full_11] - del full_11, full_5, full_8 - - # pd_op.concat: (5376x1xf32) <- ([256x1xf32, 1024x1xf32, 4096x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_11, full_12) - del combine_11, full_12 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_2 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_2 - - # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x16x16xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_0, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_3 = [1, -1, 1, 1] - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) - del parameter_52 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - - # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x16x16xf32) - add_1 = paddle._C_ops.add(swish_0, data_0) - - # pd_op.conv2d: (2x4x16x16xf32) <- (2x768x16x16xf32, 4x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) - del parameter_45 - - # pd_op.add: (2x4x16x16xf32) <- (2x4x16x16xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) - del parameter_43 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) - del data_0 - - # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x68x16x16xf32) <- (2x768x16x16xf32, 68x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) - del parameter_36 - - # pd_op.add: (2x68x16x16xf32) <- (2x68x16x16xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x4x16x16xf32) <- (2x4x16x16xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - 
- # pd_op.flatten: (2x4x256xf32) <- (2x4x16x16xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x256x4xf32) <- (2x4x256xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (2x68x256xf32) <- (2x68x16x16xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x256x68xf32) <- (2x68x256xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x32x32xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_1, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) - del parameter_34 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) - - # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x32x32xf32) - add_6 = paddle._C_ops.add(swish_2, data_1) - - # pd_op.conv2d: (2x4x32x32xf32) <- (2x384x32x32xf32, 4x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) - del parameter_27 - - # pd_op.add: (2x4x32x32xf32) <- (2x4x32x32xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) - del parameter_25 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x384x32x32xf32) <- (2x384x32x32xf32, 
2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) - del data_1 - - # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x68x32x32xf32) <- (2x384x32x32xf32, 68x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) - del parameter_18 - - # pd_op.add: (2x68x32x32xf32) <- (2x68x32x32xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x4x32x32xf32) <- (2x4x32x32xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (2x4x1024xf32) <- (2x4x32x32xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (2x1024x4xf32) <- (2x4x1024xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x68x1024xf32) <- (2x68x32x32xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (2x1024x68xf32) <- (2x68x1024xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x64x64xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_2, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) - del parameter_16 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) - - # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, 
- parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x64x64xf32) - add_11 = paddle._C_ops.add(swish_4, data_2) - - # pd_op.conv2d: (2x4x64x64xf32) <- (2x192x64x64xf32, 4x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) - del parameter_9 - - # pd_op.add: (2x4x64x64xf32) <- (2x4x64x64xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) - del parameter_7 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) - del data_2 - - # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x68x64x64xf32) <- (2x192x64x64xf32, 68x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) - del full_int_array_3, parameter_0 - - # pd_op.add: (2x68x64x64xf32) <- (2x68x64x64xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x4x64x64xf32) <- (2x4x64x64xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (2x4x4096xf32) <- (2x4x64x64xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (2x4096x4xf32) <- (2x4x4096xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (2x68x4096xf32) <- (2x68x64x64xf32) - flatten_5 = 
paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (2x4096x68xf32) <- (2x68x4096xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_13 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_13 - - # builtin.combine: ([2x256x4xf32, 2x1024x4xf32, 2x4096x4xf32]) <- (2x256x4xf32, 2x1024x4xf32, 2x4096x4xf32) - combine_12 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (2x5376x4xf32) <- ([2x256x4xf32, 2x1024x4xf32, 2x4096x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_12, full_13) - del combine_12 - - # builtin.combine: ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32]) <- (2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32) - combine_13 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (2x5376x68xf32) <- ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_13, full_13) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_13, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_13, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_2, - reshape_4, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt deleted file mode 100644 index 27bd82e0e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py deleted file mode 100644 index 5db76f83d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/model.py +++ /dev/null @@ -1,509 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (2x-1xi32) <- (2x-1xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] - - # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 - - # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 - - # pd_op.sum: (2x-1xf32) <- (2x-1x4xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 - - # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 - - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) - - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) - - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, 
split_4) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 - - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) - - # pd_op.scale: (-1x1xf32) <- 
(-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] - - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) - - # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 - - # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 68] - - # pd_op.tile: (2x-1x68xi32) <- (2x-1x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 - - # pd_op.cast: (2x-1x68xb) <- (2x-1x68xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 - - # pd_op.masked_select: (-1xf32) <- (2x-1x68xf32, 2x-1x68xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 17] - - # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 - - # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 - - # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 - - # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 - - # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x-1x4xf32) <- 
(2x-1x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) - del concat_0, full_7 - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 - - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) - - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) - - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 - - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) - del cast_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( - abs_0, - add_0, - add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - 
assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) - - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt deleted file mode 100644 index 7fbb8551e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py deleted file mode 100644 index 22e6b559d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/input_meta.py +++ /dev/null @@ -1,48 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [1] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 4116, 4] - dtype = "float32" - max_val = float("0.0452349") - mean = float("0.00261075") - std = float("0.00638883") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 4116, 68] - dtype = "float32" - min_val = float("-13.9672") - max_val = float("27.3274") - mean = float("1.46976e-05") - std = float("0.86683") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [4116, 2] - dtype = "float32" - min_val = float("4.0") - max_val = float("444.0") - mean = float("224.0") - std = float("129.279") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [4116, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py deleted file mode 100644 index 2d525f6d3..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/model.py +++ /dev/null @@ -1,192 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, 
data_1, data_2, data_3, data_4): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_3, data_4) - del data_3 - - # pd_op.shape64: (3xi64) <- (2x-1x68xf32) - shape64_0 = paddle._C_ops.shape64(data_2) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_2, stack_0) - del data_2, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: 
([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.less_than: (xb) <- (xi64, xi64) - less_than_0 = paddle._C_ops.less_than(data_0, full_6) - del data_0, full_6 - - # pd_op.cast: (xi64) <- (xb) - cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) - del less_than_0 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) - del cast_0 - - # pd_op.cast: (xi64) <- (xb) - cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) - del not_equal_0 - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_0 = paddle._C_ops.equal(cast_1, full_7) - del cast_1, full_7 - - # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) - share_data__0 = data_1.detach() - del data_1 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_4, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt deleted file mode 100644 index 896fa94fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py deleted file mode 100644 index 2064cb376..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/input_meta.py +++ /dev/null @@ -1,38 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 8400, 4] - dtype = "float32" - min_val = float("0.72227") - max_val = float("10.3192") - mean = float("5.96735") - std = float("1.32539") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8400, 2] - dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8400, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 2] - dtype = "float32" - data = [1.18519, 
0.666667, 1.18519, 0.666667] diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py deleted file mode 100644 index 561c0c35b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/model.py +++ /dev/null @@ -1,94 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del data_0, full_0 - - # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) - del full_1, split_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_0 = paddle._C_ops.add(scale_0, data_1) - del scale_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_1 = paddle._C_ops.add(split_1, data_1) - del data_1, split_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) - combine_0 = [add_0, add_1] - del add_0, add_1 - - # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) - del data_3, full_3 - - # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) - ( - split_2, - split_3, - ) = split_with_num_1 - del split_with_num_1 - - # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] - del split_2, split_3 - - # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] - - # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 - - # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 - - return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - 
"num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt index 7b5429dac..a62e3c346 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/graph_hash.txt @@ -1 +1 @@ -d079f80b47d3627f0f874f2ab967abc6e350f273029f03bcf40572198642b666 \ No newline at end of file +b3076122b18bff71174d70b804480bc7272686491c85cf6a5e3c4b3eba6b39ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py index ec4e87e7b..b47fda69a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/input_meta.py @@ -1,9 +1,62 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3, 512, 512] + shape = [2, 12096] dtype = "float32" - min_val = float("-1.9517") - max_val = float("2.64") - mean = float("-0.133605") - std = float("0.697919") + max_val = float("2.0") + mean = float("0.00223214") + std = float("0.0480607") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("0.940076") + mean = float("0.000804856") + std = float("0.0222414") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000202922") + std = float("0.0142436") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 4] + dtype = "float32" + max_val = float("629.571") + mean = float("192.521") + std = float("244.622") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 12096] + dtype = "float32" + max_val = float("0.00694391") + mean = float("1.95634e-06") + std = float("9.01196e-05") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py index 2a640202f..41382ee7e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/model.py @@ -5,7392 +5,225 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - 
parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - 
parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - 
parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - 
parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - data_0, - ): - # pd_op.conv2d: (2x32x256x256xf32) <- (2x3x512x512xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_697, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_0, parameter_697 - - # pd_op.batch_norm_: (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_696, - parameter_695, - parameter_694, - parameter_693, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_693, parameter_694, parameter_695, parameter_696 - - # pd_op.swish: (2x32x256x256xf32) <- (2x32x256x256xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (2x32x256x256xf32) <- (2x32x256x256xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_692 - - # pd_op.batch_norm_: (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x256x256xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_691, - parameter_690, - parameter_689, - parameter_688, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_688, parameter_689, parameter_690, parameter_691 - - # pd_op.swish: (2x32x256x256xf32) <- (2x32x256x256xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x64x256x256xf32) <- (2x32x256x256xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_687 - - # pd_op.batch_norm_: (2x64x256x256xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x256x256xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_686, - parameter_685, - parameter_684, - parameter_683, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_683, parameter_684, parameter_685, parameter_686 - - # pd_op.swish: (2x64x256x256xf32) <- (2x64x256x256xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.conv2d: (2x96x128x128xf32) <- (2x64x256x256xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_682, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_682 - - # pd_op.batch_norm_: (2x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x128x128xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - 
batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_681, - parameter_680, - parameter_679, - parameter_678, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_678, parameter_679, parameter_680, parameter_681 - - # pd_op.swish: (2x96x128x128xf32) <- (2x96x128x128xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x96x128x128xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_677 - - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_676, - parameter_675, - parameter_674, - parameter_673, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_673, parameter_674, parameter_675, parameter_676 - - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x96x128x128xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_672, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_672 - - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_671, - parameter_670, - parameter_669, - parameter_668, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_668, parameter_669, parameter_670, parameter_671 - - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_667, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_667 - - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_666, - parameter_665, - parameter_664, - parameter_663, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_663, parameter_664, parameter_665, parameter_666 - - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_662, [1, 1], [1, 1], "EXPLICIT", [1, 
1], 1, "NCHW" - ) - del parameter_662 + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_661, - parameter_660, - parameter_659, - parameter_658, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_658, parameter_659, parameter_660, parameter_661 - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_657, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_657 + # pd_op.unsqueeze: (2x1x12096xf32) <- (2x12096xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_656, - parameter_655, - parameter_654, - parameter_653, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_653, parameter_654, parameter_655, parameter_656 - # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) - add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + # pd_op.greater_than: (2x1x12096xb) <- (2x1x12096xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_8 = paddle._C_ops.swish(add_0) + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 11, 1] - # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) + # pd_op.tile: (2x11x12096xb) <- (2x1x12096xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_652, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - del parameter_652 - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_651, - parameter_650, - parameter_649, - parameter_648, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
parameter_648, parameter_649, parameter_650, parameter_651 - - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_9, parameter_647, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_647 + # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_646, - parameter_645, - parameter_644, - parameter_643, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_643, parameter_644, parameter_645, parameter_646 - - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_642, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_642 - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_641, - parameter_640, - parameter_639, - parameter_638, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.one_hot: (2x12096x11xf32) <- (2x12096xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 ) - del parameter_638, parameter_639, parameter_640, parameter_641 - - # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) - add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + del argmax_0, full_2 - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_10 = paddle._C_ops.swish(add_2) + # pd_op.transpose: (2x11x12096xf32) <- (2x12096x11xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 - # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) - add_3 = paddle._C_ops.add(add_1, swish_10) + # pd_op.where: (2x11x12096xf32) <- (2x11x12096xb, 2x11x12096xf32, 2x11x12096xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_637, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_637 - - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_636, - parameter_635, - parameter_634, - parameter_633, - False, - float("0.9"), - 
float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_633, parameter_634, parameter_635, parameter_636 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_11 = paddle._C_ops.swish(batch_norm__72) + # pd_op.sum: (2x12096xf32) <- (2x11x12096xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_11, parameter_632, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_632 + # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_631, - parameter_630, - parameter_629, - parameter_628, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_628, parameter_629, parameter_630, parameter_631 - # pd_op.conv2d: (2x48x128x128xf32) <- (2x48x128x128xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_627, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_627 + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 - # pd_op.batch_norm_: (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x128x128xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_626, - parameter_625, - parameter_624, - parameter_623, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_623, parameter_624, parameter_625, parameter_626 + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) - add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) + # pd_op.add: (2x12096xi64) <- (2x12096xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 - # pd_op.swish: (2x48x128x128xf32) <- (2x48x128x128xf32) - swish_12 = paddle._C_ops.swish(add_4) + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 - # pd_op.add: (2x48x128x128xf32) <- (2x48x128x128xf32, 2x48x128x128xf32) - add_5 = paddle._C_ops.add(add_3, swish_12) + # pd_op.flatten: (24192xi64) <- (2x12096xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("0"), 
paddle.int32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_3 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_4 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_5 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_6 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_7 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_0 - - # builtin.combine: ([2x48x128x128xf32, 2x48x128x128xf32]) <- (2x48x128x128xf32, 2x48x128x128xf32) - combine_0 = [swish_5, add_5] - - # pd_op.concat: (2x96x128x128xf32) <- ([2x48x128x128xf32, 2x48x128x128xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 + # pd_op.gather: (24192xi32) <- (22xi32, 24192xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_13 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_14 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_15 = full_int_array_0 - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x128x128xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_622, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_622 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_621, full_int_array_1) - del parameter_621 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (2x96x128x128xf32) <- (2x96x128x128xf32, 2x96x1x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (2x128x128x128xf32) <- (2x96x128x128xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_0, parameter_620, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (2x128x128x128xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x128x128xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.swish: (2x128x128x128xf32) <- (2x128x128x128xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - - # pd_op.conv2d: (2x192x64x64xf32) <- (2x128x128x128xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - 
swish_13, parameter_615, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615 - - # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x192x64x64xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_15 = paddle._C_ops.swish(batch_norm__102) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x192x64x64xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_605 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_604, - parameter_603, - parameter_602, - parameter_601, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_601, parameter_602, parameter_603, parameter_604 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_16 = paddle._C_ops.swish(batch_norm__108) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_600, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_600 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_599, - parameter_598, - parameter_597, - parameter_596, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_596, parameter_597, parameter_598, 
parameter_599 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_17, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_595 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_594, - parameter_593, - parameter_592, - parameter_591, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_591, parameter_592, parameter_593, parameter_594 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_590, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_590 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_589, - parameter_588, - parameter_587, - parameter_586, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_586, parameter_587, parameter_588, parameter_589 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_18 = paddle._C_ops.swish(add_7) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_8 = paddle._C_ops.add(swish_16, swish_18) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_585, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_585 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_584, - parameter_583, - parameter_582, - parameter_581, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_581, parameter_582, parameter_583, parameter_584 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_19, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_580 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: 
f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_579, - parameter_578, - parameter_577, - parameter_576, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_576, parameter_577, parameter_578, parameter_579 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_575, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_575 + full_int_array_3 = [2, 12096] - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_574, - parameter_573, - parameter_572, - parameter_571, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_571, parameter_572, parameter_573, parameter_574 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_20 = paddle._C_ops.swish(add_9) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_10 = paddle._C_ops.add(add_8, swish_20) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_570, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_570 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_569, - parameter_568, - parameter_567, - parameter_566, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_566, parameter_567, parameter_568, parameter_569 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_21 = paddle._C_ops.swish(batch_norm__150) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_565 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_564, - parameter_563, - parameter_562, - parameter_561, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_561, parameter_562, parameter_563, parameter_564 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_21, parameter_560, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_560 - - 
# pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_559, - parameter_558, - parameter_557, - parameter_556, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_556, parameter_557, parameter_558, parameter_559 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_22 = paddle._C_ops.swish(add_11) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_12 = paddle._C_ops.add(add_10, swish_22) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_555, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_555 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_554, - parameter_553, - parameter_552, - parameter_551, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_551, parameter_552, parameter_553, parameter_554 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_550, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_550 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_549, - parameter_548, - parameter_547, - parameter_546, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_546, parameter_547, parameter_548, parameter_549 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_23, parameter_545, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_545 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_544, - parameter_543, - parameter_542, - parameter_541, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_541, 
parameter_542, parameter_543, parameter_544 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_24 = paddle._C_ops.swish(add_13) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_14 = paddle._C_ops.add(add_12, swish_24) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_540, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_540 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_539, - parameter_538, - parameter_537, - parameter_536, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_536, parameter_537, parameter_538, parameter_539 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_535, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_535 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_534, - parameter_533, - parameter_532, - parameter_531, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_531, parameter_532, parameter_533, parameter_534 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_530, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_530 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_529, - parameter_528, - parameter_527, - parameter_526, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_526, parameter_527, parameter_528, parameter_529 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_26 = paddle._C_ops.swish(add_15) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_16 = paddle._C_ops.add(add_14, swish_26) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_525, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_525 - 
- # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_524, - parameter_523, - parameter_522, - parameter_521, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_521, parameter_522, parameter_523, parameter_524 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_27 = paddle._C_ops.swish(batch_norm__204) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_520, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_520 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_519, - parameter_518, - parameter_517, - parameter_516, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_516, parameter_517, parameter_518, parameter_519 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_27, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_515 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_514, - parameter_513, - parameter_512, - parameter_511, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_511, parameter_512, parameter_513, parameter_514 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_28 = paddle._C_ops.swish(add_17) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_18 = paddle._C_ops.add(add_16, swish_28) - - # builtin.combine: ([2x96x64x64xf32, 2x96x64x64xf32]) <- (2x96x64x64xf32, 2x96x64x64xf32) - combine_1 = [swish_15, add_18] - - # pd_op.concat: (2x192x64x64xf32) <- ([2x96x64x64xf32, 2x96x64x64xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x64x64xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_510 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_509, full_int_array_1) - del parameter_509 - - # pd_op.add: (2x192x1x1xf32) <- 
(2x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (2x192x64x64xf32) <- (2x192x64x64xf32, 2x192x1x1xf32) - multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (2x256x64x64xf32) <- (2x192x64x64xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_1, parameter_508, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (2x256x64x64xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x64x64xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.swish: (2x256x64x64xf32) <- (2x256x64x64xf32) - swish_29 = paddle._C_ops.swish(batch_norm__222) - - # pd_op.conv2d: (2x384x32x32xf32) <- (2x256x64x64xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_29, parameter_503, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503 - - # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_493 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, 
-1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_492, - parameter_491, - parameter_490, - parameter_489, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_489, parameter_490, parameter_491, parameter_492 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_488, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_488 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_487, - parameter_486, - parameter_485, - parameter_484, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_484, parameter_485, parameter_486, parameter_487 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_483 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_482, - parameter_481, - parameter_480, - parameter_479, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_479, parameter_480, parameter_481, parameter_482 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_478, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_478 + # pd_op.reshape: (2x12096xi32) <- (24192xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_477, - parameter_476, - parameter_475, - parameter_474, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del 
parameter_474, parameter_475, parameter_476, parameter_477 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_34 = paddle._C_ops.swish(add_20) - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_21 = paddle._C_ops.add(swish_32, swish_34) - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_473, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_473 + # pd_op.greater_than: (2x12096xb) <- (2x12096xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_472, - parameter_471, - parameter_470, - parameter_469, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_469, parameter_470, parameter_471, parameter_472 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_35 = paddle._C_ops.swish(batch_norm__264) - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_35, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full_like: (2x12096xi32) <- (2x12096xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() ) - del parameter_468 + del full_6 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_467, - parameter_466, - parameter_465, - parameter_464, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_464, parameter_465, parameter_466, parameter_467 + # pd_op.where: (2x12096xi32) <- (2x12096xb, 2x12096xi32, 2x12096xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_35, parameter_463, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_463 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_462, - parameter_461, - parameter_460, - parameter_459, - False, - float("0.9"), - 
float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_459, parameter_460, parameter_461, parameter_462 + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) + # pd_op.gather: (24192x4xf32) <- (22x4xf32, 24192xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_36 = paddle._C_ops.swish(add_22) + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 12096, 4] - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_23 = paddle._C_ops.add(add_21, swish_36) + # pd_op.reshape: (2x12096x4xf32) <- (24192x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_458, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_458 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_457, - parameter_456, - parameter_455, - parameter_454, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.one_hot: (2x12096x5xf32) <- (2x12096xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 ) - del parameter_454, parameter_455, parameter_456, parameter_457 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) + del full_7 - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_37, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() ) - del parameter_453 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_452, - parameter_451, - parameter_450, - parameter_449, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), ) - del parameter_449, parameter_450, parameter_451, parameter_452 + del full_8 - # 
pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_448, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_448 + # pd_op.index_select: (2x12096x4xf32) <- (2x12096x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_447, - parameter_446, - parameter_445, - parameter_444, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_444, parameter_445, parameter_446, parameter_447 + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_38 = paddle._C_ops.swish(add_24) + # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_25 = paddle._C_ops.add(add_23, swish_38) + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_443, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_443 + # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_442, - parameter_441, - parameter_440, - parameter_439, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_439, parameter_440, parameter_441, parameter_442 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_39 = paddle._C_ops.swish(batch_norm__300) - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_39, parameter_438, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_438 + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - 
batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_437, - parameter_436, - parameter_435, - parameter_434, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_434, parameter_435, parameter_436, parameter_437 + # pd_op.divide: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_39, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_433 + # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_432, - parameter_431, - parameter_430, - parameter_429, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_429, parameter_430, parameter_431, parameter_432 + # pd_op.max: (2x12096xf32) <- (2x11x12096xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) + # pd_op.unsqueeze: (2x12096x1xf32) <- (2x12096xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_40 = paddle._C_ops.swish(add_26) - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_27 = paddle._C_ops.add(add_25, swish_40) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_428, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_428 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_427, - parameter_426, - parameter_425, - parameter_424, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_424, parameter_425, parameter_426, parameter_427 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_41 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_41, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_423 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_422, - parameter_421, - parameter_420, - parameter_419, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_419, parameter_420, parameter_421, parameter_422 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_41, parameter_418, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_418 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_417, - parameter_416, - parameter_415, - parameter_414, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_414, parameter_415, parameter_416, parameter_417 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_42 = paddle._C_ops.swish(add_28) - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_29 = paddle._C_ops.add(add_27, swish_42) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_413, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_413 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_412, - parameter_411, - parameter_410, - parameter_409, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_409, parameter_410, parameter_411, parameter_412 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_43 = paddle._C_ops.swish(batch_norm__336) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_43, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_408 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_407, - parameter_406, - parameter_405, - parameter_404, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_404, parameter_405, parameter_406, 
parameter_407 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_43, parameter_403, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_403 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_402, - parameter_401, - parameter_400, - parameter_399, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_399, parameter_400, parameter_401, parameter_402 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_44 = paddle._C_ops.swish(add_30) - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_31 = paddle._C_ops.add(add_29, swish_44) - - # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) - combine_2 = [swish_31, add_31] - - # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x384x1x1xf32) <- (2x384x32x32xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_398 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_397, full_int_array_1) - del parameter_397 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (2x384x32x32xf32) <- (2x384x32x32xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (2x512x32x32xf32) <- (2x384x32x32xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_2, parameter_396, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_396 - - # pd_op.batch_norm_: (2x512x32x32xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x32x32xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_395, - parameter_394, - parameter_393, - parameter_392, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_392, parameter_393, parameter_394, parameter_395 - - # pd_op.swish: (2x512x32x32xf32) <- (2x512x32x32xf32) - swish_45 = paddle._C_ops.swish(batch_norm__354) - - # pd_op.conv2d: (2x768x16x16xf32) <- (2x512x32x32xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_391, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_391 - - # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_390, - parameter_389, - parameter_388, - parameter_387, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_387, parameter_388, parameter_389, parameter_390 - - # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) - swish_46 = paddle._C_ops.swish(batch_norm__360) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_46, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_386 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_385, - parameter_384, - parameter_383, - parameter_382, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_382, parameter_383, parameter_384, parameter_385 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_47 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_46, parameter_381, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_381 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_380, - parameter_379, - parameter_378, - parameter_377, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_377, parameter_378, parameter_379, parameter_380 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_48 = paddle._C_ops.swish(batch_norm__372) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_376, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_376 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_375, - parameter_374, - parameter_373, - parameter_372, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_372, parameter_373, parameter_374, parameter_375 - - # pd_op.swish: 
(2x384x16x16xf32) <- (2x384x16x16xf32) - swish_49 = paddle._C_ops.swish(batch_norm__378) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_49, parameter_371, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_371 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_370, - parameter_369, - parameter_368, - parameter_367, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_367, parameter_368, parameter_369, parameter_370 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_49, parameter_366, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_366 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_365, - parameter_364, - parameter_363, - parameter_362, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_362, parameter_363, parameter_364, parameter_365 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_50 = paddle._C_ops.swish(add_33) - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_34 = paddle._C_ops.add(swish_48, swish_50) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_361, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_361 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_360, - parameter_359, - parameter_358, - parameter_357, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_357, parameter_358, parameter_359, parameter_360 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_51 = paddle._C_ops.swish(batch_norm__396) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_51, parameter_356, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_356 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, 
- batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_355, - parameter_354, - parameter_353, - parameter_352, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_352, parameter_353, parameter_354, parameter_355 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_51, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_351 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_350, - parameter_349, - parameter_348, - parameter_347, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_347, parameter_348, parameter_349, parameter_350 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_52 = paddle._C_ops.swish(add_35) - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_36 = paddle._C_ops.add(add_34, swish_52) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_346, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_346 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_345, - parameter_344, - parameter_343, - parameter_342, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_342, parameter_343, parameter_344, parameter_345 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_53 = paddle._C_ops.swish(batch_norm__414) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_53, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_341 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_340, - parameter_339, - parameter_338, - parameter_337, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_337, parameter_338, parameter_339, parameter_340 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_53, parameter_336, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_336 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_335, - parameter_334, - parameter_333, - parameter_332, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_332, parameter_333, parameter_334, parameter_335 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_54 = paddle._C_ops.swish(add_37) - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_38 = paddle._C_ops.add(add_36, swish_54) - - # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) - combine_3 = [swish_47, add_38] - - # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (2x768x1x1xf32) <- (2x768x16x16xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_331 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_330, full_int_array_1) - del full_int_array_1, parameter_330 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - - # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (2x768x16x16xf32) <- (2x768x16x16xf32, 2x768x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) - - # pd_op.conv2d: (2x1024x16x16xf32) <- (2x768x16x16xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_3, parameter_329, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_329 - - # pd_op.batch_norm_: (2x1024x16x16xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x16x16xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_328, - parameter_327, - parameter_326, - parameter_325, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_325, parameter_326, parameter_327, parameter_328 - - # pd_op.swish: (2x1024x16x16xf32) <- (2x1024x16x16xf32) - swish_55 = paddle._C_ops.swish(batch_norm__432) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x1024x16x16xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - swish_55, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 
384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_56 = paddle._C_ops.swish(batch_norm__438) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x1024x16x16xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - swish_55, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_319 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_57 = paddle._C_ops.swish(batch_norm__444) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_58 = paddle._C_ops.swish(batch_norm__450) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_58, parameter_304, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_304 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_59 = paddle._C_ops.swish(add_40) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_60 = paddle._C_ops.swish(batch_norm__468) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_61 = paddle._C_ops.swish(add_41) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] - - # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_61, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_61, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_61, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32) - combine_4 = [swish_61, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (2x1536x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32, 2x384x16x16xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x1536x16x16xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_4, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_62 = paddle._C_ops.swish(batch_norm__486) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_63 = paddle._C_ops.swish(batch_norm__492) - - # pd_op.conv2d: (2x384x16x16xf32) <- 
(2x384x16x16xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_64 = paddle._C_ops.swish(add_42) - - # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) - combine_5 = [swish_56, swish_64] - - # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_5, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) - swish_65 = paddle._C_ops.swish(batch_norm__510) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x768x16x16xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - 
batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_66 = paddle._C_ops.swish(batch_norm__516) - - # pd_op.nearest_interp: (2x384x32x32xf32) <- (2x384x16x16xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_66, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x384x32x32xf32, 2x512x32x32xf32]) <- (2x384x32x32xf32, 2x512x32x32xf32) - combine_6 = [nearest_interp_0, swish_45] - - # pd_op.concat: (2x896x32x32xf32) <- ([2x384x32x32xf32, 2x512x32x32xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x896x32x32xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_6, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_254 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_67 = paddle._C_ops.swish(batch_norm__522) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x896x32x32xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_6, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_68 = paddle._C_ops.swish(batch_norm__528) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - 
parameter_243, - parameter_242, - parameter_241, - parameter_240, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_69 = paddle._C_ops.swish(batch_norm__534) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_239 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_70 = paddle._C_ops.swish(add_43) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_71 = paddle._C_ops.swish(batch_norm__552) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224 - - # pd_op.batch_norm_: 
(2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_72 = paddle._C_ops.swish(add_44) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_73 = paddle._C_ops.swish(batch_norm__570) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.conv2d: (2x192x32x32xf32) <- 
(2x192x32x32xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_74 = paddle._C_ops.swish(add_45) - - # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) - combine_7 = [swish_67, swish_74] - - # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) - swish_75 = paddle._C_ops.swish(batch_norm__588) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x384x32x32xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_76 = paddle._C_ops.swish(batch_norm__594) - - # pd_op.nearest_interp: (2x192x64x64xf32) <- (2x192x32x32xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_76, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x192x64x64xf32, 
2x256x64x64xf32]) <- (2x192x64x64xf32, 2x256x64x64xf32) - combine_8 = [nearest_interp_1, swish_29] - - # pd_op.concat: (2x448x64x64xf32) <- ([2x192x64x64xf32, 2x256x64x64xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x448x64x64xf32, 96x448x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_77 = paddle._C_ops.swish(batch_norm__600) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x448x64x64xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_78 = paddle._C_ops.swish(batch_norm__606) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_79 = paddle._C_ops.swish(batch_norm__612) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = 
(lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_80 = paddle._C_ops.swish(add_46) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_81 = paddle._C_ops.swish(batch_norm__630) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 
96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_82 = paddle._C_ops.swish(add_47) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_83 = paddle._C_ops.swish(batch_norm__648) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - 
swish_84 = paddle._C_ops.swish(add_48) - - # builtin.combine: ([2x96x64x64xf32, 2x96x64x64xf32]) <- (2x96x64x64xf32, 2x96x64x64xf32) - combine_9 = [swish_77, swish_84] - - # pd_op.concat: (2x192x64x64xf32) <- ([2x96x64x64xf32, 2x96x64x64xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (2x192x64x64xf32) <- (2x192x64x64xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_9, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x64x64xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (2x192x64x64xf32) <- (2x192x64x64xf32) - swish_85 = paddle._C_ops.swish(batch_norm__666) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x64x64xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_86 = paddle._C_ops.swish(batch_norm__672) - - # builtin.combine: ([2x192x32x32xf32, 2x384x32x32xf32]) <- (2x192x32x32xf32, 2x384x32x32xf32) - combine_10 = [swish_86, swish_75] - - # pd_op.concat: (2x576x32x32xf32) <- ([2x192x32x32xf32, 2x384x32x32xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x576x32x32xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_10, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_87 = paddle._C_ops.swish(batch_norm__678) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x576x32x32xf32, 
192x576x1x1xf32) - conv2d_118 = paddle._C_ops.conv2d( - concat_10, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_88 = paddle._C_ops.swish(batch_norm__684) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_89 = paddle._C_ops.swish(batch_norm__690) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_121, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, 
parameter_103 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_90 = paddle._C_ops.swish(add_49) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_91 = paddle._C_ops.swish(batch_norm__708) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_92 = paddle._C_ops.swish(add_50) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - 
batch_norm__730, - batch_norm__731, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_93 = paddle._C_ops.swish(batch_norm__726) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_94 = paddle._C_ops.swish(add_51) - - # builtin.combine: ([2x192x32x32xf32, 2x192x32x32xf32]) <- (2x192x32x32xf32, 2x192x32x32xf32) - combine_11 = [swish_87, swish_94] - - # pd_op.concat: (2x384x32x32xf32) <- ([2x192x32x32xf32, 2x192x32x32xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (2x384x32x32xf32) <- (2x384x32x32xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_11, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x32x32xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_128, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, 
parameter_67, parameter_68 - - # pd_op.swish: (2x384x32x32xf32) <- (2x384x32x32xf32) - swish_95 = paddle._C_ops.swish(batch_norm__744) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x32x32xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_96 = paddle._C_ops.swish(batch_norm__750) - - # builtin.combine: ([2x384x16x16xf32, 2x768x16x16xf32]) <- (2x384x16x16xf32, 2x768x16x16xf32) - combine_12 = [swish_96, swish_65] - - # pd_op.concat: (2x1152x16x16xf32) <- ([2x384x16x16xf32, 2x768x16x16xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x1152x16x16xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_12, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_97 = paddle._C_ops.swish(batch_norm__756) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x1152x16x16xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_12, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_98 = paddle._C_ops.swish(batch_norm__762) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 
384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_48, - parameter_47, - parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, parameter_48 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_99 = paddle._C_ops.swish(batch_norm__768) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_39 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_100 = paddle._C_ops.swish(add_52) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_101 = paddle._C_ops.swish(batch_norm__786) - 
- # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_28, - parameter_27, - parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_102 = paddle._C_ops.swish(add_53) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_138, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_15, parameter_16, parameter_17, parameter_18 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_103 = paddle._C_ops.swish(batch_norm__804) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - 
lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_140, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, parameter_8 - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_104 = paddle._C_ops.swish(add_54) - - # builtin.combine: ([2x384x16x16xf32, 2x384x16x16xf32]) <- (2x384x16x16xf32, 2x384x16x16xf32) - combine_13 = [swish_97, swish_104] - - # pd_op.concat: (2x768x16x16xf32) <- ([2x384x16x16xf32, 2x384x16x16xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (2x768x16x16xf32) <- (2x768x16x16xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x16x16xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (2x768x16x16xf32) <- (2x768x16x16xf32) - swish_0 = paddle._C_ops.swish(batch_norm__822) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_13, - add_14, - add_15, - add_16, - add_17, - add_18, - add_2, - add_20, - add_21, - add_22, - add_23, - add_24, - add_25, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_31, - add_33, - add_34, - add_35, - add_36, - add_37, - add_38, - add_4, - add_40, - add_41, - add_42, - add_43, - add_44, - add_45, - add_46, - add_47, - add_48, - add_49, - add_5, - add_50, - add_51, - add_52, - add_53, - add_54, - add_7, - add_8, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - 
batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - 
batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - batch_norm__438, - batch_norm__439, - batch_norm__44, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - batch_norm__45, - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - 
batch_norm__455, - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__46, - batch_norm__460, - batch_norm__461, - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - batch_norm__468, - batch_norm__469, - batch_norm__47, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - batch_norm__48, - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__49, - batch_norm__490, - batch_norm__491, - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - batch_norm__498, - batch_norm__499, - batch_norm__5, - batch_norm__50, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - batch_norm__51, - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__52, - batch_norm__520, - batch_norm__521, - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - batch_norm__528, - batch_norm__529, - batch_norm__53, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - batch_norm__54, - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__55, - batch_norm__550, - batch_norm__551, - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - batch_norm__558, - batch_norm__559, - batch_norm__56, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - batch_norm__57, - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__58, - batch_norm__580, - batch_norm__581, - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - batch_norm__588, - batch_norm__589, - batch_norm__59, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - batch_norm__6, - batch_norm__60, - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__61, - batch_norm__610, - batch_norm__611, - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - batch_norm__618, - batch_norm__619, - batch_norm__62, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - 
batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - batch_norm__63, - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__64, - batch_norm__640, - batch_norm__641, - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - batch_norm__648, - batch_norm__649, - batch_norm__65, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - batch_norm__66, - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__67, - batch_norm__670, - batch_norm__671, - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - batch_norm__678, - batch_norm__679, - batch_norm__68, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - batch_norm__69, - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__7, - batch_norm__70, - batch_norm__700, - batch_norm__701, - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - batch_norm__708, - batch_norm__709, - batch_norm__71, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - batch_norm__72, - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__73, - batch_norm__730, - batch_norm__731, - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - batch_norm__738, - batch_norm__739, - batch_norm__74, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - batch_norm__75, - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__76, - batch_norm__760, - batch_norm__761, - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - batch_norm__768, - batch_norm__769, - batch_norm__77, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - batch_norm__78, - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__79, - batch_norm__790, - batch_norm__791, - batch_norm__792, - batch_norm__793, - 
batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - batch_norm__798, - batch_norm__799, - batch_norm__8, - batch_norm__80, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - batch_norm__81, - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__82, - batch_norm__820, - batch_norm__821, - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_10, - concat_11, - concat_12, - concat_13, - concat_2, - concat_3, - concat_4, - concat_5, - concat_6, - concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_100, - conv2d_101, - conv2d_102, - conv2d_103, - conv2d_104, - conv2d_105, - conv2d_106, - conv2d_107, - conv2d_108, - conv2d_109, - conv2d_11, - conv2d_110, - conv2d_111, - conv2d_112, - conv2d_113, - conv2d_114, - conv2d_115, - conv2d_116, - conv2d_117, - conv2d_118, - conv2d_119, - conv2d_12, - conv2d_120, - conv2d_121, - conv2d_122, - conv2d_123, - conv2d_124, - conv2d_125, - conv2d_126, - conv2d_127, - conv2d_128, - conv2d_129, - conv2d_13, - conv2d_130, - conv2d_131, - conv2d_132, - conv2d_133, - conv2d_134, - conv2d_135, - conv2d_136, - conv2d_137, - conv2d_138, - conv2d_139, - conv2d_14, - conv2d_140, - conv2d_141, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_76, - conv2d_77, - conv2d_78, - conv2d_79, - conv2d_8, - conv2d_80, - conv2d_81, - conv2d_82, - conv2d_83, - conv2d_84, - conv2d_85, - conv2d_86, - conv2d_87, - conv2d_88, - conv2d_89, - conv2d_9, - conv2d_90, - conv2d_91, - conv2d_92, - conv2d_93, - conv2d_94, - conv2d_95, - conv2d_96, - conv2d_97, - conv2d_98, - conv2d_99, - full_0, - full_int_array_0, - full_int_array_2, - full_int_array_3, - full_int_array_4, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_1, - reshape_2, - reshape_3, - swish_1, - swish_10, - swish_100, - swish_101, - swish_102, - swish_103, - 
swish_104,
-        swish_11,
-        swish_12,
-        swish_13,
-        swish_14,
-        swish_15,
-        swish_16,
-        swish_17,
-        swish_18,
-        swish_19,
-        swish_2,
-        swish_20,
-        swish_21,
-        swish_22,
-        swish_23,
-        swish_24,
-        swish_25,
-        swish_26,
-        swish_27,
-        swish_28,
-        swish_29,
-        swish_3,
-        swish_30,
-        swish_31,
-        swish_32,
-        swish_33,
-        swish_34,
-        swish_35,
-        swish_36,
-        swish_37,
-        swish_38,
-        swish_39,
-        swish_4,
-        swish_40,
-        swish_41,
-        swish_42,
-        swish_43,
-        swish_44,
-        swish_45,
-        swish_46,
-        swish_47,
-        swish_48,
-        swish_49,
-        swish_5,
-        swish_50,
-        swish_51,
-        swish_52,
-        swish_53,
-        swish_54,
-        swish_55,
-        swish_56,
-        swish_57,
-        swish_58,
-        swish_59,
-        swish_6,
-        swish_60,
-        swish_61,
-        swish_62,
-        swish_63,
-        swish_64,
-        swish_65,
-        swish_66,
-        swish_67,
-        swish_68,
-        swish_69,
-        swish_7,
-        swish_70,
-        swish_71,
-        swish_72,
-        swish_73,
-        swish_74,
-        swish_75,
-        swish_76,
-        swish_77,
-        swish_78,
-        swish_79,
-        swish_8,
-        swish_80,
-        swish_81,
-        swish_82,
-        swish_83,
-        swish_84,
-        swish_85,
-        swish_86,
-        swish_87,
-        swish_88,
-        swish_89,
-        swish_9,
-        swish_90,
-        swish_91,
-        swish_92,
-        swish_93,
-        swish_94,
-        swish_95,
-        swish_96,
-        swish_97,
-        swish_98,
-        swish_99,
-    )
+    # pd_op.multiply: (2x12096x4xf32) <- (2x12096x4xf32, 2x12096x1xf32)
+    multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1)
+    del index_select_0, unsqueeze_1, where_1
-    return swish_0
+    return reshape_0, multiply_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py
index 25c54b0a2..8b1378917 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_2/weight_meta.py
@@ -1,7564 +1 @@
-class Program_weight_tensor_parameter_0:
-    name = "parameter_0"
-    shape = [768]
-    dtype = "float32"
-    min_val = float("-0.241183")
-    max_val = float("0.339114")
-    mean = float("0.111688")
-    std = float("0.0760614")
-    data = None
-
-class Program_weight_tensor_parameter_1:
-    name = "parameter_1"
-    shape = [768]
-    dtype = "float32"
-    min_val = float("0.855286")
-    max_val = float("1.34027")
-    mean = float("1.09294")
-    std = float("0.0413897")
-    data = None
-
-
-class Program_weight_tensor_parameter_2:
-    name = "parameter_2"
-    shape = [768]
-    dtype = "float32"
-    min_val = float("0.000633402")
-    max_val = float("0.0240355")
-    mean = float("0.002005")
-    std = float("0.00159656")
-    data = None
-
-
-class Program_weight_tensor_parameter_3:
-    name = "parameter_3"
-    shape = [768]
-    dtype = "float32"
-    min_val = float("-0.0975389")
-    max_val = float("0.0882256")
-    mean = float("-0.0156178")
-    std = float("0.0147547")
-    data = None
-
-
-class Program_weight_tensor_parameter_4:
-    name = "parameter_4"
-    shape = [768, 768, 1, 1]
-    dtype = "float32"
-    min_val = float("-0.0352945")
-    max_val = float("0.027285")
-    mean = float("-9.50896e-05")
-    std = float("0.0014892")
-    data = None
-
-
-class Program_weight_tensor_parameter_5:
-    name = "parameter_5"
-    shape = [384]
-    dtype = "float32"
-    min_val = float("-0.220092")
-    max_val = float("0.0360991")
-    mean = float("-0.0289701")
-    std = float("0.0325433")
-    data = None
-
-
-class Program_weight_tensor_parameter_6:
-    name = "parameter_6"
-    shape = [384]
-    dtype = "float32"
-    min_val = float("0.949185")
-    max_val = float("1.03882")
-    mean = float("0.984992")
-    std = float("0.0122396")
-    data = None
-
-
-class Program_weight_tensor_parameter_7:
-    name = "parameter_7"
-    shape = [384]
-    dtype = "float32"
-    min_val = float("0.000285216")
-    max_val =
float("0.00478627") - mean = float("0.00122854") - std = float("0.00061932") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = float("-0.0285195") - max_val = float("0.0323284") - mean = float("0.00097355") - std = float("0.01139") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.021078") - max_val = float("0.0140467") - mean = float("5.46721e-06") - std = float("0.00115269") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384] - dtype = "float32" - min_val = float("-0.220092") - max_val = float("0.0360991") - mean = float("-0.0289701") - std = float("0.0325433") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [384] - dtype = "float32" - min_val = float("0.853263") - max_val = float("1.12594") - mean = float("1.01951") - std = float("0.020693") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = "float32" - min_val = float("0.000688955") - max_val = float("0.010658") - mean = float("0.00259363") - std = float("0.001307") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("-0.0898062") - max_val = float("0.0697168") - mean = float("-0.0204018") - std = float("0.0193672") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0184926") - max_val = float("0.0231329") - mean = float("-3.89981e-05") - std = float("0.000775433") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-0.187634") - max_val = float("0.0397745") - mean = float("-0.0496692") - std = float("0.0341143") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384] - dtype = "float32" - min_val = float("0.923384") - max_val = float("1.15592") - mean = float("1.01789") - std = float("0.0319458") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("0.00169357") - max_val = float("0.0414876") - mean = float("0.00683062") - std = float("0.00397382") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384] - dtype = "float32" - min_val = float("-0.144802") - max_val = float("0.086576") - mean = float("-0.0246949") - std = float("0.0237346") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0202825") - max_val = float("0.0290433") - mean = float("-4.22358e-05") - std = float("0.000884567") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.13611") - max_val = float("0.0209956") - mean = float("-0.0495068") - std = float("0.0271924") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.940917") - max_val = float("1.03841") - mean = float("0.986233") - std = float("0.0130345") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype 
= "float32" - min_val = float("0.000300689") - max_val = float("0.00457802") - mean = float("0.00153576") - std = float("0.000801307") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.037531") - max_val = float("0.034864") - mean = float("0.000281664") - std = float("0.0103515") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.019341") - max_val = float("0.0169804") - mean = float("-1.66912e-05") - std = float("0.0011822") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.13611") - max_val = float("0.0209956") - mean = float("-0.0495068") - std = float("0.0271924") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384] - dtype = "float32" - min_val = float("0.966625") - max_val = float("1.10386") - mean = float("1.01865") - std = float("0.0185725") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [384] - dtype = "float32" - min_val = float("0.00102883") - max_val = float("0.0119425") - mean = float("0.00332718") - std = float("0.00160318") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("-0.087175") - max_val = float("0.0661578") - mean = float("-0.0253732") - std = float("0.0194598") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0204281") - max_val = float("0.0272238") - mean = float("-4.94366e-05") - std = float("0.000795963") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("-0.148287") - max_val = float("0.0257711") - mean = float("-0.0507511") - std = float("0.0264773") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.938638") - max_val = float("1.11514") - mean = float("1.0147") - std = float("0.0355173") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("0.00170241") - max_val = float("0.0180036") - mean = float("0.00504014") - std = float("0.00243654") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("-0.0935511") - max_val = float("0.0491545") - mean = float("-0.00834149") - std = float("0.0230017") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0202815") - max_val = float("0.0279557") - mean = float("-3.32955e-05") - std = float("0.000914897") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-0.153278") - max_val = float("0.0451778") - mean = float("-0.0555992") - std = float("0.0277362") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384] - dtype = "float32" - min_val = float("0.932352") - max_val = float("1.05292") - mean = float("0.984355") - std = float("0.0159156") - data = None - - -class 
Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("0.000433048") - max_val = float("0.0042084") - mean = float("0.00195875") - std = float("0.000692107") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384] - dtype = "float32" - min_val = float("-0.0284852") - max_val = float("0.0252557") - mean = float("-0.00507782") - std = float("0.008977") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0207499") - max_val = float("0.0193701") - mean = float("-0.000100869") - std = float("0.00123961") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("-0.153278") - max_val = float("0.0451778") - mean = float("-0.0555992") - std = float("0.0277362") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384] - dtype = "float32" - min_val = float("0.963645") - max_val = float("1.1319") - mean = float("1.02215") - std = float("0.0263352") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384] - dtype = "float32" - min_val = float("0.0016078") - max_val = float("0.0222238") - mean = float("0.00525679") - std = float("0.00277651") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384] - dtype = "float32" - min_val = float("-0.104951") - max_val = float("0.0448978") - mean = float("-0.0148056") - std = float("0.0210608") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0196928") - max_val = float("0.0250277") - mean = float("-3.1454e-05") - std = float("0.000855646") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [384] - dtype = "float32" - min_val = float("-0.161193") - max_val = float("0.0517648") - mean = float("-0.053389") - std = float("0.0280913") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [384] - dtype = "float32" - min_val = float("0.917844") - max_val = float("1.15227") - mean = float("1.01538") - std = float("0.0358938") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [384] - dtype = "float32" - min_val = float("0.00206138") - max_val = float("0.0284275") - mean = float("0.00546661") - std = float("0.0027318") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("-0.0944194") - max_val = float("0.0675621") - mean = float("-0.0218124") - std = float("0.0260175") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0217502") - max_val = float("0.0223443") - mean = float("-4.13226e-05") - std = float("0.000970614") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("-0.101781") - max_val = float("0.056876") - mean = float("-0.0401516") - std = float("0.0228938") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384] - dtype = "float32" - min_val = float("0.963822") - max_val = float("1.11711") - mean = 
float("1.01317") - std = float("0.0243344") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [384] - dtype = "float32" - min_val = float("0.00110984") - max_val = float("0.00597262") - mean = float("0.0020846") - std = float("0.000693443") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384] - dtype = "float32" - min_val = float("-0.0495382") - max_val = float("0.0540655") - mean = float("-0.0114017") - std = float("0.0130666") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0410642") - max_val = float("0.0455197") - mean = float("-5.58216e-05") - std = float("0.0014784") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [384] - dtype = "float32" - min_val = float("-0.0709703") - max_val = float("0.0169244") - mean = float("-0.0176548") - std = float("0.0126642") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [384] - dtype = "float32" - min_val = float("0.913666") - max_val = float("1.1021") - mean = float("1.00906") - std = float("0.0166227") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [384] - dtype = "float32" - min_val = float("0.000694125") - max_val = float("0.00878753") - mean = float("0.00162036") - std = float("0.000892236") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [384] - dtype = "float32" - min_val = float("-0.0470721") - max_val = float("0.0304015") - mean = float("-0.0113723") - std = float("0.0122988") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0379563") - max_val = float("0.0300735") - mean = float("-5.92181e-05") - std = float("0.00129426") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.0787137") - max_val = float("0.0034001") - mean = float("-0.0252802") - std = float("0.0143999") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.98146") - max_val = float("1.12391") - mean = float("1.02741") - std = float("0.0212668") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [384] - dtype = "float32" - min_val = float("0.00215154") - max_val = float("0.0252834") - mean = float("0.0067307") - std = float("0.00354134") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - min_val = float("-0.277707") - max_val = float("0.114193") - mean = float("-0.0247189") - std = float("0.0461422") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.031519") - max_val = float("0.0263953") - mean = float("-1.72944e-05") - std = float("0.000847953") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.413088") - max_val = float("0.667853") - mean = float("0.255716") - std = float("0.158995") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - 
min_val = float("0.924404") - max_val = float("1.6694") - mean = float("1.17434") - std = float("0.0909085") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val = float("0.00152847") - max_val = float("0.0344264") - mean = float("0.00519486") - std = float("0.00331994") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-0.0946215") - max_val = float("0.0705854") - mean = float("-0.019859") - std = float("0.0214886") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.066096") - max_val = float("0.062936") - mean = float("-0.000217452") - std = float("0.00361837") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [192] - dtype = "float32" - min_val = float("-0.257401") - max_val = float("0.0756873") - mean = float("-0.0386734") - std = float("0.0605711") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [192] - dtype = "float32" - min_val = float("0.913002") - max_val = float("1.05482") - mean = float("0.97016") - std = float("0.0253977") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [192] - dtype = "float32" - min_val = float("0.000555623") - max_val = float("0.0115202") - mean = float("0.00332226") - std = float("0.00222795") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [192] - dtype = "float32" - min_val = float("-0.0431262") - max_val = float("0.0415504") - mean = float("-0.00850673") - std = float("0.0137204") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0403293") - max_val = float("0.0259594") - mean = float("-0.00030975") - std = float("0.00273458") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [192] - dtype = "float32" - min_val = float("-0.257401") - max_val = float("0.0756873") - mean = float("-0.0386734") - std = float("0.0605711") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [192] - dtype = "float32" - min_val = float("0.67347") - max_val = float("1.16453") - mean = float("1.02581") - std = float("0.0489") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [192] - dtype = "float32" - min_val = float("0.00163203") - max_val = float("0.0223524") - mean = float("0.00610887") - std = float("0.00292424") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [192] - dtype = "float32" - min_val = float("-0.110504") - max_val = float("0.0737091") - mean = float("-0.0141394") - std = float("0.0249903") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0282871") - max_val = float("0.036242") - mean = float("-4.29571e-05") - std = float("0.00185962") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [192] - dtype = "float32" - min_val = float("-0.254657") - max_val = float("0.0957903") - mean = float("-0.080577") - std = float("0.0593186") - data = None - - -class Program_weight_tensor_parameter_81: - 
name = "parameter_81" - shape = [192] - dtype = "float32" - min_val = float("0.858086") - max_val = float("1.31453") - mean = float("1.01565") - std = float("0.0626041") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [192] - dtype = "float32" - min_val = float("0.00360845") - max_val = float("0.0470869") - mean = float("0.0123099") - std = float("0.00713766") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [192] - dtype = "float32" - min_val = float("-0.112606") - max_val = float("0.159199") - mean = float("-0.0134364") - std = float("0.0271641") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0377322") - max_val = float("0.0505119") - mean = float("-6.78957e-05") - std = float("0.00209937") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [192] - dtype = "float32" - min_val = float("-0.2187") - max_val = float("0.0411154") - mean = float("-0.100397") - std = float("0.0464196") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [192] - dtype = "float32" - min_val = float("0.889894") - max_val = float("1.08345") - mean = float("0.97001") - std = float("0.0267277") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [192] - dtype = "float32" - min_val = float("0.00101443") - max_val = float("0.00913514") - mean = float("0.00312222") - std = float("0.00149668") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [192] - dtype = "float32" - min_val = float("-0.0424533") - max_val = float("0.0299395") - mean = float("-0.00723978") - std = float("0.0103044") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0374012") - max_val = float("0.0307031") - mean = float("-0.000380153") - std = float("0.00281281") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [192] - dtype = "float32" - min_val = float("-0.2187") - max_val = float("0.0411154") - mean = float("-0.100397") - std = float("0.0464196") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [192] - dtype = "float32" - min_val = float("0.930532") - max_val = float("1.13532") - mean = float("1.02386") - std = float("0.0379141") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [192] - dtype = "float32" - min_val = float("0.00223834") - max_val = float("0.0246658") - mean = float("0.00757584") - std = float("0.00405972") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [192] - dtype = "float32" - min_val = float("-0.0893976") - max_val = float("0.0617831") - mean = float("-0.0197007") - std = float("0.0216284") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.03875") - max_val = float("0.0477189") - mean = float("-8.51208e-05") - std = float("0.0019455") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [192] - dtype = "float32" - min_val = float("-0.230434") - max_val = float("0.0106795") - mean = float("-0.106056") - std = float("0.0515807") - data 
= None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [192] - dtype = "float32" - min_val = float("0.866582") - max_val = float("1.19747") - mean = float("1.01744") - std = float("0.0615714") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [192] - dtype = "float32" - min_val = float("0.00388242") - max_val = float("0.0321491") - mean = float("0.00938872") - std = float("0.00493863") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [192] - dtype = "float32" - min_val = float("-0.0911383") - max_val = float("0.0437329") - mean = float("-0.0127117") - std = float("0.0237587") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0428683") - max_val = float("0.060501") - mean = float("-7.44884e-05") - std = float("0.00218307") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [192] - dtype = "float32" - min_val = float("-0.331729") - max_val = float("0.0596956") - mean = float("-0.122847") - std = float("0.0596309") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [192] - dtype = "float32" - min_val = float("0.864417") - max_val = float("1.08298") - mean = float("0.967068") - std = float("0.029395") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [192] - dtype = "float32" - min_val = float("0.00115593") - max_val = float("0.00832892") - mean = float("0.00330322") - std = float("0.0012261") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [192] - dtype = "float32" - min_val = float("-0.029956") - max_val = float("0.0246558") - mean = float("-0.00718649") - std = float("0.0115832") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0306946") - max_val = float("0.0748886") - mean = float("-0.000418031") - std = float("0.00305454") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [192] - dtype = "float32" - min_val = float("-0.331729") - max_val = float("0.0596956") - mean = float("-0.122847") - std = float("0.0596309") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [192] - dtype = "float32" - min_val = float("0.930062") - max_val = float("1.13715") - mean = float("1.02214") - std = float("0.0316853") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [192] - dtype = "float32" - min_val = float("0.00308637") - max_val = float("0.0441344") - mean = float("0.00890809") - std = float("0.00562068") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [192] - dtype = "float32" - min_val = float("-0.101372") - max_val = float("0.040673") - mean = float("-0.0117882") - std = float("0.0223792") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0356841") - max_val = float("0.0579415") - mean = float("-6.02996e-05") - std = float("0.00208653") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [192] - dtype = "float32" - min_val = float("-0.348121") - max_val 
= float("0.134135") - mean = float("-0.132564") - std = float("0.0683482") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [192] - dtype = "float32" - min_val = float("0.883098") - max_val = float("1.33245") - mean = float("1.01684") - std = float("0.066269") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [192] - dtype = "float32" - min_val = float("0.00419839") - max_val = float("0.0370941") - mean = float("0.0100871") - std = float("0.00543772") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [192] - dtype = "float32" - min_val = float("-0.10148") - max_val = float("0.0669823") - mean = float("-0.0195411") - std = float("0.0241009") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0441361") - max_val = float("0.0879773") - mean = float("-7.14624e-05") - std = float("0.00242393") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [192] - dtype = "float32" - min_val = float("-0.248584") - max_val = float("0.0643379") - mean = float("-0.0972664") - std = float("0.0449312") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [192] - dtype = "float32" - min_val = float("0.916261") - max_val = float("1.23422") - mean = float("1.01788") - std = float("0.0458154") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [192] - dtype = "float32" - min_val = float("0.00242366") - max_val = float("0.0139676") - mean = float("0.00484591") - std = float("0.00171238") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [192] - dtype = "float32" - min_val = float("-0.0716758") - max_val = float("0.0467604") - mean = float("-0.01717") - std = float("0.0186884") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0539295") - max_val = float("0.0629534") - mean = float("-0.000154038") - std = float("0.00352968") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [192] - dtype = "float32" - min_val = float("-0.165453") - max_val = float("0.0408002") - mean = float("-0.033107") - std = float("0.0302826") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [192] - dtype = "float32" - min_val = float("0.914357") - max_val = float("1.29586") - mean = float("1.00068") - std = float("0.0391306") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [192] - dtype = "float32" - min_val = float("0.00125772") - max_val = float("0.0253186") - mean = float("0.0035712") - std = float("0.00260193") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [192] - dtype = "float32" - min_val = float("-0.0483963") - max_val = float("0.0286654") - mean = float("-0.0102664") - std = float("0.0142139") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.052518") - max_val = float("0.0664707") - mean = float("-9.00415e-05") - std = float("0.00298878") - data = None - - -class Program_weight_tensor_parameter_125: - name = 
"parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.156099") - max_val = float("0.0110499") - mean = float("-0.0538026") - std = float("0.030917") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.853169") - max_val = float("1.1744") - mean = float("1.00826") - std = float("0.0376149") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.00348672") - max_val = float("0.0403159") - mean = float("0.0111201") - std = float("0.00590273") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.272761") - max_val = float("0.304479") - mean = float("-0.0311626") - std = float("0.0877005") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0436434") - max_val = float("0.0390175") - mean = float("-2.75791e-05") - std = float("0.00207461") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.731877") - max_val = float("1.79262") - mean = float("0.373936") - std = float("0.434119") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.625408") - max_val = float("1.70704") - mean = float("1.16439") - std = float("0.199739") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.00270591") - max_val = float("0.0641962") - mean = float("0.0133792") - std = float("0.0107877") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.201918") - max_val = float("0.126312") - mean = float("-0.02014") - std = float("0.0395955") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.124441") - max_val = float("0.101297") - mean = float("-0.000428392") - std = float("0.00839381") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [96] - dtype = "float32" - min_val = float("-0.646764") - max_val = float("0.287418") - mean = float("-0.036847") - std = float("0.203964") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [96] - dtype = "float32" - min_val = float("0.743727") - max_val = float("1.31816") - mean = float("0.928051") - std = float("0.0891314") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [96] - dtype = "float32" - min_val = float("0.00126457") - max_val = float("0.0283809") - mean = float("0.00872036") - std = float("0.0074912") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [96] - dtype = "float32" - min_val = float("-0.0533211") - max_val = float("0.054261") - mean = float("-0.00777041") - std = float("0.0241502") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0662853") - max_val = float("0.0498423") - mean = float("-0.000819471") - std = float("0.00712985") - 
data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [96] - dtype = "float32" - min_val = float("-0.646764") - max_val = float("0.287418") - mean = float("-0.036847") - std = float("0.203964") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [96] - dtype = "float32" - min_val = float("0.478759") - max_val = float("1.3967") - mean = float("1.04303") - std = float("0.135316") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [96] - dtype = "float32" - min_val = float("0.00456047") - max_val = float("0.0957772") - mean = float("0.0187912") - std = float("0.0159306") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [96] - dtype = "float32" - min_val = float("-0.189351") - max_val = float("0.148294") - mean = float("-0.00279725") - std = float("0.0509242") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0753107") - max_val = float("0.0647631") - mean = float("-7.46307e-05") - std = float("0.00493989") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [96] - dtype = "float32" - min_val = float("-0.807012") - max_val = float("0.639205") - mean = float("-0.135788") - std = float("0.222961") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [96] - dtype = "float32" - min_val = float("0.500992") - max_val = float("1.52738") - mean = float("0.986272") - std = float("0.139979") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [96] - dtype = "float32" - min_val = float("0.00448636") - max_val = float("0.078761") - mean = float("0.0176369") - std = float("0.0131972") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [96] - dtype = "float32" - min_val = float("-0.232282") - max_val = float("0.112253") - mean = float("0.00311495") - std = float("0.0583791") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0632043") - max_val = float("0.0542453") - mean = float("-0.000286044") - std = float("0.00552684") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [96] - dtype = "float32" - min_val = float("-0.375565") - max_val = float("0.21532") - mean = float("-0.182874") - std = float("0.131104") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [96] - dtype = "float32" - min_val = float("0.660313") - max_val = float("1.1612") - mean = float("0.867514") - std = float("0.0695054") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [96] - dtype = "float32" - min_val = float("0.00209929") - max_val = float("0.0217899") - mean = float("0.00748623") - std = float("0.00298326") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [96] - dtype = "float32" - min_val = float("-0.0465666") - max_val = float("0.039584") - mean = float("-0.0139749") - std = float("0.0171722") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.063312") - max_val = float("0.0527997") - 
mean = float("-0.00133358") - std = float("0.00750932") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [96] - dtype = "float32" - min_val = float("-0.375565") - max_val = float("0.21532") - mean = float("-0.182874") - std = float("0.131104") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [96] - dtype = "float32" - min_val = float("0.801681") - max_val = float("1.29162") - mean = float("1.01024") - std = float("0.0817165") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [96] - dtype = "float32" - min_val = float("0.00485304") - max_val = float("0.0746222") - mean = float("0.0201209") - std = float("0.0145062") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [96] - dtype = "float32" - min_val = float("-0.102996") - max_val = float("0.0561913") - mean = float("-0.0165577") - std = float("0.0346552") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0719449") - max_val = float("0.0662486") - mean = float("-0.000336108") - std = float("0.0053214") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [96] - dtype = "float32" - min_val = float("-0.500131") - max_val = float("0.31631") - mean = float("-0.192663") - std = float("0.162844") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [96] - dtype = "float32" - min_val = float("0.725877") - max_val = float("1.31136") - mean = float("0.944603") - std = float("0.105848") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [96] - dtype = "float32" - min_val = float("0.00343356") - max_val = float("0.0319398") - mean = float("0.00955323") - std = float("0.00603822") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [96] - dtype = "float32" - min_val = float("-0.114596") - max_val = float("0.088809") - mean = float("0.0165881") - std = float("0.0398273") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0822766") - max_val = float("0.0772172") - mean = float("-0.000347346") - std = float("0.00616296") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [96] - dtype = "float32" - min_val = float("-0.60397") - max_val = float("0.0882115") - mean = float("-0.234695") - std = float("0.141888") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [96] - dtype = "float32" - min_val = float("0.705014") - max_val = float("1.02807") - mean = float("0.909017") - std = float("0.0648434") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [96] - dtype = "float32" - min_val = float("0.00465651") - max_val = float("0.0323499") - mean = float("0.0105922") - std = float("0.00456131") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [96] - dtype = "float32" - min_val = float("-0.0572868") - max_val = float("0.0528458") - mean = float("-0.0101465") - std = float("0.0295067") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [96, 96, 1, 1] - dtype = "float32" - 
min_val = float("-0.0723145") - max_val = float("0.0736894") - mean = float("-0.0014416") - std = float("0.00966079") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [96] - dtype = "float32" - min_val = float("-0.60397") - max_val = float("0.0882115") - mean = float("-0.234695") - std = float("0.141888") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [96] - dtype = "float32" - min_val = float("0.618521") - max_val = float("1.21767") - mean = float("0.958308") - std = float("0.106787") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [96] - dtype = "float32" - min_val = float("0.00884074") - max_val = float("0.0914745") - mean = float("0.0314621") - std = float("0.0183233") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [96] - dtype = "float32" - min_val = float("-0.160652") - max_val = float("0.12782") - mean = float("-0.004871") - std = float("0.0578784") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0638916") - max_val = float("0.0818362") - mean = float("-0.000246873") - std = float("0.00600333") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [96] - dtype = "float32" - min_val = float("-0.81651") - max_val = float("0.693971") - mean = float("-0.221258") - std = float("0.247267") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [96] - dtype = "float32" - min_val = float("0.652024") - max_val = float("1.50005") - mean = float("0.904973") - std = float("0.115969") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [96] - dtype = "float32" - min_val = float("0.00583431") - max_val = float("0.0907615") - mean = float("0.0165982") - std = float("0.0160351") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [96] - dtype = "float32" - min_val = float("-0.201578") - max_val = float("0.211771") - mean = float("-0.00815532") - std = float("0.0732757") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0998433") - max_val = float("0.159214") - mean = float("-0.000138757") - std = float("0.00702056") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [96] - dtype = "float32" - min_val = float("-0.737142") - max_val = float("1.03848") - mean = float("-0.0970708") - std = float("0.357165") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [96] - dtype = "float32" - min_val = float("0.485726") - max_val = float("1.17258") - mean = float("0.785471") - std = float("0.141168") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [96] - dtype = "float32" - min_val = float("0.00479017") - max_val = float("0.0498613") - mean = float("0.0140149") - std = float("0.00706644") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [96] - dtype = "float32" - min_val = float("-0.140604") - max_val = float("0.0620771") - mean = float("-0.00136434") - std = float("0.0304237") - data = None - - -class Program_weight_tensor_parameter_184: - name = 
"parameter_184" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.138366") - max_val = float("0.118931") - mean = float("-0.000333914") - std = float("0.00899348") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [96] - dtype = "float32" - min_val = float("-0.100614") - max_val = float("0.279575") - mean = float("0.0624506") - std = float("0.0700954") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [96] - dtype = "float32" - min_val = float("0.726756") - max_val = float("1.15922") - mean = float("0.8968") - std = float("0.0750659") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [96] - dtype = "float32" - min_val = float("0.00172884") - max_val = float("0.0284937") - mean = float("0.0037772") - std = float("0.00304651") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [96] - dtype = "float32" - min_val = float("-0.0587997") - max_val = float("0.0664821") - mean = float("-0.0051813") - std = float("0.0161551") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.0960914") - max_val = float("0.114711") - mean = float("-0.000131248") - std = float("0.00503197") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [192] - dtype = "float32" - min_val = float("-0.419878") - max_val = float("0.305803") - mean = float("-0.0843783") - std = float("0.0933839") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [192] - dtype = "float32" - min_val = float("0.660092") - max_val = float("1.60427") - mean = float("0.830813") - std = float("0.0949805") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [192] - dtype = "float32" - min_val = float("0.00393118") - max_val = float("0.106249") - mean = float("0.0110337") - std = float("0.00808965") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [192] - dtype = "float32" - min_val = float("-0.0959304") - max_val = float("0.0379394") - mean = float("-0.0249515") - std = float("0.0229359") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0812206") - max_val = float("0.0824483") - mean = float("-0.000421448") - std = float("0.00583855") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [384] - dtype = "float32" - min_val = float("-0.373089") - max_val = float("0.1651") - mean = float("-0.0928755") - std = float("0.059282") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [384] - dtype = "float32" - min_val = float("0.877299") - max_val = float("1.57429") - mean = float("1.01597") - std = float("0.0852694") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [384] - dtype = "float32" - min_val = float("0.00323605") - max_val = float("0.0532371") - mean = float("0.00771792") - std = float("0.00446179") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [384] - dtype = "float32" - min_val = float("-0.165965") - max_val = float("0.109289") - mean = float("-0.031791") - std = 
float("0.0291846") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.130749") - max_val = float("0.0789119") - mean = float("-0.000445351") - std = float("0.00532961") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [192] - dtype = "float32" - min_val = float("-0.256683") - max_val = float("0.0671899") - mean = float("-0.0839381") - std = float("0.0440531") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [192] - dtype = "float32" - min_val = float("0.819788") - max_val = float("0.987271") - mean = float("0.928926") - std = float("0.0272814") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [192] - dtype = "float32" - min_val = float("0.00236419") - max_val = float("0.0177991") - mean = float("0.00505779") - std = float("0.00197103") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [192] - dtype = "float32" - min_val = float("-0.0602631") - max_val = float("0.0372194") - mean = float("-0.0145273") - std = float("0.0175294") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0359811") - max_val = float("0.0341578") - mean = float("-0.000499181") - std = float("0.00410174") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [192] - dtype = "float32" - min_val = float("-0.256683") - max_val = float("0.0671899") - mean = float("-0.0839381") - std = float("0.0440531") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [192] - dtype = "float32" - min_val = float("0.900374") - max_val = float("1.08407") - mean = float("0.991603") - std = float("0.0256836") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [192] - dtype = "float32" - min_val = float("0.00869434") - max_val = float("0.103153") - mean = float("0.0195607") - std = float("0.00998195") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [192] - dtype = "float32" - min_val = float("-0.148147") - max_val = float("0.083841") - mean = float("-0.0215099") - std = float("0.0357341") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0560753") - max_val = float("0.0798715") - mean = float("-6.90239e-05") - std = float("0.00228177") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [192] - dtype = "float32" - min_val = float("-0.28296") - max_val = float("0.00624382") - mean = float("-0.108697") - std = float("0.054323") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [192] - dtype = "float32" - min_val = float("0.935362") - max_val = float("1.19786") - mean = float("1.03662") - std = float("0.044863") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [192] - dtype = "float32" - min_val = float("0.0153049") - max_val = float("0.150077") - mean = float("0.0321082") - std = float("0.0130511") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [192] - dtype = "float32" - min_val = 
float("-0.205475") - max_val = float("0.151818") - mean = float("-0.048646") - std = float("0.0468518") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0723395") - max_val = float("0.0757746") - mean = float("-0.000117075") - std = float("0.00276706") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [192] - dtype = "float32" - min_val = float("-0.253549") - max_val = float("-0.0252667") - mean = float("-0.111396") - std = float("0.0515085") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [192] - dtype = "float32" - min_val = float("0.915269") - max_val = float("1.08382") - mean = float("0.975487") - std = float("0.0197657") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [192] - dtype = "float32" - min_val = float("0.00131146") - max_val = float("0.00746504") - mean = float("0.00271604") - std = float("0.00086359") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [192] - dtype = "float32" - min_val = float("-0.0571962") - max_val = float("0.0503431") - mean = float("-0.0146694") - std = float("0.014805") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0237103") - max_val = float("0.0309154") - mean = float("-0.000535383") - std = float("0.00421215") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [192] - dtype = "float32" - min_val = float("-0.253549") - max_val = float("-0.0252667") - mean = float("-0.111396") - std = float("0.0515085") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [192] - dtype = "float32" - min_val = float("0.93953") - max_val = float("1.1319") - mean = float("1.00446") - std = float("0.0346886") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [192] - dtype = "float32" - min_val = float("0.00555641") - max_val = float("0.0273444") - mean = float("0.0100098") - std = float("0.00301259") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [192] - dtype = "float32" - min_val = float("-0.142862") - max_val = float("0.0842238") - mean = float("-0.0289718") - std = float("0.0284312") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0463499") - max_val = float("0.0648047") - mean = float("-0.000117258") - std = float("0.00236655") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [192] - dtype = "float32" - min_val = float("-0.397852") - max_val = float("-0.024517") - mean = float("-0.135045") - std = float("0.0579008") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [192] - dtype = "float32" - min_val = float("0.935578") - max_val = float("1.28416") - mean = float("1.02712") - std = float("0.0581486") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [192] - dtype = "float32" - min_val = float("0.0152128") - max_val = float("0.0761776") - mean = float("0.0305415") - std = float("0.00974539") - data = None - - -class 
Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [192] - dtype = "float32" - min_val = float("-0.258209") - max_val = float("0.302634") - mean = float("-0.0505409") - std = float("0.057312") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0333938") - max_val = float("0.0459033") - mean = float("-0.000119163") - std = float("0.00292916") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [192] - dtype = "float32" - min_val = float("-0.291413") - max_val = float("-0.0234132") - mean = float("-0.113952") - std = float("0.046762") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [192] - dtype = "float32" - min_val = float("0.906693") - max_val = float("1.13942") - mean = float("0.996984") - std = float("0.036522") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [192] - dtype = "float32" - min_val = float("0.0014244") - max_val = float("0.00546528") - mean = float("0.00250251") - std = float("0.000770997") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [192] - dtype = "float32" - min_val = float("-0.0389427") - max_val = float("0.0867383") - mean = float("-0.00930065") - std = float("0.0135574") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0346133") - max_val = float("0.0524164") - mean = float("-0.000326125") - std = float("0.00484124") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [192] - dtype = "float32" - min_val = float("-0.291413") - max_val = float("-0.0234132") - mean = float("-0.113952") - std = float("0.046762") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [192] - dtype = "float32" - min_val = float("0.909728") - max_val = float("1.14814") - mean = float("0.986983") - std = float("0.0373625") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [192] - dtype = "float32" - min_val = float("0.00562666") - max_val = float("0.0219922") - mean = float("0.0109758") - std = float("0.00338237") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [192] - dtype = "float32" - min_val = float("-0.21375") - max_val = float("0.0270211") - mean = float("-0.0354808") - std = float("0.0287563") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.024049") - max_val = float("0.0316937") - mean = float("-0.00014643") - std = float("0.00232213") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [192] - dtype = "float32" - min_val = float("-0.370567") - max_val = float("-0.0105869") - mean = float("-0.161477") - std = float("0.0602506") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [192] - dtype = "float32" - min_val = float("0.905056") - max_val = float("1.22126") - mean = float("1.03042") - std = float("0.0494268") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [192] - dtype = "float32" - min_val = float("0.00753308") - max_val = 
float("0.0302844") - mean = float("0.0141411") - std = float("0.00443453") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [192] - dtype = "float32" - min_val = float("-0.100325") - max_val = float("0.0739225") - mean = float("-0.0300805") - std = float("0.0307536") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0896984") - max_val = float("0.04215") - mean = float("-0.000139982") - std = float("0.00324088") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.397771") - max_val = float("0.0863893") - mean = float("-0.165521") - std = float("0.0737187") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("0.878799") - max_val = float("1.17577") - mean = float("1.01746") - std = float("0.0565899") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.00278118") - max_val = float("0.0114293") - mean = float("0.00483493") - std = float("0.00146785") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-0.0560004") - max_val = float("0.0581845") - mean = float("0.0102661") - std = float("0.0202822") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.101477") - max_val = float("0.142492") - mean = float("-0.000156828") - std = float("0.00446408") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [192] - dtype = "float32" - min_val = float("-0.151112") - max_val = float("0.501803") - mean = float("-0.00466305") - std = float("0.073986") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [192] - dtype = "float32" - min_val = float("0.932258") - max_val = float("1.23138") - mean = float("1.04643") - std = float("0.0627369") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [192] - dtype = "float32" - min_val = float("0.00250556") - max_val = float("0.0461735") - mean = float("0.00561131") - std = float("0.00346797") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [192] - dtype = "float32" - min_val = float("-0.0548003") - max_val = float("0.0495759") - mean = float("-0.000104753") - std = float("0.0205629") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.134457") - max_val = float("0.0882326") - mean = float("-0.000122861") - std = float("0.00449465") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [384] - dtype = "float32" - min_val = float("-0.31215") - max_val = float("-0.0444473") - mean = float("-0.171263") - std = float("0.0441121") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [384] - dtype = "float32" - min_val = float("0.786726") - max_val = float("1.17526") - mean = float("0.885032") - std = float("0.0343599") - data = None - - -class Program_weight_tensor_parameter_257: - name = 
"parameter_257" - shape = [384] - dtype = "float32" - min_val = float("0.00467646") - max_val = float("0.0521003") - mean = float("0.0100483") - std = float("0.00415136") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [384] - dtype = "float32" - min_val = float("-0.0852832") - max_val = float("0.0645434") - mean = float("-0.0324354") - std = float("0.0213669") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0336805") - max_val = float("0.0404763") - mean = float("-0.000294351") - std = float("0.00351804") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [768] - dtype = "float32" - min_val = float("-0.164743") - max_val = float("0.115333") - mean = float("-0.0875639") - std = float("0.0238268") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [768] - dtype = "float32" - min_val = float("0.935282") - max_val = float("1.27924") - mean = float("1.02951") - std = float("0.0299064") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [768] - dtype = "float32" - min_val = float("0.00328574") - max_val = float("0.0307873") - mean = float("0.00666914") - std = float("0.00225792") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [768] - dtype = "float32" - min_val = float("-0.13377") - max_val = float("0.10924") - mean = float("-0.0334799") - std = float("0.0256795") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0417249") - max_val = float("0.0674709") - mean = float("-0.000227656") - std = float("0.00301788") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [384] - dtype = "float32" - min_val = float("-0.175755") - max_val = float("0.114994") - mean = float("-0.059758") - std = float("0.0316004") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [384] - dtype = "float32" - min_val = float("0.875498") - max_val = float("1.05486") - mean = float("0.97544") - std = float("0.0178167") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [384] - dtype = "float32" - min_val = float("0.00187857") - max_val = float("0.0214535") - mean = float("0.0049821") - std = float("0.00205769") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [384] - dtype = "float32" - min_val = float("-0.048337") - max_val = float("0.0454388") - mean = float("-0.00746495") - std = float("0.0193299") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0520415") - max_val = float("0.0516332") - mean = float("-0.000102198") - std = float("0.00275407") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [384] - dtype = "float32" - min_val = float("-0.175755") - max_val = float("0.114994") - mean = float("-0.059758") - std = float("0.0316004") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [384] - dtype = "float32" - min_val = float("0.939765") - max_val = float("1.08488") - mean = float("0.993679") - std = 
float("0.0187977") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [384] - dtype = "float32" - min_val = float("0.0136676") - max_val = float("0.24702") - mean = float("0.0361577") - std = float("0.0178113") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [384] - dtype = "float32" - min_val = float("-0.193526") - max_val = float("0.128898") - mean = float("-0.0635078") - std = float("0.0592118") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0262406") - max_val = float("0.0342993") - mean = float("-0.000104765") - std = float("0.00102972") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [384] - dtype = "float32" - min_val = float("-0.151307") - max_val = float("0.102932") - mean = float("-0.036051") - std = float("0.0243773") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [384] - dtype = "float32" - min_val = float("0.948438") - max_val = float("1.23653") - mean = float("1.02223") - std = float("0.0374692") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [384] - dtype = "float32" - min_val = float("0.00885489") - max_val = float("0.121229") - mean = float("0.0291848") - std = float("0.0116348") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [384] - dtype = "float32" - min_val = float("-0.180611") - max_val = float("0.112007") - mean = float("-0.0377734") - std = float("0.047009") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0233099") - max_val = float("0.0327733") - mean = float("-6.46831e-05") - std = float("0.00135178") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [384] - dtype = "float32" - min_val = float("-0.12739") - max_val = float("0.0293372") - mean = float("-0.0396596") - std = float("0.0204788") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [384] - dtype = "float32" - min_val = float("0.935426") - max_val = float("1.22654") - mean = float("1.02217") - std = float("0.0409336") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [384] - dtype = "float32" - min_val = float("0.026841") - max_val = float("0.276428") - mean = float("0.0890046") - std = float("0.0287197") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [384] - dtype = "float32" - min_val = float("-0.988092") - max_val = float("1.24067") - mean = float("-0.0106803") - std = float("0.356169") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-0.0437694") - max_val = float("0.0478719") - mean = float("3.92494e-05") - std = float("0.00226745") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [384] - dtype = "float32" - min_val = float("-0.0351363") - max_val = float("0.0448389") - mean = float("-0.00202565") - std = float("0.0112301") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [384] - dtype = "float32" - min_val = 
float("0.955166") - max_val = float("1.07949") - mean = float("0.99032") - std = float("0.0160701") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [384] - dtype = "float32" - min_val = float("0.00173498") - max_val = float("0.00643269") - mean = float("0.00313556") - std = float("0.000799758") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [384] - dtype = "float32" - min_val = float("-0.0717004") - max_val = float("0.0408329") - mean = float("-0.0299012") - std = float("0.0176195") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0228554") - max_val = float("0.0328209") - mean = float("-0.000375757") - std = float("0.00255745") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [384] - dtype = "float32" - min_val = float("-0.0351365") - max_val = float("0.0448389") - mean = float("-0.00202565") - std = float("0.0112301") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [384] - dtype = "float32" - min_val = float("0.957996") - max_val = float("1.12454") - mean = float("1.00467") - std = float("0.0255587") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [384] - dtype = "float32" - min_val = float("0.0083014") - max_val = float("0.0352855") - mean = float("0.0175327") - std = float("0.00454233") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [384] - dtype = "float32" - min_val = float("-0.247265") - max_val = float("0.101324") - mean = float("-0.0772083") - std = float("0.0418546") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0259612") - max_val = float("0.0450877") - mean = float("-0.000119198") - std = float("0.00109556") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [384] - dtype = "float32" - min_val = float("-0.0757949") - max_val = float("0.0143529") - mean = float("-0.0184104") - std = float("0.0130086") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [384] - dtype = "float32" - min_val = float("0.948442") - max_val = float("1.1913") - mean = float("1.02005") - std = float("0.0307435") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [384] - dtype = "float32" - min_val = float("0.0341549") - max_val = float("0.134905") - mean = float("0.0680685") - std = float("0.0155983") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [384] - dtype = "float32" - min_val = float("-0.699112") - max_val = float("0.534627") - mean = float("-0.130478") - std = float("0.14264") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0199475") - max_val = float("0.0262388") - mean = float("-8.6257e-05") - std = float("0.00132087") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [384] - dtype = "float32" - min_val = float("-0.0646182") - max_val = float("0.0263686") - mean = float("-0.0175816") - std = float("0.0121433") - data = None - - -class 
Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [384] - dtype = "float32" - min_val = float("0.976895") - max_val = float("1.05226") - mean = float("0.998698") - std = float("0.0104278") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [384] - dtype = "float32" - min_val = float("0.000942471") - max_val = float("0.00502155") - mean = float("0.00201621") - std = float("0.000551618") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [384] - dtype = "float32" - min_val = float("-0.0479207") - max_val = float("0.0928967") - mean = float("-0.0128137") - std = float("0.0168058") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.019998") - max_val = float("0.0336093") - mean = float("-0.0001808") - std = float("0.00225336") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [384] - dtype = "float32" - min_val = float("-0.0646181") - max_val = float("0.0263686") - mean = float("-0.0175816") - std = float("0.0121433") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [384] - dtype = "float32" - min_val = float("0.976548") - max_val = float("1.1009") - mean = float("1.00714") - std = float("0.0202515") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [384] - dtype = "float32" - min_val = float("0.00525835") - max_val = float("0.0300494") - mean = float("0.0114248") - std = float("0.0035076") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [384] - dtype = "float32" - min_val = float("-0.129246") - max_val = float("0.173103") - mean = float("-0.0523329") - std = float("0.0390795") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0126416") - max_val = float("0.0247245") - mean = float("-9.22132e-05") - std = float("0.000953698") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-0.0846385") - max_val = float("-0.0012331") - mean = float("-0.0378149") - std = float("0.0147169") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - min_val = float("0.961605") - max_val = float("1.1164") - mean = float("1.01863") - std = float("0.0258138") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.00543412") - max_val = float("0.034") - mean = float("0.0114679") - std = float("0.00281078") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-0.105395") - max_val = float("0.0875829") - mean = float("-0.0276342") - std = float("0.0283604") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0131868") - max_val = float("0.0207652") - mean = float("-5.34705e-05") - std = float("0.00131228") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [384] - dtype = "float32" - min_val = float("-0.107271") - max_val = 
float("0.0233646") - mean = float("-0.0562337") - std = float("0.0197346") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [384] - dtype = "float32" - min_val = float("0.981244") - max_val = float("1.07279") - mean = float("1.02152") - std = float("0.0138952") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [384] - dtype = "float32" - min_val = float("0.00227472") - max_val = float("0.00995654") - mean = float("0.00327401") - std = float("0.000751464") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [384] - dtype = "float32" - min_val = float("-0.0410182") - max_val = float("0.0248085") - mean = float("0.0060818") - std = float("0.00760998") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0184221") - max_val = float("0.0482036") - mean = float("-0.000162631") - std = float("0.00257425") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.058929") - max_val = float("0.0342356") - mean = float("-0.00850635") - std = float("0.0113521") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("1.00879") - max_val = float("1.21413") - mean = float("1.05118") - std = float("0.0209519") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.00216519") - max_val = float("0.00959873") - mean = float("0.00317997") - std = float("0.000757622") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-0.0134693") - max_val = float("0.019212") - mean = float("0.00436546") - std = float("0.00543387") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0415669") - max_val = float("0.0455758") - mean = float("-0.000161607") - std = float("0.00274744") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [1024] - dtype = "float32" - min_val = float("-3.76504") - max_val = float("-0.731072") - mean = float("-2.1942") - std = float("0.428366") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [1024] - dtype = "float32" - min_val = float("1.62418") - max_val = float("4.43435") - mean = float("3.07417") - std = float("0.255202") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [1024] - dtype = "float32" - min_val = float("0.00545502") - max_val = float("0.0375841") - mean = float("0.0112259") - std = float("0.00369148") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [1024] - dtype = "float32" - min_val = float("-0.116582") - max_val = float("0.0584592") - mean = float("-0.0421243") - std = float("0.0189653") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0682658") - max_val = float("0.08076") - mean = float("-0.00031367") - std = float("0.00305502") - data = None - - -class Program_weight_tensor_parameter_330: - name = 
"parameter_330" - shape = [768] - dtype = "float32" - min_val = float("-0.0121546") - max_val = float("0.00561678") - mean = float("-0.000568828") - std = float("0.00173927") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.104186") - max_val = float("0.108443") - mean = float("-0.000213298") - std = float("0.00130075") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [384] - dtype = "float32" - min_val = float("-1.77817") - max_val = float("0.498255") - mean = float("-0.30001") - std = float("0.296857") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [384] - dtype = "float32" - min_val = float("0.18984") - max_val = float("1.98158") - mean = float("0.620203") - std = float("0.2786") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [384] - dtype = "float32" - min_val = float("3.45478e-05") - max_val = float("0.000862679") - mean = float("0.000152348") - std = float("8.47812e-05") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [384] - dtype = "float32" - min_val = float("-0.0224284") - max_val = float("0.0569889") - mean = float("0.0173439") - std = float("0.0133472") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0196332") - max_val = float("0.0252926") - mean = float("-0.000291589") - std = float("0.00211517") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [384] - dtype = "float32" - min_val = float("-1.77817") - max_val = float("0.498255") - mean = float("-0.30001") - std = float("0.296857") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [384] - dtype = "float32" - min_val = float("0.36588") - max_val = float("2.77771") - mean = float("1.04742") - std = float("0.308644") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [384] - dtype = "float32" - min_val = float("0.000357258") - max_val = float("0.00471916") - mean = float("0.00106354") - std = float("0.000429049") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [384] - dtype = "float32" - min_val = float("-0.199773") - max_val = float("0.0734005") - mean = float("0.0127227") - std = float("0.0242137") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0170937") - max_val = float("0.0209283") - mean = float("-2.76977e-05") - std = float("0.0013628") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [384] - dtype = "float32" - min_val = float("-2.61203") - max_val = float("0.0551867") - mean = float("-1.58323") - std = float("0.416329") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [384] - dtype = "float32" - min_val = float("0.567765") - max_val = float("1.67644") - mean = float("1.1241") - std = float("0.146785") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [384] - dtype = "float32" - min_val = float("0.0222405") - max_val = float("0.126805") - mean = float("0.0503725") - std = 
float("0.0157052") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [384] - dtype = "float32" - min_val = float("-0.656718") - max_val = float("0.389056") - mean = float("-0.183173") - std = float("0.0983499") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0169074") - max_val = float("0.0427054") - mean = float("-0.000129063") - std = float("0.00175192") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [384] - dtype = "float32" - min_val = float("-1.93759") - max_val = float("0.733668") - mean = float("-0.570541") - std = float("0.365887") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [384] - dtype = "float32" - min_val = float("0.140501") - max_val = float("2.06358") - mean = float("0.563187") - std = float("0.226696") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [384] - dtype = "float32" - min_val = float("4.15388e-05") - max_val = float("0.000906682") - mean = float("0.00019597") - std = float("9.97336e-05") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [384] - dtype = "float32" - min_val = float("-0.0259493") - max_val = float("0.0583295") - mean = float("0.0172821") - std = float("0.0111211") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0185543") - max_val = float("0.0208855") - mean = float("-0.000319504") - std = float("0.00201805") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [384] - dtype = "float32" - min_val = float("-1.93768") - max_val = float("0.733668") - mean = float("-0.570541") - std = float("0.365888") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [384] - dtype = "float32" - min_val = float("0.57953") - max_val = float("2.10385") - mean = float("1.09247") - std = float("0.254048") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [384] - dtype = "float32" - min_val = float("0.000627586") - max_val = float("0.00452718") - mean = float("0.00162004") - std = float("0.000525088") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [384] - dtype = "float32" - min_val = float("-0.0456402") - max_val = float("0.100243") - mean = float("0.0222313") - std = float("0.0223563") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.017816") - max_val = float("0.0248569") - mean = float("-5.76546e-05") - std = float("0.00141105") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [384] - dtype = "float32" - min_val = float("-2.4273") - max_val = float("0.839141") - mean = float("-1.42155") - std = float("0.360806") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [384] - dtype = "float32" - min_val = float("0.438688") - max_val = float("1.84355") - mean = float("1.15674") - std = float("0.142333") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [384] - dtype = "float32" - min_val = 
float("0.0169176") - max_val = float("0.0834605") - mean = float("0.0339828") - std = float("0.0113006") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [384] - dtype = "float32" - min_val = float("-0.537141") - max_val = float("0.620008") - mean = float("-0.11866") - std = float("0.0759343") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0215698") - max_val = float("0.0331226") - mean = float("-0.000122919") - std = float("0.00173093") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [384] - dtype = "float32" - min_val = float("-1.88491") - max_val = float("0.489301") - mean = float("-0.478428") - std = float("0.384422") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [384] - dtype = "float32" - min_val = float("0.0963961") - max_val = float("2.12165") - mean = float("0.441644") - std = float("0.215765") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [384] - dtype = "float32" - min_val = float("4.83567e-05") - max_val = float("0.00122351") - mean = float("0.000214336") - std = float("0.000121244") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [384] - dtype = "float32" - min_val = float("-0.0604177") - max_val = float("0.0648457") - mean = float("0.0205045") - std = float("0.0130396") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0178981") - max_val = float("0.0199833") - mean = float("-0.000383584") - std = float("0.00172042") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [384] - dtype = "float32" - min_val = float("-1.88491") - max_val = float("0.489301") - mean = float("-0.478428") - std = float("0.384422") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [384] - dtype = "float32" - min_val = float("0.572695") - max_val = float("2.21595") - mean = float("1.06078") - std = float("0.254685") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [384] - dtype = "float32" - min_val = float("0.000726389") - max_val = float("0.00447183") - mean = float("0.00202693") - std = float("0.000650997") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [384] - dtype = "float32" - min_val = float("-0.161605") - max_val = float("0.0850251") - mean = float("0.0261345") - std = float("0.0250145") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.016375") - max_val = float("0.0234752") - mean = float("-5.94736e-05") - std = float("0.00147549") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [384] - dtype = "float32" - min_val = float("-2.15512") - max_val = float("0.42913") - mean = float("-1.38221") - std = float("0.277652") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [384] - dtype = "float32" - min_val = float("0.714131") - max_val = float("1.63294") - mean = float("1.13544") - std = float("0.0992085") - data = None - - -class 
Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [384] - dtype = "float32" - min_val = float("0.0110827") - max_val = float("0.0639734") - mean = float("0.0257205") - std = float("0.00835153") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [384] - dtype = "float32" - min_val = float("-0.516632") - max_val = float("0.224516") - mean = float("-0.0886359") - std = float("0.0647703") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0257925") - max_val = float("0.0417178") - mean = float("-0.000106541") - std = float("0.00163045") - data = None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [384] - dtype = "float32" - min_val = float("-2.93083") - max_val = float("1.76129") - mean = float("-0.76492") - std = float("0.654104") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [384] - dtype = "float32" - min_val = float("0.973816") - max_val = float("2.91102") - mean = float("1.85247") - std = float("0.272485") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [384] - dtype = "float32" - min_val = float("0.00124108") - max_val = float("0.00551094") - mean = float("0.00251181") - std = float("0.000624682") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [384] - dtype = "float32" - min_val = float("-0.154056") - max_val = float("0.0997407") - mean = float("0.0427987") - std = float("0.0198229") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0482202") - max_val = float("0.0395783") - mean = float("-0.000489332") - std = float("0.00371088") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [384] - dtype = "float32" - min_val = float("-2.24474") - max_val = float("0.693989") - mean = float("-0.776354") - std = float("0.476138") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [384] - dtype = "float32" - min_val = float("0.973795") - max_val = float("2.89093") - mean = float("2.10265") - std = float("0.302973") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [384] - dtype = "float32" - min_val = float("0.000459391") - max_val = float("0.00364653") - mean = float("0.00103832") - std = float("0.000354294") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [384] - dtype = "float32" - min_val = float("-0.036311") - max_val = float("0.0640884") - mean = float("0.0228094") - std = float("0.0124348") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.129421") - max_val = float("0.063091") - mean = float("-0.000254003") - std = float("0.00272646") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [768] - dtype = "float32" - min_val = float("-2.41043") - max_val = float("0.654464") - mean = float("-0.915927") - std = float("0.344484") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [768] - dtype = "float32" - min_val = float("0.519256") - max_val = float("1.87666") - 
mean = float("0.912499") - std = float("0.147147") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [768] - dtype = "float32" - min_val = float("0.00362407") - max_val = float("0.0346536") - mean = float("0.00806678") - std = float("0.00278567") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [768] - dtype = "float32" - min_val = float("-0.180838") - max_val = float("0.1547") - mean = float("0.0229543") - std = float("0.0362646") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [768, 512, 3, 3] - dtype = "float32" - min_val = float("-0.050247") - max_val = float("0.0427207") - mean = float("-6.42878e-05") - std = float("0.00168004") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [512] - dtype = "float32" - min_val = float("-3.38705") - max_val = float("1.66488") - mean = float("-1.17821") - std = float("0.526891") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [512] - dtype = "float32" - min_val = float("0.488671") - max_val = float("1.69914") - mean = float("1.10955") - std = float("0.15027") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [512] - dtype = "float32" - min_val = float("0.00126485") - max_val = float("0.00820921") - mean = float("0.00340923") - std = float("0.00106595") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [512] - dtype = "float32" - min_val = float("-0.11898") - max_val = float("0.0891435") - mean = float("-0.0369126") - std = float("0.0289632") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.225953") - max_val = float("0.196097") - mean = float("-0.000408665") - std = float("0.00566642") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [384] - dtype = "float32" - min_val = float("-0.00866309") - max_val = float("0.000825011") - mean = float("-0.00232518") - std = float("0.0017594") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.202855") - max_val = float("0.135115") - mean = float("-0.00175899") - std = float("0.00410018") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [192] - dtype = "float32" - min_val = float("-1.95281") - max_val = float("0.504414") - mean = float("-0.323169") - std = float("0.341204") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [192] - dtype = "float32" - min_val = float("0.0702988") - max_val = float("2.23409") - mean = float("0.601912") - std = float("0.439789") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [192] - dtype = "float32" - min_val = float("6.06397e-05") - max_val = float("0.000857671") - mean = float("0.000292524") - std = float("0.000171414") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [192] - dtype = "float32" - min_val = float("-0.0341788") - max_val = float("0.0367911") - mean = float("0.0035144") - std = float("0.0110465") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = 
[192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0204711") - max_val = float("0.0537535") - mean = float("-0.000251412") - std = float("0.00321926") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [192] - dtype = "float32" - min_val = float("-1.95281") - max_val = float("0.504414") - mean = float("-0.323169") - std = float("0.341204") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [192] - dtype = "float32" - min_val = float("0.384773") - max_val = float("2.87161") - mean = float("1.22954") - std = float("0.52094") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" - shape = [192] - dtype = "float32" - min_val = float("0.00047849") - max_val = float("0.0105565") - mean = float("0.00264743") - std = float("0.0011636") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [192] - dtype = "float32" - min_val = float("-0.0760633") - max_val = float("0.0960629") - mean = float("0.0132737") - std = float("0.0293694") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0214696") - max_val = float("0.0330379") - mean = float("-0.000105926") - std = float("0.00236944") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [192] - dtype = "float32" - min_val = float("-2.88997") - max_val = float("-0.124639") - mean = float("-1.33303") - std = float("0.398") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [192] - dtype = "float32" - min_val = float("0.720106") - max_val = float("2.09477") - mean = float("1.16316") - std = float("0.171513") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [192] - dtype = "float32" - min_val = float("0.0350146") - max_val = float("0.281849") - mean = float("0.0801621") - std = float("0.0325573") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [192] - dtype = "float32" - min_val = float("-2.97017") - max_val = float("2.23437") - mean = float("-0.175713") - std = float("0.385326") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0303358") - max_val = float("0.0416184") - mean = float("-0.000150412") - std = float("0.00285456") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [192] - dtype = "float32" - min_val = float("-1.92902") - max_val = float("0.596353") - mean = float("-0.261587") - std = float("0.334409") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [192] - dtype = "float32" - min_val = float("0.0489169") - max_val = float("1.76734") - mean = float("0.453534") - std = float("0.302569") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [192] - dtype = "float32" - min_val = float("5.20947e-05") - max_val = float("0.00150981") - mean = float("0.000284881") - std = float("0.00019177") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [192] - dtype = "float32" - min_val = float("-0.0223658") - max_val = float("0.0363701") - mean = float("0.00787701") - std = float("0.00970774") - data = None - - -class 
Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0239734") - max_val = float("0.0302964") - mean = float("-0.000361483") - std = float("0.00299226") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [192] - dtype = "float32" - min_val = float("-1.92902") - max_val = float("0.596353") - mean = float("-0.261587") - std = float("0.334409") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [192] - dtype = "float32" - min_val = float("0.419546") - max_val = float("2.27565") - mean = float("1.1481") - std = float("0.38095") - data = None - - -class Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [192] - dtype = "float32" - min_val = float("0.00122219") - max_val = float("0.0063277") - mean = float("0.00282241") - std = float("0.000859264") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [192] - dtype = "float32" - min_val = float("-0.0745243") - max_val = float("0.0844685") - mean = float("0.0227952") - std = float("0.0246463") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0186838") - max_val = float("0.0254977") - mean = float("-0.000124378") - std = float("0.00251097") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [192] - dtype = "float32" - min_val = float("-2.53592") - max_val = float("-0.131672") - mean = float("-1.31612") - std = float("0.443757") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [192] - dtype = "float32" - min_val = float("0.717982") - max_val = float("1.65612") - mean = float("1.17887") - std = float("0.161137") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [192] - dtype = "float32" - min_val = float("0.019859") - max_val = float("0.109361") - mean = float("0.053725") - std = float("0.0174112") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [192] - dtype = "float32" - min_val = float("-1.6874") - max_val = float("0.715917") - mean = float("-0.0525487") - std = float("0.202935") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0424222") - max_val = float("0.0424072") - mean = float("-0.000164971") - std = float("0.00292092") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [192] - dtype = "float32" - min_val = float("-1.76329") - max_val = float("0.544922") - mean = float("-0.246307") - std = float("0.349367") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [192] - dtype = "float32" - min_val = float("0.00838759") - max_val = float("1.66366") - mean = float("0.357305") - std = float("0.246704") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [192] - dtype = "float32" - min_val = float("2.7519e-06") - max_val = float("0.00136124") - mean = float("0.000270016") - std = float("0.000205115") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [192] - dtype = "float32" - min_val = float("-0.0246477") - max_val = float("0.0419057") - mean 
= float("0.00937144") - std = float("0.00982806") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0257052") - max_val = float("0.0227599") - mean = float("-0.000402919") - std = float("0.00286132") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [192] - dtype = "float32" - min_val = float("-1.76329") - max_val = float("0.544922") - mean = float("-0.246307") - std = float("0.349367") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [192] - dtype = "float32" - min_val = float("0.385511") - max_val = float("1.96748") - mean = float("1.06974") - std = float("0.336052") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [192] - dtype = "float32" - min_val = float("0.00118411") - max_val = float("0.00608141") - mean = float("0.0031663") - std = float("0.000994709") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [192] - dtype = "float32" - min_val = float("-0.0656616") - max_val = float("0.0775184") - mean = float("0.0234637") - std = float("0.0196456") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0234567") - max_val = float("0.031622") - mean = float("-0.000127451") - std = float("0.0025894") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [192] - dtype = "float32" - min_val = float("-2.51425") - max_val = float("0.158605") - mean = float("-1.26745") - std = float("0.427693") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [192] - dtype = "float32" - min_val = float("0.599436") - max_val = float("1.78145") - mean = float("1.14926") - std = float("0.161525") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [192] - dtype = "float32" - min_val = float("0.0150449") - max_val = float("0.0928161") - mean = float("0.0356273") - std = float("0.0112392") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [192] - dtype = "float32" - min_val = float("-1.33124") - max_val = float("0.435574") - mean = float("-0.0281949") - std = float("0.145649") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0368305") - max_val = float("0.0462592") - mean = float("-0.000170748") - std = float("0.0029624") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [192] - dtype = "float32" - min_val = float("-2.0885") - max_val = float("0.649037") - mean = float("-0.259743") - std = float("0.386626") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [192] - dtype = "float32" - min_val = float("0.000296745") - max_val = float("0.72258") - mean = float("0.216526") - std = float("0.135201") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [192] - dtype = "float32" - min_val = float("8.11875e-09") - max_val = float("0.000665964") - mean = float("0.000158357") - std = float("9.40236e-05") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [192] - dtype = 
"float32" - min_val = float("-0.0160817") - max_val = float("0.0306913") - mean = float("0.00616664") - std = float("0.0078993") - data = None - - -class Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0133827") - max_val = float("0.0222178") - mean = float("-0.000251321") - std = float("0.00253254") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [192] - dtype = "float32" - min_val = float("-2.0885") - max_val = float("0.649037") - mean = float("-0.259743") - std = float("0.386626") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = [192] - dtype = "float32" - min_val = float("0.394568") - max_val = float("1.95749") - mean = float("0.953795") - std = float("0.30538") - data = None - - -class Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [192] - dtype = "float32" - min_val = float("0.0012967") - max_val = float("0.00756705") - mean = float("0.00306035") - std = float("0.000987711") - data = None - - -class Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [192] - dtype = "float32" - min_val = float("-0.0322819") - max_val = float("0.0806911") - mean = float("0.0286436") - std = float("0.0227917") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0259952") - max_val = float("0.0272212") - mean = float("-0.000147606") - std = float("0.00265528") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [192] - dtype = "float32" - min_val = float("-2.7731") - max_val = float("-0.0376525") - mean = float("-1.25978") - std = float("0.434928") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [192] - dtype = "float32" - min_val = float("0.744519") - max_val = float("1.562") - mean = float("1.13319") - std = float("0.139729") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [192] - dtype = "float32" - min_val = float("0.0113947") - max_val = float("0.0533248") - mean = float("0.0251346") - std = float("0.00756214") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [192] - dtype = "float32" - min_val = float("-0.991719") - max_val = float("0.260327") - mean = float("-0.0417286") - std = float("0.119228") - data = None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0456909") - max_val = float("0.0485113") - mean = float("-0.000180492") - std = float("0.00293373") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [192] - dtype = "float32" - min_val = float("-1.20653") - max_val = float("0.515872") - mean = float("-0.218122") - std = float("0.352265") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [192] - dtype = "float32" - min_val = float("-3.46641e-06") - max_val = float("0.680648") - mean = float("0.195002") - std = float("0.117181") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [192] - dtype = "float32" - min_val = float("1.40005e-12") - max_val = float("0.000626638") - mean = float("0.000157763") - std = float("9.56391e-05") - data = None - - -class 
Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [192] - dtype = "float32" - min_val = float("-0.0271548") - max_val = float("0.033376") - mean = float("0.0067476") - std = float("0.00878243") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0239047") - max_val = float("0.0291998") - mean = float("-0.00027316") - std = float("0.00260504") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [192] - dtype = "float32" - min_val = float("-1.20653") - max_val = float("0.515872") - mean = float("-0.218122") - std = float("0.352265") - data = None - - -class Program_weight_tensor_parameter_465: - name = "parameter_465" - shape = [192] - dtype = "float32" - min_val = float("0.398504") - max_val = float("1.57172") - mean = float("0.848038") - std = float("0.259251") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [192] - dtype = "float32" - min_val = float("0.00112489") - max_val = float("0.00610025") - mean = float("0.00301719") - std = float("0.000917632") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [192] - dtype = "float32" - min_val = float("-0.0478429") - max_val = float("0.0836548") - mean = float("0.0242454") - std = float("0.0221161") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0240424") - max_val = float("0.0295695") - mean = float("-0.000120615") - std = float("0.00264505") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [192] - dtype = "float32" - min_val = float("-2.48813") - max_val = float("-0.081453") - mean = float("-1.27066") - std = float("0.42002") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [192] - dtype = "float32" - min_val = float("0.694183") - max_val = float("1.5418") - mean = float("1.10669") - std = float("0.135408") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [192] - dtype = "float32" - min_val = float("0.00729749") - max_val = float("0.037708") - mean = float("0.0179887") - std = float("0.00554409") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [192] - dtype = "float32" - min_val = float("-0.396006") - max_val = float("0.372514") - mean = float("-0.0446145") - std = float("0.0976403") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0442693") - max_val = float("0.0514986") - mean = float("-0.000169164") - std = float("0.00292394") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [192] - dtype = "float32" - min_val = float("-1.23276") - max_val = float("0.509269") - mean = float("-0.153678") - std = float("0.304039") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [192] - dtype = "float32" - min_val = float("0.00230822") - max_val = float("1.53088") - mean = float("0.236662") - std = float("0.211142") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [192] - dtype = "float32" - min_val = float("1.5422e-06") - max_val = float("0.00552255") - 
mean = float("0.000356895") - std = float("0.000492703") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [192] - dtype = "float32" - min_val = float("-0.0329799") - max_val = float("0.071078") - mean = float("0.00817666") - std = float("0.0127163") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0505573") - max_val = float("0.0215882") - mean = float("-0.000354289") - std = float("0.00312677") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [192] - dtype = "float32" - min_val = float("-1.23276") - max_val = float("0.509269") - mean = float("-0.153678") - std = float("0.304039") - data = None - - -class Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [192] - dtype = "float32" - min_val = float("0.332725") - max_val = float("1.44096") - mean = float("0.751287") - std = float("0.218777") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [192] - dtype = "float32" - min_val = float("0.00203765") - max_val = float("0.010713") - mean = float("0.00478482") - std = float("0.00160695") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [192] - dtype = "float32" - min_val = float("-0.0848523") - max_val = float("0.0954295") - mean = float("0.0308101") - std = float("0.029966") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0446459") - max_val = float("0.043586") - mean = float("-0.000155276") - std = float("0.00261469") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [192] - dtype = "float32" - min_val = float("-1.86947") - max_val = float("-0.187775") - mean = float("-1.16389") - std = float("0.325632") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [192] - dtype = "float32" - min_val = float("0.751502") - max_val = float("1.6189") - mean = float("1.10951") - std = float("0.131899") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [192] - dtype = "float32" - min_val = float("0.00675098") - max_val = float("0.0288212") - mean = float("0.0150362") - std = float("0.00493022") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape = [192] - dtype = "float32" - min_val = float("-0.630258") - max_val = float("0.15393") - mean = float("-0.0440278") - std = float("0.0837471") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0520183") - max_val = float("0.060427") - mean = float("-0.000134748") - std = float("0.00285085") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [192] - dtype = "float32" - min_val = float("-2.8152") - max_val = float("1.61438") - mean = float("-0.0255439") - std = float("0.761416") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [192] - dtype = "float32" - min_val = float("0.478669") - max_val = float("2.07816") - mean = float("0.879851") - std = float("0.224413") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [192] - dtype = 
"float32" - min_val = float("0.00476784") - max_val = float("0.0318025") - mean = float("0.0116175") - std = float("0.00470038") - data = None - - -class Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [192] - dtype = "float32" - min_val = float("-0.131857") - max_val = float("0.222727") - mean = float("-0.026891") - std = float("0.042331") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0777914") - max_val = float("0.0808518") - mean = float("-0.000351302") - std = float("0.00605248") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [192] - dtype = "float32" - min_val = float("-2.91406") - max_val = float("2.11419") - mean = float("0.103692") - std = float("0.66786") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [192] - dtype = "float32" - min_val = float("0.856639") - max_val = float("5.70993") - mean = float("1.92505") - std = float("0.968671") - data = None - - -class Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [192] - dtype = "float32" - min_val = float("0.00257245") - max_val = float("0.0283236") - mean = float("0.00901804") - std = float("0.00367608") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [192] - dtype = "float32" - min_val = float("-0.109946") - max_val = float("0.115643") - mean = float("-0.0160117") - std = float("0.0391476") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0685027") - max_val = float("0.128425") - mean = float("-0.000355146") - std = float("0.0056262") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [384] - dtype = "float32" - min_val = float("-2.9274") - max_val = float("1.33653") - mean = float("-0.313448") - std = float("0.57221") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [384] - dtype = "float32" - min_val = float("0.700648") - max_val = float("2.45294") - mean = float("1.1469") - std = float("0.257954") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [384] - dtype = "float32" - min_val = float("0.00449195") - max_val = float("0.0530031") - mean = float("0.0124585") - std = float("0.00683387") - data = None - - -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [384] - dtype = "float32" - min_val = float("-0.1972") - max_val = float("0.152797") - mean = float("0.0131924") - std = float("0.044096") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0546488") - max_val = float("0.0575398") - mean = float("-6.44059e-05") - std = float("0.00299774") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [256] - dtype = "float32" - min_val = float("-2.08086") - max_val = float("1.23876") - mean = float("-0.929492") - std = float("0.560306") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [256] - dtype = "float32" - min_val = float("0.460814") - max_val = float("1.60591") - mean = float("1.03747") - std = float("0.187077") - data = None - - -class 
Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [256] - dtype = "float32" - min_val = float("0.000450162") - max_val = float("0.0138338") - mean = float("0.00234151") - std = float("0.00124114") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [256] - dtype = "float32" - min_val = float("-0.15446") - max_val = float("0.117599") - mean = float("-0.0301123") - std = float("0.0510926") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.215021") - max_val = float("0.160071") - mean = float("-0.000576881") - std = float("0.00999512") - data = None - - -class Program_weight_tensor_parameter_509: - name = "parameter_509" - shape = [192] - dtype = "float32" - min_val = float("-0.0144925") - max_val = float("0.00179001") - mean = float("-0.00408812") - std = float("0.00285198") - data = None - - -class Program_weight_tensor_parameter_510: - name = "parameter_510" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.536113") - max_val = float("0.170091") - mean = float("-0.00352777") - std = float("0.00872308") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [96] - dtype = "float32" - min_val = float("-1.89842") - max_val = float("0.649199") - mean = float("-0.163374") - std = float("0.446007") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [96] - dtype = "float32" - min_val = float("0.118793") - max_val = float("3.45032") - mean = float("0.648909") - std = float("0.709122") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [96] - dtype = "float32" - min_val = float("3.48017e-05") - max_val = float("0.00129092") - mean = float("0.000353523") - std = float("0.000270336") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [96] - dtype = "float32" - min_val = float("-0.0383633") - max_val = float("0.0410815") - mean = float("0.00326046") - std = float("0.0151102") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0396602") - max_val = float("0.0712072") - mean = float("-0.000415922") - std = float("0.00589655") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [96] - dtype = "float32" - min_val = float("-1.89842") - max_val = float("0.649199") - mean = float("-0.163374") - std = float("0.446007") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [96] - dtype = "float32" - min_val = float("0.27629") - max_val = float("5.76398") - mean = float("1.11237") - std = float("0.941275") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [96] - dtype = "float32" - min_val = float("0.00035511") - max_val = float("0.00718247") - mean = float("0.0023089") - std = float("0.00133833") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [96] - dtype = "float32" - min_val = float("-0.0920791") - max_val = float("0.0842519") - mean = float("0.0093933") - std = float("0.0381936") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0346361") - max_val = 
float("0.0508237") - mean = float("-0.000158614") - std = float("0.00427877") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [96] - dtype = "float32" - min_val = float("-2.47268") - max_val = float("-0.0395751") - mean = float("-1.25288") - std = float("0.438506") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [96] - dtype = "float32" - min_val = float("0.484514") - max_val = float("1.73163") - mean = float("0.919464") - std = float("0.175598") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [96] - dtype = "float32" - min_val = float("0.0182282") - max_val = float("0.116164") - mean = float("0.0425678") - std = float("0.017739") - data = None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [96] - dtype = "float32" - min_val = float("-2.55824") - max_val = float("1.13418") - mean = float("-0.120129") - std = float("0.381008") - data = None - - -class Program_weight_tensor_parameter_525: - name = "parameter_525" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.127128") - max_val = float("0.0861504") - mean = float("-0.000218704") - std = float("0.00529549") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [96] - dtype = "float32" - min_val = float("-1.35987") - max_val = float("0.614259") - mean = float("-0.108904") - std = float("0.363421") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [96] - dtype = "float32" - min_val = float("0.00955654") - max_val = float("1.85224") - mean = float("0.454916") - std = float("0.359478") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [96] - dtype = "float32" - min_val = float("4.00506e-06") - max_val = float("0.00234563") - mean = float("0.000538477") - std = float("0.000453658") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [96] - dtype = "float32" - min_val = float("-0.0357937") - max_val = float("0.0432553") - mean = float("0.00761236") - std = float("0.014379") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0375291") - max_val = float("0.0336522") - mean = float("-0.000679886") - std = float("0.00538009") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - shape = [96] - dtype = "float32" - min_val = float("-1.35987") - max_val = float("0.614259") - mean = float("-0.108904") - std = float("0.363421") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [96] - dtype = "float32" - min_val = float("0.381916") - max_val = float("2.31118") - mean = float("0.904033") - std = float("0.422742") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [96] - dtype = "float32" - min_val = float("0.00152583") - max_val = float("0.0130956") - mean = float("0.00360983") - std = float("0.00182045") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [96] - dtype = "float32" - min_val = float("-0.0602139") - max_val = float("0.0926286") - mean = float("0.0240804") - std = float("0.0293413") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [96, 96, 3, 3] - 
dtype = "float32" - min_val = float("-0.0526704") - max_val = float("0.0617496") - mean = float("-0.00024044") - std = float("0.00425527") - data = None - - -class Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [96] - dtype = "float32" - min_val = float("-3.30872") - max_val = float("0.356571") - mean = float("-1.21876") - std = float("0.556947") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [96] - dtype = "float32" - min_val = float("0.42521") - max_val = float("1.92533") - mean = float("1.00705") - std = float("0.236551") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [96] - dtype = "float32" - min_val = float("0.0150869") - max_val = float("0.0812702") - mean = float("0.0265418") - std = float("0.00908418") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [96] - dtype = "float32" - min_val = float("-0.89497") - max_val = float("0.555961") - mean = float("-0.0363605") - std = float("0.206065") - data = None - - -class Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.132978") - max_val = float("0.135745") - mean = float("-0.000279374") - std = float("0.00518152") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [96] - dtype = "float32" - min_val = float("-1.22354") - max_val = float("0.655745") - mean = float("-0.0920703") - std = float("0.30607") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [96] - dtype = "float32" - min_val = float("0.0320682") - max_val = float("1.28684") - mean = float("0.312972") - std = float("0.193547") - data = None - - -class Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [96] - dtype = "float32" - min_val = float("1.25242e-05") - max_val = float("0.00304423") - mean = float("0.000494706") - std = float("0.000483804") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [96] - dtype = "float32" - min_val = float("-0.0280188") - max_val = float("0.0438614") - mean = float("0.0066925") - std = float("0.0132689") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0359891") - max_val = float("0.037578") - mean = float("-0.000549529") - std = float("0.00543459") - data = None - - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [96] - dtype = "float32" - min_val = float("-1.22354") - max_val = float("0.655745") - mean = float("-0.0920703") - std = float("0.30607") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [96] - dtype = "float32" - min_val = float("0.321517") - max_val = float("1.60435") - mean = float("0.742508") - std = float("0.256539") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [96] - dtype = "float32" - min_val = float("0.0010948") - max_val = float("0.0109667") - mean = float("0.00391925") - std = float("0.00180583") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [96] - dtype = "float32" - min_val = float("-0.0465221") - max_val = float("0.11619") - mean = float("0.0224584") - std = float("0.0290944") - data = None - - -class 
Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0464603") - max_val = float("0.0403376") - mean = float("-0.000230737") - std = float("0.00435378") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [96] - dtype = "float32" - min_val = float("-3.56355") - max_val = float("0.31361") - mean = float("-1.16302") - std = float("0.578576") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [96] - dtype = "float32" - min_val = float("0.516248") - max_val = float("2.22549") - mean = float("1.01872") - std = float("0.244167") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [96] - dtype = "float32" - min_val = float("0.00947191") - max_val = float("0.0394704") - mean = float("0.019952") - std = float("0.00531941") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [96] - dtype = "float32" - min_val = float("-0.543762") - max_val = float("0.453745") - mean = float("-0.00190825") - std = float("0.159635") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.104936") - max_val = float("0.123929") - mean = float("-0.000230516") - std = float("0.00527145") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [96] - dtype = "float32" - min_val = float("-0.914544") - max_val = float("0.549399") - mean = float("-0.147604") - std = float("0.291572") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [96] - dtype = "float32" - min_val = float("0.0335844") - max_val = float("1.37871") - mean = float("0.313952") - std = float("0.205577") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [96] - dtype = "float32" - min_val = float("2.17369e-05") - max_val = float("0.00299857") - mean = float("0.000531722") - std = float("0.000408053") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [96] - dtype = "float32" - min_val = float("-0.0202019") - max_val = float("0.0401913") - mean = float("0.00828191") - std = float("0.0127605") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0457306") - max_val = float("0.0364814") - mean = float("-0.00067745") - std = float("0.00557391") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [96] - dtype = "float32" - min_val = float("-0.914544") - max_val = float("0.549399") - mean = float("-0.147604") - std = float("0.291572") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [96] - dtype = "float32" - min_val = float("0.141171") - max_val = float("1.73846") - mean = float("0.702424") - std = float("0.28575") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [96] - dtype = "float32" - min_val = float("0.000436992") - max_val = float("0.0116807") - mean = float("0.00437206") - std = float("0.00181053") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [96] - dtype = "float32" - min_val = float("-0.0611763") - max_val = float("0.0849162") - mean = 
float("0.0256109") - std = float("0.0259056") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0711506") - max_val = float("0.0585904") - mean = float("-0.000252199") - std = float("0.00442523") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [96] - dtype = "float32" - min_val = float("-2.62308") - max_val = float("0.0475886") - mean = float("-1.09223") - std = float("0.492262") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [96] - dtype = "float32" - min_val = float("0.546141") - max_val = float("1.74737") - mean = float("0.990116") - std = float("0.183616") - data = None - - -class Program_weight_tensor_parameter_568: - name = "parameter_568" - shape = [96] - dtype = "float32" - min_val = float("0.00780535") - max_val = float("0.0286571") - mean = float("0.0150858") - std = float("0.00396297") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - shape = [96] - dtype = "float32" - min_val = float("-0.397648") - max_val = float("0.372501") - mean = float("-0.0250664") - std = float("0.127281") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0589674") - max_val = float("0.10126") - mean = float("-0.000249985") - std = float("0.00515185") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [96] - dtype = "float32" - min_val = float("-0.982631") - max_val = float("0.556017") - mean = float("-0.128051") - std = float("0.290117") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [96] - dtype = "float32" - min_val = float("0.0693376") - max_val = float("1.15446") - mean = float("0.273121") - std = float("0.164903") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [96] - dtype = "float32" - min_val = float("6.83451e-05") - max_val = float("0.00211645") - mean = float("0.000639177") - std = float("0.000387327") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [96] - dtype = "float32" - min_val = float("-0.0410594") - max_val = float("0.0527878") - mean = float("0.00475022") - std = float("0.0156944") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0522607") - max_val = float("0.0599666") - mean = float("-0.000547462") - std = float("0.00617448") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [96] - dtype = "float32" - min_val = float("-0.982631") - max_val = float("0.556015") - mean = float("-0.128051") - std = float("0.290117") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [96] - dtype = "float32" - min_val = float("0.179543") - max_val = float("1.52891") - mean = float("0.577267") - std = float("0.230524") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [96] - dtype = "float32" - min_val = float("0.00185316") - max_val = float("0.0151893") - mean = float("0.0053405") - std = float("0.00217932") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [96] - dtype = "float32" - 
min_val = float("-0.062003") - max_val = float("0.105803") - mean = float("0.0194326") - std = float("0.0299898") - data = None - - -class Program_weight_tensor_parameter_580: - name = "parameter_580" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.052588") - max_val = float("0.0401134") - mean = float("-0.000216222") - std = float("0.00433571") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [96] - dtype = "float32" - min_val = float("-3.34591") - max_val = float("0.217567") - mean = float("-1.0205") - std = float("0.542044") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [96] - dtype = "float32" - min_val = float("0.541294") - max_val = float("2.73375") - mean = float("1.0434") - std = float("0.234097") - data = None - - -class Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [96] - dtype = "float32" - min_val = float("0.00623622") - max_val = float("0.0298161") - mean = float("0.0123004") - std = float("0.00410441") - data = None - - -class Program_weight_tensor_parameter_584: - name = "parameter_584" - shape = [96] - dtype = "float32" - min_val = float("-0.316894") - max_val = float("0.213951") - mean = float("-0.0247624") - std = float("0.0998134") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0787901") - max_val = float("0.0721749") - mean = float("-0.000275213") - std = float("0.00519315") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [96] - dtype = "float32" - min_val = float("-0.603406") - max_val = float("0.46876") - mean = float("-0.0838298") - std = float("0.256426") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [96] - dtype = "float32" - min_val = float("0.0549309") - max_val = float("1.22997") - mean = float("0.285879") - std = float("0.196767") - data = None - - -class Program_weight_tensor_parameter_588: - name = "parameter_588" - shape = [96] - dtype = "float32" - min_val = float("0.00027077") - max_val = float("0.0173696") - mean = float("0.00296255") - std = float("0.00270399") - data = None - - -class Program_weight_tensor_parameter_589: - name = "parameter_589" - shape = [96] - dtype = "float32" - min_val = float("-0.0278518") - max_val = float("0.0236883") - mean = float("0.000353097") - std = float("0.00822438") - data = None - - -class Program_weight_tensor_parameter_590: - name = "parameter_590" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0745539") - max_val = float("0.0558059") - mean = float("-0.000956811") - std = float("0.00693171") - data = None - - -class Program_weight_tensor_parameter_591: - name = "parameter_591" - shape = [96] - dtype = "float32" - min_val = float("-0.603406") - max_val = float("0.468759") - mean = float("-0.0838298") - std = float("0.256426") - data = None - - -class Program_weight_tensor_parameter_592: - name = "parameter_592" - shape = [96] - dtype = "float32" - min_val = float("0.184847") - max_val = float("1.32269") - mean = float("0.519013") - std = float("0.258771") - data = None - - -class Program_weight_tensor_parameter_593: - name = "parameter_593" - shape = [96] - dtype = "float32" - min_val = float("0.00359367") - max_val = float("0.0619606") - mean = float("0.0188235") - std = float("0.00993846") - data = None - - -class Program_weight_tensor_parameter_594: - 
name = "parameter_594" - shape = [96] - dtype = "float32" - min_val = float("-0.0720302") - max_val = float("0.0583683") - mean = float("-0.00652554") - std = float("0.0276525") - data = None - - -class Program_weight_tensor_parameter_595: - name = "parameter_595" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.077949") - max_val = float("0.0416607") - mean = float("-0.000187561") - std = float("0.00433555") - data = None - - -class Program_weight_tensor_parameter_596: - name = "parameter_596" - shape = [96] - dtype = "float32" - min_val = float("-2.41266") - max_val = float("0.498184") - mean = float("-0.836552") - std = float("0.475182") - data = None - - -class Program_weight_tensor_parameter_597: - name = "parameter_597" - shape = [96] - dtype = "float32" - min_val = float("0.828403") - max_val = float("2.26623") - mean = float("1.25294") - std = float("0.215447") - data = None - - -class Program_weight_tensor_parameter_598: - name = "parameter_598" - shape = [96] - dtype = "float32" - min_val = float("0.00371984") - max_val = float("0.0266859") - mean = float("0.00901981") - std = float("0.00402208") - data = None - - -class Program_weight_tensor_parameter_599: - name = "parameter_599" - shape = [96] - dtype = "float32" - min_val = float("-0.356962") - max_val = float("0.240426") - mean = float("-0.0246764") - std = float("0.102141") - data = None - - -class Program_weight_tensor_parameter_600: - name = "parameter_600" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.111863") - max_val = float("0.117189") - mean = float("-0.00012768") - std = float("0.00536188") - data = None - - -class Program_weight_tensor_parameter_601: - name = "parameter_601" - shape = [96] - dtype = "float32" - min_val = float("-3.19124") - max_val = float("1.93359") - mean = float("0.508377") - std = float("0.871805") - data = None - - -class Program_weight_tensor_parameter_602: - name = "parameter_602" - shape = [96] - dtype = "float32" - min_val = float("0.236398") - max_val = float("2.59722") - mean = float("0.515435") - std = float("0.323564") - data = None - - -class Program_weight_tensor_parameter_603: - name = "parameter_603" - shape = [96] - dtype = "float32" - min_val = float("0.00484403") - max_val = float("0.0706225") - mean = float("0.0146031") - std = float("0.0110259") - data = None - - -class Program_weight_tensor_parameter_604: - name = "parameter_604" - shape = [96] - dtype = "float32" - min_val = float("-0.209383") - max_val = float("0.238176") - mean = float("-0.0128806") - std = float("0.0740322") - data = None - - -class Program_weight_tensor_parameter_605: - name = "parameter_605" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.169334") - max_val = float("0.152895") - mean = float("-0.000316404") - std = float("0.0110428") - data = None - - -class Program_weight_tensor_parameter_606: - name = "parameter_606" - shape = [96] - dtype = "float32" - min_val = float("-4.89364") - max_val = float("1.73104") - mean = float("0.4216") - std = float("1.0546") - data = None - - -class Program_weight_tensor_parameter_607: - name = "parameter_607" - shape = [96] - dtype = "float32" - min_val = float("0.36737") - max_val = float("6.94832") - mean = float("1.69928") - std = float("1.37496") - data = None - - -class Program_weight_tensor_parameter_608: - name = "parameter_608" - shape = [96] - dtype = "float32" - min_val = float("0.00220095") - max_val = float("0.0892683") - mean = float("0.0142797") - std = float("0.011982") - data = None - - -class 
Program_weight_tensor_parameter_609: - name = "parameter_609" - shape = [96] - dtype = "float32" - min_val = float("-0.162445") - max_val = float("0.294147") - mean = float("0.00715431") - std = float("0.0821284") - data = None - - -class Program_weight_tensor_parameter_610: - name = "parameter_610" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0897093") - max_val = float("0.18944") - mean = float("9.40014e-05") - std = float("0.0102431") - data = None - - -class Program_weight_tensor_parameter_611: - name = "parameter_611" - shape = [192] - dtype = "float32" - min_val = float("-2.2631") - max_val = float("1.81642") - mean = float("-0.104199") - std = float("0.765409") - data = None - - -class Program_weight_tensor_parameter_612: - name = "parameter_612" - shape = [192] - dtype = "float32" - min_val = float("0.55386") - max_val = float("3.06666") - mean = float("1.03418") - std = float("0.295019") - data = None - - -class Program_weight_tensor_parameter_613: - name = "parameter_613" - shape = [192] - dtype = "float32" - min_val = float("0.00449313") - max_val = float("0.113476") - mean = float("0.0166471") - std = float("0.0141621") - data = None - - -class Program_weight_tensor_parameter_614: - name = "parameter_614" - shape = [192] - dtype = "float32" - min_val = float("-0.370599") - max_val = float("0.182049") - mean = float("-0.036688") - std = float("0.0839067") - data = None - - -class Program_weight_tensor_parameter_615: - name = "parameter_615" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.0617191") - max_val = float("0.0972102") - mean = float("-0.000107627") - std = float("0.00527157") - data = None - - -class Program_weight_tensor_parameter_616: - name = "parameter_616" - shape = [128] - dtype = "float32" - min_val = float("-2.77261") - max_val = float("1.9501") - mean = float("-0.747738") - std = float("0.667569") - data = None - - -class Program_weight_tensor_parameter_617: - name = "parameter_617" - shape = [128] - dtype = "float32" - min_val = float("0.286557") - max_val = float("2.12971") - mean = float("0.963078") - std = float("0.252374") - data = None - - -class Program_weight_tensor_parameter_618: - name = "parameter_618" - shape = [128] - dtype = "float32" - min_val = float("0.000299552") - max_val = float("0.00717751") - mean = float("0.00205572") - std = float("0.00102826") - data = None - - -class Program_weight_tensor_parameter_619: - name = "parameter_619" - shape = [128] - dtype = "float32" - min_val = float("-0.172927") - max_val = float("0.263946") - mean = float("0.00188719") - std = float("0.0703671") - data = None - - -class Program_weight_tensor_parameter_620: - name = "parameter_620" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.15476") - max_val = float("0.142506") - mean = float("-0.00109664") - std = float("0.0155992") - data = None - - -class Program_weight_tensor_parameter_621: - name = "parameter_621" - shape = [96] - dtype = "float32" - min_val = float("-0.0126476") - max_val = float("-0.000993819") - mean = float("-0.00607335") - std = float("0.00318226") - data = None - - -class Program_weight_tensor_parameter_622: - name = "parameter_622" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.201162") - max_val = float("0.136016") - mean = float("-0.00720846") - std = float("0.0140613") - data = None - - -class Program_weight_tensor_parameter_623: - name = "parameter_623" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - 
- -class Program_weight_tensor_parameter_624: - name = "parameter_624" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_625: - name = "parameter_625" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_626: - name = "parameter_626" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_627: - name = "parameter_627" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0505865") - max_val = float("0.0461092") - mean = float("-0.00117516") - std = float("0.00977429") - data = None - - -class Program_weight_tensor_parameter_628: - name = "parameter_628" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_629: - name = "parameter_629" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_630: - name = "parameter_630" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_631: - name = "parameter_631" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_632: - name = "parameter_632" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0457659") - max_val = float("0.0697484") - mean = float("-0.00021736") - std = float("0.00778433") - data = None - - -class Program_weight_tensor_parameter_633: - name = "parameter_633" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_634: - name = "parameter_634" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_635: - name = "parameter_635" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_636: - name = "parameter_636" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_637: - name = "parameter_637" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0742706") - max_val = float("0.0873867") - mean = float("-0.000456091") - std = float("0.00888736") - data = None - - -class Program_weight_tensor_parameter_638: - name = "parameter_638" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_639: - name = "parameter_639" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_640: - name = "parameter_640" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_641: - name = "parameter_641" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_642: - name = "parameter_642" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0664849") - max_val = float("0.0489036") - mean = float("-0.00150188") - std = float("0.0103773") - data = 
None - - -class Program_weight_tensor_parameter_643: - name = "parameter_643" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_644: - name = "parameter_644" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_645: - name = "parameter_645" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_646: - name = "parameter_646" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_647: - name = "parameter_647" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0407101") - max_val = float("0.050469") - mean = float("-0.000484431") - std = float("0.00782941") - data = None - - -class Program_weight_tensor_parameter_648: - name = "parameter_648" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_649: - name = "parameter_649" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_650: - name = "parameter_650" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_651: - name = "parameter_651" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_652: - name = "parameter_652" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0793737") - max_val = float("0.0613154") - mean = float("-0.000271202") - std = float("0.00895769") - data = None - - -class Program_weight_tensor_parameter_653: - name = "parameter_653" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_654: - name = "parameter_654" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_655: - name = "parameter_655" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_656: - name = "parameter_656" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_657: - name = "parameter_657" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0919037") - max_val = float("0.0493862") - mean = float("-0.00106003") - std = float("0.0128276") - data = None - - -class Program_weight_tensor_parameter_658: - name = "parameter_658" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_659: - name = "parameter_659" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_660: - name = "parameter_660" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_661: - name = "parameter_661" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_662: - name = 
"parameter_662" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0804163") - max_val = float("0.0565016") - mean = float("-0.000357967") - std = float("0.00821755") - data = None - - -class Program_weight_tensor_parameter_663: - name = "parameter_663" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_664: - name = "parameter_664" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_665: - name = "parameter_665" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_666: - name = "parameter_666" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_667: - name = "parameter_667" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0770235") - max_val = float("0.067704") - mean = float("-0.000344409") - std = float("0.00960955") - data = None - - -class Program_weight_tensor_parameter_668: - name = "parameter_668" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_669: - name = "parameter_669" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_670: - name = "parameter_670" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_671: - name = "parameter_671" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_672: - name = "parameter_672" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.118598") - max_val = float("0.0941444") - mean = float("-0.00163172") - std = float("0.0167723") - data = None - - -class Program_weight_tensor_parameter_673: - name = "parameter_673" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_674: - name = "parameter_674" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_675: - name = "parameter_675" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_676: - name = "parameter_676" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_677: - name = "parameter_677" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0929333") - max_val = float("0.130766") - mean = float("-0.000650292") - std = float("0.0158564") - data = None - - -class Program_weight_tensor_parameter_678: - name = "parameter_678" - shape = [96] - dtype = "float32" - min_val = float("-3.11018") - max_val = float("3.25616") - mean = float("0.366533") - std = float("1.14177") - data = None - - -class Program_weight_tensor_parameter_679: - name = "parameter_679" - shape = [96] - dtype = "float32" - min_val = float("0.792014") - max_val = float("4.97259") - mean = float("1.87775") - std = float("0.779521") - data = None - - -class Program_weight_tensor_parameter_680: - name = "parameter_680" - shape = [96] - dtype 
= "float32" - min_val = float("0.315668") - max_val = float("13.6419") - mean = float("1.47445") - std = float("1.58175") - data = None - - -class Program_weight_tensor_parameter_681: - name = "parameter_681" - shape = [96] - dtype = "float32" - min_val = float("-1.15247") - max_val = float("1.963") - mean = float("-0.208063") - std = float("0.507653") - data = None - - -class Program_weight_tensor_parameter_682: - name = "parameter_682" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.0933882") - max_val = float("0.0936018") - mean = float("-0.000299565") - std = float("0.00912066") - data = None - - -class Program_weight_tensor_parameter_683: - name = "parameter_683" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_684: - name = "parameter_684" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_685: - name = "parameter_685" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_686: - name = "parameter_686" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_687: - name = "parameter_687" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.111523") - max_val = float("0.120134") - mean = float("-0.000452619") - std = float("0.0141786") - data = None - - -class Program_weight_tensor_parameter_688: - name = "parameter_688" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_689: - name = "parameter_689" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_690: - name = "parameter_690" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_691: - name = "parameter_691" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_692: - name = "parameter_692" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-0.225894") - max_val = float("0.134795") - mean = float("-0.0001144") - std = float("0.0181569") - data = None - - -class Program_weight_tensor_parameter_693: - name = "parameter_693" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_694: - name = "parameter_694" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_695: - name = "parameter_695" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_696: - name = "parameter_696" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_697: - name = "parameter_697" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-0.214027") - max_val = float("0.221186") - mean = float("-0.00133857") - std = float("0.0524068") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt index 1b08335a3..065083791 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/graph_hash.txt @@ -1 +1 @@ -999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file +753a38369a6c1cbad2faa292d21d2c22dc88ca45239db771fc9c111eed147698 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py index 24ecd6576..c0dc17231 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/input_meta.py @@ -1,42 +1,69 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3549, 4] - dtype = "float32" - min_val = float("0.01") - max_val = float("0.01") - mean = float("0.01") - std = float("9.31323e-10") - data = None + shape = [] + dtype = "int64" + data = [7581] class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 3549, 68] + shape = [2, 7581] dtype = "float32" - min_val = float("-3.91208") - max_val = float("10.707") - mean = float("3.11295e-05") - std = float("1.44354") + max_val = float("2.0") + mean = float("0.00995911") + std = float("0.10127") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [3549, 2] + shape = [2, 11, 7581] dtype = "float32" - min_val = float("4.0") - max_val = float("412.0") - mean = float("208.0") - std = float("120.038") + max_val = float("0.971142") + mean = float("0.00550478") + std = float("0.0532604") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [3549, 1] + shape = [2, 11, 7581] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000905374") + std = float("0.0300758") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 11, 1] + dtype = "int32" + data = [0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 11, 4] + dtype = "float32" + max_val = float("608.0") + mean = float("218.815") + std = float("214.701") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 11, 7581] dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") + max_val = float("0.00885437") + mean = float("1.14459e-05") + std = float("0.000226499") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py index 4910e2ad1..d6c3a388b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/model.py @@ -5,158 +5,244 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, parameter_0, data_0, data_1, data_2, data_3): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_2, data_3) - del data_2 + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] - # pd_op.shape64: (3xi64) <- (2x-1x68xf32) - shape64_0 = paddle._C_ops.shape64(data_1) + # pd_op.unsqueeze: (2x1x-1xf32) <- 
(2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_1, full_int_array_0) + del data_1, full_int_array_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 11, 1] - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + # pd_op.tile: (2x11x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 + + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - del full_int_array_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] + # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_1, False, False, paddle.int64) - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 ) + del argmax_0, full_2 + + # pd_op.transpose: (2x11x-1xf32) <- (2x-1x11xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x11x-1xf32) <- (2x11x-1xb, 2x11x-1xf32, 2x11x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_3) + del data_3, tile_0, transpose_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] + full_int_array_2 = [-2] - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 + # pd_op.sum: (2x-1xf32) <- (2x11x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() + # pd_op.argmax: (2x-1xi64) <- (2x11x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("11"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_4, full_3, float("0"), True) + del data_4, full_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (22xi32) <- (2x11x1xi32) + flatten_0 = paddle._C_ops.flatten(data_5, 0, 2) + del data_5 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # 
pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) + # pd_op.gather: (-1xi32) <- (22xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() + full_5 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() ) - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_5, data_0] - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + # pd_op.stack: (2xi64) <- ([xi64, xi64]) stack_0 = paddle._C_ops.stack(combine_0, 0) del combine_0 - # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_1, stack_0) - del data_1, stack_0 + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_6) + del full_6, sum_0 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [-1, 4] - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_3) + del data_6, full_int_array_3 + + # pd_op.gather: (-1x4xf32) <- (22x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 + + # pd_op.full: (xi64) <- () + full_8 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() ) - del parameter_0 - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_5, data_0, full_8] + del data_0, full_5, full_8 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() + full_9 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() ) - 
# pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_9, where_1.dtype), full_9 + ) + del full_9 - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 + # pd_op.full: (4xi64) <- () + full_10 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_10, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), ) + del full_10 - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_7, where_0) + del data_7 - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_4, True) + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, where_0) + del data_2, where_0 + + # pd_op.max: (2x11x1xf32) <- (2x11x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_4, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] + # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_11, float("1e-09"), True) + del full_11, max_0 - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 + # pd_op.divide: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x11x-1xf32) <- (2x11x-1xf32, 2x11x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x-1xf32) <- (2x11x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_4) + del full_int_array_4, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) - share_data__0 = 
data_0.detach() - del data_0 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_3, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return share_data__0, multiply_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py index 28198680e..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_3/weight_meta.py @@ -1,7 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [1, 17, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None + diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_hash.txt similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/graph_hash.txt rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_hash.txt diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_net.json similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/graph_net.json rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/graph_net.json diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/model.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/model.py rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/model.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_12/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_4/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt deleted file mode 100644 index 5a5c73cb5..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -e7b7e25c61625d326cacc8dd082d9e064f725c78447257a261d2d0d52c62c7d7 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py deleted file mode 100644 index 6bdb7ea24..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/model.py +++ /dev/null @@ -1,7159 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - 
parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - 
parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - 
parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - parameter_698, - parameter_699, - parameter_700, - parameter_701, - parameter_702, - parameter_703, - parameter_704, - parameter_705, - parameter_706, - parameter_707, - parameter_708, - parameter_709, - parameter_710, - parameter_711, - parameter_712, - parameter_713, - parameter_714, - parameter_715, - parameter_716, - parameter_717, - parameter_718, - parameter_719, - parameter_720, - parameter_721, - parameter_722, - parameter_723, - parameter_724, - parameter_725, - parameter_726, - parameter_727, - parameter_728, - parameter_729, - parameter_730, - parameter_731, - parameter_732, - parameter_733, - parameter_734, - parameter_735, - parameter_736, - parameter_737, - parameter_738, - parameter_739, - parameter_740, - parameter_741, - parameter_742, - parameter_743, - parameter_744, - parameter_745, - parameter_746, - parameter_747, - parameter_748, - parameter_749, - parameter_750, - parameter_751, - parameter_752, - data_0, - ): - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_0, parameter_752 - - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_751, - parameter_750, - parameter_749, - parameter_748, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 - - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - del batch_norm__0 - - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_747, [1, 
1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_747, swish_0 - - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_746, - parameter_745, - parameter_744, - parameter_743, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 - - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - del batch_norm__6 - - # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_1, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_742, swish_1 - - # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_741, - parameter_740, - parameter_739, - parameter_738, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 - - # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - del batch_norm__12 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_737, swish_2 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_736, - parameter_735, - parameter_734, - parameter_733, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - del batch_norm__18 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_732 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_731, - parameter_730, - parameter_729, - parameter_728, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_4, parameter_728, 
parameter_729, parameter_730, parameter_731 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - del batch_norm__24 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_727, swish_3 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_726, - parameter_725, - parameter_724, - parameter_723, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_5, parameter_723, parameter_724, parameter_725, parameter_726 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - del batch_norm__30 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_722 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_721, - parameter_720, - parameter_719, - parameter_718, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__36) - del batch_norm__36 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_717 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_716, - parameter_715, - parameter_714, - parameter_713, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_712, swish_6 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_711, - parameter_710, - parameter_709, - parameter_708, - True, - float("0.9"), - 
float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) - del batch_norm__42, batch_norm__48 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_7 = paddle._C_ops.swish(add_0) - del add_0 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_5, swish_7) - del swish_5, swish_7 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_707 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_706, - parameter_705, - parameter_704, - parameter_703, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_8 = paddle._C_ops.swish(batch_norm__54) - del batch_norm__54 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_702 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_701, - parameter_700, - parameter_699, - parameter_698, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_697, swish_8 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_696, - parameter_695, - parameter_694, - parameter_693, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_2 = paddle._C_ops.add(batch_norm__60, batch_norm__66) - del batch_norm__60, batch_norm__66 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_9 = paddle._C_ops.swish(add_2) - del add_2 
- - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_9) - del add_1, swish_9 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_692 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_691, - parameter_690, - parameter_689, - parameter_688, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_12, parameter_688, parameter_689, parameter_690, parameter_691 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(batch_norm__72) - del batch_norm__72 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_10, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_687 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_686, - parameter_685, - parameter_684, - parameter_683, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_682, swish_10 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_681, - parameter_680, - parameter_679, - parameter_678, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_4 = paddle._C_ops.add(batch_norm__78, batch_norm__84) - del batch_norm__78, batch_norm__84 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(add_4) - del add_4 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_5 = paddle._C_ops.add(add_3, swish_11) - del add_3, swish_11 - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_0 = [swish_4, add_5] - del add_5, swish_4 - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - 
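# Illustrative sketch, continuing ConvBNAct above (imports assumed from there).
# The span from conv2d_6 through add_1 -- and each later pair of add_N ops --
# is one residual block: a 3x3 ConvBNAct, then parallel 3x3 and 1x1 conv+BN
# branches whose sum passes through swish, with the block input added back.
# RepConv and BasicBlock are illustrative names, not identifiers from the patch.
class RepConv(nn.Layer):
    """Parallel 3x3 and 1x1 conv+BN branches, summed and then activated."""

    def __init__(self, ch):
        super().__init__()
        self.conv3 = nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False)
        self.bn3 = nn.BatchNorm2D(ch)
        self.conv1 = nn.Conv2D(ch, ch, 1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(ch)

    def forward(self, x):
        # Mirrors add_0 = bn(conv3x3(x)) + bn(conv1x1(x)); swish_7 = swish(add_0).
        return F.swish(self.bn3(self.conv3(x)) + self.bn1(self.conv1(x)))


class BasicBlock(nn.Layer):
    """ConvBNAct followed by RepConv, wrapped in an identity shortcut."""

    def __init__(self, ch):
        super().__init__()
        self.conv1 = ConvBNAct(ch, ch, kernel_size=3)
        self.conv2 = RepConv(ch)

    def forward(self, x):
        # Mirrors add_1 = swish_5 + swish_7.
        return x + self.conv2(self.conv1(x))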
concat_2 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_0, parameter_677 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) - del parameter_676 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - del conv2d_15, reshape_0 - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) - del concat_2, hardsigmoid_0 - - # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_0, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_0, parameter_675 - - # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_674, - parameter_673, - parameter_672, - parameter_671, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 - - # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) - swish_12 = paddle._C_ops.swish(batch_norm__90) - del batch_norm__90 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_670, swish_12 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_669, - parameter_668, - parameter_667, - parameter_666, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__96) - del batch_norm__96 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_665 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - 
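# Illustrative sketch, continuing the classes above (imports assumed).
# The sequence mean_0 -> conv2d_15 (+ reshaped bias) -> hardsigmoid_0 ->
# multiply_0 is a channel-attention (effective squeeze-excitation) step on the
# concatenated branches: a global average pool over H and W, a 1x1 conv with
# bias, a hard-sigmoid gate clip(0.166667 * x + 0.5, 0, 1), and a per-channel
# rescale. EffectiveSE is an illustrative name, not an identifier from the patch.
class EffectiveSE(nn.Layer):
    """Global mean -> 1x1 conv with bias -> hard sigmoid -> channel rescale."""

    def __init__(self, ch):
        super().__init__()
        # Bias enabled here, matching the separate bias tensor that the graph
        # reshapes to 1xCx1x1 and adds after the 1x1 conv.
        self.fc = nn.Conv2D(ch, ch, 1)

    def forward(self, x):
        gate = paddle.mean(x, axis=[2, 3], keepdim=True)                 # NxCx1x1
        gate = F.hardsigmoid(self.fc(gate), slope=0.166667, offset=0.5)  # values in [0, 1]
        return x * gate                                                  # broadcast over HxW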
batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_664, - parameter_663, - parameter_662, - parameter_661, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__102) - del batch_norm__102 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_660, swish_13 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_659, - parameter_658, - parameter_657, - parameter_656, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_19, parameter_656, parameter_657, parameter_658, parameter_659 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__108) - del batch_norm__108 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_655 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_654, - parameter_653, - parameter_652, - parameter_651, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__114) - del batch_norm__114 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_650 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_649, - parameter_648, - parameter_647, - parameter_646, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_16, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" 
- ) - del parameter_645, swish_16 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_644, - parameter_643, - parameter_642, - parameter_641, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_7 = paddle._C_ops.add(batch_norm__120, batch_norm__126) - del batch_norm__120, batch_norm__126 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_17 = paddle._C_ops.swish(add_7) - del add_7 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_8 = paddle._C_ops.add(swish_15, swish_17) - del swish_15, swish_17 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_640 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_639, - parameter_638, - parameter_637, - parameter_636, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(batch_norm__132) - del batch_norm__132 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_635 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_634, - parameter_633, - parameter_632, - parameter_631, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_630, swish_18 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_629, - parameter_628, - parameter_627, - parameter_626, - True, - float("0.9"), - 
float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_9 = paddle._C_ops.add(batch_norm__138, batch_norm__144) - del batch_norm__138, batch_norm__144 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(add_9) - del add_9 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_19) - del add_8, swish_19 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_625 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_624, - parameter_623, - parameter_622, - parameter_621, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(batch_norm__150) - del batch_norm__150 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615, swish_20 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(batch_norm__156, batch_norm__162) - del batch_norm__156, batch_norm__162 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_21 = 
paddle._C_ops.swish(add_11) - del add_11 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_21) - del add_10, swish_21 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_29, parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(batch_norm__168) - del batch_norm__168 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_605 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_604, - parameter_603, - parameter_602, - parameter_601, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_600, swish_22 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_599, - parameter_598, - parameter_597, - parameter_596, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_13 = paddle._C_ops.add(batch_norm__174, batch_norm__180) - del batch_norm__174, batch_norm__180 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(add_13) - del add_13 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_14 = paddle._C_ops.add(add_12, swish_23) - del add_12, swish_23 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_595 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, 
-1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_594, - parameter_593, - parameter_592, - parameter_591, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(batch_norm__186) - del batch_norm__186 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_24, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_590 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_589, - parameter_588, - parameter_587, - parameter_586, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_585, swish_24 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_584, - parameter_583, - parameter_582, - parameter_581, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_15 = paddle._C_ops.add(batch_norm__192, batch_norm__198) - del batch_norm__192, batch_norm__198 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_25 = paddle._C_ops.swish(add_15) - del add_15 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_16 = paddle._C_ops.add(add_14, swish_25) - del add_14, swish_25 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_580 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_579, - parameter_578, - parameter_577, - parameter_576, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, 
None, None, None, None), - ) - del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_26 = paddle._C_ops.swish(batch_norm__204) - del batch_norm__204 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_575 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_574, - parameter_573, - parameter_572, - parameter_571, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_570, swish_26 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_569, - parameter_568, - parameter_567, - parameter_566, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__210, batch_norm__216) - del batch_norm__210, batch_norm__216 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_27 = paddle._C_ops.swish(add_17) - del add_17 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_18 = paddle._C_ops.add(add_16, swish_27) - del add_16, swish_27 - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_1 = [swish_14, add_18] - del add_18, swish_14 - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_1, parameter_565 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) - del parameter_564 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - del conv2d_38, reshape_1 - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_1 = 
paddle._C_ops.multiply(concat_3, hardsigmoid_1) - del concat_3, hardsigmoid_1 - - # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_1, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_1, parameter_563 - - # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_562, - parameter_561, - parameter_560, - parameter_559, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 - - # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) - swish_28 = paddle._C_ops.swish(batch_norm__222) - del batch_norm__222 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_558 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_557, - parameter_556, - parameter_555, - parameter_554, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__228) - del batch_norm__228 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_553 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_552, - parameter_551, - parameter_550, - parameter_549, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__234) - del batch_norm__234 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_548, swish_29 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda 
x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_547, - parameter_546, - parameter_545, - parameter_544, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__240) - del batch_norm__240 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_543 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_542, - parameter_541, - parameter_540, - parameter_539, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__246) - del batch_norm__246 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_538 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_537, - parameter_536, - parameter_535, - parameter_534, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_533, swish_32 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_532, - parameter_531, - parameter_530, - parameter_529, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__252, batch_norm__258) - del batch_norm__252, batch_norm__258 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(add_20) - del add_20 - - # pd_op.add: (2x192x-1x-1xf32) <- 
(2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_21 = paddle._C_ops.add(swish_31, swish_33) - del swish_31, swish_33 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_528 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_527, - parameter_526, - parameter_525, - parameter_524, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(batch_norm__264) - del batch_norm__264 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_523 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_522, - parameter_521, - parameter_520, - parameter_519, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_518, swish_34 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_517, - parameter_516, - parameter_515, - parameter_514, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_22 = paddle._C_ops.add(batch_norm__270, batch_norm__276) - del batch_norm__270, batch_norm__276 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(add_22) - del add_22 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_23 = paddle._C_ops.add(add_21, swish_35) - del add_21, swish_35 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_513 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_512, - parameter_511, - parameter_510, - parameter_509, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(batch_norm__282) - del batch_norm__282 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_36, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503, swish_36 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_51, parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_24 = paddle._C_ops.add(batch_norm__288, batch_norm__294) - del batch_norm__288, batch_norm__294 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(add_24) - del add_24 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_25 = paddle._C_ops.add(add_23, swish_37) - del add_23, swish_37 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(batch_norm__300) - del batch_norm__300 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_493 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_492, - parameter_491, - parameter_490, - parameter_489, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_488, swish_38 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_487, - parameter_486, - parameter_485, - parameter_484, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_26 = paddle._C_ops.add(batch_norm__306, batch_norm__312) - del batch_norm__306, batch_norm__312 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_39 = paddle._C_ops.swish(add_26) - del add_26 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_27 = paddle._C_ops.add(add_25, swish_39) - del add_25, swish_39 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_483 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_482, - parameter_481, - parameter_480, - parameter_479, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_40 = paddle._C_ops.swish(batch_norm__318) - del batch_norm__318 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_40, 
parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_478 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_477, - parameter_476, - parameter_475, - parameter_474, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_473, swish_40 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_472, - parameter_471, - parameter_470, - parameter_469, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_28 = paddle._C_ops.add(batch_norm__324, batch_norm__330) - del batch_norm__324, batch_norm__330 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_41 = paddle._C_ops.swish(add_28) - del add_28 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_29 = paddle._C_ops.add(add_27, swish_41) - del add_27, swish_41 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_468 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_467, - parameter_466, - parameter_465, - parameter_464, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(batch_norm__336) - del batch_norm__336 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_463 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_59, - parameter_462, - parameter_461, - parameter_460, - parameter_459, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_458, swish_42 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_457, - parameter_456, - parameter_455, - parameter_454, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_30 = paddle._C_ops.add(batch_norm__342, batch_norm__348) - del batch_norm__342, batch_norm__348 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(add_30) - del add_30 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_31 = paddle._C_ops.add(add_29, swish_43) - del add_29, swish_43 - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_2 = [swish_30, add_31] - del add_31, swish_30 - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_2, parameter_453 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_452, full_int_array_1) - del parameter_452 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - del conv2d_61, reshape_2 - - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) - del concat_4, hardsigmoid_2 - - # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_2, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_2, parameter_451 - - # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_450, - parameter_449, - parameter_448, - parameter_447, - True, - float("0.9"), - 
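# Illustrative sketch, composing the pieces above (imports assumed).
# Each repetition of this structure -- a stride-2 3x3 ConvBNAct, two parallel
# 1x1 reductions, a chain of BasicBlocks on one branch, a channel concat
# (full_0 above is just the concat axis 1 as an int32 CPU tensor), the
# EffectiveSE gate, and a final 1x1 ConvBNAct -- is one CSP-style stage; the
# stages visible here widen from 96 to 192 to 384 channels before fusion, with
# a 768-channel stage beginning right after this point. CSPStage and the block
# count n are illustrative, not taken from the patch.
class CSPStage(nn.Layer):
    """Downsample, split into two 1x1 branches, run blocks on one branch,
    concat, apply channel attention, then fuse with a 1x1 ConvBNAct."""

    def __init__(self, in_ch, mid_ch, out_ch, n=3):
        super().__init__()
        self.down = ConvBNAct(in_ch, mid_ch, kernel_size=3, stride=2)
        self.branch_a = ConvBNAct(mid_ch, mid_ch // 2, kernel_size=1)
        self.branch_b = ConvBNAct(mid_ch, mid_ch // 2, kernel_size=1)
        self.blocks = nn.Sequential(*[BasicBlock(mid_ch // 2) for _ in range(n)])
        self.attn = EffectiveSE(mid_ch)
        self.fuse = ConvBNAct(mid_ch, out_ch, kernel_size=1)

    def forward(self, x):
        x = self.down(x)
        a = self.branch_a(x)               # branch kept aside (e.g. swish_30 above)
        b = self.blocks(self.branch_b(x))  # branch run through the residual blocks
        y = paddle.concat([a, b], axis=1)  # channel concat
        return self.fuse(self.attn(y))


# Example with channel sizes matching the fully visible 192-channel stage
# above (six residual blocks, 128 -> 256 output channels).
out = CSPStage(128, 192, 256, n=6)(paddle.randn([2, 128, 40, 40]))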
float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 - - # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) - swish_44 = paddle._C_ops.swish(batch_norm__354) - del batch_norm__354 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_446 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_445, - parameter_444, - parameter_443, - parameter_442, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__360) - del batch_norm__360 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_441 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_440, - parameter_439, - parameter_438, - parameter_437, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__366) - del batch_norm__366 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_436, swish_45 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_435, - parameter_434, - parameter_433, - parameter_432, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__372) - del batch_norm__372 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_431 - - # 
pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_430, - parameter_429, - parameter_428, - parameter_427, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__378) - del batch_norm__378 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_426 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_425, - parameter_424, - parameter_423, - parameter_422, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_67, parameter_422, parameter_423, parameter_424, parameter_425 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_421, swish_48 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_420, - parameter_419, - parameter_418, - parameter_417, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_33 = paddle._C_ops.add(batch_norm__384, batch_norm__390) - del batch_norm__384, batch_norm__390 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_49 = paddle._C_ops.swish(add_33) - del add_33 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_34 = paddle._C_ops.add(swish_47, swish_49) - del swish_47, swish_49 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_416 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_415, - parameter_414, - parameter_413, - parameter_412, - True, - 
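# The repeating unit above (conv2d_66 .. add_34) is: a 3x3 conv+BN+swish, then a
# parallel 3x3 conv+BN and 1x1 conv+BN pair whose sum is swish-activated and added
# back to the unit input.  A compact eager-mode paddle sketch of one such unit
# (hypothetical class name; the graph above is the decomposed pd_op form of this
# kind of block):
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class RepBasicUnit(nn.Layer):
    def __init__(self, ch):
        super().__init__()
        self.pre = nn.Sequential(nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False),
                                 nn.BatchNorm2D(ch))
        self.branch3 = nn.Sequential(nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False),
                                     nn.BatchNorm2D(ch))
        self.branch1 = nn.Sequential(nn.Conv2D(ch, ch, 1, bias_attr=False),
                                     nn.BatchNorm2D(ch))

    def forward(self, x):
        y = self.pre(x)
        y = y * F.sigmoid(y)                      # swish
        z = self.branch3(y) + self.branch1(y)     # parallel 3x3 / 1x1 conv+BN, summed
        z = z * F.sigmoid(z)                      # swish on the sum
        return x + z                              # residual add back to the unit input

print(RepBasicUnit(384)(paddle.rand([2, 384, 8, 8])).shape)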
float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_50 = paddle._C_ops.swish(batch_norm__396) - del batch_norm__396 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_411 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_410, - parameter_409, - parameter_408, - parameter_407, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_50, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_406, swish_50 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_405, - parameter_404, - parameter_403, - parameter_402, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_35 = paddle._C_ops.add(batch_norm__402, batch_norm__408) - del batch_norm__402, batch_norm__408 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_51 = paddle._C_ops.swish(add_35) - del add_35 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_36 = paddle._C_ops.add(add_34, swish_51) - del add_34, swish_51 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_401 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_400, - parameter_399, - parameter_398, - parameter_397, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_52 = paddle._C_ops.swish(batch_norm__414) - del batch_norm__414 - - # pd_op.conv2d: (2x384x-1x-1xf32) 
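# pd_op.swish above is the SiLU activation, x * sigmoid(x), applied after nearly
# every conv+BN pair.  NumPy reference for the element-wise formula:
import numpy as np

def swish(x):
    return x / (1.0 + np.exp(-x))    # equivalent to x * sigmoid(x)

print(swish(np.array([-2.0, 0.0, 2.0], dtype="float32")))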
<- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_396 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_395, - parameter_394, - parameter_393, - parameter_392, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_52, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_391, swish_52 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_390, - parameter_389, - parameter_388, - parameter_387, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_37 = paddle._C_ops.add(batch_norm__420, batch_norm__426) - del batch_norm__420, batch_norm__426 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_53 = paddle._C_ops.swish(add_37) - del add_37 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_38 = paddle._C_ops.add(add_36, swish_53) - del add_36, swish_53 - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_3 = [swish_46, add_38] - del add_38, swish_46 - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) - del full_int_array_0 - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_3, parameter_386 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) - del parameter_385 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - del conv2d_75, reshape_3 - - # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) - del concat_5, hardsigmoid_3 - - # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_3, parameter_384, 
[1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_3, parameter_384 - - # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_383, - parameter_382, - parameter_381, - parameter_380, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_76, parameter_380, parameter_381, parameter_382, parameter_383 - - # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) - swish_54 = paddle._C_ops.swish(batch_norm__432) - del batch_norm__432 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_379 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_378, - parameter_377, - parameter_376, - parameter_375, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_55 = paddle._C_ops.swish(batch_norm__438) - del batch_norm__438 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_374, swish_54 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_373, - parameter_372, - parameter_371, - parameter_370, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__444) - del batch_norm__444 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_369, swish_56 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_368, - parameter_367, - parameter_366, - parameter_365, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - 
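# The batch_norm_ calls above carry momentum 0.9 and epsilon 1e-05; at inference
# each channel is normalised with stored statistics and then scaled and shifted,
# which is why only the first output tensor is used.  NumPy sketch of that
# per-channel affine map with placeholder statistics:
import numpy as np

def bn_infer(x, scale, bias, mean, var, eps=1e-05):
    inv = scale / np.sqrt(var + eps)
    return inv[None, :, None, None] * (x - mean[None, :, None, None]) \
        + bias[None, :, None, None]

x = np.random.rand(2, 384, 8, 8).astype("float32")
scale, bias, mean, var = (np.random.rand(384).astype("float32") for _ in range(4))
print(bn_infer(x, scale, bias, mean, var).shape)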
), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_57 = paddle._C_ops.swish(batch_norm__450) - del batch_norm__450 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_364 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_363, - parameter_362, - parameter_361, - parameter_360, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_359, swish_57 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_358, - parameter_357, - parameter_356, - parameter_355, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - del batch_norm__456, batch_norm__462 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_58 = paddle._C_ops.swish(add_40) - del add_40 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_354, swish_58 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_353, - parameter_352, - parameter_351, - parameter_350, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_59 = paddle._C_ops.swish(batch_norm__468) - del batch_norm__468 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_349 - - # 
pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_348, - parameter_347, - parameter_346, - parameter_345, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_344, swish_59 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_343, - parameter_342, - parameter_341, - parameter_340, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - del batch_norm__474, batch_norm__480 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_60 = paddle._C_ops.swish(add_41) - del add_41 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] - - # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_60, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_60, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_3 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_60, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_4 - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] - del pool2d_0, pool2d_1, pool2d_2, swish_60 - - # pd_op.concat: (2x1536x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1536x-1x-1xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_6, parameter_339 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 
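# full_int_array_2/3/4 and pool2d_0/1/2 above form an SPP block: the same 384-channel
# map is max-pooled with 5x5, 9x9 and 13x13 windows (stride 1, paddings 2/4/6),
# concatenated with the unpooled input to 1536 channels (concat_6), and reduced back
# to 384 by the 1x1 conv conv2d_85.  Public-API sketch of the pooling/concat part
# with a placeholder tensor:
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 384, 20, 20])
pools = [F.max_pool2d(x, kernel_size=k, stride=1, padding=k // 2) for k in (5, 9, 13)]
spp = paddle.concat([x] + pools, axis=1)        # 2 x 1536 x 20 x 20
print(spp.shape)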
384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_338, - parameter_337, - parameter_336, - parameter_335, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_61 = paddle._C_ops.swish(batch_norm__486) - del batch_norm__486 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_334, swish_61 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_333, - parameter_332, - parameter_331, - parameter_330, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_62 = paddle._C_ops.swish(batch_norm__492) - del batch_norm__492 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_329 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_328, - parameter_327, - parameter_326, - parameter_325, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324, swish_62 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - del batch_norm__498, 
batch_norm__504 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_63 = paddle._C_ops.swish(add_42) - del add_42 - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_5 = [swish_55, swish_63] - del swish_55, swish_63 - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_7, parameter_319 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_64 = paddle._C_ops.swish(batch_norm__510) - del batch_norm__510 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_65 = paddle._C_ops.swish(batch_norm__516) - del batch_norm__516 - - # pd_op.nearest_interp: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_65, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_65 - - # builtin.combine: ([2x384x-1x-1xf32, 2x512x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x512x-1x-1xf32) - combine_6 = [nearest_interp_0, swish_44] - del nearest_interp_0, swish_44 - - # pd_op.concat: (2x896x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x512x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - 
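# nearest_interp_0 above upsamples the 384-channel top-down feature by 2x with
# nearest-neighbour interpolation, and concat_8 stacks it on the 512-channel feature
# kept as swish_44, giving the 896-channel input of the next stage.  Equivalent
# public-API sketch (placeholder shapes only):
import paddle
import paddle.nn.functional as F

top = paddle.rand([2, 384, 10, 10])      # coarser pyramid level
lateral = paddle.rand([2, 512, 20, 20])  # feature playing the role of swish_44
up = F.interpolate(top, scale_factor=2, mode="nearest")
fused = paddle.concat([up, lateral], axis=1)     # 2 x 896 x 20 x 20, like concat_8
print(fused.shape)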
paddle._C_ops.batch_norm( - conv2d_91, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_66 = paddle._C_ops.swish(batch_norm__522) - del batch_norm__522 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_8, parameter_304 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_67 = paddle._C_ops.swish(batch_norm__528) - del batch_norm__528 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299, swish_67 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_68 = paddle._C_ops.swish(batch_norm__534) - del batch_norm__534 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_289, swish_68 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - del batch_norm__540, batch_norm__546 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_69 = paddle._C_ops.swish(add_43) - del add_43 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284, swish_69 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_70 = paddle._C_ops.swish(batch_norm__552) - del batch_norm__552 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274, swish_70 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, 
(list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - del batch_norm__558, batch_norm__564 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_71 = paddle._C_ops.swish(add_44) - del add_44 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269, swish_71 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_72 = paddle._C_ops.swish(batch_norm__570) - del batch_norm__570 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259, swish_72 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - del batch_norm__576, batch_norm__582 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_73 = paddle._C_ops.swish(add_45) - del add_45 - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - 
combine_7 = [swish_66, swish_73] - del swish_66, swish_73 - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_9, parameter_254 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_74 = paddle._C_ops.swish(batch_norm__588) - del batch_norm__588 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_75 = paddle._C_ops.swish(batch_norm__594) - del batch_norm__594 - - # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_75, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_75 - - # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) - combine_8 = [nearest_interp_1, swish_28] - del nearest_interp_1, swish_28 - - # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, 
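# conv2d_91 and conv2d_92 earlier read the same 896-channel concat_8 through two
# separate 1x1 convs, giving two 192-channel paths: one (swish_66) is kept as-is
# while the other runs through the stack of residual units, and concat_9 plus the
# 1x1 conv conv2d_102 merge them back to 384 channels, a CSP-style split/merge.
# Wiring sketch with placeholder modules (the residual stack is elided):
import paddle
import paddle.nn as nn

split_keep = nn.Conv2D(896, 192, 1, bias_attr=False)
split_work = nn.Conv2D(896, 192, 1, bias_attr=False)
fuse = nn.Conv2D(384, 384, 1, bias_attr=False)

x = paddle.rand([2, 896, 20, 20])
kept, worked = split_keep(x), split_work(x)      # 'worked' would pass through the units
merged = fuse(paddle.concat([kept, worked], axis=1))
print(merged.shape)                              # [2, 384, 20, 20]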
None, None, None, None), - ) - del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_76 = paddle._C_ops.swish(batch_norm__600) - del batch_norm__600 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_10, parameter_239 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_77 = paddle._C_ops.swish(batch_norm__606) - del batch_norm__606 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234, swish_77 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_78 = paddle._C_ops.swish(batch_norm__612) - del batch_norm__612 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224, swish_78 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_108, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - del batch_norm__618, batch_norm__624 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_79 = paddle._C_ops.swish(add_46) - del add_46 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219, swish_79 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_80 = paddle._C_ops.swish(batch_norm__630) - del batch_norm__630 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209, swish_80 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - del batch_norm__636, batch_norm__642 - - # pd_op.swish: (2x96x-1x-1xf32) <- 
(2x96x-1x-1xf32) - swish_81 = paddle._C_ops.swish(add_47) - del add_47 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204, swish_81 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_82 = paddle._C_ops.swish(batch_norm__648) - del batch_norm__648 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194, swish_82 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - del batch_norm__654, batch_norm__660 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_83 = paddle._C_ops.swish(add_48) - del add_48 - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_9 = [swish_76, swish_83] - del swish_76, swish_83 - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
concat_11, parameter_189 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_84 = paddle._C_ops.swish(batch_norm__666) - del batch_norm__666 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_85 = paddle._C_ops.swish(batch_norm__672) - del batch_norm__672 - - # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) - combine_10 = [swish_85, swish_74] - del swish_74, swish_85 - - # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_86 = paddle._C_ops.swish(batch_norm__678) - del batch_norm__678 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_118 = paddle._C_ops.conv2d( - concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_12, parameter_174 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, 
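# conv2d_116 above is a bottom-up fusion step (PAN-style): the fused 192-channel map
# swish_84 is downsampled with a stride-2 3x3 conv, and concat_12 stacks the result
# on the 384-channel top-down feature kept as swish_74, producing the 576-channel
# input split by conv2d_117/conv2d_118.  Sketch with placeholder tensors and an
# untrained conv:
import paddle
import paddle.nn as nn

down = nn.Conv2D(192, 192, 3, stride=2, padding=1, bias_attr=False)
fine = paddle.rand([2, 192, 40, 40])
coarse = paddle.rand([2, 384, 20, 20])   # feature playing the role of swish_74
merged = paddle.concat([down(fine), coarse], axis=1)   # 2 x 576 x 20 x 20
print(merged.shape)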
- batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_87 = paddle._C_ops.swish(batch_norm__684) - del batch_norm__684 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169, swish_87 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_88 = paddle._C_ops.swish(batch_norm__690) - del batch_norm__690 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159, swish_88 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_121, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - del batch_norm__696, batch_norm__702 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_89 = 
paddle._C_ops.swish(add_49) - del add_49 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154, swish_89 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_90 = paddle._C_ops.swish(batch_norm__708) - del batch_norm__708 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144, swish_90 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - del batch_norm__714, batch_norm__720 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_91 = paddle._C_ops.swish(add_50) - del add_50 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139, swish_91 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__730, - batch_norm__731, - ) = 
(lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_92 = paddle._C_ops.swish(batch_norm__726) - del batch_norm__726 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129, swish_92 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - del batch_norm__732, batch_norm__738 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_93 = paddle._C_ops.swish(add_51) - del add_51 - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_11 = [swish_86, swish_93] - del swish_86, swish_93 - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_13, parameter_124 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_128, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda 
out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_94 = paddle._C_ops.swish(batch_norm__744) - del batch_norm__744 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_95 = paddle._C_ops.swish(batch_norm__750) - del batch_norm__750 - - # builtin.combine: ([2x384x-1x-1xf32, 2x768x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x768x-1x-1xf32) - combine_12 = [swish_95, swish_64] - del swish_64, swish_95 - - # pd_op.concat: (2x1152x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x768x-1x-1xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_96 = paddle._C_ops.swish(batch_norm__756) - del batch_norm__756 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_14, parameter_109 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_97 = 
paddle._C_ops.swish(batch_norm__762) - del batch_norm__762 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104, swish_97 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_98 = paddle._C_ops.swish(batch_norm__768) - del batch_norm__768 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94, swish_98 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - del batch_norm__774, batch_norm__780 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_99 = paddle._C_ops.swish(add_52) - del add_52 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89, swish_99 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = 
(lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_100 = paddle._C_ops.swish(batch_norm__786) - del batch_norm__786 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79, swish_100 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - del batch_norm__792, batch_norm__798 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_101 = paddle._C_ops.swish(add_53) - del add_53 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74, swish_101 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_138, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_102 = paddle._C_ops.swish(batch_norm__804) - del batch_norm__804 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- 
(2x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64, swish_102 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_140, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - del batch_norm__810, batch_norm__816 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_103 = paddle._C_ops.swish(add_54) - del add_54 - - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_13 = [swish_96, swish_103] - del swish_103, swish_96 - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_15 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_15, parameter_59 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_104 = paddle._C_ops.swish(batch_norm__822) - del batch_norm__822 - - # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(swish_104) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [2] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 
1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_0 - - # pd_op.shape64: (4xi64) <- (2x768x-1x-1xf32) - shape64_1 = paddle._C_ops.shape64(swish_104) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [4] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] - ) - del shape64_1 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) - del slice_0, slice_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_8 = [1, 1] - - # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - pool2d_3 = paddle._C_ops.pool2d( - swish_104, - full_int_array_8, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_142 = paddle._C_ops.conv2d( - pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) - del parameter_53 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_55 = paddle._C_ops.add(conv2d_142, reshape_4) - del conv2d_142, reshape_4 - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_55) - del add_55 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_5 = paddle._C_ops.multiply(swish_104, sigmoid_0) - del sigmoid_0 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_143 = paddle._C_ops.conv2d( - multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_5, parameter_52 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__828, - batch_norm__829, - batch_norm__830, - batch_norm__831, - batch_norm__832, - batch_norm__833, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_143, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_105 = paddle._C_ops.swish(batch_norm__828) - del batch_norm__828 - - # pd_op.add: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x-1x-1xf32) - add_56 = paddle._C_ops.add(swish_105, swish_104) - del swish_105 - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x768x-1x-1xf32, 4x768x3x3xf32) - conv2d_144 = paddle._C_ops.conv2d( - add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_56, parameter_47 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) - del parameter_46 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_57 = paddle._C_ops.add(conv2d_144, reshape_5) - del conv2d_144, reshape_5 - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_145 = paddle._C_ops.conv2d( - pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_45, pool2d_3 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = 
paddle._C_ops.reshape(parameter_44, full_int_array_1) - del parameter_44 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_58 = paddle._C_ops.add(conv2d_145, reshape_6) - del conv2d_145, reshape_6 - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_58) - del add_58 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_6 = paddle._C_ops.multiply(swish_104, sigmoid_1) - del sigmoid_1, swish_104 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_146 = paddle._C_ops.conv2d( - multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_6, parameter_43 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__834, - batch_norm__835, - batch_norm__836, - batch_norm__837, - batch_norm__838, - batch_norm__839, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_146, - parameter_42, - parameter_41, - parameter_40, - parameter_39, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_106 = paddle._C_ops.swish(batch_norm__834) - del batch_norm__834 - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x768x-1x-1xf32, 68x768x3x3xf32) - conv2d_147 = paddle._C_ops.conv2d( - swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_38, swish_106 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) - del parameter_37 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_59 = paddle._C_ops.add(conv2d_147, reshape_7) - del conv2d_147, reshape_7 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_3 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_14 = [full_1, full_2, full_3, multiply_4] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_14, 0) - del combine_14 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_59, stack_0) - del add_59, stack_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) - del reshape_8 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_0 = paddle._C_ops.softmax(transpose_0, 1) - del transpose_0 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_148 = paddle._C_ops.conv2d( - softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_9 = [1] - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_9) - del conv2d_148 - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_57) - del add_57 - - 
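Note on the block above: this is an integral (DFL-style) decoding of the 68-channel regression map — it is reshaped to (-1, 4, 17, H*W), transposed, softmax-normalised over the 17 bins (axis 1), and collapsed to an expected offset by the fixed 1x17x1x1 projection conv held in parameter_36, while the parallel 4-channel map is passed through a sigmoid and flattened. A minimal dygraph sketch of the same decoding, using public Paddle APIs in place of paddle._C_ops and a hypothetical integral_decode name (an illustration, not the original generated code):

import paddle
import paddle.nn.functional as F

def integral_decode(reg_map, proj):
    # reg_map: (N, 4*bins, H, W) raw regression logits.
    # proj:    (1, bins, 1, 1) fixed projection weights (the analogue of
    #          parameter_36; bins = 17 in the graph above).
    n, _, h, w = reg_map.shape
    bins = proj.shape[1]
    x = paddle.reshape(reg_map, [n, 4, bins, h * w])   # split the 4 box sides
    x = paddle.transpose(x, [0, 2, 3, 1])              # (N, bins, H*W, 4)
    x = F.softmax(x, axis=1)                           # distribution over bins
    x = F.conv2d(x, proj)                              # expectation, (N, 1, H*W, 4)
    return paddle.squeeze(x, axis=1)                   # (N, H*W, 4) expected offsets

The same three-step pattern (reshape/transpose/softmax, shared projection conv, squeeze) is repeated below for the 384- and 192-channel feature levels before the per-level outputs are concatenated.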
# builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_15 = [full_1, full_2, multiply_4] - del multiply_4 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_15, 0) - del combine_15 - - # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) - reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) - del sigmoid_2, stack_1 - - # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) - shape64_2 = paddle._C_ops.shape64(swish_94) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_2 - - # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) - shape64_3 = paddle._C_ops.shape64(swish_94) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] - ) - del shape64_3 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) - del slice_2, slice_3 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_4 = paddle._C_ops.pool2d( - swish_94, - full_int_array_8, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_149 = paddle._C_ops.conv2d( - pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) - del parameter_34 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_60 = paddle._C_ops.add(conv2d_149, reshape_10) - del conv2d_149, reshape_10 - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_60) - del add_60 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_8 = paddle._C_ops.multiply(swish_94, sigmoid_3) - del sigmoid_3 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_150 = paddle._C_ops.conv2d( - multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_8, parameter_33 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__840, - batch_norm__841, - batch_norm__842, - batch_norm__843, - batch_norm__844, - batch_norm__845, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_150, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_107 = paddle._C_ops.swish(batch_norm__840) - del batch_norm__840 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_61 = paddle._C_ops.add(swish_107, swish_94) - del swish_107 - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) - conv2d_151 = paddle._C_ops.conv2d( - add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_61, parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) - del parameter_27 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 
1x4x1x1xf32) - add_62 = paddle._C_ops.add(conv2d_151, reshape_11) - del conv2d_151, reshape_11 - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_152 = paddle._C_ops.conv2d( - pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26, pool2d_4 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) - del parameter_25 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_63 = paddle._C_ops.add(conv2d_152, reshape_12) - del conv2d_152, reshape_12 - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_63) - del add_63 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_9 = paddle._C_ops.multiply(swish_94, sigmoid_4) - del sigmoid_4, swish_94 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_153 = paddle._C_ops.conv2d( - multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_9, parameter_24 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__846, - batch_norm__847, - batch_norm__848, - batch_norm__849, - batch_norm__850, - batch_norm__851, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_153, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_108 = paddle._C_ops.swish(batch_norm__846) - del batch_norm__846 - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_154 = paddle._C_ops.conv2d( - swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19, swish_108 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) - del parameter_18 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_64 = paddle._C_ops.add(conv2d_154, reshape_13) - del conv2d_154, reshape_13 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_16 = [full_1, full_2, full_3, multiply_7] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_16, 0) - del combine_16 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_64, stack_2) - del add_64, stack_2 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) - del reshape_14 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_1 = paddle._C_ops.softmax(transpose_1, 1) - del transpose_1 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_155 = paddle._C_ops.conv2d( - softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_1 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_9) - del conv2d_155 - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_62) - del add_62 - - # builtin.combine: ([xi64, xi64, xi64]) <- 
(xi64, xi64, xi64) - combine_17 = [full_1, full_2, multiply_7] - del multiply_7 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_3 = paddle._C_ops.stack(combine_17, 0) - del combine_17 - - # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) - del sigmoid_5, stack_3 - - # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) - shape64_4 = paddle._C_ops.shape64(swish_84) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del full_int_array_5, shape64_4 - - # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) - shape64_5 = paddle._C_ops.shape64(swish_84) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] - ) - del full_int_array_6, full_int_array_7, shape64_5 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) - del slice_4, slice_5 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_5 = paddle._C_ops.pool2d( - swish_84, - full_int_array_8, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - del full_int_array_8 - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_156 = paddle._C_ops.conv2d( - pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) - del parameter_16 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_65 = paddle._C_ops.add(conv2d_156, reshape_16) - del conv2d_156, reshape_16 - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_65) - del add_65 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_11 = paddle._C_ops.multiply(swish_84, sigmoid_6) - del sigmoid_6 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_157 = paddle._C_ops.conv2d( - multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_11, parameter_15 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__852, - batch_norm__853, - batch_norm__854, - batch_norm__855, - batch_norm__856, - batch_norm__857, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_157, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_109 = paddle._C_ops.swish(batch_norm__852) - del batch_norm__852 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_66 = paddle._C_ops.add(swish_109, swish_84) - del swish_109 - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) - conv2d_158 = paddle._C_ops.conv2d( - add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_66, parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) - del parameter_9 - - # pd_op.add: 
(2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_67 = paddle._C_ops.add(conv2d_158, reshape_17) - del conv2d_158, reshape_17 - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_159 = paddle._C_ops.conv2d( - pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8, pool2d_5 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) - del parameter_7 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_68 = paddle._C_ops.add(conv2d_159, reshape_18) - del conv2d_159, reshape_18 - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_68) - del add_68 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_12 = paddle._C_ops.multiply(swish_84, sigmoid_7) - del sigmoid_7, swish_84 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_160 = paddle._C_ops.conv2d( - multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_12, parameter_6 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__858, - batch_norm__859, - batch_norm__860, - batch_norm__861, - batch_norm__862, - batch_norm__863, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_160, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_110 = paddle._C_ops.swish(batch_norm__858) - del batch_norm__858 - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_161 = paddle._C_ops.conv2d( - swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1, swish_110 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) - del full_int_array_1, parameter_0 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_69 = paddle._C_ops.add(conv2d_161, reshape_19) - del conv2d_161, reshape_19 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_18 = [full_1, full_2, full_3, multiply_10] - del full_3 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_18, 0) - del combine_18 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(add_69, stack_4) - del add_69, stack_4 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) - del reshape_20 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_2 = paddle._C_ops.softmax(transpose_2, 1) - del transpose_2 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_162 = paddle._C_ops.conv2d( - softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_36, softmax_2 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_9) - del conv2d_162, full_int_array_9 - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_8 = 
paddle._C_ops.sigmoid(add_67) - del add_67 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_19 = [full_1, full_2, multiply_10] - del full_1, full_2, multiply_10 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_19, 0) - del combine_19 - - # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) - reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) - del sigmoid_8, stack_5 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) - combine_20 = [reshape_9, reshape_15, reshape_21] - del reshape_15, reshape_21, reshape_9 - - # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_20, full_4) - del combine_20, full_4 - - # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) - combine_21 = [squeeze_0, squeeze_1, squeeze_2] - del squeeze_0, squeeze_1, squeeze_2 - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_21, full_0) - del combine_21, full_0 - - return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py deleted file mode 100644 index bf4cf55ae..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_5/weight_meta.py +++ /dev/null @@ -1,8161 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [68] - dtype = "float32" - min_val = float("-0.00892735") - max_val = float("0.0359164") - mean = float("8.74861e-08") - std = float("0.00810027") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [68, 192, 3, 3] - dtype = "float32" - min_val = float("-0.14403") - max_val = float("0.16853") - mean = float("5.68543e-08") - std = float("0.00707932") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [192] - dtype = "float32" - min_val = float("-0.0740081") - max_val = float("0.252888") - mean = float("0.0629457") - std = float("0.0572415") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [192] - dtype = "float32" - min_val = float("0.838381") - max_val = float("1.78564") - mean = float("1.29191") - std = float("0.191219") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [192] - dtype = "float32" - min_val = float("0.000988996") - max_val = float("0.0390925") - mean = float("0.00961489") - std = float("0.00656614") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [192] - dtype = "float32" - min_val = float("-0.11659") - max_val = float("0.132114") - mean = float("0.00156193") - std = float("0.0387838") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.060318") - max_val = float("0.0853128") - mean = float("-0.000471342") - std = float("0.00698795") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [192] - dtype = "float32" - min_val = float("-0.00462196") - max_val = float("0.00603283") - mean = float("4.25491e-05") - std = float("0.00192614") - data = None - - 
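The weight_meta.py entries removed in this hunk record only summary statistics per parameter — name, shape, dtype, min/max/mean/std — with data = None, so the file describes the weights without embedding their values. As a rough illustration of how one such entry could be regenerated from a live tensor (the emit_weight_meta helper below is hypothetical and not part of the original tooling):

import paddle

def emit_weight_meta(name, tensor):
    # Produce a class body in the same style as the entries above; the data
    # field is deliberately left as None, mirroring the deleted file.
    t = tensor.astype("float32")
    return (
        f"class Program_weight_tensor_{name}:\n"
        f'    name = "{name}"\n'
        f"    shape = {list(tensor.shape)}\n"
        f'    dtype = "{str(tensor.dtype).replace("paddle.", "")}"\n'
        f'    min_val = float("{float(t.min()):g}")\n'
        f'    max_val = float("{float(t.max()):g}")\n'
        f'    mean = float("{float(t.mean()):g}")\n'
        f'    std = float("{float(t.std()):g}")\n'
        f"    data = None\n"
    )

# e.g. print(emit_weight_meta("parameter_0", paddle.randn([68])))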
-class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0100888") - max_val = float("0.0113319") - mean = float("-9.49891e-06") - std = float("0.00128433") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [4, 192, 3, 3] - dtype = "float32" - min_val = float("-0.336461") - max_val = float("0.0527773") - mean = float("-0.0165997") - std = float("0.0458695") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [192] - dtype = "float32" - min_val = float("-0.446317") - max_val = float("1.5052") - mean = float("0.404532") - std = float("0.335901") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [192] - dtype = "float32" - min_val = float("0.959521") - max_val = float("2.22919") - mean = float("1.3736") - std = float("0.176912") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [192] - dtype = "float32" - min_val = float("0.00279508") - max_val = float("23.2612") - mean = float("0.479151") - std = float("2.07157") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [192] - dtype = "float32" - min_val = float("-0.559818") - max_val = float("2.59203") - mean = float("0.0883731") - std = float("0.322225") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.70809") - max_val = float("0.443594") - mean = float("0.00156292") - std = float("0.0251271") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [192] - dtype = "float32" - min_val = float("-0.0114725") - max_val = float("0.0157792") - mean = float("-8.72761e-05") - std = float("0.00276262") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0348732") - max_val = float("0.0307751") - mean = float("2.79732e-05") - std = float("0.00181061") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [68] - dtype = "float32" - min_val = float("-0.00445757") - max_val = float("0.0190964") - mean = float("6.58329e-08") - std = float("0.00445574") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [68, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0827377") - max_val = float("0.109436") - mean = float("3.68891e-08") - std = float("0.00410645") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.0151169") - max_val = float("0.107487") - mean = float("0.032917") - std = float("0.0178601") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("1.00868") - max_val = float("1.29019") - mean = float("1.1456") - std = float("0.0510674") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.000383663") - max_val = float("0.550054") - mean = float("0.0230272") - std = float("0.0486734") - data = None - - -class 
Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.106161") - max_val = float("0.123469") - mean = float("-0.010298") - std = float("0.0313141") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0462353") - max_val = float("0.0532962") - mean = float("-0.000249771") - std = float("0.00297506") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.00255366") - max_val = float("0.00300366") - mean = float("9.24283e-05") - std = float("0.00102595") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.00218811") - max_val = float("0.00368407") - mean = float("2.14446e-05") - std = float("0.000454602") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [4, 384, 3, 3] - dtype = "float32" - min_val = float("-0.404012") - max_val = float("0.018674") - mean = float("-0.0535936") - std = float("0.0678225") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.23292") - max_val = float("0.549526") - mean = float("0.280285") - std = float("0.126685") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("0.992106") - max_val = float("1.5115") - mean = float("1.23501") - std = float("0.0724327") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.00193308") - max_val = float("158.649") - mean = float("2.35102") - std = float("10.1328") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("-3.49971") - max_val = float("2.32388") - mean = float("0.0203273") - std = float("0.419635") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.299716") - max_val = float("0.19324") - mean = float("0.000565759") - std = float("0.0141589") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384] - dtype = "float32" - min_val = float("-0.00201965") - max_val = float("0.00793739") - mean = float("1.85378e-06") - std = float("0.000924825") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0053256") - max_val = float("0.0106358") - mean = float("3.65302e-06") - std = float("0.000513084") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [1, 17, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [68] - dtype = "float32" - min_val = float("-0.00483921") - max_val = float("0.0132204") - mean = float("1.85537e-08") - std = float("0.00434121") - data = None - - -class Program_weight_tensor_parameter_38: - name = 
"parameter_38" - shape = [68, 768, 3, 3] - dtype = "float32" - min_val = float("-0.033019") - max_val = float("0.0657063") - mean = float("1.06083e-08") - std = float("0.00221797") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("-0.0161648") - max_val = float("0.0716208") - mean = float("0.0156062") - std = float("0.014137") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [768] - dtype = "float32" - min_val = float("1.02758") - max_val = float("1.22328") - mean = float("1.09273") - std = float("0.0263012") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [768] - dtype = "float32" - min_val = float("9.07716e-05") - max_val = float("0.044646") - mean = float("0.00124304") - std = float("0.0020634") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [768] - dtype = "float32" - min_val = float("-0.110096") - max_val = float("0.0490828") - mean = float("-0.00556926") - std = float("0.0125766") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0245815") - max_val = float("0.0259975") - mean = float("-8.56304e-05") - std = float("0.00108959") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [768] - dtype = "float32" - min_val = float("-0.00228552") - max_val = float("0.00179437") - mean = float("9.01848e-05") - std = float("0.000464165") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.00190183") - max_val = float("0.00166425") - mean = float("2.80024e-05") - std = float("0.000154694") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [4, 768, 3, 3] - dtype = "float32" - min_val = float("-0.360602") - max_val = float("0.0321948") - mean = float("-0.017021") - std = float("0.0416566") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [768] - dtype = "float32" - min_val = float("-0.149733") - max_val = float("0.255597") - mean = float("0.127259") - std = float("0.0547028") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [768] - dtype = "float32" - min_val = float("1.01591") - max_val = float("1.35046") - mean = float("1.11013") - std = float("0.0353362") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [768] - dtype = "float32" - min_val = float("0.000190586") - max_val = float("5.78833") - mean = float("0.106439") - std = float("0.43971") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [768] - dtype = "float32" - min_val = float("-0.732685") - max_val = float("0.378772") - mean = float("-0.0300328") - std = float("0.0991444") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0393304") - max_val = float("0.0266957") - mean = float("-0.000475184") - std = float("0.00312029") - data = None - - -class 
Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [768] - dtype = "float32" - min_val = float("-0.00365788") - max_val = float("0.00249573") - mean = float("1.83923e-05") - std = float("0.00034543") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0129539") - max_val = float("0.0408516") - mean = float("7.14053e-06") - std = float("0.000222292") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [768] - dtype = "float32" - min_val = float("-0.239693") - max_val = float("0.342023") - mean = float("0.111992") - std = float("0.075942") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [768] - dtype = "float32" - min_val = float("0.85597") - max_val = float("1.34121") - mean = float("1.0926") - std = float("0.041353") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [768] - dtype = "float32" - min_val = float("0.00747408") - max_val = float("91.142") - mean = float("1.28785") - std = float("4.31139") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [768] - dtype = "float32" - min_val = float("-0.940587") - max_val = float("0.533912") - mean = float("-0.0459939") - std = float("0.109863") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0494613") - max_val = float("0.0342687") - mean = float("-0.00053839") - std = float("0.00305259") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.220176") - max_val = float("0.0362122") - mean = float("-0.0290593") - std = float("0.0325532") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.949198") - max_val = float("1.03905") - mean = float("0.985069") - std = float("0.0122127") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [384] - dtype = "float32" - min_val = float("0.000362336") - max_val = float("5.86749") - mean = float("0.0824461") - std = float("0.456655") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - min_val = float("-0.0999696") - max_val = float("0.203239") - mean = float("0.00744766") - std = float("0.024564") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0202515") - max_val = float("0.0297371") - mean = float("0.000259053") - std = float("0.00196863") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.220176") - max_val = float("0.0362122") - mean = float("-0.0290593") - std = float("0.0325532") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - min_val = float("0.853608") - max_val = float("1.12594") - mean = float("1.01957") - std = float("0.0206557") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val = float("0.00676097") - max_val = float("25.3593") - mean = float("0.532137") - std = 
float("1.58471") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-0.311548") - max_val = float("0.516814") - mean = float("-0.00365134") - std = float("0.0774418") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.018036") - max_val = float("0.0229965") - mean = float("-1.03648e-05") - std = float("0.00100784") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [384] - dtype = "float32" - min_val = float("-0.187836") - max_val = float("0.0393232") - mean = float("-0.0497104") - std = float("0.0341181") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [384] - dtype = "float32" - min_val = float("0.92338") - max_val = float("1.15649") - mean = float("1.01802") - std = float("0.0319672") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [384] - dtype = "float32" - min_val = float("0.0260268") - max_val = float("9.39199") - mean = float("0.909698") - std = float("1.08737") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [384] - dtype = "float32" - min_val = float("-0.352188") - max_val = float("0.490548") - mean = float("0.0139095") - std = float("0.119918") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0214424") - max_val = float("0.0293422") - mean = float("1.62345e-05") - std = float("0.00113367") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [384] - dtype = "float32" - min_val = float("-0.136108") - max_val = float("0.0210026") - mean = float("-0.0495464") - std = float("0.0271927") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [384] - dtype = "float32" - min_val = float("0.94096") - max_val = float("1.03866") - mean = float("0.98639") - std = float("0.0130591") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [384] - dtype = "float32" - min_val = float("0.00109296") - max_val = float("0.435956") - mean = float("0.0262465") - std = float("0.047939") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [384] - dtype = "float32" - min_val = float("-0.0984515") - max_val = float("0.0774922") - mean = float("0.00451097") - std = float("0.020161") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0187966") - max_val = float("0.015677") - mean = float("0.000104981") - std = float("0.00175477") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [384] - dtype = "float32" - min_val = float("-0.136108") - max_val = float("0.0210026") - mean = float("-0.0495464") - std = float("0.0271927") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [384] - dtype = "float32" - min_val = float("0.966771") - max_val = float("1.104") - mean = float("1.01877") - std = float("0.0185739") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [384] - dtype = "float32" - min_val = float("0.006631") - max_val = float("4.5025") - 
mean = float("0.219574") - std = float("0.441772") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [384] - dtype = "float32" - min_val = float("-0.29354") - max_val = float("0.27892") - mean = float("-0.00342736") - std = float("0.0628133") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0202651") - max_val = float("0.0277646") - mean = float("-7.89595e-06") - std = float("0.000982729") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [384] - dtype = "float32" - min_val = float("-0.148346") - max_val = float("0.0258169") - mean = float("-0.0508039") - std = float("0.0264955") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [384] - dtype = "float32" - min_val = float("0.938732") - max_val = float("1.11539") - mean = float("1.01484") - std = float("0.0355445") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [384] - dtype = "float32" - min_val = float("0.00583479") - max_val = float("7.27592") - mean = float("0.403401") - std = float("0.610221") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [384] - dtype = "float32" - min_val = float("-0.416572") - max_val = float("0.519537") - mean = float("0.0156239") - std = float("0.100455") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0194111") - max_val = float("0.027243") - mean = float("1.68141e-05") - std = float("0.00118306") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [384] - dtype = "float32" - min_val = float("-0.153224") - max_val = float("0.0447664") - mean = float("-0.0556235") - std = float("0.0277297") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [384] - dtype = "float32" - min_val = float("0.932483") - max_val = float("1.05427") - mean = float("0.984506") - std = float("0.0159277") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [384] - dtype = "float32" - min_val = float("0.000534063") - max_val = float("0.217156") - mean = float("0.0168761") - std = float("0.0268172") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [384] - dtype = "float32" - min_val = float("-0.0656613") - max_val = float("0.0732472") - mean = float("-0.00097402") - std = float("0.0152174") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0201466") - max_val = float("0.0213735") - mean = float("-4.74338e-05") - std = float("0.00168772") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [384] - dtype = "float32" - min_val = float("-0.153224") - max_val = float("0.0447664") - mean = float("-0.0556235") - std = float("0.0277297") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [384] - dtype = "float32" - min_val = float("0.963627") - max_val = float("1.13194") - mean = float("1.02226") - std = float("0.0263426") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [384] - dtype = "float32" - min_val = 
float("0.0039872") - max_val = float("2.61582") - mean = float("0.128734") - std = float("0.223227") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [384] - dtype = "float32" - min_val = float("-0.158487") - max_val = float("0.16455") - mean = float("-0.00243845") - std = float("0.0415247") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0197083") - max_val = float("0.0251654") - mean = float("-8.82344e-06") - std = float("0.00104335") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [384] - dtype = "float32" - min_val = float("-0.161053") - max_val = float("0.0517202") - mean = float("-0.0534173") - std = float("0.0280919") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [384] - dtype = "float32" - min_val = float("0.918049") - max_val = float("1.15232") - mean = float("1.01552") - std = float("0.0359069") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [384] - dtype = "float32" - min_val = float("0.00751172") - max_val = float("4.27019") - mean = float("0.219163") - std = float("0.336004") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [384] - dtype = "float32" - min_val = float("-0.172824") - max_val = float("0.238297") - mean = float("0.000740969") - std = float("0.0528198") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0223627") - max_val = float("0.0225632") - mean = float("4.76008e-06") - std = float("0.00121998") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [384] - dtype = "float32" - min_val = float("-0.10183") - max_val = float("0.0571021") - mean = float("-0.0401848") - std = float("0.0228972") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [384] - dtype = "float32" - min_val = float("0.964009") - max_val = float("1.11744") - mean = float("1.01332") - std = float("0.0243487") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [384] - dtype = "float32" - min_val = float("0.0068207") - max_val = float("0.57439") - mean = float("0.0795897") - std = float("0.0829282") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [384] - dtype = "float32" - min_val = float("-0.0720079") - max_val = float("0.133674") - mean = float("0.00403881") - std = float("0.0281469") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.040986") - max_val = float("0.0442725") - mean = float("2.35921e-05") - std = float("0.00180591") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [384] - dtype = "float32" - min_val = float("-0.0710209") - max_val = float("0.0171363") - mean = float("-0.0176361") - std = float("0.0126887") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [384] - dtype = "float32" - min_val = float("0.915116") - max_val = float("1.10226") - mean = float("1.00939") - std = float("0.0165555") - data = None - - -class Program_weight_tensor_parameter_112: - 
name = "parameter_112" - shape = [384] - dtype = "float32" - min_val = float("0.00101995") - max_val = float("1.60525") - mean = float("0.0940799") - std = float("0.172003") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [384] - dtype = "float32" - min_val = float("-0.106481") - max_val = float("0.109302") - mean = float("0.00224785") - std = float("0.0268252") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0370576") - max_val = float("0.0304661") - mean = float("3.8777e-05") - std = float("0.00152873") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [384] - dtype = "float32" - min_val = float("-0.0787177") - max_val = float("0.00338451") - mean = float("-0.025285") - std = float("0.0143999") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [384] - dtype = "float32" - min_val = float("0.98141") - max_val = float("1.12417") - mean = float("1.02758") - std = float("0.0212745") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [384] - dtype = "float32" - min_val = float("0.00212422") - max_val = float("3.61176") - mean = float("0.161326") - std = float("0.310472") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [384] - dtype = "float32" - min_val = float("-0.339877") - max_val = float("0.322401") - mean = float("-0.00463237") - std = float("0.0920204") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0316566") - max_val = float("0.0261297") - mean = float("-3.55618e-06") - std = float("0.000915228") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [384] - dtype = "float32" - min_val = float("-0.413396") - max_val = float("0.666571") - mean = float("0.255151") - std = float("0.15886") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [384] - dtype = "float32" - min_val = float("0.924159") - max_val = float("1.66721") - mean = float("1.17398") - std = float("0.0907099") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [384] - dtype = "float32" - min_val = float("0.0144254") - max_val = float("122.174") - mean = float("3.89996") - std = float("13.8836") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [384] - dtype = "float32" - min_val = float("-0.852841") - max_val = float("0.566699") - mean = float("-0.0134303") - std = float("0.139115") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.136465") - max_val = float("0.10202") - mean = float("-0.000553339") - std = float("0.00889327") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.257665") - max_val = float("0.0752437") - mean = float("-0.0387492") - std = float("0.0605554") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.912244") - max_val = float("1.05536") - mean = float("0.97033") - std = float("0.0254714") - 
data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.000393611") - max_val = float("2.87493") - mean = float("0.111455") - std = float("0.279032") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.0556163") - max_val = float("0.0292508") - mean = float("-0.00194405") - std = float("0.0112362") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0439425") - max_val = float("0.0410791") - mean = float("-0.000352772") - std = float("0.00463785") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.257665") - max_val = float("0.0752437") - mean = float("-0.0387492") - std = float("0.0605554") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.673684") - max_val = float("1.16438") - mean = float("1.02596") - std = float("0.0488779") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.0133915") - max_val = float("25.98") - mean = float("1.07065") - std = float("2.9016") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.180444") - max_val = float("0.125675") - mean = float("-0.00551328") - std = float("0.0357889") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.031421") - max_val = float("0.0394767") - mean = float("-0.000121284") - std = float("0.0024869") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [192] - dtype = "float32" - min_val = float("-0.255521") - max_val = float("0.0954508") - mean = float("-0.0805929") - std = float("0.0593622") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [192] - dtype = "float32" - min_val = float("0.856802") - max_val = float("1.31575") - mean = float("1.0158") - std = float("0.0628707") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [192] - dtype = "float32" - min_val = float("0.0279533") - max_val = float("27.6276") - mean = float("1.05945") - std = float("2.31666") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [192] - dtype = "float32" - min_val = float("-0.134847") - max_val = float("0.218809") - mean = float("0.00283619") - std = float("0.0445478") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0375382") - max_val = float("0.0517003") - mean = float("-3.45177e-05") - std = float("0.0026056") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [192] - dtype = "float32" - min_val = float("-0.21883") - max_val = float("0.0413199") - mean = float("-0.100393") - std = float("0.0464424") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [192] - dtype = "float32" - min_val = float("0.8901") - max_val = 
float("1.08415") - mean = float("0.970219") - std = float("0.0268324") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [192] - dtype = "float32" - min_val = float("0.000612437") - max_val = float("0.233253") - mean = float("0.0124722") - std = float("0.0250989") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [192] - dtype = "float32" - min_val = float("-0.0253723") - max_val = float("0.00984091") - mean = float("-0.0014133") - std = float("0.00517924") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0375517") - max_val = float("0.033268") - mean = float("-0.000569872") - std = float("0.00362479") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [192] - dtype = "float32" - min_val = float("-0.21883") - max_val = float("0.0413199") - mean = float("-0.100393") - std = float("0.0464424") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [192] - dtype = "float32" - min_val = float("0.930194") - max_val = float("1.13524") - mean = float("1.02396") - std = float("0.0379187") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [192] - dtype = "float32" - min_val = float("0.00801575") - max_val = float("1.06604") - mean = float("0.0984606") - std = float("0.143442") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [192] - dtype = "float32" - min_val = float("-0.0705495") - max_val = float("0.024401") - mean = float("-0.00514406") - std = float("0.0149741") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0383657") - max_val = float("0.0467087") - mean = float("-0.000148064") - std = float("0.00219098") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [192] - dtype = "float32" - min_val = float("-0.230449") - max_val = float("0.0107375") - mean = float("-0.106032") - std = float("0.0515695") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [192] - dtype = "float32" - min_val = float("0.866584") - max_val = float("1.19769") - mean = float("1.0176") - std = float("0.0616183") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [192] - dtype = "float32" - min_val = float("0.0189783") - max_val = float("2.75021") - mean = float("0.20213") - std = float("0.25366") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [192] - dtype = "float32" - min_val = float("-0.0862401") - max_val = float("0.0391613") - mean = float("-0.00692022") - std = float("0.0188185") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0440031") - max_val = float("0.0615391") - mean = float("-0.000144339") - std = float("0.00244733") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [192] - dtype = "float32" - min_val = float("-0.331751") - max_val = float("0.0593176") - mean = float("-0.122839") - std = float("0.0596387") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" 
- shape = [192] - dtype = "float32" - min_val = float("0.864681") - max_val = float("1.08356") - mean = float("0.967237") - std = float("0.0294572") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [192] - dtype = "float32" - min_val = float("0.000340173") - max_val = float("0.0775148") - mean = float("0.00814505") - std = float("0.0093693") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [192] - dtype = "float32" - min_val = float("-0.0142046") - max_val = float("0.0108901") - mean = float("-0.000354694") - std = float("0.00406659") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0335535") - max_val = float("0.0763086") - mean = float("-0.000457842") - std = float("0.00358968") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [192] - dtype = "float32" - min_val = float("-0.331751") - max_val = float("0.0593176") - mean = float("-0.122839") - std = float("0.0596387") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [192] - dtype = "float32" - min_val = float("0.930627") - max_val = float("1.13713") - mean = float("1.02228") - std = float("0.0316434") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [192] - dtype = "float32" - min_val = float("0.00494727") - max_val = float("1.02902") - mean = float("0.085425") - std = float("0.136729") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [192] - dtype = "float32" - min_val = float("-0.0599128") - max_val = float("0.0420675") - mean = float("-0.00217438") - std = float("0.0152992") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0370746") - max_val = float("0.0560801") - mean = float("-0.000137326") - std = float("0.00230186") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [192] - dtype = "float32" - min_val = float("-0.348258") - max_val = float("0.134414") - mean = float("-0.132516") - std = float("0.0683607") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [192] - dtype = "float32" - min_val = float("0.882276") - max_val = float("1.33254") - mean = float("1.01699") - std = float("0.066298") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [192] - dtype = "float32" - min_val = float("0.00563114") - max_val = float("2.12348") - mean = float("0.105603") - std = float("0.186094") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [192] - dtype = "float32" - min_val = float("-0.0852457") - max_val = float("0.107334") - mean = float("-0.00720781") - std = float("0.0193172") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0441365") - max_val = float("0.0884734") - mean = float("-8.53865e-05") - std = float("0.00277581") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [192] - dtype = "float32" - min_val = float("-0.248486") - max_val = float("0.0638204") - mean = float("-0.0972066") - std = float("0.0449139") - 
data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [192] - dtype = "float32" - min_val = float("0.916404") - max_val = float("1.2336") - mean = float("1.01803") - std = float("0.0458357") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [192] - dtype = "float32" - min_val = float("0.00285938") - max_val = float("1.25761") - mean = float("0.0895332") - std = float("0.162617") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [192] - dtype = "float32" - min_val = float("-0.096256") - max_val = float("0.113259") - mean = float("-0.00855981") - std = float("0.0239588") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.054491") - max_val = float("0.0593654") - mean = float("-0.000210848") - std = float("0.00429447") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [192] - dtype = "float32" - min_val = float("-0.165888") - max_val = float("0.0407389") - mean = float("-0.0331057") - std = float("0.0303175") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [192] - dtype = "float32" - min_val = float("0.914858") - max_val = float("1.29697") - mean = float("1.00082") - std = float("0.0391908") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [192] - dtype = "float32" - min_val = float("0.00305941") - max_val = float("1.43751") - mean = float("0.124248") - std = float("0.190205") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [192] - dtype = "float32" - min_val = float("-0.100544") - max_val = float("0.127463") - mean = float("0.00148309") - std = float("0.0308809") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0567269") - max_val = float("0.0677656") - mean = float("6.1846e-05") - std = float("0.00367772") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [192] - dtype = "float32" - min_val = float("-0.15598") - max_val = float("0.0109365") - mean = float("-0.0537461") - std = float("0.0309099") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [192] - dtype = "float32" - min_val = float("0.854102") - max_val = float("1.17499") - mean = float("1.00855") - std = float("0.0375922") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [192] - dtype = "float32" - min_val = float("0.0177937") - max_val = float("2.32847") - mean = float("0.20173") - std = float("0.23775") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [192] - dtype = "float32" - min_val = float("-0.582113") - max_val = float("0.599466") - mean = float("-0.0228734") - std = float("0.194881") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0460401") - max_val = float("0.0411278") - mean = float("-2.83696e-05") - std = float("0.00233677") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [192] - dtype = "float32" - min_val = float("-0.727151") - max_val = 
float("1.77126") - mean = float("0.372279") - std = float("0.431884") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [192] - dtype = "float32" - min_val = float("0.643738") - max_val = float("1.70973") - mean = float("1.1633") - std = float("0.19941") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [192] - dtype = "float32" - min_val = float("0.0175227") - max_val = float("24.367") - mean = float("0.996291") - std = float("2.34855") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [192] - dtype = "float32" - min_val = float("-0.649941") - max_val = float("0.526551") - mean = float("0.0190609") - std = float("0.138321") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.222576") - max_val = float("0.181966") - mean = float("0.000145706") - std = float("0.0205848") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [96] - dtype = "float32" - min_val = float("-0.646041") - max_val = float("0.287621") - mean = float("-0.0371052") - std = float("0.203701") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [96] - dtype = "float32" - min_val = float("0.729587") - max_val = float("1.32793") - mean = float("0.928081") - std = float("0.0901068") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [96] - dtype = "float32" - min_val = float("0.00961121") - max_val = float("5.95736") - mean = float("0.233741") - std = float("0.717086") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [96] - dtype = "float32" - min_val = float("-0.0755287") - max_val = float("0.0554683") - mean = float("0.00113738") - std = float("0.0240865") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.538395") - max_val = float("0.204203") - mean = float("-0.00127837") - std = float("0.021114") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [96] - dtype = "float32" - min_val = float("-0.646041") - max_val = float("0.287621") - mean = float("-0.0371052") - std = float("0.203701") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [96] - dtype = "float32" - min_val = float("0.482104") - max_val = float("1.39863") - mean = float("1.04313") - std = float("0.135699") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [96] - dtype = "float32" - min_val = float("0.0457488") - max_val = float("30.6283") - mean = float("1.69124") - std = float("4.42624") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [96] - dtype = "float32" - min_val = float("-0.384559") - max_val = float("0.181016") - mean = float("-0.0210207") - std = float("0.0873941") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.247075") - max_val = float("0.10887") - mean = float("-0.000316918") - std = float("0.00915013") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [96] - dtype = "float32" - min_val = 
float("-0.806157") - max_val = float("0.642647") - mean = float("-0.135704") - std = float("0.223383") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [96] - dtype = "float32" - min_val = float("0.498225") - max_val = float("1.52038") - mean = float("0.986561") - std = float("0.14034") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [96] - dtype = "float32" - min_val = float("0.133909") - max_val = float("27.5573") - mean = float("1.63725") - std = float("2.96894") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [96] - dtype = "float32" - min_val = float("-0.14425") - max_val = float("0.110106") - mean = float("0.00161153") - std = float("0.0553599") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.217565") - max_val = float("0.114085") - mean = float("-0.000489408") - std = float("0.00910554") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [96] - dtype = "float32" - min_val = float("-0.376161") - max_val = float("0.21441") - mean = float("-0.182742") - std = float("0.131052") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [96] - dtype = "float32" - min_val = float("0.657588") - max_val = float("1.16753") - mean = float("0.867648") - std = float("0.0697505") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [96] - dtype = "float32" - min_val = float("0.00284137") - max_val = float("1.11982") - mean = float("0.0593209") - std = float("0.123554") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [96] - dtype = "float32" - min_val = float("-0.0657001") - max_val = float("0.0660294") - mean = float("-0.0085817") - std = float("0.0166625") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.112441") - max_val = float("0.145551") - mean = float("-0.00185134") - std = float("0.0132855") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [96] - dtype = "float32" - min_val = float("-0.376161") - max_val = float("0.21441") - mean = float("-0.182742") - std = float("0.131052") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [96] - dtype = "float32" - min_val = float("0.794636") - max_val = float("1.29933") - mean = float("1.01044") - std = float("0.0818406") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [96] - dtype = "float32" - min_val = float("0.0372437") - max_val = float("9.07593") - mean = float("0.686368") - std = float("1.14539") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [96] - dtype = "float32" - min_val = float("-0.192429") - max_val = float("0.242172") - mean = float("-0.0113233") - std = float("0.0649532") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0724886") - max_val = float("0.0676065") - mean = float("-0.000591651") - std = float("0.00719272") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = 
[96] - dtype = "float32" - min_val = float("-0.504724") - max_val = float("0.310511") - mean = float("-0.192469") - std = float("0.162887") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [96] - dtype = "float32" - min_val = float("0.729078") - max_val = float("1.32546") - mean = float("0.944974") - std = float("0.106132") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [96] - dtype = "float32" - min_val = float("0.0420698") - max_val = float("6.71555") - mean = float("1.0677") - std = float("1.20063") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [96] - dtype = "float32" - min_val = float("-0.126845") - max_val = float("0.134215") - mean = float("0.0195531") - std = float("0.0565244") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.086667") - max_val = float("0.0891793") - mean = float("-0.000463108") - std = float("0.00828012") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [96] - dtype = "float32" - min_val = float("-0.603289") - max_val = float("0.0867803") - mean = float("-0.234451") - std = float("0.142047") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [96] - dtype = "float32" - min_val = float("0.704426") - max_val = float("1.02604") - mean = float("0.909068") - std = float("0.0649952") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [96] - dtype = "float32" - min_val = float("0.001266") - max_val = float("1.28419") - mean = float("0.0888219") - std = float("0.188276") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [96] - dtype = "float32" - min_val = float("-0.0485395") - max_val = float("0.0643204") - mean = float("-0.00403548") - std = float("0.0232515") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0943453") - max_val = float("0.128335") - mean = float("-0.00194288") - std = float("0.0140302") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [96] - dtype = "float32" - min_val = float("-0.603289") - max_val = float("0.0867803") - mean = float("-0.234451") - std = float("0.142047") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [96] - dtype = "float32" - min_val = float("0.622021") - max_val = float("1.21586") - mean = float("0.958698") - std = float("0.106219") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [96] - dtype = "float32" - min_val = float("0.00636808") - max_val = float("6.96019") - mean = float("0.706011") - std = float("1.27379") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [96] - dtype = "float32" - min_val = float("-0.265289") - max_val = float("0.2344") - mean = float("6.01998e-05") - std = float("0.078052") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0724794") - max_val = float("0.0855183") - mean = float("-0.000671817") - std = float("0.00725462") - data = None - - -class 
Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [96] - dtype = "float32" - min_val = float("-0.81596") - max_val = float("0.694539") - mean = float("-0.220603") - std = float("0.24757") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [96] - dtype = "float32" - min_val = float("0.653873") - max_val = float("1.50477") - mean = float("0.905064") - std = float("0.115987") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [96] - dtype = "float32" - min_val = float("0.0138713") - max_val = float("5.6047") - mean = float("0.607674") - std = float("0.872345") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [96] - dtype = "float32" - min_val = float("-0.18801") - max_val = float("0.361114") - mean = float("0.00822761") - std = float("0.104183") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.112173") - max_val = float("0.152093") - mean = float("-0.000219951") - std = float("0.00849275") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [96] - dtype = "float32" - min_val = float("-0.737721") - max_val = float("1.03803") - mean = float("-0.0965003") - std = float("0.357534") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [96] - dtype = "float32" - min_val = float("0.486178") - max_val = float("1.16474") - mean = float("0.785591") - std = float("0.140337") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [96] - dtype = "float32" - min_val = float("0.0220315") - max_val = float("1.90785") - mean = float("0.302731") - std = float("0.386768") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [96] - dtype = "float32" - min_val = float("-0.265742") - max_val = float("0.176522") - mean = float("0.0019327") - std = float("0.0895195") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.141003") - max_val = float("0.12526") - mean = float("-0.000255411") - std = float("0.0113464") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [96] - dtype = "float32" - min_val = float("-0.101234") - max_val = float("0.280761") - mean = float("0.0626191") - std = float("0.0701217") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [96] - dtype = "float32" - min_val = float("0.728134") - max_val = float("1.1619") - mean = float("0.897257") - std = float("0.075542") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [96] - dtype = "float32" - min_val = float("0.00554226") - max_val = float("3.43284") - mean = float("0.265583") - std = float("0.559653") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [96] - dtype = "float32" - min_val = float("-0.350261") - max_val = float("0.171463") - mean = float("-0.00933972") - std = float("0.0751828") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.0930981") - max_val = float("0.109019") - mean = float("-1.41361e-05") - std = 
float("0.00738557") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.420169") - max_val = float("0.304077") - mean = float("-0.0846625") - std = float("0.0933085") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("0.660034") - max_val = float("1.60045") - mean = float("0.830394") - std = float("0.0946997") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.00802662") - max_val = float("1.1227") - mean = float("0.141375") - std = float("0.138512") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-0.0902271") - max_val = float("0.0555908") - mean = float("-0.00784275") - std = float("0.0256276") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0796293") - max_val = float("0.0883018") - mean = float("-0.000481953") - std = float("0.00665654") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [384] - dtype = "float32" - min_val = float("-0.373051") - max_val = float("0.163758") - mean = float("-0.0929626") - std = float("0.0592494") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [384] - dtype = "float32" - min_val = float("0.87726") - max_val = float("1.57409") - mean = float("1.01609") - std = float("0.085249") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [384] - dtype = "float32" - min_val = float("0.00707231") - max_val = float("0.602354") - mean = float("0.080235") - std = float("0.0743166") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [384] - dtype = "float32" - min_val = float("-0.136249") - max_val = float("0.144427") - mean = float("-0.0149049") - std = float("0.0284274") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.129077") - max_val = float("0.0807547") - mean = float("-0.000487049") - std = float("0.00603461") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [192] - dtype = "float32" - min_val = float("-0.256959") - max_val = float("0.0668543") - mean = float("-0.0840696") - std = float("0.0440415") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [192] - dtype = "float32" - min_val = float("0.819568") - max_val = float("0.986962") - mean = float("0.929009") - std = float("0.0272595") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [192] - dtype = "float32" - min_val = float("0.000592244") - max_val = float("0.300787") - mean = float("0.0342886") - std = float("0.0384523") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [192] - dtype = "float32" - min_val = float("-0.0318971") - max_val = float("0.0175305") - mean = float("-0.00483448") - std = float("0.00913169") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = 
float("-0.035315") - max_val = float("0.0383402") - mean = float("-0.000675809") - std = float("0.0047554") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [192] - dtype = "float32" - min_val = float("-0.256959") - max_val = float("0.0668543") - mean = float("-0.0840696") - std = float("0.0440415") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [192] - dtype = "float32" - min_val = float("0.900377") - max_val = float("1.08413") - mean = float("0.991669") - std = float("0.0256929") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [192] - dtype = "float32" - min_val = float("0.0159625") - max_val = float("1.75258") - mean = float("0.264178") - std = float("0.327029") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [192] - dtype = "float32" - min_val = float("-0.0846459") - max_val = float("0.0435916") - mean = float("-0.01252") - std = float("0.0267311") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0581955") - max_val = float("0.0815388") - mean = float("-0.000159628") - std = float("0.00251421") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [192] - dtype = "float32" - min_val = float("-0.283023") - max_val = float("0.00609804") - mean = float("-0.108732") - std = float("0.0543265") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [192] - dtype = "float32" - min_val = float("0.93521") - max_val = float("1.19749") - mean = float("1.03676") - std = float("0.0449099") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [192] - dtype = "float32" - min_val = float("0.0382762") - max_val = float("5.57442") - mean = float("0.725692") - std = float("0.823782") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [192] - dtype = "float32" - min_val = float("-0.237511") - max_val = float("0.165957") - mean = float("-0.0152484") - std = float("0.0789669") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0710582") - max_val = float("0.0757265") - mean = float("-0.000116839") - std = float("0.00304633") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [192] - dtype = "float32" - min_val = float("-0.253576") - max_val = float("-0.0258551") - mean = float("-0.111423") - std = float("0.0514888") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [192] - dtype = "float32" - min_val = float("0.915196") - max_val = float("1.08387") - mean = float("0.975653") - std = float("0.0197955") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [192] - dtype = "float32" - min_val = float("0.000784494") - max_val = float("0.170735") - mean = float("0.0158213") - std = float("0.0237635") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [192] - dtype = "float32" - min_val = float("-0.0457468") - max_val = float("0.0201233") - mean = float("-0.00730915") - std = float("0.0101963") - data = None - - -class Program_weight_tensor_parameter_274: - name 
= "parameter_274" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0337886") - max_val = float("0.0331928") - mean = float("-0.000817084") - std = float("0.00496809") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [192] - dtype = "float32" - min_val = float("-0.253576") - max_val = float("-0.0258551") - mean = float("-0.111423") - std = float("0.0514888") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [192] - dtype = "float32" - min_val = float("0.940652") - max_val = float("1.13061") - mean = float("1.00459") - std = float("0.0346428") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [192] - dtype = "float32" - min_val = float("0.00912887") - max_val = float("1.74385") - mean = float("0.136466") - std = float("0.22376") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [192] - dtype = "float32" - min_val = float("-0.104047") - max_val = float("0.047509") - mean = float("-0.0196895") - std = float("0.0277402") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0458249") - max_val = float("0.0663524") - mean = float("-0.000235445") - std = float("0.00258708") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [192] - dtype = "float32" - min_val = float("-0.397813") - max_val = float("-0.0235897") - mean = float("-0.135031") - std = float("0.0579049") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [192] - dtype = "float32" - min_val = float("0.935628") - max_val = float("1.28762") - mean = float("1.02726") - std = float("0.0582587") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [192] - dtype = "float32" - min_val = float("0.0350071") - max_val = float("3.67144") - mean = float("0.444898") - std = float("0.548341") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [192] - dtype = "float32" - min_val = float("-0.456827") - max_val = float("0.250861") - mean = float("-0.0445605") - std = float("0.110845") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.032414") - max_val = float("0.0474969") - mean = float("-0.000226297") - std = float("0.00328822") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [192] - dtype = "float32" - min_val = float("-0.290864") - max_val = float("-0.0230562") - mean = float("-0.11391") - std = float("0.04676") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [192] - dtype = "float32" - min_val = float("0.906862") - max_val = float("1.13958") - mean = float("0.997132") - std = float("0.0365506") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [192] - dtype = "float32" - min_val = float("0.000630271") - max_val = float("0.126275") - mean = float("0.0156972") - std = float("0.0212032") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [192] - dtype = "float32" - min_val = float("-0.032696") - max_val = float("0.0205933") - mean = float("-0.00387507") - std = 
float("0.00785712") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0348327") - max_val = float("0.0582176") - mean = float("-0.000388586") - std = float("0.0055446") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [192] - dtype = "float32" - min_val = float("-0.290864") - max_val = float("-0.0230562") - mean = float("-0.11391") - std = float("0.04676") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [192] - dtype = "float32" - min_val = float("0.90984") - max_val = float("1.14776") - mean = float("0.987138") - std = float("0.0373269") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [192] - dtype = "float32" - min_val = float("0.0068577") - max_val = float("3.15529") - mean = float("0.115454") - std = float("0.251449") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [192] - dtype = "float32" - min_val = float("-0.157661") - max_val = float("0.0523813") - mean = float("-0.0154723") - std = float("0.0237568") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0229903") - max_val = float("0.0319766") - mean = float("-0.000196356") - std = float("0.00258864") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [192] - dtype = "float32" - min_val = float("-0.370486") - max_val = float("-0.0106218") - mean = float("-0.161455") - std = float("0.0602386") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [192] - dtype = "float32" - min_val = float("0.9051") - max_val = float("1.22165") - mean = float("1.03058") - std = float("0.049462") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [192] - dtype = "float32" - min_val = float("0.0103088") - max_val = float("3.73392") - mean = float("0.208286") - std = float("0.402475") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [192] - dtype = "float32" - min_val = float("-0.0463211") - max_val = float("0.0315376") - mean = float("-0.00913568") - std = float("0.0128318") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0895417") - max_val = float("0.0442348") - mean = float("-0.000211153") - std = float("0.0035822") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [192] - dtype = "float32" - min_val = float("-0.397496") - max_val = float("0.0864231") - mean = float("-0.165482") - std = float("0.0737238") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [192] - dtype = "float32" - min_val = float("0.879897") - max_val = float("1.17541") - mean = float("1.01759") - std = float("0.0566345") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [192] - dtype = "float32" - min_val = float("0.00621631") - max_val = float("1.89483") - mean = float("0.0866904") - std = float("0.204768") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [192] - dtype = "float32" - min_val = float("-0.10704") 
- max_val = float("0.250321") - mean = float("0.0243905") - std = float("0.0576948") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.102693") - max_val = float("0.140514") - mean = float("-0.000271279") - std = float("0.00498004") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [192] - dtype = "float32" - min_val = float("-0.150926") - max_val = float("0.502386") - mean = float("-0.00458928") - std = float("0.0740063") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [192] - dtype = "float32" - min_val = float("0.932811") - max_val = float("1.23138") - mean = float("1.04668") - std = float("0.0628145") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [192] - dtype = "float32" - min_val = float("0.00500346") - max_val = float("0.658867") - mean = float("0.071691") - std = float("0.0871382") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [192] - dtype = "float32" - min_val = float("-0.179438") - max_val = float("0.127582") - mean = float("0.00911666") - std = float("0.0540771") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.135587") - max_val = float("0.0877653") - mean = float("-0.000157689") - std = float("0.00488948") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-0.312093") - max_val = float("-0.0453597") - mean = float("-0.171326") - std = float("0.0441224") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - min_val = float("0.787891") - max_val = float("1.17398") - mean = float("0.88519") - std = float("0.0343002") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.00518361") - max_val = float("0.717927") - mean = float("0.0818779") - std = float("0.074952") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-0.112216") - max_val = float("0.0645384") - mean = float("-0.00977235") - std = float("0.0242007") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0335371") - max_val = float("0.0385938") - mean = float("-0.000215569") - std = float("0.00376159") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [768] - dtype = "float32" - min_val = float("-0.16492") - max_val = float("0.116104") - mean = float("-0.0876142") - std = float("0.0238545") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [768] - dtype = "float32" - min_val = float("0.935583") - max_val = float("1.28041") - mean = float("1.02965") - std = float("0.0299369") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [768] - dtype = "float32" - min_val = float("0.00577983") - max_val = float("0.559026") - mean = float("0.0765748") - std = float("0.0652625") - data = None - - -class Program_weight_tensor_parameter_318: - name = 
"parameter_318" - shape = [768] - dtype = "float32" - min_val = float("-0.154632") - max_val = float("0.121681") - mean = float("-0.00883901") - std = float("0.0433664") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0409931") - max_val = float("0.0668") - mean = float("-0.000149374") - std = float("0.00323186") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.175711") - max_val = float("0.114937") - mean = float("-0.0598235") - std = float("0.031613") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("0.876243") - max_val = float("1.05485") - mean = float("0.975582") - std = float("0.0178294") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.000782373") - max_val = float("0.603868") - mean = float("0.0317629") - std = float("0.0542092") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-0.0928944") - max_val = float("0.0888735") - mean = float("0.00210265") - std = float("0.0327669") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0523449") - max_val = float("0.052802") - mean = float("4.73102e-05") - std = float("0.00322689") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [384] - dtype = "float32" - min_val = float("-0.175711") - max_val = float("0.114937") - mean = float("-0.0598235") - std = float("0.031613") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [384] - dtype = "float32" - min_val = float("0.939826") - max_val = float("1.08521") - mean = float("0.993811") - std = float("0.0188195") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [384] - dtype = "float32" - min_val = float("0.0100212") - max_val = float("3.79981") - mean = float("0.240529") - std = float("0.361857") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [384] - dtype = "float32" - min_val = float("-0.247484") - max_val = float("0.338793") - mean = float("-0.0375155") - std = float("0.0861982") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0258772") - max_val = float("0.0360759") - mean = float("-7.49101e-05") - std = float("0.001213") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [384] - dtype = "float32" - min_val = float("-0.151775") - max_val = float("0.102834") - mean = float("-0.0361192") - std = float("0.0243711") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [384] - dtype = "float32" - min_val = float("0.94838") - max_val = float("1.23657") - mean = float("1.02234") - std = float("0.0374976") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [384] - dtype = "float32" - min_val = float("0.0447625") - max_val = float("8.11219") - mean = float("0.62398") - std = float("1.02319") - data 
= None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [384] - dtype = "float32" - min_val = float("-0.712325") - max_val = float("0.573616") - mean = float("0.00533148") - std = float("0.162416") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0241385") - max_val = float("0.0318638") - mean = float("1.18507e-05") - std = float("0.00161292") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [384] - dtype = "float32" - min_val = float("-0.127682") - max_val = float("0.0290178") - mean = float("-0.0397355") - std = float("0.020477") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [384] - dtype = "float32" - min_val = float("0.935742") - max_val = float("1.22687") - mean = float("1.02229") - std = float("0.040962") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [384] - dtype = "float32" - min_val = float("0.255072") - max_val = float("120.906") - mean = float("9.80837") - std = float("15.5408") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [384] - dtype = "float32" - min_val = float("-4.13105") - max_val = float("6.0128") - mean = float("0.0669194") - std = float("1.73557") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-0.0444577") - max_val = float("0.0503126") - mean = float("0.0001148") - std = float("0.00260266") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [384] - dtype = "float32" - min_val = float("-0.0351402") - max_val = float("0.0448126") - mean = float("-0.00203657") - std = float("0.0112334") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [384] - dtype = "float32" - min_val = float("0.955294") - max_val = float("1.07985") - mean = float("0.990475") - std = float("0.01608") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [384] - dtype = "float32" - min_val = float("0.00198763") - max_val = float("0.0635025") - mean = float("0.0138523") - std = float("0.00926475") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [384] - dtype = "float32" - min_val = float("-0.0391059") - max_val = float("0.0393871") - mean = float("-0.0105951") - std = float("0.0131426") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0224049") - max_val = float("0.0332456") - mean = float("-0.000297944") - std = float("0.00282752") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [384] - dtype = "float32" - min_val = float("-0.0351403") - max_val = float("0.0448126") - mean = float("-0.00203657") - std = float("0.0112334") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [384] - dtype = "float32" - min_val = float("0.958435") - max_val = float("1.12455") - mean = float("1.0048") - std = float("0.0255586") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [384] - dtype = "float32" - min_val = float("0.0140512") - max_val = 
float("0.800076") - mean = float("0.108902") - std = float("0.0948212") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [384] - dtype = "float32" - min_val = float("-0.115193") - max_val = float("0.079648") - mean = float("-0.0321172") - std = float("0.0341768") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0255902") - max_val = float("0.0443317") - mean = float("-0.00011048") - std = float("0.00120834") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [384] - dtype = "float32" - min_val = float("-0.0758443") - max_val = float("0.0143564") - mean = float("-0.018454") - std = float("0.013002") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [384] - dtype = "float32" - min_val = float("0.948675") - max_val = float("1.19194") - mean = float("1.02021") - std = float("0.0307393") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [384] - dtype = "float32" - min_val = float("0.0278921") - max_val = float("3.5996") - mean = float("0.368141") - std = float("0.390009") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [384] - dtype = "float32" - min_val = float("-0.273124") - max_val = float("0.208821") - mean = float("-0.0380423") - std = float("0.077889") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0198387") - max_val = float("0.0263313") - mean = float("-7.66766e-05") - std = float("0.00143257") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [384] - dtype = "float32" - min_val = float("-0.0645948") - max_val = float("0.0263716") - mean = float("-0.0175864") - std = float("0.0121472") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [384] - dtype = "float32" - min_val = float("0.977031") - max_val = float("1.05231") - mean = float("0.998866") - std = float("0.0104315") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [384] - dtype = "float32" - min_val = float("0.000423634") - max_val = float("0.110283") - mean = float("0.0116174") - std = float("0.0134231") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [384] - dtype = "float32" - min_val = float("-0.0211042") - max_val = float("0.0345802") - mean = float("-0.0023165") - std = float("0.00961282") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0201647") - max_val = float("0.0351393") - mean = float("-8.36299e-05") - std = float("0.00242893") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [384] - dtype = "float32" - min_val = float("-0.0645947") - max_val = float("0.0263716") - mean = float("-0.0175864") - std = float("0.0121472") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [384] - dtype = "float32" - min_val = float("0.976794") - max_val = float("1.10103") - mean = float("1.00726") - std = float("0.0202551") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" 
- shape = [384] - dtype = "float32" - min_val = float("0.00768908") - max_val = float("1.55127") - mean = float("0.109916") - std = float("0.159092") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [384] - dtype = "float32" - min_val = float("-0.103719") - max_val = float("0.0862114") - mean = float("-0.0208973") - std = float("0.0271219") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0127177") - max_val = float("0.0247748") - mean = float("-9.78059e-05") - std = float("0.00103805") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [384] - dtype = "float32" - min_val = float("-0.084671") - max_val = float("-0.001335") - mean = float("-0.0378383") - std = float("0.0147188") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [384] - dtype = "float32" - min_val = float("0.961734") - max_val = float("1.11654") - mean = float("1.01877") - std = float("0.0258236") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [384] - dtype = "float32" - min_val = float("0.00767187") - max_val = float("0.454337") - mean = float("0.0877939") - std = float("0.0760314") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [384] - dtype = "float32" - min_val = float("-0.0506767") - max_val = float("0.0420926") - mean = float("-0.0059652") - std = float("0.018851") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0131907") - max_val = float("0.0210535") - mean = float("-3.41569e-05") - std = float("0.00141085") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [384] - dtype = "float32" - min_val = float("-0.10718") - max_val = float("0.0233714") - mean = float("-0.0562477") - std = float("0.019742") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [384] - dtype = "float32" - min_val = float("0.981369") - max_val = float("1.07286") - mean = float("1.02166") - std = float("0.0138993") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [384] - dtype = "float32" - min_val = float("0.00628651") - max_val = float("2.59218") - mean = float("0.256612") - std = float("0.377235") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [384] - dtype = "float32" - min_val = float("-0.10037") - max_val = float("0.159741") - mean = float("0.0326345") - std = float("0.0438385") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.017557") - max_val = float("0.0459387") - mean = float("-0.000156264") - std = float("0.00273104") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [384] - dtype = "float32" - min_val = float("-0.0589739") - max_val = float("0.0343006") - mean = float("-0.00850519") - std = float("0.0113599") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [384] - dtype = "float32" - min_val = float("1.00897") - max_val = float("1.21432") - mean = float("1.05133") - std = float("0.0209679") - data = 
None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [384] - dtype = "float32" - min_val = float("0.0034229") - max_val = float("0.869117") - mean = float("0.100891") - std = float("0.0914111") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [384] - dtype = "float32" - min_val = float("-0.0875187") - max_val = float("0.134152") - mean = float("0.0286449") - std = float("0.0311893") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0404324") - max_val = float("0.0486178") - mean = float("-0.000170107") - std = float("0.00283689") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [1024] - dtype = "float32" - min_val = float("-3.76561") - max_val = float("-0.731173") - mean = float("-2.19452") - std = float("0.428428") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [1024] - dtype = "float32" - min_val = float("1.62445") - max_val = float("4.43497") - mean = float("3.07461") - std = float("0.255239") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [1024] - dtype = "float32" - min_val = float("0.00300033") - max_val = float("1.03235") - mean = float("0.0732599") - std = float("0.0918229") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [1024] - dtype = "float32" - min_val = float("-0.102441") - max_val = float("0.0886517") - mean = float("0.0176384") - std = float("0.0286918") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0683686") - max_val = float("0.079499") - mean = float("-0.000295847") - std = float("0.00320057") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [768] - dtype = "float32" - min_val = float("-0.0121386") - max_val = float("0.00527683") - mean = float("-0.00056885") - std = float("0.00174363") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.104283") - max_val = float("0.108395") - mean = float("-0.000212643") - std = float("0.00130148") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [384] - dtype = "float32" - min_val = float("-1.77842") - max_val = float("0.498407") - mean = float("-0.300061") - std = float("0.296898") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [384] - dtype = "float32" - min_val = float("0.189938") - max_val = float("1.98187") - mean = float("0.620289") - std = float("0.278642") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [384] - dtype = "float32" - min_val = float("9.91892e-05") - max_val = float("0.0270208") - mean = float("0.00199739") - std = float("0.00264807") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [384] - dtype = "float32" - min_val = float("-0.0508396") - max_val = float("0.0837222") - mean = float("0.0276176") - std = float("0.0223824") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = 
float("-0.019634") - max_val = float("0.026249") - mean = float("-0.000305565") - std = float("0.00221217") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [384] - dtype = "float32" - min_val = float("-1.77842") - max_val = float("0.498407") - mean = float("-0.300061") - std = float("0.296898") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [384] - dtype = "float32" - min_val = float("0.365979") - max_val = float("2.77813") - mean = float("1.04754") - std = float("0.30869") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [384] - dtype = "float32" - min_val = float("0.000885633") - max_val = float("0.640413") - mean = float("0.0266485") - std = float("0.0528161") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [384] - dtype = "float32" - min_val = float("-0.2607") - max_val = float("0.252739") - mean = float("0.0237469") - std = float("0.0710539") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0161625") - max_val = float("0.0237353") - mean = float("-2.77227e-05") - std = float("0.00143234") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [384] - dtype = "float32" - min_val = float("-2.61241") - max_val = float("0.0552853") - mean = float("-1.58347") - std = float("0.416394") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [384] - dtype = "float32" - min_val = float("0.56794") - max_val = float("1.67647") - mean = float("1.12425") - std = float("0.146808") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [384] - dtype = "float32" - min_val = float("0.0425016") - max_val = float("8.60413") - mean = float("0.715833") - std = float("0.95826") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [384] - dtype = "float32" - min_val = float("-0.579806") - max_val = float("0.450828") - mean = float("0.0787798") - std = float("0.153293") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0172878") - max_val = float("0.0422016") - mean = float("-0.000115855") - std = float("0.00184295") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [384] - dtype = "float32" - min_val = float("-1.93787") - max_val = float("0.733813") - mean = float("-0.570636") - std = float("0.36594") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [384] - dtype = "float32" - min_val = float("0.140643") - max_val = float("2.06386") - mean = float("0.563265") - std = float("0.226726") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [384] - dtype = "float32" - min_val = float("7.24306e-05") - max_val = float("0.0212711") - mean = float("0.00249773") - std = float("0.00263818") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [384] - dtype = "float32" - min_val = float("-0.0480177") - max_val = float("0.0906403") - mean = float("0.0290943") - std = float("0.0218768") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" 
- shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0187865") - max_val = float("0.0197971") - mean = float("-0.000317036") - std = float("0.00213102") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [384] - dtype = "float32" - min_val = float("-1.93797") - max_val = float("0.733813") - mean = float("-0.570636") - std = float("0.365941") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [384] - dtype = "float32" - min_val = float("0.579568") - max_val = float("2.10408") - mean = float("1.0926") - std = float("0.254088") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [384] - dtype = "float32" - min_val = float("0.00121035") - max_val = float("0.722758") - mean = float("0.0326783") - std = float("0.0604562") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [384] - dtype = "float32" - min_val = float("-0.233337") - max_val = float("0.235021") - mean = float("0.0433769") - std = float("0.0724029") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.017636") - max_val = float("0.0242342") - mean = float("-5.98118e-05") - std = float("0.00148823") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [384] - dtype = "float32" - min_val = float("-2.42769") - max_val = float("0.839301") - mean = float("-1.42177") - std = float("0.360858") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [384] - dtype = "float32" - min_val = float("0.438544") - max_val = float("1.84376") - mean = float("1.15689") - std = float("0.142353") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [384] - dtype = "float32" - min_val = float("0.0203352") - max_val = float("3.181") - mean = float("0.368327") - std = float("0.465673") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [384] - dtype = "float32" - min_val = float("-0.205922") - max_val = float("0.327803") - mean = float("0.0418561") - std = float("0.0756754") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0216487") - max_val = float("0.0329662") - mean = float("-0.000111566") - std = float("0.00182337") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [384] - dtype = "float32" - min_val = float("-1.88519") - max_val = float("0.489282") - mean = float("-0.478509") - std = float("0.384475") - data = None - - -class Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [384] - dtype = "float32" - min_val = float("0.0964864") - max_val = float("2.12186") - mean = float("0.441698") - std = float("0.215794") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [384] - dtype = "float32" - min_val = float("0.000190261") - max_val = float("0.0374628") - mean = float("0.00504513") - std = float("0.00528967") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [384] - dtype = "float32" - min_val = float("-0.0833438") - max_val = float("0.106805") - mean = float("0.0319307") - std = float("0.0228581") - data = None - - -class 
Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0196403") - max_val = float("0.0181013") - mean = float("-0.000367563") - std = float("0.00183188") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [384] - dtype = "float32" - min_val = float("-1.88519") - max_val = float("0.489282") - mean = float("-0.478509") - std = float("0.384475") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [384] - dtype = "float32" - min_val = float("0.572754") - max_val = float("2.21625") - mean = float("1.0609") - std = float("0.254729") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [384] - dtype = "float32" - min_val = float("0.00314815") - max_val = float("0.609992") - mean = float("0.063887") - std = float("0.0854036") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [384] - dtype = "float32" - min_val = float("-0.309743") - max_val = float("0.213002") - mean = float("0.0357878") - std = float("0.0857551") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0167874") - max_val = float("0.0235595") - mean = float("-4.10985e-05") - std = float("0.0015578") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [384] - dtype = "float32" - min_val = float("-2.15541") - max_val = float("0.429211") - mean = float("-1.38242") - std = float("0.277701") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [384] - dtype = "float32" - min_val = float("0.714293") - max_val = float("1.63322") - mean = float("1.13559") - std = float("0.0992203") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [384] - dtype = "float32" - min_val = float("0.00874361") - max_val = float("1.63267") - mean = float("0.172849") - std = float("0.199136") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [384] - dtype = "float32" - min_val = float("-0.262866") - max_val = float("0.171872") - mean = float("0.00667807") - std = float("0.0549583") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0255212") - max_val = float("0.0449234") - mean = float("-9.38033e-05") - std = float("0.00171679") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [384] - dtype = "float32" - min_val = float("-2.9313") - max_val = float("1.76163") - mean = float("-0.765032") - std = float("0.654203") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [384] - dtype = "float32" - min_val = float("0.974162") - max_val = float("2.91141") - mean = float("1.85277") - std = float("0.272517") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [384] - dtype = "float32" - min_val = float("0.00181718") - max_val = float("0.234308") - mean = float("0.0412232") - std = float("0.0430638") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [384] - dtype = "float32" - min_val = float("-0.234178") - max_val = float("0.245307") - mean = 
float("0.0695519") - std = float("0.0618743") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0469498") - max_val = float("0.0418139") - mean = float("-0.000433104") - std = float("0.00390566") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [384] - dtype = "float32" - min_val = float("-2.24508") - max_val = float("0.69413") - mean = float("-0.776468") - std = float("0.476207") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [384] - dtype = "float32" - min_val = float("0.973977") - max_val = float("2.89139") - mean = float("2.10296") - std = float("0.303008") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [384] - dtype = "float32" - min_val = float("0.000334173") - max_val = float("0.779074") - mean = float("0.0234264") - std = float("0.0613748") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [384] - dtype = "float32" - min_val = float("-0.246706") - max_val = float("0.197941") - mean = float("0.032747") - std = float("0.0469715") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.128826") - max_val = float("0.0634304") - mean = float("-0.000209218") - std = float("0.0029129") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [768] - dtype = "float32" - min_val = float("-2.41087") - max_val = float("0.654592") - mean = float("-0.916074") - std = float("0.344533") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - shape = [768] - dtype = "float32" - min_val = float("0.51965") - max_val = float("1.8768") - mean = float("0.91262") - std = float("0.147168") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [768] - dtype = "float32" - min_val = float("0.00391866") - max_val = float("6.24599") - mean = float("0.161767") - std = float("0.35048") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [768] - dtype = "float32" - min_val = float("-0.309254") - max_val = float("0.670901") - mean = float("0.0400058") - std = float("0.130768") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [768, 512, 3, 3] - dtype = "float32" - min_val = float("-0.049509") - max_val = float("0.0452558") - mean = float("-4.74502e-05") - std = float("0.00178091") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [512] - dtype = "float32" - min_val = float("-3.38771") - max_val = float("1.66524") - mean = float("-1.17835") - std = float("0.526979") - data = None - - -class Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [512] - dtype = "float32" - min_val = float("0.490699") - max_val = float("1.69897") - mean = float("1.10972") - std = float("0.150271") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [512] - dtype = "float32" - min_val = float("0.00174229") - max_val = float("1.5935") - mean = float("0.0661371") - std = float("0.125469") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = [512] - dtype = "float32" - min_val = 
float("-0.179287") - max_val = float("0.13044") - mean = float("-0.00872415") - std = float("0.0427454") - data = None - - -class Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.228408") - max_val = float("0.195018") - mean = float("-0.000416299") - std = float("0.00607886") - data = None - - -class Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [384] - dtype = "float32" - min_val = float("-0.00934079") - max_val = float("0.000912873") - mean = float("-0.00232942") - std = float("0.00179213") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.202973") - max_val = float("0.134442") - mean = float("-0.00176474") - std = float("0.00412056") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [192] - dtype = "float32" - min_val = float("-1.95309") - max_val = float("0.504365") - mean = float("-0.323219") - std = float("0.341256") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [192] - dtype = "float32" - min_val = float("0.0702496") - max_val = float("2.23431") - mean = float("0.601975") - std = float("0.439856") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [192] - dtype = "float32" - min_val = float("4.76391e-05") - max_val = float("0.0303151") - mean = float("0.00419476") - std = float("0.00513906") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [192] - dtype = "float32" - min_val = float("-0.0732794") - max_val = float("0.0514422") - mean = float("0.00968913") - std = float("0.0191895") - data = None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0205319") - max_val = float("0.0540422") - mean = float("-0.000288173") - std = float("0.00337745") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [192] - dtype = "float32" - min_val = float("-1.95309") - max_val = float("0.504365") - mean = float("-0.323219") - std = float("0.341256") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [192] - dtype = "float32" - min_val = float("0.384813") - max_val = float("2.87188") - mean = float("1.22967") - std = float("0.52103") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [192] - dtype = "float32" - min_val = float("0.00334316") - max_val = float("0.649378") - mean = float("0.0680944") - std = float("0.0920336") - data = None - - -class Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [192] - dtype = "float32" - min_val = float("-0.149357") - max_val = float("0.210071") - mean = float("0.0335995") - std = float("0.0682833") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0206109") - max_val = float("0.0328944") - mean = float("-0.000105819") - std = float("0.00247448") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [192] - dtype = "float32" - min_val = float("-2.89041") - max_val = float("-0.124973") - mean = float("-1.33321") - std = float("0.398058") - data = None - - -class 
Program_weight_tensor_parameter_465: - name = "parameter_465" - shape = [192] - dtype = "float32" - min_val = float("0.719268") - max_val = float("2.09501") - mean = float("1.16332") - std = float("0.171594") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [192] - dtype = "float32" - min_val = float("0.0195207") - max_val = float("8.32209") - mean = float("1.10631") - std = float("1.4008") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [192] - dtype = "float32" - min_val = float("-1.98123") - max_val = float("1.21603") - mean = float("0.0516891") - std = float("0.330024") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0302761") - max_val = float("0.0413314") - mean = float("-9.04468e-05") - std = float("0.00300886") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [192] - dtype = "float32" - min_val = float("-1.92933") - max_val = float("0.596271") - mean = float("-0.261624") - std = float("0.334454") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [192] - dtype = "float32" - min_val = float("0.0488552") - max_val = float("1.76755") - mean = float("0.453568") - std = float("0.30262") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [192] - dtype = "float32" - min_val = float("2.24294e-05") - max_val = float("0.164315") - mean = float("0.00587699") - std = float("0.0161003") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [192] - dtype = "float32" - min_val = float("-0.0490808") - max_val = float("0.0669156") - mean = float("0.0149264") - std = float("0.0217292") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0242882") - max_val = float("0.0299433") - mean = float("-0.000340263") - std = float("0.00326035") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [192] - dtype = "float32" - min_val = float("-1.92933") - max_val = float("0.596271") - mean = float("-0.261624") - std = float("0.334454") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [192] - dtype = "float32" - min_val = float("0.419712") - max_val = float("2.27591") - mean = float("1.14823") - std = float("0.381008") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [192] - dtype = "float32" - min_val = float("0.00492381") - max_val = float("1.92887") - mean = float("0.107118") - std = float("0.20391") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [192] - dtype = "float32" - min_val = float("-0.242602") - max_val = float("0.295724") - mean = float("0.0375102") - std = float("0.0955899") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0188034") - max_val = float("0.0253279") - mean = float("-8.278e-05") - std = float("0.00266932") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [192] - dtype = "float32" - min_val = float("-2.53631") - max_val = float("-0.131674") - mean = 
float("-1.31632") - std = float("0.443813") - data = None - - -class Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [192] - dtype = "float32" - min_val = float("0.718027") - max_val = float("1.65396") - mean = float("1.17902") - std = float("0.161151") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [192] - dtype = "float32" - min_val = float("0.0311619") - max_val = float("12.4415") - mean = float("1.08409") - std = float("1.81585") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [192] - dtype = "float32" - min_val = float("-1.09829") - max_val = float("0.629046") - mean = float("0.123942") - std = float("0.213918") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.042754") - max_val = float("0.0429227") - mean = float("-0.000109615") - std = float("0.00313016") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [192] - dtype = "float32" - min_val = float("-1.76349") - max_val = float("0.544904") - mean = float("-0.246355") - std = float("0.349411") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [192] - dtype = "float32" - min_val = float("0.00801797") - max_val = float("1.66398") - mean = float("0.357353") - std = float("0.246738") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [192] - dtype = "float32" - min_val = float("7.15677e-05") - max_val = float("0.105247") - mean = float("0.00410654") - std = float("0.0106252") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape = [192] - dtype = "float32" - min_val = float("-0.0643907") - max_val = float("0.0951448") - mean = float("0.0170309") - std = float("0.0236018") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.020302") - max_val = float("0.0261232") - mean = float("-0.00036246") - std = float("0.00323036") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [192] - dtype = "float32" - min_val = float("-1.76349") - max_val = float("0.544905") - mean = float("-0.246355") - std = float("0.349411") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [192] - dtype = "float32" - min_val = float("0.385673") - max_val = float("1.96785") - mean = float("1.06985") - std = float("0.336108") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [192] - dtype = "float32" - min_val = float("0.00226675") - max_val = float("1.91593") - mean = float("0.101595") - std = float("0.227642") - data = None - - -class Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [192] - dtype = "float32" - min_val = float("-0.261671") - max_val = float("0.282769") - mean = float("0.0457371") - std = float("0.101536") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0263202") - max_val = float("0.031635") - mean = float("-0.000104025") - std = float("0.00284282") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [192] - dtype = "float32" - min_val = 
float("-2.51462") - max_val = float("0.158257") - mean = float("-1.26766") - std = float("0.427753") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [192] - dtype = "float32" - min_val = float("0.598242") - max_val = float("1.78157") - mean = float("1.1494") - std = float("0.161575") - data = None - - -class Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [192] - dtype = "float32" - min_val = float("0.0222714") - max_val = float("37.7966") - mean = float("1.49691") - std = float("3.97204") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [192] - dtype = "float32" - min_val = float("-0.873886") - max_val = float("0.81748") - mean = float("0.0710219") - std = float("0.213003") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0380726") - max_val = float("0.047001") - mean = float("-0.000179735") - std = float("0.00344855") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [192] - dtype = "float32" - min_val = float("-2.08881") - max_val = float("0.648999") - mean = float("-0.259801") - std = float("0.386671") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [192] - dtype = "float32" - min_val = float("0.000298623") - max_val = float("0.722711") - mean = float("0.216526") - std = float("0.135243") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [192] - dtype = "float32" - min_val = float("4.96868e-08") - max_val = float("0.107631") - mean = float("0.00291956") - std = float("0.00896913") - data = None - - -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [192] - dtype = "float32" - min_val = float("-0.0826544") - max_val = float("0.0880952") - mean = float("0.0119081") - std = float("0.0222372") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0383726") - max_val = float("0.0426544") - mean = float("-0.000299896") - std = float("0.00305223") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [192] - dtype = "float32" - min_val = float("-2.08881") - max_val = float("0.648999") - mean = float("-0.259801") - std = float("0.386671") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [192] - dtype = "float32" - min_val = float("0.394983") - max_val = float("1.95775") - mean = float("0.953923") - std = float("0.305397") - data = None - - -class Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [192] - dtype = "float32" - min_val = float("0.00262196") - max_val = float("7.469") - mean = float("0.131262") - std = float("0.559292") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [192] - dtype = "float32" - min_val = float("-1.11459") - max_val = float("0.413148") - mean = float("0.0497411") - std = float("0.149456") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0273404") - max_val = float("0.0675831") - mean = float("-0.000146946") - std = float("0.00314256") - data = None - - -class Program_weight_tensor_parameter_509: - name = 
"parameter_509" - shape = [192] - dtype = "float32" - min_val = float("-2.77342") - max_val = float("-0.0376017") - mean = float("-1.25998") - std = float("0.434996") - data = None - - -class Program_weight_tensor_parameter_510: - name = "parameter_510" - shape = [192] - dtype = "float32" - min_val = float("0.744961") - max_val = float("1.56236") - mean = float("1.13335") - std = float("0.139709") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [192] - dtype = "float32" - min_val = float("0.00886955") - max_val = float("38.6321") - mean = float("1.16498") - std = float("4.2895") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [192] - dtype = "float32" - min_val = float("-0.81896") - max_val = float("0.818165") - mean = float("0.0290114") - std = float("0.197181") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0472119") - max_val = float("0.0488221") - mean = float("-0.000229853") - std = float("0.00339264") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [192] - dtype = "float32" - min_val = float("-1.20653") - max_val = float("0.515824") - mean = float("-0.218129") - std = float("0.352315") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [192] - dtype = "float32" - min_val = float("-3.41948e-06") - max_val = float("0.680384") - mean = float("0.195099") - std = float("0.117152") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [192] - dtype = "float32" - min_val = float("6.06307e-12") - max_val = float("0.398718") - mean = float("0.00738152") - std = float("0.0311542") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [192] - dtype = "float32" - min_val = float("-0.129674") - max_val = float("0.071682") - mean = float("0.0112656") - std = float("0.0230303") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0270501") - max_val = float("0.0345417") - mean = float("-0.000283263") - std = float("0.00318819") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [192] - dtype = "float32" - min_val = float("-1.20653") - max_val = float("0.515824") - mean = float("-0.218129") - std = float("0.352315") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [192] - dtype = "float32" - min_val = float("0.398135") - max_val = float("1.5715") - mean = float("0.848265") - std = float("0.259232") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [192] - dtype = "float32" - min_val = float("0.00433799") - max_val = float("10.4863") - mean = float("0.263417") - std = float("0.977392") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [192] - dtype = "float32" - min_val = float("-0.947376") - max_val = float("0.293492") - mean = float("0.0244436") - std = float("0.157364") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0235823") - max_val = float("0.0383498") - mean = float("-6.68298e-05") - std = float("0.00312995") - data = 
None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [192] - dtype = "float32" - min_val = float("-2.48859") - max_val = float("-0.0816794") - mean = float("-1.27085") - std = float("0.420072") - data = None - - -class Program_weight_tensor_parameter_525: - name = "parameter_525" - shape = [192] - dtype = "float32" - min_val = float("0.694565") - max_val = float("1.54206") - mean = float("1.10687") - std = float("0.135381") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [192] - dtype = "float32" - min_val = float("0.00545689") - max_val = float("4.02717") - mean = float("0.403302") - std = float("0.646807") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [192] - dtype = "float32" - min_val = float("-0.588504") - max_val = float("0.746211") - mean = float("0.00482585") - std = float("0.188335") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0454542") - max_val = float("0.0510382") - mean = float("-0.00015484") - std = float("0.00337127") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [192] - dtype = "float32" - min_val = float("-1.23302") - max_val = float("0.509381") - mean = float("-0.153645") - std = float("0.30407") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [192] - dtype = "float32" - min_val = float("0.00227296") - max_val = float("1.53114") - mean = float("0.236795") - std = float("0.211228") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - shape = [192] - dtype = "float32" - min_val = float("2.14271e-05") - max_val = float("0.127867") - mean = float("0.00908259") - std = float("0.0177169") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [192] - dtype = "float32" - min_val = float("-0.0935318") - max_val = float("0.137813") - mean = float("0.0162608") - std = float("0.0290513") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0512067") - max_val = float("0.0280295") - mean = float("-0.000438388") - std = float("0.00368151") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [192] - dtype = "float32" - min_val = float("-1.23302") - max_val = float("0.509381") - mean = float("-0.153645") - std = float("0.30407") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [192] - dtype = "float32" - min_val = float("0.332594") - max_val = float("1.44107") - mean = float("0.751491") - std = float("0.218724") - data = None - - -class Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [192] - dtype = "float32" - min_val = float("0.00580545") - max_val = float("3.585") - mean = float("0.245801") - std = float("0.410758") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [192] - dtype = "float32" - min_val = float("-0.654561") - max_val = float("0.520224") - mean = float("0.0637138") - std = float("0.132489") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0460525") - max_val = float("0.0463877") - mean = 
float("-0.000214011") - std = float("0.0030628") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [192] - dtype = "float32" - min_val = float("-1.86975") - max_val = float("-0.187693") - mean = float("-1.16402") - std = float("0.325704") - data = None - - -class Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [192] - dtype = "float32" - min_val = float("0.751831") - max_val = float("1.61753") - mean = float("1.10973") - std = float("0.131817") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [192] - dtype = "float32" - min_val = float("0.00623295") - max_val = float("7.47422") - mean = float("0.274923") - std = float("0.674046") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [192] - dtype = "float32" - min_val = float("-0.566295") - max_val = float("0.711085") - mean = float("-0.0315813") - std = float("0.130281") - data = None - - -class Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0494038") - max_val = float("0.0562226") - mean = float("-0.000159684") - std = float("0.00334459") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [192] - dtype = "float32" - min_val = float("-2.81555") - max_val = float("1.61423") - mean = float("-0.0254536") - std = float("0.761522") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [192] - dtype = "float32" - min_val = float("0.476452") - max_val = float("2.07853") - mean = float("0.880028") - std = float("0.224402") - data = None - - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [192] - dtype = "float32" - min_val = float("0.00413651") - max_val = float("6.56568") - mean = float("0.232782") - std = float("0.576315") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [192] - dtype = "float32" - min_val = float("-0.211511") - max_val = float("0.111527") - mean = float("-0.00608796") - std = float("0.0521257") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0748107") - max_val = float("0.0760846") - mean = float("-0.000563234") - std = float("0.00701903") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [192] - dtype = "float32" - min_val = float("-2.91449") - max_val = float("2.11472") - mean = float("0.103721") - std = float("0.667968") - data = None - - -class Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [192] - dtype = "float32" - min_val = float("0.856446") - max_val = float("5.71069") - mean = float("1.92533") - std = float("0.968824") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [192] - dtype = "float32" - min_val = float("0.00176998") - max_val = float("0.489107") - mean = float("0.0413273") - std = float("0.0662281") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [192] - dtype = "float32" - min_val = float("-0.089043") - max_val = float("0.117142") - mean = float("0.00920289") - std = float("0.0350583") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = 
float("-0.0673953") - max_val = float("0.128402") - mean = float("-0.000430991") - std = float("0.00603374") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [384] - dtype = "float32" - min_val = float("-2.92773") - max_val = float("1.33693") - mean = float("-0.313455") - std = float("0.572295") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [384] - dtype = "float32" - min_val = float("0.699301") - max_val = float("2.45328") - mean = float("1.14709") - std = float("0.257994") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [384] - dtype = "float32" - min_val = float("0.00380316") - max_val = float("33.1796") - mean = float("0.637666") - std = float("1.97202") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [384] - dtype = "float32" - min_val = float("-0.920201") - max_val = float("0.383577") - mean = float("0.0242312") - std = float("0.154423") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0550514") - max_val = float("0.0583738") - mean = float("-6.14001e-05") - std = float("0.00339831") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [256] - dtype = "float32" - min_val = float("-2.08101") - max_val = float("1.23907") - mean = float("-0.929071") - std = float("0.560762") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [256] - dtype = "float32" - min_val = float("0.460852") - max_val = float("1.60647") - mean = float("1.03822") - std = float("0.186473") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [256] - dtype = "float32" - min_val = float("0.00288698") - max_val = float("2.85435") - mean = float("0.124524") - std = float("0.278458") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [256] - dtype = "float32" - min_val = float("-0.513886") - max_val = float("0.320015") - mean = float("-0.0168315") - std = float("0.107427") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.22133") - max_val = float("0.170826") - mean = float("-0.000439134") - std = float("0.0126658") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [192] - dtype = "float32" - min_val = float("-0.0149414") - max_val = float("0.00274012") - mean = float("-0.0039491") - std = float("0.00303571") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.518434") - max_val = float("0.174661") - mean = float("-0.00334445") - std = float("0.00882037") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [96] - dtype = "float32" - min_val = float("-1.89866") - max_val = float("0.648435") - mean = float("-0.163531") - std = float("0.445964") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [96] - dtype = "float32" - min_val = float("0.118827") - max_val = float("3.45075") - mean = float("0.648866") - std = float("0.709221") - data = None - - -class Program_weight_tensor_parameter_568: - name = 
"parameter_568" - shape = [96] - dtype = "float32" - min_val = float("5.72145e-05") - max_val = float("0.0305947") - mean = float("0.00350272") - std = float("0.00465155") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - shape = [96] - dtype = "float32" - min_val = float("-0.0537825") - max_val = float("0.0541921") - mean = float("0.010285") - std = float("0.0254635") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0435225") - max_val = float("0.0719533") - mean = float("-0.000638006") - std = float("0.00649986") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [96] - dtype = "float32" - min_val = float("-1.89866") - max_val = float("0.648435") - mean = float("-0.163531") - std = float("0.445964") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [96] - dtype = "float32" - min_val = float("0.274848") - max_val = float("5.76456") - mean = float("1.11235") - std = float("0.941375") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [96] - dtype = "float32" - min_val = float("0.00262636") - max_val = float("0.731286") - mean = float("0.0545356") - std = float("0.080615") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [96] - dtype = "float32" - min_val = float("-0.403692") - max_val = float("0.191464") - mean = float("0.0207246") - std = float("0.102896") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.034156") - max_val = float("0.0485352") - mean = float("-0.000167324") - std = float("0.00457433") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [96] - dtype = "float32" - min_val = float("-2.47237") - max_val = float("-0.039721") - mean = float("-1.25291") - std = float("0.438553") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [96] - dtype = "float32" - min_val = float("0.484422") - max_val = float("1.73309") - mean = float("0.919688") - std = float("0.175697") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [96] - dtype = "float32" - min_val = float("0.0913526") - max_val = float("5.78304") - mean = float("1.20582") - std = float("1.11379") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [96] - dtype = "float32" - min_val = float("-2.67749") - max_val = float("1.89478") - mean = float("-0.0280634") - std = float("0.649795") - data = None - - -class Program_weight_tensor_parameter_580: - name = "parameter_580" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.125626") - max_val = float("0.0867783") - mean = float("-0.000262143") - std = float("0.00565539") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [96] - dtype = "float32" - min_val = float("-1.36009") - max_val = float("0.613418") - mean = float("-0.109119") - std = float("0.363358") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [96] - dtype = "float32" - min_val = float("0.00951907") - max_val = float("1.85237") - mean = float("0.454878") - std = float("0.359562") - data = None - - -class 
Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [96] - dtype = "float32" - min_val = float("1.34543e-05") - max_val = float("0.100978") - mean = float("0.00659523") - std = float("0.0127025") - data = None - - -class Program_weight_tensor_parameter_584: - name = "parameter_584" - shape = [96] - dtype = "float32" - min_val = float("-0.0733295") - max_val = float("0.0665844") - mean = float("0.0164669") - std = float("0.0261863") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0616569") - max_val = float("0.0533414") - mean = float("-0.000915536") - std = float("0.0063832") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [96] - dtype = "float32" - min_val = float("-1.36009") - max_val = float("0.613418") - mean = float("-0.109119") - std = float("0.363358") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [96] - dtype = "float32" - min_val = float("0.381804") - max_val = float("2.31148") - mean = float("0.90401") - std = float("0.422733") - data = None - - -class Program_weight_tensor_parameter_588: - name = "parameter_588" - shape = [96] - dtype = "float32" - min_val = float("0.0028765") - max_val = float("1.96506") - mean = float("0.163957") - std = float("0.29453") - data = None - - -class Program_weight_tensor_parameter_589: - name = "parameter_589" - shape = [96] - dtype = "float32" - min_val = float("-0.169521") - max_val = float("0.382588") - mean = float("0.0552") - std = float("0.100991") - data = None - - -class Program_weight_tensor_parameter_590: - name = "parameter_590" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0539882") - max_val = float("0.0612803") - mean = float("-0.000352692") - std = float("0.0047621") - data = None - - -class Program_weight_tensor_parameter_591: - name = "parameter_591" - shape = [96] - dtype = "float32" - min_val = float("-3.30884") - max_val = float("0.356165") - mean = float("-1.21887") - std = float("0.556975") - data = None - - -class Program_weight_tensor_parameter_592: - name = "parameter_592" - shape = [96] - dtype = "float32" - min_val = float("0.424695") - max_val = float("1.92592") - mean = float("1.00728") - std = float("0.236573") - data = None - - -class Program_weight_tensor_parameter_593: - name = "parameter_593" - shape = [96] - dtype = "float32" - min_val = float("0.0237573") - max_val = float("6.63037") - mean = float("1.04207") - std = float("1.2277") - data = None - - -class Program_weight_tensor_parameter_594: - name = "parameter_594" - shape = [96] - dtype = "float32" - min_val = float("-1.1716") - max_val = float("1.33307") - mean = float("0.029565") - std = float("0.439463") - data = None - - -class Program_weight_tensor_parameter_595: - name = "parameter_595" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.134962") - max_val = float("0.136054") - mean = float("-0.000198739") - std = float("0.0057204") - data = None - - -class Program_weight_tensor_parameter_596: - name = "parameter_596" - shape = [96] - dtype = "float32" - min_val = float("-1.22361") - max_val = float("0.654799") - mean = float("-0.0922317") - std = float("0.305958") - data = None - - -class Program_weight_tensor_parameter_597: - name = "parameter_597" - shape = [96] - dtype = "float32" - min_val = float("0.0316927") - max_val = float("1.28685") - mean = float("0.312909") - std = float("0.193627") 
- data = None - - -class Program_weight_tensor_parameter_598: - name = "parameter_598" - shape = [96] - dtype = "float32" - min_val = float("9.22096e-05") - max_val = float("0.267661") - mean = float("0.00780572") - std = float("0.0277819") - data = None - - -class Program_weight_tensor_parameter_599: - name = "parameter_599" - shape = [96] - dtype = "float32" - min_val = float("-0.127424") - max_val = float("0.0779931") - mean = float("0.00867301") - std = float("0.0281039") - data = None - - -class Program_weight_tensor_parameter_600: - name = "parameter_600" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0357805") - max_val = float("0.04937") - mean = float("-0.000468989") - std = float("0.00648871") - data = None - - -class Program_weight_tensor_parameter_601: - name = "parameter_601" - shape = [96] - dtype = "float32" - min_val = float("-1.22361") - max_val = float("0.654799") - mean = float("-0.0922317") - std = float("0.305958") - data = None - - -class Program_weight_tensor_parameter_602: - name = "parameter_602" - shape = [96] - dtype = "float32" - min_val = float("0.321215") - max_val = float("1.60468") - mean = float("0.742453") - std = float("0.256594") - data = None - - -class Program_weight_tensor_parameter_603: - name = "parameter_603" - shape = [96] - dtype = "float32" - min_val = float("0.00594866") - max_val = float("13.1468") - mean = float("0.280576") - std = float("1.35706") - data = None - - -class Program_weight_tensor_parameter_604: - name = "parameter_604" - shape = [96] - dtype = "float32" - min_val = float("-0.512798") - max_val = float("0.360902") - mean = float("0.024901") - std = float("0.131996") - data = None - - -class Program_weight_tensor_parameter_605: - name = "parameter_605" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0457283") - max_val = float("0.0529573") - mean = float("-0.000146386") - std = float("0.00507802") - data = None - - -class Program_weight_tensor_parameter_606: - name = "parameter_606" - shape = [96] - dtype = "float32" - min_val = float("-3.56417") - max_val = float("0.313374") - mean = float("-1.16309") - std = float("0.578663") - data = None - - -class Program_weight_tensor_parameter_607: - name = "parameter_607" - shape = [96] - dtype = "float32" - min_val = float("0.515839") - max_val = float("2.22553") - mean = float("1.01898") - std = float("0.244192") - data = None - - -class Program_weight_tensor_parameter_608: - name = "parameter_608" - shape = [96] - dtype = "float32" - min_val = float("0.0566049") - max_val = float("8.50527") - mean = float("0.821018") - std = float("1.25589") - data = None - - -class Program_weight_tensor_parameter_609: - name = "parameter_609" - shape = [96] - dtype = "float32" - min_val = float("-1.3259") - max_val = float("0.986206") - mean = float("0.0226801") - std = float("0.384225") - data = None - - -class Program_weight_tensor_parameter_610: - name = "parameter_610" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.106683") - max_val = float("0.124295") - mean = float("-0.000180527") - std = float("0.00585965") - data = None - - -class Program_weight_tensor_parameter_611: - name = "parameter_611" - shape = [96] - dtype = "float32" - min_val = float("-0.914024") - max_val = float("0.548448") - mean = float("-0.147808") - std = float("0.291506") - data = None - - -class Program_weight_tensor_parameter_612: - name = "parameter_612" - shape = [96] - dtype = "float32" - min_val = float("0.0339392") - max_val = float("1.38054") - mean = 
float("0.313981") - std = float("0.205844") - data = None - - -class Program_weight_tensor_parameter_613: - name = "parameter_613" - shape = [96] - dtype = "float32" - min_val = float("2.21066e-05") - max_val = float("0.136525") - mean = float("0.00723304") - std = float("0.0189755") - data = None - - -class Program_weight_tensor_parameter_614: - name = "parameter_614" - shape = [96] - dtype = "float32" - min_val = float("-0.0806456") - max_val = float("0.0638613") - mean = float("0.013262") - std = float("0.0276148") - data = None - - -class Program_weight_tensor_parameter_615: - name = "parameter_615" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0590545") - max_val = float("0.0449412") - mean = float("-0.00069183") - std = float("0.00674782") - data = None - - -class Program_weight_tensor_parameter_616: - name = "parameter_616" - shape = [96] - dtype = "float32" - min_val = float("-0.914024") - max_val = float("0.548448") - mean = float("-0.147808") - std = float("0.291506") - data = None - - -class Program_weight_tensor_parameter_617: - name = "parameter_617" - shape = [96] - dtype = "float32" - min_val = float("0.142128") - max_val = float("1.73988") - mean = float("0.702206") - std = float("0.285683") - data = None - - -class Program_weight_tensor_parameter_618: - name = "parameter_618" - shape = [96] - dtype = "float32" - min_val = float("0.00387817") - max_val = float("1.69108") - mean = float("0.132084") - std = float("0.265544") - data = None - - -class Program_weight_tensor_parameter_619: - name = "parameter_619" - shape = [96] - dtype = "float32" - min_val = float("-0.365992") - max_val = float("0.29138") - mean = float("0.0334006") - std = float("0.11377") - data = None - - -class Program_weight_tensor_parameter_620: - name = "parameter_620" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0709519") - max_val = float("0.0589113") - mean = float("-0.000196033") - std = float("0.00498706") - data = None - - -class Program_weight_tensor_parameter_621: - name = "parameter_621" - shape = [96] - dtype = "float32" - min_val = float("-2.6239") - max_val = float("0.0469523") - mean = float("-1.09235") - std = float("0.492344") - data = None - - -class Program_weight_tensor_parameter_622: - name = "parameter_622" - shape = [96] - dtype = "float32" - min_val = float("0.546345") - max_val = float("1.74702") - mean = float("0.990286") - std = float("0.183688") - data = None - - -class Program_weight_tensor_parameter_623: - name = "parameter_623" - shape = [96] - dtype = "float32" - min_val = float("0.036742") - max_val = float("4.90946") - mean = float("0.515044") - std = float("0.805591") - data = None - - -class Program_weight_tensor_parameter_624: - name = "parameter_624" - shape = [96] - dtype = "float32" - min_val = float("-0.8935") - max_val = float("0.923151") - mean = float("-0.0110131") - std = float("0.335925") - data = None - - -class Program_weight_tensor_parameter_625: - name = "parameter_625" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0557588") - max_val = float("0.0977312") - mean = float("-0.000280993") - std = float("0.00584596") - data = None - - -class Program_weight_tensor_parameter_626: - name = "parameter_626" - shape = [96] - dtype = "float32" - min_val = float("-0.982144") - max_val = float("0.555605") - mean = float("-0.128127") - std = float("0.289988") - data = None - - -class Program_weight_tensor_parameter_627: - name = "parameter_627" - shape = [96] - dtype = "float32" - min_val = 
float("0.0646262") - max_val = float("1.15632") - mean = float("0.273046") - std = float("0.165216") - data = None - - -class Program_weight_tensor_parameter_628: - name = "parameter_628" - shape = [96] - dtype = "float32" - min_val = float("0.000135618") - max_val = float("0.187931") - mean = float("0.0150782") - std = float("0.0310702") - data = None - - -class Program_weight_tensor_parameter_629: - name = "parameter_629" - shape = [96] - dtype = "float32" - min_val = float("-0.0878502") - max_val = float("0.0810483") - mean = float("0.00536994") - std = float("0.0324391") - data = None - - -class Program_weight_tensor_parameter_630: - name = "parameter_630" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0693269") - max_val = float("0.0585016") - mean = float("-7.57148e-05") - std = float("0.00796075") - data = None - - -class Program_weight_tensor_parameter_631: - name = "parameter_631" - shape = [96] - dtype = "float32" - min_val = float("-0.982144") - max_val = float("0.555603") - mean = float("-0.128127") - std = float("0.289988") - data = None - - -class Program_weight_tensor_parameter_632: - name = "parameter_632" - shape = [96] - dtype = "float32" - min_val = float("0.178989") - max_val = float("1.52642") - mean = float("0.577049") - std = float("0.230661") - data = None - - -class Program_weight_tensor_parameter_633: - name = "parameter_633" - shape = [96] - dtype = "float32" - min_val = float("0.00908185") - max_val = float("6.25082") - mean = float("0.308875") - std = float("0.731709") - data = None - - -class Program_weight_tensor_parameter_634: - name = "parameter_634" - shape = [96] - dtype = "float32" - min_val = float("-0.503088") - max_val = float("0.306438") - mean = float("0.0144393") - std = float("0.128309") - data = None - - -class Program_weight_tensor_parameter_635: - name = "parameter_635" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0559299") - max_val = float("0.0452462") - mean = float("5.04339e-05") - std = float("0.0052732") - data = None - - -class Program_weight_tensor_parameter_636: - name = "parameter_636" - shape = [96] - dtype = "float32" - min_val = float("-3.34611") - max_val = float("0.216964") - mean = float("-1.02056") - std = float("0.542199") - data = None - - -class Program_weight_tensor_parameter_637: - name = "parameter_637" - shape = [96] - dtype = "float32" - min_val = float("0.541273") - max_val = float("2.73475") - mean = float("1.04359") - std = float("0.234238") - data = None - - -class Program_weight_tensor_parameter_638: - name = "parameter_638" - shape = [96] - dtype = "float32" - min_val = float("0.00839167") - max_val = float("10.3218") - mean = float("0.36125") - std = float("1.13641") - data = None - - -class Program_weight_tensor_parameter_639: - name = "parameter_639" - shape = [96] - dtype = "float32" - min_val = float("-0.676206") - max_val = float("2.05745") - mean = float("-0.000203486") - std = float("0.390477") - data = None - - -class Program_weight_tensor_parameter_640: - name = "parameter_640" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0823601") - max_val = float("0.0816342") - mean = float("-0.000177511") - std = float("0.00638281") - data = None - - -class Program_weight_tensor_parameter_641: - name = "parameter_641" - shape = [96] - dtype = "float32" - min_val = float("-0.60359") - max_val = float("0.468132") - mean = float("-0.0840719") - std = float("0.256343") - data = None - - -class Program_weight_tensor_parameter_642: - name = "parameter_642" - 
shape = [96] - dtype = "float32" - min_val = float("0.0544126") - max_val = float("1.22927") - mean = float("0.286115") - std = float("0.196735") - data = None - - -class Program_weight_tensor_parameter_643: - name = "parameter_643" - shape = [96] - dtype = "float32" - min_val = float("0.000143085") - max_val = float("0.359897") - mean = float("0.0363701") - std = float("0.0574546") - data = None - - -class Program_weight_tensor_parameter_644: - name = "parameter_644" - shape = [96] - dtype = "float32" - min_val = float("-0.032617") - max_val = float("0.0498112") - mean = float("0.0055907") - std = float("0.016775") - data = None - - -class Program_weight_tensor_parameter_645: - name = "parameter_645" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0773345") - max_val = float("0.0606068") - mean = float("-0.000857754") - std = float("0.00880052") - data = None - - -class Program_weight_tensor_parameter_646: - name = "parameter_646" - shape = [96] - dtype = "float32" - min_val = float("-0.60359") - max_val = float("0.468132") - mean = float("-0.0840719") - std = float("0.256343") - data = None - - -class Program_weight_tensor_parameter_647: - name = "parameter_647" - shape = [96] - dtype = "float32" - min_val = float("0.182862") - max_val = float("1.3226") - mean = float("0.518428") - std = float("0.258918") - data = None - - -class Program_weight_tensor_parameter_648: - name = "parameter_648" - shape = [96] - dtype = "float32" - min_val = float("0.00892751") - max_val = float("44.0063") - mean = float("0.942949") - std = float("4.46794") - data = None - - -class Program_weight_tensor_parameter_649: - name = "parameter_649" - shape = [96] - dtype = "float32" - min_val = float("-0.205703") - max_val = float("0.154926") - mean = float("0.000264955") - std = float("0.061708") - data = None - - -class Program_weight_tensor_parameter_650: - name = "parameter_650" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0785968") - max_val = float("0.0587748") - mean = float("6.78683e-05") - std = float("0.00555531") - data = None - - -class Program_weight_tensor_parameter_651: - name = "parameter_651" - shape = [96] - dtype = "float32" - min_val = float("-2.41369") - max_val = float("0.496388") - mean = float("-0.836608") - std = float("0.475164") - data = None - - -class Program_weight_tensor_parameter_652: - name = "parameter_652" - shape = [96] - dtype = "float32" - min_val = float("0.830242") - max_val = float("2.26639") - mean = float("1.25319") - std = float("0.21549") - data = None - - -class Program_weight_tensor_parameter_653: - name = "parameter_653" - shape = [96] - dtype = "float32" - min_val = float("0.00554491") - max_val = float("3.92354") - mean = float("0.225363") - std = float("0.491203") - data = None - - -class Program_weight_tensor_parameter_654: - name = "parameter_654" - shape = [96] - dtype = "float32" - min_val = float("-0.682011") - max_val = float("0.863938") - mean = float("-0.0559418") - std = float("0.30762") - data = None - - -class Program_weight_tensor_parameter_655: - name = "parameter_655" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.12871") - max_val = float("0.120943") - mean = float("-7.30879e-05") - std = float("0.00678624") - data = None - - -class Program_weight_tensor_parameter_656: - name = "parameter_656" - shape = [96] - dtype = "float32" - min_val = float("-3.19003") - max_val = float("1.93308") - mean = float("0.508604") - std = float("0.871957") - data = None - - -class 
Program_weight_tensor_parameter_657: - name = "parameter_657" - shape = [96] - dtype = "float32" - min_val = float("0.229244") - max_val = float("2.60033") - mean = float("0.516748") - std = float("0.323392") - data = None - - -class Program_weight_tensor_parameter_658: - name = "parameter_658" - shape = [96] - dtype = "float32" - min_val = float("0.00224007") - max_val = float("3.81285") - mean = float("0.214005") - std = float("0.462532") - data = None - - -class Program_weight_tensor_parameter_659: - name = "parameter_659" - shape = [96] - dtype = "float32" - min_val = float("-0.449601") - max_val = float("0.320966") - mean = float("-0.0199257") - std = float("0.135458") - data = None - - -class Program_weight_tensor_parameter_660: - name = "parameter_660" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.149955") - max_val = float("0.139515") - mean = float("-0.000577616") - std = float("0.0141809") - data = None - - -class Program_weight_tensor_parameter_661: - name = "parameter_661" - shape = [96] - dtype = "float32" - min_val = float("-4.89436") - max_val = float("1.73118") - mean = float("0.421666") - std = float("1.05477") - data = None - - -class Program_weight_tensor_parameter_662: - name = "parameter_662" - shape = [96] - dtype = "float32" - min_val = float("0.368666") - max_val = float("6.94933") - mean = float("1.70017") - std = float("1.37461") - data = None - - -class Program_weight_tensor_parameter_663: - name = "parameter_663" - shape = [96] - dtype = "float32" - min_val = float("0.00483646") - max_val = float("0.772254") - mean = float("0.109227") - std = float("0.134964") - data = None - - -class Program_weight_tensor_parameter_664: - name = "parameter_664" - shape = [96] - dtype = "float32" - min_val = float("-0.28843") - max_val = float("0.305353") - mean = float("0.017579") - std = float("0.109216") - data = None - - -class Program_weight_tensor_parameter_665: - name = "parameter_665" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.085332") - max_val = float("0.209102") - mean = float("0.000131895") - std = float("0.0127058") - data = None - - -class Program_weight_tensor_parameter_666: - name = "parameter_666" - shape = [192] - dtype = "float32" - min_val = float("-2.26416") - max_val = float("1.81781") - mean = float("-0.104368") - std = float("0.765541") - data = None - - -class Program_weight_tensor_parameter_667: - name = "parameter_667" - shape = [192] - dtype = "float32" - min_val = float("0.557485") - max_val = float("3.06726") - mean = float("1.03433") - std = float("0.294829") - data = None - - -class Program_weight_tensor_parameter_668: - name = "parameter_668" - shape = [192] - dtype = "float32" - min_val = float("0.0122193") - max_val = float("31.99") - mean = float("0.691469") - std = float("2.78351") - data = None - - -class Program_weight_tensor_parameter_669: - name = "parameter_669" - shape = [192] - dtype = "float32" - min_val = float("-0.716405") - max_val = float("0.543476") - mean = float("-0.0205713") - std = float("0.180835") - data = None - - -class Program_weight_tensor_parameter_670: - name = "parameter_670" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.088438") - max_val = float("0.0989281") - mean = float("-0.000264084") - std = float("0.00696874") - data = None - - -class Program_weight_tensor_parameter_671: - name = "parameter_671" - shape = [128] - dtype = "float32" - min_val = float("-2.77349") - max_val = float("1.94915") - mean = float("-0.748001") - std = 
float("0.667633") - data = None - - -class Program_weight_tensor_parameter_672: - name = "parameter_672" - shape = [128] - dtype = "float32" - min_val = float("0.282119") - max_val = float("2.11163") - mean = float("0.963232") - std = float("0.25164") - data = None - - -class Program_weight_tensor_parameter_673: - name = "parameter_673" - shape = [128] - dtype = "float32" - min_val = float("0.00348181") - max_val = float("0.641549") - mean = float("0.0466075") - std = float("0.0727225") - data = None - - -class Program_weight_tensor_parameter_674: - name = "parameter_674" - shape = [128] - dtype = "float32" - min_val = float("-0.799433") - max_val = float("0.769186") - mean = float("-0.000783121") - std = float("0.234277") - data = None - - -class Program_weight_tensor_parameter_675: - name = "parameter_675" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.237331") - max_val = float("0.234872") - mean = float("-0.00058892") - std = float("0.0227538") - data = None - - -class Program_weight_tensor_parameter_676: - name = "parameter_676" - shape = [96] - dtype = "float32" - min_val = float("-0.0196388") - max_val = float("0.00429926") - mean = float("-0.00604744") - std = float("0.00423631") - data = None - - -class Program_weight_tensor_parameter_677: - name = "parameter_677" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.202175") - max_val = float("0.139733") - mean = float("-0.00708148") - std = float("0.015418") - data = None - - -class Program_weight_tensor_parameter_678: - name = "parameter_678" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_679: - name = "parameter_679" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_680: - name = "parameter_680" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_681: - name = "parameter_681" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_682: - name = "parameter_682" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.113449") - max_val = float("0.116382") - mean = float("-8.18214e-05") - std = float("0.0174881") - data = None - - -class Program_weight_tensor_parameter_683: - name = "parameter_683" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_684: - name = "parameter_684" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_685: - name = "parameter_685" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_686: - name = "parameter_686" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_687: - name = "parameter_687" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.110314") - max_val = float("0.0737469") - mean = float("-6.02772e-05") - std = float("0.0116185") - data = None - - -class Program_weight_tensor_parameter_688: - name = "parameter_688" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_689: - name = "parameter_689" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_690: - name = "parameter_690" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_691: - name = "parameter_691" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_692: - name = "parameter_692" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.114734") - max_val = float("0.10347") - mean = float("-0.000467215") - std = float("0.0140058") - data = None - - -class Program_weight_tensor_parameter_693: - name = "parameter_693" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_694: - name = "parameter_694" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_695: - name = "parameter_695" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_696: - name = "parameter_696" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_697: - name = "parameter_697" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.195911") - max_val = float("0.178967") - mean = float("-0.00335025") - std = float("0.0224713") - data = None - - -class Program_weight_tensor_parameter_698: - name = "parameter_698" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_699: - name = "parameter_699" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_700: - name = "parameter_700" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_701: - name = "parameter_701" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_702: - name = "parameter_702" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.100652") - max_val = float("0.124988") - mean = float("-0.00151496") - std = float("0.0136224") - data = None - - -class Program_weight_tensor_parameter_703: - name = "parameter_703" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_704: - name = "parameter_704" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_705: - name = "parameter_705" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_706: - name = "parameter_706" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_707: - name = "parameter_707" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.177811") - max_val = float("0.145591") - mean = float("-0.00141726") - std = float("0.0186424") - data = None - - -class 
Program_weight_tensor_parameter_708: - name = "parameter_708" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_709: - name = "parameter_709" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_710: - name = "parameter_710" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_711: - name = "parameter_711" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_712: - name = "parameter_712" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.374308") - max_val = float("0.181152") - mean = float("-0.000464201") - std = float("0.0312118") - data = None - - -class Program_weight_tensor_parameter_713: - name = "parameter_713" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_714: - name = "parameter_714" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_715: - name = "parameter_715" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_716: - name = "parameter_716" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_717: - name = "parameter_717" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.310804") - max_val = float("0.0760952") - mean = float("-0.000399181") - std = float("0.0158955") - data = None - - -class Program_weight_tensor_parameter_718: - name = "parameter_718" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_719: - name = "parameter_719" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_720: - name = "parameter_720" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_721: - name = "parameter_721" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_722: - name = "parameter_722" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.156863") - max_val = float("0.102575") - mean = float("-0.000657703") - std = float("0.0180323") - data = None - - -class Program_weight_tensor_parameter_723: - name = "parameter_723" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_724: - name = "parameter_724" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_725: - name = "parameter_725" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_726: - name = "parameter_726" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_727: - name = "parameter_727" - shape = [48, 96, 
1, 1] - dtype = "float32" - min_val = float("-0.231057") - max_val = float("0.209028") - mean = float("-0.00477756") - std = float("0.0398542") - data = None - - -class Program_weight_tensor_parameter_728: - name = "parameter_728" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_729: - name = "parameter_729" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_730: - name = "parameter_730" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_731: - name = "parameter_731" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_732: - name = "parameter_732" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.096208") - max_val = float("0.142834") - mean = float("-0.000778624") - std = float("0.0186653") - data = None - - -class Program_weight_tensor_parameter_733: - name = "parameter_733" - shape = [96] - dtype = "float32" - min_val = float("-3.10984") - max_val = float("3.25719") - mean = float("0.366741") - std = float("1.14155") - data = None - - -class Program_weight_tensor_parameter_734: - name = "parameter_734" - shape = [96] - dtype = "float32" - min_val = float("0.799139") - max_val = float("4.98464") - mean = float("1.87865") - std = float("0.779024") - data = None - - -class Program_weight_tensor_parameter_735: - name = "parameter_735" - shape = [96] - dtype = "float32" - min_val = float("1.76406") - max_val = float("1001.9") - mean = float("80.1803") - std = float("148.327") - data = None - - -class Program_weight_tensor_parameter_736: - name = "parameter_736" - shape = [96] - dtype = "float32" - min_val = float("-12.59") - max_val = float("13.3036") - mean = float("-0.396251") - std = float("4.14618") - data = None - - -class Program_weight_tensor_parameter_737: - name = "parameter_737" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.0895351") - max_val = float("0.0977452") - mean = float("-0.000531484") - std = float("0.0157853") - data = None - - -class Program_weight_tensor_parameter_738: - name = "parameter_738" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_739: - name = "parameter_739" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_740: - name = "parameter_740" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_741: - name = "parameter_741" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_742: - name = "parameter_742" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.15482") - max_val = float("0.145416") - mean = float("-0.000966986") - std = float("0.0231232") - data = None - - -class Program_weight_tensor_parameter_743: - name = "parameter_743" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_744: - name = "parameter_744" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_745: - name = "parameter_745" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_746: - name = "parameter_746" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_747: - name = "parameter_747" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-0.27982") - max_val = float("0.160959") - mean = float("0.00175241") - std = float("0.0302711") - data = None - - -class Program_weight_tensor_parameter_748: - name = "parameter_748" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_749: - name = "parameter_749" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_750: - name = "parameter_750" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_751: - name = "parameter_751" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_752: - name = "parameter_752" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-0.231799") - max_val = float("0.266845") - mean = float("0.00703356") - std = float("0.0617717") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt deleted file mode 100644 index a62e3c346..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b3076122b18bff71174d70b804480bc7272686491c85cf6a5e3c4b3eba6b39ef \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json deleted file mode 100644 index 4a2e26ae4..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-L_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py deleted file mode 100644 index b47fda69a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/input_meta.py +++ /dev/null @@ -1,62 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 12096] - dtype = "float32" - max_val = float("2.0") - mean = float("0.00223214") - std = float("0.0480607") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 11, 12096] - dtype = "float32" - max_val = float("0.940076") - mean = float("0.000804856") - std = float("0.0222414") - data = None - - -class 
Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 11, 12096] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000202922") - std = float("0.0142436") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 11, 1] - dtype = "int32" - data = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 11, 4] - dtype = "float32" - max_val = float("629.571") - mean = float("192.521") - std = float("244.622") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 11, 12096] - dtype = "float32" - max_val = float("0.00694391") - mean = float("1.95634e-06") - std = float("9.01196e-05") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py deleted file mode 100644 index 41382ee7e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/model.py +++ /dev/null @@ -1,229 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x12096xf32) <- (2x12096xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x12096xb) <- (2x1x12096xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 11, 1] - - # pd_op.tile: (2x11x12096xb) <- (2x1x12096xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) - del full_int_array_1, greater_than_0 - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x12096x11xf32) <- (2x12096xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 - ) - del argmax_0, full_2 - - # pd_op.transpose: (2x11x12096xf32) <- (2x12096x11xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x11x12096xf32) <- (2x11x12096xb, 2x11x12096xf32, 2x11x12096xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) - del data_2, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.sum: (2x12096xf32) <- (2x11x12096xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - - # pd_op.argmax: (2x12096xi64) <- (2x11x12096xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) - del full_1 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("11"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = 
paddle._C_ops.scale(data_3, full_3, float("0"), True) - del data_3, full_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (2x12096xi64) <- (2x12096xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_0) - del argmax_1, cast_0 - - # pd_op.flatten: (22xi32) <- (2x11x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (24192xi64) <- (2x12096xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (24192xi32) <- (22xi32, 24192xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [2, 12096] - - # pd_op.reshape: (2x12096xi32) <- (24192xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) - del full_int_array_3, gather_0 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x12096xb) <- (2x12096xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x12096xi32) <- (2x12096xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - del full_6 - - # pd_op.where: (2x12096xi32) <- (2x12096xb, 2x12096xi32, 2x12096xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [-1, 4] - - # pd_op.reshape: (22x4xf32) <- (2x11x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) - del data_5, full_int_array_4 - - # pd_op.gather: (24192x4xf32) <- (22x4xf32, 24192xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) - del flatten_1, full_4, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [2, 12096, 4] - - # pd_op.reshape: (2x12096x4xf32) <- (24192x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) - del full_int_array_5, gather_1 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x12096x5xf32) <- (2x12096xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_7, where_1.dtype), full_7 - ) - del full_7 - - # pd_op.full: (4xi64) <- () - full_8 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_8, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), - ) - del full_8 - - # pd_op.index_select: (2x12096x4xf32) <- (2x12096x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) - multiply_1 = paddle._C_ops.multiply(data_6, where_0) - del data_6 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [-1] - - # pd_op.max: (2x11x1xf32) <- 
(2x11x12096xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) - - # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x12096xf32) - multiply_2 = paddle._C_ops.multiply(data_1, where_0) - del data_1, where_0 - - # pd_op.max: (2x11x1xf32) <- (2x11x12096xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) - del multiply_2 - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x11x1xf32) <- (2x11x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) - del full_9, max_0 - - # pd_op.divide: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 - - # pd_op.multiply: (2x11x12096xf32) <- (2x11x12096xf32, 2x11x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x12096xf32) <- (2x11x12096xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) - del full_int_array_2, multiply_3 - - # pd_op.unsqueeze: (2x12096x1xf32) <- (2x12096xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) - del full_int_array_6, max_2 - - # pd_op.multiply: (2x12096x4xf32) <- (2x12096x4xf32, 2x12096x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-L_human/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-L_human/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-L_human/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_13/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-L_human/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_6/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt deleted file mode 100644 index 315669793..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -772280e4e0e6df48fdaa1b3bea8b0dd5cbaf7dad6c90d940fb9d38ade5d09fa6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json 
b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json deleted file mode 100644 index 399d8354e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py deleted file mode 100644 index 73ef358c5..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/input_meta.py +++ /dev/null @@ -1,9 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 3, 640, 640] - dtype = "float32" - min_val = float("-2.1179") - max_val = float("2.64") - mean = float("0.521346") - std = float("0.808427") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py deleted file mode 100644 index 7a218f1b7..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/model.py +++ /dev/null @@ -1,4048 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - 
parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - 
parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - data_0, - ): - # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_367, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_0, parameter_367 - - # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_366, - parameter_365, - parameter_364, - parameter_363, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_363, parameter_364, parameter_365, parameter_366 - - # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_362 - - # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_361, - parameter_360, - parameter_359, - parameter_358, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_358, parameter_359, parameter_360, parameter_361 - - # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_357, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_357 - - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_356, - parameter_355, - parameter_354, - parameter_353, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_353, parameter_354, parameter_355, parameter_356 - - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_352, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_352 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - 
batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_351, - parameter_350, - parameter_349, - parameter_348, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_348, parameter_349, parameter_350, parameter_351 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_347 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_346, - parameter_345, - parameter_344, - parameter_343, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_343, parameter_344, parameter_345, parameter_346 - - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_342, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_342 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_341, - parameter_340, - parameter_339, - parameter_338, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_338, parameter_339, parameter_340, parameter_341 - - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_337 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_336, - parameter_335, - parameter_334, - parameter_333, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_333, parameter_334, parameter_335, parameter_336 - - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_332, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_332 - - # pd_op.batch_norm_: 
(2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_331, - parameter_330, - parameter_329, - parameter_328, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_328, parameter_329, parameter_330, parameter_331 - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_327 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_326, - parameter_325, - parameter_324, - parameter_323, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_323, parameter_324, parameter_325, parameter_326 - - # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) - - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_8 = paddle._C_ops.swish(add_0) - - # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_3 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_4 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_5 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_6 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_7 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_0 - - # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) - combine_0 = [swish_5, add_1] - - # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_13 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_14 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_15 = full_int_array_0 - - # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - mean_0, parameter_322, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_322 - - # 
pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_321, full_int_array_1) - del parameter_321 - - # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_9, reshape_0) - - # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_2, float("0.166667"), float("0.5") - ) - del add_2 - - # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_0, parameter_320, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_320 - - # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_319, - parameter_318, - parameter_317, - parameter_316, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_316, parameter_317, parameter_318, parameter_319 - - # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_315, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_315 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_314, - parameter_313, - parameter_312, - parameter_311, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_311, parameter_312, parameter_313, parameter_314 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_10 = paddle._C_ops.swish(batch_norm__60) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - swish_10, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_310 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_309, - parameter_308, - parameter_307, - parameter_306, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_306, parameter_307, parameter_308, parameter_309 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(batch_norm__66) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_10, 
parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_305 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_304, - parameter_303, - parameter_302, - parameter_301, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_301, parameter_302, parameter_303, parameter_304 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_12 = paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_12, parameter_300, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_300 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_299, - parameter_298, - parameter_297, - parameter_296, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_296, parameter_297, parameter_298, parameter_299 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__78) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_15 = paddle._C_ops.conv2d( - swish_13, parameter_295, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_295 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_15, - parameter_294, - parameter_293, - parameter_292, - parameter_291, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_291, parameter_292, parameter_293, parameter_294 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - swish_13, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_290 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_289, - parameter_288, - parameter_287, - parameter_286, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_286, parameter_287, parameter_288, parameter_289 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) - - # 
pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_14 = paddle._C_ops.swish(add_3) - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_4 = paddle._C_ops.add(swish_12, swish_14) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - add_4, parameter_285, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_285 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_284, - parameter_283, - parameter_282, - parameter_281, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_281, parameter_282, parameter_283, parameter_284 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_15, parameter_280, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_280 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_279, - parameter_278, - parameter_277, - parameter_276, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_276, parameter_277, parameter_278, parameter_279 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_15, parameter_275, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_275 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_274, - parameter_273, - parameter_272, - parameter_271, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_271, parameter_272, parameter_273, parameter_274 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_16 = paddle._C_ops.swish(add_5) - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_6 = paddle._C_ops.add(add_4, swish_16) - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_1 = [swish_11, add_6] - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, 
True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_20 = paddle._C_ops.conv2d( - mean_1, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_270 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_269, full_int_array_1) - del parameter_269 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_20, reshape_1) - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_7, float("0.166667"), float("0.5") - ) - del add_7 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_21 = paddle._C_ops.conv2d( - multiply_1, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_268 - - # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_267, - parameter_266, - parameter_265, - parameter_264, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_264, parameter_265, parameter_266, parameter_267 - - # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_263, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_263 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_262, - parameter_261, - parameter_260, - parameter_259, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_259, parameter_260, parameter_261, parameter_262 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_18 = paddle._C_ops.swish(batch_norm__120) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_23 = paddle._C_ops.conv2d( - swish_18, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_258 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_257, - parameter_256, - parameter_255, - parameter_254, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_254, parameter_255, parameter_256, parameter_257 - - # pd_op.swish: (2x96x-1x-1xf32) <- 
(2x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(batch_norm__126) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_253, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_253 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_252, - parameter_251, - parameter_250, - parameter_249, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_249, parameter_250, parameter_251, parameter_252 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_20, parameter_248, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_248 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_247, - parameter_246, - parameter_245, - parameter_244, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_244, parameter_245, parameter_246, parameter_247 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(batch_norm__138) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - swish_21, parameter_243, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_243 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_242, - parameter_241, - parameter_240, - parameter_239, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_239, parameter_240, parameter_241, parameter_242 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_238 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_237, - parameter_236, - parameter_235, - parameter_234, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, 
None), - ) - del parameter_234, parameter_235, parameter_236, parameter_237 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(add_8) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_9 = paddle._C_ops.add(swish_20, swish_22) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_28 = paddle._C_ops.conv2d( - add_9, parameter_233, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_233 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_232, - parameter_231, - parameter_230, - parameter_229, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_229, parameter_230, parameter_231, parameter_232 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(batch_norm__156) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - swish_23, parameter_228, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_228 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_227, - parameter_226, - parameter_225, - parameter_224, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_224, parameter_225, parameter_226, parameter_227 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_223, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_223 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_222, - parameter_221, - parameter_220, - parameter_219, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_219, parameter_220, parameter_221, parameter_222 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(add_10) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(add_9, swish_24) - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_2 = [swish_19, add_11] - - # pd_op.concat: (2x192x-1x-1xf32) <- 
([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - mean_2, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_218 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_217, full_int_array_1) - del parameter_217 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_31, reshape_2) - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_12, float("0.166667"), float("0.5") - ) - del add_12 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_32 = paddle._C_ops.conv2d( - multiply_2, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_216 - - # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_215, - parameter_214, - parameter_213, - parameter_212, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_212, parameter_213, parameter_214, parameter_215 - - # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) - swish_25 = paddle._C_ops.swish(batch_norm__174) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_211 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_210, - parameter_209, - parameter_208, - parameter_207, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_207, parameter_208, parameter_209, parameter_210 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_26 = paddle._C_ops.swish(batch_norm__180) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_26, parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_206 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_205, - parameter_204, - parameter_203, - parameter_202, - False, - 
float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_202, parameter_203, parameter_204, parameter_205 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_27 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_35 = paddle._C_ops.conv2d( - swish_26, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_201 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_200, - parameter_199, - parameter_198, - parameter_197, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_197, parameter_198, parameter_199, parameter_200 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_28 = paddle._C_ops.swish(batch_norm__192) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_28, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_196 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_195, - parameter_194, - parameter_193, - parameter_192, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_192, parameter_193, parameter_194, parameter_195 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__198) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_29, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_191 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_190, - parameter_189, - parameter_188, - parameter_187, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_187, parameter_188, parameter_189, parameter_190 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - swish_29, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_186 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - 
batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_38, - parameter_185, - parameter_184, - parameter_183, - parameter_182, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_182, parameter_183, parameter_184, parameter_185 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_30 = paddle._C_ops.swish(add_13) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_14 = paddle._C_ops.add(swish_28, swish_30) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_3 = [swish_27, add_14] - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - mean_3, parameter_181, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_181 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_180, full_int_array_1) - del full_int_array_1, parameter_180 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_15 = paddle._C_ops.add(conv2d_39, reshape_3) - - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_15, float("0.166667"), float("0.5") - ) - del add_15 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) - - # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_40 = paddle._C_ops.conv2d( - multiply_3, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__216) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_31, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - 
float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__222) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_31, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - 
batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(add_16) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] - - # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_35, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_35, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_35, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_4 = [swish_35, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 192x768x1x1xf32) - conv2d_46 = paddle._C_ops.conv2d( - concat_4, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(batch_norm__252) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_5 = [swish_32, swish_36] - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_47 = paddle._C_ops.conv2d( - concat_5, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - 
batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_37 = paddle._C_ops.swish(batch_norm__258) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(batch_norm__264) - - # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_38, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) - combine_6 = [nearest_interp_0, swish_25] - - # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_49 = paddle._C_ops.conv2d( - concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_39 = paddle._C_ops.swish(batch_norm__270) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_50 = paddle._C_ops.conv2d( - concat_6, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_50, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_40 = paddle._C_ops.swish(batch_norm__276) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_41 = paddle._C_ops.swish(batch_norm__282) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_42 = paddle._C_ops.swish(add_17) - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_7 = [swish_39, swish_42] - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_7 = 
paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - concat_7, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(batch_norm__300) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_55 = paddle._C_ops.conv2d( - swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_44 = paddle._C_ops.swish(batch_norm__306) - - # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_44, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) - combine_8 = [nearest_interp_1, swish_17] - - # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) - conv2d_56 = paddle._C_ops.conv2d( - concat_8, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__312) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - 
concat_8, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__324) - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) - - # 
pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_48 = paddle._C_ops.swish(add_18) - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_9 = [swish_45, swish_48] - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - concat_9, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_61, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_49 = paddle._C_ops.swish(batch_norm__342) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_62 = paddle._C_ops.conv2d( - swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_50 = paddle._C_ops.swish(batch_norm__348) - - # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) - combine_10 = [swish_50, swish_43] - - # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) - conv2d_63 = paddle._C_ops.conv2d( - concat_10, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_51 = paddle._C_ops.swish(batch_norm__354) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) - conv2d_64 = 
paddle._C_ops.conv2d( - concat_10, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_52 = paddle._C_ops.swish(batch_norm__360) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_53 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_48, - parameter_47, - parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, parameter_48 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_19 = paddle._C_ops.add(batch_norm__372, 
batch_norm__378) - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_54 = paddle._C_ops.swish(add_19) - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_11 = [swish_51, swish_54] - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - concat_11, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_39 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_55 = paddle._C_ops.swish(batch_norm__384) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__390) - - # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) - combine_12 = [swish_56, swish_37] - - # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_70 = paddle._C_ops.conv2d( - concat_12, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_28, - parameter_27, - parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_57 = paddle._C_ops.swish(batch_norm__396) - - # 
pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - concat_12, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_58 = paddle._C_ops.swish(batch_norm__402) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_15, parameter_16, parameter_17, parameter_18 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_59 = paddle._C_ops.swish(batch_norm__408) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, 
parameter_8 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_60 = paddle._C_ops.swish(add_20) - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_13 = [swish_57, swish_60] - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_75, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__426) - del ( - add_0, - add_1, - add_10, - add_11, - add_13, - add_14, - add_16, - add_17, - add_18, - add_19, - add_20, - add_3, - add_4, - add_5, - add_6, - add_8, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - 
batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - 
batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - batch_norm__48, - batch_norm__49, - batch_norm__5, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - batch_norm__6, - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__7, - batch_norm__70, - batch_norm__71, - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - batch_norm__78, - batch_norm__79, - batch_norm__8, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_10, - concat_11, - concat_12, - concat_13, - concat_2, - concat_3, - concat_4, - concat_5, - concat_6, - concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - 
conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_8, - conv2d_9, - full_0, - full_int_array_0, - full_int_array_2, - full_int_array_3, - full_int_array_4, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_1, - reshape_2, - reshape_3, - swish_1, - swish_10, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_55, - swish_56, - swish_57, - swish_58, - swish_59, - swish_6, - swish_60, - swish_7, - swish_8, - swish_9, - ) - - return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt deleted file mode 100644 index a2c6d8b8a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -81e1c79881631ba9c8fea543662e0c88108b9a0a23f037c923767f08270a38b3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json deleted file mode 100644 index 399d8354e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py deleted file mode 100644 index 1cb0adc74..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/input_meta.py +++ /dev/null @@ -1,31 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 384, 16, 16] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("5.37003") - mean = float("0.223584") - std = float("0.551132") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 192, 32, 32] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("7.76898") - mean = float("0.298021") - std = float("0.601257") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 96, 64, 64] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("9.44375") - mean = float("0.389922") - std = float("0.624856") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py deleted 
file mode 100644 index fc0f65e76..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/model.py +++ /dev/null @@ -1,1050 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - ): - # pd_op.full: (1xf64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("16"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (16xi64) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") - del full_1 - - # pd_op.cast: (16xf32) <- (16xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (16xf32) <- (16xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (16xf32) <- (16xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) - del full_4, scale_0 - - # builtin.combine: ([16xf32, 16xf32]) <- (16xf32, 16xf32) - combine_0 = [scale_1, scale_1] - del scale_1 - - # pd_op.meshgrid: ([16x16xf32, 16x16xf32]) <- ([16xf32, 16xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (16x16xf32, 16x16xf32) <- ([16x16xf32, 16x16xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - - # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - - # builtin.combine: ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32) - combine_1 = [scale_2, scale_3, scale_4, scale_5] - del scale_2, scale_3, scale_4, scale_5 - - # pd_op.stack: (16x16x4xf32) <- ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([16x16xf32, 16x16xf32]) 
<- (16x16xf32, 16x16xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (16x16x2xf32) <- ([16x16xf32, 16x16xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (256x4xf32) <- (16x16x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (256x2xf32) <- (16x16x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.full: (256x1xf32) <- () - full_5 = paddle._C_ops.full( - [256, 1], - float("32"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("32"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") - del full_6 - - # pd_op.cast: (32xf32) <- (32xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (32xf32) <- (32xf32, 1xf32) - scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) - del cast_1 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (32xf32) <- (32xf32, 1xf32) - scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) - del full_7, scale_6 - - # builtin.combine: ([32xf32, 32xf32]) <- (32xf32, 32xf32) - combine_3 = [scale_7, scale_7] - del scale_7 - - # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_3) - del combine_3 - - # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - - # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) - scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) - - # builtin.combine: ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32) - combine_4 = [scale_8, scale_9, scale_10, scale_11] - del scale_10, scale_11, scale_8, scale_9 - - # pd_op.stack: (32x32x4xf32) <- ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) - stack_2 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) - combine_5 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # pd_op.reshape: (1024x4xf32) <- (32x32x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) - del stack_2 - - # pd_op.reshape: (1024x2xf32) <- (32x32x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) - del stack_3 - - # pd_op.full: (1024x1xf32) <- () - full_8 = paddle._C_ops.full( - [1024, 1], - float("16"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_9 = paddle._C_ops.full( - [1], float("64"), paddle.float64, 
paddle.core.CPUPlace() - ) - - # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) - arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") - del full_0, full_2, full_9 - - # pd_op.cast: (64xf32) <- (64xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (64xf32) <- (64xf32, 1xf32) - scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (64xf32) <- (64xf32, 1xf32) - scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) - del full_10, scale_12 - - # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) - combine_6 = [scale_13, scale_13] - del scale_13 - - # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_6) - del combine_6 - - # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) - - # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) - scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) - del full_3 - - # builtin.combine: ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32) - combine_7 = [scale_14, scale_15, scale_16, scale_17] - del scale_14, scale_15, scale_16, scale_17 - - # pd_op.stack: (64x64x4xf32) <- ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) - stack_4 = paddle._C_ops.stack(combine_7, -1) - del combine_7 - - # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) - combine_8 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) - stack_5 = paddle._C_ops.stack(combine_8, -1) - del combine_8 - - # pd_op.reshape: (4096x4xf32) <- (64x64x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) - del full_int_array_0, stack_4 - - # pd_op.reshape: (4096x2xf32) <- (64x64x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) - del full_int_array_1, stack_5 - - # pd_op.full: (4096x1xf32) <- () - full_11 = paddle._C_ops.full( - [4096, 1], - float("8"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([256x4xf32, 1024x4xf32, 4096x4xf32]) <- (256x4xf32, 1024x4xf32, 4096x4xf32) - combine_9 = [reshape_0, reshape_2, reshape_4] - - # pd_op.concat: (5376x4xf32) <- ([256x4xf32, 1024x4xf32, 4096x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_9, full_12) - del combine_9 - - # builtin.combine: ([256x2xf32, 1024x2xf32, 4096x2xf32]) <- (256x2xf32, 1024x2xf32, 4096x2xf32) - combine_10 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (5376x2xf32) <- ([256x2xf32, 1024x2xf32, 4096x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_10, full_12) - del combine_10 - - # builtin.combine: ([256x1xf32, 1024x1xf32, 4096x1xf32]) <- (256x1xf32, 1024x1xf32, 4096x1xf32) - combine_11 = [full_5, full_8, full_11] 
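The deleted subgraph_14 above unrolls PP-YOLOE's anchor-free grid construction for three fixed feature maps (16x16, 32x32, 64x64 at strides 32, 16, 8): cell centers at (i + 0.5) * stride, pseudo anchor boxes at the centers +/- 2.5 * stride (the +/-80, +/-40, +/-20 offsets), a per-cell stride column, and a concat down to 5376 rows (the concat_2 / concat_3 / concat_4 tensors around this point). Below is a minimal standalone sketch of the same computation using the public Paddle API instead of paddle._C_ops; the helper name make_anchor_grid and the cell_offset / half_extent parameters are illustrative assumptions, not PaddleDetection's interface.

import paddle


def make_anchor_grid(sizes_and_strides, cell_offset=0.5, half_extent=2.5):
    # Assumed helper: mirrors the arange/meshgrid/stack/reshape/concat sequence
    # that the generated code above bakes in with constants.
    anchors, points, stride_cols = [], [], []
    for size, stride in sizes_and_strides:
        # Cell centers in input-image coordinates: (i + 0.5) * stride.
        coords = (paddle.arange(size, dtype="float32") + cell_offset) * stride
        yy, xx = paddle.meshgrid(coords, coords)
        ext = half_extent * stride  # 80 / 40 / 20 for strides 32 / 16 / 8
        anchors.append(
            paddle.stack([xx - ext, yy - ext, xx + ext, yy + ext], axis=-1).reshape([-1, 4])
        )
        points.append(paddle.stack([xx, yy], axis=-1).reshape([-1, 2]))
        stride_cols.append(paddle.full([size * size, 1], stride, dtype="float32"))
    # 256 + 1024 + 4096 = 5376 rows for the sizes used here.
    return (
        paddle.concat(anchors, axis=0),      # 5376x4, cf. concat_2
        paddle.concat(points, axis=0),       # 5376x2, cf. concat_3
        paddle.concat(stride_cols, axis=0),  # 5376x1, cf. concat_4
    )


anchors, anchor_points, stride_tensor = make_anchor_grid([(16, 32), (32, 16), (64, 8)])

Because every shape is static here, the generated file hard-codes the arange bounds, offsets, and output shapes; the subgraph_15 variant later in this diff repeats the same construction with runtime sizes (data_0, data_1) and -1 dimensions so the grid stays dynamic.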
- del full_11, full_5, full_8 - - # pd_op.concat: (5376x1xf32) <- ([256x1xf32, 1024x1xf32, 4096x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_11, full_12) - del combine_11, full_12 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_2 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_2 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x16x16xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_0, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_3 = [1, -1, 1, 1] - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) - del parameter_52 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) - add_1 = paddle._C_ops.add(swish_0, data_0) - - # pd_op.conv2d: (2x1x16x16xf32) <- (2x384x16x16xf32, 1x384x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) - del parameter_45 - - # pd_op.add: (2x1x16x16xf32) <- (2x1x16x16xf32, 1x1x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) - del parameter_43 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x384x16x16xf32) <- (2x384x16x16xf32, 
2x384x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) - del data_0 - - # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x68x16x16xf32) <- (2x384x16x16xf32, 68x384x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) - del parameter_36 - - # pd_op.add: (2x68x16x16xf32) <- (2x68x16x16xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x1x16x16xf32) <- (2x1x16x16xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x1x256xf32) <- (2x1x16x16xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x256x1xf32) <- (2x1x256xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (2x68x256xf32) <- (2x68x16x16xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x256x68xf32) <- (2x68x256xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x32x32xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_1, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) - del parameter_34 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - 
parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) - add_6 = paddle._C_ops.add(swish_2, data_1) - - # pd_op.conv2d: (2x1x32x32xf32) <- (2x192x32x32xf32, 1x192x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) - del parameter_27 - - # pd_op.add: (2x1x32x32xf32) <- (2x1x32x32xf32, 1x1x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) - del parameter_25 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) - del data_1 - - # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x68x32x32xf32) <- (2x192x32x32xf32, 68x192x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) - del parameter_18 - - # pd_op.add: (2x68x32x32xf32) <- (2x68x32x32xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x1x32x32xf32) <- (2x1x32x32xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (2x1x1024xf32) <- (2x1x32x32xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (2x1024x1xf32) <- (2x1x1024xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x68x1024xf32) <- (2x68x32x32xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - 
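The normalizations in these deleted files all go through the same (lambda x, f: f(x)) wrapper around paddle._C_ops.batch_norm: the inner lambda pads a single-tensor return into a 6-tuple so the generated six-target unpacking works, presumably to stay robust across Paddle versions where the op returns either one output or the full tuple; only the first element (the normalized activation fed into paddle._C_ops.swish) is consumed, and the remaining outputs are deleted at the end of forward. A small helper showing the equivalent logic; the name as_six_outputs is an illustrative assumption, not an API from this patch.

def as_six_outputs(out):
    # Equivalent of the inline `lambda out: out if isinstance(out, (list, tuple))
    # else (out, None, None, None, None, None)` used throughout this file.
    if isinstance(out, (list, tuple)):
        return out
    return (out, None, None, None, None, None)


# Usage in the style of the generated code (argument order copied verbatim from
# the calls above; x and the parameter_* tensors are placeholders):
# y, *_rest = as_six_outputs(
#     paddle._C_ops.batch_norm(
#         x, parameter_23, parameter_22, parameter_21, parameter_20,
#         False, 0.9, 1e-05, "NCHW", False, False,
#     )
# )
# y is what feeds paddle._C_ops.swish(...); _rest is discarded.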
- # pd_op.transpose: (2x1024x68xf32) <- (2x68x1024xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x64x64xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_2, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) - del parameter_16 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) - add_11 = paddle._C_ops.add(swish_4, data_2) - - # pd_op.conv2d: (2x1x64x64xf32) <- (2x96x64x64xf32, 1x96x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) - del parameter_9 - - # pd_op.add: (2x1x64x64xf32) <- (2x1x64x64xf32, 1x1x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) - del parameter_7 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) - del data_2 - - # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x68x64x64xf32) <- (2x96x64x64xf32, 68x96x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) - del full_int_array_3, parameter_0 - - # pd_op.add: (2x68x64x64xf32) <- (2x68x64x64xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x1x64x64xf32) <- (2x1x64x64xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (2x1x4096xf32) <- (2x1x64x64xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (2x4096x1xf32) <- (2x1x4096xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (2x68x4096xf32) <- (2x68x64x64xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (2x4096x68xf32) <- (2x68x4096xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_13 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_13 - - # builtin.combine: ([2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32]) <- (2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32) - combine_12 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (2x5376x1xf32) <- ([2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_12, full_13) - del combine_12 - - # builtin.combine: ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32]) <- (2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32) - combine_13 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (2x5376x68xf32) <- ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_13, full_13) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_13, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_13, - full_int_array_2, - 
multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_2, - reshape_4, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt deleted file mode 100644 index cf9cecf24..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json deleted file mode 100644 index 399d8354e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py deleted file mode 100644 index 56d62b15a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/model.py +++ /dev/null @@ -1,1144 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - ): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = 
paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, 
paddle.float32 - ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 - ) - del full_5, stack_5 - - 
# pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - del full_4 - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = 
paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) - - # pd_op.conv2d: (2x1x-1x-1xf32) <- 
(2x384x-1x-1xf32, 1x384x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 - - # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # 
pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) - del parameter_34 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) - - # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x192x-1x-1xf32, 1x192x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) - del parameter_27 - - # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) - del parameter_25 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, 
parameter_22, parameter_23 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) - del parameter_18 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) - del parameter_16 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) - - # pd_op.conv2d: (2x1x-1x-1xf32) <- (2x96x-1x-1xf32, 1x96x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) - del parameter_9 - - # pd_op.add: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32, 1x1x1x1xf32) - add_12 = 
paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) - del parameter_7 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x1x-1x-1xf32) <- (2x1x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (2x1x-1xf32) <- (2x1x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (2x-1x1xf32) <- (2x1x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 - - # builtin.combine: ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32]) <- (2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (2x-1x1xf32) <- ([2x-1x1xf32, 2x-1x1xf32, 2x-1x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 - - # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - 
add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt deleted file mode 100644 index d3368bec1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b31d9174479d0938255cd2ec58334899ecf03916288acd4eafc0c43a6b55388d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json deleted file mode 100644 index 399d8354e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_human", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py deleted file mode 100644 index 268c9fefb..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/input_meta.py +++ /dev/null @@ -1,124 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [20] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [8400] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "int64" - data = [20] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 8400] - dtype = "float32" - max_val = float("3.0") - mean = float("0.0289881") - std = float("0.183368") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 20, 8400] - dtype = "float32" - max_val = float("0.911359") - mean = float("0.0080156") - std = float("0.0534019") - data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape 
= [2, 20, 8400] - dtype = "float32" - max_val = float("1.0") - mean = float("0.0014494") - std = float("0.0380434") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 20, 1] - dtype = "int32" - data = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 20, 4] - dtype = "float32" - max_val = float("640.0") - mean = float("258.282") - std = float("180.734") - data = None - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [2, 20, 8400] - dtype = "float32" - max_val = float("0.331258") - mean = float("9.44418e-05") - std = float("0.00284092") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py deleted file mode 100644 index 0eefa8f8c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/model.py +++ /dev/null @@ -1,258 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - ): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) - del data_3, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_0 = [full_1, data_0, full_1] - del full_1 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) - del greater_than_0, stack_0 - - # pd_op.full: (1xi64) <- () - full_2 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) - - # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 - ) - del argmax_0, data_2 - - # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) - del data_5, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-2] - - # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) - del full_2 - - # 
pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 - - # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) - multiply_1 = paddle._C_ops.multiply(data_6, cast_0) - del cast_0, data_6 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) - del multiply_1 - - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_1) - del argmax_1, cast_1 - - # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) - del data_7 - - # pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) - del flatten_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_1 = [full_4, data_1] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) - del gather_0, stack_1 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) - del data_8, full_int_array_2 - - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) - del flatten_1, full_3, reshape_2 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_2 = [full_4, data_1, full_7] - del data_1, full_4, full_7 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) - del gather_1, stack_2 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_8, where_1.dtype), full_8 - ) - del full_8 - - # pd_op.full: (1xi64) <- () - full_9 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (1xi64) <- (1xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_9, - [1], - 
paddle.int64, - [float("0")], - paddle.framework._current_expected_place(), - ) - del full_9 - - # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_9, where_0) - del data_9 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-1] - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_4, where_0) - del data_4, where_0 - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) - del multiply_3 - - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) - del full_6, max_0 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_0) - del multiply_2, scale_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) - del full_int_array_1, multiply_4 - - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) - del full_int_array_3, max_2 - - # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt index 27bd82e0e..d3368bec1 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/graph_hash.txt @@ -1 +1 @@ -8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file +b31d9174479d0938255cd2ec58334899ecf03916288acd4eafc0c43a6b55388d \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py index 201ee0397..268c9fefb 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/input_meta.py @@ -1,67 +1,124 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 8400] - dtype = "bool" - min_val = 0 - max_val = 2 - data = None + shape = [] + dtype = "int64" + data = [20] class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 8400, 4] - dtype = "float32" - min_val = float("-8.72054") - max_val = float("86.5058") - mean = float("34.8123") - std = float("23.6174") - data = None + shape = [] + dtype = "int64" + data = [8400] class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 8400, 4] - dtype = "float32" - max_val = float("80.0") - mean = float("34.7665") - std = float("25.0051") - data = None + shape = [] + dtype = "int64" + data = [20] class Program_weight_tensor_data_3: name = "data_3" - shape = [2, 8400, 1] + shape = [2, 8400] dtype = "float32" - max_val = float("0.911359") - mean = float("0.00967398") - std = float("0.0703979") + max_val = 
float("3.0") + mean = float("0.0289881") + std = float("0.183368") data = None class Program_weight_tensor_data_4: name = "data_4" - shape = [] + shape = [2, 20, 8400] dtype = "float32" - data = [162.523] + max_val = float("0.911359") + mean = float("0.0080156") + std = float("0.0534019") + data = None class Program_weight_tensor_data_5: name = "data_5" - shape = [2, 8400, 68] + shape = [2, 20, 8400] dtype = "float32" - min_val = float("-7.01895") - max_val = float("14.5228") - mean = float("8.74382e-06") - std = float("1.67872") + max_val = float("1.0") + mean = float("0.0014494") + std = float("0.0380434") data = None class Program_weight_tensor_data_6: name = "data_6" - shape = [8400, 2] + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 20, 1] + dtype = "int32" + data = [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 20, 4] + dtype = "float32" + max_val = float("640.0") + mean = float("258.282") + std = float("180.734") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 20, 8400] dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") + max_val = float("0.331258") + mean = float("9.44418e-05") + std = float("0.00284092") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py index e706a2c08..0eefa8f8c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_3/model.py @@ -5,505 +5,254 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (2x-1xi32) <- (2x-1xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 + full_int_array_0 = [1] - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] - - # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 - - # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 
2x-1x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 - - # pd_op.sum: (2x-1xf32) <- (2x-1x1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 - - # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 - - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) - - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) - - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) - - # pd_op.full: (1xi32) <- () + # pd_op.full: (xf32) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, split_4) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) - - # pd_op.full: (1xf32) <- () + # pd_op.full: (xi64) <- () full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() + [], float("1"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 - # pd_op.full: (1xf32) <- () + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = 
paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 + ) + del argmax_0, data_2 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) + del data_7 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 - # pd_op.full: (1xf32) <- () + # pd_op.full: (1xi32) <- () full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = 
paddle._C_ops.minimum(split_0, split_4) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) + del flatten_0 - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) - - # pd_op.full: (1xf32) <- () + # pd_op.full: (xi64) <- () full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + [], float("2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) - # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, 
paddle.int32) - del unsqueeze_2 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 68] + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 - # pd_op.tile: (2x-1x68xi32) <- (2x-1x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 - # pd_op.cast: (2x-1x68xb) <- (2x-1x68xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.masked_select: (-1xf32) <- (2x-1x68xf32, 2x-1x68xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 17] + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 - # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( + full_8 = paddle._C_ops.full( [1], float("2"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 - - # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 - - # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 - - # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.one_hot: (2x-1x2xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 ) + del full_8 - # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 - - # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + # pd_op.full: (1xi64) <- () + full_9 = paddle._C_ops.full( + [1], float("0"), paddle.int64, paddle.framework._current_expected_place() ) - # pd_op.clip: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) - del concat_0, full_7 - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 + # pd_op.assign_value_: (1xi64) <- (1xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [1], + paddle.int64, + 
[float("0")], + paddle.framework._current_expected_place(), + ) + del full_9 - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 + # pd_op.index_select: (2x-1x1xf32) <- (2x-1x2xf32, 1xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_6, float("1e-09"), True) + del full_6, max_0 - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) - del cast_4 + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 - ) + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # 
pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 + # pd_op.multiply: (2x-1x1xf32) <- (2x-1x1xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( - abs_0, - add_0, - add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) - - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_15/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/input_meta.py rename 
to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_14/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt index 896fa94fd..a2c6d8b8a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/graph_hash.txt @@ -1 +1 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file +81e1c79881631ba9c8fea543662e0c88108b9a0a23f037c923767f08270a38b3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py index 4202f26aa..1cb0adc74 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/input_meta.py @@ -1,38 +1,31 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 8400, 4] + shape = [2, 384, 16, 16] dtype = "float32" - min_val = float("0.0843685") - max_val = float("15.1723") - mean = float("4.87061") - std = float("3.19583") + min_val = float("-0.278465") + max_val = float("5.37003") + mean = float("0.223584") + std = float("0.551132") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [8400, 2] + shape = [2, 192, 32, 32] dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") + min_val = float("-0.278465") + max_val = float("7.76898") + mean = float("0.298021") + std = float("0.601257") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [8400, 1] + shape = [2, 96, 64, 64] dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") + min_val = float("-0.278465") + max_val = float("9.44375") + mean = float("0.389922") + std = float("0.624856") data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 2] - dtype = "float32" - data = [1.20075, 0.802005, 1.74863, 1.16364] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py index 561c0c35b..fc0f65e76 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/model.py @@ -5,90 +5,1046 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + 
parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + data_0, + data_1, + data_2, + ): + # pd_op.full: (1xf64) <- () full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("16"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (16xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (16xf32) <- (16xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 + + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (16xf32) <- (16xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del data_0, full_0 + # pd_op.scale: (16xf32) <- (16xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 + + # builtin.combine: ([16xf32, 16xf32]) <- (16xf32, 16xf32) + combine_0 = [scale_1, scale_1] + del scale_1 - # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) + # pd_op.meshgrid: ([16x16xf32, 16x16xf32]) <- ([16xf32, 16xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (16x16xf32, 16x16xf32) <- ([16x16xf32, 16x16xf32]) ( split_0, split_1, - ) = split_with_num_0 - del split_with_num_0 + ) = meshgrid_0 + del meshgrid_0 - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) - del full_1, split_0 + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_0 = paddle._C_ops.add(scale_0, data_1) - del scale_0 + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_1 = paddle._C_ops.add(split_1, data_1) - del data_1, split_1 + # pd_op.scale: (16x16xf32) <- (16x16xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + # builtin.combine: ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 
+ + # pd_op.stack: (16x16x4xf32) <- ([16x16xf32, 16x16xf32, 16x16xf32, 16x16xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 + + # builtin.combine: ([16x16xf32, 16x16xf32]) <- (16x16xf32, 16x16xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 + + # pd_op.stack: (16x16x2xf32) <- ([16x16xf32, 16x16xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (256x4xf32) <- (16x16x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] + + # pd_op.reshape: (256x2xf32) <- (16x16x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 + + # pd_op.full: (256x1xf32) <- () + full_5 = paddle._C_ops.full( + [256, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), ) - # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) - combine_0 = [add_0, add_1] - del add_0, add_1 + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("32"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 + # pd_op.arange: (32xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 - # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 + # pd_op.cast: (32xf32) <- (32xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) - del data_3, full_3 + # pd_op.scale: (32xf32) <- (32xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 + + # builtin.combine: ([32xf32, 32xf32]) <- (32xf32, 32xf32) + combine_3 = [scale_7, scale_7] + del scale_7 + + # pd_op.meshgrid: ([32x32xf32, 32x32xf32]) <- ([32xf32, 32xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 - # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) + # builtin.split: (32x32xf32, 32x32xf32) <- ([32x32xf32, 32x32xf32]) ( split_2, split_3, - ) = split_with_num_1 - del split_with_num_1 + ) = meshgrid_1 + del meshgrid_1 - # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) + + # pd_op.scale: (32x32xf32) <- (32x32xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + + # builtin.combine: 
([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 + + # pd_op.stack: (32x32x4xf32) <- ([32x32xf32, 32x32xf32, 32x32xf32, 32x32xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # builtin.combine: ([32x32xf32, 32x32xf32]) <- (32x32xf32, 32x32xf32) + combine_5 = [split_3, split_2] del split_2, split_3 - # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 + # pd_op.stack: (32x32x2xf32) <- ([32x32xf32, 32x32xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (1024x4xf32) <- (32x32x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 + + # pd_op.reshape: (1024x2xf32) <- (32x32x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 + + # pd_op.full: (1024x1xf32) <- () + full_8 = paddle._C_ops.full( + [1024, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("64"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (64xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] + # pd_op.cast: (64xf32) <- (64xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 - # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (64xf32) <- (64xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 + + # builtin.combine: ([64xf32, 64xf32]) <- (64xf32, 64xf32) + combine_6 = [scale_13, scale_13] + del scale_13 + + # pd_op.meshgrid: ([64x64xf32, 64x64xf32]) <- ([64xf32, 64xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 + + # builtin.split: (64x64xf32, 64x64xf32) <- ([64x64xf32, 64x64xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + + # pd_op.scale: (64x64xf32) <- (64x64xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 + + # builtin.combine: ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 + + # pd_op.stack: (64x64x4xf32) <- ([64x64xf32, 64x64xf32, 64x64xf32, 64x64xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 + + # builtin.combine: ([64x64xf32, 64x64xf32]) <- (64x64xf32, 64x64xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 + + # 
pd_op.stack: (64x64x2xf32) <- ([64x64xf32, 64x64xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 + + # pd_op.reshape: (4096x4xf32) <- (64x64x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 + + # pd_op.reshape: (4096x2xf32) <- (64x64x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 + + # pd_op.full: (4096x1xf32) <- () + full_11 = paddle._C_ops.full( + [4096, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([256x4xf32, 1024x4xf32, 4096x4xf32]) <- (256x4xf32, 1024x4xf32, 4096x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (5376x4xf32) <- ([256x4xf32, 1024x4xf32, 4096x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 + + # builtin.combine: ([256x2xf32, 1024x2xf32, 4096x2xf32]) <- (256x2xf32, 1024x2xf32, 4096x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 + + # pd_op.concat: (5376x2xf32) <- ([256x2xf32, 1024x2xf32, 4096x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([256x1xf32, 1024x1xf32, 4096x1xf32]) <- (256x1xf32, 1024x1xf32, 4096x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 + + # pd_op.concat: (5376x1xf32) <- ([256x1xf32, 1024x1xf32, 4096x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [1, 1] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x16x16xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 + + # pd_op.multiply: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, 
+ ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.add: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x16x16xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) + + # pd_op.conv2d: (2x1x16x16xf32) <- (2x384x16x16xf32, 1x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 + + # pd_op.add: (2x1x16x16xf32) <- (2x1x16x16xf32, 1x1x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 - # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 + + # pd_op.multiply: (2x384x16x16xf32) <- (2x384x16x16xf32, 2x384x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 + + # pd_op.conv2d: (2x384x16x16xf32) <- (2x384x16x16xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_42 + + # pd_op.batch_norm_: (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x16x16xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_41, + parameter_40, + parameter_39, + parameter_38, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_38, parameter_39, parameter_40, parameter_41 + + # pd_op.swish: (2x384x16x16xf32) <- (2x384x16x16xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x68x16x16xf32) <- (2x384x16x16xf32, 68x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_37 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 + + # pd_op.add: (2x68x16x16xf32) <- (2x68x16x16xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + + # pd_op.sigmoid: (2x1x16x16xf32) <- (2x1x16x16xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 + + # pd_op.flatten: (2x1x256xf32) <- (2x1x16x16xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + + # pd_op.transpose: (2x256x1xf32) <- (2x1x256xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.flatten: (2x68x256xf32) <- (2x68x16x16xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 
2, 3) + + # pd_op.transpose: (2x256x68xf32) <- (2x68x256xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 + + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x32x32xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) + del parameter_34 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 + + # pd_op.multiply: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_33 + + # pd_op.batch_norm_: (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.add: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x32x32xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) + + # pd_op.conv2d: (2x1x32x32xf32) <- (2x192x32x32xf32, 1x192x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_28 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) + del parameter_27 + + # pd_op.add: (2x1x32x32xf32) <- (2x1x32x32xf32, 1x1x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) + del parameter_25 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 + + # pd_op.multiply: (2x192x32x32xf32) <- (2x192x32x32xf32, 2x192x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 + + # pd_op.conv2d: (2x192x32x32xf32) <- (2x192x32x32xf32, 192x192x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x32x32xf32, 
192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x32x32xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x32x32xf32) <- (2x192x32x32xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x68x32x32xf32) <- (2x192x32x32xf32, 68x192x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) + del parameter_18 + + # pd_op.add: (2x68x32x32xf32) <- (2x68x32x32xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + + # pd_op.sigmoid: (2x1x32x32xf32) <- (2x1x32x32xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 + + # pd_op.flatten: (2x1x1024xf32) <- (2x1x32x32xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + + # pd_op.transpose: (2x1024x1xf32) <- (2x1x1024xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 + + # pd_op.flatten: (2x68x1024xf32) <- (2x68x32x32xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + + # pd_op.transpose: (2x1024x68xf32) <- (2x68x1024xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x64x64xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 + + # pd_op.multiply: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + 
swish_4 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.add: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x64x64xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) + + # pd_op.conv2d: (2x1x64x64xf32) <- (2x96x64x64xf32, 1x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_10 + + # pd_op.reshape: (1x1x1x1xf32) <- (1xf32, 4xi64) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) + del parameter_9 + + # pd_op.add: (2x1x64x64xf32) <- (2x1x64x64xf32, 1x1x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 + + # pd_op.multiply: (2x96x64x64xf32) <- (2x96x64x64xf32, 2x96x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 + + # pd_op.conv2d: (2x96x64x64xf32) <- (2x96x64x64xf32, 96x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.batch_norm_: (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x64x64xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x64x64xf32) <- (2x96x64x64xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x68x64x64xf32) <- (2x96x64x64xf32, 68x96x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 + + # pd_op.add: (2x68x64x64xf32) <- (2x68x64x64xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + + # pd_op.sigmoid: (2x1x64x64xf32) <- (2x1x64x64xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 + + # pd_op.flatten: (2x1x4096xf32) <- (2x1x64x64xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + + # pd_op.transpose: (2x4096x1xf32) <- (2x1x4096xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 + + # pd_op.flatten: (2x68x4096xf32) <- (2x68x64x64xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + + # pd_op.transpose: (2x4096x68xf32) <- (2x68x4096xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 + + # pd_op.full: (1xi32) <- () + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 + + # builtin.combine: ([2x256x1xf32, 2x1024x1xf32, 
2x4096x1xf32]) <- (2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] + + # pd_op.concat: (2x5376x1xf32) <- ([2x256x1xf32, 2x1024x1xf32, 2x4096x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 + + # builtin.combine: ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32]) <- (2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (2x5376x68xf32) <- ([2x256x68xf32, 2x1024x68xf32, 2x4096x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) - return divide_0 + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/shape_patches_PP-YOLOE-L_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/shape_patches_PP-YOLOE-L_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_13/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py index 8b1378917..a80fe5856 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_6/weight_meta.py @@ -1 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0172485") + max_val = 
float("0.027465") + mean = float("1.46232e-07") + std = float("0.00758165") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.193407") + max_val = float("0.203896") + mean = float("4.08909e-08") + std = float("0.0115569") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149226") + max_val = float("0.348802") + mean = float("0.0836582") + std = float("0.116186") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = float("0.92059") + max_val = float("2.01352") + mean = float("1.39698") + std = float("0.216217") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000218703") + max_val = float("0.00374521") + mean = float("0.000878038") + std = float("0.00058106") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0825809") + max_val = float("0.0417536") + mean = float("-0.00846266") + std = float("0.0198982") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0948878") + max_val = float("0.109061") + mean = float("-0.000797905") + std = float("0.0137305") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.012275") + max_val = float("0.0115959") + mean = float("-0.000356062") + std = float("0.00537855") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0233111") + max_val = float("0.0248623") + mean = float("-0.000113331") + std = float("0.0035026") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0555531") + max_val = float("0.0394738") + mean = float("0.00027914") + std = float("0.0112485") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.661613") + max_val = float("1.11986") + mean = float("0.208505") + std = float("0.335963") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.773318") + max_val = float("1.56281") + mean = float("1.11195") + std = float("0.138849") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000158442") + max_val = float("0.00563483") + mean = float("0.00119669") + std = float("0.0010054") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.217371") + max_val = float("0.0873086") + mean = float("-0.0303735") + std = float("0.050933") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.113703") + max_val = float("0.0885765") + mean = 
float("-0.00122357") + std = float("0.0140255") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.00655335") + max_val = float("0.00788225") + mean = float("-0.000702387") + std = float("0.00315771") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0695131") + max_val = float("0.0989064") + mean = float("-0.000394188") + std = float("0.00429104") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00653538") + max_val = float("0.0248622") + mean = float("1.52999e-07") + std = float("0.00624483") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.155994") + max_val = float("0.17862") + mean = float("-1.00845e-08") + std = float("0.00807346") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.111521") + max_val = float("0.136793") + mean = float("0.050372") + std = float("0.0428473") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.941879") + max_val = float("1.4895") + mean = float("1.20932") + std = float("0.101229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000166872") + max_val = float("0.00497783") + mean = float("0.00083062") + std = float("0.000691699") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0352484") + max_val = float("0.0207761") + mean = float("-0.00483897") + std = float("0.00870871") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0620273") + max_val = float("0.101258") + mean = float("-0.000222798") + std = float("0.00681063") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.0102195") + max_val = float("0.0101765") + mean = float("-0.000115416") + std = float("0.00396591") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00892386") + max_val = float("0.0199459") + mean = float("-0.000135272") + std = float("0.00154918") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0670605") + max_val = float("0.0310249") + mean = float("0.000294137") + std = float("0.00757806") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.290544") + max_val = float("0.608277") + mean = float("0.147622") + std = float("0.158959") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913214") + max_val 
= float("1.4959") + mean = float("1.08724") + std = float("0.0815711") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000173535") + max_val = float("0.00980167") + mean = float("0.00157464") + std = float("0.00163303") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.1722") + max_val = float("0.0293933") + mean = float("-0.0362368") + std = float("0.0320849") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0771266") + max_val = float("0.0600578") + mean = float("-0.00104893") + std = float("0.00708841") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00520655") + max_val = float("0.0122276") + mean = float("-0.000186298") + std = float("0.00206584") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0221196") + max_val = float("0.0288162") + mean = float("-0.000192599") + std = float("0.00153376") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00618645") + max_val = float("0.0126918") + mean = float("1.55094e-07") + std = float("0.0052247") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0898313") + max_val = float("0.115939") + mean = float("1.97397e-08") + std = float("0.00562912") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0751669") + max_val = float("0.111426") + mean = float("0.0119129") + std = float("0.0354094") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("0.969448") + max_val = float("1.49376") + mean = float("1.16935") + std = float("0.0775516") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("9.22385e-05") + max_val = float("0.00462362") + mean = float("0.000541906") + std = float("0.000444355") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("-0.0402063") + max_val = float("0.0136726") + mean = float("-0.00386345") + std = float("0.00617741") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0664712") + max_val = float("0.0691916") + mean = float("-0.000151613") + std = float("0.00377438") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.00514094") + max_val = float("0.00626889") + mean = float("-6.43167e-05") + std = float("0.00278132") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0303347") + max_val = float("0.0123028") + mean = float("-5.00562e-05") + std = float("0.000943714") + data = None + + +class 
Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0481817") + max_val = float("0.0280031") + mean = float("0.000415518") + std = float("0.0047328") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-0.369118") + max_val = float("0.494762") + mean = float("0.0350664") + std = float("0.121505") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.883076") + max_val = float("1.55393") + mean = float("1.05789") + std = float("0.0835119") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.000182895") + max_val = float("0.00777303") + mean = float("0.001135") + std = float("0.00109732") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.130947") + max_val = float("0.0318047") + mean = float("-0.0288516") + std = float("0.0238519") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0496468") + max_val = float("0.0518443") + mean = float("-0.000562622") + std = float("0.0041021") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-0.0141813") + max_val = float("0.0110761") + mean = float("-0.000147647") + std = float("0.00165558") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.109289") + max_val = float("0.057067") + mean = float("-4.32392e-05") + std = float("0.00113413") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt index 82d83ca0b..315669793 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/graph_hash.txt @@ -1 +1 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file +772280e4e0e6df48fdaa1b3bea8b0dd5cbaf7dad6c90d940fb9d38ade5d09fa6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py index 4ea29a61c..73ef358c5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/input_meta.py @@ -1,19 +1,9 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] + shape = [2, 3, 640, 640] dtype = "float32" - data = [0.241028] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "float32" - data = [1.26015] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "float32" - data = [1.07448] + min_val = float("-2.1179") + max_val = float("2.64") + mean = float("0.521346") + std = float("0.808427") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py 
b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py index 4cccb2b8e..7a218f1b7 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/model.py @@ -5,39 +5,4044 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + 
parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + data_0, + ): + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_367, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_367 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + 
parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, parameter_366 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_357, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_356, + parameter_355, + parameter_354, + parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_352, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + 
batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_342, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, 
-1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + combine_0 = [swish_5, add_1] + + # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_322, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_321, full_int_array_1) + del parameter_321 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) + multiply_0 = 
paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_0, parameter_320, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_320 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_319, + parameter_318, + parameter_317, + parameter_316, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_316, parameter_317, parameter_318, parameter_319 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_315, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_315 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_314, + parameter_313, + parameter_312, + parameter_311, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_311, parameter_312, parameter_313, parameter_314 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__60) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_10, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_310 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_309, + parameter_308, + parameter_307, + parameter_306, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_306, parameter_307, parameter_308, parameter_309 - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) - del data_0 + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__66) - # pd_op.add: (xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_305 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_13, + parameter_304, + parameter_303, + parameter_302, + parameter_301, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_301, parameter_302, parameter_303, parameter_304 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_12, parameter_300, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_300 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_299, + parameter_298, + parameter_297, + parameter_296, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_296, parameter_297, parameter_298, parameter_299 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_13, parameter_295, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_295 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(add_3) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_12, swish_14) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_285, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_285 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, parameter_284 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__96) - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_15, parameter_280, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_280 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_275, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_16 = paddle._C_ops.swish(add_5) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_16) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_1 = [swish_11, add_6] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 
4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_269, full_int_array_1) + del parameter_269 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_1, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_268 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_267, + parameter_266, + parameter_265, + parameter_264, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_264, parameter_265, parameter_266, parameter_267 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_263, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_263 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_262, + parameter_261, + parameter_260, + parameter_259, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_259, parameter_260, parameter_261, parameter_262 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__120) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_258 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_257, + parameter_256, + parameter_255, + parameter_254, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_254, parameter_255, parameter_256, parameter_257 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__126) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_253, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
del parameter_253 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_252, + parameter_251, + parameter_250, + parameter_249, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_249, parameter_250, parameter_251, parameter_252 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_20, parameter_248, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_248 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_247, + parameter_246, + parameter_245, + parameter_244, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_244, parameter_245, parameter_246, parameter_247 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__138) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_21, parameter_243, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_243 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_242, + parameter_241, + parameter_240, + parameter_239, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_239, parameter_240, parameter_241, parameter_242 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_238 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_237, + parameter_236, + parameter_235, + parameter_234, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_234, parameter_235, parameter_236, parameter_237 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) + + # pd_op.swish: (2x96x-1x-1xf32) <- 
(2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_8) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_20, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_233, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_233 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_232, + parameter_231, + parameter_230, + parameter_229, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_229, parameter_230, parameter_231, parameter_232 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_228, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_228 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_227, + parameter_226, + parameter_225, + parameter_224, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_224, parameter_225, parameter_226, parameter_227 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_223, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_223 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_222, + parameter_221, + parameter_220, + parameter_219, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_219, parameter_220, parameter_221, parameter_222 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_10) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_24) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_2 = [swish_19, add_11] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # 
pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_218 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_217, full_int_array_1) + del parameter_217 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_2, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_216 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_215, + parameter_214, + parameter_213, + parameter_212, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_212, parameter_213, parameter_214, parameter_215 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_211 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_210, + parameter_209, + parameter_208, + parameter_207, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_207, parameter_208, parameter_209, parameter_210 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__180) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_206 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_205, + parameter_204, + parameter_203, + parameter_202, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_202, parameter_203, parameter_204, parameter_205 + + # pd_op.swish: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_201 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_200, + parameter_199, + parameter_198, + parameter_197, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_197, parameter_198, parameter_199, parameter_200 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_196 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_195, + parameter_194, + parameter_193, + parameter_192, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_192, parameter_193, parameter_194, parameter_195 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__198) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_29, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_191 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_190, + parameter_189, + parameter_188, + parameter_187, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_187, parameter_188, parameter_189, parameter_190 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_29, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_186 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_185, + parameter_184, + parameter_183, + parameter_182, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_182, parameter_183, parameter_184, parameter_185 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) - del data_1 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(add_13) - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_14 = paddle._C_ops.add(swish_28, swish_30) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_3 = [swish_27, add_14] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_181, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_181 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_180, full_int_array_1) + del full_int_array_1, parameter_180 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_3, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__216) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_31, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_31, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + 
batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_16) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_35, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_35, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_35, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_4 = [swish_35, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_4, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__252) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_5 = [swish_32, swish_36] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_5, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + 
batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__258) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_38, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_25] + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_6, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_50, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__276) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_42 = paddle._C_ops.swish(add_17) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_7 = [swish_39, swish_42] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_7 = 
paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_7, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_44, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_17] + + # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_8, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__312) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + 
concat_8, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + + # 
pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_48 = paddle._C_ops.swish(add_18) + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_9 = [swish_45, swish_48] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_9, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__342) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__348) + + # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) + combine_10 = [swish_50, swish_43] + + # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_10, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = 
paddle._C_ops.conv2d( + concat_10, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__372, 
batch_norm__378) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_19) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_11 = [swish_51, swish_54] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_11, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__384) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__390) + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_12 = [swish_56, swish_37] + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_12, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__396) + + # 
pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_12, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__402) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__408) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, 
parameter_8 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_20) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_13 = [swish_57, swish_60] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__426) + del ( + add_0, + add_1, + add_10, + add_11, + add_13, + add_14, + add_16, + add_17, + add_18, + add_19, + add_20, + add_3, + add_4, + add_5, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + 
batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + 
batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + 
conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + full_int_array_2, + full_int_array_3, + full_int_array_4, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_7, + swish_8, + swish_9, + ) - return add_0 + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/input_meta.py new file mode 100644 index 000000000..903e9a326 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 3, 608, 608] + dtype = "float32" + min_val = float("-1.85379") + max_val = float("2.55285") + mean = float("0.163596") + std = float("0.495929") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 60% rename from paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py index d58909a7b..df50680d5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_13/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [384] dtype = "float32" - min_val = float("-0.65221") - max_val = float("1.18847") - mean = float("0.0280678") - std = float("0.238602") + min_val = float("-0.652218") + max_val = float("1.18857") + mean = float("0.0280274") + std = float("0.238618") data = None @@ -13,10 +13,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [384] dtype = "float32" - min_val = float("0.840777") - max_val = float("1.38166") - mean = float("0.983157") - std = float("0.0682519") + min_val = float("0.840697") + max_val = float("1.38324") + mean = float("0.98305") + std = float("0.0683125") data = None @@ -24,10 +24,10 @@ class 
Program_weight_tensor_parameter_2: name = "parameter_2" shape = [384] dtype = "float32" - min_val = float("0.00367937") - max_val = float("0.0877201") - mean = float("0.0123265") - std = float("0.0078903") + min_val = float("0.0026422") + max_val = float("0.0571294") + mean = float("0.00840019") + std = float("0.00480438") data = None @@ -35,10 +35,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [384] dtype = "float32" - min_val = float("-0.194012") - max_val = float("0.0551918") - mean = float("-0.02945") - std = float("0.036829") + min_val = float("-0.153452") + max_val = float("0.0567554") + mean = float("-0.0268421") + std = float("0.029361") data = None @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.100636") - max_val = float("0.0644305") - mean = float("-0.000367326") - std = float("0.00632618") + min_val = float("-0.0814399") + max_val = float("0.0603684") + mean = float("-0.00033216") + std = float("0.00506095") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [192] dtype = "float32" - min_val = float("-0.444539") - max_val = float("0.100308") - mean = float("-0.0845765") - std = float("0.104435") + min_val = float("-0.44561") + max_val = float("0.100505") + mean = float("-0.084569") + std = float("0.10453") data = None @@ -68,10 +68,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [192] dtype = "float32" - min_val = float("0.827634") - max_val = float("1.20887") - mean = float("0.926304") - std = float("0.0461583") + min_val = float("0.827482") + max_val = float("1.2095") + mean = float("0.926275") + std = float("0.0461945") data = None @@ -79,10 +79,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [192] dtype = "float32" - min_val = float("0.0022587") - max_val = float("0.0317087") - mean = float("0.0127237") - std = float("0.00671595") + min_val = float("0.00190456") + max_val = float("0.0205137") + mean = float("0.00863049") + std = float("0.00430541") data = None @@ -90,10 +90,10 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [192] dtype = "float32" - min_val = float("-0.0420164") - max_val = float("0.0489711") - mean = float("7.80708e-05") - std = float("0.023324") + min_val = float("-0.0318185") + max_val = float("0.0379224") + mean = float("-0.000110429") + std = float("0.0186888") data = None @@ -101,10 +101,10 @@ class Program_weight_tensor_parameter_9: name = "parameter_9" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0494705") - max_val = float("0.0588015") - mean = float("-0.000304397") - std = float("0.00403892") + min_val = float("-0.0393104") + max_val = float("0.0483612") + mean = float("-0.000255694") + std = float("0.00325694") data = None @@ -112,10 +112,10 @@ class Program_weight_tensor_parameter_10: name = "parameter_10" shape = [192] dtype = "float32" - min_val = float("-0.444539") - max_val = float("0.100308") - mean = float("-0.0845765") - std = float("0.104435") + min_val = float("-0.44561") + max_val = float("0.100505") + mean = float("-0.084569") + std = float("0.10453") data = None @@ -123,10 +123,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [192] dtype = "float32" - min_val = float("0.861989") - max_val = float("1.42115") - mean = float("1.11192") - std = float("0.0818402") + min_val = float("0.860758") + max_val = float("1.4208") + mean = 
float("1.11187") + std = float("0.0818408") data = None @@ -134,10 +134,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [192] dtype = "float32" - min_val = float("0.00625506") - max_val = float("0.0604123") - mean = float("0.0170478") - std = float("0.00762762") + min_val = float("0.00352246") + max_val = float("0.035092") + mean = float("0.0105274") + std = float("0.00437158") data = None @@ -145,10 +145,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [192] dtype = "float32" - min_val = float("-0.114496") - max_val = float("0.0773463") - mean = float("-0.0180438") - std = float("0.0309436") + min_val = float("-0.0797058") + max_val = float("0.0680474") + mean = float("-0.0157311") + std = float("0.0255872") data = None @@ -156,10 +156,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0719425") - max_val = float("0.0798922") - mean = float("-0.000137273") - std = float("0.0037425") + min_val = float("-0.0573713") + max_val = float("0.0648518") + mean = float("-0.000118352") + std = float("0.00301846") data = None @@ -167,10 +167,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [192] dtype = "float32" - min_val = float("-0.519307") - max_val = float("0.119213") - mean = float("-0.173655") - std = float("0.128078") + min_val = float("-0.519542") + max_val = float("0.119264") + mean = float("-0.17366") + std = float("0.128125") data = None @@ -178,10 +178,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [192] dtype = "float32" - min_val = float("0.843345") - max_val = float("1.65101") - mean = float("1.06412") - std = float("0.100931") + min_val = float("0.843388") + max_val = float("1.65187") + mean = float("1.06405") + std = float("0.100937") data = None @@ -189,10 +189,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [192] dtype = "float32" - min_val = float("0.0129167") - max_val = float("0.102406") - mean = float("0.0387628") - std = float("0.0164023") + min_val = float("0.0093173") + max_val = float("0.0586855") + mean = float("0.0231857") + std = float("0.00925497") data = None @@ -200,10 +200,10 @@ class Program_weight_tensor_parameter_18: name = "parameter_18" shape = [192] dtype = "float32" - min_val = float("-0.201974") - max_val = float("0.118718") - mean = float("-0.070128") - std = float("0.0516738") + min_val = float("-0.168595") + max_val = float("0.0949792") + mean = float("-0.0552586") + std = float("0.0424072") data = None @@ -211,10 +211,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0703397") - max_val = float("0.0838756") - mean = float("-0.000297626") - std = float("0.00409097") + min_val = float("-0.0550749") + max_val = float("0.0694004") + mean = float("-0.000238535") + std = float("0.00329047") data = None @@ -222,10 +222,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [192] dtype = "float32" - min_val = float("-0.45538") - max_val = float("0.186694") - mean = float("-0.0819211") - std = float("0.101908") + min_val = float("-0.455754") + max_val = float("0.186932") + mean = float("-0.0819852") + std = float("0.102015") data = None @@ -233,10 +233,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [192] dtype = "float32" - min_val = float("0.842136") - max_val = float("1.25451") - mean = 
float("1.02689") - std = float("0.0669984") + min_val = float("0.841905") + max_val = float("1.25543") + mean = float("1.02686") + std = float("0.0670556") data = None @@ -244,10 +244,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [192] dtype = "float32" - min_val = float("0.00609049") - max_val = float("0.0366868") - mean = float("0.0132496") - std = float("0.00446736") + min_val = float("0.00382876") + max_val = float("0.0168788") + mean = float("0.00831223") + std = float("0.00247857") data = None @@ -255,10 +255,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [192] dtype = "float32" - min_val = float("-0.108315") - max_val = float("0.071515") - mean = float("-0.0205931") - std = float("0.0309136") + min_val = float("-0.0941842") + max_val = float("0.0492551") + mean = float("-0.0169118") + std = float("0.0260979") data = None @@ -266,10 +266,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [192, 576, 1, 1] dtype = "float32" - min_val = float("-0.102669") - max_val = float("0.10072") - mean = float("-0.000208583") - std = float("0.00580958") + min_val = float("-0.0820429") + max_val = float("0.0807641") + mean = float("-0.00017519") + std = float("0.00465979") data = None @@ -277,10 +277,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [192] dtype = "float32" - min_val = float("-0.217714") - max_val = float("0.0349411") - mean = float("-0.0691139") - std = float("0.0385785") + min_val = float("-0.217783") + max_val = float("0.0355625") + mean = float("-0.0691304") + std = float("0.0386326") data = None @@ -288,10 +288,10 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [192] dtype = "float32" - min_val = float("0.843869") - max_val = float("1.15213") - mean = float("1.01543") - std = float("0.0502928") + min_val = float("0.843901") + max_val = float("1.15229") + mean = float("1.01548") + std = float("0.0503056") data = None @@ -299,10 +299,10 @@ class Program_weight_tensor_parameter_27: name = "parameter_27" shape = [192] dtype = "float32" - min_val = float("0.00469781") - max_val = float("0.0515763") - mean = float("0.0100063") - std = float("0.00473549") + min_val = float("0.00247351") + max_val = float("0.0164084") + mean = float("0.00608305") + std = float("0.00215721") data = None @@ -310,10 +310,10 @@ class Program_weight_tensor_parameter_28: name = "parameter_28" shape = [192] dtype = "float32" - min_val = float("-0.100338") - max_val = float("0.103443") - mean = float("-0.028043") - std = float("0.0298177") + min_val = float("-0.076298") + max_val = float("0.0940074") + mean = float("-0.0220292") + std = float("0.025105") data = None @@ -321,10 +321,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [192, 576, 1, 1] dtype = "float32" - min_val = float("-0.0445699") - max_val = float("0.0626305") - mean = float("-0.000342079") - std = float("0.00527897") + min_val = float("-0.0441749") + max_val = float("0.05036") + mean = float("-0.000272052") + std = float("0.0041864") data = None @@ -332,10 +332,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [192] dtype = "float32" - min_val = float("-0.295573") - max_val = float("-0.00727007") - mean = float("-0.0908822") - std = float("0.0602539") + min_val = float("-0.296363") + max_val = float("-0.00731421") + mean = float("-0.0909181") + std = float("0.0603085") data = None @@ -343,10 +343,10 @@ class Program_weight_tensor_parameter_31: 
name = "parameter_31" shape = [192] dtype = "float32" - min_val = float("0.782898") - max_val = float("1.34847") - mean = float("1.05287") - std = float("0.0658118") + min_val = float("0.781699") + max_val = float("1.34829") + mean = float("1.05295") + std = float("0.0659017") data = None @@ -354,10 +354,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [192] dtype = "float32" - min_val = float("0.00853608") - max_val = float("0.0834335") - mean = float("0.0220792") - std = float("0.0106956") + min_val = float("0.00518533") + max_val = float("0.0436202") + mean = float("0.0137517") + std = float("0.00626765") data = None @@ -365,10 +365,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [192] dtype = "float32" - min_val = float("-0.281933") - max_val = float("0.32665") - mean = float("-0.0476883") - std = float("0.0906137") + min_val = float("-0.24857") + max_val = float("0.288341") + mean = float("-0.0322151") + std = float("0.0764451") data = None @@ -376,10 +376,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0372086") - max_val = float("0.0434341") - mean = float("-9.58531e-05") - std = float("0.00301696") + min_val = float("-0.0319297") + max_val = float("0.0371234") + mean = float("-6.61646e-05") + std = float("0.00247009") data = None @@ -387,10 +387,10 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [192] dtype = "float32" - min_val = float("-0.529319") - max_val = float("1.03253") - mean = float("0.1482") - std = float("0.259312") + min_val = float("-0.530523") + max_val = float("1.03181") + mean = float("0.148142") + std = float("0.25938") data = None @@ -398,10 +398,10 @@ class Program_weight_tensor_parameter_36: name = "parameter_36" shape = [192] dtype = "float32" - min_val = float("0.733167") - max_val = float("1.57011") - mean = float("1.01433") - std = float("0.106495") + min_val = float("0.732244") + max_val = float("1.56838") + mean = float("1.01394") + std = float("0.106713") data = None @@ -409,10 +409,10 @@ class Program_weight_tensor_parameter_37: name = "parameter_37" shape = [192] dtype = "float32" - min_val = float("0.00533333") - max_val = float("0.0628113") - mean = float("0.0202291") - std = float("0.0104903") + min_val = float("0.00474578") + max_val = float("0.0449361") + mean = float("0.0159591") + std = float("0.00817663") data = None @@ -420,10 +420,10 @@ class Program_weight_tensor_parameter_38: name = "parameter_38" shape = [192] dtype = "float32" - min_val = float("-0.274173") - max_val = float("0.190265") - mean = float("-0.0383734") - std = float("0.0518374") + min_val = float("-0.23459") + max_val = float("0.15968") + mean = float("-0.0416686") + std = float("0.0471789") data = None @@ -431,10 +431,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.148329") - max_val = float("0.115949") - mean = float("-0.000786037") - std = float("0.0116676") + min_val = float("-0.130799") + max_val = float("0.0914109") + mean = float("-0.000795506") + std = float("0.00978873") data = None @@ -442,10 +442,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [96] dtype = "float32" - min_val = float("-0.290208") - max_val = float("0.171692") - mean = float("-0.0709438") - std = float("0.105357") + min_val = float("-0.290307") + max_val = float("0.172582") + mean = float("-0.0708609") + std = 
float("0.105586") data = None @@ -453,10 +453,10 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [96] dtype = "float32" - min_val = float("0.730214") - max_val = float("1.20725") - mean = float("0.877815") - std = float("0.0776628") + min_val = float("0.730168") + max_val = float("1.20841") + mean = float("0.877696") + std = float("0.0778222") data = None @@ -464,10 +464,10 @@ class Program_weight_tensor_parameter_42: name = "parameter_42" shape = [96] dtype = "float32" - min_val = float("0.00244432") - max_val = float("0.0359181") - mean = float("0.0138133") - std = float("0.00616422") + min_val = float("0.00209252") + max_val = float("0.0172969") + mean = float("0.00895461") + std = float("0.00362651") data = None @@ -475,10 +475,10 @@ class Program_weight_tensor_parameter_43: name = "parameter_43" shape = [96] dtype = "float32" - min_val = float("-0.0390463") - max_val = float("0.0281496") - mean = float("-0.00737283") - std = float("0.0187507") + min_val = float("-0.030881") + max_val = float("0.0264932") + mean = float("-0.00664423") + std = float("0.0163084") data = None @@ -486,10 +486,10 @@ class Program_weight_tensor_parameter_44: name = "parameter_44" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0574805") - max_val = float("0.0583116") - mean = float("-0.00132585") - std = float("0.00692157") + min_val = float("-0.0481719") + max_val = float("0.0491173") + mean = float("-0.00114232") + std = float("0.0059524") data = None @@ -497,10 +497,10 @@ class Program_weight_tensor_parameter_45: name = "parameter_45" shape = [96] dtype = "float32" - min_val = float("-0.290208") - max_val = float("0.171692") - mean = float("-0.0709438") - std = float("0.105357") + min_val = float("-0.290307") + max_val = float("0.172582") + mean = float("-0.0708609") + std = float("0.105586") data = None @@ -508,10 +508,10 @@ class Program_weight_tensor_parameter_46: name = "parameter_46" shape = [96] dtype = "float32" - min_val = float("0.969898") - max_val = float("1.3202") - mean = float("1.13218") - std = float("0.0750079") + min_val = float("0.970765") + max_val = float("1.31932") + mean = float("1.13205") + std = float("0.0751806") data = None @@ -519,10 +519,10 @@ class Program_weight_tensor_parameter_47: name = "parameter_47" shape = [96] dtype = "float32" - min_val = float("0.00695661") - max_val = float("0.0436736") - mean = float("0.0204114") - std = float("0.00835407") + min_val = float("0.00509897") + max_val = float("0.0269461") + mean = float("0.0144103") + std = float("0.00491216") data = None @@ -530,10 +530,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [96] dtype = "float32" - min_val = float("-0.0615079") - max_val = float("0.0687841") - mean = float("-0.00728561") - std = float("0.0229485") + min_val = float("-0.0593684") + max_val = float("0.0709636") + mean = float("-0.00964274") + std = float("0.0202695") data = None @@ -541,10 +541,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0888788") - max_val = float("0.0951908") - mean = float("-0.00012594") - std = float("0.00695592") + min_val = float("-0.0744412") + max_val = float("0.0808028") + mean = float("-0.000134804") + std = float("0.00586885") data = None @@ -552,10 +552,10 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [96] dtype = "float32" - min_val = float("-0.672726") - max_val = float("0.111066") - mean = float("-0.258997") - std = 
float("0.150238") + min_val = float("-0.672978") + max_val = float("0.110937") + mean = float("-0.259195") + std = float("0.150512") data = None @@ -563,10 +563,10 @@ class Program_weight_tensor_parameter_51: name = "parameter_51" shape = [96] dtype = "float32" - min_val = float("0.802266") - max_val = float("1.40896") - mean = float("1.04531") - std = float("0.116924") + min_val = float("0.800552") + max_val = float("1.41215") + mean = float("1.04504") + std = float("0.11692") data = None @@ -574,10 +574,10 @@ class Program_weight_tensor_parameter_52: name = "parameter_52" shape = [96] dtype = "float32" - min_val = float("0.0207014") - max_val = float("0.108587") - mean = float("0.0455437") - std = float("0.0186345") + min_val = float("0.0141801") + max_val = float("0.0721458") + mean = float("0.0300632") + std = float("0.0107204") data = None @@ -585,10 +585,10 @@ class Program_weight_tensor_parameter_53: name = "parameter_53" shape = [96] dtype = "float32" - min_val = float("-0.112693") - max_val = float("0.0516466") - mean = float("-0.0468167") - std = float("0.0294565") + min_val = float("-0.0891538") + max_val = float("0.0552256") + mean = float("-0.0348822") + std = float("0.0246653") data = None @@ -596,10 +596,10 @@ class Program_weight_tensor_parameter_54: name = "parameter_54" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0830006") - max_val = float("0.106216") - mean = float("-0.00053104") - std = float("0.00775187") + min_val = float("-0.0755165") + max_val = float("0.0788485") + mean = float("-0.00042175") + std = float("0.00651052") data = None @@ -607,10 +607,10 @@ class Program_weight_tensor_parameter_55: name = "parameter_55" shape = [96] dtype = "float32" - min_val = float("-0.642948") - max_val = float("0.150819") - mean = float("-0.155351") - std = float("0.115348") + min_val = float("-0.644167") + max_val = float("0.152209") + mean = float("-0.155579") + std = float("0.115703") data = None @@ -618,10 +618,10 @@ class Program_weight_tensor_parameter_56: name = "parameter_56" shape = [96] dtype = "float32" - min_val = float("0.84901") - max_val = float("1.26259") - mean = float("1.03349") - std = float("0.0720544") + min_val = float("0.849539") + max_val = float("1.26621") + mean = float("1.03329") + std = float("0.0722571") data = None @@ -629,10 +629,10 @@ class Program_weight_tensor_parameter_57: name = "parameter_57" shape = [96] dtype = "float32" - min_val = float("0.00947266") - max_val = float("0.0469677") - mean = float("0.0192275") - std = float("0.00716215") + min_val = float("0.00630263") + max_val = float("0.0491987") + mean = float("0.01393") + std = float("0.00582943") data = None @@ -640,10 +640,10 @@ class Program_weight_tensor_parameter_58: name = "parameter_58" shape = [96] dtype = "float32" - min_val = float("-0.135148") - max_val = float("0.0288217") - mean = float("-0.0359178") - std = float("0.0292974") + min_val = float("-0.115691") + max_val = float("0.0294622") + mean = float("-0.0334293") + std = float("0.0282444") data = None @@ -651,10 +651,10 @@ class Program_weight_tensor_parameter_59: name = "parameter_59" shape = [96, 288, 1, 1] dtype = "float32" - min_val = float("-0.0792001") - max_val = float("0.0950111") - mean = float("-0.00062642") - std = float("0.0105349") + min_val = float("-0.0675787") + max_val = float("0.0715923") + mean = float("-0.000602525") + std = float("0.00883627") data = None @@ -662,10 +662,10 @@ class Program_weight_tensor_parameter_60: name = "parameter_60" shape = [96] dtype = "float32" - min_val = 
float("-0.198091") - max_val = float("0.0826105") - mean = float("-0.0299011") - std = float("0.0459564") + min_val = float("-0.19838") + max_val = float("0.0828483") + mean = float("-0.0298722") + std = float("0.0460234") data = None @@ -673,10 +673,10 @@ class Program_weight_tensor_parameter_61: name = "parameter_61" shape = [96] dtype = "float32" - min_val = float("0.685979") - max_val = float("1.33637") - mean = float("0.95441") - std = float("0.0883622") + min_val = float("0.684756") + max_val = float("1.33599") + mean = float("0.954296") + std = float("0.0884899") data = None @@ -684,10 +684,10 @@ class Program_weight_tensor_parameter_62: name = "parameter_62" shape = [96] dtype = "float32" - min_val = float("0.00495099") - max_val = float("0.0554625") - mean = float("0.012309") - std = float("0.00627308") + min_val = float("0.00456548") + max_val = float("0.0382428") + mean = float("0.00924608") + std = float("0.00437704") data = None @@ -695,10 +695,10 @@ class Program_weight_tensor_parameter_63: name = "parameter_63" shape = [96] dtype = "float32" - min_val = float("-0.0965082") - max_val = float("0.0676515") - mean = float("-0.0128269") - std = float("0.0323591") + min_val = float("-0.0819243") + max_val = float("0.0486769") + mean = float("-0.0149872") + std = float("0.0303643") data = None @@ -706,10 +706,10 @@ class Program_weight_tensor_parameter_64: name = "parameter_64" shape = [96, 288, 1, 1] dtype = "float32" - min_val = float("-0.0773482") - max_val = float("0.0795383") - mean = float("-0.000205562") - std = float("0.00892342") + min_val = float("-0.0851124") + max_val = float("0.0753681") + mean = float("-0.000267914") + std = float("0.00746335") data = None @@ -717,10 +717,10 @@ class Program_weight_tensor_parameter_65: name = "parameter_65" shape = [96] dtype = "float32" - min_val = float("-0.3351") - max_val = float("0.0180339") - mean = float("-0.108666") - std = float("0.0839689") + min_val = float("-0.335793") + max_val = float("0.0181512") + mean = float("-0.108645") + std = float("0.0840511") data = None @@ -728,10 +728,10 @@ class Program_weight_tensor_parameter_66: name = "parameter_66" shape = [96] dtype = "float32" - min_val = float("0.730829") - max_val = float("1.20386") - mean = float("1.0551") - std = float("0.0746036") + min_val = float("0.72996") + max_val = float("1.20589") + mean = float("1.05546") + std = float("0.0750732") data = None @@ -739,10 +739,10 @@ class Program_weight_tensor_parameter_67: name = "parameter_67" shape = [96] dtype = "float32" - min_val = float("0.00893169") - max_val = float("0.0609817") - mean = float("0.0216752") - std = float("0.0103436") + min_val = float("0.00560508") + max_val = float("0.0383423") + mean = float("0.0151426") + std = float("0.00688387") data = None @@ -750,10 +750,10 @@ class Program_weight_tensor_parameter_68: name = "parameter_68" shape = [96] dtype = "float32" - min_val = float("-0.43779") - max_val = float("0.530239") - mean = float("-0.00359941") - std = float("0.156358") + min_val = float("-0.364406") + max_val = float("0.413851") + mean = float("-0.0185548") + std = float("0.134763") data = None @@ -761,10 +761,10 @@ class Program_weight_tensor_parameter_69: name = "parameter_69" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0647316") - max_val = float("0.0613698") - mean = float("-1.86625e-05") - std = float("0.00623329") + min_val = float("-0.0556938") + max_val = float("0.0565333") + mean = float("-3.0497e-05") + std = float("0.00548975") data = None @@ -772,10 +772,10 @@ 
class Program_weight_tensor_parameter_70: name = "parameter_70" shape = [96] dtype = "float32" - min_val = float("-1.07261") - max_val = float("2.35998") - mean = float("0.312216") - std = float("0.587121") + min_val = float("-1.07759") + max_val = float("2.35772") + mean = float("0.310808") + std = float("0.586812") data = None @@ -783,10 +783,10 @@ class Program_weight_tensor_parameter_71: name = "parameter_71" shape = [96] dtype = "float32" - min_val = float("0.476972") - max_val = float("1.40751") - mean = float("0.884046") - std = float("0.166927") + min_val = float("0.468543") + max_val = float("1.40561") + mean = float("0.882514") + std = float("0.167348") data = None @@ -794,10 +794,10 @@ class Program_weight_tensor_parameter_72: name = "parameter_72" shape = [96] dtype = "float32" - min_val = float("0.00642742") - max_val = float("0.129825") - mean = float("0.0330591") - std = float("0.0219559") + min_val = float("0.00504964") + max_val = float("0.106532") + mean = float("0.0264739") + std = float("0.0164013") data = None @@ -805,10 +805,10 @@ class Program_weight_tensor_parameter_73: name = "parameter_73" shape = [96] dtype = "float32" - min_val = float("-0.294202") - max_val = float("0.222749") - mean = float("-0.0136424") - std = float("0.071643") + min_val = float("-0.217561") + max_val = float("0.181267") + mean = float("-0.0162134") + std = float("0.0711045") data = None @@ -816,10 +816,10 @@ class Program_weight_tensor_parameter_74: name = "parameter_74" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.188209") - max_val = float("0.120938") - mean = float("-0.000991715") - std = float("0.0220383") + min_val = float("-0.149095") + max_val = float("0.112249") + mean = float("-0.00101846") + std = float("0.0196923") data = None @@ -863,10 +863,10 @@ class Program_weight_tensor_parameter_79: name = "parameter_79" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0700832") - max_val = float("0.0649644") - mean = float("-0.00214276") - std = float("0.0125585") + min_val = float("-0.064872") + max_val = float("0.067209") + mean = float("-0.00182682") + std = float("0.0118552") data = None @@ -910,10 +910,10 @@ class Program_weight_tensor_parameter_84: name = "parameter_84" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.136078") - max_val = float("0.180716") - mean = float("-0.000282686") - std = float("0.0138341") + min_val = float("-0.126562") + max_val = float("0.145589") + mean = float("-0.00030101") + std = float("0.0128623") data = None @@ -957,10 +957,10 @@ class Program_weight_tensor_parameter_89: name = "parameter_89" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.114709") - max_val = float("0.160677") - mean = float("-0.000863383") - std = float("0.0150497") + min_val = float("-0.105155") + max_val = float("0.121463") + mean = float("-0.000928041") + std = float("0.0141327") data = None @@ -1004,10 +1004,10 @@ class Program_weight_tensor_parameter_94: name = "parameter_94" shape = [48, 224, 1, 1] dtype = "float32" - min_val = float("-0.241983") - max_val = float("0.156755") - mean = float("-0.000908695") - std = float("0.0191741") + min_val = float("-0.18791") + max_val = float("0.126171") + mean = float("-0.00105876") + std = float("0.0178172") data = None @@ -1051,10 +1051,10 @@ class Program_weight_tensor_parameter_99: name = "parameter_99" shape = [48, 224, 1, 1] dtype = "float32" - min_val = float("-0.112347") - max_val = float("0.148275") - mean = float("0.00022258") - std = float("0.0143593") + min_val = 
float("-0.102039") + max_val = float("0.121479") + mean = float("2.99226e-05") + std = float("0.0123136") data = None @@ -1062,10 +1062,10 @@ class Program_weight_tensor_parameter_100: name = "parameter_100" shape = [96] dtype = "float32" - min_val = float("-0.354063") - max_val = float("0.389439") - mean = float("-0.0077798") - std = float("0.135547") + min_val = float("-0.355935") + max_val = float("0.392551") + mean = float("-0.00746683") + std = float("0.135778") data = None @@ -1073,10 +1073,10 @@ class Program_weight_tensor_parameter_101: name = "parameter_101" shape = [96] dtype = "float32" - min_val = float("0.581944") - max_val = float("1.61146") - mean = float("0.798361") - std = float("0.141114") + min_val = float("0.58214") + max_val = float("1.61741") + mean = float("0.798871") + std = float("0.141455") data = None @@ -1084,10 +1084,10 @@ class Program_weight_tensor_parameter_102: name = "parameter_102" shape = [96] dtype = "float32" - min_val = float("0.00926414") - max_val = float("0.0779837") - mean = float("0.0233765") - std = float("0.011586") + min_val = float("0.00708678") + max_val = float("0.0623589") + mean = float("0.0174758") + std = float("0.00821209") data = None @@ -1095,10 +1095,10 @@ class Program_weight_tensor_parameter_103: name = "parameter_103" shape = [96] dtype = "float32" - min_val = float("-0.205203") - max_val = float("0.065409") - mean = float("-0.0368353") - std = float("0.0408489") + min_val = float("-0.138992") + max_val = float("0.0786226") + mean = float("-0.0313739") + std = float("0.0364738") data = None @@ -1106,10 +1106,10 @@ class Program_weight_tensor_parameter_104: name = "parameter_104" shape = [96, 192, 1, 1] dtype = "float32" - min_val = float("-0.107548") - max_val = float("0.121609") - mean = float("-0.00106908") - std = float("0.0142625") + min_val = float("-0.0964661") + max_val = float("0.0936981") + mean = float("-0.000909439") + std = float("0.0125162") data = None @@ -1117,10 +1117,10 @@ class Program_weight_tensor_parameter_105: name = "parameter_105" shape = [192] dtype = "float32" - min_val = float("-0.33747") - max_val = float("0.174782") - mean = float("-0.0799299") - std = float("0.089405") + min_val = float("-0.337392") + max_val = float("0.172941") + mean = float("-0.0803987") + std = float("0.0895922") data = None @@ -1128,10 +1128,10 @@ class Program_weight_tensor_parameter_106: name = "parameter_106" shape = [192] dtype = "float32" - min_val = float("0.697042") - max_val = float("1.47363") - mean = float("0.990958") - std = float("0.0990363") + min_val = float("0.695083") + max_val = float("1.47609") + mean = float("0.990456") + std = float("0.0995214") data = None @@ -1139,10 +1139,10 @@ class Program_weight_tensor_parameter_107: name = "parameter_107" shape = [192] dtype = "float32" - min_val = float("0.0106758") - max_val = float("0.0901736") - mean = float("0.0233258") - std = float("0.00975982") + min_val = float("0.00754238") + max_val = float("0.0988877") + mean = float("0.0184313") + std = float("0.00883254") data = None @@ -1150,10 +1150,10 @@ class Program_weight_tensor_parameter_108: name = "parameter_108" shape = [192] dtype = "float32" - min_val = float("-0.218067") - max_val = float("0.191552") - mean = float("-0.054689") - std = float("0.0548119") + min_val = float("-0.204684") + max_val = float("0.16103") + mean = float("-0.0503988") + std = float("0.049554") data = None @@ -1161,10 +1161,10 @@ class Program_weight_tensor_parameter_109: name = "parameter_109" shape = [192, 192, 1, 1] dtype = "float32" 
- min_val = float("-0.11401") - max_val = float("0.147871") - mean = float("-0.00132952") - std = float("0.0140748") + min_val = float("-0.099788") + max_val = float("0.114986") + mean = float("-0.00122472") + std = float("0.0120725") data = None @@ -1172,10 +1172,10 @@ class Program_weight_tensor_parameter_110: name = "parameter_110" shape = [96] dtype = "float32" - min_val = float("-0.308172") - max_val = float("0.100695") - mean = float("-0.0814609") - std = float("0.0991118") + min_val = float("-0.307239") + max_val = float("0.101878") + mean = float("-0.0816011") + std = float("0.0993456") data = None @@ -1183,10 +1183,10 @@ class Program_weight_tensor_parameter_111: name = "parameter_111" shape = [96] dtype = "float32" - min_val = float("0.552538") - max_val = float("0.935439") - mean = float("0.809676") - std = float("0.0654097") + min_val = float("0.551598") + max_val = float("0.936081") + mean = float("0.809201") + std = float("0.0653613") data = None @@ -1194,10 +1194,10 @@ class Program_weight_tensor_parameter_112: name = "parameter_112" shape = [96] dtype = "float32" - min_val = float("0.0077781") - max_val = float("0.0360303") - mean = float("0.0172914") - std = float("0.0054044") + min_val = float("0.0035") + max_val = float("0.0258289") + mean = float("0.0122223") + std = float("0.00384374") data = None @@ -1205,10 +1205,10 @@ class Program_weight_tensor_parameter_113: name = "parameter_113" shape = [96] dtype = "float32" - min_val = float("-0.0473472") - max_val = float("0.0334054") - mean = float("-0.0169048") - std = float("0.0189166") + min_val = float("-0.0463445") + max_val = float("0.0326283") + mean = float("-0.0141616") + std = float("0.0176504") data = None @@ -1216,10 +1216,10 @@ class Program_weight_tensor_parameter_114: name = "parameter_114" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0532353") - max_val = float("0.0545219") - mean = float("-0.0018903") - std = float("0.00933137") + min_val = float("-0.0439526") + max_val = float("0.0551086") + mean = float("-0.00157619") + std = float("0.00827761") data = None @@ -1227,10 +1227,10 @@ class Program_weight_tensor_parameter_115: name = "parameter_115" shape = [96] dtype = "float32" - min_val = float("-0.308172") - max_val = float("0.100695") - mean = float("-0.0814609") - std = float("0.0991118") + min_val = float("-0.307239") + max_val = float("0.101878") + mean = float("-0.0816011") + std = float("0.0993456") data = None @@ -1238,10 +1238,10 @@ class Program_weight_tensor_parameter_116: name = "parameter_116" shape = [96] dtype = "float32" - min_val = float("0.843695") - max_val = float("1.28928") - mean = float("1.0347") - std = float("0.0944676") + min_val = float("0.842432") + max_val = float("1.28751") + mean = float("1.03469") + std = float("0.0943599") data = None @@ -1249,10 +1249,10 @@ class Program_weight_tensor_parameter_117: name = "parameter_117" shape = [96] dtype = "float32" - min_val = float("0.0169339") - max_val = float("0.161601") - mean = float("0.0363965") - std = float("0.0171841") + min_val = float("0.013302") + max_val = float("0.0739954") + mean = float("0.0273068") + std = float("0.00856333") data = None @@ -1260,10 +1260,10 @@ class Program_weight_tensor_parameter_118: name = "parameter_118" shape = [96] dtype = "float32" - min_val = float("-0.0918578") - max_val = float("0.0336943") - mean = float("-0.0247889") - std = float("0.0245036") + min_val = float("-0.0788659") + max_val = float("0.0467382") + mean = float("-0.0227617") + std = float("0.0268656") data = None 
@@ -1271,10 +1271,10 @@ class Program_weight_tensor_parameter_119: name = "parameter_119" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.101803") - max_val = float("0.200902") - mean = float("-0.000267266") - std = float("0.0083296") + min_val = float("-0.0842602") + max_val = float("0.155772") + mean = float("-0.00023666") + std = float("0.00725825") data = None @@ -1282,10 +1282,10 @@ class Program_weight_tensor_parameter_120: name = "parameter_120" shape = [96] dtype = "float32" - min_val = float("-0.728683") - max_val = float("0.317979") - mean = float("-0.275123") - std = float("0.174815") + min_val = float("-0.731847") + max_val = float("0.315882") + mean = float("-0.275694") + std = float("0.175237") data = None @@ -1293,10 +1293,10 @@ class Program_weight_tensor_parameter_121: name = "parameter_121" shape = [96] dtype = "float32" - min_val = float("0.764105") - max_val = float("1.3124") - mean = float("1.04343") - std = float("0.115762") + min_val = float("0.765079") + max_val = float("1.30982") + mean = float("1.04342") + std = float("0.115486") data = None @@ -1304,10 +1304,10 @@ class Program_weight_tensor_parameter_122: name = "parameter_122" shape = [96] dtype = "float32" - min_val = float("0.0272856") - max_val = float("0.0961064") - mean = float("0.0504816") - std = float("0.0161463") + min_val = float("0.0173859") + max_val = float("0.0675619") + mean = float("0.0355326") + std = float("0.00949136") data = None @@ -1315,10 +1315,10 @@ class Program_weight_tensor_parameter_123: name = "parameter_123" shape = [96] dtype = "float32" - min_val = float("-0.135954") - max_val = float("0.0746318") - mean = float("-0.0577229") - std = float("0.0449165") + min_val = float("-0.119758") + max_val = float("0.0576312") + mean = float("-0.0514595") + std = float("0.0439535") data = None @@ -1326,10 +1326,10 @@ class Program_weight_tensor_parameter_124: name = "parameter_124" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.185749") - max_val = float("0.160808") - mean = float("-0.000603971") - std = float("0.00986059") + min_val = float("-0.141323") + max_val = float("0.125212") + mean = float("-0.000531571") + std = float("0.00857427") data = None @@ -1337,10 +1337,10 @@ class Program_weight_tensor_parameter_125: name = "parameter_125" shape = [96] dtype = "float32" - min_val = float("-0.646446") - max_val = float("0.382953") - mean = float("-0.253615") - std = float("0.209383") + min_val = float("-0.649336") + max_val = float("0.386205") + mean = float("-0.253708") + std = float("0.210117") data = None @@ -1348,10 +1348,10 @@ class Program_weight_tensor_parameter_126: name = "parameter_126" shape = [96] dtype = "float32" - min_val = float("0.737499") - max_val = float("1.37821") - mean = float("1.02572") - std = float("0.122249") + min_val = float("0.743859") + max_val = float("1.37989") + mean = float("1.02545") + std = float("0.12205") data = None @@ -1359,10 +1359,10 @@ class Program_weight_tensor_parameter_127: name = "parameter_127" shape = [96] dtype = "float32" - min_val = float("0.00985305") - max_val = float("0.0428714") - mean = float("0.0193938") - std = float("0.00597774") + min_val = float("0.00720624") + max_val = float("0.0314703") + mean = float("0.0139725") + std = float("0.00426494") data = None @@ -1370,10 +1370,10 @@ class Program_weight_tensor_parameter_128: name = "parameter_128" shape = [96] dtype = "float32" - min_val = float("-0.372134") - max_val = float("0.319757") - mean = float("0.0183732") - std = float("0.0780363") + min_val 
= float("-0.313971") + max_val = float("0.275864") + mean = float("0.0100895") + std = float("0.0634222") data = None @@ -1381,10 +1381,10 @@ class Program_weight_tensor_parameter_129: name = "parameter_129" shape = [96, 448, 1, 1] dtype = "float32" - min_val = float("-0.203733") - max_val = float("0.134243") - mean = float("-0.000669446") - std = float("0.0129484") + min_val = float("-0.167863") + max_val = float("0.111638") + mean = float("-0.000518773") + std = float("0.0111108") data = None @@ -1392,10 +1392,10 @@ class Program_weight_tensor_parameter_130: name = "parameter_130" shape = [96] dtype = "float32" - min_val = float("-0.238657") - max_val = float("0.170206") - mean = float("-0.0409469") - std = float("0.0883869") + min_val = float("-0.23906") + max_val = float("0.172271") + mean = float("-0.0410503") + std = float("0.0886321") data = None @@ -1403,10 +1403,10 @@ class Program_weight_tensor_parameter_131: name = "parameter_131" shape = [96] dtype = "float32" - min_val = float("0.915723") - max_val = float("1.41222") - mean = float("1.07273") - std = float("0.0922586") + min_val = float("0.917932") + max_val = float("1.41199") + mean = float("1.07275") + std = float("0.0920705") data = None @@ -1414,10 +1414,10 @@ class Program_weight_tensor_parameter_132: name = "parameter_132" shape = [96] dtype = "float32" - min_val = float("0.00786671") - max_val = float("0.0588079") - mean = float("0.0163456") - std = float("0.0066827") + min_val = float("0.00501564") + max_val = float("0.0446779") + mean = float("0.0113275") + std = float("0.00531961") data = None @@ -1425,10 +1425,10 @@ class Program_weight_tensor_parameter_133: name = "parameter_133" shape = [96] dtype = "float32" - min_val = float("-0.11548") - max_val = float("0.0928178") - mean = float("0.00883762") - std = float("0.0363849") + min_val = float("-0.066575") + max_val = float("0.0649515") + mean = float("0.00698475") + std = float("0.0279665") data = None @@ -1436,10 +1436,10 @@ class Program_weight_tensor_parameter_134: name = "parameter_134" shape = [96, 448, 1, 1] dtype = "float32" - min_val = float("-0.0962928") - max_val = float("0.16056") - mean = float("-0.000559513") - std = float("0.0114298") + min_val = float("-0.112935") + max_val = float("0.117872") + mean = float("-0.000426335") + std = float("0.00968317") data = None @@ -1447,10 +1447,10 @@ class Program_weight_tensor_parameter_135: name = "parameter_135" shape = [192] dtype = "float32" - min_val = float("-0.538624") - max_val = float("-0.101332") - mean = float("-0.294138") - std = float("0.0707416") + min_val = float("-0.540293") + max_val = float("-0.102122") + mean = float("-0.294636") + std = float("0.0708506") data = None @@ -1458,10 +1458,10 @@ class Program_weight_tensor_parameter_136: name = "parameter_136" shape = [192] dtype = "float32" - min_val = float("0.651882") - max_val = float("1.08011") - mean = float("0.852092") - std = float("0.0724292") + min_val = float("0.648817") + max_val = float("1.08017") + mean = float("0.851902") + std = float("0.0725407") data = None @@ -1469,10 +1469,10 @@ class Program_weight_tensor_parameter_137: name = "parameter_137" shape = [192] dtype = "float32" - min_val = float("0.0137887") - max_val = float("0.0661338") - mean = float("0.0268937") - std = float("0.00906335") + min_val = float("0.00844823") + max_val = float("0.0608477") + mean = float("0.0196898") + std = float("0.00777921") data = None @@ -1480,10 +1480,10 @@ class Program_weight_tensor_parameter_138: name = "parameter_138" shape = [192] dtype = 
"float32" - min_val = float("-0.120176") - max_val = float("0.0680941") - mean = float("-0.0396069") - std = float("0.0327503") + min_val = float("-0.103642") + max_val = float("0.0398287") + mean = float("-0.0337268") + std = float("0.0283827") data = None @@ -1491,10 +1491,10 @@ class Program_weight_tensor_parameter_139: name = "parameter_139" shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.0639447") - max_val = float("0.0672375") - mean = float("-0.000833786") - std = float("0.0103229") + min_val = float("-0.0581647") + max_val = float("0.0551684") + mean = float("-0.000715396") + std = float("0.00866401") data = None @@ -1502,10 +1502,10 @@ class Program_weight_tensor_parameter_140: name = "parameter_140" shape = [384] dtype = "float32" - min_val = float("-0.521228") - max_val = float("0.213986") - mean = float("-0.1682") - std = float("0.0775017") + min_val = float("-0.522165") + max_val = float("0.213846") + mean = float("-0.168509") + std = float("0.0774913") data = None @@ -1513,10 +1513,10 @@ class Program_weight_tensor_parameter_141: name = "parameter_141" shape = [384] dtype = "float32" - min_val = float("0.850237") - max_val = float("1.39388") - mean = float("1.0626") - std = float("0.0773034") + min_val = float("0.849121") + max_val = float("1.39276") + mean = float("1.06266") + std = float("0.0773261") data = None @@ -1524,10 +1524,10 @@ class Program_weight_tensor_parameter_142: name = "parameter_142" shape = [384] dtype = "float32" - min_val = float("0.0088734") - max_val = float("0.0441515") - mean = float("0.0191098") - std = float("0.00573499") + min_val = float("0.00619419") + max_val = float("0.0326996") + mean = float("0.0131745") + std = float("0.00396857") data = None @@ -1535,10 +1535,10 @@ class Program_weight_tensor_parameter_143: name = "parameter_143" shape = [384] dtype = "float32" - min_val = float("-0.129402") - max_val = float("0.0875489") - mean = float("-0.0445092") - std = float("0.0396322") + min_val = float("-0.120299") + max_val = float("0.0721486") + mean = float("-0.0363523") + std = float("0.0318809") data = None @@ -1546,10 +1546,10 @@ class Program_weight_tensor_parameter_144: name = "parameter_144" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.121659") - max_val = float("0.133055") - mean = float("-0.000702422") - std = float("0.00934463") + min_val = float("-0.0950377") + max_val = float("0.108554") + mean = float("-0.000578436") + std = float("0.00775758") data = None @@ -1557,10 +1557,10 @@ class Program_weight_tensor_parameter_145: name = "parameter_145" shape = [192] dtype = "float32" - min_val = float("-0.382852") - max_val = float("0.227352") - mean = float("-0.117886") - std = float("0.101914") + min_val = float("-0.384308") + max_val = float("0.227819") + mean = float("-0.118179") + std = float("0.102012") data = None @@ -1568,10 +1568,10 @@ class Program_weight_tensor_parameter_146: name = "parameter_146" shape = [192] dtype = "float32" - min_val = float("0.86878") - max_val = float("1.51462") - mean = float("1.12296") - std = float("0.11892") + min_val = float("0.868523") + max_val = float("1.51316") + mean = float("1.12307") + std = float("0.119072") data = None @@ -1579,10 +1579,10 @@ class Program_weight_tensor_parameter_147: name = "parameter_147" shape = [192] dtype = "float32" - min_val = float("0.0805715") - max_val = float("0.861815") - mean = float("0.24173") - std = float("0.115689") + min_val = float("0.0671554") + max_val = float("0.590166") + mean = float("0.189132") + std = 
float("0.0743371") data = None @@ -1590,10 +1590,10 @@ class Program_weight_tensor_parameter_148: name = "parameter_148" shape = [192] dtype = "float32" - min_val = float("-1.76276") - max_val = float("1.36192") - mean = float("-0.20097") - std = float("0.618042") + min_val = float("-1.66989") + max_val = float("0.864913") + mean = float("-0.128574") + std = float("0.49302") data = None @@ -1601,10 +1601,10 @@ class Program_weight_tensor_parameter_149: name = "parameter_149" shape = [192, 768, 1, 1] dtype = "float32" - min_val = float("-0.143123") - max_val = float("0.101835") - mean = float("-0.000122669") - std = float("0.00812429") + min_val = float("-0.113072") + max_val = float("0.078636") + mean = float("-9.22477e-05") + std = float("0.00673866") data = None @@ -1612,10 +1612,10 @@ class Program_weight_tensor_parameter_150: name = "parameter_150" shape = [192] dtype = "float32" - min_val = float("-0.242929") - max_val = float("0.168527") - mean = float("-0.0174021") - std = float("0.0538902") + min_val = float("-0.24328") + max_val = float("0.168953") + mean = float("-0.0173023") + std = float("0.0539756") data = None @@ -1623,10 +1623,10 @@ class Program_weight_tensor_parameter_151: name = "parameter_151" shape = [192] dtype = "float32" - min_val = float("0.618325") - max_val = float("1.01596") - mean = float("0.837489") - std = float("0.06312") + min_val = float("0.617702") + max_val = float("1.01648") + mean = float("0.837238") + std = float("0.0631802") data = None @@ -1634,10 +1634,10 @@ class Program_weight_tensor_parameter_152: name = "parameter_152" shape = [192] dtype = "float32" - min_val = float("0.00769304") - max_val = float("0.0329157") - mean = float("0.0170219") - std = float("0.00463802") + min_val = float("0.00614033") + max_val = float("0.0266385") + mean = float("0.0103423") + std = float("0.0026319") data = None @@ -1645,10 +1645,10 @@ class Program_weight_tensor_parameter_153: name = "parameter_153" shape = [192] dtype = "float32" - min_val = float("-0.141686") - max_val = float("0.0792819") - mean = float("-0.0667949") - std = float("0.0484579") + min_val = float("-0.127304") + max_val = float("0.0867262") + mean = float("-0.0538262") + std = float("0.0399598") data = None @@ -1656,10 +1656,10 @@ class Program_weight_tensor_parameter_154: name = "parameter_154" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0463403") - max_val = float("0.0758786") - mean = float("-0.00171087") - std = float("0.00771467") + min_val = float("-0.0383872") + max_val = float("0.0621299") + mean = float("-0.00140552") + std = float("0.00644221") data = None @@ -1667,10 +1667,10 @@ class Program_weight_tensor_parameter_155: name = "parameter_155" shape = [192] dtype = "float32" - min_val = float("-0.242929") - max_val = float("0.168527") - mean = float("-0.0174021") - std = float("0.0538902") + min_val = float("-0.24328") + max_val = float("0.168953") + mean = float("-0.0173023") + std = float("0.0539756") data = None @@ -1678,10 +1678,10 @@ class Program_weight_tensor_parameter_156: name = "parameter_156" shape = [192] dtype = "float32" - min_val = float("0.874574") - max_val = float("1.46208") - mean = float("1.1059") - std = float("0.129661") + min_val = float("0.874918") + max_val = float("1.46078") + mean = float("1.10611") + std = float("0.129545") data = None @@ -1689,10 +1689,10 @@ class Program_weight_tensor_parameter_157: name = "parameter_157" shape = [192] dtype = "float32" - min_val = float("0.0313595") - max_val = float("0.113252") - mean = 
float("0.0588345") - std = float("0.0156086") + min_val = float("0.0245048") + max_val = float("0.0980499") + mean = float("0.045066") + std = float("0.0114572") data = None @@ -1700,10 +1700,10 @@ class Program_weight_tensor_parameter_158: name = "parameter_158" shape = [192] dtype = "float32" - min_val = float("-0.332132") - max_val = float("0.010893") - mean = float("-0.152239") - std = float("0.0624963") + min_val = float("-0.272811") + max_val = float("0.0283262") + mean = float("-0.122117") + std = float("0.0543901") data = None @@ -1711,10 +1711,10 @@ class Program_weight_tensor_parameter_159: name = "parameter_159" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0503168") - max_val = float("0.0641412") - mean = float("-0.000498968") - std = float("0.00471767") + min_val = float("-0.037555") + max_val = float("0.0532694") + mean = float("-0.000403704") + std = float("0.00395116") data = None @@ -1722,10 +1722,10 @@ class Program_weight_tensor_parameter_160: name = "parameter_160" shape = [192] dtype = "float32" - min_val = float("-0.311021") - max_val = float("0.0670138") - mean = float("-0.114925") - std = float("0.0801899") + min_val = float("-0.311171") + max_val = float("0.0670066") + mean = float("-0.115181") + std = float("0.0802375") data = None @@ -1733,10 +1733,10 @@ class Program_weight_tensor_parameter_161: name = "parameter_161" shape = [192] dtype = "float32" - min_val = float("0.909746") - max_val = float("1.44623") - mean = float("1.10815") - std = float("0.101996") + min_val = float("0.910219") + max_val = float("1.44564") + mean = float("1.10838") + std = float("0.101829") data = None @@ -1744,10 +1744,10 @@ class Program_weight_tensor_parameter_162: name = "parameter_162" shape = [192] dtype = "float32" - min_val = float("0.0399431") - max_val = float("0.140144") - mean = float("0.0747907") - std = float("0.0215638") + min_val = float("0.0298363") + max_val = float("0.10738") + mean = float("0.0514797") + std = float("0.0140253") data = None @@ -1755,10 +1755,10 @@ class Program_weight_tensor_parameter_163: name = "parameter_163" shape = [192] dtype = "float32" - min_val = float("-0.533458") - max_val = float("0.244514") - mean = float("-0.168827") - std = float("0.11435") + min_val = float("-0.487163") + max_val = float("0.229195") + mean = float("-0.132677") + std = float("0.0943938") data = None @@ -1766,10 +1766,10 @@ class Program_weight_tensor_parameter_164: name = "parameter_164" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0566842") - max_val = float("0.0514181") - mean = float("-0.000587536") - std = float("0.00527014") + min_val = float("-0.0485576") + max_val = float("0.040365") + mean = float("-0.000462303") + std = float("0.00440033") data = None @@ -1777,10 +1777,10 @@ class Program_weight_tensor_parameter_165: name = "parameter_165" shape = [192] dtype = "float32" - min_val = float("-0.444627") - max_val = float("0.412033") - mean = float("-0.137488") - std = float("0.130168") + min_val = float("-0.444127") + max_val = float("0.411817") + mean = float("-0.1378") + std = float("0.130204") data = None @@ -1788,10 +1788,10 @@ class Program_weight_tensor_parameter_166: name = "parameter_166" shape = [192] dtype = "float32" - min_val = float("0.95351") - max_val = float("1.37306") - mean = float("1.11002") - std = float("0.0723609") + min_val = float("0.955474") + max_val = float("1.3718") + mean = float("1.11018") + std = float("0.072198") data = None @@ -1799,10 +1799,10 @@ class Program_weight_tensor_parameter_167: 
name = "parameter_167" shape = [192] dtype = "float32" - min_val = float("0.0551676") - max_val = float("0.229055") - mean = float("0.0906291") - std = float("0.0274345") + min_val = float("0.0481328") + max_val = float("0.201416") + mean = float("0.0730914") + std = float("0.0211211") data = None @@ -1810,10 +1810,10 @@ class Program_weight_tensor_parameter_168: name = "parameter_168" shape = [192] dtype = "float32" - min_val = float("-0.313806") - max_val = float("0.492745") - mean = float("-0.132664") - std = float("0.085465") + min_val = float("-0.272425") + max_val = float("0.493843") + mean = float("-0.124677") + std = float("0.0789093") data = None @@ -1821,10 +1821,10 @@ class Program_weight_tensor_parameter_169: name = "parameter_169" shape = [192, 512, 1, 1] dtype = "float32" - min_val = float("-0.0601338") - max_val = float("0.10781") - mean = float("-0.000964638") - std = float("0.00915433") + min_val = float("-0.0520012") + max_val = float("0.0916206") + mean = float("-0.00073398") + std = float("0.0076239") data = None @@ -1832,10 +1832,10 @@ class Program_weight_tensor_parameter_170: name = "parameter_170" shape = [192] dtype = "float32" - min_val = float("-0.163877") - max_val = float("0.00112327") - mean = float("-0.0651513") - std = float("0.0261511") + min_val = float("-0.16409") + max_val = float("0.00104506") + mean = float("-0.0652767") + std = float("0.0261643") data = None @@ -1843,10 +1843,10 @@ class Program_weight_tensor_parameter_171: name = "parameter_171" shape = [192] dtype = "float32" - min_val = float("0.819712") - max_val = float("1.06624") - mean = float("0.968869") - std = float("0.0460872") + min_val = float("0.819388") + max_val = float("1.06661") + mean = float("0.968901") + std = float("0.0460972") data = None @@ -1854,10 +1854,10 @@ class Program_weight_tensor_parameter_172: name = "parameter_172" shape = [192] dtype = "float32" - min_val = float("0.0346107") - max_val = float("0.137989") - mean = float("0.054459") - std = float("0.0124478") + min_val = float("0.0308169") + max_val = float("0.0790712") + mean = float("0.0462185") + std = float("0.00870431") data = None @@ -1865,10 +1865,10 @@ class Program_weight_tensor_parameter_173: name = "parameter_173" shape = [192] dtype = "float32" - min_val = float("-0.200298") - max_val = float("0.0295223") - mean = float("-0.0891939") - std = float("0.0476986") + min_val = float("-0.182206") + max_val = float("0.0940335") + mean = float("-0.089627") + std = float("0.0463932") data = None @@ -1876,10 +1876,10 @@ class Program_weight_tensor_parameter_174: name = "parameter_174" shape = [192, 512, 1, 1] dtype = "float32" - min_val = float("-0.0357362") - max_val = float("0.0637335") - mean = float("-0.000787404") - std = float("0.00752266") + min_val = float("-0.0262639") + max_val = float("0.0504804") + mean = float("-0.000603328") + std = float("0.00628844") data = None @@ -1887,10 +1887,10 @@ class Program_weight_tensor_parameter_175: name = "parameter_175" shape = [512] dtype = "float32" - min_val = float("-4.82803") - max_val = float("-0.112013") - mean = float("-2.29502") - std = float("0.775166") + min_val = float("-4.82816") + max_val = float("-0.111144") + mean = float("-2.29505") + std = float("0.77518") data = None @@ -1898,10 +1898,10 @@ class Program_weight_tensor_parameter_176: name = "parameter_176" shape = [512] dtype = "float32" - min_val = float("2.10203") - max_val = float("5.21664") - mean = float("3.70064") - std = float("0.482744") + min_val = float("2.1017") + max_val = float("5.21657") 
+ mean = float("3.70059") + std = float("0.482718") data = None @@ -1909,10 +1909,10 @@ class Program_weight_tensor_parameter_177: name = "parameter_177" shape = [512] dtype = "float32" - min_val = float("0.00190724") - max_val = float("0.0148178") - mean = float("0.00510842") - std = float("0.00181044") + min_val = float("0.00200829") + max_val = float("0.0114641") + mean = float("0.00406053") + std = float("0.00119042") data = None @@ -1920,10 +1920,10 @@ class Program_weight_tensor_parameter_178: name = "parameter_178" shape = [512] dtype = "float32" - min_val = float("-0.130767") - max_val = float("0.0826418") - mean = float("-0.0410784") - std = float("0.0246182") + min_val = float("-0.123017") + max_val = float("0.0755615") + mean = float("-0.042568") + std = float("0.0275421") data = None @@ -1931,10 +1931,10 @@ class Program_weight_tensor_parameter_179: name = "parameter_179" shape = [512, 384, 1, 1] dtype = "float32" - min_val = float("-0.106883") - max_val = float("0.153104") - mean = float("-0.00124309") - std = float("0.00937421") + min_val = float("-0.0810492") + max_val = float("0.133509") + mean = float("-0.000971967") + std = float("0.00774565") data = None @@ -1942,10 +1942,10 @@ class Program_weight_tensor_parameter_180: name = "parameter_180" shape = [384] dtype = "float32" - min_val = float("-0.0182735") - max_val = float("-0.000488762") - mean = float("-0.0066305") - std = float("0.00403925") + min_val = float("-0.0162429") + max_val = float("-0.000278986") + mean = float("-0.0054732") + std = float("0.00365563") data = None @@ -1953,10 +1953,10 @@ class Program_weight_tensor_parameter_181: name = "parameter_181" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.243656") - max_val = float("0.180358") - mean = float("-0.00257278") - std = float("0.00811913") + min_val = float("-0.18691") + max_val = float("0.145937") + mean = float("-0.00212935") + std = float("0.00665719") data = None @@ -1964,10 +1964,10 @@ class Program_weight_tensor_parameter_182: name = "parameter_182" shape = [192] dtype = "float32" - min_val = float("-2.38777") - max_val = float("3.15932") - mean = float("-0.20407") - std = float("0.562338") + min_val = float("-2.38779") + max_val = float("3.17061") + mean = float("-0.203411") + std = float("0.563155") data = None @@ -1975,10 +1975,10 @@ class Program_weight_tensor_parameter_183: name = "parameter_183" shape = [192] dtype = "float32" - min_val = float("0.123779") - max_val = float("2.40527") - mean = float("0.524516") - std = float("0.334825") + min_val = float("0.123346") + max_val = float("2.40428") + mean = float("0.524679") + std = float("0.3349") data = None @@ -1986,10 +1986,10 @@ class Program_weight_tensor_parameter_184: name = "parameter_184" shape = [192] dtype = "float32" - min_val = float("0.000126989") - max_val = float("0.0045594") - mean = float("0.000819872") - std = float("0.000558532") + min_val = float("0.000128668") + max_val = float("0.0027455") + mean = float("0.00061953") + std = float("0.000399688") data = None @@ -1997,10 +1997,10 @@ class Program_weight_tensor_parameter_185: name = "parameter_185" shape = [192] dtype = "float32" - min_val = float("-0.0804045") - max_val = float("0.119949") - mean = float("0.0138328") - std = float("0.0270058") + min_val = float("-0.0609001") + max_val = float("0.0904291") + mean = float("0.010232") + std = float("0.0217379") data = None @@ -2008,10 +2008,10 @@ class Program_weight_tensor_parameter_186: name = "parameter_186" shape = [192, 192, 1, 1] dtype = "float32" - 
min_val = float("-0.0707601") - max_val = float("0.0621697") - mean = float("-0.000379924") - std = float("0.00602374") + min_val = float("-0.0576781") + max_val = float("0.0497244") + mean = float("-0.000313562") + std = float("0.00494929") data = None @@ -2019,10 +2019,10 @@ class Program_weight_tensor_parameter_187: name = "parameter_187" shape = [192] dtype = "float32" - min_val = float("-2.38777") - max_val = float("3.15932") - mean = float("-0.20407") - std = float("0.562338") + min_val = float("-2.38779") + max_val = float("3.17061") + mean = float("-0.203411") + std = float("0.563155") data = None @@ -2030,10 +2030,10 @@ class Program_weight_tensor_parameter_188: name = "parameter_188" shape = [192] dtype = "float32" - min_val = float("0.67694") - max_val = float("3.07362") - mean = float("1.54467") - std = float("0.450817") + min_val = float("0.678982") + max_val = float("3.07273") + mean = float("1.54519") + std = float("0.450797") data = None @@ -2041,10 +2041,10 @@ class Program_weight_tensor_parameter_189: name = "parameter_189" shape = [192] dtype = "float32" - min_val = float("0.00215705") - max_val = float("0.0356044") - mean = float("0.00897467") - std = float("0.00458554") + min_val = float("0.00238059") + max_val = float("0.0225906") + mean = float("0.00724614") + std = float("0.00305679") data = None @@ -2052,10 +2052,10 @@ class Program_weight_tensor_parameter_190: name = "parameter_190" shape = [192] dtype = "float32" - min_val = float("-0.241985") - max_val = float("0.257354") - mean = float("0.0137577") - std = float("0.0550272") + min_val = float("-0.212221") + max_val = float("0.164978") + mean = float("0.0097172") + std = float("0.0447557") data = None @@ -2063,10 +2063,10 @@ class Program_weight_tensor_parameter_191: name = "parameter_191" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0901592") - max_val = float("0.0804") - mean = float("-9.38039e-05") - std = float("0.00532701") + min_val = float("-0.0724552") + max_val = float("0.0669129") + mean = float("-8.66037e-05") + std = float("0.00438841") data = None @@ -2074,10 +2074,10 @@ class Program_weight_tensor_parameter_192: name = "parameter_192" shape = [192] dtype = "float32" - min_val = float("-3.43174") - max_val = float("1.16938") - mean = float("-1.42833") - std = float("0.634694") + min_val = float("-3.43225") + max_val = float("1.16814") + mean = float("-1.42818") + std = float("0.634737") data = None @@ -2085,10 +2085,10 @@ class Program_weight_tensor_parameter_193: name = "parameter_193" shape = [192] dtype = "float32" - min_val = float("0.389358") - max_val = float("1.7276") - mean = float("1.0897") - std = float("0.190293") + min_val = float("0.390538") + max_val = float("1.72646") + mean = float("1.08981") + std = float("0.189952") data = None @@ -2096,10 +2096,10 @@ class Program_weight_tensor_parameter_194: name = "parameter_194" shape = [192] dtype = "float32" - min_val = float("0.0526142") - max_val = float("0.270718") - mean = float("0.108176") - std = float("0.0325536") + min_val = float("0.0318757") + max_val = float("0.213938") + mean = float("0.0702584") + std = float("0.0222017") data = None @@ -2107,10 +2107,10 @@ class Program_weight_tensor_parameter_195: name = "parameter_195" shape = [192] dtype = "float32" - min_val = float("-1.54162") - max_val = float("0.471207") - mean = float("-0.283055") - std = float("0.214286") + min_val = float("-1.23721") + max_val = float("0.358903") + mean = float("-0.222679") + std = float("0.169848") data = None @@ -2118,10 +2118,10 
@@ class Program_weight_tensor_parameter_196: name = "parameter_196" shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0845723") - max_val = float("0.0650064") - mean = float("-0.000462494") - std = float("0.00626612") + min_val = float("-0.0627818") + max_val = float("0.0522698") + mean = float("-0.000387135") + std = float("0.00513507") data = None @@ -2129,10 +2129,10 @@ class Program_weight_tensor_parameter_197: name = "parameter_197" shape = [192] dtype = "float32" - min_val = float("-3.87665") - max_val = float("4.23691") - mean = float("-0.62962") - std = float("0.987882") + min_val = float("-3.87733") + max_val = float("4.24375") + mean = float("-0.629121") + std = float("0.988583") data = None @@ -2140,10 +2140,10 @@ class Program_weight_tensor_parameter_198: name = "parameter_198" shape = [192] dtype = "float32" - min_val = float("0.580822") - max_val = float("4.17446") - mean = float("1.54478") - std = float("0.398628") + min_val = float("0.579177") + max_val = float("4.17445") + mean = float("1.54468") + std = float("0.398498") data = None @@ -2151,10 +2151,10 @@ class Program_weight_tensor_parameter_199: name = "parameter_199" shape = [192] dtype = "float32" - min_val = float("0.00595442") - max_val = float("0.0273129") - mean = float("0.011053") - std = float("0.00374204") + min_val = float("0.00376591") + max_val = float("0.0199072") + mean = float("0.00750177") + std = float("0.0024196") data = None @@ -2162,10 +2162,10 @@ class Program_weight_tensor_parameter_200: name = "parameter_200" shape = [192] dtype = "float32" - min_val = float("-0.22698") - max_val = float("0.189248") - mean = float("0.0593732") - std = float("0.0450403") + min_val = float("-0.181981") + max_val = float("0.142706") + mean = float("0.0472299") + std = float("0.0360465") data = None @@ -2173,10 +2173,10 @@ class Program_weight_tensor_parameter_201: name = "parameter_201" shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.114839") - max_val = float("0.085935") - mean = float("-0.00162257") - std = float("0.011314") + min_val = float("-0.0974781") + max_val = float("0.0606577") + mean = float("-0.00130154") + std = float("0.00921657") data = None @@ -2184,10 +2184,10 @@ class Program_weight_tensor_parameter_202: name = "parameter_202" shape = [192] dtype = "float32" - min_val = float("-2.93751") - max_val = float("1.02421") - mean = float("-0.427093") - std = float("0.681453") + min_val = float("-2.93753") + max_val = float("1.02574") + mean = float("-0.426705") + std = float("0.681645") data = None @@ -2195,10 +2195,10 @@ class Program_weight_tensor_parameter_203: name = "parameter_203" shape = [192] dtype = "float32" - min_val = float("0.698228") - max_val = float("3.61037") - mean = float("1.48106") - std = float("0.505456") + min_val = float("0.700073") + max_val = float("3.61084") + mean = float("1.48155") + std = float("0.505235") data = None @@ -2206,10 +2206,10 @@ class Program_weight_tensor_parameter_204: name = "parameter_204" shape = [192] dtype = "float32" - min_val = float("0.00245686") - max_val = float("0.012822") - mean = float("0.00510958") - std = float("0.00159036") + min_val = float("0.00182554") + max_val = float("0.00715893") + mean = float("0.00350375") + std = float("0.000839174") data = None @@ -2217,10 +2217,10 @@ class Program_weight_tensor_parameter_205: name = "parameter_205" shape = [192] dtype = "float32" - min_val = float("-0.100272") - max_val = float("0.105996") - mean = float("0.0195239") - std = float("0.0348524") + min_val = 
float("-0.0595251") + max_val = float("0.0837498") + mean = float("0.0142918") + std = float("0.0271037") data = None @@ -2228,10 +2228,10 @@ class Program_weight_tensor_parameter_206: name = "parameter_206" shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.0708848") - max_val = float("0.0850015") - mean = float("-0.000706931") - std = float("0.00901731") + min_val = float("-0.0725004") + max_val = float("0.0651294") + mean = float("-0.000532804") + std = float("0.00742023") data = None @@ -2239,10 +2239,10 @@ class Program_weight_tensor_parameter_207: name = "parameter_207" shape = [384] dtype = "float32" - min_val = float("-2.84209") - max_val = float("1.12257") - mean = float("-0.753555") - std = float("0.497094") + min_val = float("-2.84234") + max_val = float("1.12211") + mean = float("-0.753291") + std = float("0.497125") data = None @@ -2250,10 +2250,10 @@ class Program_weight_tensor_parameter_208: name = "parameter_208" shape = [384] dtype = "float32" - min_val = float("0.417665") - max_val = float("1.80337") - mean = float("0.867666") - std = float("0.218119") + min_val = float("0.420357") + max_val = float("1.80213") + mean = float("0.867653") + std = float("0.218147") data = None @@ -2261,10 +2261,10 @@ class Program_weight_tensor_parameter_209: name = "parameter_209" shape = [384] dtype = "float32" - min_val = float("0.0108198") - max_val = float("0.0878778") - mean = float("0.0200508") - std = float("0.00708447") + min_val = float("0.00857563") + max_val = float("0.0680256") + mean = float("0.0166475") + std = float("0.00541149") data = None @@ -2272,10 +2272,10 @@ class Program_weight_tensor_parameter_210: name = "parameter_210" shape = [384] dtype = "float32" - min_val = float("-0.552474") - max_val = float("0.359625") - mean = float("0.0190155") - std = float("0.105451") + min_val = float("-0.467733") + max_val = float("0.319199") + mean = float("0.00858269") + std = float("0.0842052") data = None @@ -2283,10 +2283,10 @@ class Program_weight_tensor_parameter_211: name = "parameter_211" shape = [384, 256, 3, 3] dtype = "float32" - min_val = float("-0.050684") - max_val = float("0.0606495") - mean = float("-0.000221799") - std = float("0.00520848") + min_val = float("-0.0544237") + max_val = float("0.0537574") + mean = float("-0.000167554") + std = float("0.00423747") data = None @@ -2294,10 +2294,10 @@ class Program_weight_tensor_parameter_212: name = "parameter_212" shape = [256] dtype = "float32" - min_val = float("-2.81734") - max_val = float("1.46527") - mean = float("-1.07834") - std = float("0.63321") + min_val = float("-2.82015") + max_val = float("1.46513") + mean = float("-1.07771") + std = float("0.633538") data = None @@ -2305,10 +2305,10 @@ class Program_weight_tensor_parameter_213: name = "parameter_213" shape = [256] dtype = "float32" - min_val = float("0.430261") - max_val = float("1.7692") - mean = float("0.978084") - std = float("0.17059") + min_val = float("0.419279") + max_val = float("1.76958") + mean = float("0.978268") + std = float("0.170537") data = None @@ -2316,10 +2316,10 @@ class Program_weight_tensor_parameter_214: name = "parameter_214" shape = [256] dtype = "float32" - min_val = float("0.00302249") - max_val = float("0.0150254") - mean = float("0.00711712") - std = float("0.0018291") + min_val = float("0.00255461") + max_val = float("0.0103385") + mean = float("0.00516842") + std = float("0.00131104") data = None @@ -2327,10 +2327,10 @@ class Program_weight_tensor_parameter_215: name = "parameter_215" shape = [256] dtype = 
"float32" - min_val = float("-0.242576") - max_val = float("0.242969") - mean = float("-0.0674006") - std = float("0.0835729") + min_val = float("-0.222755") + max_val = float("0.223332") + mean = float("-0.057983") + std = float("0.0714632") data = None @@ -2338,10 +2338,10 @@ class Program_weight_tensor_parameter_216: name = "parameter_216" shape = [256, 192, 1, 1] dtype = "float32" - min_val = float("-0.11703") - max_val = float("0.206335") - mean = float("-0.00133698") - std = float("0.016598") + min_val = float("-0.122474") + max_val = float("0.173112") + mean = float("-0.00112125") + std = float("0.0137328") data = None @@ -2349,10 +2349,10 @@ class Program_weight_tensor_parameter_217: name = "parameter_217" shape = [192] dtype = "float32" - min_val = float("-0.0214622") - max_val = float("0.00204459") - mean = float("-0.00682654") - std = float("0.00523474") + min_val = float("-0.0217534") + max_val = float("0.00201664") + mean = float("-0.00613465") + std = float("0.00506215") data = None @@ -2360,10 +2360,10 @@ class Program_weight_tensor_parameter_218: name = "parameter_218" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.274972") - max_val = float("0.196684") - mean = float("-0.00446271") - std = float("0.0117188") + min_val = float("-0.23422") + max_val = float("0.177901") + mean = float("-0.00406409") + std = float("0.0102684") data = None @@ -2371,10 +2371,10 @@ class Program_weight_tensor_parameter_219: name = "parameter_219" shape = [96] dtype = "float32" - min_val = float("-2.27808") - max_val = float("0.747421") - mean = float("-0.117479") - std = float("0.506792") + min_val = float("-2.2781") + max_val = float("0.7544") + mean = float("-0.115735") + std = float("0.508041") data = None @@ -2382,10 +2382,10 @@ class Program_weight_tensor_parameter_220: name = "parameter_220" shape = [96] dtype = "float32" - min_val = float("-0.0587442") - max_val = float("2.30527") - mean = float("0.261103") - std = float("0.366411") + min_val = float("-0.0586392") + max_val = float("2.30658") + mean = float("0.261357") + std = float("0.366816") data = None @@ -2393,10 +2393,10 @@ class Program_weight_tensor_parameter_221: name = "parameter_221" shape = [96] dtype = "float32" - min_val = float("9.94584e-12") - max_val = float("0.00288558") - mean = float("0.000611538") - std = float("0.000479206") + min_val = float("5.76256e-12") + max_val = float("0.00191318") + mean = float("0.000441882") + std = float("0.000366175") data = None @@ -2404,10 +2404,10 @@ class Program_weight_tensor_parameter_222: name = "parameter_222" shape = [96] dtype = "float32" - min_val = float("-0.0613965") - max_val = float("0.0867185") - mean = float("0.0075244") - std = float("0.022618") + min_val = float("-0.048434") + max_val = float("0.0685867") + mean = float("0.00619257") + std = float("0.0183565") data = None @@ -2415,10 +2415,10 @@ class Program_weight_tensor_parameter_223: name = "parameter_223" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0458095") - max_val = float("0.0794823") - mean = float("-0.000380958") - std = float("0.0066521") + min_val = float("-0.0376665") + max_val = float("0.0662012") + mean = float("-0.000297498") + std = float("0.00560219") data = None @@ -2426,10 +2426,10 @@ class Program_weight_tensor_parameter_224: name = "parameter_224" shape = [96] dtype = "float32" - min_val = float("-2.27808") - max_val = float("0.747421") - mean = float("-0.117479") - std = float("0.506792") + min_val = float("-2.2781") + max_val = float("0.7544") + mean = 
float("-0.115735") + std = float("0.508041") data = None @@ -2437,10 +2437,10 @@ class Program_weight_tensor_parameter_225: name = "parameter_225" shape = [96] dtype = "float32" - min_val = float("0.348389") - max_val = float("3.24093") - mean = float("1.29082") - std = float("0.633395") + min_val = float("0.35139") + max_val = float("3.24211") + mean = float("1.2913") + std = float("0.633692") data = None @@ -2448,10 +2448,10 @@ class Program_weight_tensor_parameter_226: name = "parameter_226" shape = [96] dtype = "float32" - min_val = float("0.00469999") - max_val = float("0.0420037") - mean = float("0.0162317") - std = float("0.00640931") + min_val = float("0.00288832") + max_val = float("0.0301202") + mean = float("0.0128354") + std = float("0.00565579") data = None @@ -2459,10 +2459,10 @@ class Program_weight_tensor_parameter_227: name = "parameter_227" shape = [96] dtype = "float32" - min_val = float("-0.198405") - max_val = float("0.208243") - mean = float("0.0278847") - std = float("0.0747534") + min_val = float("-0.186493") + max_val = float("0.170245") + mean = float("0.0280013") + std = float("0.0660658") data = None @@ -2470,10 +2470,10 @@ class Program_weight_tensor_parameter_228: name = "parameter_228" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0703987") - max_val = float("0.0847217") - mean = float("-0.000400512") - std = float("0.00880434") + min_val = float("-0.0540548") + max_val = float("0.0585126") + mean = float("-0.000334364") + std = float("0.00738032") data = None @@ -2481,10 +2481,10 @@ class Program_weight_tensor_parameter_229: name = "parameter_229" shape = [96] dtype = "float32" - min_val = float("-2.79718") - max_val = float("1.50453") - mean = float("-1.09173") - std = float("0.69636") + min_val = float("-2.79991") + max_val = float("1.50593") + mean = float("-1.09119") + std = float("0.696756") data = None @@ -2492,10 +2492,10 @@ class Program_weight_tensor_parameter_230: name = "parameter_230" shape = [96] dtype = "float32" - min_val = float("0.319783") - max_val = float("1.80086") - mean = float("1.07317") - std = float("0.21342") + min_val = float("0.324891") + max_val = float("1.80804") + mean = float("1.07292") + std = float("0.213034") data = None @@ -2503,10 +2503,10 @@ class Program_weight_tensor_parameter_231: name = "parameter_231" shape = [96] dtype = "float32" - min_val = float("0.0391363") - max_val = float("0.160891") - mean = float("0.0831981") - std = float("0.0236604") + min_val = float("0.0318799") + max_val = float("0.113866") + mean = float("0.0602373") + std = float("0.0156272") data = None @@ -2514,10 +2514,10 @@ class Program_weight_tensor_parameter_232: name = "parameter_232" shape = [96] dtype = "float32" - min_val = float("-1.59676") - max_val = float("0.430385") - mean = float("-0.176851") - std = float("0.317024") + min_val = float("-1.57323") + max_val = float("0.320517") + mean = float("-0.143123") + std = float("0.265242") data = None @@ -2525,10 +2525,10 @@ class Program_weight_tensor_parameter_233: name = "parameter_233" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0684623") - max_val = float("0.0821956") - mean = float("-0.000727853") - std = float("0.00945016") + min_val = float("-0.0521981") + max_val = float("0.0688539") + mean = float("-0.000580267") + std = float("0.00795035") data = None @@ -2536,10 +2536,10 @@ class Program_weight_tensor_parameter_234: name = "parameter_234" shape = [96] dtype = "float32" - min_val = float("-2.53961") - max_val = float("0.660095") - mean = 
float("-0.0506683") - std = float("0.473015") + min_val = float("-2.54041") + max_val = float("0.664474") + mean = float("-0.0487346") + std = float("0.474109") data = None @@ -2547,10 +2547,10 @@ class Program_weight_tensor_parameter_235: name = "parameter_235" shape = [96] dtype = "float32" - min_val = float("-0.0761853") - max_val = float("3.15108") - mean = float("0.280991") - std = float("0.409237") + min_val = float("-0.0773368") + max_val = float("3.15117") + mean = float("0.280349") + std = float("0.408758") data = None @@ -2558,10 +2558,10 @@ class Program_weight_tensor_parameter_236: name = "parameter_236" shape = [96] dtype = "float32" - min_val = float("2.2112e-10") - max_val = float("0.0182811") - mean = float("0.0018356") - std = float("0.00247258") + min_val = float("1.3812e-10") + max_val = float("0.0159383") + mean = float("0.00155641") + std = float("0.00234573") data = None @@ -2569,10 +2569,10 @@ class Program_weight_tensor_parameter_237: name = "parameter_237" shape = [96] dtype = "float32" - min_val = float("-0.0564698") - max_val = float("0.134674") - mean = float("0.0208637") - std = float("0.0323877") + min_val = float("-0.0454894") + max_val = float("0.108167") + mean = float("0.0171959") + std = float("0.0277947") data = None @@ -2580,10 +2580,10 @@ class Program_weight_tensor_parameter_238: name = "parameter_238" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.149971") - max_val = float("0.0877827") - mean = float("-0.00159757") - std = float("0.00996893") + min_val = float("-0.11378") + max_val = float("0.0697877") + mean = float("-0.00124357") + std = float("0.00831874") data = None @@ -2591,10 +2591,10 @@ class Program_weight_tensor_parameter_239: name = "parameter_239" shape = [96] dtype = "float32" - min_val = float("-2.53961") - max_val = float("0.660096") - mean = float("-0.0506683") - std = float("0.473015") + min_val = float("-2.54041") + max_val = float("0.664475") + mean = float("-0.0487346") + std = float("0.474109") data = None @@ -2602,10 +2602,10 @@ class Program_weight_tensor_parameter_240: name = "parameter_240" shape = [96] dtype = "float32" - min_val = float("0.340415") - max_val = float("2.99317") - mean = float("0.929372") - std = float("0.41222") + min_val = float("0.343863") + max_val = float("2.99332") + mean = float("0.929472") + std = float("0.412076") data = None @@ -2613,10 +2613,10 @@ class Program_weight_tensor_parameter_241: name = "parameter_241" shape = [96] dtype = "float32" - min_val = float("0.0101736") - max_val = float("0.0521186") - mean = float("0.0255179") - std = float("0.00892784") + min_val = float("0.00889548") + max_val = float("0.0360352") + mean = float("0.0196807") + std = float("0.00631594") data = None @@ -2624,10 +2624,10 @@ class Program_weight_tensor_parameter_242: name = "parameter_242" shape = [96] dtype = "float32" - min_val = float("-0.226393") - max_val = float("0.221011") - mean = float("0.0450625") - std = float("0.0782651") + min_val = float("-0.183188") + max_val = float("0.203949") + mean = float("0.0349526") + std = float("0.0692374") data = None @@ -2635,10 +2635,10 @@ class Program_weight_tensor_parameter_243: name = "parameter_243" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0614525") - max_val = float("0.0642893") - mean = float("-0.000703378") - std = float("0.00896078") + min_val = float("-0.0529212") + max_val = float("0.050965") + mean = float("-0.000562978") + std = float("0.00752018") data = None @@ -2646,10 +2646,10 @@ class 
Program_weight_tensor_parameter_244: name = "parameter_244" shape = [96] dtype = "float32" - min_val = float("-2.01737") - max_val = float("1.65537") - mean = float("-0.920561") - std = float("0.650231") + min_val = float("-2.01882") + max_val = float("1.6565") + mean = float("-0.91983") + std = float("0.650475") data = None @@ -2657,10 +2657,10 @@ class Program_weight_tensor_parameter_245: name = "parameter_245" shape = [96] dtype = "float32" - min_val = float("0.434872") - max_val = float("1.96317") - mean = float("1.06433") - std = float("0.227725") + min_val = float("0.443451") + max_val = float("1.97486") + mean = float("1.06386") + std = float("0.2277") data = None @@ -2668,10 +2668,10 @@ class Program_weight_tensor_parameter_246: name = "parameter_246" shape = [96] dtype = "float32" - min_val = float("0.0165936") - max_val = float("0.132837") - mean = float("0.0345742") - std = float("0.0150947") + min_val = float("0.00968838") + max_val = float("0.115069") + mean = float("0.0238439") + std = float("0.011445") data = None @@ -2679,10 +2679,10 @@ class Program_weight_tensor_parameter_247: name = "parameter_247" shape = [96] dtype = "float32" - min_val = float("-2.3732") - max_val = float("0.24267") - mean = float("-0.0505484") - std = float("0.287143") + min_val = float("-2.04544") + max_val = float("0.240057") + mean = float("-0.0326069") + std = float("0.235198") data = None @@ -2690,10 +2690,10 @@ class Program_weight_tensor_parameter_248: name = "parameter_248" shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.127211") - max_val = float("0.160397") - mean = float("-0.000544621") - std = float("0.0101928") + min_val = float("-0.102806") + max_val = float("0.1255") + mean = float("-0.000419155") + std = float("0.00850691") data = None @@ -2701,10 +2701,10 @@ class Program_weight_tensor_parameter_249: name = "parameter_249" shape = [96] dtype = "float32" - min_val = float("-1.61344") - max_val = float("1.88195") - mean = float("0.00484379") - std = float("0.837537") + min_val = float("-1.61915") + max_val = float("1.88666") + mean = float("0.00600959") + std = float("0.838747") data = None @@ -2712,10 +2712,10 @@ class Program_weight_tensor_parameter_250: name = "parameter_250" shape = [96] dtype = "float32" - min_val = float("0.347101") - max_val = float("1.32016") - mean = float("0.70129") - std = float("0.236267") + min_val = float("0.348796") + max_val = float("1.32224") + mean = float("0.700437") + std = float("0.236363") data = None @@ -2723,10 +2723,10 @@ class Program_weight_tensor_parameter_251: name = "parameter_251" shape = [96] dtype = "float32" - min_val = float("0.0166919") - max_val = float("0.0891169") - mean = float("0.0374108") - std = float("0.0147788") + min_val = float("0.00889485") + max_val = float("0.0570436") + mean = float("0.0271583") + std = float("0.0110354") data = None @@ -2734,10 +2734,10 @@ class Program_weight_tensor_parameter_252: name = "parameter_252" shape = [96] dtype = "float32" - min_val = float("-0.381066") - max_val = float("0.566263") - mean = float("-0.0972959") - std = float("0.136521") + min_val = float("-0.327651") + max_val = float("0.488071") + mean = float("-0.0740222") + std = float("0.115843") data = None @@ -2745,10 +2745,10 @@ class Program_weight_tensor_parameter_253: name = "parameter_253" shape = [96, 192, 1, 1] dtype = "float32" - min_val = float("-0.143906") - max_val = float("0.121873") - mean = float("-0.00172717") - std = float("0.0164963") + min_val = float("-0.124833") + max_val = float("0.114017") + mean 
= float("-0.001301") + std = float("0.0137993") data = None @@ -2756,10 +2756,10 @@ class Program_weight_tensor_parameter_254: name = "parameter_254" shape = [96] dtype = "float32" - min_val = float("-2.46841") - max_val = float("1.71052") - mean = float("0.339601") - std = float("0.678564") + min_val = float("-2.46673") + max_val = float("1.7157") + mean = float("0.340991") + std = float("0.679024") data = None @@ -2767,10 +2767,10 @@ class Program_weight_tensor_parameter_255: name = "parameter_255" shape = [96] dtype = "float32" - min_val = float("0.541707") - max_val = float("4.87976") - mean = float("1.48201") - std = float("0.958387") + min_val = float("0.539684") + max_val = float("4.88444") + mean = float("1.48216") + std = float("0.959789") data = None @@ -2778,10 +2778,10 @@ class Program_weight_tensor_parameter_256: name = "parameter_256" shape = [96] dtype = "float32" - min_val = float("0.0122307") - max_val = float("0.120423") - mean = float("0.0291796") - std = float("0.0157993") + min_val = float("0.00993858") + max_val = float("0.070056") + mean = float("0.0215824") + std = float("0.0104011") data = None @@ -2789,10 +2789,10 @@ class Program_weight_tensor_parameter_257: name = "parameter_257" shape = [96] dtype = "float32" - min_val = float("-0.296455") - max_val = float("0.260254") - mean = float("-0.00596865") - std = float("0.132222") + min_val = float("-0.283006") + max_val = float("0.290595") + mean = float("-0.00648709") + std = float("0.112547") data = None @@ -2800,10 +2800,10 @@ class Program_weight_tensor_parameter_258: name = "parameter_258" shape = [96, 192, 1, 1] dtype = "float32" - min_val = float("-0.111668") - max_val = float("0.214196") - mean = float("-0.000763054") - std = float("0.0168732") + min_val = float("-0.0886065") + max_val = float("0.166141") + mean = float("-0.000584531") + std = float("0.0139549") data = None @@ -2811,10 +2811,10 @@ class Program_weight_tensor_parameter_259: name = "parameter_259" shape = [192] dtype = "float32" - min_val = float("-4.44408") - max_val = float("2.00967") - mean = float("-0.0987827") - std = float("0.883275") + min_val = float("-4.44154") + max_val = float("2.00924") + mean = float("-0.0984691") + std = float("0.883168") data = None @@ -2822,10 +2822,10 @@ class Program_weight_tensor_parameter_260: name = "parameter_260" shape = [192] dtype = "float32" - min_val = float("0.570935") - max_val = float("4.51306") - mean = float("1.08264") - std = float("0.425298") + min_val = float("0.574537") + max_val = float("4.5171") + mean = float("1.08183") + std = float("0.426228") data = None @@ -2833,10 +2833,10 @@ class Program_weight_tensor_parameter_261: name = "parameter_261" shape = [192] dtype = "float32" - min_val = float("0.0126533") - max_val = float("0.136437") - mean = float("0.0356273") - std = float("0.0206117") + min_val = float("0.00770389") + max_val = float("0.101175") + mean = float("0.0267908") + std = float("0.0158745") data = None @@ -2844,10 +2844,10 @@ class Program_weight_tensor_parameter_262: name = "parameter_262" shape = [192] dtype = "float32" - min_val = float("-0.392763") - max_val = float("0.345645") - mean = float("0.022741") - std = float("0.127592") + min_val = float("-0.351997") + max_val = float("0.260742") + mean = float("0.0118419") + std = float("0.104813") data = None @@ -2855,10 +2855,10 @@ class Program_weight_tensor_parameter_263: name = "parameter_263" shape = [192, 128, 3, 3] dtype = "float32" - min_val = float("-0.109003") - max_val = float("0.0868493") - mean = 
float("-0.00035271") - std = float("0.00845821") + min_val = float("-0.0873539") + max_val = float("0.0704455") + mean = float("-0.000213424") + std = float("0.00708121") data = None @@ -2866,10 +2866,10 @@ class Program_weight_tensor_parameter_264: name = "parameter_264" shape = [128] dtype = "float32" - min_val = float("-2.14726") - max_val = float("1.36561") - mean = float("-0.674451") - std = float("0.681161") + min_val = float("-2.15168") + max_val = float("1.36722") + mean = float("-0.673621") + std = float("0.681958") data = None @@ -2877,10 +2877,10 @@ class Program_weight_tensor_parameter_265: name = "parameter_265" shape = [128] dtype = "float32" - min_val = float("0.375234") - max_val = float("2.23956") - mean = float("0.877769") - std = float("0.235655") + min_val = float("0.364727") + max_val = float("2.2521") + mean = float("0.875301") + std = float("0.236523") data = None @@ -2888,10 +2888,10 @@ class Program_weight_tensor_parameter_266: name = "parameter_266" shape = [128] dtype = "float32" - min_val = float("0.00172908") - max_val = float("0.0243042") - mean = float("0.00734318") - std = float("0.00288478") + min_val = float("0.000976585") + max_val = float("0.0184592") + mean = float("0.00559844") + std = float("0.0023354") data = None @@ -2899,10 +2899,10 @@ class Program_weight_tensor_parameter_267: name = "parameter_267" shape = [128] dtype = "float32" - min_val = float("-0.322838") - max_val = float("0.289878") - mean = float("-0.0726895") - std = float("0.129009") + min_val = float("-0.295433") + max_val = float("0.256224") + mean = float("-0.0630695") + std = float("0.112593") data = None @@ -2910,10 +2910,10 @@ class Program_weight_tensor_parameter_268: name = "parameter_268" shape = [128, 96, 1, 1] dtype = "float32" - min_val = float("-0.258942") - max_val = float("0.222471") - mean = float("-0.00147825") - std = float("0.0265093") + min_val = float("-0.211698") + max_val = float("0.196697") + mean = float("-0.00125294") + std = float("0.0226183") data = None @@ -2921,10 +2921,10 @@ class Program_weight_tensor_parameter_269: name = "parameter_269" shape = [96] dtype = "float32" - min_val = float("-0.0261312") - max_val = float("0.00440601") - mean = float("-0.00868593") - std = float("0.00785641") + min_val = float("-0.0253266") + max_val = float("0.00358212") + mean = float("-0.0083497") + std = float("0.00765327") data = None @@ -2932,10 +2932,10 @@ class Program_weight_tensor_parameter_270: name = "parameter_270" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.323766") - max_val = float("0.314811") - mean = float("-0.00588923") - std = float("0.0203338") + min_val = float("-0.293386") + max_val = float("0.279833") + mean = float("-0.00579641") + std = float("0.0184424") data = None @@ -2979,10 +2979,10 @@ class Program_weight_tensor_parameter_275: name = "parameter_275" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0754874") - max_val = float("0.0918744") - mean = float("-0.00116183") - std = float("0.0137375") + min_val = float("-0.0497361") + max_val = float("0.082633") + mean = float("-0.00112833") + std = float("0.0118656") data = None @@ -3026,10 +3026,10 @@ class Program_weight_tensor_parameter_280: name = "parameter_280" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0909754") - max_val = float("0.119188") - mean = float("-0.000469278") - std = float("0.0143324") + min_val = float("-0.0995478") + max_val = float("0.100891") + mean = float("-0.00032949") + std = float("0.012257") data = None @@ -3073,10 
+3073,10 @@ class Program_weight_tensor_parameter_285: name = "parameter_285" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.107025") - max_val = float("0.127668") - mean = float("-0.00119973") - std = float("0.0151861") + min_val = float("-0.09303") + max_val = float("0.111166") + mean = float("-0.00105886") + std = float("0.0130745") data = None @@ -3120,10 +3120,10 @@ class Program_weight_tensor_parameter_290: name = "parameter_290" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0825525") - max_val = float("0.0847429") - mean = float("-0.00293553") - std = float("0.0180257") + min_val = float("-0.0686653") + max_val = float("0.073192") + mean = float("-0.00264945") + std = float("0.015246") data = None @@ -3167,10 +3167,10 @@ class Program_weight_tensor_parameter_295: name = "parameter_295" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.121405") - max_val = float("0.106816") - mean = float("-0.00103162") - std = float("0.0139597") + min_val = float("-0.0915852") + max_val = float("0.0964297") + mean = float("-0.000904809") + std = float("0.0119445") data = None @@ -3214,10 +3214,10 @@ class Program_weight_tensor_parameter_300: name = "parameter_300" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.104387") - max_val = float("0.0933051") - mean = float("-0.000898517") - std = float("0.0163105") + min_val = float("-0.0993379") + max_val = float("0.0790286") + mean = float("-0.00075533") + std = float("0.0139095") data = None @@ -3261,10 +3261,10 @@ class Program_weight_tensor_parameter_305: name = "parameter_305" shape = [48, 96, 1, 1] dtype = "float32" - min_val = float("-0.127218") - max_val = float("0.131789") - mean = float("-0.00305303") - std = float("0.0260694") + min_val = float("-0.0993822") + max_val = float("0.120858") + mean = float("-0.00232535") + std = float("0.0225912") data = None @@ -3308,10 +3308,10 @@ class Program_weight_tensor_parameter_310: name = "parameter_310" shape = [48, 96, 1, 1] dtype = "float32" - min_val = float("-0.187551") - max_val = float("0.266311") - mean = float("0.000454803") - std = float("0.0279756") + min_val = float("-0.158166") + max_val = float("0.239253") + mean = float("0.000385605") + std = float("0.023753") data = None @@ -3319,10 +3319,10 @@ class Program_weight_tensor_parameter_311: name = "parameter_311" shape = [96] dtype = "float32" - min_val = float("-3.32387") - max_val = float("3.83534") - mean = float("0.265485") - std = float("1.21084") + min_val = float("-3.31552") + max_val = float("3.83527") + mean = float("0.267103") + std = float("1.21077") data = None @@ -3330,10 +3330,10 @@ class Program_weight_tensor_parameter_312: name = "parameter_312" shape = [96] dtype = "float32" - min_val = float("0.503076") - max_val = float("5.39398") - mean = float("1.12896") - std = float("0.545544") + min_val = float("0.510535") + max_val = float("5.40356") + mean = float("1.12504") + std = float("0.54684") data = None @@ -3341,10 +3341,10 @@ class Program_weight_tensor_parameter_313: name = "parameter_313" shape = [96] dtype = "float32" - min_val = float("0.0185778") - max_val = float("0.251561") - mean = float("0.0602414") - std = float("0.0415712") + min_val = float("0.0102515") + max_val = float("0.199232") + mean = float("0.0491335") + std = float("0.034631") data = None @@ -3352,10 +3352,10 @@ class Program_weight_tensor_parameter_314: name = "parameter_314" shape = [96] dtype = "float32" - min_val = float("-0.530586") - max_val = float("0.599692") - mean = float("-0.0329156") 
- std = float("0.184534") + min_val = float("-0.48286") + max_val = float("0.474603") + mean = float("-0.0301538") + std = float("0.175452") data = None @@ -3363,10 +3363,10 @@ class Program_weight_tensor_parameter_315: name = "parameter_315" shape = [96, 64, 3, 3] dtype = "float32" - min_val = float("-0.129865") - max_val = float("0.140642") - mean = float("-0.000231144") - std = float("0.0135591") + min_val = float("-0.0965118") + max_val = float("0.119571") + mean = float("-0.000173883") + std = float("0.0116758") data = None @@ -3410,10 +3410,10 @@ class Program_weight_tensor_parameter_320: name = "parameter_320" shape = [64, 48, 1, 1] dtype = "float32" - min_val = float("-0.217846") - max_val = float("0.197691") - mean = float("-0.00202445") - std = float("0.0390741") + min_val = float("-0.1726") + max_val = float("0.173966") + mean = float("-0.00244534") + std = float("0.0334275") data = None @@ -3430,10 +3430,10 @@ class Program_weight_tensor_parameter_322: name = "parameter_322" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.185619") - max_val = float("0.168747") - mean = float("-0.0126224") - std = float("0.0278031") + min_val = float("-0.162368") + max_val = float("0.15788") + mean = float("-0.0128963") + std = float("0.0255366") data = None @@ -3477,10 +3477,10 @@ class Program_weight_tensor_parameter_327: name = "parameter_327" shape = [24, 24, 1, 1] dtype = "float32" - min_val = float("-0.11592") - max_val = float("0.15413") - mean = float("-0.00151763") - std = float("0.0277352") + min_val = float("-0.101262") + max_val = float("0.144448") + mean = float("-0.00119167") + std = float("0.0240568") data = None @@ -3524,10 +3524,10 @@ class Program_weight_tensor_parameter_332: name = "parameter_332" shape = [24, 24, 3, 3] dtype = "float32" - min_val = float("-0.106175") - max_val = float("0.114008") - mean = float("-0.000884197") - std = float("0.0238809") + min_val = float("-0.0939293") + max_val = float("0.0880602") + mean = float("-0.000667988") + std = float("0.0206646") data = None @@ -3571,10 +3571,10 @@ class Program_weight_tensor_parameter_337: name = "parameter_337" shape = [24, 24, 3, 3] dtype = "float32" - min_val = float("-0.138643") - max_val = float("0.153767") - mean = float("-0.000248352") - std = float("0.0266901") + min_val = float("-0.121119") + max_val = float("0.170902") + mean = float("-0.000408415") + std = float("0.0230859") data = None @@ -3618,10 +3618,10 @@ class Program_weight_tensor_parameter_342: name = "parameter_342" shape = [24, 48, 1, 1] dtype = "float32" - min_val = float("-0.220553") - max_val = float("0.201403") - mean = float("-0.00385181") - std = float("0.04011") + min_val = float("-0.19645") + max_val = float("0.180409") + mean = float("-0.00327746") + std = float("0.0345822") data = None @@ -3665,10 +3665,10 @@ class Program_weight_tensor_parameter_347: name = "parameter_347" shape = [24, 48, 1, 1] dtype = "float32" - min_val = float("-0.232263") - max_val = float("0.179328") - mean = float("-0.00110555") - std = float("0.0447387") + min_val = float("-0.189487") + max_val = float("0.153256") + mean = float("-0.00117991") + std = float("0.037909") data = None @@ -3712,10 +3712,10 @@ class Program_weight_tensor_parameter_352: name = "parameter_352" shape = [48, 32, 3, 3] dtype = "float32" - min_val = float("-0.152721") - max_val = float("0.151792") - mean = float("-0.000535835") - std = float("0.0230336") + min_val = float("-0.165199") + max_val = float("0.114686") + mean = float("-0.000200965") + std = float("0.0197641") 
data = None @@ -3759,10 +3759,10 @@ class Program_weight_tensor_parameter_357: name = "parameter_357" shape = [32, 16, 3, 3] dtype = "float32" - min_val = float("-0.279318") - max_val = float("0.276461") - mean = float("-0.00085436") - std = float("0.0389004") + min_val = float("-0.233333") + max_val = float("0.23547") + mean = float("-0.000463458") + std = float("0.0335966") data = None @@ -3806,10 +3806,10 @@ class Program_weight_tensor_parameter_362: name = "parameter_362" shape = [16, 16, 3, 3] dtype = "float32" - min_val = float("-0.337845") - max_val = float("0.369192") - mean = float("-0.00069606") - std = float("0.0519452") + min_val = float("-0.296254") + max_val = float("0.317451") + mean = float("-0.000376615") + std = float("0.0448856") data = None @@ -3853,8 +3853,8 @@ class Program_weight_tensor_parameter_367: name = "parameter_367" shape = [16, 3, 3, 3] dtype = "float32" - min_val = float("-0.245822") - max_val = float("0.288484") - mean = float("-0.00340321") - std = float("0.0733952") + min_val = float("-0.229598") + max_val = float("0.264831") + mean = float("-0.00222273") + std = float("0.0664701") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py index 8b1378917..d58909a7b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_9/weight_meta.py @@ -1 +1,3860 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [384] + dtype = "float32" + min_val = float("-0.65221") + max_val = float("1.18847") + mean = float("0.0280678") + std = float("0.238602") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [384] + dtype = "float32" + min_val = float("0.840777") + max_val = float("1.38166") + mean = float("0.983157") + std = float("0.0682519") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [384] + dtype = "float32" + min_val = float("0.00367937") + max_val = float("0.0877201") + mean = float("0.0123265") + std = float("0.0078903") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [384] + dtype = "float32" + min_val = float("-0.194012") + max_val = float("0.0551918") + mean = float("-0.02945") + std = float("0.036829") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100636") + max_val = float("0.0644305") + mean = float("-0.000367326") + std = float("0.00632618") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.444539") + max_val = float("0.100308") + mean = float("-0.0845765") + std = float("0.104435") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192] + dtype = "float32" + min_val = float("0.827634") + max_val = float("1.20887") + mean = float("0.926304") + std = float("0.0461583") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("0.0022587") + max_val = float("0.0317087") + mean = float("0.0127237") + std = float("0.00671595") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192] + dtype = "float32" + min_val = float("-0.0420164") + max_val = float("0.0489711") + mean = 
float("7.80708e-05") + std = float("0.023324") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0494705") + max_val = float("0.0588015") + mean = float("-0.000304397") + std = float("0.00403892") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [192] + dtype = "float32" + min_val = float("-0.444539") + max_val = float("0.100308") + mean = float("-0.0845765") + std = float("0.104435") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("0.861989") + max_val = float("1.42115") + mean = float("1.11192") + std = float("0.0818402") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.00625506") + max_val = float("0.0604123") + mean = float("0.0170478") + std = float("0.00762762") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("-0.114496") + max_val = float("0.0773463") + mean = float("-0.0180438") + std = float("0.0309436") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0719425") + max_val = float("0.0798922") + mean = float("-0.000137273") + std = float("0.0037425") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192] + dtype = "float32" + min_val = float("-0.519307") + max_val = float("0.119213") + mean = float("-0.173655") + std = float("0.128078") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("0.843345") + max_val = float("1.65101") + mean = float("1.06412") + std = float("0.100931") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192] + dtype = "float32" + min_val = float("0.0129167") + max_val = float("0.102406") + mean = float("0.0387628") + std = float("0.0164023") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [192] + dtype = "float32" + min_val = float("-0.201974") + max_val = float("0.118718") + mean = float("-0.070128") + std = float("0.0516738") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0703397") + max_val = float("0.0838756") + mean = float("-0.000297626") + std = float("0.00409097") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.45538") + max_val = float("0.186694") + mean = float("-0.0819211") + std = float("0.101908") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.842136") + max_val = float("1.25451") + mean = float("1.02689") + std = float("0.0669984") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.00609049") + max_val = float("0.0366868") + mean = float("0.0132496") + std = float("0.00446736") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = 
float("-0.108315") + max_val = float("0.071515") + mean = float("-0.0205931") + std = float("0.0309136") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.102669") + max_val = float("0.10072") + mean = float("-0.000208583") + std = float("0.00580958") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.217714") + max_val = float("0.0349411") + mean = float("-0.0691139") + std = float("0.0385785") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192] + dtype = "float32" + min_val = float("0.843869") + max_val = float("1.15213") + mean = float("1.01543") + std = float("0.0502928") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [192] + dtype = "float32" + min_val = float("0.00469781") + max_val = float("0.0515763") + mean = float("0.0100063") + std = float("0.00473549") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [192] + dtype = "float32" + min_val = float("-0.100338") + max_val = float("0.103443") + mean = float("-0.028043") + std = float("0.0298177") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0445699") + max_val = float("0.0626305") + mean = float("-0.000342079") + std = float("0.00527897") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("-0.295573") + max_val = float("-0.00727007") + mean = float("-0.0908822") + std = float("0.0602539") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.782898") + max_val = float("1.34847") + mean = float("1.05287") + std = float("0.0658118") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("0.00853608") + max_val = float("0.0834335") + mean = float("0.0220792") + std = float("0.0106956") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192] + dtype = "float32" + min_val = float("-0.281933") + max_val = float("0.32665") + mean = float("-0.0476883") + std = float("0.0906137") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372086") + max_val = float("0.0434341") + mean = float("-9.58531e-05") + std = float("0.00301696") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192] + dtype = "float32" + min_val = float("-0.529319") + max_val = float("1.03253") + mean = float("0.1482") + std = float("0.259312") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [192] + dtype = "float32" + min_val = float("0.733167") + max_val = float("1.57011") + mean = float("1.01433") + std = float("0.106495") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [192] + dtype = "float32" + min_val = float("0.00533333") + max_val = float("0.0628113") + mean = float("0.0202291") + std = float("0.0104903") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + 
shape = [192] + dtype = "float32" + min_val = float("-0.274173") + max_val = float("0.190265") + mean = float("-0.0383734") + std = float("0.0518374") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.148329") + max_val = float("0.115949") + mean = float("-0.000786037") + std = float("0.0116676") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [96] + dtype = "float32" + min_val = float("-0.290208") + max_val = float("0.171692") + mean = float("-0.0709438") + std = float("0.105357") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [96] + dtype = "float32" + min_val = float("0.730214") + max_val = float("1.20725") + mean = float("0.877815") + std = float("0.0776628") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [96] + dtype = "float32" + min_val = float("0.00244432") + max_val = float("0.0359181") + mean = float("0.0138133") + std = float("0.00616422") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [96] + dtype = "float32" + min_val = float("-0.0390463") + max_val = float("0.0281496") + mean = float("-0.00737283") + std = float("0.0187507") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0574805") + max_val = float("0.0583116") + mean = float("-0.00132585") + std = float("0.00692157") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [96] + dtype = "float32" + min_val = float("-0.290208") + max_val = float("0.171692") + mean = float("-0.0709438") + std = float("0.105357") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [96] + dtype = "float32" + min_val = float("0.969898") + max_val = float("1.3202") + mean = float("1.13218") + std = float("0.0750079") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [96] + dtype = "float32" + min_val = float("0.00695661") + max_val = float("0.0436736") + mean = float("0.0204114") + std = float("0.00835407") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [96] + dtype = "float32" + min_val = float("-0.0615079") + max_val = float("0.0687841") + mean = float("-0.00728561") + std = float("0.0229485") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0888788") + max_val = float("0.0951908") + mean = float("-0.00012594") + std = float("0.00695592") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [96] + dtype = "float32" + min_val = float("-0.672726") + max_val = float("0.111066") + mean = float("-0.258997") + std = float("0.150238") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [96] + dtype = "float32" + min_val = float("0.802266") + max_val = float("1.40896") + mean = float("1.04531") + std = float("0.116924") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [96] + dtype = "float32" + min_val = float("0.0207014") + max_val = float("0.108587") + mean = float("0.0455437") + std = float("0.0186345") + data = None + + +class 
Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [96] + dtype = "float32" + min_val = float("-0.112693") + max_val = float("0.0516466") + mean = float("-0.0468167") + std = float("0.0294565") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0830006") + max_val = float("0.106216") + mean = float("-0.00053104") + std = float("0.00775187") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [96] + dtype = "float32" + min_val = float("-0.642948") + max_val = float("0.150819") + mean = float("-0.155351") + std = float("0.115348") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [96] + dtype = "float32" + min_val = float("0.84901") + max_val = float("1.26259") + mean = float("1.03349") + std = float("0.0720544") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [96] + dtype = "float32" + min_val = float("0.00947266") + max_val = float("0.0469677") + mean = float("0.0192275") + std = float("0.00716215") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [96] + dtype = "float32" + min_val = float("-0.135148") + max_val = float("0.0288217") + mean = float("-0.0359178") + std = float("0.0292974") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0792001") + max_val = float("0.0950111") + mean = float("-0.00062642") + std = float("0.0105349") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [96] + dtype = "float32" + min_val = float("-0.198091") + max_val = float("0.0826105") + mean = float("-0.0299011") + std = float("0.0459564") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [96] + dtype = "float32" + min_val = float("0.685979") + max_val = float("1.33637") + mean = float("0.95441") + std = float("0.0883622") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [96] + dtype = "float32" + min_val = float("0.00495099") + max_val = float("0.0554625") + mean = float("0.012309") + std = float("0.00627308") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [96] + dtype = "float32" + min_val = float("-0.0965082") + max_val = float("0.0676515") + mean = float("-0.0128269") + std = float("0.0323591") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0773482") + max_val = float("0.0795383") + mean = float("-0.000205562") + std = float("0.00892342") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [96] + dtype = "float32" + min_val = float("-0.3351") + max_val = float("0.0180339") + mean = float("-0.108666") + std = float("0.0839689") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [96] + dtype = "float32" + min_val = float("0.730829") + max_val = float("1.20386") + mean = float("1.0551") + std = float("0.0746036") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [96] + dtype = "float32" + min_val = float("0.00893169") + max_val = float("0.0609817") + mean = float("0.0216752") + std = 
float("0.0103436") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [96] + dtype = "float32" + min_val = float("-0.43779") + max_val = float("0.530239") + mean = float("-0.00359941") + std = float("0.156358") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0647316") + max_val = float("0.0613698") + mean = float("-1.86625e-05") + std = float("0.00623329") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [96] + dtype = "float32" + min_val = float("-1.07261") + max_val = float("2.35998") + mean = float("0.312216") + std = float("0.587121") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [96] + dtype = "float32" + min_val = float("0.476972") + max_val = float("1.40751") + mean = float("0.884046") + std = float("0.166927") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [96] + dtype = "float32" + min_val = float("0.00642742") + max_val = float("0.129825") + mean = float("0.0330591") + std = float("0.0219559") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [96] + dtype = "float32" + min_val = float("-0.294202") + max_val = float("0.222749") + mean = float("-0.0136424") + std = float("0.071643") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.188209") + max_val = float("0.120938") + mean = float("-0.000991715") + std = float("0.0220383") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0700832") + max_val = float("0.0649644") + mean = float("-0.00214276") + std = float("0.0125585") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.136078") + max_val = float("0.180716") + mean = float("-0.000282686") + std = float("0.0138341") + data = None + + 
+class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.114709") + max_val = float("0.160677") + mean = float("-0.000863383") + std = float("0.0150497") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.241983") + max_val = float("0.156755") + mean = float("-0.000908695") + std = float("0.0191741") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.112347") + max_val = float("0.148275") + mean = float("0.00022258") + std = float("0.0143593") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.354063") + max_val = float("0.389439") + mean = float("-0.0077798") + std = float("0.135547") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.581944") + max_val = float("1.61146") + mean = float("0.798361") + std = float("0.141114") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.00926414") + max_val = float("0.0779837") + mean = float("0.0233765") + std = float("0.011586") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = 
[96] + dtype = "float32" + min_val = float("-0.205203") + max_val = float("0.065409") + mean = float("-0.0368353") + std = float("0.0408489") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.107548") + max_val = float("0.121609") + mean = float("-0.00106908") + std = float("0.0142625") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.33747") + max_val = float("0.174782") + mean = float("-0.0799299") + std = float("0.089405") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.697042") + max_val = float("1.47363") + mean = float("0.990958") + std = float("0.0990363") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.0106758") + max_val = float("0.0901736") + mean = float("0.0233258") + std = float("0.00975982") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.218067") + max_val = float("0.191552") + mean = float("-0.054689") + std = float("0.0548119") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.11401") + max_val = float("0.147871") + mean = float("-0.00132952") + std = float("0.0140748") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.308172") + max_val = float("0.100695") + mean = float("-0.0814609") + std = float("0.0991118") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.552538") + max_val = float("0.935439") + mean = float("0.809676") + std = float("0.0654097") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.0077781") + max_val = float("0.0360303") + mean = float("0.0172914") + std = float("0.0054044") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.0473472") + max_val = float("0.0334054") + mean = float("-0.0169048") + std = float("0.0189166") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0532353") + max_val = float("0.0545219") + mean = float("-0.0018903") + std = float("0.00933137") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.308172") + max_val = float("0.100695") + mean = float("-0.0814609") + std = float("0.0991118") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.843695") + max_val = float("1.28928") + mean = float("1.0347") + std = float("0.0944676") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.0169339") + max_val = float("0.161601") + mean = float("0.0363965") + std = float("0.0171841") + data = None + + +class 
Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.0918578") + max_val = float("0.0336943") + mean = float("-0.0247889") + std = float("0.0245036") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.101803") + max_val = float("0.200902") + mean = float("-0.000267266") + std = float("0.0083296") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.728683") + max_val = float("0.317979") + mean = float("-0.275123") + std = float("0.174815") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.764105") + max_val = float("1.3124") + mean = float("1.04343") + std = float("0.115762") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + dtype = "float32" + min_val = float("0.0272856") + max_val = float("0.0961064") + mean = float("0.0504816") + std = float("0.0161463") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.135954") + max_val = float("0.0746318") + mean = float("-0.0577229") + std = float("0.0449165") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.185749") + max_val = float("0.160808") + mean = float("-0.000603971") + std = float("0.00986059") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-0.646446") + max_val = float("0.382953") + mean = float("-0.253615") + std = float("0.209383") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.737499") + max_val = float("1.37821") + mean = float("1.02572") + std = float("0.122249") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00985305") + max_val = float("0.0428714") + mean = float("0.0193938") + std = float("0.00597774") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.372134") + max_val = float("0.319757") + mean = float("0.0183732") + std = float("0.0780363") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.203733") + max_val = float("0.134243") + mean = float("-0.000669446") + std = float("0.0129484") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [96] + dtype = "float32" + min_val = float("-0.238657") + max_val = float("0.170206") + mean = float("-0.0409469") + std = float("0.0883869") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [96] + dtype = "float32" + min_val = float("0.915723") + max_val = float("1.41222") + mean = float("1.07273") + std = float("0.0922586") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [96] + dtype = "float32" + min_val = float("0.00786671") + max_val = float("0.0588079") + mean = float("0.0163456") 
+ std = float("0.0066827") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [96] + dtype = "float32" + min_val = float("-0.11548") + max_val = float("0.0928178") + mean = float("0.00883762") + std = float("0.0363849") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0962928") + max_val = float("0.16056") + mean = float("-0.000559513") + std = float("0.0114298") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.538624") + max_val = float("-0.101332") + mean = float("-0.294138") + std = float("0.0707416") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.651882") + max_val = float("1.08011") + mean = float("0.852092") + std = float("0.0724292") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.0137887") + max_val = float("0.0661338") + mean = float("0.0268937") + std = float("0.00906335") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.120176") + max_val = float("0.0680941") + mean = float("-0.0396069") + std = float("0.0327503") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0639447") + max_val = float("0.0672375") + mean = float("-0.000833786") + std = float("0.0103229") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [384] + dtype = "float32" + min_val = float("-0.521228") + max_val = float("0.213986") + mean = float("-0.1682") + std = float("0.0775017") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [384] + dtype = "float32" + min_val = float("0.850237") + max_val = float("1.39388") + mean = float("1.0626") + std = float("0.0773034") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [384] + dtype = "float32" + min_val = float("0.0088734") + max_val = float("0.0441515") + mean = float("0.0191098") + std = float("0.00573499") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [384] + dtype = "float32" + min_val = float("-0.129402") + max_val = float("0.0875489") + mean = float("-0.0445092") + std = float("0.0396322") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.121659") + max_val = float("0.133055") + mean = float("-0.000702422") + std = float("0.00934463") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.382852") + max_val = float("0.227352") + mean = float("-0.117886") + std = float("0.101914") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.86878") + max_val = float("1.51462") + mean = float("1.12296") + std = float("0.11892") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = 
float("0.0805715") + max_val = float("0.861815") + mean = float("0.24173") + std = float("0.115689") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-1.76276") + max_val = float("1.36192") + mean = float("-0.20097") + std = float("0.618042") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.143123") + max_val = float("0.101835") + mean = float("-0.000122669") + std = float("0.00812429") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.242929") + max_val = float("0.168527") + mean = float("-0.0174021") + std = float("0.0538902") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.618325") + max_val = float("1.01596") + mean = float("0.837489") + std = float("0.06312") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00769304") + max_val = float("0.0329157") + mean = float("0.0170219") + std = float("0.00463802") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.141686") + max_val = float("0.0792819") + mean = float("-0.0667949") + std = float("0.0484579") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0463403") + max_val = float("0.0758786") + mean = float("-0.00171087") + std = float("0.00771467") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.242929") + max_val = float("0.168527") + mean = float("-0.0174021") + std = float("0.0538902") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.874574") + max_val = float("1.46208") + mean = float("1.1059") + std = float("0.129661") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("0.0313595") + max_val = float("0.113252") + mean = float("0.0588345") + std = float("0.0156086") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.332132") + max_val = float("0.010893") + mean = float("-0.152239") + std = float("0.0624963") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0503168") + max_val = float("0.0641412") + mean = float("-0.000498968") + std = float("0.00471767") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.311021") + max_val = float("0.0670138") + mean = float("-0.114925") + std = float("0.0801899") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.909746") + max_val = float("1.44623") + mean = float("1.10815") + std = float("0.101996") + data = None + + +class Program_weight_tensor_parameter_162: + name = 
"parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0399431") + max_val = float("0.140144") + mean = float("0.0747907") + std = float("0.0215638") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.533458") + max_val = float("0.244514") + mean = float("-0.168827") + std = float("0.11435") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566842") + max_val = float("0.0514181") + mean = float("-0.000587536") + std = float("0.00527014") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.444627") + max_val = float("0.412033") + mean = float("-0.137488") + std = float("0.130168") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.95351") + max_val = float("1.37306") + mean = float("1.11002") + std = float("0.0723609") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.0551676") + max_val = float("0.229055") + mean = float("0.0906291") + std = float("0.0274345") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.313806") + max_val = float("0.492745") + mean = float("-0.132664") + std = float("0.085465") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0601338") + max_val = float("0.10781") + mean = float("-0.000964638") + std = float("0.00915433") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.163877") + max_val = float("0.00112327") + mean = float("-0.0651513") + std = float("0.0261511") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.819712") + max_val = float("1.06624") + mean = float("0.968869") + std = float("0.0460872") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.0346107") + max_val = float("0.137989") + mean = float("0.054459") + std = float("0.0124478") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.200298") + max_val = float("0.0295223") + mean = float("-0.0891939") + std = float("0.0476986") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0357362") + max_val = float("0.0637335") + mean = float("-0.000787404") + std = float("0.00752266") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [512] + dtype = "float32" + min_val = float("-4.82803") + max_val = float("-0.112013") + mean = float("-2.29502") + std = float("0.775166") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [512] + dtype = "float32" + min_val = float("2.10203") + max_val = float("5.21664") + mean = float("3.70064") + std = float("0.482744") + data 
= None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [512] + dtype = "float32" + min_val = float("0.00190724") + max_val = float("0.0148178") + mean = float("0.00510842") + std = float("0.00181044") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [512] + dtype = "float32" + min_val = float("-0.130767") + max_val = float("0.0826418") + mean = float("-0.0410784") + std = float("0.0246182") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.106883") + max_val = float("0.153104") + mean = float("-0.00124309") + std = float("0.00937421") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [384] + dtype = "float32" + min_val = float("-0.0182735") + max_val = float("-0.000488762") + mean = float("-0.0066305") + std = float("0.00403925") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.243656") + max_val = float("0.180358") + mean = float("-0.00257278") + std = float("0.00811913") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("-2.38777") + max_val = float("3.15932") + mean = float("-0.20407") + std = float("0.562338") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("0.123779") + max_val = float("2.40527") + mean = float("0.524516") + std = float("0.334825") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("0.000126989") + max_val = float("0.0045594") + mean = float("0.000819872") + std = float("0.000558532") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.0804045") + max_val = float("0.119949") + mean = float("0.0138328") + std = float("0.0270058") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0707601") + max_val = float("0.0621697") + mean = float("-0.000379924") + std = float("0.00602374") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("-2.38777") + max_val = float("3.15932") + mean = float("-0.20407") + std = float("0.562338") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("0.67694") + max_val = float("3.07362") + mean = float("1.54467") + std = float("0.450817") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192] + dtype = "float32" + min_val = float("0.00215705") + max_val = float("0.0356044") + mean = float("0.00897467") + std = float("0.00458554") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.241985") + max_val = float("0.257354") + mean = float("0.0137577") + std = float("0.0550272") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = 
float("-0.0901592") + max_val = float("0.0804") + mean = float("-9.38039e-05") + std = float("0.00532701") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("-3.43174") + max_val = float("1.16938") + mean = float("-1.42833") + std = float("0.634694") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("0.389358") + max_val = float("1.7276") + mean = float("1.0897") + std = float("0.190293") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192] + dtype = "float32" + min_val = float("0.0526142") + max_val = float("0.270718") + mean = float("0.108176") + std = float("0.0325536") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [192] + dtype = "float32" + min_val = float("-1.54162") + max_val = float("0.471207") + mean = float("-0.283055") + std = float("0.214286") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0845723") + max_val = float("0.0650064") + mean = float("-0.000462494") + std = float("0.00626612") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [192] + dtype = "float32" + min_val = float("-3.87665") + max_val = float("4.23691") + mean = float("-0.62962") + std = float("0.987882") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [192] + dtype = "float32" + min_val = float("0.580822") + max_val = float("4.17446") + mean = float("1.54478") + std = float("0.398628") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [192] + dtype = "float32" + min_val = float("0.00595442") + max_val = float("0.0273129") + mean = float("0.011053") + std = float("0.00374204") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.22698") + max_val = float("0.189248") + mean = float("0.0593732") + std = float("0.0450403") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.114839") + max_val = float("0.085935") + mean = float("-0.00162257") + std = float("0.011314") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("-2.93751") + max_val = float("1.02421") + mean = float("-0.427093") + std = float("0.681453") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("0.698228") + max_val = float("3.61037") + mean = float("1.48106") + std = float("0.505456") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192] + dtype = "float32" + min_val = float("0.00245686") + max_val = float("0.012822") + mean = float("0.00510958") + std = float("0.00159036") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.100272") + max_val = float("0.105996") + mean = float("0.0195239") + std = float("0.0348524") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = 
[192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0708848") + max_val = float("0.0850015") + mean = float("-0.000706931") + std = float("0.00901731") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [384] + dtype = "float32" + min_val = float("-2.84209") + max_val = float("1.12257") + mean = float("-0.753555") + std = float("0.497094") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [384] + dtype = "float32" + min_val = float("0.417665") + max_val = float("1.80337") + mean = float("0.867666") + std = float("0.218119") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [384] + dtype = "float32" + min_val = float("0.0108198") + max_val = float("0.0878778") + mean = float("0.0200508") + std = float("0.00708447") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [384] + dtype = "float32" + min_val = float("-0.552474") + max_val = float("0.359625") + mean = float("0.0190155") + std = float("0.105451") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.050684") + max_val = float("0.0606495") + mean = float("-0.000221799") + std = float("0.00520848") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [256] + dtype = "float32" + min_val = float("-2.81734") + max_val = float("1.46527") + mean = float("-1.07834") + std = float("0.63321") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [256] + dtype = "float32" + min_val = float("0.430261") + max_val = float("1.7692") + mean = float("0.978084") + std = float("0.17059") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [256] + dtype = "float32" + min_val = float("0.00302249") + max_val = float("0.0150254") + mean = float("0.00711712") + std = float("0.0018291") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [256] + dtype = "float32" + min_val = float("-0.242576") + max_val = float("0.242969") + mean = float("-0.0674006") + std = float("0.0835729") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.11703") + max_val = float("0.206335") + mean = float("-0.00133698") + std = float("0.016598") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("-0.0214622") + max_val = float("0.00204459") + mean = float("-0.00682654") + std = float("0.00523474") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.274972") + max_val = float("0.196684") + mean = float("-0.00446271") + std = float("0.0117188") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-2.27808") + max_val = float("0.747421") + mean = float("-0.117479") + std = float("0.506792") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.0587442") + max_val = float("2.30527") + mean = float("0.261103") + std = float("0.366411") + data = None + + 
+class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("9.94584e-12") + max_val = float("0.00288558") + mean = float("0.000611538") + std = float("0.000479206") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("-0.0613965") + max_val = float("0.0867185") + mean = float("0.0075244") + std = float("0.022618") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0458095") + max_val = float("0.0794823") + mean = float("-0.000380958") + std = float("0.0066521") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-2.27808") + max_val = float("0.747421") + mean = float("-0.117479") + std = float("0.506792") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("0.348389") + max_val = float("3.24093") + mean = float("1.29082") + std = float("0.633395") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.00469999") + max_val = float("0.0420037") + mean = float("0.0162317") + std = float("0.00640931") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("-0.198405") + max_val = float("0.208243") + mean = float("0.0278847") + std = float("0.0747534") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0703987") + max_val = float("0.0847217") + mean = float("-0.000400512") + std = float("0.00880434") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-2.79718") + max_val = float("1.50453") + mean = float("-1.09173") + std = float("0.69636") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("0.319783") + max_val = float("1.80086") + mean = float("1.07317") + std = float("0.21342") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.0391363") + max_val = float("0.160891") + mean = float("0.0831981") + std = float("0.0236604") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("-1.59676") + max_val = float("0.430385") + mean = float("-0.176851") + std = float("0.317024") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0684623") + max_val = float("0.0821956") + mean = float("-0.000727853") + std = float("0.00945016") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-2.53961") + max_val = float("0.660095") + mean = float("-0.0506683") + std = float("0.473015") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.0761853") + max_val = float("3.15108") + mean = 
float("0.280991") + std = float("0.409237") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("2.2112e-10") + max_val = float("0.0182811") + mean = float("0.0018356") + std = float("0.00247258") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("-0.0564698") + max_val = float("0.134674") + mean = float("0.0208637") + std = float("0.0323877") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.149971") + max_val = float("0.0877827") + mean = float("-0.00159757") + std = float("0.00996893") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-2.53961") + max_val = float("0.660096") + mean = float("-0.0506683") + std = float("0.473015") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("0.340415") + max_val = float("2.99317") + mean = float("0.929372") + std = float("0.41222") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.0101736") + max_val = float("0.0521186") + mean = float("0.0255179") + std = float("0.00892784") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("-0.226393") + max_val = float("0.221011") + mean = float("0.0450625") + std = float("0.0782651") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0614525") + max_val = float("0.0642893") + mean = float("-0.000703378") + std = float("0.00896078") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-2.01737") + max_val = float("1.65537") + mean = float("-0.920561") + std = float("0.650231") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96] + dtype = "float32" + min_val = float("0.434872") + max_val = float("1.96317") + mean = float("1.06433") + std = float("0.227725") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("0.0165936") + max_val = float("0.132837") + mean = float("0.0345742") + std = float("0.0150947") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("-2.3732") + max_val = float("0.24267") + mean = float("-0.0505484") + std = float("0.287143") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.127211") + max_val = float("0.160397") + mean = float("-0.000544621") + std = float("0.0101928") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-1.61344") + max_val = float("1.88195") + mean = float("0.00484379") + std = float("0.837537") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96] + dtype = "float32" + min_val = 
float("0.347101") + max_val = float("1.32016") + mean = float("0.70129") + std = float("0.236267") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("0.0166919") + max_val = float("0.0891169") + mean = float("0.0374108") + std = float("0.0147788") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("-0.381066") + max_val = float("0.566263") + mean = float("-0.0972959") + std = float("0.136521") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.143906") + max_val = float("0.121873") + mean = float("-0.00172717") + std = float("0.0164963") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-2.46841") + max_val = float("1.71052") + mean = float("0.339601") + std = float("0.678564") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96] + dtype = "float32" + min_val = float("0.541707") + max_val = float("4.87976") + mean = float("1.48201") + std = float("0.958387") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("0.0122307") + max_val = float("0.120423") + mean = float("0.0291796") + std = float("0.0157993") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("-0.296455") + max_val = float("0.260254") + mean = float("-0.00596865") + std = float("0.132222") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.111668") + max_val = float("0.214196") + mean = float("-0.000763054") + std = float("0.0168732") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("-4.44408") + max_val = float("2.00967") + mean = float("-0.0987827") + std = float("0.883275") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("0.570935") + max_val = float("4.51306") + mean = float("1.08264") + std = float("0.425298") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.0126533") + max_val = float("0.136437") + mean = float("0.0356273") + std = float("0.0206117") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("-0.392763") + max_val = float("0.345645") + mean = float("0.022741") + std = float("0.127592") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.109003") + max_val = float("0.0868493") + mean = float("-0.00035271") + std = float("0.00845821") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [128] + dtype = "float32" + min_val = float("-2.14726") + max_val = float("1.36561") + mean = float("-0.674451") + std = float("0.681161") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape 
= [128] + dtype = "float32" + min_val = float("0.375234") + max_val = float("2.23956") + mean = float("0.877769") + std = float("0.235655") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [128] + dtype = "float32" + min_val = float("0.00172908") + max_val = float("0.0243042") + mean = float("0.00734318") + std = float("0.00288478") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [128] + dtype = "float32" + min_val = float("-0.322838") + max_val = float("0.289878") + mean = float("-0.0726895") + std = float("0.129009") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.258942") + max_val = float("0.222471") + mean = float("-0.00147825") + std = float("0.0265093") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.0261312") + max_val = float("0.00440601") + mean = float("-0.00868593") + std = float("0.00785641") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.323766") + max_val = float("0.314811") + mean = float("-0.00588923") + std = float("0.0203338") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0754874") + max_val = float("0.0918744") + mean = float("-0.00116183") + std = float("0.0137375") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0909754") + max_val = float("0.119188") + mean = float("-0.000469278") + std = float("0.0143324") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [48] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.107025") + max_val = float("0.127668") + mean = float("-0.00119973") + std = float("0.0151861") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0825525") + max_val = float("0.0847429") + mean = float("-0.00293553") + std = float("0.0180257") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.121405") + max_val = float("0.106816") + mean = float("-0.00103162") + std = float("0.0139597") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.104387") + max_val = float("0.0933051") + mean = float("-0.000898517") + std = float("0.0163105") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.127218") + max_val = float("0.131789") + mean = float("-0.00305303") + std = float("0.0260694") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.187551") + max_val = float("0.266311") + mean = float("0.000454803") + std = float("0.0279756") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("-3.32387") + max_val = float("3.83534") + mean = float("0.265485") + std = float("1.21084") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("0.503076") + max_val = float("5.39398") + mean = float("1.12896") + std = float("0.545544") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96] + dtype = "float32" + min_val = float("0.0185778") + max_val = float("0.251561") + mean = float("0.0602414") + std = float("0.0415712") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [96] + dtype = "float32" + min_val = float("-0.530586") + max_val = float("0.599692") + mean = float("-0.0329156") + std = float("0.184534") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.129865") + max_val = float("0.140642") + mean = float("-0.000231144") + std = float("0.0135591") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [64] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.217846") + max_val = float("0.197691") + mean = float("-0.00202445") + std = float("0.0390741") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.185619") + max_val = float("0.168747") + mean = float("-0.0126224") + std = float("0.0278031") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.11592") + max_val = float("0.15413") + mean = float("-0.00151763") + std = float("0.0277352") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.106175") + max_val = float("0.114008") + mean = float("-0.000884197") + std = float("0.0238809") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.138643") + max_val = float("0.153767") + mean = float("-0.000248352") + std = float("0.0266901") + data = None + + +class 
Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.220553") + max_val = float("0.201403") + mean = float("-0.00385181") + std = float("0.04011") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.232263") + max_val = float("0.179328") + mean = float("-0.00110555") + std = float("0.0447387") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.152721") + max_val = float("0.151792") + mean = float("-0.000535835") + std = float("0.0230336") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [32, 16, 3, 
3] + dtype = "float32" + min_val = float("-0.279318") + max_val = float("0.276461") + mean = float("-0.00085436") + std = float("0.0389004") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.337845") + max_val = float("0.369192") + mean = float("-0.00069606") + std = float("0.0519452") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.245822") + max_val = float("0.288484") + mean = float("-0.00340321") + std = float("0.0733952") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt index 21b305551..248541aad 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/graph_hash.txt @@ -1 +1 @@ -700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file +c94cf7aa14030cc58b016c1f88c7cd23c6ff7ce7be2dce9f0f95c5469f2ac412 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py index dce0b815d..12c56f973 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/input_meta.py @@ -1,28 +1,76 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 5376, 4] - dtype = "float32" - min_val = float("0.01") - max_val = float("0.01") - mean = float("0.01") - std = float("9.31323e-10") - data = None + shape = [] + dtype = "int64" + data = [12] class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 5376] - dtype = "int32" - min_val = 0 - max_val = 4 - data = None + shape = [] + dtype = "int64" + data = [5376] class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 5376, 4] + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000612289") + std = float("0.0247369") + data = None + + 
+class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00734747") + std = float("0.0854019") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("326.78") + mean = float("160.052") + std = float("110.95") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 12, 5376] + dtype = "float32" + max_val = float("0.00716361") + mean = float("2.14567e-06") + std = float("8.59189e-05") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 12, 5376] dtype = "float32" max_val = float("0.945922") - mean = float("0.00102082") - std = float("0.0260199") + mean = float("0.00158376") + std = float("0.0267929") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py index 7c04ede5b..4ae94d4a6 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_0/model.py @@ -5,106 +5,197 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xi32) <- () + def forward( + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + ): + # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - data_1 % paddle.cast(full_0, data_1.dtype), full_0 + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 + + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 + + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 + + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 + + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - del data_1, full_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_2, data_1] + + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del 
combine_0 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 - # pd_op.slice: (2x-1x4xf32) <- (2x-1x5xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del full_int_array_0, full_int_array_1, one_hot_0 - # pd_op.pow: (2x-1x4xf32) <- (2x-1x4xf32) - pow_0 = paddle._C_ops.pow(data_0, float("2")) + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32) - scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) - del pow_0 + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 + + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.scale: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) - del full_2 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 - # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) - multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 - # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) - multiply_1 = paddle._C_ops.multiply(data_2, slice_0) - del slice_0 + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 - # pd_op.add: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) - add_0 = paddle._C_ops.add(multiply_0, multiply_1) + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) - # pd_op.bce_loss: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) - bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) - del data_0 + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 + ) + del full_6 - # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x4xf32) - multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + # pd_op.full: (4xi64) <- () + full_7 = paddle._C_ops.full( + [4], float("0"), paddle.int64, 
paddle.framework._current_expected_place() + ) - # pd_op.full_int_array: (0xi64) <- () - full_int_array_2 = [] + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_7 - # pd_op.sum: (xf32) <- (2x-1x4xf32, 0xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 - # pd_op.sum: (xf32) <- (2x-1x4xf32, 0xi64) - sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) - del data_2 + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) + del data_7 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 + + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( + full_8 = paddle._C_ops.full( [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 - # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) - del full_3, full_4, sum_1 - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_0, clip_0) - del ( - add_0, - bce_loss_0, - clip_0, - full_1, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - scale_0, - scale_1, - sum_0, - ) + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] + + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 + + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 + + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 - return divide_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt index 248541aad..c00ab6d8b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/graph_hash.txt @@ -1 +1 @@ -c94cf7aa14030cc58b016c1f88c7cd23c6ff7ce7be2dce9f0f95c5469f2ac412 \ No newline at end of file +65d3614abbd2ef389b7cf238f14225f701dc8087775ebca72684cad377953c54 \ No newline at end of 
file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py index 12c56f973..f58dc071b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/input_meta.py @@ -1,76 +1,9 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [12] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [5376] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 12, 5376] + shape = [2, 3, 640, 640] dtype = "float32" - max_val = float("1.0") - mean = float("0.000612289") - std = float("0.0247369") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 12, 1] - dtype = "int32" - data = [0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 5376] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00734747") - std = float("0.0854019") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 12, 4] - dtype = "float32" - max_val = float("326.78") - mean = float("160.052") - std = float("110.95") - data = None - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 12, 5376] - dtype = "float32" - max_val = float("0.00716361") - mean = float("2.14567e-06") - std = float("8.59189e-05") - data = None - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 12, 5376] - dtype = "float32" - max_val = float("0.945922") - mean = float("0.00158376") - std = float("0.0267929") + min_val = float("-2.01516") + max_val = float("2.64") + mean = float("0.187747") + std = float("0.681331") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py index 4ae94d4a6..f20830b9f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/model.py @@ -6,196 +6,4268 @@ def __init__(self): super().__init__() def forward( - self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + 
parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, 
+ parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + data_0, ): - # pd_op.full: (1xi64) <- () + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_0, parameter_422, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_0, parameter_422 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_421, + parameter_420, + parameter_419, + parameter_418, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_418, parameter_419, parameter_420, parameter_421 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_417, [1, 1], 
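# The batch_norm_ op above (and every one that follows) is wrapped in an immediately
# invoked lambda, (lambda x, f: f(x))(paddle._C_ops.batch_norm(...), lambda out: ...),
# whose only purpose is to return six values whether the op yields a list/tuple or a
# single tensor. A named helper with the same behaviour (an illustrative sketch, not
# part of the exported graph; the name is hypothetical):
def as_six_outputs(out):
    # The graph comments list six results per batch_norm_: the normalized tensor,
    # four per-channel fp32 vectors, and a ui8 reserve buffer.
    if isinstance(out, (list, tuple)):
        return out
    return (out, None, None, None, None, None)
# so each wrapped call can be read as: as_six_outputs(paddle._C_ops.batch_norm(...)).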
[1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_417, swish_0 + + # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_416, + parameter_415, + parameter_414, + parameter_413, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_413, parameter_414, parameter_415, parameter_416 + + # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_412, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_412, swish_1 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_411, + parameter_410, + parameter_409, + parameter_408, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_408, parameter_409, parameter_410, parameter_411 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_407, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_407, swish_2 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_406, + parameter_405, + parameter_404, + parameter_403, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_403, parameter_404, parameter_405, parameter_406 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_402, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_402 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_401, + parameter_400, + parameter_399, + parameter_398, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_398, 
parameter_399, parameter_400, parameter_401 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_397, swish_3 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_396, + parameter_395, + parameter_394, + parameter_393, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_393, parameter_394, parameter_395, parameter_396 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_392, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_392 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_391, + parameter_390, + parameter_389, + parameter_388, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_388, parameter_389, parameter_390, parameter_391 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_387, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_387 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_386, + parameter_385, + parameter_384, + parameter_383, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_383, parameter_384, parameter_385, parameter_386 + + # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_382, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_382, swish_6 + + # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_381, + parameter_380, + parameter_379, + parameter_378, + True, + float("0.9"), + 
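# Each backbone step traced so far is the same Conv2D -> BatchNorm -> Swish triple
# written out op-by-op against paddle._C_ops. An equivalent module using the public
# paddle.nn API (an illustrative sketch; the class and argument names are ours, not
# the exported graph's):
import paddle.nn as nn

class ConvBNSwish(nn.Layer):
    def __init__(self, in_ch, out_ch, kernel, stride=1, padding=0):
        super().__init__()
        # The dump never adds a bias after these convs, hence bias_attr=False.
        self.conv = nn.Conv2D(in_ch, out_ch, kernel, stride=stride,
                              padding=padding, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch, momentum=0.9, epsilon=1e-5)
        self.act = nn.Swish()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

# For example, conv2d_0 / batch_norm__0 / swish_0 above correspond to
# ConvBNSwish(3, 16, 3, stride=2, padding=1) applied to data_0.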
float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_378, parameter_379, parameter_380, parameter_381 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) + del batch_norm__42, batch_norm__48 + + # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) + combine_0 = [swish_4, add_1] + del add_1, swish_4 + + # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + mean_0, parameter_377, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_377 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_376, full_int_array_1) + del parameter_376 + + # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_9, reshape_0) + del conv2d_9, reshape_0 + + # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_2, float("0.166667"), float("0.5") + ) + del add_2 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) + multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_0, parameter_375, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_0, parameter_375 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_374, + parameter_373, + parameter_372, + parameter_371, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_371, parameter_372, parameter_373, parameter_374 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_370, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_370, swish_8 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__60, + 
batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_369, + parameter_368, + parameter_367, + parameter_366, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_366, parameter_367, parameter_368, parameter_369 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__60) + del batch_norm__60 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + swish_9, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_365 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_364, + parameter_363, + parameter_362, + parameter_361, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_361, parameter_362, parameter_363, parameter_364 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__66) + del batch_norm__66 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_9, parameter_360, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_360, swish_9 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_359, + parameter_358, + parameter_357, + parameter_356, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_356, parameter_357, parameter_358, parameter_359 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_355, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_355 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_354, + parameter_353, + parameter_352, + parameter_351, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_351, parameter_352, parameter_353, parameter_354 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- 
(2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_12, parameter_350, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_350 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_349, + parameter_348, + parameter_347, + parameter_346, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_346, parameter_347, parameter_348, parameter_349 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_12, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_345, swish_12 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_344, + parameter_343, + parameter_342, + parameter_341, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_341, parameter_342, parameter_343, parameter_344 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) + del batch_norm__84, batch_norm__90 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_13 = paddle._C_ops.swish(add_3) + del add_3 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(swish_11, swish_13) + del swish_11, swish_13 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + add_4, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_340 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_339, + parameter_338, + parameter_337, + parameter_336, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_336, parameter_337, parameter_338, parameter_339 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_335, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_335 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, 
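# The unit just traced (conv2d_14 .. add_4) repeats throughout the backbone: a 3x3
# ConvBNSwish, then a parallel 3x3 ConvBN and 1x1 ConvBN that are summed and passed
# through Swish, with a skip connection from the unit's input. A structural sketch
# (class names are ours; ConvBN mirrors the conv + batch_norm pairs in the dump):
import paddle.nn as nn

class ConvBN(nn.Layer):
    def __init__(self, in_ch, out_ch, kernel, padding=0):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, kernel, padding=padding, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch, momentum=0.9, epsilon=1e-5)

    def forward(self, x):
        return self.bn(self.conv(x))

class BasicBlock(nn.Layer):
    def __init__(self, ch):
        super().__init__()
        self.conv1 = ConvBN(ch, ch, 3, padding=1)
        self.conv2_3x3 = ConvBN(ch, ch, 3, padding=1)
        self.conv2_1x1 = ConvBN(ch, ch, 1)
        self.act = nn.Swish()

    def forward(self, x):
        y = self.act(self.conv1(x))                          # swish_12 in the dump
        y = self.act(self.conv2_3x3(y) + self.conv2_1x1(y))  # add_3 -> swish_13
        return x + y                                         # add_4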
+ parameter_334, + parameter_333, + parameter_332, + parameter_331, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_331, parameter_332, parameter_333, parameter_334 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_330, swish_14 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_329, + parameter_328, + parameter_327, + parameter_326, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_326, parameter_327, parameter_328, parameter_329 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + del batch_norm__102, batch_norm__108 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_6 = paddle._C_ops.add(add_4, swish_15) + del add_4, swish_15 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_1 = [swish_10, add_6] + del add_6, swish_10 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_20 = paddle._C_ops.conv2d( + mean_1, parameter_325, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_325 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_324, full_int_array_1) + del parameter_324 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_20, reshape_1) + del conv2d_20, reshape_1 + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_7, float("0.166667"), float("0.5") + ) + del add_7 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + multiply_1, parameter_323, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_1, parameter_323 + + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_322, + parameter_321, + parameter_320, + parameter_319, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
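# At stage level the dump follows a CSP-style layout: the stage input is split by two
# 1x1 ConvBNSwish branches, the residual blocks run on one branch only, the two paths
# are re-joined with concat on the channel axis (full_0 == 1 for NCHW), a channel gate
# is applied, and a final 1x1 ConvBNSwish widens the result for the next stage. A
# skeleton that wires externally supplied sub-layers (all names here are ours):
import paddle
import paddle.nn as nn

class CSPStage(nn.Layer):
    def __init__(self, branch_a, branch_b, blocks, gate, transition):
        super().__init__()
        self.branch_a = branch_a      # 1x1 reduce, kept as-is
        self.branch_b = branch_b      # 1x1 reduce feeding the blocks
        self.blocks = blocks          # e.g. nn.Sequential of BasicBlock units
        self.gate = gate              # channel attention as sketched above
        self.transition = transition  # 1x1 ConvBNSwish to the stage output width

    def forward(self, x):
        a = self.branch_a(x)
        b = self.blocks(self.branch_b(x))
        y = paddle.concat([a, b], axis=1)
        return self.transition(self.gate(y))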
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_319, parameter_320, parameter_321, parameter_322 + + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_318, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_318 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_317, + parameter_316, + parameter_315, + parameter_314, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_314, parameter_315, parameter_316, parameter_317 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__120) + del batch_norm__120 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_17, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_313 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_312, + parameter_311, + parameter_310, + parameter_309, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_309, parameter_310, parameter_311, parameter_312 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__126) + del batch_norm__126 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_17, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_308, swish_17 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_307, + parameter_306, + parameter_305, + parameter_304, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_304, parameter_305, parameter_306, parameter_307 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_303, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_303 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 
96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_302, + parameter_301, + parameter_300, + parameter_299, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_299, parameter_300, parameter_301, parameter_302 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__138) + del batch_norm__138 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_298, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_298 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_297, + parameter_296, + parameter_295, + parameter_294, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_294, parameter_295, parameter_296, parameter_297 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_293, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_293, swish_20 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_292, + parameter_291, + parameter_290, + parameter_289, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_289, parameter_290, parameter_291, parameter_292 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) + del batch_norm__144, batch_norm__150 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_8) + del add_8 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(swish_19, swish_21) + del swish_19, swish_21 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_28 = paddle._C_ops.conv2d( + add_9, parameter_288, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_288 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_287, + parameter_286, + parameter_285, + parameter_284, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del 
conv2d_28, parameter_284, parameter_285, parameter_286, parameter_287 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__156) + del batch_norm__156 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_22, parameter_283, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_283 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_282, + parameter_281, + parameter_280, + parameter_279, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_279, parameter_280, parameter_281, parameter_282 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_278, swish_22 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_277, + parameter_276, + parameter_275, + parameter_274, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_274, parameter_275, parameter_276, parameter_277 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) + del batch_norm__162, batch_norm__168 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_10) + del add_10 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(add_9, swish_23) + del add_9, swish_23 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_2 = [swish_18, add_11] + del add_11, swish_18 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + mean_2, parameter_273, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_273 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_272, full_int_array_1) + del parameter_272 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_31, reshape_2) + del conv2d_31, reshape_2 + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_12, float("0.166667"), float("0.5") + ) + del add_12 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + 
del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + multiply_2, parameter_271, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_271 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_270, + parameter_269, + parameter_268, + parameter_267, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_267, parameter_268, parameter_269, parameter_270 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_266, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_266 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_265, + parameter_264, + parameter_263, + parameter_262, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_262, parameter_263, parameter_264, parameter_265 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_261, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_261 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_260, + parameter_259, + parameter_258, + parameter_257, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_257, parameter_258, parameter_259, parameter_260 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_25, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_256, swish_25 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_35, + parameter_255, + parameter_254, + parameter_253, + parameter_252, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_252, parameter_253, parameter_254, parameter_255 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_251, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_251 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_250, + parameter_249, + parameter_248, + parameter_247, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_247, parameter_248, parameter_249, parameter_250 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__198) + del batch_norm__198 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_246, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_246 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_245, + parameter_244, + parameter_243, + parameter_242, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_242, parameter_243, parameter_244, parameter_245 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_241, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_241, swish_28 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_240, + parameter_239, + parameter_238, + parameter_237, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_237, parameter_238, parameter_239, parameter_240 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) + del batch_norm__204, batch_norm__210 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_13) + del add_13 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_14 = 
paddle._C_ops.add(swish_27, swish_29) + del swish_27, swish_29 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_3 = [swish_26, add_14] + del add_14, swish_26 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + mean_3, parameter_236, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_236 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_235, full_int_array_1) + del parameter_235 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + del conv2d_39, reshape_3 + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_15, float("0.166667"), float("0.5") + ) + del add_15 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + multiply_3, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_234 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__216) + del batch_norm__216 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_224, swish_30 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_32 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_33 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: 
(2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + del batch_norm__240, batch_norm__246 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_16) + del add_16 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_34, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_34, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_34, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_4 = [swish_34, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_34 + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 192x768x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + concat_6, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_204 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__252) + del batch_norm__252 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_5 = [swish_31, swish_35] + del swish_31, swish_35 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_199 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + 
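# pool2d_0/1/2 above form an SPP block: three parallel max pools with kernels 5, 9 and
# 13, stride 1 and paddings 2, 4 and 6, so every branch keeps the input resolution;
# the input and the three pooled maps are then concatenated (192 -> 768 channels) and
# squeezed back by the 1x1 conv2d_46. A functional equivalent (the helper name is ours):
import paddle
import paddle.nn.functional as F

def spp(feat, kernel_sizes=(5, 9, 13)):
    pools = [
        F.max_pool2d(feat, kernel_size=k, stride=1, padding=k // 2)
        for k in kernel_sizes
    ]
    return paddle.concat([feat] + pools, axis=1)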
parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__258) + del batch_norm__258 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_36, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_37, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_37 + + # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_24] + del nearest_interp_0, swish_24 + + # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_184 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + 
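# nearest_interp_0 / concat_8 above is the usual top-down fusion step of the neck:
# the deeper feature map is upsampled 2x with nearest-neighbour interpolation and
# concatenated along channels with the next shallower backbone output (here swish_24)
# before the 1x1 reduction convs. A functional sketch (the helper name is ours):
import paddle
import paddle.nn.functional as F

def top_down_fuse(deep, shallow):
    up = F.interpolate(deep, scale_factor=2, mode="nearest")
    return paddle.concat([up, shallow], axis=1)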
parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_39, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_39 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_40, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_40 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) + del batch_norm__288, batch_norm__294 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_17) + del add_17 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_7 = [swish_38, swish_41] + del 
swish_38, swish_41 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_164 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_42, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_43, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_43 + + # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_16] + del nearest_interp_1, swish_16 + + # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + concat_10, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_154 - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) - del full_0 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_150, parameter_151, parameter_152, parameter_153 - # pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__312) + del batch_norm__312 - # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) - multiply_1 = paddle._C_ops.multiply(data_3, cast_0) - del cast_0, data_3 + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + concat_10, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_149 - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) - del multiply_1 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_145, parameter_146, parameter_147, parameter_148 - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_1) - del argmax_0, cast_1 + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 - # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_45 - # pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_140, parameter_141, parameter_142, parameter_143 - # pd_op.full: (1xi32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_139 - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) - del flatten_0 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__330, + batch_norm__331, + 
batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_135, parameter_136, parameter_137, parameter_138 - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_134, swish_46 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_2, data_1] + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_130, parameter_131, parameter_132, parameter_133 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) + del batch_norm__330, batch_norm__336 - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) - del gather_0, stack_0 + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_47 = paddle._C_ops.swish(add_18) + del add_18 - # pd_op.full: (xf32) <- () - full_3 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_9 = [swish_44, swish_47] + del swish_44, swish_47 + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + concat_11, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_129 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_61, parameter_125, parameter_126, parameter_127, parameter_128 - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) - del data_5, full_3 + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_48 = 
paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_124, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__348) + del batch_norm__348 + + # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) + combine_10 = [swish_49, swish_42] + del swish_42, swish_49 + + # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + concat_12, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_12, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_114 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_51, 
parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109, swish_51 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_52, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_52, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99, swish_52 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_19) + del add_19 + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_11 = [swish_50, swish_53] + del swish_50, swish_53 + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_13, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_94 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__384, + batch_norm__385, + 
batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_54, parameter_89, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__390) + del batch_norm__390 + + # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) + combine_12 = [swish_55, swish_36] + del swish_36, swish_55 + + # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_14, parameter_84, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_14, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_79 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_57, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_57 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_58, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_58, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_58 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_59 = paddle._C_ops.swish(add_20) + del add_20 + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_13 = [swish_56, swish_59] + del swish_56, swish_59 + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_15 = 
paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_60) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [4] - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] ) - del full_4 + del shape64_1 - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) + del slice_0, slice_1 # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] + full_int_array_8 = [1, 1] + + # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_60, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_21 = paddle._C_ops.add(conv2d_76, reshape_4) + del conv2d_76, reshape_4 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_21) + del add_21 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_5 = paddle._C_ops.multiply(swish_60, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_5, parameter_52 + + # 
pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_22 = paddle._C_ops.add(swish_61, swish_60) + del swish_61 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_78 = paddle._C_ops.conv2d( + add_22, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_22, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_23 = paddle._C_ops.add(conv2d_78, reshape_5) + del conv2d_78, reshape_5 + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 - # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) - del data_6, full_int_array_0 + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) - del flatten_1, full_1, reshape_2 + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_24 = paddle._C_ops.add(conv2d_79, reshape_6) + del conv2d_79, reshape_6 + + # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_24) + del add_24 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_6 = paddle._C_ops.multiply(swish_60, sigmoid_1) + del sigmoid_1, swish_60 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_6, parameter_43 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_81 = 
paddle._C_ops.conv2d( + swish_62, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_62 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_25 = paddle._C_ops.add(conv2d_81, reshape_7) + del conv2d_81, reshape_7 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( + full_2 = paddle._C_ops.full( [], float("4"), paddle.int64, paddle.core.CPUPlace() ) + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_4] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_25, stack_0) + del add_25, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_82 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [1] + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_82, full_int_array_9) + del conv2d_82 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_23) + del add_23 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_1 = [full_2, data_1, full_5] - del data_1, full_2, full_5 + combine_15 = [full_1, full_2, multiply_4] + del multiply_4 # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) - del gather_1, stack_1 + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] ) + del shape64_2 - # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_6, where_0.dtype), full_6 + # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_54) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] ) - del full_6 + del shape64_3 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) + del slice_2, slice_3 - # pd_op.full: 
(4xi64) <- () - full_7 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_54, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", ) - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_7, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_83 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_7 + del parameter_35 - # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, data_2) - del data_7 + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_26 = paddle._C_ops.add(conv2d_83, reshape_10) + del conv2d_83, reshape_10 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_26) + del add_26 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_8 = paddle._C_ops.multiply(swish_54, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_8, parameter_33 - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_29, parameter_30, parameter_31, parameter_32 - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_8, data_2) - del data_2, data_8 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_63 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) - del multiply_3 + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(swish_63, swish_54) + del swish_63 - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + add_27, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del add_27, parameter_28 + + # pd_op.reshape: 
(1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) - del full_8, max_0 + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_28 = paddle._C_ops.add(conv2d_85, reshape_11) + del conv2d_85, reshape_11 - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_0) - del multiply_2, scale_0 + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_86 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_29 = paddle._C_ops.add(conv2d_86, reshape_12) + del conv2d_86, reshape_12 + + # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_29) + del add_29 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_9 = paddle._C_ops.multiply(swish_54, sigmoid_4) + del sigmoid_4, swish_54 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_9, parameter_24 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_64, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_64 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_30 = paddle._C_ops.add(conv2d_88, reshape_13) + del conv2d_88, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_7] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, stack_2) + del add_30, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del 
reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_89, full_int_array_9) + del conv2d_89 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_28) + del add_28 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_7] + del multiply_7 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_4 + + # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_48) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] + ) + del full_int_array_6, full_int_array_7, shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_48, + full_int_array_8, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_8 + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_31 = paddle._C_ops.add(conv2d_90, reshape_16) + del conv2d_90, reshape_16 + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_31) + del add_31 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_11 = paddle._C_ops.multiply(swish_48, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_11, parameter_15 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: 
(2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_32 = paddle._C_ops.add(swish_65, swish_48) + del swish_65 + + # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x96x-1x-1xf32, 4x96x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + add_32, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_32, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) + add_33 = paddle._C_ops.add(conv2d_92, reshape_17) + del conv2d_92, reshape_17 + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_93 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_34 = paddle._C_ops.add(conv2d_93, reshape_18) + del conv2d_93, reshape_18 + + # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_34) + del add_34 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_12 = paddle._C_ops.multiply(swish_48, sigmoid_7) + del sigmoid_7, swish_48 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_6 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_66, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_66 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) + add_35 = paddle._C_ops.add(conv2d_95, reshape_19) + del conv2d_95, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_10] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_35, stack_4) + del add_35, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: 
(-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_96 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_96, full_int_array_9) + del conv2d_96, full_int_array_9 + + # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_33) + del add_33 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_10] + del full_1, full_2, multiply_10 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 - # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) - del full_int_array_2, multiply_4 + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) - del full_int_array_1, max_2 + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 - # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 - return reshape_0, multiply_0 + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py index 8b1378917..2a1c68dc7 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_1/weight_meta.py @@ -1 +1,4457 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.018653") + max_val = float("0.0310325") + mean = float("1.30123e-07") + std = float("0.00772894") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 96, 3, 3] + dtype = "float32" + min_val = float("-0.190211") + max_val = float("0.209443") + mean = float("3.7835e-08") + std = float("0.0111182") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [96] + dtype = "float32" + min_val = float("-0.149303") + max_val = float("0.349229") + mean = float("0.0834703") + std = float("0.116476") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [96] + dtype = "float32" + min_val = 
float("0.918914") + max_val = float("2.00844") + mean = float("1.39791") + std = float("0.216949") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [96] + dtype = "float32" + min_val = float("0.000284833") + max_val = float("0.00552148") + mean = float("0.00151597") + std = float("0.00109394") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [96] + dtype = "float32" + min_val = float("-0.0946916") + max_val = float("0.0327932") + mean = float("-0.0138573") + std = float("0.0245203") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0951807") + max_val = float("0.113065") + mean = float("-0.00129554") + std = float("0.013839") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [96] + dtype = "float32" + min_val = float("-0.0103042") + max_val = float("0.0108003") + mean = float("-0.000302473") + std = float("0.00453127") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0202563") + max_val = float("0.0235644") + mean = float("-9.45269e-05") + std = float("0.00306802") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0327041") + max_val = float("0.0429711") + mean = float("0.000567976") + std = float("0.00660947") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [96] + dtype = "float32" + min_val = float("-0.66171") + max_val = float("1.12003") + mean = float("0.208007") + std = float("0.336117") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [96] + dtype = "float32" + min_val = float("0.774741") + max_val = float("1.56622") + mean = float("1.11212") + std = float("0.140094") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [96] + dtype = "float32" + min_val = float("0.000349212") + max_val = float("0.0115798") + mean = float("0.00164328") + std = float("0.00164938") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [96] + dtype = "float32" + min_val = float("-0.201551") + max_val = float("0.114135") + mean = float("-0.0261146") + std = float("0.047028") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0897622") + max_val = float("0.0968557") + mean = float("-0.00169456") + std = float("0.0125057") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [96] + dtype = "float32" + min_val = float("-0.0059556") + max_val = float("0.00753131") + mean = float("-0.000550704") + std = float("0.0026296") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0532542") + max_val = float("0.0784924") + mean = float("-0.000321779") + std = float("0.00341872") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = 
float("-0.00592671") + max_val = float("0.0245108") + mean = float("1.32248e-07") + std = float("0.00592063") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.148013") + max_val = float("0.188684") + mean = float("-7.52334e-09") + std = float("0.00734346") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [192] + dtype = "float32" + min_val = float("-0.112013") + max_val = float("0.136873") + mean = float("0.0501926") + std = float("0.042912") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [192] + dtype = "float32" + min_val = float("0.942516") + max_val = float("1.48651") + mean = float("1.209") + std = float("0.101334") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [192] + dtype = "float32" + min_val = float("0.000274533") + max_val = float("0.00473256") + mean = float("0.00131755") + std = float("0.00090147") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [192] + dtype = "float32" + min_val = float("-0.0539903") + max_val = float("0.0221637") + mean = float("-0.00864077") + std = float("0.011364") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0600062") + max_val = float("0.103906") + mean = float("-0.000355432") + std = float("0.00633016") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [192] + dtype = "float32" + min_val = float("-0.00820877") + max_val = float("0.00880323") + mean = float("-9.72734e-05") + std = float("0.0032937") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00942992") + max_val = float("0.0136885") + mean = float("-9.81164e-05") + std = float("0.00127282") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0185986") + max_val = float("0.017836") + mean = float("0.00051337") + std = float("0.00358993") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [192] + dtype = "float32" + min_val = float("-0.291073") + max_val = float("0.60814") + mean = float("0.147454") + std = float("0.159") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [192] + dtype = "float32" + min_val = float("0.913676") + max_val = float("1.49743") + mean = float("1.08728") + std = float("0.081887") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [192] + dtype = "float32" + min_val = float("0.000306423") + max_val = float("0.00589856") + mean = float("0.00147365") + std = float("0.000924636") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [192] + dtype = "float32" + min_val = float("-0.139638") + max_val = float("0.023164") + mean = float("-0.0355444") + std = float("0.0277406") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [192, 192, 1, 1] + dtype = "float32" 
+ min_val = float("-0.0584223") + max_val = float("0.0463674") + mean = float("-0.00109099") + std = float("0.00597895") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [192] + dtype = "float32" + min_val = float("-0.00481239") + max_val = float("0.00933689") + mean = float("-0.000150003") + std = float("0.00178034") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0175333") + max_val = float("0.0225389") + mean = float("-0.000152035") + std = float("0.00123807") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00491466") + max_val = float("0.00925987") + mean = float("1.23691e-07") + std = float("0.00410694") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0794888") + max_val = float("0.102667") + mean = float("1.60253e-08") + std = float("0.00459131") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("-0.0756471") + max_val = float("0.111591") + mean = float("0.0118707") + std = float("0.0354397") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("0.969442") + max_val = float("1.49466") + mean = float("1.1695") + std = float("0.0775726") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("7.22536e-05") + max_val = float("0.0068391") + mean = float("0.000845068") + std = float("0.000703644") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-0.040997") + max_val = float("0.0158692") + mean = float("-0.00441209") + std = float("0.00851866") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0576993") + max_val = float("0.0600385") + mean = float("-0.000138602") + std = float("0.00315958") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("-0.00427117") + max_val = float("0.00491851") + mean = float("-5.07427e-05") + std = float("0.00221218") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0242408") + max_val = float("0.00942797") + mean = float("-3.94686e-05") + std = float("0.000747366") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-0.00841769") + max_val = float("0.0133731") + mean = float("0.000292149") + std = float("0.00177607") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = 
float("-0.369152") + max_val = float("0.494632") + mean = float("0.0349918") + std = float("0.121509") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.883459") + max_val = float("1.55467") + mean = float("1.05797") + std = float("0.0835312") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("0.00013119") + max_val = float("0.00702002") + mean = float("0.000884237") + std = float("0.000684954") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("-0.105751") + max_val = float("0.0294571") + mean = float("-0.0262986") + std = float("0.0195655") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0387492") + max_val = float("0.0393312") + mean = float("-0.000523745") + std = float("0.00328341") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0107119") + max_val = float("0.00930954") + mean = float("-0.000115991") + std = float("0.00131259") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0844844") + max_val = float("0.0443428") + mean = float("-3.32923e-05") + std = float("0.000884614") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.652269") + max_val = float("1.18859") + mean = float("0.0279915") + std = float("0.238646") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.840802") + max_val = float("1.38345") + mean = float("0.983163") + std = float("0.068319") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.0028222") + max_val = float("0.120091") + mean = float("0.0150878") + std = float("0.0128566") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.186506") + max_val = float("0.0914117") + mean = float("-0.0370423") + std = float("0.0408906") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0804522") + max_val = float("0.0621238") + mean = float("-0.000348866") + std = float("0.00515003") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [192] + dtype = "float32" + min_val = float("-0.445858") + max_val = float("0.100597") + mean = float("-0.0845864") + std = float("0.104559") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [192] + dtype = "float32" + min_val = float("0.827741") + max_val = float("1.21013") + mean = float("0.92639") + std = float("0.0462105") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [192] + dtype = "float32" + min_val = float("0.00247019") + max_val = float("0.0260277") + mean = float("0.00631141") + std = float("0.00287846") + data = None + + +class Program_weight_tensor_parameter_63: + name = 
"parameter_63" + shape = [192] + dtype = "float32" + min_val = float("-0.0379476") + max_val = float("0.0497353") + mean = float("-0.0021489") + std = float("0.0234555") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0401693") + max_val = float("0.0482149") + mean = float("-0.000258359") + std = float("0.003519") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [192] + dtype = "float32" + min_val = float("-0.445858") + max_val = float("0.100597") + mean = float("-0.0845864") + std = float("0.104559") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [192] + dtype = "float32" + min_val = float("0.861621") + max_val = float("1.42119") + mean = float("1.11205") + std = float("0.0818467") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [192] + dtype = "float32" + min_val = float("0.00521346") + max_val = float("0.108237") + mean = float("0.0222325") + std = float("0.0164108") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [192] + dtype = "float32" + min_val = float("-0.112022") + max_val = float("0.0861229") + mean = float("-0.0205872") + std = float("0.039563") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0590664") + max_val = float("0.0651393") + mean = float("-0.000122795") + std = float("0.00309452") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.519576") + max_val = float("0.119175") + mean = float("-0.173677") + std = float("0.128148") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.843577") + max_val = float("1.65244") + mean = float("1.0642") + std = float("0.100969") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.0134686") + max_val = float("0.262111") + mean = float("0.035477") + std = float("0.0257605") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.2601") + max_val = float("0.142926") + mean = float("-0.0593171") + std = float("0.0572285") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566011") + max_val = float("0.0723318") + mean = float("-0.000243394") + std = float("0.00339424") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.455876") + max_val = float("0.187264") + mean = float("-0.0820069") + std = float("0.10205") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.841971") + max_val = float("1.25563") + mean = float("1.02701") + std = float("0.0670478") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00508938") + max_val = float("0.0629504") + mean = float("0.0128185") + std = float("0.00617773") + data = None + + +class 
Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.11049") + max_val = float("0.0547617") + mean = float("-0.0181669") + std = float("0.0279962") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.080574") + max_val = float("0.0848044") + mean = float("-0.000185678") + std = float("0.00481275") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.217908") + max_val = float("0.0355956") + mean = float("-0.0691499") + std = float("0.0386375") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.844091") + max_val = float("1.15255") + mean = float("1.01562") + std = float("0.0503148") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.00256848") + max_val = float("0.0204398") + mean = float("0.00702577") + std = float("0.00290168") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.0967604") + max_val = float("0.0866626") + mean = float("-0.0222285") + std = float("0.0273828") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0433461") + max_val = float("0.0508427") + mean = float("-0.000278308") + std = float("0.00423993") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.296093") + max_val = float("-0.00746985") + mean = float("-0.0909473") + std = float("0.0603181") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.78297") + max_val = float("1.34841") + mean = float("1.0531") + std = float("0.0659046") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00866336") + max_val = float("0.0606129") + mean = float("0.0236506") + std = float("0.00920598") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.274437") + max_val = float("0.303481") + mean = float("-0.028094") + std = float("0.0952102") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0320502") + max_val = float("0.0370765") + mean = float("-5.98437e-05") + std = float("0.00253528") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.530717") + max_val = float("1.03145") + mean = float("0.148064") + std = float("0.259361") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.731798") + max_val = float("1.56803") + mean = float("1.014") + std = float("0.106695") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00711023") + max_val = float("0.0905165") + mean = float("0.0211361") + 
std = float("0.0114095") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.279082") + max_val = float("0.153551") + mean = float("-0.0509504") + std = float("0.0522264") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.13468") + max_val = float("0.0903167") + mean = float("-0.000883571") + std = float("0.0100158") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [96] + dtype = "float32" + min_val = float("-0.29043") + max_val = float("0.172569") + mean = float("-0.070874") + std = float("0.105573") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [96] + dtype = "float32" + min_val = float("0.730954") + max_val = float("1.20877") + mean = float("0.877662") + std = float("0.077901") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [96] + dtype = "float32" + min_val = float("0.00246171") + max_val = float("0.0129422") + mean = float("0.00671665") + std = float("0.00235517") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [96] + dtype = "float32" + min_val = float("-0.0452652") + max_val = float("0.0355382") + mean = float("-0.0105203") + std = float("0.0215082") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0494") + max_val = float("0.0504503") + mean = float("-0.00128754") + std = float("0.00640071") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [96] + dtype = "float32" + min_val = float("-0.29043") + max_val = float("0.172569") + mean = float("-0.070874") + std = float("0.105573") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [96] + dtype = "float32" + min_val = float("0.97156") + max_val = float("1.32049") + mean = float("1.13236") + std = float("0.0752191") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [96] + dtype = "float32" + min_val = float("0.0093333") + max_val = float("0.0602011") + mean = float("0.0268797") + std = float("0.0114369") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [96] + dtype = "float32" + min_val = float("-0.0726866") + max_val = float("0.101348") + mean = float("-0.0120426") + std = float("0.0278587") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0765906") + max_val = float("0.0838438") + mean = float("-0.000172577") + std = float("0.00601682") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [96] + dtype = "float32" + min_val = float("-0.673369") + max_val = float("0.111337") + mean = float("-0.259249") + std = float("0.15059") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [96] + dtype = "float32" + min_val = float("0.801527") + max_val = float("1.41146") + mean = float("1.04521") + std = float("0.116809") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [96] + dtype = "float32" + min_val = float("0.0179787") + max_val = 
float("0.123656") + mean = float("0.0407975") + std = float("0.0163583") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [96] + dtype = "float32" + min_val = float("-0.210235") + max_val = float("0.0371458") + mean = float("-0.0429902") + std = float("0.0344348") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.075314") + max_val = float("0.0818873") + mean = float("-0.000470995") + std = float("0.00667813") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [96] + dtype = "float32" + min_val = float("-0.644536") + max_val = float("0.152401") + mean = float("-0.155624") + std = float("0.115775") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [96] + dtype = "float32" + min_val = float("0.84937") + max_val = float("1.26645") + mean = float("1.03345") + std = float("0.0722039") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [96] + dtype = "float32" + min_val = float("0.00973348") + max_val = float("0.0487037") + mean = float("0.0192495") + std = float("0.00666947") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [96] + dtype = "float32" + min_val = float("-0.122358") + max_val = float("0.0284554") + mean = float("-0.0357051") + std = float("0.0301403") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0666353") + max_val = float("0.07655") + mean = float("-0.000641477") + std = float("0.00909018") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [96] + dtype = "float32" + min_val = float("-0.198184") + max_val = float("0.0830438") + mean = float("-0.029894") + std = float("0.0460273") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [96] + dtype = "float32" + min_val = float("0.68536") + max_val = float("1.33613") + mean = float("0.954422") + std = float("0.0884454") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [96] + dtype = "float32" + min_val = float("0.00439953") + max_val = float("0.0376729") + mean = float("0.0110806") + std = float("0.00518735") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [96] + dtype = "float32" + min_val = float("-0.110611") + max_val = float("0.053016") + mean = float("-0.0161266") + std = float("0.0307335") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [96, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0847356") + max_val = float("0.0781943") + mean = float("-0.00030171") + std = float("0.00760969") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [96] + dtype = "float32" + min_val = float("-0.335709") + max_val = float("0.0179744") + mean = float("-0.108667") + std = float("0.0840873") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [96] + dtype = "float32" + min_val = float("0.731015") + max_val = float("1.20665") + mean = float("1.05574") + std = float("0.0750007") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [96] + 
dtype = "float32" + min_val = float("0.0110792") + max_val = float("0.108652") + mean = float("0.0289631") + std = float("0.0151623") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [96] + dtype = "float32" + min_val = float("-0.402624") + max_val = float("0.415541") + mean = float("-0.0212279") + std = float("0.14343") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0566918") + max_val = float("0.0566383") + mean = float("-4.65555e-05") + std = float("0.00561229") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [96] + dtype = "float32" + min_val = float("-1.07712") + max_val = float("2.35644") + mean = float("0.310791") + std = float("0.586496") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [96] + dtype = "float32" + min_val = float("0.468523") + max_val = float("1.40452") + mean = float("0.882442") + std = float("0.167292") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [96] + dtype = "float32" + min_val = float("0.00622077") + max_val = float("0.145427") + mean = float("0.0336655") + std = float("0.0192279") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [96] + dtype = "float32" + min_val = float("-0.252919") + max_val = float("0.171344") + mean = float("-0.0348699") + std = float("0.0800858") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.151113") + max_val = float("0.113524") + mean = float("-0.00159645") + std = float("0.0201408") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0664404") + max_val = float("0.0734275") + mean = float("-0.00237338") + std = float("0.012309") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [48, 48, 3, 3] + dtype = 
"float32" + min_val = float("-0.126099") + max_val = float("0.146893") + mean = float("-0.000602698") + std = float("0.0131643") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.10691") + max_val = float("0.123684") + mean = float("-0.0011924") + std = float("0.0144144") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.188612") + max_val = float("0.131781") + mean = float("-0.00116675") + std = float("0.0182059") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [48, 224, 1, 1] + dtype = "float32" + min_val = float("-0.102996") + max_val = float("0.127082") + mean = float("-0.000100325") + std = float("0.0125415") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.355722") + max_val = float("0.392015") + mean = float("-0.00761515") + std = float("0.135765") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.58176") + max_val = float("1.61796") + mean = float("0.798413") + std = float("0.141773") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.00744695") + 
max_val = float("0.0777617") + mean = float("0.0234491") + std = float("0.0125216") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.126434") + max_val = float("0.0750849") + mean = float("-0.0331653") + std = float("0.0358366") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0965171") + max_val = float("0.103243") + mean = float("-0.000979696") + std = float("0.0126998") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.337168") + max_val = float("0.172866") + mean = float("-0.0804302") + std = float("0.0895865") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.695369") + max_val = float("1.47856") + mean = float("0.99055") + std = float("0.0996963") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.0097712") + max_val = float("0.0987394") + mean = float("0.0251769") + std = float("0.012782") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.203449") + max_val = float("0.19128") + mean = float("-0.0630189") + std = float("0.0562037") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.10035") + max_val = float("0.120314") + mean = float("-0.00133976") + std = float("0.0122628") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.307168") + max_val = float("0.102436") + mean = float("-0.0815792") + std = float("0.0994278") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.550693") + max_val = float("0.936676") + mean = float("0.809285") + std = float("0.0654997") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00340201") + max_val = float("0.01613") + mean = float("0.00847885") + std = float("0.00252636") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0470381") + max_val = float("0.0327168") + mean = float("-0.0154057") + std = float("0.0195276") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.046942") + max_val = float("0.0545283") + mean = float("-0.00159272") + std = float("0.00865431") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.307168") + max_val = float("0.102436") + mean = float("-0.0815792") + std = float("0.0994278") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.843562") + max_val = float("1.28869") + mean = float("1.03487") + std = float("0.0944074") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + 
shape = [96] + dtype = "float32" + min_val = float("0.0162234") + max_val = float("0.0750483") + mean = float("0.0356845") + std = float("0.0121583") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.0894171") + max_val = float("0.0639562") + mean = float("-0.0235775") + std = float("0.0292923") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.086771") + max_val = float("0.165089") + mean = float("-0.000248005") + std = float("0.00738139") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.732503") + max_val = float("0.316296") + mean = float("-0.275731") + std = float("0.175263") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764579") + max_val = float("1.31051") + mean = float("1.0436") + std = float("0.115503") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0234519") + max_val = float("0.125482") + mean = float("0.0544499") + std = float("0.0175139") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.166347") + max_val = float("0.0892188") + mean = float("-0.0515841") + std = float("0.0489599") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.144257") + max_val = float("0.132913") + mean = float("-0.000565483") + std = float("0.00873368") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.649417") + max_val = float("0.386836") + mean = float("-0.253654") + std = float("0.210215") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.74298") + max_val = float("1.3799") + mean = float("1.02561") + std = float("0.122082") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.00554249") + max_val = float("0.0349953") + mean = float("0.0169508") + std = float("0.00634658") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.314719") + max_val = float("0.272384") + mean = float("0.0111478") + std = float("0.0678291") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.169447") + max_val = float("0.110463") + mean = float("-0.000526406") + std = float("0.0113187") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.239085") + max_val = float("0.172277") + mean = float("-0.0410098") + std = float("0.088654") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.917909") + max_val = float("1.41228") + mean = float("1.07289") + std = float("0.0921621") + data = None + + +class 
Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00512108") + max_val = float("0.0377565") + mean = float("0.012503") + std = float("0.00556829") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0764749") + max_val = float("0.0829136") + mean = float("0.0112017") + std = float("0.0312016") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.114799") + max_val = float("0.116536") + mean = float("-0.000477416") + std = float("0.00980395") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.539928") + max_val = float("-0.101676") + mean = float("-0.294772") + std = float("0.070854") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.649882") + max_val = float("1.08068") + mean = float("0.851912") + std = float("0.0726184") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00896797") + max_val = float("0.0613196") + mean = float("0.0239899") + std = float("0.00961639") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.110165") + max_val = float("0.0578497") + mean = float("-0.0313571") + std = float("0.0318844") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0569669") + max_val = float("0.0580897") + mean = float("-0.000683949") + std = float("0.00880451") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.522469") + max_val = float("0.214295") + mean = float("-0.168543") + std = float("0.0775358") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.848906") + max_val = float("1.39284") + mean = float("1.06283") + std = float("0.0773502") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00501982") + max_val = float("0.0397325") + mean = float("0.0160268") + std = float("0.00625591") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.114137") + max_val = float("0.0857981") + mean = float("-0.0355965") + std = float("0.0347035") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0936791") + max_val = float("0.106798") + mean = float("-0.000567793") + std = float("0.00789782") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.384521") + max_val = float("0.227763") + mean = float("-0.118185") + std = float("0.102049") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.869586") + max_val = float("1.51327") 
+ mean = float("1.12323") + std = float("0.119089") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.119222") + max_val = float("1.51213") + mean = float("0.355679") + std = float("0.194075") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-3.46501") + max_val = float("1.69978") + mean = float("-0.19182") + std = float("0.986212") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 768, 1, 1] + dtype = "float32" + min_val = float("-0.113572") + max_val = float("0.0748414") + mean = float("-8.3021e-05") + std = float("0.00688727") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.243321") + max_val = float("0.168978") + mean = float("-0.0173026") + std = float("0.0539895") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.617694") + max_val = float("1.01652") + mean = float("0.837363") + std = float("0.0632031") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.00364927") + max_val = float("0.0302202") + mean = float("0.00897402") + std = float("0.00329128") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.116422") + max_val = float("0.0722651") + mean = float("-0.0506104") + std = float("0.0400639") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0376764") + max_val = float("0.0673975") + mean = float("-0.00139833") + std = float("0.00662605") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.243321") + max_val = float("0.168978") + mean = float("-0.0173026") + std = float("0.0539895") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.875399") + max_val = float("1.46098") + mean = float("1.10627") + std = float("0.129572") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0227077") + max_val = float("0.237351") + mean = float("0.0547969") + std = float("0.0225584") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.293552") + max_val = float("0.0473091") + mean = float("-0.118203") + std = float("0.0657697") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368216") + max_val = float("0.0546366") + mean = float("-0.000404501") + std = float("0.00405002") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.311401") + max_val = float("0.0667354") + mean = float("-0.115178") + std = float("0.0802349") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = 
"float32" + min_val = float("0.910824") + max_val = float("1.44605") + mean = float("1.10855") + std = float("0.101864") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.0286424") + max_val = float("0.136652") + mean = float("0.0700486") + std = float("0.0228431") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.460812") + max_val = float("0.215499") + mean = float("-0.119224") + std = float("0.0838897") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0498457") + max_val = float("0.0422007") + mean = float("-0.000479539") + std = float("0.00450633") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.444339") + max_val = float("0.412099") + mean = float("-0.137793") + std = float("0.130321") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.955612") + max_val = float("1.37241") + mean = float("1.11033") + std = float("0.0722102") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0599313") + max_val = float("0.406381") + mean = float("0.146198") + std = float("0.0531059") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.3746") + max_val = float("0.457922") + mean = float("-0.113673") + std = float("0.104493") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0511306") + max_val = float("0.0904195") + mean = float("-0.000705076") + std = float("0.00778049") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.164214") + max_val = float("0.00108657") + mean = float("-0.0652814") + std = float("0.0261733") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.819443") + max_val = float("1.06651") + mean = float("0.969047") + std = float("0.046122") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0402565") + max_val = float("0.234889") + mean = float("0.0881219") + std = float("0.0288936") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.264071") + max_val = float("0.148941") + mean = float("-0.104244") + std = float("0.0769829") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 512, 1, 1] + dtype = "float32" + min_val = float("-0.0279262") + max_val = float("0.0522477") + mean = float("-0.00062665") + std = float("0.0064125") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [512] + dtype = "float32" + min_val = float("-4.82887") + max_val = float("-0.110974") + mean = float("-2.29538") + std = float("0.775295") + data = None + + +class 
Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [512] + dtype = "float32" + min_val = float("2.10191") + max_val = float("5.21727") + mean = float("3.70112") + std = float("0.482785") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [512] + dtype = "float32" + min_val = float("0.00205017") + max_val = float("0.0129863") + mean = float("0.00439085") + std = float("0.00153234") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [512] + dtype = "float32" + min_val = float("-0.125558") + max_val = float("0.0844085") + mean = float("-0.0389285") + std = float("0.0304083") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0812518") + max_val = float("0.135076") + mean = float("-0.000961032") + std = float("0.00790225") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [384] + dtype = "float32" + min_val = float("-0.0156508") + max_val = float("-0.000140212") + mean = float("-0.00546153") + std = float("0.0036436") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.186417") + max_val = float("0.14642") + mean = float("-0.00212537") + std = float("0.00664924") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("-2.38809") + max_val = float("3.17072") + mean = float("-0.203472") + std = float("0.563206") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("0.123887") + max_val = float("2.40473") + mean = float("0.524729") + std = float("0.334968") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192] + dtype = "float32" + min_val = float("0.000170815") + max_val = float("0.00268326") + mean = float("0.000688421") + std = float("0.000383392") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.0629836") + max_val = float("0.0878972") + mean = float("0.0115645") + std = float("0.0223406") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0580822") + max_val = float("0.058285") + mean = float("-0.000330827") + std = float("0.00504808") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("-2.38809") + max_val = float("3.17072") + mean = float("-0.203472") + std = float("0.563206") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("0.679292") + max_val = float("3.07313") + mean = float("1.54536") + std = float("0.450864") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192] + dtype = "float32" + min_val = float("0.0033827") + max_val = float("0.0334462") + mean = float("0.00949376") + std = float("0.00462441") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.225625") + max_val = float("0.191391") 
+ mean = float("0.012192") + std = float("0.0564082") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.074483") + max_val = float("0.0656173") + mean = float("-7.80967e-05") + std = float("0.00448667") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("-3.43262") + max_val = float("1.16961") + mean = float("-1.42837") + std = float("0.634837") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("0.392163") + max_val = float("1.72692") + mean = float("1.08998") + std = float("0.190011") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192] + dtype = "float32" + min_val = float("0.0270888") + max_val = float("0.44195") + mean = float("0.084525") + std = float("0.0387844") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-1.22131") + max_val = float("0.423185") + mean = float("-0.221144") + std = float("0.173004") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.060306") + max_val = float("0.0575339") + mean = float("-0.000382961") + std = float("0.00524134") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("-3.8777") + max_val = float("4.24427") + mean = float("-0.62919") + std = float("0.988716") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("0.580104") + max_val = float("4.17524") + mean = float("1.54492") + std = float("0.398566") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192] + dtype = "float32" + min_val = float("0.00324018") + max_val = float("0.0287499") + mean = float("0.0091977") + std = float("0.00409842") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.163414") + max_val = float("0.133777") + mean = float("0.0531707") + std = float("0.0370733") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0969858") + max_val = float("0.0643058") + mean = float("-0.00132637") + std = float("0.00937933") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("-2.93792") + max_val = float("1.02599") + mean = float("-0.426756") + std = float("0.68174") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("0.700506") + max_val = float("3.6114") + mean = float("1.48179") + std = float("0.505296") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192] + dtype = "float32" + min_val = float("0.00157973") + max_val = float("0.00922621") + mean = float("0.00455175") + std = float("0.00146605") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + 
min_val = float("-0.062057") + max_val = float("0.0798436") + mean = float("0.0163012") + std = float("0.0297843") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0711863") + max_val = float("0.0659081") + mean = float("-0.000541447") + std = float("0.00758362") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [384] + dtype = "float32" + min_val = float("-2.84279") + max_val = float("1.12229") + mean = float("-0.753376") + std = float("0.497189") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [384] + dtype = "float32" + min_val = float("0.419122") + max_val = float("1.80266") + mean = float("0.867781") + std = float("0.218188") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [384] + dtype = "float32" + min_val = float("0.00581871") + max_val = float("0.137693") + mean = float("0.0206925") + std = float("0.0128053") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.489828") + max_val = float("0.409672") + mean = float("0.0184444") + std = float("0.103856") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0545942") + max_val = float("0.0530515") + mean = float("-0.000171083") + std = float("0.00433153") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [256] + dtype = "float32" + min_val = float("-2.82072") + max_val = float("1.4645") + mean = float("-1.07775") + std = float("0.63367") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [256] + dtype = "float32" + min_val = float("0.424072") + max_val = float("1.76956") + mean = float("0.978554") + std = float("0.170473") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [256] + dtype = "float32" + min_val = float("0.00145624") + max_val = float("0.0198528") + mean = float("0.00622552") + std = float("0.00273816") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [256] + dtype = "float32" + min_val = float("-0.233052") + max_val = float("0.227501") + mean = float("-0.0537504") + std = float("0.0737751") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.129954") + max_val = float("0.174189") + mean = float("-0.00111637") + std = float("0.014005") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("-0.0203441") + max_val = float("0.00142473") + mean = float("-0.00616218") + std = float("0.00515112") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.235185") + max_val = float("0.180564") + mean = float("-0.00409182") + std = float("0.010316") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-2.27838") + max_val = float("0.754522") + mean = float("-0.115757") + std = float("0.508129") + data = None + + +class 
Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96] + dtype = "float32" + min_val = float("-0.0583754") + max_val = float("2.30701") + mean = float("0.261422") + std = float("0.366858") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("5.52856e-12") + max_val = float("0.00211136") + mean = float("0.000440278") + std = float("0.00038522") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("-0.0522517") + max_val = float("0.0841538") + mean = float("0.00706718") + std = float("0.0204386") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0379797") + max_val = float("0.0638861") + mean = float("-0.000312308") + std = float("0.00575617") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-2.27838") + max_val = float("0.754522") + mean = float("-0.115757") + std = float("0.508129") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96] + dtype = "float32" + min_val = float("0.349893") + max_val = float("3.24248") + mean = float("1.29137") + std = float("0.633887") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("0.00382734") + max_val = float("0.0305749") + mean = float("0.0139795") + std = float("0.00619915") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("-0.179476") + max_val = float("0.231631") + mean = float("0.0342856") + std = float("0.0751833") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0540098") + max_val = float("0.0647126") + mean = float("-0.000333391") + std = float("0.00757747") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-2.80047") + max_val = float("1.50581") + mean = float("-1.09128") + std = float("0.696783") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96] + dtype = "float32" + min_val = float("0.32196") + max_val = float("1.80506") + mean = float("1.07312") + std = float("0.213196") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [96] + dtype = "float32" + min_val = float("0.0443664") + max_val = float("0.229848") + mean = float("0.0920728") + std = float("0.0345847") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [96] + dtype = "float32" + min_val = float("-1.52006") + max_val = float("0.514198") + mean = float("-0.12686") + std = float("0.297573") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0508838") + max_val = float("0.0716904") + mean = float("-0.000600059") + std = float("0.00819703") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [96] + dtype = "float32" + min_val = float("-2.54073") + max_val = float("0.664611") + mean = 
float("-0.0486788") + std = float("0.474242") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [96] + dtype = "float32" + min_val = float("-0.0782312") + max_val = float("3.15097") + mean = float("0.280453") + std = float("0.408781") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [96] + dtype = "float32" + min_val = float("1.38686e-10") + max_val = float("0.0216024") + mean = float("0.00193235") + std = float("0.00284515") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [96] + dtype = "float32" + min_val = float("-0.051793") + max_val = float("0.122769") + mean = float("0.0178591") + std = float("0.0297203") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.111904") + max_val = float("0.0788536") + mean = float("-0.00121451") + std = float("0.00859042") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [96] + dtype = "float32" + min_val = float("-2.54073") + max_val = float("0.664612") + mean = float("-0.0486788") + std = float("0.474242") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [96] + dtype = "float32" + min_val = float("0.34207") + max_val = float("2.99219") + mean = float("0.929546") + std = float("0.412034") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("0.00578641") + max_val = float("0.0746827") + mean = float("0.0256145") + std = float("0.0116189") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96] + dtype = "float32" + min_val = float("-0.159121") + max_val = float("0.1999") + mean = float("0.0399585") + std = float("0.0761745") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0537042") + max_val = float("0.0504456") + mean = float("-0.000550937") + std = float("0.00775915") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [96] + dtype = "float32" + min_val = float("-2.02001") + max_val = float("1.65623") + mean = float("-0.919923") + std = float("0.650572") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [96] + dtype = "float32" + min_val = float("0.442811") + max_val = float("1.97179") + mean = float("1.06409") + std = float("0.227631") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [96] + dtype = "float32" + min_val = float("0.0126015") + max_val = float("0.116688") + mean = float("0.0335163") + std = float("0.0162914") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [96] + dtype = "float32" + min_val = float("-2.08549") + max_val = float("0.285641") + mean = float("-0.0344373") + std = float("0.253697") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.105598") + max_val = float("0.126106") + mean = float("-0.000400525") + std = float("0.00876452") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [96] + dtype = "float32" + min_val = 
float("-1.61964") + max_val = float("1.88676") + mean = float("0.00618829") + std = float("0.838881") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [96] + dtype = "float32" + min_val = float("0.352578") + max_val = float("1.32187") + mean = float("0.700929") + std = float("0.236185") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [96] + dtype = "float32" + min_val = float("0.00963589") + max_val = float("0.0791035") + mean = float("0.0353916") + std = float("0.0149812") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [96] + dtype = "float32" + min_val = float("-0.28232") + max_val = float("0.404327") + mean = float("-0.0642217") + std = float("0.107473") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.125711") + max_val = float("0.113026") + mean = float("-0.00129075") + std = float("0.0142062") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [96] + dtype = "float32" + min_val = float("-2.46694") + max_val = float("1.71626") + mean = float("0.341079") + std = float("0.679162") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [96] + dtype = "float32" + min_val = float("0.539295") + max_val = float("4.8854") + mean = float("1.48247") + std = float("0.959871") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [96] + dtype = "float32" + min_val = float("0.00783824") + max_val = float("0.0876481") + mean = float("0.0244986") + std = float("0.0120614") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [96] + dtype = "float32" + min_val = float("-0.287482") + max_val = float("0.306711") + mean = float("-0.00499391") + std = float("0.109698") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0888948") + max_val = float("0.165433") + mean = float("-0.000715943") + std = float("0.0141494") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [192] + dtype = "float32" + min_val = float("-4.44162") + max_val = float("2.00951") + mean = float("-0.0984248") + std = float("0.883313") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [192] + dtype = "float32" + min_val = float("0.575367") + max_val = float("4.51851") + mean = float("1.08197") + std = float("0.426247") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [192] + dtype = "float32" + min_val = float("0.00785823") + max_val = float("0.189363") + mean = float("0.0403725") + std = float("0.0275254") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [192] + dtype = "float32" + min_val = float("-0.270304") + max_val = float("0.337865") + mean = float("0.0348807") + std = float("0.105792") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.087766") + max_val = float("0.0743655") + mean = float("-0.000243175") + std = float("0.00724859") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + 
shape = [128] + dtype = "float32" + min_val = float("-2.15213") + max_val = float("1.36751") + mean = float("-0.673313") + std = float("0.682255") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [128] + dtype = "float32" + min_val = float("0.368949") + max_val = float("2.25056") + mean = float("0.876029") + std = float("0.235567") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [128] + dtype = "float32" + min_val = float("0.00227055") + max_val = float("0.0578315") + mean = float("0.00881457") + std = float("0.00556285") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [128] + dtype = "float32" + min_val = float("-0.299004") + max_val = float("0.266637") + mean = float("-0.0648951") + std = float("0.119821") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.205453") + max_val = float("0.19767") + mean = float("-0.00132955") + std = float("0.0233044") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [96] + dtype = "float32" + min_val = float("-0.0287844") + max_val = float("0.00351177") + mean = float("-0.00835282") + std = float("0.00772968") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.293759") + max_val = float("0.281304") + mean = float("-0.00581101") + std = float("0.0185083") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.083645") + max_val = float("0.0802369") + mean = float("-0.00148088") + std = float("0.0125285") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.101441") + max_val = float("0.102416") + mean = float("-0.000544695") + std = float("0.0126239") + data = None + + +class Program_weight_tensor_parameter_336: + name = 
"parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0993692") + max_val = float("0.108811") + mean = float("-0.00104822") + std = float("0.0135148") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0692134") + max_val = float("0.0765493") + mean = float("-0.00277088") + std = float("0.0156891") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0966216") + max_val = float("0.0984974") + mean = float("-0.00101028") + std = float("0.0123098") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = 
float("-0.0948994") + max_val = float("0.0895512") + mean = float("-0.000794946") + std = float("0.0143308") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0970452") + max_val = float("0.119633") + mean = float("-0.00232752") + std = float("0.0233937") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.161532") + max_val = float("0.246027") + mean = float("0.000306195") + std = float("0.0243305") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [96] + dtype = "float32" + min_val = float("-3.31592") + max_val = float("3.83597") + mean = float("0.267111") + std = float("1.21094") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [96] + dtype = "float32" + min_val = float("0.511478") + max_val = float("5.40365") + mean = float("1.12531") + std = float("0.546749") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [96] + dtype = "float32" + min_val = float("0.0162512") + max_val = float("0.320396") + mean = float("0.0760809") + std = float("0.0518261") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [96] + dtype = "float32" + min_val = float("-0.502761") + max_val = float("0.492542") + mean = float("-0.0358312") + std = float("0.181422") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.0955215") + max_val = float("0.123103") + mean = float("-0.000235878") + std = float("0.0120646") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [64, 48, 1, 1] + dtype = "float32" + min_val = float("-0.17433") + max_val = float("0.176451") + mean = float("-0.00222374") + std = float("0.0347099") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.166649") + max_val = float("0.156533") + mean = float("-0.0129927") + std = float("0.0258165") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [24, 24, 1, 1] + dtype = "float32" + min_val = float("-0.0915942") + max_val = float("0.147481") + mean = float("-0.00102278") + std = float("0.0251579") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.110347") + max_val = float("0.0904305") + mean = float("-0.000736999") + std = float("0.0215299") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.125611") + max_val = float("0.174817") + mean = float("-0.00010855") + std = float("0.023961") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.205563") + max_val = float("0.188891") + mean = float("-0.00450245") + std = float("0.0361434") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [24, 48, 1, 1] + dtype = "float32" + min_val = float("-0.192775") + max_val = float("0.166962") + mean = float("-0.00176564") + std = float("0.0388666") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [48, 32, 3, 3] + dtype = "float32" + min_val = float("-0.162459") + max_val = float("0.113525") + mean = float("-0.000259478") + std = float("0.0203147") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [32, 16, 3, 3] + dtype = "float32" + min_val = float("-0.21861") + max_val = float("0.233137") + mean = float("-0.000345629") + std = float("0.0349451") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [16, 16, 3, 3] + dtype = "float32" + min_val = float("-0.301154") + max_val = float("0.330949") + mean = float("-0.00144702") + std = float("0.0464861") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [16] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [16, 3, 3, 3] + dtype = "float32" + min_val = float("-0.247963") + max_val = float("0.271619") + mean = float("-0.00315738") + std = float("0.068481") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt deleted file mode 100644 index 7fbb8551e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b58b47a10405b5de0e1c7f3dab25881ba3cc8c8bdd1045e44640464fa936bf04 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py deleted file mode 100644 index 2d525f6d3..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/model.py +++ /dev/null @@ -1,192 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, 
parameter_0, data_0, data_1, data_2, data_3, data_4): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_3, data_4) - del data_3 - - # pd_op.shape64: (3xi64) <- (2x-1x68xf32) - shape64_0 = paddle._C_ops.shape64(data_2) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_2, stack_0) - del data_2, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - 
# builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.less_than: (xb) <- (xi64, xi64) - less_than_0 = paddle._C_ops.less_than(data_0, full_6) - del data_0, full_6 - - # pd_op.cast: (xi64) <- (xb) - cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) - del less_than_0 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) - del cast_0 - - # pd_op.cast: (xi64) <- (xb) - cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) - del not_equal_0 - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_0 = paddle._C_ops.equal(cast_1, full_7) - del cast_1, full_7 - - # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) - share_data__0 = data_1.detach() - del data_1 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_4) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_4, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt deleted file mode 100644 index e5ec97328..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -d8e2807e0c261d57e00c887aba4b333ffd83562a1bf230d5a26bacf0379fad87 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py deleted file mode 100644 index 3217a39a7..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/input_meta.py +++ /dev/null @@ -1,91 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 3549] - dtype = "float32" - max_val = float("2.0") - mean = float("0.0250775") - std = float("0.157259") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 14, 3549] - dtype = "float32" - max_val = float("0.973582") - mean = float("0.00986958") - std = float("0.0654711") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 14, 3549] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00179125") - std = float("0.0422852") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = 
"data_4" - shape = [2, 14, 1] - dtype = "int32" - data = [ - 0, - 0, - 1, - 0, - 0, - 0, - 0, - 0, - 3, - 0, - 0, - 0, - 0, - 3, - 0, - 0, - 1, - 0, - 0, - 0, - 0, - 0, - 3, - 0, - 0, - 0, - 0, - 0, - ] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 14, 4] - dtype = "float32" - max_val = float("384.824") - mean = float("133.114") - std = float("96.9844") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 14, 3549] - dtype = "float32" - max_val = float("0.00888292") - mean = float("1.5455e-05") - std = float("0.000268339") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py deleted file mode 100644 index 2432102f1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/model.py +++ /dev/null @@ -1,229 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 14, 1] - - # pd_op.tile: (2x14x3549xb) <- (2x1x3549xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) - del full_int_array_1, greater_than_0 - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64) - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("14"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x3549x14xf32) <- (2x3549xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 - ) - del argmax_0, full_2 - - # pd_op.transpose: (2x14x3549xf32) <- (2x3549x14xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x14x3549xf32) <- (2x14x3549xb, 2x14x3549xf32, 2x14x3549xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) - del data_2, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.sum: (2x3549xf32) <- (2x14x3549xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - - # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) - del full_1 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("14"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) - del data_3, full_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_0) - del argmax_1, cast_0 - - # 
pd_op.flatten: (28xi32) <- (2x14x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (7098xi64) <- (2x3549xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (7098xi32) <- (28xi32, 7098xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [2, 3549] - - # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) - del full_int_array_3, gather_0 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - del full_6 - - # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [-1, 4] - - # pd_op.reshape: (28x4xf32) <- (2x14x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) - del data_5, full_int_array_4 - - # pd_op.gather: (7098x4xf32) <- (28x4xf32, 7098xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) - del flatten_1, full_4, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [2, 3549, 4] - - # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) - del full_int_array_5, gather_1 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x3549x5xf32) <- (2x3549xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_7, where_1.dtype), full_7 - ) - del full_7 - - # pd_op.full: (4xi64) <- () - full_8 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_8, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), - ) - del full_8 - - # pd_op.index_select: (2x3549x4xf32) <- (2x3549x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) - multiply_1 = paddle._C_ops.multiply(data_6, where_0) - del data_6 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [-1] - - # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) - - # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) - multiply_2 = paddle._C_ops.multiply(data_1, where_0) - del data_1, where_0 - - # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) 
- del multiply_2 - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x14x1xf32) <- (2x14x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) - del full_9, max_0 - - # pd_op.divide: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 - - # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x3549xf32) <- (2x14x3549xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) - del full_int_array_2, multiply_3 - - # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) - del full_int_array_6, max_2 - - # pd_op.multiply: (2x3549x4xf32) <- (2x3549x4xf32, 2x3549x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt deleted file mode 100644 index 18426c718..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -c4af36497f7852167288dc3ac1e4b55956d1b6c42ca46e70cd27bb1ccc05b8bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py deleted file mode 100644 index 5a54b3b62..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/input_meta.py +++ /dev/null @@ -1,83 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [12] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [10164] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "int64" - data = [12] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 10164] - dtype = "float32" - max_val = float("2.0") - mean = float("0.00491932") - std = float("0.0706648") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 12, 10164] - dtype = "float32" - max_val = float("0.964484") - mean = float("0.00145547") - std = float("0.0263451") - data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 12, 10164] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000409944") - std = float("0.0202429") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 12, 1] - dtype = "int32" - data = [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 12, 
4] - dtype = "float32" - max_val = float("658.644") - mean = float("251.921") - std = float("253.674") - data = None - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [2, 12, 10164] - dtype = "float32" - max_val = float("0.00990145") - mean = float("2.84369e-06") - std = float("0.000124311") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py deleted file mode 100644 index d3b764f89..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/model.py +++ /dev/null @@ -1,264 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - ): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) - del data_3, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_0 = [full_1, data_0, full_1] - del full_1 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) - del greater_than_0, stack_0 - - # pd_op.full: (1xi64) <- () - full_2 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) - - # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 - ) - del argmax_0, data_2 - - # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) - del data_5, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-2] - - # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) - del full_2 - - # pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 - - # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) - multiply_1 = paddle._C_ops.multiply(data_6, cast_0) - del cast_0, data_6 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) - del multiply_1 - - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_1) - del argmax_1, cast_1 - - # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) - del data_7 - - # pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = 
paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) - del flatten_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_1 = [full_4, data_1] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) - del gather_0, stack_1 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - del full_6 - - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) - del data_8, full_int_array_2 - - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) - del flatten_1, full_3, reshape_2 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_2 = [full_4, data_1, full_7] - del data_1, full_4, full_7 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) - del gather_1, stack_2 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_8, where_1.dtype), full_8 - ) - del full_8 - - # pd_op.full: (4xi64) <- () - full_9 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_9, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), - ) - del full_9 - - # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_9, where_0) - del data_9 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-1] - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, 
full_int_array_3, True) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_4, where_0) - del data_4, where_0 - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) - del multiply_3 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_10, float("1e-09"), True) - del full_10, max_0 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_0) - del multiply_2, scale_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) - del full_int_array_1, multiply_4 - - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) - del full_int_array_3, max_2 - - # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt deleted file mode 100644 index cf9cecf24..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py deleted file mode 100644 index e6e9cf731..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/model.py +++ /dev/null @@ -1,1144 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - data_3, - 
data_4, - data_5, - data_6, - data_7, - data_8, - ): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - - # pd_op.full_int_array: 
(1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 - ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 - - # pd_op.reshape: (-1x2xf32) <- 
(-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 - ) - del full_5, stack_5 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: 
(-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - del full_4 - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__0, - 
batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) - transpose_0 = 
paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) - del parameter_34 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) - del parameter_27 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) - del parameter_25 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_10 = 
paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) - del parameter_18 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) - del parameter_16 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, 
None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x96x-1x-1xf32, 4x96x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) - del parameter_9 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) - del parameter_7 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (2x4x-1xf32) <- (2x4x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (2x-1x4xf32) <- (2x4x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (2x68x-1xf32) <- (2x68x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (2x-1x68xf32) <- (2x68x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, 
paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 - - # builtin.combine: ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32]) <- (2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (2x-1x4xf32) <- ([2x-1x4xf32, 2x-1x4xf32, 2x-1x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 - - # builtin.combine: ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32]) <- (2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (2x-1x68xf32) <- ([2x-1x68xf32, 2x-1x68xf32, 2x-1x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt deleted file mode 100644 index 82d83ca0b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py deleted file mode 100644 index 5e383c184..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/input_meta.py +++ /dev/null @@ -1,19 +0,0 @@ -class 
Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "float32" - data = [0.197761] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "float32" - data = [1.17599] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "float32" - data = [3.1444] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py deleted file mode 100644 index 4cccb2b8e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/model.py +++ /dev/null @@ -1,43 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) - del data_0 - - # pd_op.add: (xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) - del data_1 - - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 - - return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt deleted file mode 100644 index 168b53be9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -519225de24ab7e47b3551471bab23db242df95467d8bc5652b816d75bc2b10ea \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py deleted file mode 100644 index 3494309b2..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/model.py +++ /dev/null @@ -1,1050 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, 
- parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - ): - # pd_op.full: (1xf64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("19"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (19xi64) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") - del full_1 - - # pd_op.cast: (19xf32) <- (19xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (19xf32) <- (19xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (19xf32) <- (19xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) - del full_4, scale_0 - - # builtin.combine: ([19xf32, 19xf32]) <- (19xf32, 19xf32) - combine_0 = [scale_1, scale_1] - del scale_1 - - # pd_op.meshgrid: ([19x19xf32, 19x19xf32]) <- ([19xf32, 19xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (19x19xf32, 19x19xf32) <- ([19x19xf32, 19x19xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) - scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - - # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) - scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - - # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - - # pd_op.scale: (19x19xf32) <- (19x19xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - - # builtin.combine: ([19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32]) <- (19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32) - combine_1 = [scale_2, scale_3, scale_4, scale_5] - del scale_2, scale_3, scale_4, scale_5 - - # pd_op.stack: (19x19x4xf32) <- ([19x19xf32, 19x19xf32, 19x19xf32, 19x19xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([19x19xf32, 19x19xf32]) <- (19x19xf32, 19x19xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (19x19x2xf32) <- ([19x19xf32, 19x19xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (361x4xf32) <- (19x19x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (361x2xf32) <- (19x19x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.full: (361x1xf32) <- () - full_5 = paddle._C_ops.full( - [361, 1], - float("32"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # 
pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("38"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (38xi64) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") - del full_6 - - # pd_op.cast: (38xf32) <- (38xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (38xf32) <- (38xf32, 1xf32) - scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) - del cast_1 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (38xf32) <- (38xf32, 1xf32) - scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) - del full_7, scale_6 - - # builtin.combine: ([38xf32, 38xf32]) <- (38xf32, 38xf32) - combine_3 = [scale_7, scale_7] - del scale_7 - - # pd_op.meshgrid: ([38x38xf32, 38x38xf32]) <- ([38xf32, 38xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_3) - del combine_3 - - # builtin.split: (38x38xf32, 38x38xf32) <- ([38x38xf32, 38x38xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) - scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - - # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) - scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) - - # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) - scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - - # pd_op.scale: (38x38xf32) <- (38x38xf32, 1xf32) - scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) - - # builtin.combine: ([38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32]) <- (38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32) - combine_4 = [scale_8, scale_9, scale_10, scale_11] - del scale_10, scale_11, scale_8, scale_9 - - # pd_op.stack: (38x38x4xf32) <- ([38x38xf32, 38x38xf32, 38x38xf32, 38x38xf32]) - stack_2 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # builtin.combine: ([38x38xf32, 38x38xf32]) <- (38x38xf32, 38x38xf32) - combine_5 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (38x38x2xf32) <- ([38x38xf32, 38x38xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # pd_op.reshape: (1444x4xf32) <- (38x38x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) - del stack_2 - - # pd_op.reshape: (1444x2xf32) <- (38x38x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) - del stack_3 - - # pd_op.full: (1444x1xf32) <- () - full_8 = paddle._C_ops.full( - [1444, 1], - float("16"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_9 = paddle._C_ops.full( - [1], float("76"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (76xi64) <- (1xf64, 1xf64, 1xf64) - arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") - del full_0, full_2, full_9 - - # pd_op.cast: (76xf32) <- (76xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (76xf32) <- (76xf32, 1xf32) - scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (76xf32) <- (76xf32, 1xf32) - scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) - del full_10, scale_12 - - # builtin.combine: ([76xf32, 76xf32]) <- (76xf32, 76xf32) - combine_6 = [scale_13, scale_13] - del 
scale_13 - - # pd_op.meshgrid: ([76x76xf32, 76x76xf32]) <- ([76xf32, 76xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_6) - del combine_6 - - # builtin.split: (76x76xf32, 76x76xf32) <- ([76x76xf32, 76x76xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) - - # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) - - # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) - scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) - - # pd_op.scale: (76x76xf32) <- (76x76xf32, 1xf32) - scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) - del full_3 - - # builtin.combine: ([76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32]) <- (76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32) - combine_7 = [scale_14, scale_15, scale_16, scale_17] - del scale_14, scale_15, scale_16, scale_17 - - # pd_op.stack: (76x76x4xf32) <- ([76x76xf32, 76x76xf32, 76x76xf32, 76x76xf32]) - stack_4 = paddle._C_ops.stack(combine_7, -1) - del combine_7 - - # builtin.combine: ([76x76xf32, 76x76xf32]) <- (76x76xf32, 76x76xf32) - combine_8 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (76x76x2xf32) <- ([76x76xf32, 76x76xf32]) - stack_5 = paddle._C_ops.stack(combine_8, -1) - del combine_8 - - # pd_op.reshape: (5776x4xf32) <- (76x76x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) - del full_int_array_0, stack_4 - - # pd_op.reshape: (5776x2xf32) <- (76x76x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) - del full_int_array_1, stack_5 - - # pd_op.full: (5776x1xf32) <- () - full_11 = paddle._C_ops.full( - [5776, 1], - float("8"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([361x4xf32, 1444x4xf32, 5776x4xf32]) <- (361x4xf32, 1444x4xf32, 5776x4xf32) - combine_9 = [reshape_0, reshape_2, reshape_4] - - # pd_op.concat: (7581x4xf32) <- ([361x4xf32, 1444x4xf32, 5776x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_9, full_12) - del combine_9 - - # builtin.combine: ([361x2xf32, 1444x2xf32, 5776x2xf32]) <- (361x2xf32, 1444x2xf32, 5776x2xf32) - combine_10 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (7581x2xf32) <- ([361x2xf32, 1444x2xf32, 5776x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_10, full_12) - del combine_10 - - # builtin.combine: ([361x1xf32, 1444x1xf32, 5776x1xf32]) <- (361x1xf32, 1444x1xf32, 5776x1xf32) - combine_11 = [full_5, full_8, full_11] - del full_11, full_5, full_8 - - # pd_op.concat: (7581x1xf32) <- ([361x1xf32, 1444x1xf32, 5776x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_11, full_12) - del combine_11, full_12 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_2 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_2 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x19x19xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_0, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 
1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_3 = [1, -1, 1, 1] - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) - del parameter_52 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - - # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x19x19xf32) - add_1 = paddle._C_ops.add(swish_0, data_0) - - # pd_op.conv2d: (2x4x19x19xf32) <- (2x384x19x19xf32, 4x384x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) - del parameter_45 - - # pd_op.add: (2x4x19x19xf32) <- (2x4x19x19xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) - del parameter_43 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) - del data_0 - - # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, 
tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x68x19x19xf32) <- (2x384x19x19xf32, 68x384x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) - del parameter_36 - - # pd_op.add: (2x68x19x19xf32) <- (2x68x19x19xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x4x19x19xf32) <- (2x4x19x19xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x4x361xf32) <- (2x4x19x19xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x361x4xf32) <- (2x4x361xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (2x68x361xf32) <- (2x68x19x19xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x361x68xf32) <- (2x68x361xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x38x38xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_1, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) - del parameter_34 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) - - # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x38x38xf32) - add_6 = paddle._C_ops.add(swish_2, data_1) - - # pd_op.conv2d: (2x4x38x38xf32) <- (2x192x38x38xf32, 4x192x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = 
paddle._C_ops.reshape(parameter_27, full_int_array_3) - del parameter_27 - - # pd_op.add: (2x4x38x38xf32) <- (2x4x38x38xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) - del parameter_25 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) - del data_1 - - # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x68x38x38xf32) <- (2x192x38x38xf32, 68x192x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) - del parameter_18 - - # pd_op.add: (2x68x38x38xf32) <- (2x68x38x38xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x4x38x38xf32) <- (2x4x38x38xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (2x4x1444xf32) <- (2x4x38x38xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (2x1444x4xf32) <- (2x4x1444xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x68x1444xf32) <- (2x68x38x38xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (2x1444x68xf32) <- (2x68x1444xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x76x76xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_2, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) - del parameter_16 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_10 = 
paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) - - # pd_op.conv2d: (2x96x76x76xf32) <- (2x96x76x76xf32, 96x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x76x76xf32) - add_11 = paddle._C_ops.add(swish_4, data_2) - - # pd_op.conv2d: (2x4x76x76xf32) <- (2x96x76x76xf32, 4x96x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) - del parameter_9 - - # pd_op.add: (2x4x76x76xf32) <- (2x4x76x76xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) - del parameter_7 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) - del data_2 - - # pd_op.conv2d: (2x96x76x76xf32) <- (2x96x76x76xf32, 96x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x68x76x76xf32) <- (2x96x76x76xf32, 68x96x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) - del full_int_array_3, parameter_0 - - # pd_op.add: (2x68x76x76xf32) <- (2x68x76x76xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x4x76x76xf32) <- (2x4x76x76xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (2x4x5776xf32) <- (2x4x76x76xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (2x5776x4xf32) <- (2x4x5776xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (2x68x5776xf32) <- (2x68x76x76xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (2x5776x68xf32) <- (2x68x5776xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_13 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_13 - - # builtin.combine: ([2x361x4xf32, 2x1444x4xf32, 2x5776x4xf32]) <- (2x361x4xf32, 2x1444x4xf32, 2x5776x4xf32) - combine_12 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (2x7581x4xf32) <- ([2x361x4xf32, 2x1444x4xf32, 2x5776x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_12, full_13) - del combine_12 - - # builtin.combine: ([2x361x68xf32, 2x1444x68xf32, 2x5776x68xf32]) <- (2x361x68xf32, 2x1444x68xf32, 2x5776x68xf32) - combine_13 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (2x7581x68xf32) <- ([2x361x68xf32, 2x1444x68xf32, 2x5776x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_13, full_13) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_13, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_13, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_2, - reshape_4, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt
deleted file mode 100644
index 6ac9d23ff..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_hash.txt
+++ /dev/null
@@ -1 +0,0 @@
-4386995a6a00133c7db276591465ce4ec1f82a0512fc13aeb9bb4745fafa593c
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json
deleted file mode 100644
index d8719c2c9..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/graph_net.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "framework": "paddle",
-  "model_name": "PP-YOLOE-S_vehicle",
-  "num_devices_required": 1,
-  "num_nodes_required": 1
-}
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py
deleted file mode 100644
index 2525ffce1..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/input_meta.py
+++ /dev/null
@@ -1,52 +0,0 @@
-class Program_weight_tensor_data_0:
-    name = "data_0"
-    shape = [2, 7581, 4]
-    dtype = "float32"
-    min_val = float("0.01")
-    max_val = float("0.01")
-    mean = float("0.01")
-    std = float("2.79397e-09")
-    data = None
-
-
-class Program_weight_tensor_data_1:
-    name = "data_1"
-    shape = [2, 7581, 4]
-    dtype = "float32"
-    min_val = float("-231.068")
-    max_val = float("849.397")
-    mean = float("303.519")
-    std = float("185.947")
-    data = None
-
-
-class Program_weight_tensor_data_2:
-    name = "data_2"
-    shape = [7581, 2]
-    dtype = "float32"
-    min_val = float("4.0")
-    max_val = float("604.0")
-    mean = float("304.0")
-    std = float("175.48")
-    data = None
-
-
-class Program_weight_tensor_data_3:
-    name = "data_3"
-    shape = [2, 1, 1]
-    dtype = "int32"
-    data = [0, 3]
-
-
-class Program_weight_tensor_data_4:
-    name = "data_4"
-    shape = [2, 1, 4]
-    dtype = "float32"
-    data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0]
-
-
-class Program_weight_tensor_data_5:
-    name = "data_5"
-    shape = [2, 1, 1]
-    dtype = "float32"
-    data = [1.0, 1.0]
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py
deleted file mode 100644
index 950175fb3..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/model.py
+++ /dev/null
@@ -1,338 +0,0 @@
-import paddle
-
-
-class GraphModule(paddle.nn.Layer):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, data_0, data_1, data_2, data_3, data_4, data_5):
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_0 = [2]
-
-        # pd_op.unsqueeze: (2x1x1x4xf32) <- (2x1x4xf32, 1xi64)
-        unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0)
-        del data_4
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_1 = [1]
-
-        # pd_op.unsqueeze: (2x1x7581x4xf32) <- (2x7581x4xf32, 1xi64)
-        unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1)
-        del data_1, full_int_array_1
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_2 = [0]
-
-        # pd_op.slice: (2x1x1x2xf32) <- (2x1x1x4xf32, 1xi64, 1xi64)
-        slice_0 = paddle._C_ops.slice(
-            unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], []
-        )
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_3 = [2147483647]
-
-        # pd_op.slice: (2x1x1x2xf32) <- (2x1x1x4xf32, 1xi64, 1xi64)
-        slice_1 = paddle._C_ops.slice(
-            unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], []
-        )
-
-        # pd_op.slice:
(2x1x7581x2xf32) <- (2x1x7581x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 - - # pd_op.slice: (2x1x7581x2xf32) <- (2x1x7581x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] - ) - del full_int_array_0, full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (2x1x7581x2xf32) <- (2x1x1x2xf32, 2x1x7581x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (2x1x7581x2xf32) <- (2x1x1x2xf32, 2x1x7581x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 2x1x7581x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) - del subtract_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: (2x1x7581xf32) <- (2x1x7581x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (2x1x1x2xf32) <- (2x1x1x2xf32, 2x1x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (2x1x1x2xf32) <- (2x1x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) - del subtract_1 - - # pd_op.prod: (2x1x1xf32) <- (2x1x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del clip_1 - - # pd_op.subtract: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 2x1x7581x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) - del full_0, full_1, subtract_2 - - # pd_op.prod: (2x1x7581xf32) <- (2x1x7581x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 - - # pd_op.add: (2x1x7581xf32) <- (2x1x1xf32, 2x1x7581xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x1x7581xf32) <- (2x1x7581xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) - del full_2, subtract_3 - - # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (2x4x7581xf32) <- (2x7581x4xf32) - transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) - del data_0 - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("2"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_5 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 
= paddle.arange(full_3, full_4, full_5, dtype="int32") - del full_3, full_4, full_5 - - # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [1, 1] - - # pd_op.tile: (2x1xi32) <- (2x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) - del full_int_array_5 - - # pd_op.squeeze: (2x1xi32) <- (2x1x1xi32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) - del data_3 - - # builtin.combine: ([2x1xi32, 2x1xi32]) <- (2x1xi32, 2x1xi32) - combine_0 = [tile_0, squeeze_0] - del squeeze_0, tile_0 - - # pd_op.stack: (2x1x2xi32) <- ([2x1xi32, 2x1xi32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 - - # pd_op.gather_nd: (2x1x7581xf32) <- (2x4x7581xf32, 2x1x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) - del stack_0, transpose_0 - - # pd_op.pow: (2x1x7581xf32) <- (2x1x7581xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) - del gather_nd_0 - - # pd_op.pow: (2x1x7581xf32) <- (2x1x7581xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_6 = [0, 1] - - # pd_op.unsqueeze: (1x1x7581x2xf32) <- (7581x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) - del data_2, full_int_array_6 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x7581x1xf32, 1x1x7581x1xf32]) <- (1x1x7581x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) - del unsqueeze_3 - - # builtin.split: (1x1x7581x1xf32, 1x1x7581x1xf32) <- ([1x1x7581x1xf32, 1x1x7581x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32]) <- (2x1x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) - del full_6, unsqueeze_0 - - # builtin.split: (2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32) <- ([2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (2x1x7581x1xf32) <- (1x1x7581x1xf32, 2x1x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - del split_2 - - # pd_op.subtract: (2x1x7581x1xf32) <- (1x1x7581x1xf32, 2x1x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - del split_3 - - # pd_op.subtract: (2x1x7581x1xf32) <- (2x1x1x1xf32, 1x1x7581x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - del split_0, split_4 - - # pd_op.subtract: (2x1x7581x1xf32) <- (2x1x1x1xf32, 1x1x7581x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) - del split_1, split_5 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32]) <- (2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32) - combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 - - # pd_op.concat: (2x1x7581x4xf32) <- ([2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32], 1xi32) - concat_0 = 
paddle._C_ops.concat(combine_1, full_7) - del combine_1, full_7 - - # pd_op.min: (2x1x7581xf32) <- (2x1x7581x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0, full_int_array_4 - - # pd_op.full: (xf32) <- () - full_8 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (2x1x7581xb) <- (2x1x7581xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) - del full_8, min_0 - - # pd_op.cast: (2x1x7581xf32) <- (2x1x7581xb) - cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) - - # pd_op.full: (1xi32) <- () - full_9 = paddle._C_ops.full( - [1], float("13"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.topk: (2x1x13xf32, 2x1x13xi64) <- (2x1x7581xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(multiply_1, full_9, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del full_9, multiply_1 - - # pd_op.full: (1xi32) <- () - full_10 = paddle._C_ops.full( - [1], float("7581"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x1x13x7581xf32) <- (2x1x13xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 - ) - del full_10, topk_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] - - # pd_op.sum: (2x1x7581xf32) <- (2x1x13x7581xf32, 1xi64) - sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 - - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) - multiply_2 = paddle._C_ops.multiply(sum_0, data_5) - del sum_0 - - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) - del cast_0, multiply_2 - - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) - multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) - del data_5, multiply_3 - - # pd_op.sum: (2x7581xf32) <- (2x1x7581xf32, 1xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) - del full_int_array_7 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] - - # pd_op.max: (xf32) <- (2x7581xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) - del full_int_array_8 - - # pd_op.full: (xf32) <- () - full_11 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) - del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt index c00ab6d8b..4d04a175d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/graph_hash.txt @@ -1 +1 @@ -65d3614abbd2ef389b7cf238f14225f701dc8087775ebca72684cad377953c54 \ No newline at end of file +6f0f40cbf909627fa867337174f532d3e179ced4784c7fc2c9cb00ae6193ac2e \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py index f58dc071b..aa6620489 100644 --- 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/input_meta.py
@@ -1,9 +1,59 @@
 class Program_weight_tensor_data_0:
     name = "data_0"
-    shape = [2, 3, 640, 640]
+    shape = [2, 1, 7581]
     dtype = "float32"
-    min_val = float("-2.01516")
-    max_val = float("2.64")
-    mean = float("0.187747")
-    std = float("0.681331")
+    max_val = float("1.0")
+    mean = float("0.00171481")
+    std = float("0.0413748")
+    data = None
+
+
+class Program_weight_tensor_data_1:
+    name = "data_1"
+    shape = [2, 1]
+    dtype = "int32"
+    data = [0, 1]
+
+
+class Program_weight_tensor_data_2:
+    name = "data_2"
+    shape = [2, 1, 1]
+    dtype = "int32"
+    data = [0, 3]
+
+
+class Program_weight_tensor_data_3:
+    name = "data_3"
+    shape = [2, 7581]
+    dtype = "float32"
+    max_val = float("1.0")
+    mean = float("0.00171481")
+    std = float("0.0413748")
+    data = None
+
+
+class Program_weight_tensor_data_4:
+    name = "data_4"
+    shape = [2, 1, 4]
+    dtype = "float32"
+    data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0]
+
+
+class Program_weight_tensor_data_5:
+    name = "data_5"
+    shape = [2, 1, 7581]
+    dtype = "float32"
+    max_val = float("0.00652957")
+    mean = float("2.09634e-05")
+    std = float("0.000282651")
+    data = None
+
+
+class Program_weight_tensor_data_6:
+    name = "data_6"
+    shape = [2, 1, 7581]
+    dtype = "float32"
+    max_val = float("0.931424")
+    mean = float("0.0255599")
+    std = float("0.091854")
     data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py
index f20830b9f..80304c15d 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/model.py
@@ -5,4269 +5,172 @@ class GraphModule(paddle.nn.Layer):
     def __init__(self):
         super().__init__()

-    def forward(
-        self,
-        parameter_0,
-        parameter_1,
-        parameter_2,
-        parameter_3,
-        parameter_4,
-        parameter_5,
-        parameter_6,
-        parameter_7,
-        parameter_8,
-        parameter_9,
-        parameter_10,
-        parameter_11,
-        parameter_12,
-        parameter_13,
-        parameter_14,
-        parameter_15,
-        parameter_16,
-        parameter_17,
-        parameter_18,
-        parameter_19,
-        parameter_20,
-        parameter_21,
-        parameter_22,
-        parameter_23,
-        parameter_24,
-        parameter_25,
-        parameter_26,
-        parameter_27,
-        parameter_28,
-        parameter_29,
-        parameter_30,
-        parameter_31,
-        parameter_32,
-        parameter_33,
-        parameter_34,
-        parameter_35,
-        parameter_36,
-        parameter_37,
-        parameter_38,
-        parameter_39,
-        parameter_40,
-        parameter_41,
-        parameter_42,
-        parameter_43,
-        parameter_44,
-        parameter_45,
-        parameter_46,
-        parameter_47,
-        parameter_48,
-        parameter_49,
-        parameter_50,
-        parameter_51,
-        parameter_52,
-        parameter_53,
-        parameter_54,
-        parameter_55,
-        parameter_56,
-        parameter_57,
-        parameter_58,
-        parameter_59,
-        parameter_60,
-        parameter_61,
-        parameter_62,
-        parameter_63,
-        parameter_64,
-        parameter_65,
-        parameter_66,
-        parameter_67,
-        parameter_68,
-        parameter_69,
-        parameter_70,
-        parameter_71,
-        parameter_72,
-        parameter_73,
-        parameter_74,
-        parameter_75,
-        parameter_76,
-        parameter_77,
-        parameter_78,
-        parameter_79,
-        parameter_80,
-        parameter_81,
-        parameter_82,
-        parameter_83,
-        parameter_84,
-        parameter_85,
-        parameter_86,
-        parameter_87,
-        parameter_88,
-        parameter_89,
-        parameter_90,
-        parameter_91,
-        parameter_92,
-        parameter_93,
-        parameter_94,
-        parameter_95,
-        parameter_96,
-        parameter_97,
-        parameter_98,
-        parameter_99,
-        parameter_100,
-        parameter_101,
-        parameter_102,
-
parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - 
parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - data_0, - ): - # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x3x-1x-1xf32, 16x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_422, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_0, parameter_422 - - # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_421, - parameter_420, - parameter_419, - parameter_418, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_0, parameter_418, parameter_419, parameter_420, parameter_421 - - # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - del batch_norm__0 - - # pd_op.conv2d: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32, 16x16x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_417, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_417, swish_0 - - # pd_op.batch_norm_: (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x-1x-1xf32, 16xf32, 16xf32, 16xf32, 16xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_416, - parameter_415, - parameter_414, - parameter_413, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_1, parameter_413, parameter_414, parameter_415, parameter_416 - - # pd_op.swish: (2x16x-1x-1xf32) <- (2x16x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - del batch_norm__6 - - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x16x-1x-1xf32, 32x16x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_1, parameter_412, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_412, swish_1 - - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_411, - parameter_410, - parameter_409, - parameter_408, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_2, parameter_408, parameter_409, parameter_410, parameter_411 - - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - del batch_norm__12 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x32x-1x-1xf32, 48x32x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_2, parameter_407, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_407, swish_2 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_406, - parameter_405, - parameter_404, - parameter_403, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_3, parameter_403, parameter_404, parameter_405, parameter_406 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - del batch_norm__18 - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_3, parameter_402, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_402 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_401, - parameter_400, - parameter_399, - parameter_398, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_4, parameter_398, parameter_399, parameter_400, parameter_401 - - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - del batch_norm__24 - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x48x-1x-1xf32, 24x48x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_3, parameter_397, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_397, swish_3 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__30, - 
batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_396, - parameter_395, - parameter_394, - parameter_393, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_5, parameter_393, parameter_394, parameter_395, parameter_396 - - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - del batch_norm__30 - - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_5, parameter_392, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_392 - - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_391, - parameter_390, - parameter_389, - parameter_388, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () + full_0 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - del conv2d_6, parameter_388, parameter_389, parameter_390, parameter_391 - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__36) - del batch_norm__36 + # pd_op.argmax: (2x7581xi64) <- (2x1x7581xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_6, parameter_387, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_387 - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_386, - parameter_385, - parameter_384, - parameter_383, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_7, parameter_383, parameter_384, parameter_385, parameter_386 + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1 - # pd_op.conv2d: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 24x24x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_6, parameter_382, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_382, swish_6 + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # pd_op.batch_norm_: (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: 
f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_381, - parameter_380, - parameter_379, - parameter_378, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_8, parameter_378, parameter_379, parameter_380, parameter_381 + # pd_op.add: (2x7581xi64) <- (2x7581xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 - # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) - del batch_norm__42, batch_norm__48 + # pd_op.flatten: (2xi32) <- (2x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 - # pd_op.swish: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32) - swish_7 = paddle._C_ops.swish(add_0) + # pd_op.flatten: (15162xi64) <- (2x7581xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) del add_0 - # pd_op.add: (2x24x-1x-1xf32) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_5, swish_7) - del swish_5, swish_7 - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([2x24x-1x-1xf32, 2x24x-1x-1xf32]) <- (2x24x-1x-1xf32, 2x24x-1x-1xf32) - combine_0 = [swish_4, add_1] - del add_1, swish_4 - - # pd_op.concat: (2x48x-1x-1xf32) <- ([2x24x-1x-1xf32, 2x24x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 + # pd_op.gather: (15162xi32) <- (2xi32, 15162xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.mean: (2x48x1x1xf32) <- (2x48x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - mean_0, parameter_377, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_0, parameter_377 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_376, full_int_array_1) - del parameter_376 - - # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_9, reshape_0) - del conv2d_9, reshape_0 - - # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_2, float("0.166667"), float("0.5") - ) - del add_2 - - # pd_op.multiply: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x1x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) - del concat_2, hardsigmoid_0 - - # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x48x-1x-1xf32, 64x48x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_0, parameter_375, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_0, parameter_375 - - # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_374, - parameter_373, - parameter_372, - parameter_371, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else 
(out, None, None, None, None, None), - ) - del conv2d_10, parameter_371, parameter_372, parameter_373, parameter_374 - - # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) - swish_8 = paddle._C_ops.swish(batch_norm__54) - del batch_norm__54 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_8, parameter_370, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_370, swish_8 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_369, - parameter_368, - parameter_367, - parameter_366, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_11, parameter_366, parameter_367, parameter_368, parameter_369 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_9 = paddle._C_ops.swish(batch_norm__60) - del batch_norm__60 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - swish_9, parameter_365, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_365 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_364, - parameter_363, - parameter_362, - parameter_361, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_12, parameter_361, parameter_362, parameter_363, parameter_364 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(batch_norm__66) - del batch_norm__66 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_9, parameter_360, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_360, swish_9 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_359, - parameter_358, - parameter_357, - parameter_356, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_13, parameter_356, parameter_357, parameter_358, parameter_359 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(batch_norm__72) - del batch_norm__72 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_355, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_355 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - 
batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_354, - parameter_353, - parameter_352, - parameter_351, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_14, parameter_351, parameter_352, parameter_353, parameter_354 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_12 = paddle._C_ops.swish(batch_norm__78) - del batch_norm__78 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_15 = paddle._C_ops.conv2d( - swish_12, parameter_350, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_350 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_15, - parameter_349, - parameter_348, - parameter_347, - parameter_346, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_15, parameter_346, parameter_347, parameter_348, parameter_349 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - swish_12, parameter_345, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_345, swish_12 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_344, - parameter_343, - parameter_342, - parameter_341, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_16, parameter_341, parameter_342, parameter_343, parameter_344 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) - del batch_norm__84, batch_norm__90 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_13 = paddle._C_ops.swish(add_3) - del add_3 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_4 = paddle._C_ops.add(swish_11, swish_13) - del swish_11, swish_13 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - add_4, parameter_340, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_340 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_339, - parameter_338, - parameter_337, - parameter_336, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_17, parameter_336, parameter_337, parameter_338, parameter_339 - - # pd_op.swish: (2x48x-1x-1xf32) <- 
(2x48x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - del batch_norm__96 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_335, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_335 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_334, - parameter_333, - parameter_332, - parameter_331, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_18, parameter_331, parameter_332, parameter_333, parameter_334 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_330, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_330, swish_14 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_329, - parameter_328, - parameter_327, - parameter_326, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_19, parameter_326, parameter_327, parameter_328, parameter_329 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) - del batch_norm__102, batch_norm__108 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_15 = paddle._C_ops.swish(add_5) - del add_5 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_6 = paddle._C_ops.add(add_4, swish_15) - del add_4, swish_15 - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_1 = [swish_10, add_6] - del add_6, swish_10 - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_20 = paddle._C_ops.conv2d( - mean_1, parameter_325, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_1, parameter_325 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_324, full_int_array_1) - del parameter_324 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_20, reshape_1) - del conv2d_20, reshape_1 - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_7, float("0.166667"), float("0.5") - ) - del add_7 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_1 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) - del concat_3, hardsigmoid_1 - - # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_21 = paddle._C_ops.conv2d( 
- multiply_1, parameter_323, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_1, parameter_323 - - # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_322, - parameter_321, - parameter_320, - parameter_319, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_21, parameter_319, parameter_320, parameter_321, parameter_322 - - # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__114) - del batch_norm__114 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_16, parameter_318, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_318 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_317, - parameter_316, - parameter_315, - parameter_314, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_22, parameter_314, parameter_315, parameter_316, parameter_317 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_17 = paddle._C_ops.swish(batch_norm__120) - del batch_norm__120 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_23 = paddle._C_ops.conv2d( - swish_17, parameter_313, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_313 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_312, - parameter_311, - parameter_310, - parameter_309, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_23, parameter_309, parameter_310, parameter_311, parameter_312 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(batch_norm__126) - del batch_norm__126 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_17, parameter_308, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_308, swish_17 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_307, - parameter_306, - parameter_305, - parameter_304, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - 
if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_24, parameter_304, parameter_305, parameter_306, parameter_307 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) - del batch_norm__132 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_303, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_303 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_302, - parameter_301, - parameter_300, - parameter_299, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_25, parameter_299, parameter_300, parameter_301, parameter_302 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(batch_norm__138) - del batch_norm__138 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - swish_20, parameter_298, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_298 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_297, - parameter_296, - parameter_295, - parameter_294, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_26, parameter_294, parameter_295, parameter_296, parameter_297 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_20, parameter_293, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_293, swish_20 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_292, - parameter_291, - parameter_290, - parameter_289, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_27, parameter_289, parameter_290, parameter_291, parameter_292 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) - del batch_norm__144, batch_norm__150 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(add_8) - del add_8 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_9 = paddle._C_ops.add(swish_19, swish_21) - del swish_19, swish_21 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_28 = paddle._C_ops.conv2d( - add_9, parameter_288, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" - ) - del parameter_288 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_287, - parameter_286, - parameter_285, - parameter_284, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_28, parameter_284, parameter_285, parameter_286, parameter_287 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(batch_norm__156) - del batch_norm__156 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - swish_22, parameter_283, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_283 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_282, - parameter_281, - parameter_280, - parameter_279, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_29, parameter_279, parameter_280, parameter_281, parameter_282 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_22, parameter_278, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_278, swish_22 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_277, - parameter_276, - parameter_275, - parameter_274, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_30, parameter_274, parameter_275, parameter_276, parameter_277 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) - del batch_norm__162, batch_norm__168 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(add_10) - del add_10 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(add_9, swish_23) - del add_9, swish_23 - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_2 = [swish_18, add_11] - del add_11, swish_18 - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - mean_2, parameter_273, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_2, 
parameter_273 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_272, full_int_array_1) - del parameter_272 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_31, reshape_2) - del conv2d_31, reshape_2 - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_12, float("0.166667"), float("0.5") - ) - del add_12 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) - del concat_4, hardsigmoid_2 - - # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_32 = paddle._C_ops.conv2d( - multiply_2, parameter_271, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_2, parameter_271 - - # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_270, - parameter_269, - parameter_268, - parameter_267, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_32, parameter_267, parameter_268, parameter_269, parameter_270 - - # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) - swish_24 = paddle._C_ops.swish(batch_norm__174) - del batch_norm__174 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_24, parameter_266, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_266 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_265, - parameter_264, - parameter_263, - parameter_262, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_33, parameter_262, parameter_263, parameter_264, parameter_265 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_25 = paddle._C_ops.swish(batch_norm__180) - del batch_norm__180 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_261, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_261 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_260, - parameter_259, - parameter_258, - parameter_257, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_34, parameter_257, parameter_258, parameter_259, parameter_260 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - 
swish_26 = paddle._C_ops.swish(batch_norm__186) - del batch_norm__186 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_35 = paddle._C_ops.conv2d( - swish_25, parameter_256, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_256, swish_25 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_255, - parameter_254, - parameter_253, - parameter_252, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_35, parameter_252, parameter_253, parameter_254, parameter_255 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_27 = paddle._C_ops.swish(batch_norm__192) - del batch_norm__192 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_251, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_251 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_250, - parameter_249, - parameter_248, - parameter_247, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_36, parameter_247, parameter_248, parameter_249, parameter_250 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_28 = paddle._C_ops.swish(batch_norm__198) - del batch_norm__198 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_28, parameter_246, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_246 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_245, - parameter_244, - parameter_243, - parameter_242, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_37, parameter_242, parameter_243, parameter_244, parameter_245 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - swish_28, parameter_241, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_241, swish_28 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_38, - parameter_240, - parameter_239, - parameter_238, - parameter_237, - True, - 
float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_38, parameter_237, parameter_238, parameter_239, parameter_240 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) - del batch_norm__204, batch_norm__210 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_29 = paddle._C_ops.swish(add_13) - del add_13 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_14 = paddle._C_ops.add(swish_27, swish_29) - del swish_27, swish_29 - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_3 = [swish_26, add_14] - del add_14, swish_26 - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) - del full_int_array_0 - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - mean_3, parameter_236, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_3, parameter_236 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_235, full_int_array_1) - del parameter_235 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_15 = paddle._C_ops.add(conv2d_39, reshape_3) - del conv2d_39, reshape_3 - - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_15, float("0.166667"), float("0.5") - ) - del add_15 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) - del concat_5, hardsigmoid_3 - - # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_40 = paddle._C_ops.conv2d( - multiply_3, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_3, parameter_234 - - # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_40, parameter_230, parameter_231, parameter_232, parameter_233 + full_int_array_0 = [2, 7581] - # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__216) - del batch_norm__216 + # pd_op.reshape: (2x7581xi32) <- (15162xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__222, - batch_norm__223, - 
batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_41, parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__222) - del batch_norm__222 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x512x-1x-1xf32, 192x512x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224, swish_30 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_42, parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__228) - del batch_norm__228 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219, swish_32 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_43, parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(batch_norm__234) - del batch_norm__234 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_214 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_44, parameter_210, parameter_211, 
parameter_212, parameter_213 + # pd_op.greater_than: (2x7581xb) <- (2x7581xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_209, swish_33 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full_like: (2x7581xi32) <- (2x7581xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() ) - del conv2d_45, parameter_205, parameter_206, parameter_207, parameter_208 + del full_4 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) - del batch_norm__240, batch_norm__246 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(add_16) - del add_16 + # pd_op.where: (2x7581xi32) <- (2x7581xb, 2x7581xi32, 2x7581xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] + full_int_array_1 = [-1, 4] - # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_34, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_2 + # pd_op.reshape: (2x4xf32) <- (2x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] + # pd_op.gather: (15162x4xf32) <- (2x4xf32, 15162xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 - # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_34, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_3 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 7581, 4] - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] + # pd_op.reshape: (2x7581x4xf32) <- (15162x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 - # pd_op.pool2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_34, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() ) - del full_int_array_4 - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 
2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_4 = [swish_34, pool2d_0, pool2d_1, pool2d_2] - del pool2d_0, pool2d_1, pool2d_2, swish_34 - - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 + # pd_op.one_hot: (2x7581x5xf32) <- (2x7581xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x768x-1x-1xf32, 192x768x1x1xf32) - conv2d_46 = paddle._C_ops.conv2d( - concat_6, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (4xi64) <- () + full_6 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() ) - del concat_6, parameter_204 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), ) - del conv2d_46, parameter_200, parameter_201, parameter_202, parameter_203 + del full_6 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(batch_norm__252) - del batch_norm__252 + # pd_op.index_select: (2x7581x4xf32) <- (2x7581x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_5 = [swish_31, swish_35] - del swish_31, swish_35 + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_47 = paddle._C_ops.conv2d( - concat_7, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_7, parameter_199 + # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_47, parameter_195, parameter_196, parameter_197, parameter_198 + # pd_op.multiply: 
(2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_36 = paddle._C_ops.swish(batch_norm__258) - del batch_norm__258 + # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_36, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194 + # pd_op.scale: (2x1x1xf32) <- (2x1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_1, float("1e-09"), True) + del full_1, max_0 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_48, parameter_190, parameter_191, parameter_192, parameter_193 + # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(batch_norm__264) - del batch_norm__264 - - # pd_op.nearest_interp: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_37, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_37 - - # builtin.combine: ([2x192x-1x-1xf32, 2x256x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x256x-1x-1xf32) - combine_6 = [nearest_interp_0, swish_24] - del nearest_interp_0, swish_24 - - # pd_op.concat: (2x448x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x256x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_49 = paddle._C_ops.conv2d( - concat_8, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_49, parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_38 = paddle._C_ops.swish(batch_norm__270) - del batch_norm__270 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_50 = paddle._C_ops.conv2d( - concat_8, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_8, parameter_184 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 
96xf32, 96xf32, 96xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_50, parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_39 = paddle._C_ops.swish(batch_norm__276) - del batch_norm__276 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_39, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179, swish_39 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_51, parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_40 = paddle._C_ops.swish(batch_norm__282) - del batch_norm__282 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - swish_40, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_52, parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_40, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169, swish_40 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_53, parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) - del batch_norm__288, batch_norm__294 - - # pd_op.swish: (2x96x-1x-1xf32) <- 
(2x96x-1x-1xf32) - swish_41 = paddle._C_ops.swish(add_17) - del add_17 - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_7 = [swish_38, swish_41] - del swish_38, swish_41 - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - concat_9, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_9, parameter_164 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_54, parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(batch_norm__300) - del batch_norm__300 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_55 = paddle._C_ops.conv2d( - swish_42, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_55, parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_43 = paddle._C_ops.swish(batch_norm__306) - del batch_norm__306 - - # pd_op.nearest_interp: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_43, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_43 - - # builtin.combine: ([2x96x-1x-1xf32, 2x128x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x128x-1x-1xf32) - combine_8 = [nearest_interp_1, swish_16] - del nearest_interp_1, swish_16 - - # pd_op.concat: (2x224x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x128x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) - conv2d_56 = paddle._C_ops.conv2d( - concat_10, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - True, - 
float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_56, parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_44 = paddle._C_ops.swish(batch_norm__312) - del batch_norm__312 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x224x-1x-1xf32, 48x224x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - concat_10, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_10, parameter_149 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_57, parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__318) - del batch_norm__318 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - swish_45, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144, swish_45 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_58, parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__324) - del batch_norm__324 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_46, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_59, parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_46, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134, swish_46 - - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - 
batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_60, parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_18 = paddle._C_ops.add(batch_norm__330, batch_norm__336) - del batch_norm__330, batch_norm__336 - - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_47 = paddle._C_ops.swish(add_18) - del add_18 - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_9 = [swish_44, swish_47] - del swish_44, swish_47 - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - concat_11, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_11, parameter_129 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_61, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_61, parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__342) - del batch_norm__342 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_62 = paddle._C_ops.conv2d( - swish_48, parameter_124, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_62, parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_49 = paddle._C_ops.swish(batch_norm__348) - del batch_norm__348 - - # builtin.combine: ([2x96x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x192x-1x-1xf32) - combine_10 = [swish_49, swish_42] - del swish_42, swish_49 - - # pd_op.concat: (2x288x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) - conv2d_63 = paddle._C_ops.conv2d( - concat_12, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_119 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_63, parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_50 = paddle._C_ops.swish(batch_norm__354) - del batch_norm__354 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x288x-1x-1xf32, 96x288x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - concat_12, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_12, parameter_114 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_64, parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_51 = paddle._C_ops.swish(batch_norm__360) - del batch_norm__360 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_51, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109, swish_51 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_65, parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_52 = paddle._C_ops.swish(batch_norm__366) - del batch_norm__366 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_52, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_66, parameter_100, 
parameter_101, parameter_102, parameter_103 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_52, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99, swish_52 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_67, parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) - del batch_norm__372, batch_norm__378 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_53 = paddle._C_ops.swish(add_19) - del add_19 - - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_11 = [swish_50, swish_53] - del swish_50, swish_53 - - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - concat_13, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_13, parameter_94 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_68, parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_54 = paddle._C_ops.swish(batch_norm__384) - del batch_norm__384 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - swish_54, parameter_89, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_69, parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_55 = paddle._C_ops.swish(batch_norm__390) - del batch_norm__390 - - # builtin.combine: ([2x192x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x384x-1x-1xf32) - 
combine_12 = [swish_55, swish_36] - del swish_36, swish_55 - - # pd_op.concat: (2x576x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_70 = paddle._C_ops.conv2d( - concat_14, parameter_84, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_70, parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__396) - del batch_norm__396 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - concat_14, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_14, parameter_79 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_71, parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_57 = paddle._C_ops.swish(batch_norm__402) - del batch_norm__402 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - swish_57, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74, swish_57 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_72, parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_58 = paddle._C_ops.swish(batch_norm__408) - del batch_norm__408 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_58, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__414, - 
batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_73, parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_58, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64, swish_58 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_74, parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) - del batch_norm__414, batch_norm__420 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_59 = paddle._C_ops.swish(add_20) - del add_20 - - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_13 = [swish_56, swish_59] - del swish_56, swish_59 - - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_15 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_15, parameter_59 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_75, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_75, parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_60 = paddle._C_ops.swish(batch_norm__426) - del batch_norm__426 - - # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(swish_60) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [2] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_0 - - # pd_op.shape64: (4xi64) <- (2x384x-1x-1xf32) - shape64_1 = paddle._C_ops.shape64(swish_60) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [4] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_1 = 
paddle._C_ops.slice( - shape64_1, [0], full_int_array_6, full_int_array_7, [1], [0] - ) - del shape64_1 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_4 = paddle._C_ops.multiply(slice_0, slice_1) - del slice_0, slice_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_8 = [1, 1] - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_3 = paddle._C_ops.pool2d( - swish_60, - full_int_array_8, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) - del parameter_53 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_21 = paddle._C_ops.add(conv2d_76, reshape_4) - del conv2d_76, reshape_4 - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_21) - del add_21 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_5 = paddle._C_ops.multiply(swish_60, sigmoid_0) - del sigmoid_0 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - multiply_5, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_5, parameter_52 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_77, parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_61 = paddle._C_ops.swish(batch_norm__432) - del batch_norm__432 - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_22 = paddle._C_ops.add(swish_61, swish_60) - del swish_61 - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x384x-1x-1xf32, 4x384x3x3xf32) - conv2d_78 = paddle._C_ops.conv2d( - add_22, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_22, parameter_47 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) - del parameter_46 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_23 = paddle._C_ops.add(conv2d_78, reshape_5) - del conv2d_78, reshape_5 - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_79 = paddle._C_ops.conv2d( - pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_45, pool2d_3 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) - del parameter_44 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_24 = paddle._C_ops.add(conv2d_79, reshape_6) - del conv2d_79, reshape_6 - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_24) - del add_24 - - # pd_op.multiply: (2x384x-1x-1xf32) 
<- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_6 = paddle._C_ops.multiply(swish_60, sigmoid_1) - del sigmoid_1, swish_60 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_80 = paddle._C_ops.conv2d( - multiply_6, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_6, parameter_43 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_42, - parameter_41, - parameter_40, - parameter_39, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_80, parameter_39, parameter_40, parameter_41, parameter_42 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_62 = paddle._C_ops.swish(batch_norm__438) - del batch_norm__438 - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_62, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_38, swish_62 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) - del parameter_37 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_25 = paddle._C_ops.add(conv2d_81, reshape_7) - del conv2d_81, reshape_7 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_3 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_14 = [full_1, full_2, full_3, multiply_4] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_14, 0) - del combine_14 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_25, stack_0) - del add_25, stack_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) - del reshape_8 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_0 = paddle._C_ops.softmax(transpose_0, 1) - del transpose_0 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_82 = paddle._C_ops.conv2d( - softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_0 + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 # pd_op.full_int_array: (1xi64) <- () - full_int_array_9 = [1] - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_82, full_int_array_9) - del conv2d_82 - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_23) - del add_23 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_15 = [full_1, full_2, multiply_4] - del multiply_4 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_15, 0) - del combine_15 - - # 
pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) - reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) - del sigmoid_2, stack_1 - - # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) - shape64_2 = paddle._C_ops.shape64(swish_54) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_2 - - # pd_op.shape64: (4xi64) <- (2x192x-1x-1xf32) - shape64_3 = paddle._C_ops.shape64(swish_54) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_6, full_int_array_7, [1], [0] - ) - del shape64_3 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_7 = paddle._C_ops.multiply(slice_2, slice_3) - del slice_2, slice_3 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_4 = paddle._C_ops.pool2d( - swish_54, - full_int_array_8, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_83 = paddle._C_ops.conv2d( - pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) - del parameter_34 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_26 = paddle._C_ops.add(conv2d_83, reshape_10) - del conv2d_83, reshape_10 - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_26) - del add_26 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_8 = paddle._C_ops.multiply(swish_54, sigmoid_3) - del sigmoid_3 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - multiply_8, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_8, parameter_33 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_84, parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_63 = paddle._C_ops.swish(batch_norm__444) - del batch_norm__444 - - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_27 = paddle._C_ops.add(swish_63, swish_54) - del swish_63 - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x192x-1x-1xf32, 4x192x3x3xf32) - conv2d_85 = paddle._C_ops.conv2d( - add_27, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_27, parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) - del parameter_27 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_28 = paddle._C_ops.add(conv2d_85, reshape_11) - del conv2d_85, reshape_11 - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_86 = paddle._C_ops.conv2d( - pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", 
[1, 1], 1, "NCHW" - ) - del parameter_26, pool2d_4 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) - del parameter_25 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_29 = paddle._C_ops.add(conv2d_86, reshape_12) - del conv2d_86, reshape_12 - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_29) - del add_29 - - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_9 = paddle._C_ops.multiply(swish_54, sigmoid_4) - del sigmoid_4, swish_54 - - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_87 = paddle._C_ops.conv2d( - multiply_9, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_9, parameter_24 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_87, parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_64 = paddle._C_ops.swish(batch_norm__450) - del batch_norm__450 - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_64, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19, swish_64 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) - del parameter_18 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_30 = paddle._C_ops.add(conv2d_88, reshape_13) - del conv2d_88, reshape_13 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_16 = [full_1, full_2, full_3, multiply_7] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_16, 0) - del combine_16 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_30, stack_2) - del add_30, stack_2 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) - del reshape_14 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_1 = paddle._C_ops.softmax(transpose_1, 1) - del transpose_1 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_1 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze(conv2d_89, full_int_array_9) - del conv2d_89 - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_28) - del add_28 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_17 = [full_1, full_2, multiply_7] - del multiply_7 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_3 = paddle._C_ops.stack(combine_17, 0) - del combine_17 - - # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) - reshape_15 = 
paddle._C_ops.reshape(sigmoid_5, stack_3) - del sigmoid_5, stack_3 - - # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) - shape64_4 = paddle._C_ops.shape64(swish_48) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del full_int_array_5, shape64_4 - - # pd_op.shape64: (4xi64) <- (2x96x-1x-1xf32) - shape64_5 = paddle._C_ops.shape64(swish_48) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_5, [0], full_int_array_6, full_int_array_7, [1], [0] - ) - del full_int_array_6, full_int_array_7, shape64_5 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_10 = paddle._C_ops.multiply(slice_4, slice_5) - del slice_4, slice_5 - - # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - pool2d_5 = paddle._C_ops.pool2d( - swish_48, - full_int_array_8, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - del full_int_array_8 - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) - del parameter_16 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_31 = paddle._C_ops.add(conv2d_90, reshape_16) - del conv2d_90, reshape_16 - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_31) - del add_31 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_11 = paddle._C_ops.multiply(swish_48, sigmoid_6) - del sigmoid_6 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - multiply_11, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_11, parameter_15 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_91, parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_65 = paddle._C_ops.swish(batch_norm__456) - del batch_norm__456 - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_32 = paddle._C_ops.add(swish_65, swish_48) - del swish_65 - - # pd_op.conv2d: (2x4x-1x-1xf32) <- (2x96x-1x-1xf32, 4x96x3x3xf32) - conv2d_92 = paddle._C_ops.conv2d( - add_32, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_32, parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) - del parameter_9 - - # pd_op.add: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32, 1x4x1x1xf32) - add_33 = paddle._C_ops.add(conv2d_92, reshape_17) - del conv2d_92, reshape_17 - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_93 = paddle._C_ops.conv2d( - pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_8, pool2d_5 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) - del parameter_7 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_34 = paddle._C_ops.add(conv2d_93, reshape_18) - del conv2d_93, reshape_18 - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_34) - del add_34 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_12 = paddle._C_ops.multiply(swish_48, sigmoid_7) - del sigmoid_7, swish_48 - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_94 = paddle._C_ops.conv2d( - multiply_12, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_12, parameter_6 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_94, parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_66 = paddle._C_ops.swish(batch_norm__462) - del batch_norm__462 - - # pd_op.conv2d: (2x68x-1x-1xf32) <- (2x96x-1x-1xf32, 68x96x3x3xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_66, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1, swish_66 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) - del full_int_array_1, parameter_0 - - # pd_op.add: (2x68x-1x-1xf32) <- (2x68x-1x-1xf32, 1x68x1x1xf32) - add_35 = paddle._C_ops.add(conv2d_95, reshape_19) - del conv2d_95, reshape_19 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_18 = [full_1, full_2, full_3, multiply_10] - del full_3 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_18, 0) - del combine_18 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (2x68x-1x-1xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(add_35, stack_4) - del add_35, stack_4 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) - del reshape_20 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_2 = paddle._C_ops.softmax(transpose_2, 1) - del transpose_2 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_96 = paddle._C_ops.conv2d( - softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_36, softmax_2 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_2 = paddle._C_ops.squeeze(conv2d_96, full_int_array_9) - del conv2d_96, full_int_array_9 - - # pd_op.sigmoid: (2x4x-1x-1xf32) <- (2x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_33) - del add_33 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_19 = [full_1, full_2, multiply_10] - del full_1, full_2, multiply_10 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_19, 0) - del combine_19 - - # pd_op.reshape: (-1x4x-1xf32) <- (2x4x-1x-1xf32, 3xi64) - 
reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5)
- del sigmoid_8, stack_5
-
- # pd_op.full: (1xi32) <- ()
- full_4 = paddle._C_ops.full(
- [1], float("-1"), paddle.int32, paddle.core.CPUPlace()
- )
-
- # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32)
- combine_20 = [reshape_9, reshape_15, reshape_21]
- del reshape_15, reshape_21, reshape_9
+ full_int_array_4 = [-2]
- # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32)
- concat_0 = paddle._C_ops.concat(combine_20, full_4)
- del combine_20, full_4
+ # pd_op.max: (2x7581xf32) <- (2x1x7581xf32, 1xi64)
+ max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False)
+ del full_int_array_4, multiply_3
- # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32)
- combine_21 = [squeeze_0, squeeze_1, squeeze_2]
- del squeeze_0, squeeze_1, squeeze_2
+ # pd_op.unsqueeze: (2x7581x1xf32) <- (2x7581xf32, 1xi64)
+ unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3)
+ del full_int_array_3, max_2
- # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32)
- concat_1 = paddle._C_ops.concat(combine_21, full_0)
- del combine_21, full_0
+ # pd_op.multiply: (2x7581x4xf32) <- (2x7581x4xf32, 2x7581x1xf32)
+ multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0)
+ del index_select_0, unsqueeze_0, where_0
- return concat_0, concat_1
+ return reshape_0, multiply_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py
index 2a1c68dc7..8b1378917 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_2/weight_meta.py
@@ -1,4457 +1 @@
-class Program_weight_tensor_parameter_0:
- name = "parameter_0"
- shape = [68]
- dtype = "float32"
- min_val = float("-0.018653")
- max_val = float("0.0310325")
- mean = float("1.30123e-07")
- std = float("0.00772894")
- data = None
-
-class Program_weight_tensor_parameter_1:
- name = "parameter_1"
- shape = [68, 96, 3, 3]
- dtype = "float32"
- min_val = float("-0.190211")
- max_val = float("0.209443")
- mean = float("3.7835e-08")
- std = float("0.0111182")
- data = None
-
-
-class Program_weight_tensor_parameter_2:
- name = "parameter_2"
- shape = [96]
- dtype = "float32"
- min_val = float("-0.149303")
- max_val = float("0.349229")
- mean = float("0.0834703")
- std = float("0.116476")
- data = None
-
-
-class Program_weight_tensor_parameter_3:
- name = "parameter_3"
- shape = [96]
- dtype = "float32"
- min_val = float("0.918914")
- max_val = float("2.00844")
- mean = float("1.39791")
- std = float("0.216949")
- data = None
-
-
-class Program_weight_tensor_parameter_4:
- name = "parameter_4"
- shape = [96]
- dtype = "float32"
- min_val = float("0.000284833")
- max_val = float("0.00552148")
- mean = float("0.00151597")
- std = float("0.00109394")
- data = None
-
-
-class Program_weight_tensor_parameter_5:
- name = "parameter_5"
- shape = [96]
- dtype = "float32"
- min_val = float("-0.0946916")
- max_val = float("0.0327932")
- mean = float("-0.0138573")
- std = float("0.0245203")
- data = None
-
-
-class Program_weight_tensor_parameter_6:
- name = "parameter_6"
- shape = [96, 96, 1, 1]
- dtype = "float32"
- min_val = float("-0.0951807")
- max_val = float("0.113065")
- mean = float("-0.00129554")
- std = float("0.013839")
- data = None
-
-
-class Program_weight_tensor_parameter_7:
-
name = "parameter_7" - shape = [96] - dtype = "float32" - min_val = float("-0.0103042") - max_val = float("0.0108003") - mean = float("-0.000302473") - std = float("0.00453127") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0202563") - max_val = float("0.0235644") - mean = float("-9.45269e-05") - std = float("0.00306802") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [4, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0327041") - max_val = float("0.0429711") - mean = float("0.000567976") - std = float("0.00660947") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [96] - dtype = "float32" - min_val = float("-0.66171") - max_val = float("1.12003") - mean = float("0.208007") - std = float("0.336117") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [96] - dtype = "float32" - min_val = float("0.774741") - max_val = float("1.56622") - mean = float("1.11212") - std = float("0.140094") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [96] - dtype = "float32" - min_val = float("0.000349212") - max_val = float("0.0115798") - mean = float("0.00164328") - std = float("0.00164938") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [96] - dtype = "float32" - min_val = float("-0.201551") - max_val = float("0.114135") - mean = float("-0.0261146") - std = float("0.047028") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0897622") - max_val = float("0.0968557") - mean = float("-0.00169456") - std = float("0.0125057") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [96] - dtype = "float32" - min_val = float("-0.0059556") - max_val = float("0.00753131") - mean = float("-0.000550704") - std = float("0.0026296") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0532542") - max_val = float("0.0784924") - mean = float("-0.000321779") - std = float("0.00341872") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [68] - dtype = "float32" - min_val = float("-0.00592671") - max_val = float("0.0245108") - mean = float("1.32248e-07") - std = float("0.00592063") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [68, 192, 3, 3] - dtype = "float32" - min_val = float("-0.148013") - max_val = float("0.188684") - mean = float("-7.52334e-09") - std = float("0.00734346") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [192] - dtype = "float32" - min_val = float("-0.112013") - max_val = float("0.136873") - mean = float("0.0501926") - std = float("0.042912") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [192] - dtype = "float32" - min_val = float("0.942516") - max_val = float("1.48651") - mean = float("1.209") - std = float("0.101334") - data = None - - -class Program_weight_tensor_parameter_22: - name 
= "parameter_22" - shape = [192] - dtype = "float32" - min_val = float("0.000274533") - max_val = float("0.00473256") - mean = float("0.00131755") - std = float("0.00090147") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [192] - dtype = "float32" - min_val = float("-0.0539903") - max_val = float("0.0221637") - mean = float("-0.00864077") - std = float("0.011364") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0600062") - max_val = float("0.103906") - mean = float("-0.000355432") - std = float("0.00633016") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [192] - dtype = "float32" - min_val = float("-0.00820877") - max_val = float("0.00880323") - mean = float("-9.72734e-05") - std = float("0.0032937") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.00942992") - max_val = float("0.0136885") - mean = float("-9.81164e-05") - std = float("0.00127282") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [4, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0185986") - max_val = float("0.017836") - mean = float("0.00051337") - std = float("0.00358993") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [192] - dtype = "float32" - min_val = float("-0.291073") - max_val = float("0.60814") - mean = float("0.147454") - std = float("0.159") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [192] - dtype = "float32" - min_val = float("0.913676") - max_val = float("1.49743") - mean = float("1.08728") - std = float("0.081887") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [192] - dtype = "float32" - min_val = float("0.000306423") - max_val = float("0.00589856") - mean = float("0.00147365") - std = float("0.000924636") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [192] - dtype = "float32" - min_val = float("-0.139638") - max_val = float("0.023164") - mean = float("-0.0355444") - std = float("0.0277406") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0584223") - max_val = float("0.0463674") - mean = float("-0.00109099") - std = float("0.00597895") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [192] - dtype = "float32" - min_val = float("-0.00481239") - max_val = float("0.00933689") - mean = float("-0.000150003") - std = float("0.00178034") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0175333") - max_val = float("0.0225389") - mean = float("-0.000152035") - std = float("0.00123807") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [1, 17, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = 
[68] - dtype = "float32" - min_val = float("-0.00491466") - max_val = float("0.00925987") - mean = float("1.23691e-07") - std = float("0.00410694") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [68, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0794888") - max_val = float("0.102667") - mean = float("1.60253e-08") - std = float("0.00459131") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384] - dtype = "float32" - min_val = float("-0.0756471") - max_val = float("0.111591") - mean = float("0.0118707") - std = float("0.0354397") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("0.969442") - max_val = float("1.49466") - mean = float("1.1695") - std = float("0.0775726") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384] - dtype = "float32" - min_val = float("7.22536e-05") - max_val = float("0.0068391") - mean = float("0.000845068") - std = float("0.000703644") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384] - dtype = "float32" - min_val = float("-0.040997") - max_val = float("0.0158692") - mean = float("-0.00441209") - std = float("0.00851866") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0576993") - max_val = float("0.0600385") - mean = float("-0.000138602") - std = float("0.00315958") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384] - dtype = "float32" - min_val = float("-0.00427117") - max_val = float("0.00491851") - mean = float("-5.07427e-05") - std = float("0.00221218") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0242408") - max_val = float("0.00942797") - mean = float("-3.94686e-05") - std = float("0.000747366") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [4, 384, 3, 3] - dtype = "float32" - min_val = float("-0.00841769") - max_val = float("0.0133731") - mean = float("0.000292149") - std = float("0.00177607") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("-0.369152") - max_val = float("0.494632") - mean = float("0.0349918") - std = float("0.121509") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384] - dtype = "float32" - min_val = float("0.883459") - max_val = float("1.55467") - mean = float("1.05797") - std = float("0.0835312") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("0.00013119") - max_val = float("0.00702002") - mean = float("0.000884237") - std = float("0.000684954") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384] - dtype = "float32" - min_val = float("-0.105751") - max_val = float("0.0294571") - mean = float("-0.0262986") - std = float("0.0195655") - data = None - - -class Program_weight_tensor_parameter_52: - name = 
"parameter_52" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0387492") - max_val = float("0.0393312") - mean = float("-0.000523745") - std = float("0.00328341") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384] - dtype = "float32" - min_val = float("-0.0107119") - max_val = float("0.00930954") - mean = float("-0.000115991") - std = float("0.00131259") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0844844") - max_val = float("0.0443428") - mean = float("-3.32923e-05") - std = float("0.000884614") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [384] - dtype = "float32" - min_val = float("-0.652269") - max_val = float("1.18859") - mean = float("0.0279915") - std = float("0.238646") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [384] - dtype = "float32" - min_val = float("0.840802") - max_val = float("1.38345") - mean = float("0.983163") - std = float("0.068319") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [384] - dtype = "float32" - min_val = float("0.0028222") - max_val = float("0.120091") - mean = float("0.0150878") - std = float("0.0128566") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [384] - dtype = "float32" - min_val = float("-0.186506") - max_val = float("0.0914117") - mean = float("-0.0370423") - std = float("0.0408906") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0804522") - max_val = float("0.0621238") - mean = float("-0.000348866") - std = float("0.00515003") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [192] - dtype = "float32" - min_val = float("-0.445858") - max_val = float("0.100597") - mean = float("-0.0845864") - std = float("0.104559") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [192] - dtype = "float32" - min_val = float("0.827741") - max_val = float("1.21013") - mean = float("0.92639") - std = float("0.0462105") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [192] - dtype = "float32" - min_val = float("0.00247019") - max_val = float("0.0260277") - mean = float("0.00631141") - std = float("0.00287846") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [192] - dtype = "float32" - min_val = float("-0.0379476") - max_val = float("0.0497353") - mean = float("-0.0021489") - std = float("0.0234555") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0401693") - max_val = float("0.0482149") - mean = float("-0.000258359") - std = float("0.003519") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [192] - dtype = "float32" - min_val = float("-0.445858") - max_val = float("0.100597") - mean = float("-0.0845864") - std = float("0.104559") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [192] - dtype = "float32" - min_val = float("0.861621") - max_val = float("1.42119") - mean = float("1.11205") - std = float("0.0818467") 
- data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [192] - dtype = "float32" - min_val = float("0.00521346") - max_val = float("0.108237") - mean = float("0.0222325") - std = float("0.0164108") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [192] - dtype = "float32" - min_val = float("-0.112022") - max_val = float("0.0861229") - mean = float("-0.0205872") - std = float("0.039563") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0590664") - max_val = float("0.0651393") - mean = float("-0.000122795") - std = float("0.00309452") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [192] - dtype = "float32" - min_val = float("-0.519576") - max_val = float("0.119175") - mean = float("-0.173677") - std = float("0.128148") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [192] - dtype = "float32" - min_val = float("0.843577") - max_val = float("1.65244") - mean = float("1.0642") - std = float("0.100969") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [192] - dtype = "float32" - min_val = float("0.0134686") - max_val = float("0.262111") - mean = float("0.035477") - std = float("0.0257605") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [192] - dtype = "float32" - min_val = float("-0.2601") - max_val = float("0.142926") - mean = float("-0.0593171") - std = float("0.0572285") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0566011") - max_val = float("0.0723318") - mean = float("-0.000243394") - std = float("0.00339424") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [192] - dtype = "float32" - min_val = float("-0.455876") - max_val = float("0.187264") - mean = float("-0.0820069") - std = float("0.10205") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [192] - dtype = "float32" - min_val = float("0.841971") - max_val = float("1.25563") - mean = float("1.02701") - std = float("0.0670478") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [192] - dtype = "float32" - min_val = float("0.00508938") - max_val = float("0.0629504") - mean = float("0.0128185") - std = float("0.00617773") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [192] - dtype = "float32" - min_val = float("-0.11049") - max_val = float("0.0547617") - mean = float("-0.0181669") - std = float("0.0279962") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.080574") - max_val = float("0.0848044") - mean = float("-0.000185678") - std = float("0.00481275") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [192] - dtype = "float32" - min_val = float("-0.217908") - max_val = float("0.0355956") - mean = float("-0.0691499") - std = float("0.0386375") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [192] - dtype = "float32" - min_val = float("0.844091") - max_val = float("1.15255") - mean = 
float("1.01562") - std = float("0.0503148") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [192] - dtype = "float32" - min_val = float("0.00256848") - max_val = float("0.0204398") - mean = float("0.00702577") - std = float("0.00290168") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [192] - dtype = "float32" - min_val = float("-0.0967604") - max_val = float("0.0866626") - mean = float("-0.0222285") - std = float("0.0273828") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0433461") - max_val = float("0.0508427") - mean = float("-0.000278308") - std = float("0.00423993") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [192] - dtype = "float32" - min_val = float("-0.296093") - max_val = float("-0.00746985") - mean = float("-0.0909473") - std = float("0.0603181") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [192] - dtype = "float32" - min_val = float("0.78297") - max_val = float("1.34841") - mean = float("1.0531") - std = float("0.0659046") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [192] - dtype = "float32" - min_val = float("0.00866336") - max_val = float("0.0606129") - mean = float("0.0236506") - std = float("0.00920598") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [192] - dtype = "float32" - min_val = float("-0.274437") - max_val = float("0.303481") - mean = float("-0.028094") - std = float("0.0952102") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0320502") - max_val = float("0.0370765") - mean = float("-5.98437e-05") - std = float("0.00253528") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [192] - dtype = "float32" - min_val = float("-0.530717") - max_val = float("1.03145") - mean = float("0.148064") - std = float("0.259361") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [192] - dtype = "float32" - min_val = float("0.731798") - max_val = float("1.56803") - mean = float("1.014") - std = float("0.106695") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [192] - dtype = "float32" - min_val = float("0.00711023") - max_val = float("0.0905165") - mean = float("0.0211361") - std = float("0.0114095") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [192] - dtype = "float32" - min_val = float("-0.279082") - max_val = float("0.153551") - mean = float("-0.0509504") - std = float("0.0522264") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.13468") - max_val = float("0.0903167") - mean = float("-0.000883571") - std = float("0.0100158") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [96] - dtype = "float32" - min_val = float("-0.29043") - max_val = float("0.172569") - mean = float("-0.070874") - std = float("0.105573") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [96] - dtype = "float32" - min_val = 
float("0.730954") - max_val = float("1.20877") - mean = float("0.877662") - std = float("0.077901") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [96] - dtype = "float32" - min_val = float("0.00246171") - max_val = float("0.0129422") - mean = float("0.00671665") - std = float("0.00235517") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [96] - dtype = "float32" - min_val = float("-0.0452652") - max_val = float("0.0355382") - mean = float("-0.0105203") - std = float("0.0215082") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0494") - max_val = float("0.0504503") - mean = float("-0.00128754") - std = float("0.00640071") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [96] - dtype = "float32" - min_val = float("-0.29043") - max_val = float("0.172569") - mean = float("-0.070874") - std = float("0.105573") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [96] - dtype = "float32" - min_val = float("0.97156") - max_val = float("1.32049") - mean = float("1.13236") - std = float("0.0752191") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [96] - dtype = "float32" - min_val = float("0.0093333") - max_val = float("0.0602011") - mean = float("0.0268797") - std = float("0.0114369") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [96] - dtype = "float32" - min_val = float("-0.0726866") - max_val = float("0.101348") - mean = float("-0.0120426") - std = float("0.0278587") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0765906") - max_val = float("0.0838438") - mean = float("-0.000172577") - std = float("0.00601682") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [96] - dtype = "float32" - min_val = float("-0.673369") - max_val = float("0.111337") - mean = float("-0.259249") - std = float("0.15059") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [96] - dtype = "float32" - min_val = float("0.801527") - max_val = float("1.41146") - mean = float("1.04521") - std = float("0.116809") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [96] - dtype = "float32" - min_val = float("0.0179787") - max_val = float("0.123656") - mean = float("0.0407975") - std = float("0.0163583") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [96] - dtype = "float32" - min_val = float("-0.210235") - max_val = float("0.0371458") - mean = float("-0.0429902") - std = float("0.0344348") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.075314") - max_val = float("0.0818873") - mean = float("-0.000470995") - std = float("0.00667813") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [96] - dtype = "float32" - min_val = float("-0.644536") - max_val = float("0.152401") - mean = float("-0.155624") - std = float("0.115775") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - 
shape = [96] - dtype = "float32" - min_val = float("0.84937") - max_val = float("1.26645") - mean = float("1.03345") - std = float("0.0722039") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [96] - dtype = "float32" - min_val = float("0.00973348") - max_val = float("0.0487037") - mean = float("0.0192495") - std = float("0.00666947") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [96] - dtype = "float32" - min_val = float("-0.122358") - max_val = float("0.0284554") - mean = float("-0.0357051") - std = float("0.0301403") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [96, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0666353") - max_val = float("0.07655") - mean = float("-0.000641477") - std = float("0.00909018") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [96] - dtype = "float32" - min_val = float("-0.198184") - max_val = float("0.0830438") - mean = float("-0.029894") - std = float("0.0460273") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [96] - dtype = "float32" - min_val = float("0.68536") - max_val = float("1.33613") - mean = float("0.954422") - std = float("0.0884454") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [96] - dtype = "float32" - min_val = float("0.00439953") - max_val = float("0.0376729") - mean = float("0.0110806") - std = float("0.00518735") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [96] - dtype = "float32" - min_val = float("-0.110611") - max_val = float("0.053016") - mean = float("-0.0161266") - std = float("0.0307335") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [96, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0847356") - max_val = float("0.0781943") - mean = float("-0.00030171") - std = float("0.00760969") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [96] - dtype = "float32" - min_val = float("-0.335709") - max_val = float("0.0179744") - mean = float("-0.108667") - std = float("0.0840873") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [96] - dtype = "float32" - min_val = float("0.731015") - max_val = float("1.20665") - mean = float("1.05574") - std = float("0.0750007") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [96] - dtype = "float32" - min_val = float("0.0110792") - max_val = float("0.108652") - mean = float("0.0289631") - std = float("0.0151623") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [96] - dtype = "float32" - min_val = float("-0.402624") - max_val = float("0.415541") - mean = float("-0.0212279") - std = float("0.14343") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0566918") - max_val = float("0.0566383") - mean = float("-4.65555e-05") - std = float("0.00561229") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [96] - dtype = "float32" - min_val = float("-1.07712") - max_val = float("2.35644") - mean = float("0.310791") - std = float("0.586496") - data = None - - -class 
Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [96] - dtype = "float32" - min_val = float("0.468523") - max_val = float("1.40452") - mean = float("0.882442") - std = float("0.167292") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [96] - dtype = "float32" - min_val = float("0.00622077") - max_val = float("0.145427") - mean = float("0.0336655") - std = float("0.0192279") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [96] - dtype = "float32" - min_val = float("-0.252919") - max_val = float("0.171344") - mean = float("-0.0348699") - std = float("0.0800858") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.151113") - max_val = float("0.113524") - mean = float("-0.00159645") - std = float("0.0201408") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0664404") - max_val = float("0.0734275") - mean = float("-0.00237338") - std = float("0.012309") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.126099") - max_val = float("0.146893") - mean = float("-0.000602698") - std = float("0.0131643") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_144: - name = 
"parameter_144" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.10691") - max_val = float("0.123684") - mean = float("-0.0011924") - std = float("0.0144144") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [48, 224, 1, 1] - dtype = "float32" - min_val = float("-0.188612") - max_val = float("0.131781") - mean = float("-0.00116675") - std = float("0.0182059") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [48, 224, 1, 1] - dtype = "float32" - min_val = float("-0.102996") - max_val = float("0.127082") - mean = float("-0.000100325") - std = float("0.0125415") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [96] - dtype = "float32" - min_val = float("-0.355722") - max_val = float("0.392015") - mean = float("-0.00761515") - std = float("0.135765") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [96] - dtype = "float32" - min_val = float("0.58176") - max_val = float("1.61796") - mean = float("0.798413") - std = float("0.141773") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [96] - dtype = "float32" - min_val = float("0.00744695") - max_val = float("0.0777617") - mean = float("0.0234491") - std = float("0.0125216") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [96] - dtype = "float32" - min_val = float("-0.126434") - max_val = float("0.0750849") - mean = float("-0.0331653") - std = float("0.0358366") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0965171") - max_val = float("0.103243") - mean = float("-0.000979696") - std = float("0.0126998") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [192] - dtype = "float32" - min_val = float("-0.337168") - max_val = float("0.172866") - mean = float("-0.0804302") - std = float("0.0895865") - data = None - - -class Program_weight_tensor_parameter_161: - name 
= "parameter_161" - shape = [192] - dtype = "float32" - min_val = float("0.695369") - max_val = float("1.47856") - mean = float("0.99055") - std = float("0.0996963") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [192] - dtype = "float32" - min_val = float("0.0097712") - max_val = float("0.0987394") - mean = float("0.0251769") - std = float("0.012782") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [192] - dtype = "float32" - min_val = float("-0.203449") - max_val = float("0.19128") - mean = float("-0.0630189") - std = float("0.0562037") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.10035") - max_val = float("0.120314") - mean = float("-0.00133976") - std = float("0.0122628") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [96] - dtype = "float32" - min_val = float("-0.307168") - max_val = float("0.102436") - mean = float("-0.0815792") - std = float("0.0994278") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [96] - dtype = "float32" - min_val = float("0.550693") - max_val = float("0.936676") - mean = float("0.809285") - std = float("0.0654997") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [96] - dtype = "float32" - min_val = float("0.00340201") - max_val = float("0.01613") - mean = float("0.00847885") - std = float("0.00252636") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [96] - dtype = "float32" - min_val = float("-0.0470381") - max_val = float("0.0327168") - mean = float("-0.0154057") - std = float("0.0195276") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.046942") - max_val = float("0.0545283") - mean = float("-0.00159272") - std = float("0.00865431") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [96] - dtype = "float32" - min_val = float("-0.307168") - max_val = float("0.102436") - mean = float("-0.0815792") - std = float("0.0994278") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [96] - dtype = "float32" - min_val = float("0.843562") - max_val = float("1.28869") - mean = float("1.03487") - std = float("0.0944074") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [96] - dtype = "float32" - min_val = float("0.0162234") - max_val = float("0.0750483") - mean = float("0.0356845") - std = float("0.0121583") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [96] - dtype = "float32" - min_val = float("-0.0894171") - max_val = float("0.0639562") - mean = float("-0.0235775") - std = float("0.0292923") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.086771") - max_val = float("0.165089") - mean = float("-0.000248005") - std = float("0.00738139") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [96] - dtype = "float32" - min_val = float("-0.732503") - max_val = float("0.316296") - mean = float("-0.275731") - std = float("0.175263") - data = 
None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [96] - dtype = "float32" - min_val = float("0.764579") - max_val = float("1.31051") - mean = float("1.0436") - std = float("0.115503") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [96] - dtype = "float32" - min_val = float("0.0234519") - max_val = float("0.125482") - mean = float("0.0544499") - std = float("0.0175139") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [96] - dtype = "float32" - min_val = float("-0.166347") - max_val = float("0.0892188") - mean = float("-0.0515841") - std = float("0.0489599") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.144257") - max_val = float("0.132913") - mean = float("-0.000565483") - std = float("0.00873368") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [96] - dtype = "float32" - min_val = float("-0.649417") - max_val = float("0.386836") - mean = float("-0.253654") - std = float("0.210215") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [96] - dtype = "float32" - min_val = float("0.74298") - max_val = float("1.3799") - mean = float("1.02561") - std = float("0.122082") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [96] - dtype = "float32" - min_val = float("0.00554249") - max_val = float("0.0349953") - mean = float("0.0169508") - std = float("0.00634658") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [96] - dtype = "float32" - min_val = float("-0.314719") - max_val = float("0.272384") - mean = float("0.0111478") - std = float("0.0678291") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.169447") - max_val = float("0.110463") - mean = float("-0.000526406") - std = float("0.0113187") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [96] - dtype = "float32" - min_val = float("-0.239085") - max_val = float("0.172277") - mean = float("-0.0410098") - std = float("0.088654") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [96] - dtype = "float32" - min_val = float("0.917909") - max_val = float("1.41228") - mean = float("1.07289") - std = float("0.0921621") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [96] - dtype = "float32" - min_val = float("0.00512108") - max_val = float("0.0377565") - mean = float("0.012503") - std = float("0.00556829") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [96] - dtype = "float32" - min_val = float("-0.0764749") - max_val = float("0.0829136") - mean = float("0.0112017") - std = float("0.0312016") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.114799") - max_val = float("0.116536") - mean = float("-0.000477416") - std = float("0.00980395") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [192] - dtype = "float32" - min_val = float("-0.539928") - max_val = float("-0.101676") - mean = 
float("-0.294772") - std = float("0.070854") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [192] - dtype = "float32" - min_val = float("0.649882") - max_val = float("1.08068") - mean = float("0.851912") - std = float("0.0726184") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [192] - dtype = "float32" - min_val = float("0.00896797") - max_val = float("0.0613196") - mean = float("0.0239899") - std = float("0.00961639") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [192] - dtype = "float32" - min_val = float("-0.110165") - max_val = float("0.0578497") - mean = float("-0.0313571") - std = float("0.0318844") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0569669") - max_val = float("0.0580897") - mean = float("-0.000683949") - std = float("0.00880451") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [384] - dtype = "float32" - min_val = float("-0.522469") - max_val = float("0.214295") - mean = float("-0.168543") - std = float("0.0775358") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [384] - dtype = "float32" - min_val = float("0.848906") - max_val = float("1.39284") - mean = float("1.06283") - std = float("0.0773502") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [384] - dtype = "float32" - min_val = float("0.00501982") - max_val = float("0.0397325") - mean = float("0.0160268") - std = float("0.00625591") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [384] - dtype = "float32" - min_val = float("-0.114137") - max_val = float("0.0857981") - mean = float("-0.0355965") - std = float("0.0347035") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0936791") - max_val = float("0.106798") - mean = float("-0.000567793") - std = float("0.00789782") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [192] - dtype = "float32" - min_val = float("-0.384521") - max_val = float("0.227763") - mean = float("-0.118185") - std = float("0.102049") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [192] - dtype = "float32" - min_val = float("0.869586") - max_val = float("1.51327") - mean = float("1.12323") - std = float("0.119089") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [192] - dtype = "float32" - min_val = float("0.119222") - max_val = float("1.51213") - mean = float("0.355679") - std = float("0.194075") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [192] - dtype = "float32" - min_val = float("-3.46501") - max_val = float("1.69978") - mean = float("-0.19182") - std = float("0.986212") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [192, 768, 1, 1] - dtype = "float32" - min_val = float("-0.113572") - max_val = float("0.0748414") - mean = float("-8.3021e-05") - std = float("0.00688727") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [192] - dtype = "float32" - 
min_val = float("-0.243321") - max_val = float("0.168978") - mean = float("-0.0173026") - std = float("0.0539895") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [192] - dtype = "float32" - min_val = float("0.617694") - max_val = float("1.01652") - mean = float("0.837363") - std = float("0.0632031") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [192] - dtype = "float32" - min_val = float("0.00364927") - max_val = float("0.0302202") - mean = float("0.00897402") - std = float("0.00329128") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [192] - dtype = "float32" - min_val = float("-0.116422") - max_val = float("0.0722651") - mean = float("-0.0506104") - std = float("0.0400639") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0376764") - max_val = float("0.0673975") - mean = float("-0.00139833") - std = float("0.00662605") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [192] - dtype = "float32" - min_val = float("-0.243321") - max_val = float("0.168978") - mean = float("-0.0173026") - std = float("0.0539895") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [192] - dtype = "float32" - min_val = float("0.875399") - max_val = float("1.46098") - mean = float("1.10627") - std = float("0.129572") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [192] - dtype = "float32" - min_val = float("0.0227077") - max_val = float("0.237351") - mean = float("0.0547969") - std = float("0.0225584") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [192] - dtype = "float32" - min_val = float("-0.293552") - max_val = float("0.0473091") - mean = float("-0.118203") - std = float("0.0657697") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0368216") - max_val = float("0.0546366") - mean = float("-0.000404501") - std = float("0.00405002") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [192] - dtype = "float32" - min_val = float("-0.311401") - max_val = float("0.0667354") - mean = float("-0.115178") - std = float("0.0802349") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [192] - dtype = "float32" - min_val = float("0.910824") - max_val = float("1.44605") - mean = float("1.10855") - std = float("0.101864") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [192] - dtype = "float32" - min_val = float("0.0286424") - max_val = float("0.136652") - mean = float("0.0700486") - std = float("0.0228431") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [192] - dtype = "float32" - min_val = float("-0.460812") - max_val = float("0.215499") - mean = float("-0.119224") - std = float("0.0838897") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0498457") - max_val = float("0.0422007") - mean = float("-0.000479539") - std = float("0.00450633") - data = None - - -class 
Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [192] - dtype = "float32" - min_val = float("-0.444339") - max_val = float("0.412099") - mean = float("-0.137793") - std = float("0.130321") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [192] - dtype = "float32" - min_val = float("0.955612") - max_val = float("1.37241") - mean = float("1.11033") - std = float("0.0722102") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [192] - dtype = "float32" - min_val = float("0.0599313") - max_val = float("0.406381") - mean = float("0.146198") - std = float("0.0531059") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [192] - dtype = "float32" - min_val = float("-0.3746") - max_val = float("0.457922") - mean = float("-0.113673") - std = float("0.104493") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [192, 512, 1, 1] - dtype = "float32" - min_val = float("-0.0511306") - max_val = float("0.0904195") - mean = float("-0.000705076") - std = float("0.00778049") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [192] - dtype = "float32" - min_val = float("-0.164214") - max_val = float("0.00108657") - mean = float("-0.0652814") - std = float("0.0261733") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [192] - dtype = "float32" - min_val = float("0.819443") - max_val = float("1.06651") - mean = float("0.969047") - std = float("0.046122") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [192] - dtype = "float32" - min_val = float("0.0402565") - max_val = float("0.234889") - mean = float("0.0881219") - std = float("0.0288936") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [192] - dtype = "float32" - min_val = float("-0.264071") - max_val = float("0.148941") - mean = float("-0.104244") - std = float("0.0769829") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [192, 512, 1, 1] - dtype = "float32" - min_val = float("-0.0279262") - max_val = float("0.0522477") - mean = float("-0.00062665") - std = float("0.0064125") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [512] - dtype = "float32" - min_val = float("-4.82887") - max_val = float("-0.110974") - mean = float("-2.29538") - std = float("0.775295") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [512] - dtype = "float32" - min_val = float("2.10191") - max_val = float("5.21727") - mean = float("3.70112") - std = float("0.482785") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [512] - dtype = "float32" - min_val = float("0.00205017") - max_val = float("0.0129863") - mean = float("0.00439085") - std = float("0.00153234") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [512] - dtype = "float32" - min_val = float("-0.125558") - max_val = float("0.0844085") - mean = float("-0.0389285") - std = float("0.0304083") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0812518") - max_val = float("0.135076") - mean = 
float("-0.000961032") - std = float("0.00790225") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [384] - dtype = "float32" - min_val = float("-0.0156508") - max_val = float("-0.000140212") - mean = float("-0.00546153") - std = float("0.0036436") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.186417") - max_val = float("0.14642") - mean = float("-0.00212537") - std = float("0.00664924") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [192] - dtype = "float32" - min_val = float("-2.38809") - max_val = float("3.17072") - mean = float("-0.203472") - std = float("0.563206") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [192] - dtype = "float32" - min_val = float("0.123887") - max_val = float("2.40473") - mean = float("0.524729") - std = float("0.334968") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [192] - dtype = "float32" - min_val = float("0.000170815") - max_val = float("0.00268326") - mean = float("0.000688421") - std = float("0.000383392") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [192] - dtype = "float32" - min_val = float("-0.0629836") - max_val = float("0.0878972") - mean = float("0.0115645") - std = float("0.0223406") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0580822") - max_val = float("0.058285") - mean = float("-0.000330827") - std = float("0.00504808") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [192] - dtype = "float32" - min_val = float("-2.38809") - max_val = float("3.17072") - mean = float("-0.203472") - std = float("0.563206") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [192] - dtype = "float32" - min_val = float("0.679292") - max_val = float("3.07313") - mean = float("1.54536") - std = float("0.450864") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [192] - dtype = "float32" - min_val = float("0.0033827") - max_val = float("0.0334462") - mean = float("0.00949376") - std = float("0.00462441") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.225625") - max_val = float("0.191391") - mean = float("0.012192") - std = float("0.0564082") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.074483") - max_val = float("0.0656173") - mean = float("-7.80967e-05") - std = float("0.00448667") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("-3.43262") - max_val = float("1.16961") - mean = float("-1.42837") - std = float("0.634837") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("0.392163") - max_val = float("1.72692") - mean = float("1.08998") - std = float("0.190011") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192] - dtype = 
"float32" - min_val = float("0.0270888") - max_val = float("0.44195") - mean = float("0.084525") - std = float("0.0387844") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [192] - dtype = "float32" - min_val = float("-1.22131") - max_val = float("0.423185") - mean = float("-0.221144") - std = float("0.173004") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.060306") - max_val = float("0.0575339") - mean = float("-0.000382961") - std = float("0.00524134") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [192] - dtype = "float32" - min_val = float("-3.8777") - max_val = float("4.24427") - mean = float("-0.62919") - std = float("0.988716") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [192] - dtype = "float32" - min_val = float("0.580104") - max_val = float("4.17524") - mean = float("1.54492") - std = float("0.398566") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [192] - dtype = "float32" - min_val = float("0.00324018") - max_val = float("0.0287499") - mean = float("0.0091977") - std = float("0.00409842") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [192] - dtype = "float32" - min_val = float("-0.163414") - max_val = float("0.133777") - mean = float("0.0531707") - std = float("0.0370733") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0969858") - max_val = float("0.0643058") - mean = float("-0.00132637") - std = float("0.00937933") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [192] - dtype = "float32" - min_val = float("-2.93792") - max_val = float("1.02599") - mean = float("-0.426756") - std = float("0.68174") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [192] - dtype = "float32" - min_val = float("0.700506") - max_val = float("3.6114") - mean = float("1.48179") - std = float("0.505296") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [192] - dtype = "float32" - min_val = float("0.00157973") - max_val = float("0.00922621") - mean = float("0.00455175") - std = float("0.00146605") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [192] - dtype = "float32" - min_val = float("-0.062057") - max_val = float("0.0798436") - mean = float("0.0163012") - std = float("0.0297843") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0711863") - max_val = float("0.0659081") - mean = float("-0.000541447") - std = float("0.00758362") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [384] - dtype = "float32" - min_val = float("-2.84279") - max_val = float("1.12229") - mean = float("-0.753376") - std = float("0.497189") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [384] - dtype = "float32" - min_val = float("0.419122") - max_val = float("1.80266") - mean = float("0.867781") - std = float("0.218188") - data = None - - -class 
Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [384] - dtype = "float32" - min_val = float("0.00581871") - max_val = float("0.137693") - mean = float("0.0206925") - std = float("0.0128053") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [384] - dtype = "float32" - min_val = float("-0.489828") - max_val = float("0.409672") - mean = float("0.0184444") - std = float("0.103856") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0545942") - max_val = float("0.0530515") - mean = float("-0.000171083") - std = float("0.00433153") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [256] - dtype = "float32" - min_val = float("-2.82072") - max_val = float("1.4645") - mean = float("-1.07775") - std = float("0.63367") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [256] - dtype = "float32" - min_val = float("0.424072") - max_val = float("1.76956") - mean = float("0.978554") - std = float("0.170473") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [256] - dtype = "float32" - min_val = float("0.00145624") - max_val = float("0.0198528") - mean = float("0.00622552") - std = float("0.00273816") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [256] - dtype = "float32" - min_val = float("-0.233052") - max_val = float("0.227501") - mean = float("-0.0537504") - std = float("0.0737751") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.129954") - max_val = float("0.174189") - mean = float("-0.00111637") - std = float("0.014005") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [192] - dtype = "float32" - min_val = float("-0.0203441") - max_val = float("0.00142473") - mean = float("-0.00616218") - std = float("0.00515112") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.235185") - max_val = float("0.180564") - mean = float("-0.00409182") - std = float("0.010316") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [96] - dtype = "float32" - min_val = float("-2.27838") - max_val = float("0.754522") - mean = float("-0.115757") - std = float("0.508129") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [96] - dtype = "float32" - min_val = float("-0.0583754") - max_val = float("2.30701") - mean = float("0.261422") - std = float("0.366858") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [96] - dtype = "float32" - min_val = float("5.52856e-12") - max_val = float("0.00211136") - mean = float("0.000440278") - std = float("0.00038522") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [96] - dtype = "float32" - min_val = float("-0.0522517") - max_val = float("0.0841538") - mean = float("0.00706718") - std = float("0.0204386") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0379797") - max_val = 
float("0.0638861") - mean = float("-0.000312308") - std = float("0.00575617") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [96] - dtype = "float32" - min_val = float("-2.27838") - max_val = float("0.754522") - mean = float("-0.115757") - std = float("0.508129") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [96] - dtype = "float32" - min_val = float("0.349893") - max_val = float("3.24248") - mean = float("1.29137") - std = float("0.633887") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [96] - dtype = "float32" - min_val = float("0.00382734") - max_val = float("0.0305749") - mean = float("0.0139795") - std = float("0.00619915") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [96] - dtype = "float32" - min_val = float("-0.179476") - max_val = float("0.231631") - mean = float("0.0342856") - std = float("0.0751833") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0540098") - max_val = float("0.0647126") - mean = float("-0.000333391") - std = float("0.00757747") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [96] - dtype = "float32" - min_val = float("-2.80047") - max_val = float("1.50581") - mean = float("-1.09128") - std = float("0.696783") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [96] - dtype = "float32" - min_val = float("0.32196") - max_val = float("1.80506") - mean = float("1.07312") - std = float("0.213196") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [96] - dtype = "float32" - min_val = float("0.0443664") - max_val = float("0.229848") - mean = float("0.0920728") - std = float("0.0345847") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [96] - dtype = "float32" - min_val = float("-1.52006") - max_val = float("0.514198") - mean = float("-0.12686") - std = float("0.297573") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0508838") - max_val = float("0.0716904") - mean = float("-0.000600059") - std = float("0.00819703") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [96] - dtype = "float32" - min_val = float("-2.54073") - max_val = float("0.664611") - mean = float("-0.0486788") - std = float("0.474242") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [96] - dtype = "float32" - min_val = float("-0.0782312") - max_val = float("3.15097") - mean = float("0.280453") - std = float("0.408781") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [96] - dtype = "float32" - min_val = float("1.38686e-10") - max_val = float("0.0216024") - mean = float("0.00193235") - std = float("0.00284515") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [96] - dtype = "float32" - min_val = float("-0.051793") - max_val = float("0.122769") - mean = float("0.0178591") - std = float("0.0297203") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [96, 96, 1, 1] - dtype = 
"float32" - min_val = float("-0.111904") - max_val = float("0.0788536") - mean = float("-0.00121451") - std = float("0.00859042") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [96] - dtype = "float32" - min_val = float("-2.54073") - max_val = float("0.664612") - mean = float("-0.0486788") - std = float("0.474242") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [96] - dtype = "float32" - min_val = float("0.34207") - max_val = float("2.99219") - mean = float("0.929546") - std = float("0.412034") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [96] - dtype = "float32" - min_val = float("0.00578641") - max_val = float("0.0746827") - mean = float("0.0256145") - std = float("0.0116189") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [96] - dtype = "float32" - min_val = float("-0.159121") - max_val = float("0.1999") - mean = float("0.0399585") - std = float("0.0761745") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0537042") - max_val = float("0.0504456") - mean = float("-0.000550937") - std = float("0.00775915") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [96] - dtype = "float32" - min_val = float("-2.02001") - max_val = float("1.65623") - mean = float("-0.919923") - std = float("0.650572") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [96] - dtype = "float32" - min_val = float("0.442811") - max_val = float("1.97179") - mean = float("1.06409") - std = float("0.227631") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [96] - dtype = "float32" - min_val = float("0.0126015") - max_val = float("0.116688") - mean = float("0.0335163") - std = float("0.0162914") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [96] - dtype = "float32" - min_val = float("-2.08549") - max_val = float("0.285641") - mean = float("-0.0344373") - std = float("0.253697") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.105598") - max_val = float("0.126106") - mean = float("-0.000400525") - std = float("0.00876452") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [96] - dtype = "float32" - min_val = float("-1.61964") - max_val = float("1.88676") - mean = float("0.00618829") - std = float("0.838881") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [96] - dtype = "float32" - min_val = float("0.352578") - max_val = float("1.32187") - mean = float("0.700929") - std = float("0.236185") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [96] - dtype = "float32" - min_val = float("0.00963589") - max_val = float("0.0791035") - mean = float("0.0353916") - std = float("0.0149812") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [96] - dtype = "float32" - min_val = float("-0.28232") - max_val = float("0.404327") - mean = float("-0.0642217") - std = float("0.107473") - data = None - - -class Program_weight_tensor_parameter_308: - name = 
"parameter_308" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.125711") - max_val = float("0.113026") - mean = float("-0.00129075") - std = float("0.0142062") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [96] - dtype = "float32" - min_val = float("-2.46694") - max_val = float("1.71626") - mean = float("0.341079") - std = float("0.679162") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [96] - dtype = "float32" - min_val = float("0.539295") - max_val = float("4.8854") - mean = float("1.48247") - std = float("0.959871") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [96] - dtype = "float32" - min_val = float("0.00783824") - max_val = float("0.0876481") - mean = float("0.0244986") - std = float("0.0120614") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [96] - dtype = "float32" - min_val = float("-0.287482") - max_val = float("0.306711") - mean = float("-0.00499391") - std = float("0.109698") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0888948") - max_val = float("0.165433") - mean = float("-0.000715943") - std = float("0.0141494") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [192] - dtype = "float32" - min_val = float("-4.44162") - max_val = float("2.00951") - mean = float("-0.0984248") - std = float("0.883313") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [192] - dtype = "float32" - min_val = float("0.575367") - max_val = float("4.51851") - mean = float("1.08197") - std = float("0.426247") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [192] - dtype = "float32" - min_val = float("0.00785823") - max_val = float("0.189363") - mean = float("0.0403725") - std = float("0.0275254") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [192] - dtype = "float32" - min_val = float("-0.270304") - max_val = float("0.337865") - mean = float("0.0348807") - std = float("0.105792") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.087766") - max_val = float("0.0743655") - mean = float("-0.000243175") - std = float("0.00724859") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [128] - dtype = "float32" - min_val = float("-2.15213") - max_val = float("1.36751") - mean = float("-0.673313") - std = float("0.682255") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [128] - dtype = "float32" - min_val = float("0.368949") - max_val = float("2.25056") - mean = float("0.876029") - std = float("0.235567") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [128] - dtype = "float32" - min_val = float("0.00227055") - max_val = float("0.0578315") - mean = float("0.00881457") - std = float("0.00556285") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [128] - dtype = "float32" - min_val = float("-0.299004") - max_val = float("0.266637") - mean = float("-0.0648951") - std = float("0.119821") - data = None - - 
-class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.205453") - max_val = float("0.19767") - mean = float("-0.00132955") - std = float("0.0233044") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [96] - dtype = "float32" - min_val = float("-0.0287844") - max_val = float("0.00351177") - mean = float("-0.00835282") - std = float("0.00772968") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.293759") - max_val = float("0.281304") - mean = float("-0.00581101") - std = float("0.0185083") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.083645") - max_val = float("0.0802369") - mean = float("-0.00148088") - std = float("0.0125285") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.101441") - max_val = float("0.102416") - mean = float("-0.000544695") - std = float("0.0126239") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0993692") - max_val = float("0.108811") - mean = float("-0.00104822") - std = float("0.0135148") - data = None - - -class 
Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0692134") - max_val = float("0.0765493") - mean = float("-0.00277088") - std = float("0.0156891") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0966216") - max_val = float("0.0984974") - mean = float("-0.00101028") - std = float("0.0123098") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0948994") - max_val = float("0.0895512") - mean = float("-0.000794946") - std = float("0.0143308") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [48, 
96, 1, 1] - dtype = "float32" - min_val = float("-0.0970452") - max_val = float("0.119633") - mean = float("-0.00232752") - std = float("0.0233937") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.161532") - max_val = float("0.246027") - mean = float("0.000306195") - std = float("0.0243305") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [96] - dtype = "float32" - min_val = float("-3.31592") - max_val = float("3.83597") - mean = float("0.267111") - std = float("1.21094") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [96] - dtype = "float32" - min_val = float("0.511478") - max_val = float("5.40365") - mean = float("1.12531") - std = float("0.546749") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [96] - dtype = "float32" - min_val = float("0.0162512") - max_val = float("0.320396") - mean = float("0.0760809") - std = float("0.0518261") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [96] - dtype = "float32" - min_val = float("-0.502761") - max_val = float("0.492542") - mean = float("-0.0358312") - std = float("0.181422") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.0955215") - max_val = float("0.123103") - mean = float("-0.000235878") - std = float("0.0120646") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [64, 48, 1, 1] - dtype = "float32" - min_val = float("-0.17433") - max_val = float("0.176451") - mean = float("-0.00222374") - std = float("0.0347099") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.166649") - max_val = 
float("0.156533") - mean = float("-0.0129927") - std = float("0.0258165") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [24, 24, 1, 1] - dtype = "float32" - min_val = float("-0.0915942") - max_val = float("0.147481") - mean = float("-0.00102278") - std = float("0.0251579") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [24, 24, 3, 3] - dtype = "float32" - min_val = float("-0.110347") - max_val = float("0.0904305") - mean = float("-0.000736999") - std = float("0.0215299") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [24, 24, 3, 3] - dtype = "float32" - min_val = float("-0.125611") - max_val = float("0.174817") - mean = float("-0.00010855") - std = float("0.023961") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = 
None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [24, 48, 1, 1] - dtype = "float32" - min_val = float("-0.205563") - max_val = float("0.188891") - mean = float("-0.00450245") - std = float("0.0361434") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [24, 48, 1, 1] - dtype = "float32" - min_val = float("-0.192775") - max_val = float("0.166962") - mean = float("-0.00176564") - std = float("0.0388666") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [48, 32, 3, 3] - dtype = "float32" - min_val = float("-0.162459") - max_val = float("0.113525") - mean = float("-0.000259478") - std = float("0.0203147") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [32, 16, 3, 3] - dtype = "float32" - min_val = float("-0.21861") - max_val = float("0.233137") - mean = float("-0.000345629") - std = float("0.0349451") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = 
None
-
-
-class Program_weight_tensor_parameter_416:
-    name = "parameter_416"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_417:
-    name = "parameter_417"
-    shape = [16, 16, 3, 3]
-    dtype = "float32"
-    min_val = float("-0.301154")
-    max_val = float("0.330949")
-    mean = float("-0.00144702")
-    std = float("0.0464861")
-    data = None
-
-
-class Program_weight_tensor_parameter_418:
-    name = "parameter_418"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_419:
-    name = "parameter_419"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_420:
-    name = "parameter_420"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_421:
-    name = "parameter_421"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_422:
-    name = "parameter_422"
-    shape = [16, 3, 3, 3]
-    dtype = "float32"
-    min_val = float("-0.247963")
-    max_val = float("0.271619")
-    mean = float("-0.00315738")
-    std = float("0.068481")
-    data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt
index 896fa94fd..e5ec97328 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt
+++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/graph_hash.txt
@@ -1 +1 @@
-66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624
\ No newline at end of file
+d8e2807e0c261d57e00c887aba4b333ffd83562a1bf230d5a26bacf0379fad87
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py
index 5c608d9b7..3217a39a7 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/input_meta.py
@@ -1,38 +1,91 @@
 class Program_weight_tensor_data_0:
     name = "data_0"
-    shape = [2, 8400, 4]
+    shape = [2, 3549]
     dtype = "float32"
-    min_val = float("0.0430395")
-    max_val = float("12.2027")
-    mean = float("5.0132")
-    std = float("2.64575")
+    max_val = float("2.0")
+    mean = float("0.0250775")
+    std = float("0.157259")
     data = None


 class Program_weight_tensor_data_1:
     name = "data_1"
-    shape = [8400, 2]
+    shape = [2, 14, 3549]
     dtype = "float32"
-    min_val = float("0.5")
-    max_val = float("79.5")
-    mean = float("34.7619")
-    std = float("22.9098")
+    max_val = float("0.973582")
+    mean = float("0.00986958")
+    std = float("0.0654711")
     data = None


 class Program_weight_tensor_data_2:
     name = "data_2"
-    shape = [8400, 1]
+    shape = [2, 14, 3549]
     dtype = "float32"
-    min_val = float("8.0")
-    max_val = float("32.0")
-    mean = float("10.6667")
-    std = float("5.70157")
+    max_val = float("1.0")
+    mean = float("0.00179125")
+    std = float("0.0422852")
     data = None


 class Program_weight_tensor_data_3:
     name = "data_3"
-    shape = [2, 2]
+    shape = [2, 1]
+    dtype = "int32"
+    data = [0, 1]
+
+
+class Program_weight_tensor_data_4:
+    name = "data_4"
+    shape = [2, 14, 1]
+    dtype = "int32"
+    data = [
+        0,
+        0,
+        1,
+        0,
+        0,
+        0,
+        0,
+        0,
+        3,
+        0,
+        0,
+        0,
+        0,
+        3,
+        0,
+        0,
+        1,
+        0,
+        0,
+        0,
+        0,
+        0,
+        3,
+        0,
+        0,
+        0,
+        0,
+        0,
+    ]
+
+
+class Program_weight_tensor_data_5:
+    name = "data_5"
+    shape = [2, 14, 4]
     dtype = "float32"
-    data = [1.18519, 0.666667, 1.18519, 0.666667]
+    max_val = float("384.824")
+    mean = float("133.114")
+    std = float("96.9844")
+    data = None
+
+
+class Program_weight_tensor_data_6:
+    name = "data_6"
+    shape = [2, 14, 3549]
+    dtype = "float32"
+    max_val = float("0.00888292")
+    mean = float("1.5455e-05")
+    std = float("0.000268339")
+    data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py
index 561c0c35b..2432102f1 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py
+++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_3/model.py
@@ -5,90 +5,225 @@ class GraphModule(paddle.nn.Layer):
     def __init__(self):
         super().__init__()

-    def forward(self, data_0, data_1, data_2, data_3):
-        # pd_op.full: (1xi32) <- ()
+    def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6):
+        # pd_op.full_int_array: (1xi64) <- ()
+        full_int_array_0 = [1]
+
+        # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64)
+        unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0)
+        del data_0, full_int_array_0
+
+        # pd_op.full: (xf32) <- ()
         full_0 = paddle._C_ops.full(
-            [1], float("2"), paddle.int32, paddle.core.CPUPlace()
+            [], float("1"), paddle.float32, paddle.framework._current_expected_place()
         )

-        # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32)
-        split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0)
-        del data_0, full_0
+        # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32)
+        greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0)
+        del full_0, unsqueeze_0
+
+        # pd_op.full_int_array: (3xi64) <- ()
+        full_int_array_1 = [1, 14, 1]

-        # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32])
-        (
-            split_0,
-            split_1,
-        ) = split_with_num_0
-        del split_with_num_0
+        # pd_op.tile: (2x14x3549xb) <- (2x1x3549xb, 3xi64)
+        tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1)
+        del full_int_array_1, greater_than_0

-        # pd_op.full: (1xf32) <- ()
+        # pd_op.full: (1xi64) <- ()
         full_1 = paddle._C_ops.full(
-            [1], float("-1"), paddle.float32, paddle.core.CPUPlace()
+            [1], float("-2"), paddle.int64, paddle.core.CPUPlace()
         )

-        # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32)
-        scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True)
-        del full_1, split_0
-
-        # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32)
-        add_0 = paddle._C_ops.add(scale_0, data_1)
-        del scale_0
-
-        # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32)
-        add_1 = paddle._C_ops.add(split_1, data_1)
-        del data_1, split_1
+        # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64)
+        argmax_0 = paddle._C_ops.argmax(data_1, full_1, False, False, paddle.int64)

         # pd_op.full: (1xi32) <- ()
         full_2 = paddle._C_ops.full(
-            [1], float("-1"), paddle.int32, paddle.core.CPUPlace()
+            [1], float("14"), paddle.int32, paddle.core.CPUPlace()
+        )
+
+        # pd_op.one_hot: (2x3549x14xf32) <- (2x3549xi64, 1xi32)
+        one_hot_0 = paddle._C_ops.one_hot(
+            argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2
         )
+        del argmax_0, full_2

-        # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32)
-        combine_0 = [add_0, add_1]
-        del add_0, add_1
+        # pd_op.transpose: (2x14x3549xf32) <- (2x3549x14xf32)
+        transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1])
+        del one_hot_0

-        # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32],
1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 + # pd_op.where: (2x14x3549xf32) <- (2x14x3549xb, 2x14x3549xf32, 2x14x3549xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 - # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] - # pd_op.full: (1xi32) <- () + # pd_op.sum: (2x3549xf32) <- (2x14x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) + + # pd_op.argmax: (2x3549xi64) <- (2x14x3549xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 + + # pd_op.full: (1xf32) <- () full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + [1], float("14"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) del data_3, full_3 - # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) - ( - split_2, - split_3, - ) = split_with_num_1 - del split_with_num_1 + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 + + # pd_op.flatten: (28xi32) <- (2x14x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 + + # pd_op.flatten: (7098xi64) <- (2x3549xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (7098xi32) <- (28xi32, 7098xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 3549] + + # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 + + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 + + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() + ) + del full_6 - # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] - del split_2, split_3 + # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] + + # pd_op.reshape: (28x4xf32) <- (2x14x4xf32, 2xi64) + reshape_2 = 
paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 + + # pd_op.gather: (7098x4xf32) <- (28x4xf32, 7098xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] + full_int_array_5 = [2, 3549, 4] + + # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x3549x5xf32) <- (2x3549xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (4xi64) <- () + full_8 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (2x3549x4xf32) <- (2x3549x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) + multiply_1 = paddle._C_ops.multiply(data_6, where_0) + del data_6 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] + + # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_6, True) + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x3549xf32) + multiply_2 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 + + # pd_op.max: (2x14x1xf32) <- (2x14x3549xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_6, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x14x1xf32) <- (2x14x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 + + # pd_op.divide: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (2x14x3549xf32) <- (2x14x3549xf32, 2x14x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.max: (2x3549xf32) <- (2x14x3549xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_2, False) + del full_int_array_2, multiply_3 - # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 + # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 - # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 + # pd_op.multiply: (2x3549x4xf32) <- (2x3549x4xf32, 2x3549x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - return divide_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt index 4d04a175d..18426c718 100644 --- 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/graph_hash.txt @@ -1 +1 @@ -6f0f40cbf909627fa867337174f532d3e179ced4784c7fc2c9cb00ae6193ac2e \ No newline at end of file +c4af36497f7852167288dc3ac1e4b55956d1b6c42ca46e70cd27bb1ccc05b8bd \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py index aa6620489..5a54b3b62 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/input_meta.py @@ -1,59 +1,83 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 1, 7581] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00171481") - std = float("0.0413748") - data = None + shape = [] + dtype = "int64" + data = [12] class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 1] - dtype = "int32" - data = [0, 1] + shape = [] + dtype = "int64" + data = [10164] class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 1, 1] - dtype = "int32" - data = [0, 3] + shape = [] + dtype = "int64" + data = [12] class Program_weight_tensor_data_3: name = "data_3" - shape = [2, 7581] + shape = [2, 10164] dtype = "float32" - max_val = float("1.0") - mean = float("0.00171481") - std = float("0.0413748") + max_val = float("2.0") + mean = float("0.00491932") + std = float("0.0706648") data = None class Program_weight_tensor_data_4: name = "data_4" - shape = [2, 1, 4] + shape = [2, 12, 10164] dtype = "float32" - data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0] + max_val = float("0.964484") + mean = float("0.00145547") + std = float("0.0263451") + data = None class Program_weight_tensor_data_5: name = "data_5" - shape = [2, 1, 7581] + shape = [2, 12, 10164] dtype = "float32" - max_val = float("0.00652957") - mean = float("2.09634e-05") - std = float("0.000282651") + max_val = float("1.0") + mean = float("0.000409944") + std = float("0.0202429") data = None class Program_weight_tensor_data_6: name = "data_6" - shape = [2, 1, 7581] + shape = [2, 1] + dtype = "int32" + data = [0, 1] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 12, 1] + dtype = "int32" + data = [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 12, 4] + dtype = "float32" + max_val = float("658.644") + mean = float("251.921") + std = float("253.674") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [2, 12, 10164] dtype = "float32" - max_val = float("0.931424") - mean = float("0.0255599") - std = float("0.091854") + max_val = float("0.00990145") + mean = float("2.84369e-06") + std = float("0.000124311") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py index 80304c15d..d3b764f89 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_4/model.py @@ -5,172 +5,260 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full: (1xi64) <- () + def forward( + self, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + ): + # 
pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (2x1x-1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_0) + del data_3, full_int_array_0 + + # pd_op.full: (xf32) <- () full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.argmax: (2x7581xi64) <- (2x1x7581xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) - del full_0 + # pd_op.greater_than: (2x1x-1xb) <- (2x1x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 - # pd_op.full: (1xf32) <- () + # pd_op.full: (xi64) <- () full_1 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [full_1, data_0, full_1] + del full_1 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.tile: (2x-1x-1xb) <- (2x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 + + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_4, full_2, False, False, paddle.int64) + + # pd_op.one_hot: (2x-1x-1xf32) <- (2x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(data_2, argmax_0.dtype), data_2 ) + del argmax_0, data_2 + + # pd_op.transpose: (2x-1x-1xf32) <- (2x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 + + # pd_op.where: (2x-1x-1xf32) <- (2x-1x-1xb, 2x-1x-1xf32, 2x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_5) + del data_5, tile_0, transpose_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-2] + + # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_1, None, False) + + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) - del data_1 + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 + + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_6, cast_0) + del cast_0, data_6 # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 - # pd_op.add: (2x7581xi64) <- (2x7581xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_0) - del argmax_0, cast_0 + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 - # pd_op.flatten: (2xi32) <- (2x1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) - del data_2 + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_7, 0, 2) + del data_7 - # pd_op.flatten: (15162xi64) <- (2x7581xi64) + # pd_op.flatten: (-1xi64) <- (2x-1xi64) flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) del add_0 # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( + full_3 = 
paddle._C_ops.full( [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.gather: (15162xi32) <- (2xi32, 15162xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) del flatten_0 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 7581] + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_4, data_1] - # pd_op.reshape: (2x7581xi32) <- (15162xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) - del full_int_array_0, gather_0 + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 + + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 # pd_op.full: (xf32) <- () - full_3 = paddle._C_ops.full( + full_5 = paddle._C_ops.full( [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.greater_than: (2x7581xb) <- (2x7581xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) - del data_3, full_3 + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( + full_6 = paddle._C_ops.full( [1], float("4"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full_like: (2x7581xi32) <- (2x7581xi32, 1xf32) + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() ) - del full_4 + del full_6 - # pd_op.where: (2x7581xi32) <- (2x7581xb, 2x7581xi32, 2x7581xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 4] + full_int_array_2 = [-1, 4] - # pd_op.reshape: (2x4xf32) <- (2x1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) - del data_4, full_int_array_1 + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_8, full_int_array_2) + del data_8, full_int_array_2 - # pd_op.gather: (15162x4xf32) <- (2x4xf32, 15162xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) - del flatten_1, full_2, reshape_2 + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_2 = [2, 7581, 4] + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_4, data_1, full_7] + del data_1, full_4, full_7 - # pd_op.reshape: (2x7581x4xf32) <- (15162x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) - del full_int_array_2, gather_1 + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 
0) + del combine_2 + + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( + full_8 = paddle._C_ops.full( [1], float("5"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.one_hot: (2x7581x5xf32) <- (2x7581xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_5, where_0.dtype), full_5 + # pd_op.one_hot: (2x-1x5xf32) <- (2x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_8, where_1.dtype), full_8 ) - del full_5 + del full_8 # pd_op.full: (4xi64) <- () - full_6 = paddle._C_ops.full( + full_9 = paddle._C_ops.full( [4], float("0"), paddle.int64, paddle.framework._current_expected_place() ) # pd_op.assign_value_: (4xi64) <- (4xi64) assign_value__0 = paddle._C_ops.assign_value_( - full_6, + full_9, [4], paddle.int64, [float("0"), float("1"), float("2"), float("3")], paddle.framework._current_expected_place(), ) - del full_6 + del full_9 - # pd_op.index_select: (2x7581x4xf32) <- (2x7581x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 + # pd_op.index_select: (2x-1x4xf32) <- (2x-1x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - multiply_1 = paddle._C_ops.multiply(data_5, data_0) - del data_5 + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_9, where_0) + del data_9 # pd_op.full_int_array: (1xi64) <- () full_int_array_3 = [-1] - # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) - multiply_2 = paddle._C_ops.multiply(data_6, data_0) - del data_0, data_6 + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_4, where_0) + del data_4, where_0 - # pd_op.max: (2x1x1xf32) <- (2x1x7581xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - del multiply_2 + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_3, True) + del multiply_3 - # pd_op.scale: (2x1x1xf32) <- (2x1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_1, float("1e-09"), True) - del full_1, max_0 + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_10, float("1e-09"), True) + del full_10, max_0 - # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-2] + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.max: 
(2x7581xf32) <- (2x1x7581xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) - del full_int_array_4, multiply_3 + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_1, False) + del full_int_array_1, multiply_4 - # pd_op.unsqueeze: (2x7581x1xf32) <- (2x7581xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) del full_int_array_3, max_2 - # pd_op.multiply: (2x7581x4xf32) <- (2x7581x4xf32, 2x7581x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 + # pd_op.multiply: (2x-1x4xf32) <- (2x-1x4xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt index 315669793..6ac9d23ff 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/graph_hash.txt @@ -1 +1 @@ -772280e4e0e6df48fdaa1b3bea8b0dd5cbaf7dad6c90d940fb9d38ade5d09fa6 \ No newline at end of file +4386995a6a00133c7db276591465ce4ec1f82a0512fc13aeb9bb4745fafa593c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py index 903e9a326..2525ffce1 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/input_meta.py @@ -1,9 +1,52 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3, 608, 608] + shape = [2, 7581, 4] dtype = "float32" - min_val = float("-1.85379") - max_val = float("2.55285") - mean = float("0.163596") - std = float("0.495929") + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("2.79397e-09") data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 7581, 4] + dtype = "float32" + min_val = float("-231.068") + max_val = float("849.397") + mean = float("303.519") + std = float("185.947") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [7581, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("604.0") + mean = float("304.0") + std = float("175.48") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 1, 1] + dtype = "int32" + data = [0, 3] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 1, 4] + dtype = "float32" + data = [376.443, 61.9806, 517.447, 398.447, 562.465, 468.683, 608.0, 608.0] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 1, 1] + dtype = "float32" + data = [1.0, 1.0] diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py index 70cc252dc..950175fb3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/model.py @@ -5,4044 +5,334 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - 
parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - 
parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - data_0, - ): - # pd_op.conv2d: (2x16x304x304xf32) <- (2x3x608x608xf32, 16x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_367, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_0, parameter_367 - - # pd_op.batch_norm_: (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_366, - parameter_365, - parameter_364, - parameter_363, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_363, parameter_364, parameter_365, parameter_366 - - # pd_op.swish: (2x16x304x304xf32) <- (2x16x304x304xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (2x16x304x304xf32) <- (2x16x304x304xf32, 16x16x3x3xf32) - conv2d_1 = 
paddle._C_ops.conv2d( - swish_1, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_362 - - # pd_op.batch_norm_: (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32, -1xui8) <- (2x16x304x304xf32, 16xf32, 16xf32, 16xf32, 16xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_361, - parameter_360, - parameter_359, - parameter_358, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_358, parameter_359, parameter_360, parameter_361 - - # pd_op.swish: (2x16x304x304xf32) <- (2x16x304x304xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x32x304x304xf32) <- (2x16x304x304xf32, 32x16x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_357, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_357 - - # pd_op.batch_norm_: (2x32x304x304xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x304x304xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_356, - parameter_355, - parameter_354, - parameter_353, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_353, parameter_354, parameter_355, parameter_356 + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] - # pd_op.swish: (2x32x304x304xf32) <- (2x32x304x304xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) + # pd_op.unsqueeze: (2x1x1x4xf32) <- (2x1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_4, full_int_array_0) + del data_4 - # pd_op.conv2d: (2x48x152x152xf32) <- (2x32x304x304xf32, 48x32x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_352, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_352 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] - # pd_op.batch_norm_: (2x48x152x152xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x152x152xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_351, - parameter_350, - parameter_349, - parameter_348, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_348, parameter_349, parameter_350, parameter_351 - - # pd_op.swish: (2x48x152x152xf32) <- (2x48x152x152xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) + # pd_op.unsqueeze: (2x1x7581x4xf32) <- (2x7581x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 - # pd_op.conv2d: (2x24x152x152xf32) <- (2x48x152x152xf32, 24x48x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_347 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] - # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 
24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_346, - parameter_345, - parameter_344, - parameter_343, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.slice: (2x1x1x2xf32) <- (2x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] ) - del parameter_343, parameter_344, parameter_345, parameter_346 - # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] - # pd_op.conv2d: (2x24x152x152xf32) <- (2x48x152x152xf32, 24x48x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_342, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.slice: (2x1x1x2xf32) <- (2x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] ) - del parameter_342 - # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_341, - parameter_340, - parameter_339, - parameter_338, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.slice: (2x1x7581x2xf32) <- (2x1x7581x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] ) - del parameter_338, parameter_339, parameter_340, parameter_341 + del full_int_array_2 - # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x24x152x152xf32) <- (2x24x152x152xf32, 24x24x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.slice: (2x1x7581x2xf32) <- (2x1x7581x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] ) - del parameter_337 + del full_int_array_0, full_int_array_3, unsqueeze_1 - # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_336, - parameter_335, - parameter_334, - parameter_333, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_333, parameter_334, parameter_335, parameter_336 + # pd_op.maximum: (2x1x7581x2xf32) <- (2x1x1x2xf32, 2x1x7581x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) + # pd_op.minimum: (2x1x7581x2xf32) <- (2x1x1x2xf32, 2x1x7581x2xf32) + minimum_0 = 
paddle._C_ops.minimum(slice_1, slice_3) - # pd_op.conv2d: (2x24x152x152xf32) <- (2x24x152x152xf32, 24x24x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_332, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_332 + # pd_op.subtract: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 2x1x7581x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 - # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_331, - parameter_330, - parameter_329, - parameter_328, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_328, parameter_329, parameter_330, parameter_331 - - # pd_op.conv2d: (2x24x152x152xf32) <- (2x24x152x152xf32, 24x24x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_327, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_327 - - # pd_op.batch_norm_: (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (2x24x152x152xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_326, - parameter_325, - parameter_324, - parameter_323, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_323, parameter_324, parameter_325, parameter_326 - - # pd_op.add: (2x24x152x152xf32) <- (2x24x152x152xf32, 2x24x152x152xf32) - add_0 = paddle._C_ops.add(batch_norm__42, batch_norm__48) - - # pd_op.swish: (2x24x152x152xf32) <- (2x24x152x152xf32) - swish_8 = paddle._C_ops.swish(add_0) - - # pd_op.add: (2x24x152x152xf32) <- (2x24x152x152xf32, 2x24x152x152xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) - - # pd_op.full: (1xi32) <- () + # pd_op.full: (1xf32) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_3 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_4 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_5 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_6 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_7 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_0 - - # builtin.combine: ([2x24x152x152xf32, 2x24x152x152xf32]) <- (2x24x152x152xf32, 2x24x152x152xf32) - combine_0 = [swish_5, add_1] - - # pd_op.concat: (2x48x152x152xf32) <- ([2x24x152x152xf32, 2x24x152x152xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_13 
= full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_14 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_15 = full_int_array_0 - - # pd_op.mean: (2x48x1x1xf32) <- (2x48x152x152xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (2x48x1x1xf32) <- (2x48x1x1xf32, 48x48x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - mean_0, parameter_322, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_322 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x48x1x1xf32) <- (48xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_321, full_int_array_1) - del parameter_321 - - # pd_op.add: (2x48x1x1xf32) <- (2x48x1x1xf32, 1x48x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_9, reshape_0) - - # pd_op.hardsigmoid: (2x48x1x1xf32) <- (2x48x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_2, float("0.166667"), float("0.5") - ) - del add_2 - - # pd_op.multiply: (2x48x152x152xf32) <- (2x48x152x152xf32, 2x48x1x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (2x64x152x152xf32) <- (2x48x152x152xf32, 64x48x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_0, parameter_320, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_320 - - # pd_op.batch_norm_: (2x64x152x152xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x152x152xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_319, - parameter_318, - parameter_317, - parameter_316, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_316, parameter_317, parameter_318, parameter_319 - - # pd_op.swish: (2x64x152x152xf32) <- (2x64x152x152xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (2x96x76x76xf32) <- (2x64x152x152xf32, 96x64x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_315, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_315 - - # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_314, - parameter_313, - parameter_312, - parameter_311, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_311, parameter_312, parameter_313, parameter_314 - - # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) - swish_10 = paddle._C_ops.swish(batch_norm__60) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x96x76x76xf32, 48x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - swish_10, parameter_310, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_310 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_309, - parameter_308, 
- parameter_307, - parameter_306, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_306, parameter_307, parameter_308, parameter_309 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_11 = paddle._C_ops.swish(batch_norm__66) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x96x76x76xf32, 48x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_10, parameter_305, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_305 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_304, - parameter_303, - parameter_302, - parameter_301, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_301, parameter_302, parameter_303, parameter_304 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_12 = paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_12, parameter_300, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_300 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_299, - parameter_298, - parameter_297, - parameter_296, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_296, parameter_297, parameter_298, parameter_299 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_13 = paddle._C_ops.swish(batch_norm__78) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) - conv2d_15 = paddle._C_ops.conv2d( - swish_13, parameter_295, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_295 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_15, - parameter_294, - parameter_293, - parameter_292, - parameter_291, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_291, parameter_292, parameter_293, parameter_294 - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - swish_13, parameter_290, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_290 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: 
f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_289, - parameter_288, - parameter_287, - parameter_286, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_286, parameter_287, parameter_288, parameter_289 - - # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) - add_3 = paddle._C_ops.add(batch_norm__84, batch_norm__90) - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_14 = paddle._C_ops.swish(add_3) - - # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) - add_4 = paddle._C_ops.add(swish_12, swish_14) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - add_4, parameter_285, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_285 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_284, - parameter_283, - parameter_282, - parameter_281, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_281, parameter_282, parameter_283, parameter_284 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_15 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_15, parameter_280, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_280 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_279, - parameter_278, - parameter_277, - parameter_276, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_276, parameter_277, parameter_278, parameter_279 - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_15, parameter_275, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_275 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_274, - parameter_273, - parameter_272, - parameter_271, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_271, parameter_272, parameter_273, parameter_274 - - # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) - add_5 = paddle._C_ops.add(batch_norm__102, batch_norm__108) - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_16 = paddle._C_ops.swish(add_5) - - # pd_op.add: 
(2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) - add_6 = paddle._C_ops.add(add_4, swish_16) - - # builtin.combine: ([2x48x76x76xf32, 2x48x76x76xf32]) <- (2x48x76x76xf32, 2x48x76x76xf32) - combine_1 = [swish_11, add_6] - - # pd_op.concat: (2x96x76x76xf32) <- ([2x48x76x76xf32, 2x48x76x76xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x76x76xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_20 = paddle._C_ops.conv2d( - mean_1, parameter_270, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_270 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_269, full_int_array_1) - del parameter_269 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_20, reshape_1) - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_7, float("0.166667"), float("0.5") - ) - del add_7 - - # pd_op.multiply: (2x96x76x76xf32) <- (2x96x76x76xf32, 2x96x1x1xf32) - multiply_1 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (2x128x76x76xf32) <- (2x96x76x76xf32, 128x96x1x1xf32) - conv2d_21 = paddle._C_ops.conv2d( - multiply_1, parameter_268, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_268 - - # pd_op.batch_norm_: (2x128x76x76xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x76x76xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_267, - parameter_266, - parameter_265, - parameter_264, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_264, parameter_265, parameter_266, parameter_267 - - # pd_op.swish: (2x128x76x76xf32) <- (2x128x76x76xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (2x192x38x38xf32) <- (2x128x76x76xf32, 192x128x3x3xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_263, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_263 - - # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_262, - parameter_261, - parameter_260, - parameter_259, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_259, parameter_260, parameter_261, parameter_262 - - # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) - swish_18 = paddle._C_ops.swish(batch_norm__120) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x192x38x38xf32, 96x192x1x1xf32) - conv2d_23 = paddle._C_ops.conv2d( - swish_18, parameter_258, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_258 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - 
batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_257, - parameter_256, - parameter_255, - parameter_254, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_254, parameter_255, parameter_256, parameter_257 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_19 = paddle._C_ops.swish(batch_norm__126) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x192x38x38xf32, 96x192x1x1xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_253, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_253 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_252, - parameter_251, - parameter_250, - parameter_249, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_249, parameter_250, parameter_251, parameter_252 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_20 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_20, parameter_248, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_248 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_247, - parameter_246, - parameter_245, - parameter_244, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_244, parameter_245, parameter_246, parameter_247 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_21 = paddle._C_ops.swish(batch_norm__138) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - swish_21, parameter_243, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_243 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_242, - parameter_241, - parameter_240, - parameter_239, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_239, parameter_240, parameter_241, parameter_242 - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_238, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_238 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 
96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_237, - parameter_236, - parameter_235, - parameter_234, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_234, parameter_235, parameter_236, parameter_237 - - # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) - add_8 = paddle._C_ops.add(batch_norm__144, batch_norm__150) - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_22 = paddle._C_ops.swish(add_8) - - # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) - add_9 = paddle._C_ops.add(swish_20, swish_22) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_28 = paddle._C_ops.conv2d( - add_9, parameter_233, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_233 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_232, - parameter_231, - parameter_230, - parameter_229, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_229, parameter_230, parameter_231, parameter_232 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_23 = paddle._C_ops.swish(batch_norm__156) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - swish_23, parameter_228, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_228 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_227, - parameter_226, - parameter_225, - parameter_224, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_224, parameter_225, parameter_226, parameter_227 - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_223, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_223 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_222, - parameter_221, - parameter_220, - parameter_219, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_219, parameter_220, parameter_221, parameter_222 - - # pd_op.add: 
(2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) - add_10 = paddle._C_ops.add(batch_norm__162, batch_norm__168) - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_24 = paddle._C_ops.swish(add_10) - - # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) - add_11 = paddle._C_ops.add(add_9, swish_24) - - # builtin.combine: ([2x96x38x38xf32, 2x96x38x38xf32]) <- (2x96x38x38xf32, 2x96x38x38xf32) - combine_2 = [swish_19, add_11] - - # pd_op.concat: (2x192x38x38xf32) <- ([2x96x38x38xf32, 2x96x38x38xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x38x38xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - mean_2, parameter_218, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_218 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_217, full_int_array_1) - del parameter_217 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_31, reshape_2) - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_12, float("0.166667"), float("0.5") - ) - del add_12 - - # pd_op.multiply: (2x192x38x38xf32) <- (2x192x38x38xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (2x256x38x38xf32) <- (2x192x38x38xf32, 256x192x1x1xf32) - conv2d_32 = paddle._C_ops.conv2d( - multiply_2, parameter_216, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_216 - - # pd_op.batch_norm_: (2x256x38x38xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x38x38xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_215, - parameter_214, - parameter_213, - parameter_212, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_212, parameter_213, parameter_214, parameter_215 - - # pd_op.swish: (2x256x38x38xf32) <- (2x256x38x38xf32) - swish_25 = paddle._C_ops.swish(batch_norm__174) - - # pd_op.conv2d: (2x384x19x19xf32) <- (2x256x38x38xf32, 384x256x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_211, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_211 - - # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_210, - parameter_209, - parameter_208, - parameter_207, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_207, parameter_208, parameter_209, parameter_210 - - # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) - swish_26 = paddle._C_ops.swish(batch_norm__180) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x384x19x19xf32, 192x384x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_26, 
parameter_206, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_206 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_205, - parameter_204, - parameter_203, - parameter_202, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_202, parameter_203, parameter_204, parameter_205 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_27 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x384x19x19xf32, 192x384x1x1xf32) - conv2d_35 = paddle._C_ops.conv2d( - swish_26, parameter_201, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_201 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_200, - parameter_199, - parameter_198, - parameter_197, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_197, parameter_198, parameter_199, parameter_200 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_28 = paddle._C_ops.swish(batch_norm__192) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_28, parameter_196, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_196 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_195, - parameter_194, - parameter_193, - parameter_192, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_192, parameter_193, parameter_194, parameter_195 - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_29 = paddle._C_ops.swish(batch_norm__198) + # pd_op.clip: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_29, parameter_191, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_191 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - 
batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_190, - parameter_189, - parameter_188, - parameter_187, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_187, parameter_188, parameter_189, parameter_190 + # pd_op.prod: (2x1x7581xf32) <- (2x1x7581x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - swish_29, parameter_186, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_186 + # pd_op.subtract: (2x1x1x2xf32) <- (2x1x1x2xf32, 2x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_38, - parameter_185, - parameter_184, - parameter_183, - parameter_182, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_182, parameter_183, parameter_184, parameter_185 + # pd_op.clip: (2x1x1x2xf32) <- (2x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 - # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) - add_13 = paddle._C_ops.add(batch_norm__204, batch_norm__210) + # pd_op.prod: (2x1x1xf32) <- (2x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_30 = paddle._C_ops.swish(add_13) + # pd_op.subtract: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 2x1x7581x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 - # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) - add_14 = paddle._C_ops.add(swish_28, swish_30) + # pd_op.clip: (2x1x7581x2xf32) <- (2x1x7581x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 - # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32) - combine_3 = [swish_27, add_14] + # pd_op.prod: (2x1x7581xf32) <- (2x1x7581x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 - # pd_op.concat: (2x384x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 + # pd_op.add: (2x1x7581xf32) <- (2x1x1xf32, 2x1x7581xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 - # pd_op.mean: (2x384x1x1xf32) <- (2x384x19x19xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + # pd_op.subtract: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - mean_3, parameter_181, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), 
paddle.float32, paddle.core.CPUPlace() ) - del parameter_181 - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_180, full_int_array_1) - del full_int_array_1, parameter_180 + # pd_op.scale: (2x1x7581xf32) <- (2x1x7581xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del full_2, subtract_3 - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_15 = paddle._C_ops.add(conv2d_39, reshape_3) + # pd_op.divide: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_15, float("0.166667"), float("0.5") - ) - del add_15 - - # pd_op.multiply: (2x384x19x19xf32) <- (2x384x19x19xf32, 2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) - - # pd_op.conv2d: (2x512x19x19xf32) <- (2x384x19x19xf32, 512x384x1x1xf32) - conv2d_40 = paddle._C_ops.conv2d( - multiply_3, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (2x512x19x19xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x19x19xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x512x19x19xf32) <- (2x512x19x19xf32) - swish_31 = paddle._C_ops.swish(batch_norm__216) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x512x19x19xf32, 192x512x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_31, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_32 = paddle._C_ops.swish(batch_norm__222) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x512x19x19xf32, 192x512x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_31, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - 
if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_33 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_33, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_34 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_34, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 + # pd_op.transpose: (2x4x7581xf32) <- (2x7581x4xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - del parameter_155, parameter_156, parameter_157, parameter_158 - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_34, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() ) - del parameter_154 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() ) - del parameter_150, parameter_151, parameter_152, parameter_153 - # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) - add_16 = paddle._C_ops.add(batch_norm__240, batch_norm__246) + # pd_op.arange: (2xi32) <- (1xf64, 
1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_35 = paddle._C_ops.swish(add_16) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] - - # pd_op.pool2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_35, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_35, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_35, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32) - combine_4 = [swish_35, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (2x768x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32, 2x192x19x19xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x768x19x19xf32, 192x768x1x1xf32) - conv2d_46 = paddle._C_ops.conv2d( - concat_4, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_36 = paddle._C_ops.swish(batch_norm__252) - - # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32) - combine_5 = [swish_32, swish_36] - - # pd_op.concat: (2x384x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) - conv2d_47 = paddle._C_ops.conv2d( - concat_5, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 + full_int_array_5 = [1, 1] - # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - 
"NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 + # pd_op.tile: (2x1xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 - # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) - swish_37 = paddle._C_ops.swish(batch_norm__258) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x384x19x19xf32, 192x384x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_37, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 + # pd_op.squeeze: (2x1xi32) <- (2x1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_3, full_int_array_4) + del data_3 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_38 = paddle._C_ops.swish(batch_norm__264) - - # pd_op.nearest_interp: (2x192x38x38xf32) <- (2x192x19x19xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_38, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x192x38x38xf32, 2x256x38x38xf32]) <- (2x192x38x38xf32, 2x256x38x38xf32) - combine_6 = [nearest_interp_0, swish_25] - - # pd_op.concat: (2x448x38x38xf32) <- ([2x192x38x38xf32, 2x256x38x38xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x448x38x38xf32, 96x448x1x1xf32) - conv2d_49 = paddle._C_ops.conv2d( - concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_39 = paddle._C_ops.swish(batch_norm__270) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x448x38x38xf32, 96x448x1x1xf32) - conv2d_50 = paddle._C_ops.conv2d( - concat_6, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - 
parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_40 = paddle._C_ops.swish(batch_norm__276) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_40, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_41 = paddle._C_ops.swish(batch_norm__282) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - swish_41, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_41, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 + # builtin.combine: ([2x1xi32, 2x1xi32]) <- (2x1xi32, 2x1xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) - add_17 = paddle._C_ops.add(batch_norm__288, batch_norm__294) - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_42 = paddle._C_ops.swish(add_17) - - # builtin.combine: ([2x96x38x38xf32, 2x96x38x38xf32]) <- (2x96x38x38xf32, 2x96x38x38xf32) - combine_7 = [swish_39, swish_42] - - # pd_op.concat: (2x192x38x38xf32) <- 
([2x96x38x38xf32, 2x96x38x38xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - concat_7, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) - swish_43 = paddle._C_ops.swish(batch_norm__300) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x192x38x38xf32, 96x192x1x1xf32) - conv2d_55 = paddle._C_ops.conv2d( - swish_43, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_44 = paddle._C_ops.swish(batch_norm__306) - - # pd_op.nearest_interp: (2x96x76x76xf32) <- (2x96x38x38xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_44, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x96x76x76xf32, 2x128x76x76xf32]) <- (2x96x76x76xf32, 2x128x76x76xf32) - combine_8 = [nearest_interp_1, swish_17] - - # pd_op.concat: (2x224x76x76xf32) <- ([2x96x76x76xf32, 2x128x76x76xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x224x76x76xf32, 48x224x1x1xf32) - conv2d_56 = paddle._C_ops.conv2d( - concat_8, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_45 = paddle._C_ops.swish(batch_norm__312) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x224x76x76xf32, 
48x224x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - concat_8, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_46 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - swish_46, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_47 = paddle._C_ops.swish(batch_norm__324) - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_47, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.conv2d: (2x48x76x76xf32) <- (2x48x76x76xf32, 48x48x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_47, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x76x76xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.add: (2x48x76x76xf32) <- (2x48x76x76xf32, 2x48x76x76xf32) - add_18 = 
paddle._C_ops.add(batch_norm__330, batch_norm__336) - - # pd_op.swish: (2x48x76x76xf32) <- (2x48x76x76xf32) - swish_48 = paddle._C_ops.swish(add_18) - - # builtin.combine: ([2x48x76x76xf32, 2x48x76x76xf32]) <- (2x48x76x76xf32, 2x48x76x76xf32) - combine_9 = [swish_45, swish_48] - - # pd_op.concat: (2x96x76x76xf32) <- ([2x48x76x76xf32, 2x48x76x76xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (2x96x76x76xf32) <- (2x96x76x76xf32, 96x96x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - concat_9, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x76x76xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_61, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.swish: (2x96x76x76xf32) <- (2x96x76x76xf32) - swish_49 = paddle._C_ops.swish(batch_norm__342) - - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x76x76xf32, 96x96x3x3xf32) - conv2d_62 = paddle._C_ops.conv2d( - swish_49, parameter_69, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_50 = paddle._C_ops.swish(batch_norm__348) + # pd_op.stack: (2x1x2xi32) <- ([2x1xi32, 2x1xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 - # builtin.combine: ([2x96x38x38xf32, 2x192x38x38xf32]) <- (2x96x38x38xf32, 2x192x38x38xf32) - combine_10 = [swish_50, swish_43] + # pd_op.gather_nd: (2x1x7581xf32) <- (2x4x7581xf32, 2x1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 - # pd_op.concat: (2x288x38x38xf32) <- ([2x96x38x38xf32, 2x192x38x38xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 + # pd_op.pow: (2x1x7581xf32) <- (2x1x7581xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 - # pd_op.conv2d: (2x96x38x38xf32) <- (2x288x38x38xf32, 96x288x1x1xf32) - conv2d_63 = paddle._C_ops.conv2d( - concat_10, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_63, - parameter_62, - parameter_61, - parameter_60, 
- False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 + # pd_op.pow: (2x1x7581xf32) <- (2x1x7581xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_51 = paddle._C_ops.swish(batch_norm__354) + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 - # pd_op.conv2d: (2x96x38x38xf32) <- (2x288x38x38xf32, 96x288x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - concat_10, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_52 = paddle._C_ops.swish(batch_norm__360) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_52, parameter_54, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 + # pd_op.unsqueeze: (1x1x7581x2xf32) <- (7581x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2, full_int_array_6 - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_53 = paddle._C_ops.swish(batch_norm__366) - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_53, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 + # pd_op.split_with_num: ([1x1x7581x1xf32, 1x1x7581x1xf32]) <- (1x1x7581x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # builtin.split: (1x1x7581x1xf32, 1x1x7581x1xf32) <- ([1x1x7581x1xf32, 1x1x7581x1xf32]) ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( 
- conv2d_66, - parameter_48, - parameter_47, - parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, parameter_48 + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 - # pd_op.conv2d: (2x96x38x38xf32) <- (2x96x38x38xf32, 96x96x1x1xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_53, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 + # pd_op.split_with_num: ([2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32]) <- (2x1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 - # pd_op.batch_norm_: (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x38x38xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # builtin.split: (2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32) <- ([2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32, 2x1x1x1xf32]) ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 - # pd_op.add: (2x96x38x38xf32) <- (2x96x38x38xf32, 2x96x38x38xf32) - add_19 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + # pd_op.subtract: (2x1x7581x1xf32) <- (1x1x7581x1xf32, 2x1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + del split_2 - # pd_op.swish: (2x96x38x38xf32) <- (2x96x38x38xf32) - swish_54 = paddle._C_ops.swish(add_19) + # pd_op.subtract: (2x1x7581x1xf32) <- (1x1x7581x1xf32, 2x1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + del split_3 - # builtin.combine: ([2x96x38x38xf32, 2x96x38x38xf32]) <- (2x96x38x38xf32, 2x96x38x38xf32) - combine_11 = [swish_51, swish_54] + # pd_op.subtract: (2x1x7581x1xf32) <- (2x1x1x1xf32, 1x1x7581x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + del split_0, split_4 - # pd_op.concat: (2x192x38x38xf32) <- ([2x96x38x38xf32, 2x96x38x38xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 + # pd_op.subtract: (2x1x7581x1xf32) <- (2x1x1x1xf32, 1x1x7581x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + del split_1, split_5 - # pd_op.conv2d: (2x192x38x38xf32) <- (2x192x38x38xf32, 192x192x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - concat_11, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_39 - # pd_op.batch_norm_: (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x38x38xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, 
tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 + # builtin.combine: ([2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32]) <- (2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 - # pd_op.swish: (2x192x38x38xf32) <- (2x192x38x38xf32) - swish_55 = paddle._C_ops.swish(batch_norm__384) + # pd_op.concat: (2x1x7581x4xf32) <- ([2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32, 2x1x7581x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1, full_7 - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x38x38xf32, 192x192x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - swish_55, parameter_34, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 + # pd_op.min: (2x1x7581xf32) <- (2x1x7581x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0, full_int_array_4 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_56 = paddle._C_ops.swish(batch_norm__390) - # builtin.combine: ([2x192x19x19xf32, 2x384x19x19xf32]) <- (2x192x19x19xf32, 2x384x19x19xf32) - combine_12 = [swish_56, swish_37] + # pd_op.greater_than: (2x1x7581xb) <- (2x1x7581xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del full_8, min_0 - # pd_op.concat: (2x576x19x19xf32) <- ([2x192x19x19xf32, 2x384x19x19xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 + # pd_op.cast: (2x1x7581xf32) <- (2x1x7581xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 - # pd_op.conv2d: (2x192x19x19xf32) <- (2x576x19x19xf32, 192x576x1x1xf32) - conv2d_70 = paddle._C_ops.conv2d( - concat_12, parameter_29, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_28, - parameter_27, - parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, cast_0) - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_57 = 
paddle._C_ops.swish(batch_norm__396) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x576x19x19xf32, 192x576x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - concat_12, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_9 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_24 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.topk: (2x1x13xf32, 2x1x13xi64) <- (2x1x7581xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(multiply_1, full_9, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), ) - del parameter_20, parameter_21, parameter_22, parameter_23 + del full_9, multiply_1 - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_58 = paddle._C_ops.swish(batch_norm__402) - - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - swish_58, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("7581"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_19 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.one_hot: (2x1x13x7581xf32) <- (2x1x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_10, topk_1.dtype), full_10 ) - del parameter_15, parameter_16, parameter_17, parameter_18 + del full_10, topk_1 - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_59 = paddle._C_ops.swish(batch_norm__408) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_59, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 + # pd_op.sum: (2x1x7581xf32) <- (2x1x13x7581xf32, 1xi64) + sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, 
None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_2 = paddle._C_ops.multiply(sum_0, data_5) + del sum_0 - # pd_op.conv2d: (2x192x19x19xf32) <- (2x192x19x19xf32, 192x192x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_59, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x7581xf32) + multiply_3 = paddle._C_ops.multiply(multiply_2, cast_0) + del cast_0, multiply_2 - # pd_op.batch_norm_: (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x19x19xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, parameter_8 - - # pd_op.add: (2x192x19x19xf32) <- (2x192x19x19xf32, 2x192x19x19xf32) - add_20 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + # pd_op.multiply: (2x1x7581xf32) <- (2x1x7581xf32, 2x1x1xf32) + multiply_4 = paddle._C_ops.multiply(multiply_3, data_5) + del data_5, multiply_3 - # pd_op.swish: (2x192x19x19xf32) <- (2x192x19x19xf32) - swish_60 = paddle._C_ops.swish(add_20) + # pd_op.sum: (2x7581xf32) <- (2x1x7581xf32, 1xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) + del full_int_array_7 - # builtin.combine: ([2x192x19x19xf32, 2x192x19x19xf32]) <- (2x192x19x19xf32, 2x192x19x19xf32) - combine_13 = [swish_57, swish_60] + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] - # pd_op.concat: (2x384x19x19xf32) <- ([2x192x19x19xf32, 2x192x19x19xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 + # pd_op.max: (xf32) <- (2x7581xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) + del full_int_array_8 - # pd_op.conv2d: (2x384x19x19xf32) <- (2x384x19x19xf32, 384x384x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - concat_13, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_4 - # pd_op.batch_norm_: (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x19x19xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_75, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (2x384x19x19xf32) <- (2x384x19x19xf32) - swish_0 = paddle._C_ops.swish(batch_norm__426) - del ( - add_0, - add_1, - add_10, - add_11, - add_13, - add_14, - add_16, - add_17, - add_18, - add_19, - add_20, - add_3, - add_4, - add_5, - add_6, - add_8, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_2, - 
assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, 
- batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__44, - 
batch_norm__45, - batch_norm__46, - batch_norm__47, - batch_norm__48, - batch_norm__49, - batch_norm__5, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - batch_norm__6, - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__7, - batch_norm__70, - batch_norm__71, - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - batch_norm__78, - batch_norm__79, - batch_norm__8, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_10, - concat_11, - concat_12, - concat_13, - concat_2, - concat_3, - concat_4, - concat_5, - concat_6, - concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_8, - conv2d_9, - full_0, - full_int_array_0, - full_int_array_2, - full_int_array_3, - full_int_array_4, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_1, - reshape_2, - reshape_3, - swish_1, - swish_10, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_55, - swish_56, - swish_57, - swish_58, - swish_59, - swish_6, - swish_60, - swish_7, - swish_8, - swish_9, - ) + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_11) + del divide_0, full_11, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 - return swish_0 + return greater_than_0 diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py index df50680d5..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_5/weight_meta.py @@ -1,3860 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [384] - dtype = "float32" - min_val = float("-0.652218") - max_val = float("1.18857") - mean = float("0.0280274") - std = float("0.238618") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [384] - dtype = "float32" - min_val = float("0.840697") - max_val = float("1.38324") - mean = float("0.98305") - std = float("0.0683125") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [384] - dtype = "float32" - min_val = float("0.0026422") - max_val = float("0.0571294") - mean = float("0.00840019") - std = float("0.00480438") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [384] - dtype = "float32" - min_val = float("-0.153452") - max_val = float("0.0567554") - mean = float("-0.0268421") - std = float("0.029361") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0814399") - max_val = float("0.0603684") - mean = float("-0.00033216") - std = float("0.00506095") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [192] - dtype = "float32" - min_val = float("-0.44561") - max_val = float("0.100505") - mean = float("-0.084569") - std = float("0.10453") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [192] - dtype = "float32" - min_val = float("0.827482") - max_val = float("1.2095") - mean = float("0.926275") - std = float("0.0461945") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [192] - dtype = "float32" - min_val = float("0.00190456") - max_val = float("0.0205137") - mean = float("0.00863049") - std = float("0.00430541") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [192] - dtype = "float32" - min_val = float("-0.0318185") - max_val = float("0.0379224") - mean = float("-0.000110429") - std = float("0.0186888") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0393104") - max_val = float("0.0483612") - mean = float("-0.000255694") - std = float("0.00325694") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [192] - dtype = "float32" - min_val = float("-0.44561") - max_val = float("0.100505") - mean = float("-0.084569") - std = float("0.10453") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [192] - dtype = "float32" - min_val = float("0.860758") - max_val = float("1.4208") - mean = float("1.11187") - std = float("0.0818408") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [192] - dtype = "float32" - min_val = float("0.00352246") - max_val = float("0.035092") - mean = float("0.0105274") - std = float("0.00437158") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [192] - dtype = "float32" - min_val = 
float("-0.0797058") - max_val = float("0.0680474") - mean = float("-0.0157311") - std = float("0.0255872") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0573713") - max_val = float("0.0648518") - mean = float("-0.000118352") - std = float("0.00301846") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [192] - dtype = "float32" - min_val = float("-0.519542") - max_val = float("0.119264") - mean = float("-0.17366") - std = float("0.128125") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [192] - dtype = "float32" - min_val = float("0.843388") - max_val = float("1.65187") - mean = float("1.06405") - std = float("0.100937") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [192] - dtype = "float32" - min_val = float("0.0093173") - max_val = float("0.0586855") - mean = float("0.0231857") - std = float("0.00925497") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [192] - dtype = "float32" - min_val = float("-0.168595") - max_val = float("0.0949792") - mean = float("-0.0552586") - std = float("0.0424072") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0550749") - max_val = float("0.0694004") - mean = float("-0.000238535") - std = float("0.00329047") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [192] - dtype = "float32" - min_val = float("-0.455754") - max_val = float("0.186932") - mean = float("-0.0819852") - std = float("0.102015") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [192] - dtype = "float32" - min_val = float("0.841905") - max_val = float("1.25543") - mean = float("1.02686") - std = float("0.0670556") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [192] - dtype = "float32" - min_val = float("0.00382876") - max_val = float("0.0168788") - mean = float("0.00831223") - std = float("0.00247857") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [192] - dtype = "float32" - min_val = float("-0.0941842") - max_val = float("0.0492551") - mean = float("-0.0169118") - std = float("0.0260979") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0820429") - max_val = float("0.0807641") - mean = float("-0.00017519") - std = float("0.00465979") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [192] - dtype = "float32" - min_val = float("-0.217783") - max_val = float("0.0355625") - mean = float("-0.0691304") - std = float("0.0386326") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [192] - dtype = "float32" - min_val = float("0.843901") - max_val = float("1.15229") - mean = float("1.01548") - std = float("0.0503056") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [192] - dtype = "float32" - min_val = float("0.00247351") - max_val = float("0.0164084") - mean = float("0.00608305") - std = float("0.00215721") - data = None - - -class Program_weight_tensor_parameter_28: - name = 
"parameter_28" - shape = [192] - dtype = "float32" - min_val = float("-0.076298") - max_val = float("0.0940074") - mean = float("-0.0220292") - std = float("0.025105") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0441749") - max_val = float("0.05036") - mean = float("-0.000272052") - std = float("0.0041864") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [192] - dtype = "float32" - min_val = float("-0.296363") - max_val = float("-0.00731421") - mean = float("-0.0909181") - std = float("0.0603085") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [192] - dtype = "float32" - min_val = float("0.781699") - max_val = float("1.34829") - mean = float("1.05295") - std = float("0.0659017") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [192] - dtype = "float32" - min_val = float("0.00518533") - max_val = float("0.0436202") - mean = float("0.0137517") - std = float("0.00626765") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [192] - dtype = "float32" - min_val = float("-0.24857") - max_val = float("0.288341") - mean = float("-0.0322151") - std = float("0.0764451") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0319297") - max_val = float("0.0371234") - mean = float("-6.61646e-05") - std = float("0.00247009") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [192] - dtype = "float32" - min_val = float("-0.530523") - max_val = float("1.03181") - mean = float("0.148142") - std = float("0.25938") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [192] - dtype = "float32" - min_val = float("0.732244") - max_val = float("1.56838") - mean = float("1.01394") - std = float("0.106713") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [192] - dtype = "float32" - min_val = float("0.00474578") - max_val = float("0.0449361") - mean = float("0.0159591") - std = float("0.00817663") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [192] - dtype = "float32" - min_val = float("-0.23459") - max_val = float("0.15968") - mean = float("-0.0416686") - std = float("0.0471789") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.130799") - max_val = float("0.0914109") - mean = float("-0.000795506") - std = float("0.00978873") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [96] - dtype = "float32" - min_val = float("-0.290307") - max_val = float("0.172582") - mean = float("-0.0708609") - std = float("0.105586") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [96] - dtype = "float32" - min_val = float("0.730168") - max_val = float("1.20841") - mean = float("0.877696") - std = float("0.0778222") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [96] - dtype = "float32" - min_val = float("0.00209252") - max_val = float("0.0172969") - mean = float("0.00895461") - std = float("0.00362651") - data = None - - -class 
Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [96] - dtype = "float32" - min_val = float("-0.030881") - max_val = float("0.0264932") - mean = float("-0.00664423") - std = float("0.0163084") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0481719") - max_val = float("0.0491173") - mean = float("-0.00114232") - std = float("0.0059524") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [96] - dtype = "float32" - min_val = float("-0.290307") - max_val = float("0.172582") - mean = float("-0.0708609") - std = float("0.105586") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [96] - dtype = "float32" - min_val = float("0.970765") - max_val = float("1.31932") - mean = float("1.13205") - std = float("0.0751806") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [96] - dtype = "float32" - min_val = float("0.00509897") - max_val = float("0.0269461") - mean = float("0.0144103") - std = float("0.00491216") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [96] - dtype = "float32" - min_val = float("-0.0593684") - max_val = float("0.0709636") - mean = float("-0.00964274") - std = float("0.0202695") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0744412") - max_val = float("0.0808028") - mean = float("-0.000134804") - std = float("0.00586885") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [96] - dtype = "float32" - min_val = float("-0.672978") - max_val = float("0.110937") - mean = float("-0.259195") - std = float("0.150512") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [96] - dtype = "float32" - min_val = float("0.800552") - max_val = float("1.41215") - mean = float("1.04504") - std = float("0.11692") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [96] - dtype = "float32" - min_val = float("0.0141801") - max_val = float("0.0721458") - mean = float("0.0300632") - std = float("0.0107204") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [96] - dtype = "float32" - min_val = float("-0.0891538") - max_val = float("0.0552256") - mean = float("-0.0348822") - std = float("0.0246653") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0755165") - max_val = float("0.0788485") - mean = float("-0.00042175") - std = float("0.00651052") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [96] - dtype = "float32" - min_val = float("-0.644167") - max_val = float("0.152209") - mean = float("-0.155579") - std = float("0.115703") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [96] - dtype = "float32" - min_val = float("0.849539") - max_val = float("1.26621") - mean = float("1.03329") - std = float("0.0722571") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [96] - dtype = "float32" - min_val = float("0.00630263") - max_val = float("0.0491987") - mean = float("0.01393") - std = 
float("0.00582943") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [96] - dtype = "float32" - min_val = float("-0.115691") - max_val = float("0.0294622") - mean = float("-0.0334293") - std = float("0.0282444") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [96, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0675787") - max_val = float("0.0715923") - mean = float("-0.000602525") - std = float("0.00883627") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [96] - dtype = "float32" - min_val = float("-0.19838") - max_val = float("0.0828483") - mean = float("-0.0298722") - std = float("0.0460234") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [96] - dtype = "float32" - min_val = float("0.684756") - max_val = float("1.33599") - mean = float("0.954296") - std = float("0.0884899") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [96] - dtype = "float32" - min_val = float("0.00456548") - max_val = float("0.0382428") - mean = float("0.00924608") - std = float("0.00437704") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [96] - dtype = "float32" - min_val = float("-0.0819243") - max_val = float("0.0486769") - mean = float("-0.0149872") - std = float("0.0303643") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [96, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0851124") - max_val = float("0.0753681") - mean = float("-0.000267914") - std = float("0.00746335") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [96] - dtype = "float32" - min_val = float("-0.335793") - max_val = float("0.0181512") - mean = float("-0.108645") - std = float("0.0840511") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [96] - dtype = "float32" - min_val = float("0.72996") - max_val = float("1.20589") - mean = float("1.05546") - std = float("0.0750732") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [96] - dtype = "float32" - min_val = float("0.00560508") - max_val = float("0.0383423") - mean = float("0.0151426") - std = float("0.00688387") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [96] - dtype = "float32" - min_val = float("-0.364406") - max_val = float("0.413851") - mean = float("-0.0185548") - std = float("0.134763") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0556938") - max_val = float("0.0565333") - mean = float("-3.0497e-05") - std = float("0.00548975") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [96] - dtype = "float32" - min_val = float("-1.07759") - max_val = float("2.35772") - mean = float("0.310808") - std = float("0.586812") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [96] - dtype = "float32" - min_val = float("0.468543") - max_val = float("1.40561") - mean = float("0.882514") - std = float("0.167348") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [96] - dtype = "float32" - min_val = float("0.00504964") - max_val = float("0.106532") - 
mean = float("0.0264739") - std = float("0.0164013") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [96] - dtype = "float32" - min_val = float("-0.217561") - max_val = float("0.181267") - mean = float("-0.0162134") - std = float("0.0711045") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.149095") - max_val = float("0.112249") - mean = float("-0.00101846") - std = float("0.0196923") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.064872") - max_val = float("0.067209") - mean = float("-0.00182682") - std = float("0.0118552") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.126562") - max_val = float("0.145589") - mean = float("-0.00030101") - std = float("0.0128623") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.105155") - max_val = float("0.121463") - mean = float("-0.000928041") - std = float("0.0141327") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - 
shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [48, 224, 1, 1] - dtype = "float32" - min_val = float("-0.18791") - max_val = float("0.126171") - mean = float("-0.00105876") - std = float("0.0178172") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [48, 224, 1, 1] - dtype = "float32" - min_val = float("-0.102039") - max_val = float("0.121479") - mean = float("2.99226e-05") - std = float("0.0123136") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [96] - dtype = "float32" - min_val = float("-0.355935") - max_val = float("0.392551") - mean = float("-0.00746683") - std = float("0.135778") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [96] - dtype = "float32" - min_val = float("0.58214") - max_val = float("1.61741") - mean = float("0.798871") - std = float("0.141455") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [96] - dtype = "float32" - min_val = float("0.00708678") - max_val = float("0.0623589") - mean = float("0.0174758") - std = float("0.00821209") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [96] - dtype = "float32" - min_val = float("-0.138992") - max_val = float("0.0786226") - mean = float("-0.0313739") - std = float("0.0364738") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0964661") - max_val = float("0.0936981") - mean = float("-0.000909439") - std = float("0.0125162") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [192] - dtype = "float32" - min_val = float("-0.337392") - max_val = float("0.172941") - mean = float("-0.0803987") - std = float("0.0895922") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [192] - dtype = "float32" - min_val = float("0.695083") - max_val = float("1.47609") - mean = float("0.990456") - std = float("0.0995214") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [192] - dtype = "float32" - min_val = float("0.00754238") - max_val = float("0.0988877") - mean = float("0.0184313") - std = float("0.00883254") - data = None - - -class 
Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [192] - dtype = "float32" - min_val = float("-0.204684") - max_val = float("0.16103") - mean = float("-0.0503988") - std = float("0.049554") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.099788") - max_val = float("0.114986") - mean = float("-0.00122472") - std = float("0.0120725") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [96] - dtype = "float32" - min_val = float("-0.307239") - max_val = float("0.101878") - mean = float("-0.0816011") - std = float("0.0993456") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [96] - dtype = "float32" - min_val = float("0.551598") - max_val = float("0.936081") - mean = float("0.809201") - std = float("0.0653613") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [96] - dtype = "float32" - min_val = float("0.0035") - max_val = float("0.0258289") - mean = float("0.0122223") - std = float("0.00384374") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [96] - dtype = "float32" - min_val = float("-0.0463445") - max_val = float("0.0326283") - mean = float("-0.0141616") - std = float("0.0176504") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0439526") - max_val = float("0.0551086") - mean = float("-0.00157619") - std = float("0.00827761") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [96] - dtype = "float32" - min_val = float("-0.307239") - max_val = float("0.101878") - mean = float("-0.0816011") - std = float("0.0993456") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [96] - dtype = "float32" - min_val = float("0.842432") - max_val = float("1.28751") - mean = float("1.03469") - std = float("0.0943599") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [96] - dtype = "float32" - min_val = float("0.013302") - max_val = float("0.0739954") - mean = float("0.0273068") - std = float("0.00856333") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [96] - dtype = "float32" - min_val = float("-0.0788659") - max_val = float("0.0467382") - mean = float("-0.0227617") - std = float("0.0268656") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0842602") - max_val = float("0.155772") - mean = float("-0.00023666") - std = float("0.00725825") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [96] - dtype = "float32" - min_val = float("-0.731847") - max_val = float("0.315882") - mean = float("-0.275694") - std = float("0.175237") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [96] - dtype = "float32" - min_val = float("0.765079") - max_val = float("1.30982") - mean = float("1.04342") - std = float("0.115486") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [96] - dtype = "float32" - min_val = float("0.0173859") - max_val = float("0.0675619") - mean = 
float("0.0355326") - std = float("0.00949136") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [96] - dtype = "float32" - min_val = float("-0.119758") - max_val = float("0.0576312") - mean = float("-0.0514595") - std = float("0.0439535") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.141323") - max_val = float("0.125212") - mean = float("-0.000531571") - std = float("0.00857427") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [96] - dtype = "float32" - min_val = float("-0.649336") - max_val = float("0.386205") - mean = float("-0.253708") - std = float("0.210117") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [96] - dtype = "float32" - min_val = float("0.743859") - max_val = float("1.37989") - mean = float("1.02545") - std = float("0.12205") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [96] - dtype = "float32" - min_val = float("0.00720624") - max_val = float("0.0314703") - mean = float("0.0139725") - std = float("0.00426494") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [96] - dtype = "float32" - min_val = float("-0.313971") - max_val = float("0.275864") - mean = float("0.0100895") - std = float("0.0634222") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.167863") - max_val = float("0.111638") - mean = float("-0.000518773") - std = float("0.0111108") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [96] - dtype = "float32" - min_val = float("-0.23906") - max_val = float("0.172271") - mean = float("-0.0410503") - std = float("0.0886321") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [96] - dtype = "float32" - min_val = float("0.917932") - max_val = float("1.41199") - mean = float("1.07275") - std = float("0.0920705") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [96] - dtype = "float32" - min_val = float("0.00501564") - max_val = float("0.0446779") - mean = float("0.0113275") - std = float("0.00531961") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [96] - dtype = "float32" - min_val = float("-0.066575") - max_val = float("0.0649515") - mean = float("0.00698475") - std = float("0.0279665") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.112935") - max_val = float("0.117872") - mean = float("-0.000426335") - std = float("0.00968317") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [192] - dtype = "float32" - min_val = float("-0.540293") - max_val = float("-0.102122") - mean = float("-0.294636") - std = float("0.0708506") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [192] - dtype = "float32" - min_val = float("0.648817") - max_val = float("1.08017") - mean = float("0.851902") - std = float("0.0725407") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [192] - dtype = "float32" - min_val 
= float("0.00844823") - max_val = float("0.0608477") - mean = float("0.0196898") - std = float("0.00777921") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [192] - dtype = "float32" - min_val = float("-0.103642") - max_val = float("0.0398287") - mean = float("-0.0337268") - std = float("0.0283827") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0581647") - max_val = float("0.0551684") - mean = float("-0.000715396") - std = float("0.00866401") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [384] - dtype = "float32" - min_val = float("-0.522165") - max_val = float("0.213846") - mean = float("-0.168509") - std = float("0.0774913") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [384] - dtype = "float32" - min_val = float("0.849121") - max_val = float("1.39276") - mean = float("1.06266") - std = float("0.0773261") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [384] - dtype = "float32" - min_val = float("0.00619419") - max_val = float("0.0326996") - mean = float("0.0131745") - std = float("0.00396857") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [384] - dtype = "float32" - min_val = float("-0.120299") - max_val = float("0.0721486") - mean = float("-0.0363523") - std = float("0.0318809") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0950377") - max_val = float("0.108554") - mean = float("-0.000578436") - std = float("0.00775758") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [192] - dtype = "float32" - min_val = float("-0.384308") - max_val = float("0.227819") - mean = float("-0.118179") - std = float("0.102012") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [192] - dtype = "float32" - min_val = float("0.868523") - max_val = float("1.51316") - mean = float("1.12307") - std = float("0.119072") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [192] - dtype = "float32" - min_val = float("0.0671554") - max_val = float("0.590166") - mean = float("0.189132") - std = float("0.0743371") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [192] - dtype = "float32" - min_val = float("-1.66989") - max_val = float("0.864913") - mean = float("-0.128574") - std = float("0.49302") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [192, 768, 1, 1] - dtype = "float32" - min_val = float("-0.113072") - max_val = float("0.078636") - mean = float("-9.22477e-05") - std = float("0.00673866") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [192] - dtype = "float32" - min_val = float("-0.24328") - max_val = float("0.168953") - mean = float("-0.0173023") - std = float("0.0539756") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [192] - dtype = "float32" - min_val = float("0.617702") - max_val = float("1.01648") - mean = float("0.837238") - std = float("0.0631802") - data = None - - -class Program_weight_tensor_parameter_152: 
- name = "parameter_152" - shape = [192] - dtype = "float32" - min_val = float("0.00614033") - max_val = float("0.0266385") - mean = float("0.0103423") - std = float("0.0026319") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [192] - dtype = "float32" - min_val = float("-0.127304") - max_val = float("0.0867262") - mean = float("-0.0538262") - std = float("0.0399598") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0383872") - max_val = float("0.0621299") - mean = float("-0.00140552") - std = float("0.00644221") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [192] - dtype = "float32" - min_val = float("-0.24328") - max_val = float("0.168953") - mean = float("-0.0173023") - std = float("0.0539756") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [192] - dtype = "float32" - min_val = float("0.874918") - max_val = float("1.46078") - mean = float("1.10611") - std = float("0.129545") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [192] - dtype = "float32" - min_val = float("0.0245048") - max_val = float("0.0980499") - mean = float("0.045066") - std = float("0.0114572") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [192] - dtype = "float32" - min_val = float("-0.272811") - max_val = float("0.0283262") - mean = float("-0.122117") - std = float("0.0543901") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.037555") - max_val = float("0.0532694") - mean = float("-0.000403704") - std = float("0.00395116") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [192] - dtype = "float32" - min_val = float("-0.311171") - max_val = float("0.0670066") - mean = float("-0.115181") - std = float("0.0802375") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [192] - dtype = "float32" - min_val = float("0.910219") - max_val = float("1.44564") - mean = float("1.10838") - std = float("0.101829") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [192] - dtype = "float32" - min_val = float("0.0298363") - max_val = float("0.10738") - mean = float("0.0514797") - std = float("0.0140253") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [192] - dtype = "float32" - min_val = float("-0.487163") - max_val = float("0.229195") - mean = float("-0.132677") - std = float("0.0943938") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0485576") - max_val = float("0.040365") - mean = float("-0.000462303") - std = float("0.00440033") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [192] - dtype = "float32" - min_val = float("-0.444127") - max_val = float("0.411817") - mean = float("-0.1378") - std = float("0.130204") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [192] - dtype = "float32" - min_val = float("0.955474") - max_val = float("1.3718") - mean = float("1.11018") - std = 
float("0.072198") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [192] - dtype = "float32" - min_val = float("0.0481328") - max_val = float("0.201416") - mean = float("0.0730914") - std = float("0.0211211") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [192] - dtype = "float32" - min_val = float("-0.272425") - max_val = float("0.493843") - mean = float("-0.124677") - std = float("0.0789093") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [192, 512, 1, 1] - dtype = "float32" - min_val = float("-0.0520012") - max_val = float("0.0916206") - mean = float("-0.00073398") - std = float("0.0076239") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [192] - dtype = "float32" - min_val = float("-0.16409") - max_val = float("0.00104506") - mean = float("-0.0652767") - std = float("0.0261643") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [192] - dtype = "float32" - min_val = float("0.819388") - max_val = float("1.06661") - mean = float("0.968901") - std = float("0.0460972") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [192] - dtype = "float32" - min_val = float("0.0308169") - max_val = float("0.0790712") - mean = float("0.0462185") - std = float("0.00870431") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [192] - dtype = "float32" - min_val = float("-0.182206") - max_val = float("0.0940335") - mean = float("-0.089627") - std = float("0.0463932") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [192, 512, 1, 1] - dtype = "float32" - min_val = float("-0.0262639") - max_val = float("0.0504804") - mean = float("-0.000603328") - std = float("0.00628844") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [512] - dtype = "float32" - min_val = float("-4.82816") - max_val = float("-0.111144") - mean = float("-2.29505") - std = float("0.77518") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [512] - dtype = "float32" - min_val = float("2.1017") - max_val = float("5.21657") - mean = float("3.70059") - std = float("0.482718") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [512] - dtype = "float32" - min_val = float("0.00200829") - max_val = float("0.0114641") - mean = float("0.00406053") - std = float("0.00119042") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [512] - dtype = "float32" - min_val = float("-0.123017") - max_val = float("0.0755615") - mean = float("-0.042568") - std = float("0.0275421") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0810492") - max_val = float("0.133509") - mean = float("-0.000971967") - std = float("0.00774565") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [384] - dtype = "float32" - min_val = float("-0.0162429") - max_val = float("-0.000278986") - mean = float("-0.0054732") - std = float("0.00365563") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [384, 384, 1, 1] - dtype = "float32" - 
min_val = float("-0.18691") - max_val = float("0.145937") - mean = float("-0.00212935") - std = float("0.00665719") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [192] - dtype = "float32" - min_val = float("-2.38779") - max_val = float("3.17061") - mean = float("-0.203411") - std = float("0.563155") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [192] - dtype = "float32" - min_val = float("0.123346") - max_val = float("2.40428") - mean = float("0.524679") - std = float("0.3349") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [192] - dtype = "float32" - min_val = float("0.000128668") - max_val = float("0.0027455") - mean = float("0.00061953") - std = float("0.000399688") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [192] - dtype = "float32" - min_val = float("-0.0609001") - max_val = float("0.0904291") - mean = float("0.010232") - std = float("0.0217379") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0576781") - max_val = float("0.0497244") - mean = float("-0.000313562") - std = float("0.00494929") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [192] - dtype = "float32" - min_val = float("-2.38779") - max_val = float("3.17061") - mean = float("-0.203411") - std = float("0.563155") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [192] - dtype = "float32" - min_val = float("0.678982") - max_val = float("3.07273") - mean = float("1.54519") - std = float("0.450797") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [192] - dtype = "float32" - min_val = float("0.00238059") - max_val = float("0.0225906") - mean = float("0.00724614") - std = float("0.00305679") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [192] - dtype = "float32" - min_val = float("-0.212221") - max_val = float("0.164978") - mean = float("0.0097172") - std = float("0.0447557") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0724552") - max_val = float("0.0669129") - mean = float("-8.66037e-05") - std = float("0.00438841") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [192] - dtype = "float32" - min_val = float("-3.43225") - max_val = float("1.16814") - mean = float("-1.42818") - std = float("0.634737") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [192] - dtype = "float32" - min_val = float("0.390538") - max_val = float("1.72646") - mean = float("1.08981") - std = float("0.189952") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [192] - dtype = "float32" - min_val = float("0.0318757") - max_val = float("0.213938") - mean = float("0.0702584") - std = float("0.0222017") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [192] - dtype = "float32" - min_val = float("-1.23721") - max_val = float("0.358903") - mean = float("-0.222679") - std = float("0.169848") - data = None - - -class Program_weight_tensor_parameter_196: - name = 
"parameter_196" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0627818") - max_val = float("0.0522698") - mean = float("-0.000387135") - std = float("0.00513507") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [192] - dtype = "float32" - min_val = float("-3.87733") - max_val = float("4.24375") - mean = float("-0.629121") - std = float("0.988583") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [192] - dtype = "float32" - min_val = float("0.579177") - max_val = float("4.17445") - mean = float("1.54468") - std = float("0.398498") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [192] - dtype = "float32" - min_val = float("0.00376591") - max_val = float("0.0199072") - mean = float("0.00750177") - std = float("0.0024196") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [192] - dtype = "float32" - min_val = float("-0.181981") - max_val = float("0.142706") - mean = float("0.0472299") - std = float("0.0360465") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0974781") - max_val = float("0.0606577") - mean = float("-0.00130154") - std = float("0.00921657") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [192] - dtype = "float32" - min_val = float("-2.93753") - max_val = float("1.02574") - mean = float("-0.426705") - std = float("0.681645") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [192] - dtype = "float32" - min_val = float("0.700073") - max_val = float("3.61084") - mean = float("1.48155") - std = float("0.505235") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [192] - dtype = "float32" - min_val = float("0.00182554") - max_val = float("0.00715893") - mean = float("0.00350375") - std = float("0.000839174") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [192] - dtype = "float32" - min_val = float("-0.0595251") - max_val = float("0.0837498") - mean = float("0.0142918") - std = float("0.0271037") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0725004") - max_val = float("0.0651294") - mean = float("-0.000532804") - std = float("0.00742023") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [384] - dtype = "float32" - min_val = float("-2.84234") - max_val = float("1.12211") - mean = float("-0.753291") - std = float("0.497125") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [384] - dtype = "float32" - min_val = float("0.420357") - max_val = float("1.80213") - mean = float("0.867653") - std = float("0.218147") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [384] - dtype = "float32" - min_val = float("0.00857563") - max_val = float("0.0680256") - mean = float("0.0166475") - std = float("0.00541149") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [384] - dtype = "float32" - min_val = float("-0.467733") - max_val = float("0.319199") - mean = float("0.00858269") - std = 
float("0.0842052") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0544237") - max_val = float("0.0537574") - mean = float("-0.000167554") - std = float("0.00423747") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [256] - dtype = "float32" - min_val = float("-2.82015") - max_val = float("1.46513") - mean = float("-1.07771") - std = float("0.633538") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [256] - dtype = "float32" - min_val = float("0.419279") - max_val = float("1.76958") - mean = float("0.978268") - std = float("0.170537") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [256] - dtype = "float32" - min_val = float("0.00255461") - max_val = float("0.0103385") - mean = float("0.00516842") - std = float("0.00131104") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [256] - dtype = "float32" - min_val = float("-0.222755") - max_val = float("0.223332") - mean = float("-0.057983") - std = float("0.0714632") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.122474") - max_val = float("0.173112") - mean = float("-0.00112125") - std = float("0.0137328") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [192] - dtype = "float32" - min_val = float("-0.0217534") - max_val = float("0.00201664") - mean = float("-0.00613465") - std = float("0.00506215") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.23422") - max_val = float("0.177901") - mean = float("-0.00406409") - std = float("0.0102684") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [96] - dtype = "float32" - min_val = float("-2.2781") - max_val = float("0.7544") - mean = float("-0.115735") - std = float("0.508041") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [96] - dtype = "float32" - min_val = float("-0.0586392") - max_val = float("2.30658") - mean = float("0.261357") - std = float("0.366816") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [96] - dtype = "float32" - min_val = float("5.76256e-12") - max_val = float("0.00191318") - mean = float("0.000441882") - std = float("0.000366175") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [96] - dtype = "float32" - min_val = float("-0.048434") - max_val = float("0.0685867") - mean = float("0.00619257") - std = float("0.0183565") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0376665") - max_val = float("0.0662012") - mean = float("-0.000297498") - std = float("0.00560219") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [96] - dtype = "float32" - min_val = float("-2.2781") - max_val = float("0.7544") - mean = float("-0.115735") - std = float("0.508041") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [96] - dtype = "float32" - min_val = 
float("0.35139") - max_val = float("3.24211") - mean = float("1.2913") - std = float("0.633692") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [96] - dtype = "float32" - min_val = float("0.00288832") - max_val = float("0.0301202") - mean = float("0.0128354") - std = float("0.00565579") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [96] - dtype = "float32" - min_val = float("-0.186493") - max_val = float("0.170245") - mean = float("0.0280013") - std = float("0.0660658") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0540548") - max_val = float("0.0585126") - mean = float("-0.000334364") - std = float("0.00738032") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [96] - dtype = "float32" - min_val = float("-2.79991") - max_val = float("1.50593") - mean = float("-1.09119") - std = float("0.696756") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [96] - dtype = "float32" - min_val = float("0.324891") - max_val = float("1.80804") - mean = float("1.07292") - std = float("0.213034") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [96] - dtype = "float32" - min_val = float("0.0318799") - max_val = float("0.113866") - mean = float("0.0602373") - std = float("0.0156272") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [96] - dtype = "float32" - min_val = float("-1.57323") - max_val = float("0.320517") - mean = float("-0.143123") - std = float("0.265242") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0521981") - max_val = float("0.0688539") - mean = float("-0.000580267") - std = float("0.00795035") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [96] - dtype = "float32" - min_val = float("-2.54041") - max_val = float("0.664474") - mean = float("-0.0487346") - std = float("0.474109") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [96] - dtype = "float32" - min_val = float("-0.0773368") - max_val = float("3.15117") - mean = float("0.280349") - std = float("0.408758") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [96] - dtype = "float32" - min_val = float("1.3812e-10") - max_val = float("0.0159383") - mean = float("0.00155641") - std = float("0.00234573") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [96] - dtype = "float32" - min_val = float("-0.0454894") - max_val = float("0.108167") - mean = float("0.0171959") - std = float("0.0277947") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.11378") - max_val = float("0.0697877") - mean = float("-0.00124357") - std = float("0.00831874") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [96] - dtype = "float32" - min_val = float("-2.54041") - max_val = float("0.664475") - mean = float("-0.0487346") - std = float("0.474109") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" 
- shape = [96] - dtype = "float32" - min_val = float("0.343863") - max_val = float("2.99332") - mean = float("0.929472") - std = float("0.412076") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [96] - dtype = "float32" - min_val = float("0.00889548") - max_val = float("0.0360352") - mean = float("0.0196807") - std = float("0.00631594") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [96] - dtype = "float32" - min_val = float("-0.183188") - max_val = float("0.203949") - mean = float("0.0349526") - std = float("0.0692374") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0529212") - max_val = float("0.050965") - mean = float("-0.000562978") - std = float("0.00752018") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [96] - dtype = "float32" - min_val = float("-2.01882") - max_val = float("1.6565") - mean = float("-0.91983") - std = float("0.650475") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [96] - dtype = "float32" - min_val = float("0.443451") - max_val = float("1.97486") - mean = float("1.06386") - std = float("0.2277") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [96] - dtype = "float32" - min_val = float("0.00968838") - max_val = float("0.115069") - mean = float("0.0238439") - std = float("0.011445") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [96] - dtype = "float32" - min_val = float("-2.04544") - max_val = float("0.240057") - mean = float("-0.0326069") - std = float("0.235198") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.102806") - max_val = float("0.1255") - mean = float("-0.000419155") - std = float("0.00850691") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [96] - dtype = "float32" - min_val = float("-1.61915") - max_val = float("1.88666") - mean = float("0.00600959") - std = float("0.838747") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [96] - dtype = "float32" - min_val = float("0.348796") - max_val = float("1.32224") - mean = float("0.700437") - std = float("0.236363") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [96] - dtype = "float32" - min_val = float("0.00889485") - max_val = float("0.0570436") - mean = float("0.0271583") - std = float("0.0110354") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [96] - dtype = "float32" - min_val = float("-0.327651") - max_val = float("0.488071") - mean = float("-0.0740222") - std = float("0.115843") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.124833") - max_val = float("0.114017") - mean = float("-0.001301") - std = float("0.0137993") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [96] - dtype = "float32" - min_val = float("-2.46673") - max_val = float("1.7157") - mean = float("0.340991") - std = float("0.679024") - data = None - - -class 
Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [96] - dtype = "float32" - min_val = float("0.539684") - max_val = float("4.88444") - mean = float("1.48216") - std = float("0.959789") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [96] - dtype = "float32" - min_val = float("0.00993858") - max_val = float("0.070056") - mean = float("0.0215824") - std = float("0.0104011") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [96] - dtype = "float32" - min_val = float("-0.283006") - max_val = float("0.290595") - mean = float("-0.00648709") - std = float("0.112547") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0886065") - max_val = float("0.166141") - mean = float("-0.000584531") - std = float("0.0139549") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [192] - dtype = "float32" - min_val = float("-4.44154") - max_val = float("2.00924") - mean = float("-0.0984691") - std = float("0.883168") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [192] - dtype = "float32" - min_val = float("0.574537") - max_val = float("4.5171") - mean = float("1.08183") - std = float("0.426228") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [192] - dtype = "float32" - min_val = float("0.00770389") - max_val = float("0.101175") - mean = float("0.0267908") - std = float("0.0158745") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [192] - dtype = "float32" - min_val = float("-0.351997") - max_val = float("0.260742") - mean = float("0.0118419") - std = float("0.104813") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.0873539") - max_val = float("0.0704455") - mean = float("-0.000213424") - std = float("0.00708121") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [128] - dtype = "float32" - min_val = float("-2.15168") - max_val = float("1.36722") - mean = float("-0.673621") - std = float("0.681958") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [128] - dtype = "float32" - min_val = float("0.364727") - max_val = float("2.2521") - mean = float("0.875301") - std = float("0.236523") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [128] - dtype = "float32" - min_val = float("0.000976585") - max_val = float("0.0184592") - mean = float("0.00559844") - std = float("0.0023354") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [128] - dtype = "float32" - min_val = float("-0.295433") - max_val = float("0.256224") - mean = float("-0.0630695") - std = float("0.112593") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.211698") - max_val = float("0.196697") - mean = float("-0.00125294") - std = float("0.0226183") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [96] - dtype = "float32" - min_val = float("-0.0253266") - max_val = float("0.00358212") - mean = 
float("-0.0083497") - std = float("0.00765327") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.293386") - max_val = float("0.279833") - mean = float("-0.00579641") - std = float("0.0184424") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0497361") - max_val = float("0.082633") - mean = float("-0.00112833") - std = float("0.0118656") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0995478") - max_val = float("0.100891") - mean = float("-0.00032949") - std = float("0.012257") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.09303") - max_val = float("0.111166") - mean = float("-0.00105886") - std = float("0.0130745") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [48] - dtype = "float32" - 
min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0686653") - max_val = float("0.073192") - mean = float("-0.00264945") - std = float("0.015246") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0915852") - max_val = float("0.0964297") - mean = float("-0.000904809") - std = float("0.0119445") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0993379") - max_val = float("0.0790286") - mean = float("-0.00075533") - std = float("0.0139095") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0993822") - max_val = float("0.120858") - mean = float("-0.00232535") - std = float("0.0225912") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [48] - dtype = "float32" 
- min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.158166") - max_val = float("0.239253") - mean = float("0.000385605") - std = float("0.023753") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [96] - dtype = "float32" - min_val = float("-3.31552") - max_val = float("3.83527") - mean = float("0.267103") - std = float("1.21077") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [96] - dtype = "float32" - min_val = float("0.510535") - max_val = float("5.40356") - mean = float("1.12504") - std = float("0.54684") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [96] - dtype = "float32" - min_val = float("0.0102515") - max_val = float("0.199232") - mean = float("0.0491335") - std = float("0.034631") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [96] - dtype = "float32" - min_val = float("-0.48286") - max_val = float("0.474603") - mean = float("-0.0301538") - std = float("0.175452") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.0965118") - max_val = float("0.119571") - mean = float("-0.000173883") - std = float("0.0116758") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [64, 48, 1, 1] - dtype = "float32" - min_val = float("-0.1726") - max_val = float("0.173966") - mean = float("-0.00244534") - std = float("0.0334275") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.162368") - max_val = float("0.15788") - mean = float("-0.0128963") - std = float("0.0255366") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [24, 24, 1, 1] - dtype = "float32" - min_val = float("-0.101262") - max_val = float("0.144448") - mean = float("-0.00119167") - std = float("0.0240568") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [24, 24, 3, 3] - dtype = "float32" - min_val = float("-0.0939293") - max_val = float("0.0880602") - mean = float("-0.000667988") - std = float("0.0206646") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [24, 24, 3, 3] - dtype = "float32" - min_val = float("-0.121119") - max_val = float("0.170902") - mean = float("-0.000408415") - std = float("0.0230859") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [24, 48, 1, 1] - dtype = "float32" - min_val = float("-0.19645") - max_val = float("0.180409") - mean = float("-0.00327746") - std = float("0.0345822") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [24, 48, 1, 1] - dtype = "float32" - min_val = float("-0.189487") - max_val = float("0.153256") - mean = float("-0.00117991") - std = float("0.037909") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [48, 32, 3, 3] - dtype = "float32" - min_val = float("-0.165199") - max_val = float("0.114686") - mean = float("-0.000200965") - std = float("0.0197641") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [32, 16, 3, 3] - dtype = "float32" - min_val = float("-0.233333") - max_val = float("0.23547") - mean = float("-0.000463458") - std = float("0.0335966") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [16] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [16, 16, 3, 3] - dtype = "float32" - min_val = float("-0.296254") - max_val = float("0.317451") - mean = float("-0.000376615") - std = float("0.0448856") - data = None - - -class 
Program_weight_tensor_parameter_363:
-    name = "parameter_363"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_364:
-    name = "parameter_364"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_365:
-    name = "parameter_365"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_366:
-    name = "parameter_366"
-    shape = [16]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_367:
-    name = "parameter_367"
-    shape = [16, 3, 3, 3]
-    dtype = "float32"
-    min_val = float("-0.229598")
-    max_val = float("0.264831")
-    mean = float("-0.00222273")
-    std = float("0.0664701")
-    data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt
deleted file mode 100644
index eb0adff13..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_hash.txt
+++ /dev/null
@@ -1 +0,0 @@
-c1b8c019f1768926fb1763d743b6f1af638d4214fa9152321214c544c752c751
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json
deleted file mode 100644
index d8719c2c9..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/graph_net.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "framework": "paddle",
-    "model_name": "PP-YOLOE-S_vehicle",
-    "num_devices_required": 1,
-    "num_nodes_required": 1
-}
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py
deleted file mode 100644
index 098cbb449..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/model.py
+++ /dev/null
@@ -1,385 +0,0 @@
-import paddle
-
-
-class GraphModule(paddle.nn.Layer):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6):
-        # pd_op.full: (xi64) <- ()
-        full_0 = paddle._C_ops.full(
-            [], float("0"), paddle.int64, paddle.framework._current_expected_place()
-        )
-
-        # pd_op.equal: (xb) <- (xi64, xi64)
-        equal_0 = paddle._C_ops.equal(data_0, full_0)
-
-        # pd_op.cast: (xi64) <- (xb)
-        cast_0 = paddle._C_ops.cast(equal_0, paddle.int64)
-        del equal_0
-
-        # pd_op.not_equal: (xb) <- (xi64, xi64)
-        not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0)
-        del cast_0
-
-        # pd_op.cast: (xi64) <- (xb)
-        cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64)
-        del not_equal_0
-
-        # pd_op.equal: (xb) <- (xi64, xi64)
-        equal_1 = paddle._C_ops.equal(cast_1, full_0)
-        del cast_1, full_0
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_0 = [2]
-
-        # pd_op.unsqueeze: (2x-1x1x4xf32) <- (2x-1x4xf32, 1xi64)
-        unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0)
-        del data_5
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_1 = [1]
-
-        # pd_op.unsqueeze: (2x1x-1x4xf32) <- (2x-1x4xf32, 1xi64)
-        unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1)
-        del data_2
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_2 = [0]
-
-        # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64)
-        slice_0 = paddle._C_ops.slice(
-            unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], []
-        )
-
# pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2147483647] - - # pd_op.slice: (2x-1x1x2xf32) <- (2x-1x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] - ) - - # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 - - # pd_op.slice: (2x1x-1x2xf32) <- (2x1x-1x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] - ) - del full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (2x-1x-1x2xf32) <- (2x-1x1x2xf32, 2x1x-1x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 2x-1x-1x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x-1x-1x2xf32) <- (2x-1x-1x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) - del subtract_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: (2x-1x-1xf32) <- (2x-1x-1x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 2x-1x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (2x-1x1x2xf32) <- (2x-1x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) - del subtract_1 - - # pd_op.prod: (2x-1x1xf32) <- (2x-1x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del clip_1 - - # pd_op.subtract: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 2x1x-1x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (2x1x-1x2xf32) <- (2x1x-1x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) - del full_1, full_2, subtract_2 - - # pd_op.prod: (2x1x-1xf32) <- (2x1x-1x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 - - # pd_op.add: (2x-1x-1xf32) <- (2x-1x1xf32, 2x1x-1xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x-1x-1xf32) <- (2x-1x-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) - del full_3, subtract_3 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (2x4x-1xf32) <- (2x-1x4xf32) - transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) - del data_1 - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.float64, 
paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_4, full_5, full_6, dtype="int32") - del full_4, full_5, full_6 - - # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_7, data_0] - del data_0, full_7 - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.tile: (2x-1xi32) <- (2x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) - del stack_0 - - # pd_op.squeeze: (2x-1xi32) <- (2x-1x1xi32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) - del data_4 - - # builtin.combine: ([2x-1xi32, 2x-1xi32]) <- (2x-1xi32, 2x-1xi32) - combine_1 = [tile_0, squeeze_0] - del squeeze_0, tile_0 - - # pd_op.stack: (2x-1x2xi32) <- ([2x-1xi32, 2x-1xi32]) - stack_1 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # pd_op.gather_nd: (2x-1x-1xf32) <- (2x4x-1xf32, 2x-1x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) - del stack_1, transpose_0 - - # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) - del gather_nd_0 - - # pd_op.pow: (2x-1x-1xf32) <- (2x-1x-1xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [0, 1] - - # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) - del data_3, full_int_array_5 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_8) - del unsqueeze_3 - - # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) <- (2x-1x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_8) - del full_8, unsqueeze_0 - - # builtin.split: (2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32) <- ([2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32, 2x-1x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - del split_2 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (1x1x-1x1xf32, 2x-1x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - del split_3 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - del split_0, split_4 - - # pd_op.subtract: (2x-1x-1x1xf32) <- (2x-1x1x1xf32, 1x1x-1x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) - del split_1, split_5 - - # pd_op.full: (1xi32) <- () - full_9 = paddle._C_ops.full( - [1], 
float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32]) <- (2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32) - combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 - - # pd_op.concat: (2x-1x-1x4xf32) <- ([2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32, 2x-1x-1x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_2, full_9) - del combine_2, full_9 - - # pd_op.min: (2x-1x-1xf32) <- (2x-1x-1x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0, full_int_array_4 - - # pd_op.full: (xf32) <- () - full_10 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (2x-1x-1xb) <- (2x-1x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_10) - del full_10, min_0 - - # pd_op.cast: (2x-1x-1xf32) <- (2x-1x-1xb) - cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, cast_2) - - # pd_op.shape64: (3xi64) <- (2x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(multiply_1) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] - ) - del full_int_array_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] - ) - del full_int_array_0, full_int_array_6, shape64_0 - - # pd_op.full: (1xi32) <- () - full_11 = paddle._C_ops.full( - [1], float("13"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.topk: (2x-1x13xf32, 2x-1x13xi64) <- (2x-1x-1xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(multiply_1, full_11, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del full_11, multiply_1 - - # pd_op.one_hot: (2x-1x13x-1xf32) <- (2x-1x13xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 - ) - del slice_5, topk_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] - - # pd_op.sum: (2x-1x-1xf32) <- (2x-1x13x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_2 = paddle._C_ops.multiply(sum_0, data_6) - del sum_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(multiply_2, cast_2) - del cast_2, multiply_2 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(multiply_3, data_6) - del data_6, multiply_3 - - # pd_op.sum: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_7, None, False) - del full_int_array_7 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] - - # pd_op.max: (xf32) <- (2x-1xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_1, full_int_array_8, False) - del full_int_array_8 - - # pd_op.full: (xf32) <- () - full_12 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) 
- del divide_0, full_12, max_0, multiply_0, multiply_4, sum_1, unsqueeze_2 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt deleted file mode 100644 index 1b08335a3..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -999d81ae5f8f7aa80107216e32ce8e5d9c0b867a696357b475912426f0891658 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py deleted file mode 100644 index 4910e2ad1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/model.py +++ /dev/null @@ -1,162 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2, data_3): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_2, data_3) - del data_2 - - # pd_op.shape64: (3xi64) <- (2x-1x68xf32) - shape64_0 = paddle._C_ops.shape64(data_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (2x-1x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_1, stack_0) - del data_1, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: 
(-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.share_data_: (2x-1x4xf32) <- (2x-1x4xf32) - share_data__0 = data_0.detach() - del data_0 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_3, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt deleted file mode 100644 index 88f716dff..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json deleted file mode 100644 index d8719c2c9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE-S_vehicle", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py deleted file mode 100644 index 301a09c22..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/model.py +++ /dev/null @@ -1,34 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0): - # pd_op.full: (xi32) <- () - full_0 = paddle._C_ops.full( - [], float("4"), paddle.int32, 
paddle.framework._current_expected_place()
-        )
-
-        # pd_op.not_equal: (2x-1xb) <- (2x-1xi32, xi32)
-        not_equal_0 = paddle._C_ops.not_equal(data_0, full_0)
-        del data_0, full_0
-
-        # pd_op.full_int_array: (0xi64) <- ()
-        full_int_array_0 = []
-
-        # pd_op.sum: (xi64) <- (2x-1xb, 0xi64)
-        sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False)
-        del full_int_array_0
-
-        # pd_op.full: (xi64) <- ()
-        full_1 = paddle._C_ops.full(
-            [], float("0"), paddle.int64, paddle.framework._current_expected_place()
-        )
-
-        # pd_op.greater_than: (xb) <- (xi64, xi64)
-        greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1)
-        del full_1, not_equal_0, sum_0
-
-        return greater_than_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt
deleted file mode 100644
index 27bd82e0e..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_hash.txt
+++ /dev/null
@@ -1 +0,0 @@
-8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json
deleted file mode 100644
index d8719c2c9..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/graph_net.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "framework": "paddle",
-    "model_name": "PP-YOLOE-S_vehicle",
-    "num_devices_required": 1,
-    "num_nodes_required": 1
-}
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py
deleted file mode 100644
index 5db76f83d..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/model.py
+++ /dev/null
@@ -1,509 +0,0 @@
-import paddle
-
-
-class GraphModule(paddle.nn.Layer):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6):
-        # pd_op.cast: (2x-1xi32) <- (2x-1xb)
-        cast_0 = paddle._C_ops.cast(data_0, paddle.int32)
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_0 = [-1]
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_0 = full_int_array_0
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_1 = full_int_array_0
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_2 = full_int_array_0
-
-        # pd_op.unsqueeze: (2x-1x1xi32) <- (2x-1xi32, 1xi64)
-        unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0)
-        del cast_0
-
-        # pd_op.full_int_array: (3xi64) <- ()
-        full_int_array_1 = [1, 1, 4]
-
-        # pd_op.tile: (2x-1x4xi32) <- (2x-1x1xi32, 3xi64)
-        tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1)
-        del full_int_array_1, unsqueeze_0
-
-        # pd_op.cast: (2x-1x4xb) <- (2x-1x4xi32)
-        cast_1 = paddle._C_ops.cast(tile_0, paddle.bool)
-        del tile_0
-
-        # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb)
-        masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1)
-        del data_1
-
-        # pd_op.full_int_array: (2xi64) <- ()
-        full_int_array_2 = [-1, 4]
-
-        # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64)
-        reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2)
-
-        # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb)
-        masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1)
-
-        # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64)
-        reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2)
-        del masked_select_1
-
-        # pd_op.sum: (2x-1xf32) <- (2x-1x4xf32, 1xi64)
-        sum_0 = paddle._C_ops.sum(data_3,
full_int_array_0, None, False) - del data_3 - - # pd_op.masked_select: (-1xf32) <- (2x-1xf32, 2x-1xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 - - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) - - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) - - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, split_4) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - 
del subtract_5, subtract_6 - - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] - - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) - - # pd_op.unsqueeze: (2x-1x1xb) <- (2x-1xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 - - # pd_op.cast: (2x-1x1xi32) <- (2x-1x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 68] - - # pd_op.tile: (2x-1x68xi32) <- (2x-1x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 - - # pd_op.cast: (2x-1x68xb) <- (2x-1x68xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 - - # pd_op.masked_select: (-1xf32) <- (2x-1x68xf32, 2x-1x68xb) - masked_select_3 = 
paddle._C_ops.masked_select(data_5, cast_3) - del data_5 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 17] - - # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 - - # builtin.split: (2x-1x2xf32, 2x-1x2xf32) <- ([2x-1x2xf32, 2x-1x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 - - # pd_op.subtract: (2x-1x2xf32) <- (-1x2xf32, 2x-1x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 - - # pd_op.subtract: (2x-1x2xf32) <- (2x-1x2xf32, -1x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x-1x2xf32, 2x-1x2xf32]) <- (2x-1x2xf32, 2x-1x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 - - # pd_op.concat: (2x-1x4xf32) <- ([2x-1x2xf32, 2x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (2x-1x4xf32) <- (2x-1x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) - del concat_0, full_7 - - # pd_op.masked_select: (-1xf32) <- (2x-1x4xf32, 2x-1x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 - - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) - - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) - - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 - - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) - del cast_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) - - # 
pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( - abs_0, - add_0, - add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) - - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt index 21b305551..04a94e06c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ -700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file +bad9511933297116e3f564919a85809c5c29647d857a25c4a19f4a67e2c0b51c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py index fd9b56629..09671b32f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/input_meta.py @@ -1,27 +1,134 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [8, 4116, 4] + shape = [1] dtype = "float32" - min_val = float("0.01") - max_val = 
float("0.01") - mean = float("0.01") - data = None + data = [1.00241] class Program_weight_tensor_data_1: name = "data_1" - shape = [8, 4116] - dtype = "int32" - min_val = 0 - max_val = 4 - data = None + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_2: name = "data_2" - shape = [8, 4116, 4] + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1] + dtype = "float32" + data = [1.00236] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [2, 3, 640, 640] dtype = "float32" - max_val = float("0.947339") - mean = float("0.000280391") - std = float("0.0157366") + max_val = float("1.0") + mean = float("0.471598") + std = float("0.270715") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py index a899ea187..1eb1d0609 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/model.py @@ -5,106 +5,7275 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + 
parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + 
parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + 
parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + 
parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + ): + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_18, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_18, parameter_752 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_747, swish_0 + + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 + + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + 
swish_1, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_742, swish_1 + + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_741, + parameter_740, + parameter_739, + parameter_738, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 + + # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_737, swish_2 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727, swish_3 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del conv2d_5, parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712, swish_6 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = 
paddle._C_ops.conv2d( + add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697, swish_8 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del batch_norm__66, data_1 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, 
+ batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_12, parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682, swish_10 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del batch_norm__84, data_2 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + del batch_norm__78, multiply_2 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 + + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 + + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 
96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_677 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) + del parameter_676 + + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 + + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_3, parameter_675 + + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 + + # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_670, swish_12 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_665 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660, swish_13 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_655 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645, swish_16 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + 
batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del batch_norm__126, data_3 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630, swish_18 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del batch_norm__144, data_4 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615, swish_20 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del batch_norm__162, 
data_5 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_29, parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600, swish_22 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del batch_norm__180, data_6 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + del batch_norm__174, multiply_7 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 + + 
# pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_24, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585, swish_24 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del batch_norm__198, data_7 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + del batch_norm__192, multiply_8 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_580, [1, 1], [1, 
1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570, swish_26 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del batch_norm__216, data_8 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + del batch_norm__210, multiply_9 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, 
True) + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_565 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) + del parameter_564 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 + + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_10, parameter_563 + + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 + + # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_558 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_553 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548, swish_29 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533, swish_32 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, 
+ batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del batch_norm__258, data_9 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518, swish_34 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_517, + parameter_516, + parameter_515, + 
parameter_514, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del batch_norm__276, data_10 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + del batch_norm__270, multiply_12 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_36, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503, swish_36 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_499, 
parameter_500, parameter_501, parameter_502 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del batch_norm__294, data_11 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + del batch_norm__288, multiply_13 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488, swish_38 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del batch_norm__312, data_12 + + # pd_op.add: 
(-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + del batch_norm__306, multiply_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473, swish_40 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del batch_norm__330, data_13 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + del batch_norm__324, multiply_15 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_41 = 
paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458, swish_42 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del batch_norm__348, data_14 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + del batch_norm__342, multiply_16 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 + + # builtin.combine: ([-1x192x-1x-1xf32, 
-1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_453 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_452, full_int_array_1) + del parameter_452 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 + + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_17, parameter_451 + + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 + + # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_446 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_441 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436, swish_45 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_422, parameter_423, parameter_424, 
parameter_425 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421, swish_48 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del batch_norm__390, data_15 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + del batch_norm__384, multiply_18 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_50, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406, 
swish_50 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del batch_norm__408, data_16 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + del batch_norm__402, multiply_19 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_52, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391, swish_52 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + 
batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del batch_norm__426, data_17 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + del batch_norm__420, multiply_20 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_386 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) + del parameter_385 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 + + # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_384, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_21, parameter_384 + + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_383, + parameter_382, + parameter_381, + parameter_380, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_380, parameter_381, parameter_382, parameter_383 + + # pd_op.swish: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 
1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, swish_54 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_359, swish_57 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_40) + del add_40 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + 
batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_41) + del add_41 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (-1x1536x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_42) + del add_42 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_5 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x512x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x512x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (-1x896x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + 
batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, batch_norm__546 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_69 = 
paddle._C_ops.swish(add_43) + del add_43 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_44) + del add_44 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + 
batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_45) + del add_45 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_7 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), 
+ "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + 
True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234, swish_77 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_46) + del add_46 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_47) + del add_47 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + 
else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_48) + del add_48 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_9 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_84 = 
paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) + combine_10 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_169, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_49) + del add_49 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_50) + del add_50 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_91 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_51) + del add_51 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_11 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) 
+ ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x768x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x768x-1x-1xf32) + combine_12 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_109 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104, swish_97 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + 
parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_52) + del add_52 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = 
paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_79, swish_100 - # pd_op.one_hot: (8x4116x5xf32) <- (8x4116xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - data_1 % paddle.cast(full_0, data_1.dtype), full_0 + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) - del data_1, full_0 + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_53) + del add_53 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_101 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + 
batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_102 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_54) + del add_54 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_13 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_104) # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + full_int_array_5 = [0] # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + full_int_array_6 = [1] - # pd_op.slice: (8x4116x4xf32) <- (8x4116x5xf32, 1xi64, 1xi64) + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] ) - del full_int_array_0, full_int_array_1, one_hot_0 + del shape64_0 - # pd_op.pow: (8x4116x4xf32) <- (8x4116x4xf32) - 
pow_0 = paddle._C_ops.pow(data_0, float("2")) + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_104) - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] ) + del shape64_1 - # pd_op.scale: (8x4116x4xf32) <- (8x4116x4xf32, 1xf32) - scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) - del pow_0 + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_104) - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_2 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_22 = paddle._C_ops.multiply(slice_1, slice_2) + del slice_1, slice_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [1, 1] + + # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_55 = paddle._C_ops.add(conv2d_142, reshape_4) + del conv2d_142, reshape_4 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_55) + del add_55 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_23 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_23, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_23, parameter_52 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x-1x-1xf32) + add_56 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x768x-1x-1xf32, 
4x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_56, parameter_47 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_57 = paddle._C_ops.add(conv2d_144, reshape_5) + del conv2d_144, reshape_5 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 - # pd_op.scale: (8x4116x4xf32) <- (8x4116x4xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) - del full_2 + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_58 = paddle._C_ops.add(conv2d_145, reshape_6) + del conv2d_145, reshape_6 - # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) - multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_58) + del add_58 - # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) - multiply_1 = paddle._C_ops.multiply(data_2, slice_0) - del slice_0 + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_24 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 - # pd_op.add: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) - add_0 = paddle._C_ops.add(multiply_0, multiply_1) + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_24, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_24, parameter_43 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_146, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 - # pd_op.bce_loss: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) - bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) - del data_0 + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x768x-1x-1xf32, 68x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_106 - # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x4xf32) - multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_2 = [] + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_59 = paddle._C_ops.add(conv2d_147, reshape_7) + del conv2d_147, 
reshape_7 - # pd_op.sum: (xf32) <- (8x4116x4xf32, 0xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.sum: (xf32) <- (8x4116x4xf32, 0xi64) - sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) - del data_2 + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.full: (1xf32) <- () + # pd_op.full: (xi64) <- () full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [], float("17"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_22] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_59, stack_0) + del add_59, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del softmax_0 - # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) - del full_3, full_4, sum_1 + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_6) + del conv2d_148 - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_0, clip_0) - del ( - add_0, - bce_loss_0, - clip_0, - full_1, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - scale_0, - scale_1, - sum_0, + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_57) + del add_57 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_22] + del multiply_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_25 = 
paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del parameter_34 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_60 = paddle._C_ops.add(conv2d_149, reshape_10) + del conv2d_149, reshape_10 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_60) + del add_60 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_26 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_150 = paddle._C_ops.conv2d( + multiply_26, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_26, parameter_33 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_150, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_61 = paddle._C_ops.add(swish_107, swish_94) + del swish_107 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x384x-1x-1xf32, 4x384x3x3xf32) + conv2d_151 = paddle._C_ops.conv2d( + add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_61, parameter_28 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_62 = paddle._C_ops.add(conv2d_151, reshape_11) + del conv2d_151, reshape_11 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_63 = paddle._C_ops.add(conv2d_152, reshape_12) + del conv2d_152, reshape_12 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_63) + del add_63 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_27 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- 
(-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + multiply_27, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_27, parameter_24 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x384x-1x-1xf32, 68x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_108 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_64 = paddle._C_ops.add(conv2d_154, reshape_13) + del conv2d_154, reshape_13 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_25] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_64, stack_2) + del add_64, stack_2 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_6) + del conv2d_155 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_62) + del add_62 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_25] + del multiply_25 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_6 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del full_int_array_7, shape64_7 
+ + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del full_int_array_8, full_int_array_9, shape64_8 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_28 = paddle._C_ops.multiply(slice_7, slice_8) + del slice_7, slice_8 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_10, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_10 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_65 = paddle._C_ops.add(conv2d_156, reshape_16) + del conv2d_156, reshape_16 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_65) + del add_65 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_29 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_157 = paddle._C_ops.conv2d( + multiply_29, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_29, parameter_15 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_157, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_66 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 + + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x192x-1x-1xf32, 4x192x3x3xf32) + conv2d_158 = paddle._C_ops.conv2d( + add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_66, parameter_10 + + # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_67 = paddle._C_ops.add(conv2d_158, reshape_17) + del conv2d_158, reshape_17 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_68 = 
paddle._C_ops.add(conv2d_159, reshape_18) + del conv2d_159, reshape_18 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_68) + del add_68 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_30 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_160 = paddle._C_ops.conv2d( + multiply_30, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_30, parameter_6 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_160, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x192x-1x-1xf32, 68x192x3x3xf32) + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_110 + + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_69 = paddle._C_ops.add(conv2d_161, reshape_19) + del conv2d_161, reshape_19 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_28] + del full_3 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_69, stack_4) + del add_69, stack_4 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_6) + del conv2d_162, full_int_array_6 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_67) + del add_67 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_28] + del full_1, full_2, multiply_28 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 + + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, 
paddle.core.CPUPlace() ) - return divide_0 + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 + + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py index 8b1378917..86b97343c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_0/weight_meta.py @@ -1 +1,8161 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.00996713") + max_val = float("0.0296165") + mean = float("1.85551e-07") + std = float("0.00658747") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.132365") + max_val = float("0.153351") + mean = float("5.83823e-08") + std = float("0.0069927") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0440964") + max_val = float("0.204248") + mean = float("0.0505266") + std = float("0.0396607") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.854628") + max_val = float("1.623") + mean = float("1.22222") + std = float("0.143373") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000124474") + max_val = float("0.00268664") + mean = float("0.000430455") + std = float("0.00032986") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0352297") + max_val = float("0.0298138") + mean = float("-0.00349452") + std = float("0.0106524") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0520579") + max_val = float("0.075326") + mean = float("-0.00011553") + std = float("0.00541501") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00468623") + max_val = float("0.00853988") + mean = float("3.18341e-05") + std = float("0.00259726") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.00533172") + max_val = float("0.00945863") + mean = float("-9.35044e-05") + std = float("0.00138579") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 
192, 3, 3] + dtype = "float32" + min_val = float("-8.0039e-06") + max_val = float("0.00022716") + mean = float("1.07103e-05") + std = float("1.95264e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.327873") + max_val = float("0.892505") + mean = float("0.35844") + std = float("0.270005") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.0197") + max_val = float("1.7745") + mean = float("1.31881") + std = float("0.141386") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000191765") + max_val = float("0.00418178") + mean = float("0.00073364") + std = float("0.000566711") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.171816") + max_val = float("0.0388429") + mean = float("-0.024799") + std = float("0.0310049") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0758177") + max_val = float("0.0688388") + mean = float("-0.000505692") + std = float("0.0065208") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0046322") + max_val = float("0.00955243") + mean = float("-0.0001086") + std = float("0.00181395") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0166797") + max_val = float("0.0150515") + mean = float("-1.40907e-05") + std = float("0.00152728") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00414429") + max_val = float("0.0248245") + mean = float("1.70752e-07") + std = float("0.00516597") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0881466") + max_val = float("0.115817") + mean = float("3.10683e-08") + std = float("0.00468137") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00505458") + max_val = float("0.0679229") + mean = float("0.025356") + std = float("0.0129557") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("1.00103") + max_val = float("1.23541") + mean = float("1.10699") + std = float("0.0406781") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("6.77016e-05") + max_val = float("0.00289487") + mean = float("0.00031114") + std = float("0.00031604") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0401932") + max_val = float("0.0131579") + mean = float("-0.00637696") + std = float("0.00739159") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0495062") + max_val = float("0.06518") + mean = float("-8.72216e-05") + std = float("0.00262797") + 
data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00259419") + max_val = float("0.00557747") + mean = float("9.36863e-05") + std = float("0.00147316") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00175705") + max_val = float("0.00490764") + mean = float("1.06471e-05") + std = float("0.000587436") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 384, 3, 3] + dtype = "float32" + min_val = float("-2.18712e-06") + max_val = float("5.28604e-05") + mean = float("1.78963e-06") + std = float("3.72888e-06") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.150383") + max_val = float("0.452458") + mean = float("0.229981") + std = float("0.0998846") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00536") + max_val = float("1.40175") + mean = float("1.18904") + std = float("0.0599888") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000159485") + max_val = float("0.0036339") + mean = float("0.000711029") + std = float("0.000576938") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.108726") + max_val = float("0.0565601") + mean = float("-0.0264711") + std = float("0.0221625") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0481454") + max_val = float("0.0448686") + mean = float("-0.000359909") + std = float("0.00296123") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00204396") + max_val = float("0.0090588") + mean = float("-3.67064e-06") + std = float("0.000961893") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00523821") + max_val = float("0.00886045") + mean = float("-4.69801e-06") + std = float("0.000620937") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68] + dtype = "float32" + min_val = float("-0.00290911") + max_val = float("0.0102058") + mean = float("1.30967e-07") + std = float("0.00299753") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0412324") + max_val = float("0.0740684") + mean = float("1.4179e-08") + std = float("0.00274764") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("-0.0142152") + max_val = float("0.0471953") + mean = float("0.011051") + std = float("0.0102595") + data = None + + 
+class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("1.01074") + max_val = float("1.20195") + mean = float("1.0671") + std = float("0.0223299") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("3.69016e-05") + max_val = float("0.00134973") + mean = float("0.000155962") + std = float("0.00011097") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768] + dtype = "float32" + min_val = float("-0.0238644") + max_val = float("0.00811534") + mean = float("-0.00384896") + std = float("0.00342466") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.035485") + max_val = float("0.0312209") + mean = float("-3.51706e-05") + std = float("0.00119332") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768] + dtype = "float32" + min_val = float("-0.00351526") + max_val = float("0.00217768") + mean = float("0.00010496") + std = float("0.000670444") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00237088") + max_val = float("0.00289222") + mean = float("2.74817e-05") + std = float("0.000210228") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [4, 768, 3, 3] + dtype = "float32" + min_val = float("-1.14922e-05") + max_val = float("0.000123784") + mean = float("4.64809e-06") + std = float("9.90396e-06") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("-0.109578") + max_val = float("0.200768") + mean = float("0.093855") + std = float("0.0421135") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("1.00954") + max_val = float("1.25402") + mean = float("1.08094") + std = float("0.0259851") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("9.71614e-05") + max_val = float("0.00324689") + mean = float("0.000620876") + std = float("0.000449642") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768] + dtype = "float32" + min_val = float("-0.0505123") + max_val = float("0.0945691") + mean = float("-0.0192848") + std = float("0.0111623") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0486782") + max_val = float("0.031813") + mean = float("-0.000184113") + std = float("0.00129984") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768] + dtype = "float32" + min_val = float("-0.00524145") + max_val = float("0.00429624") + mean = float("1.59716e-05") + std = float("0.00044354") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0191194") + max_val = float("0.0353741") + mean = float("5.41618e-06") + 
std = float("0.000293837") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [768] + dtype = "float32" + min_val = float("-0.175306") + max_val = float("0.211594") + mean = float("0.0847743") + std = float("0.0563497") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [768] + dtype = "float32" + min_val = float("0.939778") + max_val = float("1.2976") + mean = float("1.06639") + std = float("0.0311293") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [768] + dtype = "float32" + min_val = float("0.00100313") + max_val = float("0.03162") + mean = float("0.00343704") + std = float("0.00260456") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [768] + dtype = "float32" + min_val = float("-0.150191") + max_val = float("0.100073") + mean = float("-0.0236812") + std = float("0.0231298") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.048459") + max_val = float("0.0301639") + mean = float("-0.000116837") + std = float("0.00198315") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.138639") + max_val = float("0.0301922") + mean = float("-0.0181041") + std = float("0.0228687") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.949936") + max_val = float("1.04659") + mean = float("0.989067") + std = float("0.0104758") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.000424965") + max_val = float("0.00769354") + mean = float("0.00208629") + std = float("0.00118364") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.0576822") + max_val = float("0.0495397") + mean = float("0.000801726") + std = float("0.0154888") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.030027") + max_val = float("0.0156948") + mean = float("1.51312e-07") + std = float("0.00151823") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.138639") + max_val = float("0.0301921") + mean = float("-0.0181041") + std = float("0.0228687") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.969953") + max_val = float("1.13144") + mean = float("1.01743") + std = float("0.0170265") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00123184") + max_val = float("0.0151822") + mean = float("0.00352376") + std = float("0.00156689") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.113443") + max_val = float("0.119366") + mean = float("-0.0249755") + std = float("0.0249942") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0252708") + 
max_val = float("0.0260981") + mean = float("-4.55984e-05") + std = float("0.00102241") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [384] + dtype = "float32" + min_val = float("-0.167348") + max_val = float("0.0194716") + mean = float("-0.0338105") + std = float("0.0272605") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [384] + dtype = "float32" + min_val = float("0.977422") + max_val = float("1.12785") + mean = float("1.01688") + std = float("0.0237162") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("0.0030012") + max_val = float("0.0356644") + mean = float("0.00889859") + std = float("0.00464026") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384] + dtype = "float32" + min_val = float("-0.187143") + max_val = float("0.21034") + mean = float("-0.0263546") + std = float("0.034034") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0247522") + max_val = float("0.0417196") + mean = float("-4.63696e-05") + std = float("0.00115897") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [384] + dtype = "float32" + min_val = float("-0.102213") + max_val = float("0.0125531") + mean = float("-0.0345887") + std = float("0.0187949") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [384] + dtype = "float32" + min_val = float("0.949338") + max_val = float("1.04659") + mean = float("0.991003") + std = float("0.00965035") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [384] + dtype = "float32" + min_val = float("0.000650704") + max_val = float("0.00768371") + mean = float("0.00277456") + std = float("0.00139097") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [384] + dtype = "float32" + min_val = float("-0.068838") + max_val = float("0.0330406") + mean = float("-0.00344055") + std = float("0.0126807") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0239432") + max_val = float("0.0196078") + mean = float("-6.64687e-05") + std = float("0.00158521") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [384] + dtype = "float32" + min_val = float("-0.102213") + max_val = float("0.0125531") + mean = float("-0.0345887") + std = float("0.0187949") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [384] + dtype = "float32" + min_val = float("0.961887") + max_val = float("1.10634") + mean = float("1.01796") + std = float("0.0175095") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [384] + dtype = "float32" + min_val = float("0.0018589") + max_val = float("0.0216184") + mean = float("0.00498132") + std = float("0.00238917") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [384] + dtype = "float32" + min_val = float("-0.113641") + max_val = float("0.132971") + mean = float("-0.0350247") + std = float("0.0275334") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [384, 
384, 3, 3] + dtype = "float32" + min_val = float("-0.0306037") + max_val = float("0.039106") + mean = float("-6.35244e-05") + std = float("0.00105171") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [384] + dtype = "float32" + min_val = float("-0.087894") + max_val = float("0.0187151") + mean = float("-0.035007") + std = float("0.0190606") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [384] + dtype = "float32" + min_val = float("0.937686") + max_val = float("1.1153") + mean = float("1.01357") + std = float("0.0260763") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [384] + dtype = "float32" + min_val = float("0.00329362") + max_val = float("0.0370326") + mean = float("0.00917159") + std = float("0.004535") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [384] + dtype = "float32" + min_val = float("-0.116448") + max_val = float("0.0690271") + mean = float("-0.0174781") + std = float("0.0305536") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0356642") + max_val = float("0.0369726") + mean = float("-4.09478e-05") + std = float("0.0012155") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [384] + dtype = "float32" + min_val = float("-0.113264") + max_val = float("0.0139328") + mean = float("-0.0360276") + std = float("0.0194684") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [384] + dtype = "float32" + min_val = float("0.932607") + max_val = float("1.02926") + mean = float("0.989413") + std = float("0.0107521") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [384] + dtype = "float32" + min_val = float("0.000850876") + max_val = float("0.00768634") + mean = float("0.00328168") + std = float("0.00126507") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [384] + dtype = "float32" + min_val = float("-0.0328926") + max_val = float("0.0412224") + mean = float("-0.00745687") + std = float("0.010839") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246626") + max_val = float("0.0246294") + mean = float("-0.000134739") + std = float("0.00162451") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [384] + dtype = "float32" + min_val = float("-0.113264") + max_val = float("0.0139328") + mean = float("-0.0360276") + std = float("0.0194684") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [384] + dtype = "float32" + min_val = float("0.983995") + max_val = float("1.10634") + mean = float("1.02014") + std = float("0.0217389") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [384] + dtype = "float32" + min_val = float("0.00264665") + max_val = float("0.023121") + mean = float("0.00648167") + std = float("0.00274806") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [384] + dtype = "float32" + min_val = float("-0.123387") + max_val = float("0.0727253") + mean = float("-0.0176321") + std = float("0.0264909") + data = None + + +class 
Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0270575") + max_val = float("0.0462384") + mean = float("-3.55445e-05") + std = float("0.00113617") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [384] + dtype = "float32" + min_val = float("-0.104816") + max_val = float("0.0223762") + mean = float("-0.0364198") + std = float("0.0208798") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [384] + dtype = "float32" + min_val = float("0.94874") + max_val = float("1.1165") + mean = float("1.01377") + std = float("0.0272987") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [384] + dtype = "float32" + min_val = float("0.00343716") + max_val = float("0.0376123") + mean = float("0.0090883") + std = float("0.00464666") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [384] + dtype = "float32" + min_val = float("-0.10852") + max_val = float("0.127486") + mean = float("-0.0306965") + std = float("0.0372253") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0198386") + max_val = float("0.030936") + mean = float("-5.54577e-05") + std = float("0.00127916") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [384] + dtype = "float32" + min_val = float("-0.104081") + max_val = float("0.0447507") + mean = float("-0.0253876") + std = float("0.0148924") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [384] + dtype = "float32" + min_val = float("0.976825") + max_val = float("1.08842") + mean = float("1.01085") + std = float("0.0167894") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [384] + dtype = "float32" + min_val = float("0.00194344") + max_val = float("0.0134659") + mean = float("0.00358093") + std = float("0.00122632") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [384] + dtype = "float32" + min_val = float("-0.0628121") + max_val = float("0.0450731") + mean = float("-0.0156591") + std = float("0.0180011") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0512204") + max_val = float("0.054272") + mean = float("-7.40727e-05") + std = float("0.00193913") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [384] + dtype = "float32" + min_val = float("-0.0410536") + max_val = float("0.01587") + mean = float("-0.0085433") + std = float("0.00822821") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [384] + dtype = "float32" + min_val = float("0.964874") + max_val = float("1.05256") + mean = float("1.00968") + std = float("0.0113002") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [384] + dtype = "float32" + min_val = float("0.00148058") + max_val = float("0.0172515") + mean = float("0.00279498") + std = float("0.00125745") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [384] + dtype = "float32" + min_val = float("-0.0613616") + max_val = 
float("0.0450121") + mean = float("-0.0183066") + std = float("0.0175678") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0416152") + max_val = float("0.034011") + mean = float("-8.95819e-05") + std = float("0.00171404") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [384] + dtype = "float32" + min_val = float("-0.0517357") + max_val = float("0.00607691") + mean = float("-0.0160479") + std = float("0.00958104") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [384] + dtype = "float32" + min_val = float("0.990579") + max_val = float("1.10515") + mean = float("1.02122") + std = float("0.0166168") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [384] + dtype = "float32" + min_val = float("0.00250794") + max_val = float("0.0303554") + mean = float("0.00898847") + std = float("0.00426596") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [384] + dtype = "float32" + min_val = float("-0.256873") + max_val = float("0.206661") + mean = float("-0.0286502") + std = float("0.0560866") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0249441") + max_val = float("0.0277039") + mean = float("-1.94354e-05") + std = float("0.00104416") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [384] + dtype = "float32" + min_val = float("-0.222759") + max_val = float("0.492934") + mean = float("0.21931") + std = float("0.123919") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [384] + dtype = "float32" + min_val = float("0.926931") + max_val = float("1.47785") + mean = float("1.14418") + std = float("0.0736893") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [384] + dtype = "float32" + min_val = float("0.00264483") + max_val = float("0.0974121") + mean = float("0.00798104") + std = float("0.00654548") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [384] + dtype = "float32" + min_val = float("-0.159565") + max_val = float("0.121315") + mean = float("-0.0255922") + std = float("0.03015") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0876303") + max_val = float("0.0602537") + mean = float("-0.000242493") + std = float("0.00453291") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.164436") + max_val = float("0.0462449") + mean = float("-0.0239848") + std = float("0.0389306") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.846263") + max_val = float("1.05376") + mean = float("0.97547") + std = float("0.0236593") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.000579722") + max_val = float("0.0224778") + mean = float("0.00388016") + std = float("0.00265473") + data = None + + +class Program_weight_tensor_parameter_128: + name = 
"parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.0510712") + max_val = float("0.0759894") + mean = float("-0.00386027") + std = float("0.0154973") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.050302") + max_val = float("0.0287473") + mean = float("-0.000136137") + std = float("0.00336423") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.164436") + max_val = float("0.0462449") + mean = float("-0.0239848") + std = float("0.0389306") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.736017") + max_val = float("1.12247") + mean = float("1.02412") + std = float("0.0366284") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00276112") + max_val = float("0.0194419") + mean = float("0.0066911") + std = float("0.00265579") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.16004") + max_val = float("0.0998512") + mean = float("-0.0225358") + std = float("0.0339102") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444225") + max_val = float("0.0402511") + mean = float("-7.11892e-05") + std = float("0.00226777") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-0.188768") + max_val = float("0.0421971") + mean = float("-0.0567874") + std = float("0.0481367") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("0.900639") + max_val = float("1.18283") + mean = float("1.01749") + std = float("0.0479958") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("0.00535257") + max_val = float("0.0763801") + mean = float("0.0170376") + std = float("0.0105864") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192] + dtype = "float32" + min_val = float("-0.240955") + max_val = float("0.297589") + mean = float("-0.0264239") + std = float("0.0412806") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0580625") + max_val = float("0.0640828") + mean = float("-9.47042e-05") + std = float("0.00257609") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("-0.188722") + max_val = float("0.00866455") + mean = float("-0.0625057") + std = float("0.0326874") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.925735") + max_val = float("1.04838") + mean = float("0.976103") + std = float("0.0176867") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("0.00136816") + max_val = float("0.0126086") + mean = float("0.00472105") + std = 
float("0.00208163") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192] + dtype = "float32" + min_val = float("-0.0491501") + max_val = float("0.0347549") + mean = float("-0.0081364") + std = float("0.0128259") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0432741") + max_val = float("0.0265813") + mean = float("-0.0003395") + std = float("0.0033694") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("-0.188722") + max_val = float("0.00866455") + mean = float("-0.0625057") + std = float("0.0326874") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.972045") + max_val = float("1.15056") + mean = float("1.02587") + std = float("0.028794") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("0.00273726") + max_val = float("0.035953") + mean = float("0.00833297") + std = float("0.004788") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192] + dtype = "float32" + min_val = float("-0.106149") + max_val = float("0.117302") + mean = float("-0.0227699") + std = float("0.0288669") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0431991") + max_val = float("0.0568112") + mean = float("-8.61466e-05") + std = float("0.00241602") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("-0.186994") + max_val = float("0.0596707") + mean = float("-0.0739558") + std = float("0.0398551") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("0.884207") + max_val = float("1.2133") + mean = float("1.0167") + std = float("0.0500698") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("0.00600039") + max_val = float("0.0445964") + mean = float("0.0130705") + std = float("0.00628092") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192] + dtype = "float32" + min_val = float("-0.0739679") + max_val = float("0.0406422") + mean = float("-0.0181695") + std = float("0.022831") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0454997") + max_val = float("0.0702924") + mean = float("-8.84971e-05") + std = float("0.0027379") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("-0.223812") + max_val = float("-0.011848") + mean = float("-0.0808429") + std = float("0.0411614") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.905395") + max_val = float("1.02866") + mean = float("0.977772") + std = float("0.0224939") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = 
float("0.00182472") + max_val = float("0.0140011") + mean = float("0.00488018") + std = float("0.00160896") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192] + dtype = "float32" + min_val = float("-0.0421578") + max_val = float("0.0455086") + mean = float("-0.0102449") + std = float("0.0184866") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0390054") + max_val = float("0.0688211") + mean = float("-0.000451601") + std = float("0.00386161") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("-0.223812") + max_val = float("-0.011848") + mean = float("-0.0808429") + std = float("0.0411614") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.949933") + max_val = float("1.11112") + mean = float("1.02292") + std = float("0.030073") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("0.00467925") + max_val = float("0.0479479") + mean = float("0.0111132") + std = float("0.00613812") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192] + dtype = "float32" + min_val = float("-0.101233") + max_val = float("0.0755529") + mean = float("-0.0174453") + std = float("0.0302045") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0474729") + max_val = float("0.0539855") + mean = float("-7.31038e-05") + std = float("0.00259765") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("-0.228542") + max_val = float("0.0777608") + mean = float("-0.0924101") + std = float("0.0451672") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.890182") + max_val = float("1.19896") + mean = float("1.01861") + std = float("0.0532403") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("0.00579067") + max_val = float("0.0572229") + mean = float("0.0147046") + std = float("0.0085496") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192] + dtype = "float32" + min_val = float("-0.147872") + max_val = float("0.114807") + mean = float("-0.0323967") + std = float("0.0333687") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0419213") + max_val = float("0.0860108") + mean = float("-0.000108934") + std = float("0.0030019") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("-0.19326") + max_val = float("0.0143295") + mean = float("-0.0644048") + std = float("0.0302824") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.927945") + max_val = float("1.15109") + mean = float("1.015") + std = float("0.0376052") + data = None + + +class 
Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("0.00306597") + max_val = float("0.0203842") + mean = float("0.00643735") + std = float("0.00249811") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192] + dtype = "float32" + min_val = float("-0.0752295") + max_val = float("0.0935841") + mean = float("-0.020241") + std = float("0.0237735") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0570792") + max_val = float("0.0685417") + mean = float("-0.000178261") + std = float("0.0043567") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [192] + dtype = "float32" + min_val = float("-0.0981279") + max_val = float("0.0362243") + mean = float("-0.0135197") + std = float("0.0200556") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [192] + dtype = "float32" + min_val = float("0.92722") + max_val = float("1.19339") + mean = float("1.00446") + std = float("0.0253754") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [192] + dtype = "float32" + min_val = float("0.00242938") + max_val = float("0.0286347") + mean = float("0.00506947") + std = float("0.00262691") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [192] + dtype = "float32" + min_val = float("-0.0575542") + max_val = float("0.0378027") + mean = float("-0.0131132") + std = float("0.0183883") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0970742") + max_val = float("0.0665634") + mean = float("-0.000110766") + std = float("0.00377498") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [192] + dtype = "float32" + min_val = float("-0.156035") + max_val = float("-0.00048574") + mean = float("-0.0380548") + std = float("0.0212619") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [192] + dtype = "float32" + min_val = float("0.923345") + max_val = float("1.24736") + mean = float("1.00888") + std = float("0.029844") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [192] + dtype = "float32" + min_val = float("0.00440107") + max_val = float("0.0576727") + mean = float("0.013745") + std = float("0.00731603") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [192] + dtype = "float32" + min_val = float("-0.431982") + max_val = float("0.576694") + mean = float("-0.0311932") + std = float("0.102866") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0482483") + max_val = float("0.0395703") + mean = float("-2.80417e-05") + std = float("0.00235958") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192] + dtype = "float32" + min_val = float("-0.549518") + max_val = float("1.15667") + mean = float("0.361587") + std = float("0.347055") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [192] + dtype = "float32" + min_val = float("0.547838") + max_val = 
float("1.57375") + mean = float("1.15649") + std = float("0.184225") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [192] + dtype = "float32" + min_val = float("0.00263969") + max_val = float("0.178377") + mean = float("0.0146839") + std = float("0.0148158") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [192] + dtype = "float32" + min_val = float("-0.206901") + max_val = float("0.201284") + mean = float("-0.0220776") + std = float("0.0538491") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.122958") + max_val = float("0.0975332") + mean = float("-0.000422197") + std = float("0.00948692") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96] + dtype = "float32" + min_val = float("-0.45597") + max_val = float("0.225384") + mean = float("-0.00899429") + std = float("0.142447") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("0.768038") + max_val = float("1.23793") + mean = float("0.951894") + std = float("0.070433") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.001789") + max_val = float("0.0296245") + mean = float("0.00790415") + std = float("0.00529182") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("-0.0397087") + max_val = float("0.0776004") + mean = float("-0.00745076") + std = float("0.0174977") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0690797") + max_val = float("0.0479575") + mean = float("-0.000703745") + std = float("0.00708779") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96] + dtype = "float32" + min_val = float("-0.45597") + max_val = float("0.225384") + mean = float("-0.00899429") + std = float("0.142447") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("0.511673") + max_val = float("1.27365") + mean = float("1.03139") + std = float("0.0951454") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.00447103") + max_val = float("0.0391091") + mean = float("0.0147757") + std = float("0.00849336") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("-0.301622") + max_val = float("0.115808") + mean = float("-0.0166235") + std = float("0.0543423") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.074843") + max_val = float("0.0811793") + mean = float("-3.78438e-05") + std = float("0.00502658") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96] + dtype = "float32" + min_val = float("-0.699659") + max_val = float("0.489136") + mean = float("-0.110588") + std = float("0.19522") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] 
+ dtype = "float32" + min_val = float("0.729744") + max_val = float("1.6979") + mean = float("0.998907") + std = float("0.132252") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.00619322") + max_val = float("0.0753382") + mean = float("0.019023") + std = float("0.0147592") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("-0.164466") + max_val = float("0.123973") + mean = float("-0.0167661") + std = float("0.0517777") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0904058") + max_val = float("0.0674916") + mean = float("-0.000269681") + std = float("0.00563526") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96] + dtype = "float32" + min_val = float("-0.357406") + max_val = float("0.180909") + mean = float("-0.135683") + std = float("0.0938867") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("0.633758") + max_val = float("1.02328") + mean = float("0.910489") + std = float("0.054703") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.00226766") + max_val = float("0.0117187") + mean = float("0.00655901") + std = float("0.00211374") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("-0.0398978") + max_val = float("0.0295094") + mean = float("-0.00629756") + std = float("0.0138696") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0530557") + max_val = float("0.0589347") + mean = float("-0.000721103") + std = float("0.00726949") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96] + dtype = "float32" + min_val = float("-0.357406") + max_val = float("0.180909") + mean = float("-0.135683") + std = float("0.0938867") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("0.8186") + max_val = float("1.15653") + mean = float("1.02469") + std = float("0.059396") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.00540755") + max_val = float("0.0561564") + mean = float("0.0139166") + std = float("0.00841941") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("-0.0899866") + max_val = float("0.0314018") + mean = float("-0.0208995") + std = float("0.025033") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0688236") + max_val = float("0.0708707") + mean = float("-0.000262969") + std = float("0.00532725") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96] + dtype = "float32" + min_val = float("-0.480249") + max_val = float("0.160466") + mean = float("-0.163804") + std = float("0.129028") + data = None + + +class 
Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("0.78143") + max_val = float("1.29526") + mean = float("0.966656") + std = float("0.0977077") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0048265") + max_val = float("0.0582232") + mean = float("0.0117158") + std = float("0.00796343") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("-0.130681") + max_val = float("0.0398643") + mean = float("0.00105047") + std = float("0.0297261") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0838623") + max_val = float("0.0655946") + mean = float("-0.0002875") + std = float("0.00621097") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96] + dtype = "float32" + min_val = float("-0.482468") + max_val = float("0.0678707") + mean = float("-0.164086") + std = float("0.113081") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("0.729445") + max_val = float("1.00356") + mean = float("0.921722") + std = float("0.0524543") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.00433347") + max_val = float("0.0204491") + mean = float("0.00997535") + std = float("0.00321772") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("-0.0446457") + max_val = float("0.0320629") + mean = float("-0.0167648") + std = float("0.0172211") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0830632") + max_val = float("0.0609676") + mean = float("-0.00159531") + std = float("0.00857298") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96] + dtype = "float32" + min_val = float("-0.482468") + max_val = float("0.0678707") + mean = float("-0.164086") + std = float("0.113081") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("0.771572") + max_val = float("1.15355") + mean = float("0.986257") + std = float("0.0571516") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.00988202") + max_val = float("0.116393") + mean = float("0.0221825") + std = float("0.0148292") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("-0.11947") + max_val = float("0.0604604") + mean = float("-0.0128069") + std = float("0.0343308") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0988696") + max_val = float("0.0787178") + mean = float("-0.000160458") + std = float("0.00610999") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96] + dtype = "float32" + min_val = float("-0.55586") + max_val = float("0.346647") + mean = 
float("-0.174731") + std = float("0.169548") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("0.757047") + max_val = float("1.34058") + mean = float("0.957428") + std = float("0.110982") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0072924") + max_val = float("0.0565885") + mean = float("0.0158637") + std = float("0.0105653") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("-0.114767") + max_val = float("0.17063") + mean = float("-0.00772215") + std = float("0.0609158") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.117416") + max_val = float("0.106153") + mean = float("-0.000121096") + std = float("0.00709328") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96] + dtype = "float32" + min_val = float("-0.61179") + max_val = float("0.578869") + mean = float("-0.0791862") + std = float("0.248136") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("0.660131") + max_val = float("1.23003") + mean = float("0.871774") + std = float("0.112507") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.00710491") + max_val = float("0.0445585") + mean = float("0.0147412") + std = float("0.00680549") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("-0.0931176") + max_val = float("0.0541358") + mean = float("-0.0149245") + std = float("0.0312047") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.131264") + max_val = float("0.116267") + mean = float("-0.000328445") + std = float("0.009384") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96] + dtype = "float32" + min_val = float("-0.0948697") + max_val = float("0.224511") + mean = float("0.0617959") + std = float("0.0539032") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("0.713603") + max_val = float("1.12367") + mean = float("0.935296") + std = float("0.0628703") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.00170399") + max_val = float("0.0269514") + mean = float("0.00620616") + std = float("0.00275772") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("-0.062198") + max_val = float("0.114507") + mean = float("-0.0207577") + std = float("0.0255889") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0878207") + max_val = float("0.0793284") + mean = float("-0.000117661") + std = float("0.006259") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val 
= float("-0.290627") + max_val = float("0.197489") + mean = float("-0.0642084") + std = float("0.0682552") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.677585") + max_val = float("1.44589") + mean = float("0.889482") + std = float("0.0775531") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00615469") + max_val = float("0.0653019") + mean = float("0.013452") + std = float("0.00596706") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.148841") + max_val = float("0.0453696") + mean = float("-0.0305179") + std = float("0.0258069") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.103954") + max_val = float("0.0889274") + mean = float("-0.000488273") + std = float("0.00677572") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [384] + dtype = "float32" + min_val = float("-0.197184") + max_val = float("0.235944") + mean = float("-0.0649902") + std = float("0.0407184") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [384] + dtype = "float32" + min_val = float("0.87395") + max_val = float("1.53027") + mean = float("1.02089") + std = float("0.0620815") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [384] + dtype = "float32" + min_val = float("0.00549855") + max_val = float("0.0533697") + mean = float("0.00979429") + std = float("0.00454056") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [384] + dtype = "float32" + min_val = float("-0.248796") + max_val = float("0.131533") + mean = float("-0.0383995") + std = float("0.0378875") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.155662") + max_val = float("0.0954612") + mean = float("-0.000485357") + std = float("0.00632631") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [192] + dtype = "float32" + min_val = float("-0.172982") + max_val = float("0.00602343") + mean = float("-0.0635736") + std = float("0.031811") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [192] + dtype = "float32" + min_val = float("0.888688") + max_val = float("0.993558") + mean = float("0.952478") + std = float("0.0161577") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [192] + dtype = "float32" + min_val = float("0.00348684") + max_val = float("0.0168184") + mean = float("0.00628357") + std = float("0.00200719") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [192] + dtype = "float32" + min_val = float("-0.0659639") + max_val = float("0.0508781") + mean = float("-0.0199445") + std = float("0.0244522") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0503466") + max_val = float("0.0313236") + mean = float("-0.000610377") + std = float("0.0047421") + data = None + + +class 
Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [192] + dtype = "float32" + min_val = float("-0.172982") + max_val = float("0.00602343") + mean = float("-0.0635736") + std = float("0.031811") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [192] + dtype = "float32" + min_val = float("0.947546") + max_val = float("1.03225") + mean = float("0.990347") + std = float("0.0162233") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [192] + dtype = "float32" + min_val = float("0.00995711") + max_val = float("0.0401164") + mean = float("0.017634") + std = float("0.00573009") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [192] + dtype = "float32" + min_val = float("-0.140772") + max_val = float("0.138155") + mean = float("-0.0239356") + std = float("0.0422014") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0482809") + max_val = float("0.0734527") + mean = float("-7.90712e-05") + std = float("0.0027116") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [192] + dtype = "float32" + min_val = float("-0.211375") + max_val = float("-0.00263112") + mean = float("-0.0715416") + std = float("0.0343984") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [192] + dtype = "float32" + min_val = float("0.941718") + max_val = float("1.15116") + mean = float("1.03082") + std = float("0.0423415") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [192] + dtype = "float32" + min_val = float("0.0211078") + max_val = float("0.140703") + mean = float("0.0359457") + std = float("0.0113705") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [192] + dtype = "float32" + min_val = float("-0.167852") + max_val = float("0.256733") + mean = float("-0.0421418") + std = float("0.0506957") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0667117") + max_val = float("0.059304") + mean = float("-0.000107451") + std = float("0.00324108") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [192] + dtype = "float32" + min_val = float("-0.190843") + max_val = float("-0.00892811") + mean = float("-0.0684056") + std = float("0.030824") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [192] + dtype = "float32" + min_val = float("0.946948") + max_val = float("1.04539") + mean = float("0.990344") + std = float("0.013334") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [192] + dtype = "float32" + min_val = float("0.00204439") + max_val = float("0.0101563") + mean = float("0.00350592") + std = float("0.000926001") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [192] + dtype = "float32" + min_val = float("-0.0741431") + max_val = float("0.0383866") + mean = float("-0.0189034") + std = float("0.0168987") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0319038") + max_val = 
float("0.0417245") + mean = float("-0.000600213") + std = float("0.00487987") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [192] + dtype = "float32" + min_val = float("-0.190843") + max_val = float("-0.00892811") + mean = float("-0.0684056") + std = float("0.030824") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [192] + dtype = "float32" + min_val = float("0.956509") + max_val = float("1.11411") + mean = float("1.00672") + std = float("0.0257334") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [192] + dtype = "float32" + min_val = float("0.00625705") + max_val = float("0.0226068") + mean = float("0.0103997") + std = float("0.00287058") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [192] + dtype = "float32" + min_val = float("-0.141039") + max_val = float("0.0702292") + mean = float("-0.0298093") + std = float("0.0313043") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0504256") + max_val = float("0.0761871") + mean = float("-0.000102443") + std = float("0.0027184") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [192] + dtype = "float32" + min_val = float("-0.224642") + max_val = float("-0.0187006") + mean = float("-0.0912059") + std = float("0.0387278") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [192] + dtype = "float32" + min_val = float("0.950234") + max_val = float("1.19239") + mean = float("1.02577") + std = float("0.0451849") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [192] + dtype = "float32" + min_val = float("0.0202728") + max_val = float("0.0768753") + mean = float("0.0352012") + std = float("0.0111367") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [192] + dtype = "float32" + min_val = float("-0.22549") + max_val = float("0.0963683") + mean = float("-0.0586879") + std = float("0.0627008") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0629141") + max_val = float("0.0883468") + mean = float("-0.000127578") + std = float("0.00336756") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [192] + dtype = "float32" + min_val = float("-0.150226") + max_val = float("-0.00319821") + mean = float("-0.0660682") + std = float("0.0225506") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("0.935894") + max_val = float("1.07288") + mean = float("1.0007") + std = float("0.0211788") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.00178607") + max_val = float("0.00643779") + mean = float("0.00311503") + std = float("0.000840167") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("-0.0642588") + max_val = float("0.0879255") + mean = float("-0.009792") + std = float("0.0179078") + data = None + + +class Program_weight_tensor_parameter_289: + name = 
"parameter_289" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.029514") + max_val = float("0.043096") + mean = float("-0.000329408") + std = float("0.00549588") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192] + dtype = "float32" + min_val = float("-0.150226") + max_val = float("-0.00319822") + mean = float("-0.0660682") + std = float("0.0225506") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [192] + dtype = "float32" + min_val = float("0.939778") + max_val = float("1.11505") + mean = float("0.995075") + std = float("0.0251252") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [192] + dtype = "float32" + min_val = float("0.00642307") + max_val = float("0.0266081") + mean = float("0.0114071") + std = float("0.00308351") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [192] + dtype = "float32" + min_val = float("-0.18069") + max_val = float("0.0972039") + mean = float("-0.0325111") + std = float("0.0330231") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0382342") + max_val = float("0.0664391") + mean = float("-0.000128083") + std = float("0.00267217") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [192] + dtype = "float32" + min_val = float("-0.282528") + max_val = float("0.0109662") + mean = float("-0.106158") + std = float("0.0388174") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [192] + dtype = "float32" + min_val = float("0.946051") + max_val = float("1.24915") + mean = float("1.02799") + std = float("0.0411221") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [192] + dtype = "float32" + min_val = float("0.00896292") + max_val = float("0.0393411") + mean = float("0.0158995") + std = float("0.00471727") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [192] + dtype = "float32" + min_val = float("-0.220858") + max_val = float("0.10809") + mean = float("-0.0372746") + std = float("0.044431") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0533745") + max_val = float("0.0594756") + mean = float("-0.000147957") + std = float("0.00371064") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [192] + dtype = "float32" + min_val = float("-0.250169") + max_val = float("-0.0169445") + mean = float("-0.118185") + std = float("0.0428781") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [192] + dtype = "float32" + min_val = float("0.916471") + max_val = float("1.13189") + mean = float("1.02572") + std = float("0.0416176") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [192] + dtype = "float32" + min_val = float("0.00405995") + max_val = float("0.0116058") + mean = float("0.00629149") + std = float("0.00151752") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [192] + dtype = "float32" + min_val = float("-0.117604") + max_val = float("0.0642248") + mean = float("0.0107736") + std = 
float("0.0236389") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0597477") + max_val = float("0.0854183") + mean = float("-0.000166658") + std = float("0.00511791") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [192] + dtype = "float32" + min_val = float("-0.174475") + max_val = float("0.20701") + mean = float("-0.00689085") + std = float("0.0496763") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [192] + dtype = "float32" + min_val = float("0.951544") + max_val = float("1.21569") + mean = float("1.05632") + std = float("0.0494344") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [192] + dtype = "float32" + min_val = float("0.00423262") + max_val = float("0.0403318") + mean = float("0.00759489") + std = float("0.00311219") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [192] + dtype = "float32" + min_val = float("-0.0961858") + max_val = float("0.0406769") + mean = float("-0.00802736") + std = float("0.0245313") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0579338") + max_val = float("0.208244") + mean = float("-0.000163925") + std = float("0.00519386") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.243733") + max_val = float("-0.0546405") + mean = float("-0.121279") + std = float("0.0329438") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.821465") + max_val = float("1.01492") + mean = float("0.913551") + std = float("0.0255274") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.00632715") + max_val = float("0.0367869") + mean = float("0.0101591") + std = float("0.00340929") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.0871009") + max_val = float("0.0863362") + mean = float("-0.0276529") + std = float("0.0233351") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0365944") + max_val = float("0.0462394") + mean = float("-0.000227368") + std = float("0.00408063") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [768] + dtype = "float32" + min_val = float("-0.101787") + max_val = float("0.0665855") + mean = float("-0.0546584") + std = float("0.0146864") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [768] + dtype = "float32" + min_val = float("0.95621") + max_val = float("1.13825") + mean = float("1.02215") + std = float("0.0205763") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [768] + dtype = "float32" + min_val = float("0.0043328") + max_val = float("0.027335") + mean = float("0.00718712") + std = float("0.00194442") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [768] + dtype = "float32" + 
min_val = float("-0.0981102") + max_val = float("0.0940955") + mean = float("-0.0370968") + std = float("0.0222538") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0461237") + max_val = float("0.0869437") + mean = float("-0.000256912") + std = float("0.00342022") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.151767") + max_val = float("0.0695731") + mean = float("-0.0384536") + std = float("0.0201375") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.894036") + max_val = float("1.07238") + mean = float("0.984632") + std = float("0.0127924") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.00290649") + max_val = float("0.0408954") + mean = float("0.00666489") + std = float("0.00326729") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.0653166") + max_val = float("0.0519308") + mean = float("-0.00623302") + std = float("0.0155997") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0309181") + max_val = float("0.0533571") + mean = float("-7.98848e-05") + std = float("0.00292507") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.151767") + max_val = float("0.0695731") + mean = float("-0.0384536") + std = float("0.0201375") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.888673") + max_val = float("1.0755") + mean = float("0.996161") + std = float("0.0119175") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + min_val = float("0.0142738") + max_val = float("0.311691") + mean = float("0.0422161") + std = float("0.0212825") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-0.214832") + max_val = float("0.109983") + mean = float("-0.0665168") + std = float("0.0509291") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0333794") + max_val = float("0.041775") + mean = float("-0.000107745") + std = float("0.00109587") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.0782042") + max_val = float("0.112772") + mean = float("-0.0178359") + std = float("0.0153444") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.921308") + max_val = float("1.1661") + mean = float("1.01649") + std = float("0.0243616") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("0.0102295") + max_val = float("0.115044") + mean = float("0.0305011") + std = float("0.0124013") + data = None + + +class 
Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-0.153753") + max_val = float("0.147071") + mean = float("-0.0372654") + std = float("0.0461481") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0226589") + max_val = float("0.036417") + mean = float("-6.21237e-05") + std = float("0.00142638") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.069438") + max_val = float("0.0210057") + mean = float("-0.0221755") + std = float("0.0130288") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("0.948363") + max_val = float("1.1661") + mean = float("1.01607") + std = float("0.0267976") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("0.0303298") + max_val = float("0.235692") + mean = float("0.0873822") + std = float("0.0294964") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-1.41616") + max_val = float("1.37834") + mean = float("-0.0672685") + std = float("0.446602") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0365696") + max_val = float("0.0477869") + mean = float("3.75444e-05") + std = float("0.00244321") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0175498") + max_val = float("0.0243298") + mean = float("-0.00128851") + std = float("0.0065282") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.972045") + max_val = float("1.06093") + mean = float("0.99618") + std = float("0.01202") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.0015358") + max_val = float("0.00696368") + mean = float("0.00325261") + std = float("0.000776971") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-0.0833909") + max_val = float("0.0596304") + mean = float("-0.028885") + std = float("0.0177319") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0289284") + max_val = float("0.0278605") + mean = float("-0.000360372") + std = float("0.00262503") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0175498") + max_val = float("0.0243298") + mean = float("-0.00128851") + std = float("0.0065282") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("0.974435") + max_val = float("1.08363") + mean = float("1.00583") + std = float("0.017741") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("0.00723957") + max_val = 
float("0.0375603") + mean = float("0.0161577") + std = float("0.00499723") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-0.252151") + max_val = float("0.0737095") + mean = float("-0.08036") + std = float("0.0422289") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0230068") + max_val = float("0.0485732") + mean = float("-0.000124469") + std = float("0.00111462") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.0466225") + max_val = float("0.00898686") + mean = float("-0.00779507") + std = float("0.00746548") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384] + dtype = "float32" + min_val = float("0.958003") + max_val = float("1.12965") + mean = float("1.01422") + std = float("0.0192962") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("0.0315362") + max_val = float("0.132781") + mean = float("0.069239") + std = float("0.0176957") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-0.817765") + max_val = float("0.618692") + mean = float("-0.19556") + std = float("0.181151") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.022563") + max_val = float("0.038276") + mean = float("-0.000122085") + std = float("0.00133203") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.033857") + max_val = float("0.0139088") + mean = float("-0.006985") + std = float("0.00755489") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.986684") + max_val = float("1.03523") + mean = float("1.0021") + std = float("0.00693134") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.0010937") + max_val = float("0.00473797") + mean = float("0.0019152") + std = float("0.000506383") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-0.0582268") + max_val = float("0.108843") + mean = float("-0.0163738") + std = float("0.0197979") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020788") + max_val = float("0.0286466") + mean = float("-0.000218693") + std = float("0.00228777") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.033857") + max_val = float("0.0139088") + mean = float("-0.006985") + std = float("0.00755489") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.984593") + max_val = float("1.06746") + mean = float("1.00655") + std = float("0.0121664") + data = None + + +class Program_weight_tensor_parameter_362: + name = 
"parameter_362" + shape = [384] + dtype = "float32" + min_val = float("0.00450449") + max_val = float("0.0227935") + mean = float("0.00957872") + std = float("0.00312672") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-0.144327") + max_val = float("0.265646") + mean = float("-0.0565659") + std = float("0.048812") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0124244") + max_val = float("0.0276773") + mean = float("-9.55709e-05") + std = float("0.00094902") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.0512307") + max_val = float("0.00385635") + mean = float("-0.0195493") + std = float("0.00836547") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype = "float32" + min_val = float("0.978618") + max_val = float("1.08483") + mean = float("1.01365") + std = float("0.0153886") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("0.0063934") + max_val = float("0.0246998") + mean = float("0.0111198") + std = float("0.00257045") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-0.120871") + max_val = float("0.132915") + mean = float("-0.0260459") + std = float("0.0299784") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0142981") + max_val = float("0.024141") + mean = float("-4.79187e-05") + std = float("0.00131278") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.0676664") + max_val = float("0.0208998") + mean = float("-0.0318731") + std = float("0.0122307") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.984256") + max_val = float("1.05794") + mean = float("1.01498") + std = float("0.0106765") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("0.00788932") + max_val = float("0.0348657") + mean = float("0.0128533") + std = float("0.00302232") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-0.101719") + max_val = float("0.172283") + mean = float("-0.0367191") + std = float("0.0276733") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0172054") + max_val = float("0.0376208") + mean = float("-0.000183184") + std = float("0.00259433") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0230121") + max_val = float("0.0211028") + mean = float("5.7334e-05") + std = float("0.00782956") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("0.99505") + max_val = float("1.08363") + mean = float("1.04199") + std 
= float("0.0135379") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.0124186") + max_val = float("0.025482") + mean = float("0.0160088") + std = float("0.00200221") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-0.0886053") + max_val = float("0.033751") + mean = float("-0.0450013") + std = float("0.0179425") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0428553") + max_val = float("0.0454061") + mean = float("-0.000216885") + std = float("0.00310941") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("-3.76748") + max_val = float("-0.735718") + mean = float("-2.19173") + std = float("0.429764") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("1.62261") + max_val = float("4.45166") + mean = float("3.08738") + std = float("0.25481") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("0.00255178") + max_val = float("0.0174347") + mean = float("0.00504102") + std = float("0.00123753") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [1024] + dtype = "float32" + min_val = float("-0.0981028") + max_val = float("0.109197") + mean = float("-0.0429288") + std = float("0.0200304") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0759075") + max_val = float("0.0960025") + mean = float("-0.000303492") + std = float("0.00327529") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [768] + dtype = "float32" + min_val = float("-0.0164547") + max_val = float("0.00136048") + mean = float("-0.000808882") + std = float("0.00234169") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0859031") + max_val = float("0.147876") + mean = float("-0.000294646") + std = float("0.00174956") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.77797") + max_val = float("0.31184") + mean = float("-0.312311") + std = float("0.291454") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.188879") + max_val = float("1.81779") + mean = float("0.610184") + std = float("0.262228") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("4.15295e-05") + max_val = float("0.00124149") + mean = float("0.00018635") + std = float("0.000117554") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0894214") + max_val = float("0.0525955") + mean = float("0.0172684") + std = float("0.0154183") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype 
= "float32" + min_val = float("-0.0239132") + max_val = float("0.0254034") + mean = float("-0.000294387") + std = float("0.00236383") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.77797") + max_val = float("0.31184") + mean = float("-0.312311") + std = float("0.291454") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.332455") + max_val = float("2.60228") + mean = float("1.02626") + std = float("0.289998") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.000319043") + max_val = float("0.00414738") + mean = float("0.00107632") + std = float("0.000507674") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.186705") + max_val = float("0.0905628") + mean = float("0.0170662") + std = float("0.021429") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0184804") + max_val = float("0.0310221") + mean = float("-3.75571e-05") + std = float("0.00148755") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.58674") + max_val = float("0.0349821") + mean = float("-1.57085") + std = float("0.417307") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.525383") + max_val = float("1.6495") + mean = float("1.13854") + std = float("0.149789") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0256519") + max_val = float("0.110068") + mean = float("0.0524846") + std = float("0.0116704") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-0.778566") + max_val = float("0.344438") + mean = float("-0.221917") + std = float("0.103427") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0296622") + max_val = float("0.0537224") + mean = float("-0.000158945") + std = float("0.00197052") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.94349") + max_val = float("0.641156") + mean = float("-0.576479") + std = float("0.359073") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.164837") + max_val = float("2.07047") + mean = float("0.56295") + std = float("0.227775") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("6.5426e-05") + max_val = float("0.00116989") + mean = float("0.00025892") + std = float("0.000131517") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.034319") + max_val = float("0.060842") + mean = float("0.0169333") + std = float("0.0124885") + data = None + + +class 
Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0211277") + max_val = float("0.0256464") + mean = float("-0.000316836") + std = float("0.00224938") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.94349") + max_val = float("0.641156") + mean = float("-0.576479") + std = float("0.359073") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.582152") + max_val = float("2.16129") + mean = float("1.08524") + std = float("0.256663") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00078451") + max_val = float("0.00519898") + mean = float("0.00162415") + std = float("0.000518067") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.0628209") + max_val = float("0.0895942") + mean = float("0.0228312") + std = float("0.0198838") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0187271") + max_val = float("0.0312371") + mean = float("-6.33411e-05") + std = float("0.00157788") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.4015") + max_val = float("0.848053") + mean = float("-1.4071") + std = float("0.361673") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.461972") + max_val = float("1.92206") + mean = float("1.16949") + std = float("0.148563") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0223589") + max_val = float("0.0650232") + mean = float("0.034113") + std = float("0.00657291") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.609242") + max_val = float("0.728225") + mean = float("-0.148877") + std = float("0.0847971") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0203116") + max_val = float("0.0405905") + mean = float("-0.000153735") + std = float("0.00198218") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-1.88015") + max_val = float("0.45251") + mean = float("-0.486805") + std = float("0.376833") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.0772688") + max_val = float("2.12425") + mean = float("0.442764") + std = float("0.218251") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("5.14732e-05") + max_val = float("0.00175808") + mean = float("0.000313916") + std = float("0.00017621") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.0350628") + max_val = float("0.0623597") + 
mean = float("0.0217881") + std = float("0.0146826") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0144766") + max_val = float("0.0264016") + mean = float("-0.000421792") + std = float("0.0019438") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-1.88015") + max_val = float("0.45251") + mean = float("-0.486805") + std = float("0.376833") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.518812") + max_val = float("2.23061") + mean = float("1.05416") + std = float("0.261303") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.000907154") + max_val = float("0.00473234") + mean = float("0.00216236") + std = float("0.000621055") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.156848") + max_val = float("0.0998884") + mean = float("0.02917") + std = float("0.023776") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0200188") + max_val = float("0.0377429") + mean = float("-6.77733e-05") + std = float("0.00166655") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-2.1601") + max_val = float("0.422034") + mean = float("-1.36858") + std = float("0.278385") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.71356") + max_val = float("1.64114") + mean = float("1.14618") + std = float("0.102016") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("0.015836") + max_val = float("0.0669088") + mean = float("0.0268458") + std = float("0.00709364") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.604472") + max_val = float("0.189662") + mean = float("-0.105803") + std = float("0.069063") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259247") + max_val = float("0.0434116") + mean = float("-0.000126202") + std = float("0.00187571") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-2.9286") + max_val = float("1.66241") + mean = float("-0.761307") + std = float("0.644444") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("0.953521") + max_val = float("2.92614") + mean = float("1.86813") + std = float("0.276938") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00145196") + max_val = float("0.00602303") + mean = float("0.00286041") + std = float("0.000620991") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = 
"float32" + min_val = float("-0.206077") + max_val = float("0.108202") + mean = float("0.0475519") + std = float("0.0246364") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0455732") + max_val = float("0.0357705") + mean = float("-0.000554005") + std = float("0.00431649") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.25212") + max_val = float("0.68194") + mean = float("-0.778669") + std = float("0.473843") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.966069") + max_val = float("2.89985") + mean = float("2.1016") + std = float("0.306322") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.000400888") + max_val = float("0.00504261") + mean = float("0.000981843") + std = float("0.000289437") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-0.0495509") + max_val = float("0.0680366") + mean = float("0.0216031") + std = float("0.0123324") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.145612") + max_val = float("0.0575213") + mean = float("-0.00024107") + std = float("0.00300636") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [768] + dtype = "float32" + min_val = float("-2.40748") + max_val = float("0.642802") + mean = float("-0.909529") + std = float("0.33999") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [768] + dtype = "float32" + min_val = float("0.529868") + max_val = float("1.91302") + mean = float("0.921944") + std = float("0.149362") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [768] + dtype = "float32" + min_val = float("0.00474618") + max_val = float("0.0390067") + mean = float("0.00819721") + std = float("0.00242367") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [768] + dtype = "float32" + min_val = float("-0.182419") + max_val = float("0.16049") + mean = float("0.0278199") + std = float("0.0398523") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.059099") + max_val = float("0.0444559") + mean = float("-7.08532e-05") + std = float("0.00199255") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [512] + dtype = "float32" + min_val = float("-3.3982") + max_val = float("1.66922") + mean = float("-1.1631") + std = float("0.514405") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [512] + dtype = "float32" + min_val = float("0.523518") + max_val = float("1.6758") + mean = float("1.11208") + std = float("0.148137") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [512] + dtype = "float32" + min_val = float("0.000933487") + max_val = float("0.00746954") + mean = float("0.00344664") + std = float("0.000801527") + data = None + + +class 
Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [512] + dtype = "float32" + min_val = float("-0.110012") + max_val = float("0.0714297") + mean = float("-0.0365401") + std = float("0.0293225") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.330875") + max_val = float("0.172913") + mean = float("-0.000459854") + std = float("0.00662096") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-0.0108223") + max_val = float("0.000907801") + mean = float("-0.0031106") + std = float("0.00227253") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.223828") + max_val = float("0.20505") + mean = float("-0.00218766") + std = float("0.00489559") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.97815") + max_val = float("0.402075") + mean = float("-0.350999") + std = float("0.333852") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0527639") + max_val = float("2.1601") + mean = float("0.580979") + std = float("0.418931") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("5.97182e-05") + max_val = float("0.00113282") + mean = float("0.000337135") + std = float("0.00017036") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0272331") + max_val = float("0.0426344") + mean = float("0.00406202") + std = float("0.0112711") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0211821") + max_val = float("0.0576226") + mean = float("-0.000295795") + std = float("0.00359937") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.97815") + max_val = float("0.402075") + mean = float("-0.350999") + std = float("0.333852") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.372396") + max_val = float("2.6943") + mean = float("1.20241") + std = float("0.492555") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.000778601") + max_val = float("0.00986122") + mean = float("0.0028243") + std = float("0.0011229") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0718662") + max_val = float("0.098836") + mean = float("0.0132633") + std = float("0.0284276") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0267941") + max_val = float("0.0380043") + mean = float("-0.000114945") + std = float("0.00270079") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.89627") + max_val 
= float("-0.181006") + mean = float("-1.31578") + std = float("0.40203") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.695389") + max_val = float("2.10154") + mean = float("1.18272") + std = float("0.170823") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0380002") + max_val = float("0.186937") + mean = float("0.0783337") + std = float("0.0246352") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-1.80338") + max_val = float("1.39476") + mean = float("-0.158506") + std = float("0.279166") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0376485") + max_val = float("0.0442722") + mean = float("-0.000156605") + std = float("0.00324367") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.94409") + max_val = float("0.506739") + mean = float("-0.280964") + std = float("0.32113") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.0470323") + max_val = float("1.77439") + mean = float("0.444469") + std = float("0.306448") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("5.53183e-05") + max_val = float("0.00145499") + mean = float("0.000343414") + std = float("0.000218973") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0324414") + max_val = float("0.0424277") + mean = float("0.0078867") + std = float("0.0106231") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0232793") + max_val = float("0.0340463") + mean = float("-0.000372018") + std = float("0.00343686") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.94409") + max_val = float("0.506739") + mean = float("-0.280964") + std = float("0.32113") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.486392") + max_val = float("2.27602") + mean = float("1.14015") + std = float("0.376136") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.001572") + max_val = float("0.00624474") + mean = float("0.00321651") + std = float("0.000843905") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0611836") + max_val = float("0.0803488") + mean = float("0.0262682") + std = float("0.0244502") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.021333") + max_val = float("0.031939") + mean = float("-0.000148173") + std = float("0.00289116") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + 
shape = [192] + dtype = "float32" + min_val = float("-2.51265") + max_val = float("-0.125796") + mean = float("-1.28955") + std = float("0.445077") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.655675") + max_val = float("1.67878") + mean = float("1.2032") + std = float("0.16689") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0291112") + max_val = float("0.117157") + mean = float("0.0521028") + std = float("0.0142132") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.75423") + max_val = float("0.269628") + mean = float("-0.0616626") + std = float("0.184866") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0358087") + max_val = float("0.0447245") + mean = float("-0.000190853") + std = float("0.00335683") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-1.76065") + max_val = float("0.463281") + mean = float("-0.263631") + std = float("0.335459") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.00363281") + max_val = float("1.68356") + mean = float("0.352056") + std = float("0.252441") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("9.9945e-07") + max_val = float("0.00178321") + mean = float("0.000315486") + std = float("0.000233695") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0321653") + max_val = float("0.0434796") + mean = float("0.0100317") + std = float("0.010429") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0333151") + max_val = float("0.028244") + mean = float("-0.000424522") + std = float("0.00328881") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-1.76065") + max_val = float("0.463281") + mean = float("-0.263631") + std = float("0.335459") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.407314") + max_val = float("1.98401") + mean = float("1.06799") + std = float("0.335081") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00143898") + max_val = float("0.00771346") + mean = float("0.00340617") + std = float("0.000937101") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.0318019") + max_val = float("0.0790559") + mean = float("0.0270601") + std = float("0.019367") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0285572") + max_val = float("0.0385687") + mean = float("-0.000143999") + std = float("0.00298657") + data = None 
+ + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.50189") + max_val = float("0.137771") + mean = float("-1.24368") + std = float("0.425346") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.657082") + max_val = float("1.81751") + mean = float("1.17096") + std = float("0.166144") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0188017") + max_val = float("0.0700368") + mean = float("0.0347428") + std = float("0.00799167") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.21798") + max_val = float("0.267509") + mean = float("-0.0357914") + std = float("0.12809") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.03732") + max_val = float("0.0499939") + mean = float("-0.000197441") + std = float("0.00341168") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-2.08361") + max_val = float("0.526166") + mean = float("-0.273402") + std = float("0.375102") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("0.000539323") + max_val = float("0.733627") + mean = float("0.211797") + std = float("0.136369") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("5.73491e-08") + max_val = float("0.000709717") + mean = float("0.000185769") + std = float("0.000106476") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.024762") + max_val = float("0.0297538") + mean = float("0.00669963") + std = float("0.00867654") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0223003") + max_val = float("0.0285012") + mean = float("-0.00027288") + std = float("0.00290769") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.08361") + max_val = float("0.526166") + mean = float("-0.273402") + std = float("0.375102") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.395992") + max_val = float("1.96799") + mean = float("0.961194") + std = float("0.304605") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00143872") + max_val = float("0.00758554") + mean = float("0.00340905") + std = float("0.000995444") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0308625") + max_val = float("0.0974639") + mean = float("0.0355014") + std = float("0.0243965") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0300099") + max_val = 
float("0.0331466") + mean = float("-0.000174018") + std = float("0.00307819") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.74569") + max_val = float("-0.0811876") + mean = float("-1.23738") + std = float("0.435119") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.763204") + max_val = float("1.6262") + mean = float("1.15469") + std = float("0.143358") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0159479") + max_val = float("0.0420721") + mean = float("0.0253839") + std = float("0.00548298") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.985135") + max_val = float("0.209935") + mean = float("-0.0465764") + std = float("0.111494") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0517747") + max_val = float("0.0533624") + mean = float("-0.000217609") + std = float("0.00340238") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21509") + max_val = float("0.443206") + mean = float("-0.23316") + std = float("0.338913") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("-0.000141425") + max_val = float("0.676871") + mean = float("0.191956") + std = float("0.120826") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("1.88795e-10") + max_val = float("0.00060921") + mean = float("0.00019844") + std = float("0.000122163") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0415572") + max_val = float("0.0356041") + mean = float("0.00645752") + std = float("0.0102765") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0362203") + max_val = float("0.0384156") + mean = float("-0.000250814") + std = float("0.00300878") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21509") + max_val = float("0.443206") + mean = float("-0.23316") + std = float("0.338913") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.38485") + max_val = float("1.57063") + mean = float("0.854584") + std = float("0.261022") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00126636") + max_val = float("0.00625458") + mean = float("0.00353727") + std = float("0.000960266") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.0709334") + max_val = float("0.0992781") + mean = float("0.0299011") + std = float("0.022397") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape 
= [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0303355") + max_val = float("0.033651") + mean = float("-0.000135556") + std = float("0.00307625") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-2.49114") + max_val = float("-0.133527") + mean = float("-1.25068") + std = float("0.419307") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.68941") + max_val = float("1.52402") + mean = float("1.1287") + std = float("0.135387") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0102917") + max_val = float("0.0340277") + mean = float("0.0181781") + std = float("0.00444353") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.598499") + max_val = float("0.205069") + mean = float("-0.0404677") + std = float("0.0908447") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0539394") + max_val = float("0.0542177") + mean = float("-0.000198971") + std = float("0.00340909") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.22047") + max_val = float("0.496178") + mean = float("-0.168848") + std = float("0.293103") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.00876153") + max_val = float("1.5288") + mean = float("0.237756") + std = float("0.211674") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("1.74591e-05") + max_val = float("0.00624593") + mean = float("0.00045268") + std = float("0.000588624") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.065087") + max_val = float("0.0770018") + mean = float("0.00835075") + std = float("0.0148227") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0660848") + max_val = float("0.0277484") + mean = float("-0.000371825") + std = float("0.00364693") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-1.22047") + max_val = float("0.496178") + mean = float("-0.168848") + std = float("0.293103") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.355908") + max_val = float("1.45471") + mean = float("0.758883") + std = float("0.217186") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00268518") + max_val = float("0.0127648") + mean = float("0.0059621") + std = float("0.00184013") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.0754021") + max_val = float("0.0955923") + mean = float("0.0355705") + std = float("0.0315093") + data = None + + 
+class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0700958") + max_val = float("0.056488") + mean = float("-0.000170134") + std = float("0.00301629") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-1.88075") + max_val = float("-0.211975") + mean = float("-1.14723") + std = float("0.326349") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("0.79192") + max_val = float("1.60588") + mean = float("1.12491") + std = float("0.130114") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("0.00790317") + max_val = float("0.0430673") + mean = float("0.0158827") + std = float("0.00476539") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-0.455479") + max_val = float("0.211552") + mean = float("-0.0369992") + std = float("0.0813136") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0610284") + max_val = float("0.0703754") + mean = float("-0.000151637") + std = float("0.00331491") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.86758") + max_val = float("1.58079") + mean = float("-0.0285524") + std = float("0.747555") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.4869") + max_val = float("2.086") + mean = float("0.902744") + std = float("0.233413") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.00633974") + max_val = float("0.0416042") + mean = float("0.0149685") + std = float("0.00593254") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-0.196625") + max_val = float("0.272012") + mean = float("-0.0347194") + std = float("0.049849") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.090327") + max_val = float("0.0781438") + mean = float("-0.00049126") + std = float("0.00703695") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.97514") + max_val = float("1.66537") + mean = float("0.0963337") + std = float("0.664688") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.833701") + max_val = float("5.56786") + mean = float("1.91679") + std = float("0.933226") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.00262273") + max_val = float("0.0469049") + mean = float("0.00988616") + std = float("0.00423222") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-0.122926") + max_val = float("0.108141") + mean = 
float("-0.0158569") + std = float("0.0435084") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0689058") + max_val = float("0.112599") + mean = float("-0.000328781") + std = float("0.0065748") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [384] + dtype = "float32" + min_val = float("-2.92973") + max_val = float("1.33102") + mean = float("-0.301138") + std = float("0.56416") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [384] + dtype = "float32" + min_val = float("0.639938") + max_val = float("2.47799") + mean = float("1.16309") + std = float("0.258012") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [384] + dtype = "float32" + min_val = float("0.00577942") + max_val = float("0.0604073") + mean = float("0.0143328") + std = float("0.00717715") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [384] + dtype = "float32" + min_val = float("-0.172991") + max_val = float("0.184791") + mean = float("0.019209") + std = float("0.0533994") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0651152") + max_val = float("0.063191") + mean = float("-7.21353e-05") + std = float("0.00350596") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [256] + dtype = "float32" + min_val = float("-2.04896") + max_val = float("1.29277") + mean = float("-0.925662") + std = float("0.541886") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [256] + dtype = "float32" + min_val = float("0.52945") + max_val = float("1.69731") + mean = float("1.05619") + std = float("0.17661") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [256] + dtype = "float32" + min_val = float("0.000674469") + max_val = float("0.0100212") + mean = float("0.00272823") + std = float("0.00122462") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [256] + dtype = "float32" + min_val = float("-0.164943") + max_val = float("0.107017") + mean = float("-0.0346008") + std = float("0.0479287") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.166132") + max_val = float("0.120738") + mean = float("-0.000624717") + std = float("0.0111393") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-0.0132669") + max_val = float("0.00100077") + mean = float("-0.00489103") + std = float("0.0031796") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.309648") + max_val = float("0.197925") + mean = float("-0.00390845") + std = float("0.00946233") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.92138") + max_val = float("0.528173") + mean = float("-0.211283") + std = float("0.434644") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = 
"float32" + min_val = float("0.140912") + max_val = float("3.21894") + mean = float("0.636059") + std = float("0.66684") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.25601e-05") + max_val = float("0.00138037") + mean = float("0.00035803") + std = float("0.000250121") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0383932") + max_val = float("0.0476881") + mean = float("0.00524749") + std = float("0.0168741") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.042012") + max_val = float("0.0802529") + mean = float("-0.000564225") + std = float("0.0064658") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.92138") + max_val = float("0.528173") + mean = float("-0.211283") + std = float("0.434644") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.34997") + max_val = float("5.4603") + mean = float("1.08725") + std = float("0.880273") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.000393061") + max_val = float("0.00600073") + mean = float("0.00206613") + std = float("0.00106655") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0914739") + max_val = float("0.110662") + mean = float("0.0155218") + std = float("0.0385942") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0397242") + max_val = float("0.0585133") + mean = float("-0.000194232") + std = float("0.00474942") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-2.46962") + max_val = float("-0.0207386") + mean = float("-1.22629") + std = float("0.444824") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.533115") + max_val = float("1.64651") + mean = float("0.94968") + std = float("0.173477") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0258755") + max_val = float("0.1224") + mean = float("0.0531712") + std = float("0.0215289") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-2.50049") + max_val = float("1.19932") + mean = float("-0.168452") + std = float("0.381676") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.173549") + max_val = float("0.0871168") + mean = float("-0.000260514") + std = float("0.00589301") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.39137") + max_val = float("0.557076") + mean = float("-0.134021") + std = float("0.346667") + data = None + + +class Program_weight_tensor_parameter_582: 
+ name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.0456926") + max_val = float("1.87") + mean = float("0.460917") + std = float("0.367799") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("7.8985e-05") + max_val = float("0.00257956") + mean = float("0.000633262") + std = float("0.000529372") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0294464") + max_val = float("0.0390531") + mean = float("0.00618468") + std = float("0.0144829") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0499002") + max_val = float("0.03712") + mean = float("-0.000516494") + std = float("0.00601108") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.39137") + max_val = float("0.557076") + mean = float("-0.134021") + std = float("0.346667") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.369434") + max_val = float("2.33578") + mean = float("0.904761") + std = float("0.427621") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.0013064") + max_val = float("0.0114625") + mean = float("0.00366985") + std = float("0.00182431") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0674013") + max_val = float("0.106024") + mean = float("0.0235788") + std = float("0.0301432") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0544699") + max_val = float("0.0432893") + mean = float("-0.000227051") + std = float("0.00481142") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.32769") + max_val = float("0.362732") + mean = float("-1.17761") + std = float("0.557882") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.470895") + max_val = float("1.98413") + mean = float("1.04365") + std = float("0.239424") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.0172223") + max_val = float("0.0742288") + mean = float("0.0330012") + std = float("0.0104026") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.649399") + max_val = float("0.431953") + mean = float("-0.0718373") + std = float("0.18395") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.136311") + max_val = float("0.143453") + mean = float("-0.000302539") + std = float("0.0058329") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-1.25274") + max_val = float("0.579736") + mean = float("-0.110157") + std = float("0.290873") + data = 
None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0246593") + max_val = float("1.28142") + mean = float("0.323963") + std = float("0.193926") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("3.03979e-05") + max_val = float("0.0033039") + mean = float("0.000564813") + std = float("0.000505444") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0377552") + max_val = float("0.049578") + mean = float("0.0038562") + std = float("0.0144143") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0412896") + max_val = float("0.0431426") + mean = float("-0.000341595") + std = float("0.00616412") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-1.25274") + max_val = float("0.579736") + mean = float("-0.110157") + std = float("0.290873") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.317522") + max_val = float("1.6746") + mean = float("0.750723") + std = float("0.258265") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00138595") + max_val = float("0.0112807") + mean = float("0.00427388") + std = float("0.00172965") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.0693035") + max_val = float("0.106113") + mean = float("0.0156645") + std = float("0.029297") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0707344") + max_val = float("0.0533381") + mean = float("-0.000195671") + std = float("0.00486483") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-3.5906") + max_val = float("0.291524") + mean = float("-1.12744") + std = float("0.574031") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.519079") + max_val = float("2.19595") + mean = float("1.05638") + std = float("0.238992") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0152112") + max_val = float("0.0436628") + mean = float("0.0246659") + std = float("0.00537498") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.688628") + max_val = float("0.554991") + mean = float("-0.0233944") + std = float("0.159928") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0863777") + max_val = float("0.123252") + mean = float("-0.000287898") + std = float("0.0059129") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.894065") + max_val = float("0.528462") + mean 
= float("-0.160914") + std = float("0.280775") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0198977") + max_val = float("1.40929") + mean = float("0.324417") + std = float("0.214346") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("2.3179e-05") + max_val = float("0.00304799") + mean = float("0.000599645") + std = float("0.000483623") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0235075") + max_val = float("0.0488833") + mean = float("0.00764636") + std = float("0.0134397") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0516664") + max_val = float("0.0403338") + mean = float("-0.000650817") + std = float("0.006249") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.894065") + max_val = float("0.528462") + mean = float("-0.160914") + std = float("0.280775") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.177671") + max_val = float("1.78574") + mean = float("0.712956") + std = float("0.285068") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.000855722") + max_val = float("0.011962") + mean = float("0.00441699") + std = float("0.00176161") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.046195") + max_val = float("0.101491") + mean = float("0.0248708") + std = float("0.0287099") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0617188") + max_val = float("0.0545246") + mean = float("-0.000236251") + std = float("0.00488926") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-2.66323") + max_val = float("0.0623296") + mean = float("-1.06373") + std = float("0.489342") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.516127") + max_val = float("1.74272") + mean = float("1.01959") + std = float("0.194249") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.00966188") + max_val = float("0.0337769") + mean = float("0.0185695") + std = float("0.00483648") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.574086") + max_val = float("0.395306") + mean = float("-0.0490517") + std = float("0.150869") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0819348") + max_val = float("0.110116") + mean = float("-0.000329675") + std = float("0.0058075") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + 
min_val = float("-0.980112") + max_val = float("0.483047") + mean = float("-0.136611") + std = float("0.277228") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0468125") + max_val = float("1.14578") + mean = float("0.29425") + std = float("0.17328") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000143071") + max_val = float("0.00493424") + mean = float("0.000913103") + std = float("0.000688244") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0363086") + max_val = float("0.0523069") + mean = float("0.00479237") + std = float("0.0159931") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0661086") + max_val = float("0.0594125") + mean = float("-0.00059754") + std = float("0.00705215") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.980112") + max_val = float("0.483046") + mean = float("-0.136611") + std = float("0.277228") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.245629") + max_val = float("1.70476") + mean = float("0.608342") + std = float("0.228754") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.00269671") + max_val = float("0.0179759") + mean = float("0.00695992") + std = float("0.00256464") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.0419227") + max_val = float("0.111328") + mean = float("0.0162502") + std = float("0.0308395") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070362") + max_val = float("0.0475107") + mean = float("-0.000231982") + std = float("0.0049307") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-3.47468") + max_val = float("0.198878") + mean = float("-1.00518") + std = float("0.549489") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.68523") + max_val = float("2.51384") + mean = float("1.07866") + std = float("0.21278") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0073715") + max_val = float("0.0337206") + mean = float("0.01499") + std = float("0.00482926") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.403211") + max_val = float("0.240247") + mean = float("-0.0331757") + std = float("0.129637") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0707057") + max_val = float("0.0823979") + mean = float("-0.000250044") + std = float("0.00594881") + data = None + + +class Program_weight_tensor_parameter_641: + name 
= "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-0.626965") + max_val = float("0.448835") + mean = float("-0.0820382") + std = float("0.255843") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.0906111") + max_val = float("1.29038") + mean = float("0.307307") + std = float("0.194816") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.000315305") + max_val = float("0.0148723") + mean = float("0.00305807") + std = float("0.00245914") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.0371389") + max_val = float("0.0183386") + mean = float("0.000249639") + std = float("0.00910407") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0911381") + max_val = float("0.0639953") + mean = float("-0.000941367") + std = float("0.00812654") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-0.626965") + max_val = float("0.448835") + mean = float("-0.0820382") + std = float("0.255843") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.209511") + max_val = float("1.43917") + mean = float("0.531943") + std = float("0.259075") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00462531") + max_val = float("0.0495976") + mean = float("0.0190222") + std = float("0.00948176") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.108167") + max_val = float("0.0628188") + mean = float("-0.00562213") + std = float("0.0290345") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0925723") + max_val = float("0.056426") + mean = float("-0.000299437") + std = float("0.00485596") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-2.41584") + max_val = float("0.51672") + mean = float("-0.829512") + std = float("0.467964") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.858214") + max_val = float("2.18042") + mean = float("1.27928") + std = float("0.209066") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.00573305") + max_val = float("0.0283505") + mean = float("0.0129242") + std = float("0.0047369") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-0.468758") + max_val = float("0.237107") + mean = float("-0.0457111") + std = float("0.116263") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.140288") + max_val = float("0.144017") + mean = float("-0.000168599") + std = float("0.00619743") + 
data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-3.17591") + max_val = float("1.88794") + mean = float("0.50081") + std = float("0.862147") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("0.218511") + max_val = float("2.64172") + mean = float("0.557308") + std = float("0.321222") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.0059309") + max_val = float("0.0854273") + mean = float("0.0187079") + std = float("0.0133142") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-0.21823") + max_val = float("0.243022") + mean = float("-0.0238119") + std = float("0.0717645") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.159229") + max_val = float("0.164707") + mean = float("-0.000489464") + std = float("0.0123153") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-4.93686") + max_val = float("1.57224") + mean = float("0.382249") + std = float("1.05007") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.408469") + max_val = float("6.77488") + mean = float("1.6992") + std = float("1.3056") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("0.0027471") + max_val = float("0.101892") + mean = float("0.0161575") + std = float("0.0144822") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-0.113828") + max_val = float("0.255763") + mean = float("0.0304151") + std = float("0.0726062") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0865442") + max_val = float("0.13143") + mean = float("0.000256689") + std = float("0.0111353") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [192] + dtype = "float32" + min_val = float("-2.27841") + max_val = float("1.74989") + mean = float("-0.126437") + std = float("0.740487") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [192] + dtype = "float32" + min_val = float("0.633239") + max_val = float("2.97753") + mean = float("1.09234") + std = float("0.284333") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [192] + dtype = "float32" + min_val = float("0.00605626") + max_val = float("0.124183") + mean = float("0.0225335") + std = float("0.0173782") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [192] + dtype = "float32" + min_val = float("-0.363149") + max_val = float("0.203569") + mean = float("-0.0509608") + std = float("0.0932024") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0732508") + max_val = float("0.0817381") + mean = 
float("-0.000189264") + std = float("0.00581775") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [128] + dtype = "float32" + min_val = float("-2.81739") + max_val = float("1.95963") + mean = float("-0.71131") + std = float("0.648704") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [128] + dtype = "float32" + min_val = float("0.305831") + max_val = float("2.87595") + mean = float("1.02519") + std = float("0.278769") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [128] + dtype = "float32" + min_val = float("0.000296253") + max_val = float("0.00655884") + mean = float("0.00182291") + std = float("0.000942289") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [128] + dtype = "float32" + min_val = float("-0.238123") + max_val = float("0.218266") + mean = float("0.0102315") + std = float("0.0613663") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.147384") + max_val = float("0.149703") + mean = float("-0.00110619") + std = float("0.0174083") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-0.0166051") + max_val = float("-0.00197342") + mean = float("-0.00771776") + std = float("0.0036883") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.244486") + max_val = float("0.115264") + mean = float("-0.0084264") + std = float("0.0163364") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0451306") + max_val = float("0.0516016") + mean = float("-0.00123331") + std = float("0.0108119") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0522823") + max_val = float("0.061342") 
+ mean = float("-0.000213753") + std = float("0.00862639") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.070824") + max_val = float("0.0787998") + mean = float("-0.000460596") + std = float("0.00955491") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0664729") + max_val = float("0.0674885") + mean = float("-0.000911561") + std = float("0.0112967") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0618099") + max_val = float("0.0462764") + mean = float("-0.000494121") + std = float("0.00848653") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + 
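# Editor's note (not part of the generated patch content): the
# Program_weight_tensor_* entries in this weight_meta.py record only summary
# statistics for each parameter (shape, dtype, min/max and, where captured,
# mean/std) and always set data = None. As a hedged sketch of how such an
# entry could be reproduced from an in-memory array, the helper below is
# illustrative only; the name dump_weight_meta and its exact output
# formatting are assumptions, not something defined in this repository.
import numpy as np


def dump_weight_meta(name: str, array: np.ndarray) -> str:
    """Render one weight_meta.py-style class body for a parameter tensor."""
    lines = [
        f"class Program_weight_tensor_{name}:",
        f'    name = "{name}"',
        f"    shape = {list(array.shape)}",
        f'    dtype = "{array.dtype.name}"',
        f'    min_val = float("{array.min():g}")',
        f'    max_val = float("{array.max():g}")',
        f'    mean = float("{array.mean():g}")',
        f'    std = float("{array.std():g}")',
        "    data = None",
    ]
    return "\n".join(lines)


# Example: a 48-channel vector comparable to parameter_706 above.
print(dump_weight_meta("parameter_example", np.random.rand(48).astype("float32")))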
+ +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0903042") + max_val = float("0.0707569") + mean = float("-0.000342113") + std = float("0.00974196") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0793648") + max_val = float("0.0587558") + mean = float("-0.00142765") + std = float("0.0137979") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.057566") + max_val = float("0.0709009") + mean = float("-0.000249497") + std = float("0.00895742") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.10442") + max_val = float("0.0691325") + mean = float("-0.000258872") + std = float("0.0104529") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = 
None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.136744") + max_val = float("0.10005") + mean = float("-0.00171348") + std = float("0.0182976") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.101881") + max_val = float("0.143004") + mean = float("-0.000443039") + std = float("0.0175817") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [96] + dtype = "float32" + min_val = float("-3.42322") + max_val = float("3.28108") + mean = float("0.327995") + std = float("1.1473") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [96] + dtype = "float32" + min_val = float("0.874948") + max_val = float("4.92491") + mean = float("1.92226") + std = float("0.754909") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [96] + dtype = "float32" + min_val = float("0.349668") + max_val = float("14.7849") + mean = float("1.53931") + std = float("1.73025") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [96] + dtype = "float32" + min_val = float("-1.04846") + max_val = float("1.45518") + mean = float("-0.203391") + std = float("0.490156") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.112555") + max_val = float("0.100752") + mean = float("-0.000270096") + std = float("0.0100051") + data = None + + +class Program_weight_tensor_parameter_738: + name = "parameter_738" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_739: + name = "parameter_739" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_740: + name = "parameter_740" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_741: + name = "parameter_741" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_742: + name = "parameter_742" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.126906") + max_val = float("0.131715") + mean = float("-0.000428282") + std = float("0.0154308") + data = None + + +class Program_weight_tensor_parameter_743: + name = "parameter_743" + shape = 
[32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_744: + name = "parameter_744" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_745: + name = "parameter_745" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_746: + name = "parameter_746" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_747: + name = "parameter_747" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.2537") + max_val = float("0.156204") + mean = float("-0.000164754") + std = float("0.0200964") + data = None + + +class Program_weight_tensor_parameter_748: + name = "parameter_748" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_749: + name = "parameter_749" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_750: + name = "parameter_750" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_751: + name = "parameter_751" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.247045") + max_val = float("0.224081") + mean = float("-0.00143914") + std = float("0.0546739") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt deleted file mode 100644 index 95c7fa710..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -0ca9776f46c68e9c47d78262b0e7a676b5b201ebb813b00dd6cdd710477953d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py deleted file mode 100644 index 7fd3d63e3..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/input_meta.py +++ /dev/null @@ -1,76 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [8, 1, 4116] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00127551") - std = float("0.0356915") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8, 1, 1] - dtype = "int32" - data = [0, 0, 0, 0, 0, 0, 0, 3] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8, 4116] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00127551") - std = float("0.0356915") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [8, 1, 4] - dtype = "float32" - data = [ - 35.3684, - 311.273, - 300.632, - 442.182, - 78.6168, - 241.764, - 110.41, - 266.46, - 195.413, - 193.28, - 236.373, - 224.0, - 130.415, - 227.85, - 158.792, - 248.688, - 360.901, - 338.022, - 374.268, - 347.726, - 304.951, - 76.6956, - 336.0, - 109.565, - 295.171, - 70.9146, - 350.609, - 111.437, - 18.5379, - 0.0, - 293.517, - 337.836, - ] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [8, 4116, 4] - dtype = "float32" - min_val = 
float("-317.929") - max_val = float("740.992") - mean = float("224.215") - std = float("149.051") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py deleted file mode 100644 index 0619d9191..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/model.py +++ /dev/null @@ -1,287 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (8x4116xi64) <- (8x1x4116xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) - del full_0 - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("8"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") - del full_1, full_2, full_3 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) - del unsqueeze_0 - - # pd_op.cast: (8x1xi64) <- (8x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (8x4116xi64) <- (8x4116xi64, 8x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_0) - del argmax_0, cast_0 - - # pd_op.flatten: (8xi32) <- (8x1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) - del data_1 - - # pd_op.flatten: (32928xi64) <- (8x4116xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (32928xi32) <- (8xi32, 32928xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [8, 4116] - - # pd_op.reshape: (8x4116xi32) <- (32928xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) - del full_int_array_1, gather_0 - - # pd_op.full: (xf32) <- () - full_6 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (8x4116xb) <- (8x4116xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) - del data_2, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (8x4116xi32) <- (8x4116xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() - ) - del full_7 - - # pd_op.where: (8x4116xi32) <- (8x4116xb, 8x4116xi32, 8x4116xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 - - # 
pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) - del full_int_array_2 - - # pd_op.gather: (32928x4xf32) <- (8x4xf32, 32928xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) - del flatten_1, full_5, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_3 = [8, 4116, 4] - - # pd_op.reshape: (8x4116x4xf32) <- (32928x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) - del full_int_array_3, gather_1 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (8x4116x5xf32) <- (8x4116xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_8, where_0.dtype), full_8 - ) - del full_8 - - # pd_op.full: (4xi64) <- () - full_9 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_9, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), - ) - del full_9 - - # pd_op.index_select: (8x4116x4xf32) <- (8x4116x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2] - - # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) - del data_3 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [1] - - # pd_op.unsqueeze: (8x1x4116x4xf32) <- (8x4116x4xf32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) - del data_4, full_int_array_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [0] - - # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [2147483647] - - # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] - ) - del unsqueeze_1 - - # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] - ) - del full_int_array_6 - - # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] - ) - del full_int_array_4, full_int_array_7, unsqueeze_2 - - # pd_op.maximum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_11 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) - clip_0 = 
paddle._C_ops.clip(subtract_0, full_10, full_11) - del subtract_0 - - # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) - del clip_0 - - # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) - del subtract_1 - - # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) - del clip_1 - - # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) - del full_10, full_11, subtract_2 - - # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) - del clip_2 - - # pd_op.add: (8x1x4116xf32) <- (8x1x1xf32, 8x1x4116xf32) - add_1 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) - subtract_3 = paddle._C_ops.subtract(add_1, prod_0) - del add_1 - - # pd_op.scale: (8x1x4116xf32) <- (8x1x4116xf32, 1xf32) - scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) - del full_4, subtract_3 - - # pd_op.divide: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_1) - del prod_0, scale_1 - - # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) - multiply_1 = paddle._C_ops.multiply(divide_0, data_0) - del data_0, divide_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_8 = [-2] - - # pd_op.max: (8x4116xf32) <- (8x1x4116xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) - del full_int_array_8, multiply_1 - - # pd_op.unsqueeze: (8x4116x1xf32) <- (8x4116xf32, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) - del full_int_array_0, max_0 - - # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) - del index_select_0, unsqueeze_3, where_0 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt deleted file mode 100644 index 5a3b9f807..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -844261737f8230902d017b25798482fe873277e37f04757e32206af29ccc9250 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py deleted file mode 100644 index f692c519c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/model.py +++ /dev/null @@ -1,158 +0,0 @@ -import paddle - - -class 
GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2): - # pd_op.divide: (4116x2xf32) <- (4116x2xf32, 4116x1xf32) - divide_0 = paddle._C_ops.divide(data_1, data_2) - del data_1 - - # pd_op.shape64: (3xi64) <- (8x4116x68xf32) - shape64_0 = paddle._C_ops.shape64(data_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (8x4116x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_0, stack_0) - del data_0, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x4116x2xf32) <- (-1x-1x2xf32, 4116x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x4116x2xf32) <- (-1x-1x2xf32, 4116x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- 
() - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x4116x2xf32, -1x4116x2xf32]) <- (-1x4116x2xf32, -1x4116x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x4116x4xf32) <- ([-1x4116x2xf32, -1x4116x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.share_data_: (-1x4116x4xf32) <- (-1x4116x4xf32) - share_data__0 = concat_0.detach() - - # pd_op.multiply: (-1x4116x4xf32) <- (-1x4116x4xf32, 4116x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__0, data_2) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_2, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__0, - softmax_0, - split_1, - transpose_0, - ) - - return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py deleted file mode 100644 index 28198680e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/weight_meta.py +++ /dev/null @@ -1,7 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [1, 17, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt deleted file mode 100644 index 51f7ea2f5..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -15daaa1fb802e63eb7201d9224130b2d687280387657cf94c22b712a5ddfde9c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py deleted file mode 100644 index 3981bf949..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/input_meta.py +++ /dev/null @@ -1,49 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [4116, 4] - dtype = "float32" - min_val = float("-64.0") - max_val = float("512.0") - mean = float("224.0") - std = float("132.768") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8, 1, 4] - dtype = "float32" - data = [ - 35.3684, - 311.273, - 300.632, - 442.182, - 78.6168, - 241.764, - 110.41, - 266.46, - 195.413, - 193.28, - 236.373, - 224.0, - 130.415, - 227.85, - 158.792, - 248.688, - 360.901, - 338.022, - 374.268, - 347.726, - 304.951, - 76.6956, - 336.0, - 109.565, - 295.171, - 70.9146, - 350.609, - 111.437, - 18.5379, - 0.0, - 293.517, - 337.836, - ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py deleted file mode 100644 index 218186811..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/model.py +++ /dev/null @@ -1,263 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1): - # pd_op.full_int_array: (2xi64) <- () - 
full_int_array_0 = [-1, 4] - - # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_1, full_int_array_0) - del data_1, full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.unsqueeze: (8x1x4xf32) <- (8x4xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.unsqueeze: (1x4116x4xf32) <- (4116x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_0, full_int_array_2) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] - - # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2147483647] - - # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] - ) - del unsqueeze_0 - - # pd_op.slice: (1x4116x2xf32) <- (1x4116x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] - ) - - # pd_op.slice: (1x4116x2xf32) <- (1x4116x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] - ) - del full_int_array_4, unsqueeze_1 - - # pd_op.maximum: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (8x4116x2xf32) <- (8x4116x2xf32, 8x4116x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (8x4116x2xf32) <- (8x4116x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) - del subtract_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [-1] - - # pd_op.prod: (8x4116xf32) <- (8x4116x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) - del clip_0 - - # pd_op.subtract: (8x1x2xf32) <- (8x1x2xf32, 8x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (8x1x2xf32) <- (8x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) - del subtract_1 - - # pd_op.prod: (8x1xf32) <- (8x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) - del clip_1 - - # pd_op.subtract: (1x4116x2xf32) <- (1x4116x2xf32, 1x4116x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (1x4116x2xf32) <- (1x4116x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) - del full_0, full_1, subtract_2 - - # pd_op.prod: (1x4116xf32) <- (1x4116x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) - del clip_2, full_int_array_5 - - # pd_op.add: (8x4116xf32) <- (8x1xf32, 1x4116xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (8x4116xf32) <- (8x4116xf32, 8x4116xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) 
<- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (8x4116xf32) <- (8x4116xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-10"), True) - del full_2, subtract_3 - - # pd_op.divide: (8x4116xf32) <- (8x4116xf32, 8x4116xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_6 = [8, -1, 4116] - - # pd_op.reshape: (8x1x4116xf32) <- (8x4116xf32, 3xi64) - reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) - del divide_0 - - # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [3] - - # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] - ) - - # pd_op.add: (8xf32) <- (8xf32, 8xf32) - add_1 = paddle._C_ops.add(slice_4, slice_5) - del slice_4, slice_5 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (8xf32) <- (8xf32, 1xf32) - scale_1 = paddle._C_ops.scale(add_1, full_3, float("0"), True) - del add_1 - - # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_8 = [4] - - # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] - ) - del reshape_2 - - # pd_op.add: (8xf32) <- (8xf32, 8xf32) - add_2 = paddle._C_ops.add(slice_6, slice_7) - del slice_6, slice_7 - - # pd_op.scale: (8xf32) <- (8xf32, 1xf32) - scale_2 = paddle._C_ops.scale(add_2, full_3, float("0"), True) - del add_2 - - # builtin.combine: ([8xf32, 8xf32]) <- (8xf32, 8xf32) - combine_0 = [scale_1, scale_2] - del scale_1, scale_2 - - # pd_op.stack: (8x2xf32) <- ([8xf32, 8xf32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 - - # pd_op.unsqueeze: (8x1x2xf32) <- (8x2xf32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) - del stack_0 - - # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - data_0, [1], full_int_array_2, full_int_array_1, [1], [1] - ) - - # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - data_0, [1], full_int_array_3, full_int_array_7, [1], [1] - ) - - # pd_op.add: (4116xf32) <- (4116xf32, 4116xf32) - add_3 = paddle._C_ops.add(slice_8, slice_9) - del slice_8, slice_9 - - # pd_op.scale: (4116xf32) <- (4116xf32, 1xf32) - scale_3 = paddle._C_ops.scale(add_3, full_3, float("0"), True) - del add_3 - - # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - data_0, [1], full_int_array_1, full_int_array_3, [1], [1] - ) - del full_int_array_1, full_int_array_3 - - # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - data_0, [1], full_int_array_7, full_int_array_8, [1], [1] - ) - del data_0, full_int_array_7, full_int_array_8 - - # pd_op.add: (4116xf32) <- (4116xf32, 4116xf32) - add_4 = paddle._C_ops.add(slice_10, slice_11) - del slice_10, slice_11 - - # pd_op.scale: (4116xf32) <- (4116xf32, 1xf32) - scale_4 = paddle._C_ops.scale(add_4, full_3, float("0"), True) - del add_4, full_3 
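# Editor's note (not part of the generated patch content): the
# subgraph_13/model.py being deleted here is an exported graph that computes,
# for the 8 ground-truth boxes and 4116 anchor boxes (both in [x1, y1, x2, y2]
# form), their pairwise IoU and the L2 distance between box centers, returning
# both with shape (8, 1, 4116). A compact NumPy sketch of the same arithmetic
# is given below; the function and variable names are illustrative assumptions.
import numpy as np


def iou_and_center_dist(gt, anchors, eps=1e-10):
    # gt: (8, 4), anchors: (4116, 4); intersection = prod(clip(min(rb) - max(lt), 0))
    lt = np.maximum(gt[:, None, :2], anchors[None, :, :2])
    rb = np.minimum(gt[:, None, 2:], anchors[None, :, 2:])
    inter = np.clip(rb - lt, 0, None).prod(-1)                       # (8, 4116)
    area_gt = np.clip(gt[:, 2:] - gt[:, :2], 0, None).prod(-1)[:, None]
    area_an = np.clip(anchors[:, 2:] - anchors[:, :2], 0, None).prod(-1)[None, :]
    iou = inter / (area_gt + area_an - inter + eps)                  # union + eps, as in the graph
    gt_c = (gt[:, :2] + gt[:, 2:]) * 0.5                             # box centers
    an_c = (anchors[:, :2] + anchors[:, 2:]) * 0.5
    dist = np.linalg.norm(gt_c[:, None, :] - an_c[None, :, :], axis=-1)
    # The graph returns (center distance, IoU), each reshaped to (8, 1, 4116).
    return dist[:, None, :], iou[:, None, :]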
- - # builtin.combine: ([4116xf32, 4116xf32]) <- (4116xf32, 4116xf32) - combine_1 = [scale_3, scale_4] - del scale_3, scale_4 - - # pd_op.stack: (4116x2xf32) <- ([4116xf32, 4116xf32]) - stack_1 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # pd_op.unsqueeze: (1x4116x2xf32) <- (4116x2xf32, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) - del full_int_array_2 - - # pd_op.subtract: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) - subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) - del unsqueeze_2, unsqueeze_3 - - # pd_op.p_norm: (8x4116xf32) <- (8x4116x2xf32) - p_norm_0 = paddle._C_ops.p_norm( - subtract_4, float("2"), -1, float("1e-12"), False, False - ) - del subtract_4 - - # pd_op.reshape: (8x1x4116xf32) <- (8x4116xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) - del full_int_array_6, p_norm_0, stack_1 - - return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt deleted file mode 100644 index cf9cecf24..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py deleted file mode 100644 index a98e31988..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/model.py +++ /dev/null @@ -1,1144 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - ): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = 
paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] - - 
# pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 - ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - 
stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 - ) - del full_5, stack_5 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = 
[slice_2, full_4] - del full_4 - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (8x768x1x1xf32) <- (8x768x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 - - # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 8x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) - - # pd_op.conv2d: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (8x768x-1x-1xf32) <- 
(8x768x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 8x768x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) - - # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x768x-1x-1xf32, 4x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 - - # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 - - # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 8x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 - - # pd_op.conv2d: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (8x768x-1x-1xf32) <- (8x768x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x768x-1x-1xf32, 68x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 - - # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) 
- - # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) - del parameter_34 - - # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) - - # pd_op.conv2d: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) - - # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x384x-1x-1xf32, 4x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) - del parameter_27 - - # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) - del parameter_25 - - # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 8x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 - - # pd_op.conv2d: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - 
float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (8x384x-1x-1xf32) <- (8x384x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) - del parameter_18 - - # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) - del parameter_16 - - # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) - - # pd_op.conv2d: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) - - # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x192x-1x-1xf32, 4x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" 
- ) - del parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) - del parameter_9 - - # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) - del parameter_7 - - # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 8x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 - - # pd_op.conv2d: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (8x192x-1x-1xf32) <- (8x192x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 - - # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 - - # builtin.combine: ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32]) <- (8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (8x-1x4xf32) <- ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 - - # builtin.combine: ([8x-1x68xf32, 8x-1x68xf32, 
8x-1x68xf32]) <- (8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (8x-1x68xf32) <- ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt index 82d83ca0b..95c7fa710 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file +0ca9776f46c68e9c47d78262b0e7a676b5b201ebb813b00dd6cdd710477953d6 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py index 3e8b52976..7fd3d63e3 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/input_meta.py @@ -1,19 +1,76 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] + shape = [8, 1, 4116] dtype = "float32" - data = [0.118063] + max_val = float("1.0") + mean = float("0.00127551") + std = float("0.0356915") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "float32" - data = [0.630255] + shape = 
[8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] class Program_weight_tensor_data_2: name = "data_2" - shape = [] + shape = [8, 4116] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00127551") + std = float("0.0356915") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 1, 4] + dtype = "float32" + data = [ + 35.3684, + 311.273, + 300.632, + 442.182, + 78.6168, + 241.764, + 110.41, + 266.46, + 195.413, + 193.28, + 236.373, + 224.0, + 130.415, + 227.85, + 158.792, + 248.688, + 360.901, + 338.022, + 374.268, + 347.726, + 304.951, + 76.6956, + 336.0, + 109.565, + 295.171, + 70.9146, + 350.609, + 111.437, + 18.5379, + 0.0, + 293.517, + 337.836, + ] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [8, 4116, 4] dtype = "float32" - data = [4.07241] + min_val = float("-317.929") + max_val = float("740.992") + mean = float("224.215") + std = float("149.051") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py index 4cccb2b8e..0619d9191 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_2/model.py @@ -5,39 +5,283 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 + # pd_op.argmax: (8x4116xi64) <- (8x1x4116xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 - # pd_op.full: (1xf32) <- () + # pd_op.full: (1xf64) <- () full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) - del data_0 + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("8"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.add: (xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 + + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (8x4116xi64) <- (8x4116xi64, 8x1xi64) 
+ add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) del data_1 - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + # pd_op.flatten: (32928xi64) <- (8x4116xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (32928xi32) <- (8xi32, 32928xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 4116] + + # pd_op.reshape: (8x4116xi32) <- (32928xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 + + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (8x4116xb) <- (8x4116xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (8x4116xi32) <- (8x4116xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 + + # pd_op.where: (8x4116xi32) <- (8x4116xb, 8x4116xi32, 8x4116xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 + + # pd_op.gather: (32928x4xf32) <- (8x4xf32, 32928xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 4116, 4] + + # pd_op.reshape: (8x4116x4xf32) <- (32928x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 + + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (8x4116x5xf32) <- (8x4116xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 + + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 + + # pd_op.index_select: (8x4116x4xf32) <- (8x4116x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] + + # pd_op.unsqueeze: (8x1x4116x4xf32) <- (8x4116x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, 
full_int_array_5) + del data_4, full_int_array_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 + + # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] + ) + del full_int_array_6 + + # pd_op.slice: (8x1x4116x2xf32) <- (8x1x4116x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 + + # pd_op.maximum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (8x1x4116x2xf32) <- (8x1x1x2xf32, 8x1x4116x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf32) <- () + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 + + # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 + + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 + + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 + + # pd_op.subtract: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 8x1x4116x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (8x1x4116x2xf32) <- (8x1x4116x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 + + # pd_op.prod: (8x1x4116xf32) <- (8x1x4116x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 + + # pd_op.add: (8x1x4116xf32) <- (8x1x1xf32, 8x1x4116xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 + + # pd_op.scale: (8x1x4116xf32) <- (8x1x4116xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 + + # pd_op.divide: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 + + # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 + + # pd_op.full_int_array: (1xi64) <- () + 
full_int_array_8 = [-2] + + # pd_op.max: (8x4116xf32) <- (8x1x4116xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 + + # pd_op.unsqueeze: (8x4116x1xf32) <- (8x4116xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 + + # pd_op.multiply: (8x4116x4xf32) <- (8x4116x4xf32, 8x4116x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 - return add_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt index 27bd82e0e..51f7ea2f5 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/graph_hash.txt @@ -1 +1 @@ -8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file +15daaa1fb802e63eb7201d9224130b2d687280387657cf94c22b712a5ddfde9c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py index d6af4f831..3981bf949 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/input_meta.py @@ -1,67 +1,49 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [8, 4116] - dtype = "bool" - min_val = 0 - max_val = 2 + shape = [4116, 4] + dtype = "float32" + min_val = float("-64.0") + max_val = float("512.0") + mean = float("224.0") + std = float("132.768") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [8, 4116, 4] - dtype = "float32" - min_val = float("-9.93527") - max_val = float("63.9265") - mean = float("24.3499") - std = float("17.1828") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8, 4116, 4] + shape = [8, 1, 4] dtype = "float32" - max_val = float("55.2727") - mean = float("24.1964") - std = float("14.7865") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [8, 4116, 4] - dtype = "float32" - max_val = float("0.947339") - mean = float("0.000280391") - std = float("0.0157366") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [] - dtype = "float32" - data = [36.9308] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [8, 4116, 68] - dtype = "float32" - min_val = float("-6.52177") - max_val = float("13.8275") - mean = float("2.55256e-05") - std = float("1.49822") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [4116, 2] - dtype = "float32" - min_val = float("0.5") - max_val = float("55.5") - mean = float("24.3333") - std = float("16.0356") - data = None + data = [ + 35.3684, + 311.273, + 300.632, + 442.182, + 78.6168, + 241.764, + 110.41, + 266.46, + 195.413, + 193.28, + 236.373, + 224.0, + 130.415, + 227.85, + 158.792, + 248.688, + 360.901, + 338.022, + 374.268, + 347.726, + 304.951, + 76.6956, + 336.0, + 109.565, + 295.171, + 70.9146, + 350.609, + 111.437, + 18.5379, + 0.0, + 293.517, + 337.836, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py index 04dca1210..218186811 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_3/model.py @@ -5,505 +5,259 @@ 
class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (8x4116xi32) <- (8x4116xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.unsqueeze: (8x4116x1xi32) <- (8x4116xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] - - # pd_op.tile: (8x4116x4xi32) <- (8x4116x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 - - # pd_op.cast: (8x4116x4xb) <- (8x4116x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 - - # pd_op.masked_select: (-1xf32) <- (8x4116x4xf32, 8x4116x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 - + def forward(self, data_0, data_1): # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) - - # pd_op.masked_select: (-1xf32) <- (8x4116x4xf32, 8x4116x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 + full_int_array_0 = [-1, 4] - # pd_op.sum: (8x4116xf32) <- (8x4116x4xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_1, full_int_array_0) + del data_1, full_int_array_0 - # pd_op.masked_select: (-1xf32) <- (8x4116xf32, 8x4116xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 + # pd_op.unsqueeze: (8x1x4xf32) <- (8x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(reshape_2, full_int_array_1) - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) + # pd_op.unsqueeze: (1x4116x4xf32) <- (4116x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_0, full_int_array_2) - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_2, full_int_array_3, [1], [] ) - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - 
) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2147483647] - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 + # pd_op.slice: (8x1x2xf32) <- (8x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del unsqueeze_0 - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, split_4) + # pd_op.slice: (1x4116x2xf32) <- (1x4116x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_2, full_int_array_3, [1], [] + ) - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) + # pd_op.slice: (1x4116x2xf32) <- (1x4116x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [2], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_4, unsqueeze_1 - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) + # pd_op.maximum: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) + # pd_op.minimum: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + # pd_op.subtract: (8x4116x2xf32) <- (8x4116x2xf32, 8x4116x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( + full_0 = paddle._C_ops.full( [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( + full_1 = paddle._C_ops.full( [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 + # pd_op.clip: (8x4116x2xf32) <- (8x4116x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + # pd_op.prod: (8x4116xf32) <- (8x4116x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_5, False, False) + del clip_0 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) + # pd_op.subtract: (8x1x2xf32) <- (8x1x2xf32, 8x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) + # pd_op.clip: 
(8x1x2xf32) <- (8x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + # pd_op.prod: (8x1xf32) <- (8x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_5, False, False) + del clip_1 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) + # pd_op.subtract: (1x4116x2xf32) <- (1x4116x2xf32, 1x4116x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) + # pd_op.clip: (1x4116x2xf32) <- (1x4116x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_0, full_1, subtract_2 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 + # pd_op.prod: (1x4116xf32) <- (1x4116x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_5, False, False) + del clip_2, full_int_array_5 - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) + # pd_op.add: (8x4116xf32) <- (8x1xf32, 1x4116xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + # pd_op.subtract: (8x4116xf32) <- (8x4116xf32, 8x4116xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( + full_2 = paddle._C_ops.full( [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) + # pd_op.scale: (8x4116xf32) <- (8x4116xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-10"), True) + del full_2, subtract_3 - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) + # pd_op.divide: (8x4116xf32) <- (8x4116xf32, 8x4116xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [8, -1, 4116] - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + # pd_op.reshape: (8x1x4116xf32) <- (8x4116xf32, 3xi64) + reshape_1 = paddle._C_ops.reshape(divide_0, full_int_array_6) + del divide_0 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, 
float("1e-10"), True) - del multiply_3 + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_2, full_int_array_1, [1], [1] + ) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [3] - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_3, full_int_array_7, [1], [1] + ) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_1 = paddle._C_ops.add(slice_4, slice_5) + del slice_4, slice_5 # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + full_3 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_1 = paddle._C_ops.scale(add_1, full_3, float("0"), True) + del add_1 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] - - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) - - # pd_op.unsqueeze: (8x4116x1xb) <- (8x4116xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_1, full_int_array_3, [1], [1] + ) - # pd_op.cast: (8x4116x1xi32) <- (8x4116x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [4] - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 68] + # pd_op.slice: (8xf32) <- (8x4xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + reshape_2, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del reshape_2 - # pd_op.tile: (8x4116x68xi32) <- (8x4116x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 + # pd_op.add: (8xf32) <- (8xf32, 8xf32) + add_2 = paddle._C_ops.add(slice_6, slice_7) + del slice_6, slice_7 - # pd_op.cast: (8x4116x68xb) <- (8x4116x68xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 + # pd_op.scale: (8xf32) <- (8xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_2, full_3, float("0"), True) + del add_2 - # pd_op.masked_select: (-1xf32) <- (8x4116x68xf32, 8x4116x68xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 + # builtin.combine: ([8xf32, 8xf32]) <- (8xf32, 8xf32) + combine_0 = [scale_1, scale_2] + del scale_1, scale_2 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 17] + # pd_op.stack: (8x2xf32) <- ([8xf32, 8xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 - 
# pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 + # pd_op.unsqueeze: (8x1x2xf32) <- (8x2xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(stack_0, full_int_array_1) + del stack_0 - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_1, [1], [1] ) - # pd_op.split_with_num: ([8x4116x2xf32, 8x4116x2xf32]) <- (8x4116x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 - - # builtin.split: (8x4116x2xf32, 8x4116x2xf32) <- ([8x4116x2xf32, 8x4116x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 - - # pd_op.subtract: (8x4116x2xf32) <- (4116x2xf32, 8x4116x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 - - # pd_op.subtract: (8x4116x2xf32) <- (8x4116x2xf32, 4116x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_0, [1], full_int_array_3, full_int_array_7, [1], [1] ) - # builtin.combine: ([8x4116x2xf32, 8x4116x2xf32]) <- (8x4116x2xf32, 8x4116x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 + # pd_op.add: (4116xf32) <- (4116xf32, 4116xf32) + add_3 = paddle._C_ops.add(slice_8, slice_9) + del slice_8, slice_9 - # pd_op.concat: (8x4116x4xf32) <- ([8x4116x2xf32, 8x4116x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 + # pd_op.scale: (4116xf32) <- (4116xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_3, full_3, float("0"), True) + del add_3 - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_3, [1], [1] ) + del full_int_array_1, full_int_array_3 - # pd_op.clip: (8x4116x4xf32) <- (8x4116x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) - del concat_0, full_7 - - # pd_op.masked_select: (-1xf32) <- (8x4116x4xf32, 8x4116x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 - - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) - - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + # pd_op.slice: (4116xf32) <- (4116x4xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_0, [1], full_int_array_7, full_int_array_8, [1], [1] + ) + del data_0, full_int_array_7, full_int_array_8 - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + # pd_op.add: (4116xf32) <- (4116xf32, 4116xf32) + add_4 = paddle._C_ops.add(slice_10, slice_11) + del slice_10, slice_11 - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 
= paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 + # pd_op.scale: (4116xf32) <- (4116xf32, 1xf32) + scale_4 = paddle._C_ops.scale(add_4, full_3, float("0"), True) + del add_4, full_3 - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + # builtin.combine: ([4116xf32, 4116xf32]) <- (4116xf32, 4116xf32) + combine_1 = [scale_3, scale_4] + del scale_3, scale_4 - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) - del cast_4 + # pd_op.stack: (4116x2xf32) <- ([4116xf32, 4116xf32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 + # pd_op.unsqueeze: (1x4116x2xf32) <- (4116x2xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(stack_1, full_int_array_2) + del full_int_array_2 - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) + # pd_op.subtract: (8x4116x2xf32) <- (8x1x2xf32, 1x4116x2xf32) + subtract_4 = paddle._C_ops.subtract(unsqueeze_2, unsqueeze_3) + del unsqueeze_2, unsqueeze_3 - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 + # pd_op.p_norm: (8x4116xf32) <- (8x4116x2xf32) + p_norm_0 = paddle._C_ops.p_norm( + subtract_4, float("2"), -1, float("1e-12"), False, False ) + del subtract_4 - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 + # pd_op.reshape: (8x1x4116xf32) <- (8x4116xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(p_norm_0, full_int_array_6) + del full_int_array_6, p_norm_0, stack_1 - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( 
- abs_0, - add_0, - add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) - - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) + return reshape_0, reshape_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/graph_hash.txt similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/graph_hash.txt rename to paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/graph_hash.txt diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/graph_net.json similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/graph_net.json rename to paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/graph_net.json diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/model.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/model.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/model.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_16/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_4/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt index 04a94e06c..efa879b36 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -bad9511933297116e3f564919a85809c5c29647d857a25c4a19f4a67e2c0b51c \ No newline at end of file +e98172b16f1b0a5022e17341958c7348d3c38f47fbbd435acea6cb34167725f0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py index 09671b32f..e7cef1f03 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/input_meta.py @@ -1,134 +1,31 @@ class 
Program_weight_tensor_data_0: name = "data_0" - shape = [1] + shape = [8, 768, 14, 14] dtype = "float32" - data = [1.00241] + min_val = float("-0.278465") + max_val = float("8.94092") + mean = float("0.271046") + std = float("0.622828") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [1] + shape = [8, 384, 28, 28] dtype = "float32" - data = [1.00237] + min_val = float("-0.278465") + max_val = float("11.2275") + mean = float("0.375277") + std = float("0.700531") + data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1] - dtype = "float32" - data = [1.00236] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_10: - name = "data_10" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_11: - name = "data_11" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_12: - name = "data_12" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_13: - name = "data_13" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_14: - name = "data_14" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_15: - name = "data_15" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_16: - name = "data_16" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_17: - name = "data_17" - shape = [1] - dtype = "float32" - data = [1.00237] - - -class Program_weight_tensor_data_18: - name = "data_18" - shape = [2, 3, 640, 640] + shape = [8, 192, 56, 56] dtype = "float32" - max_val = float("1.0") - mean = float("0.471598") - std = float("0.270715") + min_val = float("-0.278465") + max_val = float("12.1434") + mean = float("0.487172") + std = float("0.770854") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py index 1eb1d0609..f2c6c745a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/model.py @@ -61,6489 +61,328 @@ def forward( parameter_51, parameter_52, parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - 
parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, 
- parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - 
parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - parameter_698, - parameter_699, - parameter_700, - parameter_701, - parameter_702, - parameter_703, - parameter_704, - parameter_705, - parameter_706, - parameter_707, - parameter_708, - parameter_709, - parameter_710, - parameter_711, - parameter_712, - parameter_713, - parameter_714, - 
parameter_715, - parameter_716, - parameter_717, - parameter_718, - parameter_719, - parameter_720, - parameter_721, - parameter_722, - parameter_723, - parameter_724, - parameter_725, - parameter_726, - parameter_727, - parameter_728, - parameter_729, - parameter_730, - parameter_731, - parameter_732, - parameter_733, - parameter_734, - parameter_735, - parameter_736, - parameter_737, - parameter_738, - parameter_739, - parameter_740, - parameter_741, - parameter_742, - parameter_743, - parameter_744, - parameter_745, - parameter_746, - parameter_747, - parameter_748, - parameter_749, - parameter_750, - parameter_751, - parameter_752, data_0, data_1, data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - data_11, - data_12, - data_13, - data_14, - data_15, - data_16, - data_17, - data_18, ): - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x3x-1x-1xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_18, parameter_752, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_18, parameter_752 - - # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_751, - parameter_750, - parameter_749, - parameter_748, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_0, parameter_748, parameter_749, parameter_750, parameter_751 - - # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - del batch_norm__0 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_747, swish_0 - - # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_746, - parameter_745, - parameter_744, - parameter_743, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_1, parameter_743, parameter_744, parameter_745, parameter_746 - - # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - del batch_norm__6 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_1, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_742, swish_1 - - # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_741, - parameter_740, - parameter_739, - parameter_738, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
conv2d_2, parameter_738, parameter_739, parameter_740, parameter_741 - - # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - del batch_norm__12 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_2, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_737, swish_2 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_736, - parameter_735, - parameter_734, - parameter_733, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_3, parameter_733, parameter_734, parameter_735, parameter_736 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - del batch_norm__18 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_3, parameter_732, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_732 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_731, - parameter_730, - parameter_729, - parameter_728, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_4, parameter_728, parameter_729, parameter_730, parameter_731 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - del batch_norm__24 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_3, parameter_727, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_727, swish_3 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_726, - parameter_725, - parameter_724, - parameter_723, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_5, parameter_723, parameter_724, parameter_725, parameter_726 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - del batch_norm__30 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_5, parameter_722, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_722 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - 
batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_721, - parameter_720, - parameter_719, - parameter_718, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_6, parameter_718, parameter_719, parameter_720, parameter_721 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__36) - del batch_norm__36 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_6, parameter_717, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_717 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_716, - parameter_715, - parameter_714, - parameter_713, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_7, parameter_713, parameter_714, parameter_715, parameter_716 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_6, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_712, swish_6 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_711, - parameter_710, - parameter_709, - parameter_708, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_8, parameter_708, parameter_709, parameter_710, parameter_711 - - # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del batch_norm__48, data_0 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) - del batch_norm__42, multiply_0 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_7 = paddle._C_ops.swish(add_0) - del add_0 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_5, swish_7) - del swish_5, swish_7 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_707 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_706, - parameter_705, - parameter_704, - parameter_703, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - 
del conv2d_9, parameter_703, parameter_704, parameter_705, parameter_706 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_8 = paddle._C_ops.swish(batch_norm__54) - del batch_norm__54 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_8, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_702 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_701, - parameter_700, - parameter_699, - parameter_698, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_10, parameter_698, parameter_699, parameter_700, parameter_701 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_8, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_697, swish_8 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_696, - parameter_695, - parameter_694, - parameter_693, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_11, parameter_693, parameter_694, parameter_695, parameter_696 - - # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del batch_norm__66, data_1 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) - del batch_norm__60, multiply_1 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_9 = paddle._C_ops.swish(add_2) - del add_2 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_9) - del add_1, swish_9 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_692 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_691, - parameter_690, - parameter_689, - parameter_688, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_12, parameter_688, parameter_689, parameter_690, parameter_691 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(batch_norm__72) - del batch_norm__72 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_10, 
parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_687 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_686, - parameter_685, - parameter_684, - parameter_683, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_13, parameter_683, parameter_684, parameter_685, parameter_686 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_10, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_682, swish_10 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_681, - parameter_680, - parameter_679, - parameter_678, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_14, parameter_678, parameter_679, parameter_680, parameter_681 - - # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) - del batch_norm__84, data_2 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) - del batch_norm__78, multiply_2 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(add_4) - del add_4 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_5 = paddle._C_ops.add(add_3, swish_11) - del add_3, swish_11 - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - combine_0 = [swish_4, add_5] - del add_5, swish_4 - - # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_677, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_0, parameter_677 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_676, full_int_array_1) - del parameter_676 - - # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - del conv2d_15, reshape_0 - - # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- 
(-1x96x-1x-1xf32, -1x96x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) - del concat_2, hardsigmoid_0 - - # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_3, parameter_675, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_3, parameter_675 - - # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_674, - parameter_673, - parameter_672, - parameter_671, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_16, parameter_671, parameter_672, parameter_673, parameter_674 - - # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) - swish_12 = paddle._C_ops.swish(batch_norm__90) - del batch_norm__90 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_12, parameter_670, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_670, swish_12 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_669, - parameter_668, - parameter_667, - parameter_666, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_17, parameter_666, parameter_667, parameter_668, parameter_669 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__96) - del batch_norm__96 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_13, parameter_665, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_665 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_664, - parameter_663, - parameter_662, - parameter_661, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_18, parameter_661, parameter_662, parameter_663, parameter_664 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__102) - del batch_norm__102 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_13, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_660, swish_13 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - 
batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_659, - parameter_658, - parameter_657, - parameter_656, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_19, parameter_656, parameter_657, parameter_658, parameter_659 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__108) - del batch_norm__108 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_15, parameter_655, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_655 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_654, - parameter_653, - parameter_652, - parameter_651, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_20, parameter_651, parameter_652, parameter_653, parameter_654 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__114) - del batch_norm__114 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_16, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_650 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_649, - parameter_648, - parameter_647, - parameter_646, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_21, parameter_646, parameter_647, parameter_648, parameter_649 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_16, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_645, swish_16 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_644, - parameter_643, - parameter_642, - parameter_641, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_22, parameter_641, parameter_642, parameter_643, parameter_644 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del batch_norm__126, data_3 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) - del batch_norm__120, 
multiply_4 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_17 = paddle._C_ops.swish(add_7) - del add_7 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_8 = paddle._C_ops.add(swish_15, swish_17) - del swish_15, swish_17 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_640 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_639, - parameter_638, - parameter_637, - parameter_636, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_23, parameter_636, parameter_637, parameter_638, parameter_639 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(batch_norm__132) - del batch_norm__132 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_635 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_634, - parameter_633, - parameter_632, - parameter_631, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_24, parameter_631, parameter_632, parameter_633, parameter_634 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_18, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_630, swish_18 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_629, - parameter_628, - parameter_627, - parameter_626, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_25, parameter_626, parameter_627, parameter_628, parameter_629 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) - del batch_norm__144, data_4 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) - del batch_norm__138, multiply_5 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(add_9) - del add_9 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_19) - del add_8, swish_19 - - # pd_op.conv2d: 
(-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_625 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_624, - parameter_623, - parameter_622, - parameter_621, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_26, parameter_621, parameter_622, parameter_623, parameter_624 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(batch_norm__150) - del batch_norm__150 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_20, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_27, parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_20, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615, swish_20 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_28, parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del batch_norm__162, data_5 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) - del batch_norm__156, multiply_6 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(add_11) - del add_11 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_21) - del add_10, swish_21 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 
96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_29, parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(batch_norm__168) - del batch_norm__168 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_22, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_605 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_604, - parameter_603, - parameter_602, - parameter_601, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_30, parameter_601, parameter_602, parameter_603, parameter_604 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_22, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_600, swish_22 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_599, - parameter_598, - parameter_597, - parameter_596, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_31, parameter_596, parameter_597, parameter_598, parameter_599 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) - del batch_norm__180, data_6 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) - del batch_norm__174, multiply_7 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(add_13) - del add_13 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_14 = paddle._C_ops.add(add_12, swish_23) - del add_12, swish_23 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_595 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_594, - parameter_593, - parameter_592, - parameter_591, - 
True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_32, parameter_591, parameter_592, parameter_593, parameter_594 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(batch_norm__186) - del batch_norm__186 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_24, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_590 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_589, - parameter_588, - parameter_587, - parameter_586, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_33, parameter_586, parameter_587, parameter_588, parameter_589 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_24, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_585, swish_24 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_584, - parameter_583, - parameter_582, - parameter_581, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_34, parameter_581, parameter_582, parameter_583, parameter_584 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) - del batch_norm__198, data_7 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) - del batch_norm__192, multiply_8 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_25 = paddle._C_ops.swish(add_15) - del add_15 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_16 = paddle._C_ops.add(add_14, swish_25) - del add_14, swish_25 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_580 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_579, - parameter_578, - parameter_577, - parameter_576, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_35, parameter_576, parameter_577, parameter_578, parameter_579 - - # pd_op.swish: (-1x96x-1x-1xf32) 
<- (-1x96x-1x-1xf32) - swish_26 = paddle._C_ops.swish(batch_norm__204) - del batch_norm__204 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_26, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_575 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_574, - parameter_573, - parameter_572, - parameter_571, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_36, parameter_571, parameter_572, parameter_573, parameter_574 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_26, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_570, swish_26 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_569, - parameter_568, - parameter_567, - parameter_566, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_37, parameter_566, parameter_567, parameter_568, parameter_569 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) - del batch_norm__216, data_8 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) - del batch_norm__210, multiply_9 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_27 = paddle._C_ops.swish(add_17) - del add_17 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_18 = paddle._C_ops.add(add_16, swish_27) - del add_16, swish_27 - - # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - combine_1 = [swish_14, add_18] - del add_18, swish_14 - - # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_565, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_1, parameter_565 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_564, full_int_array_1) - del parameter_564 - - # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - del conv2d_38, reshape_1 - - # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 
-1x192x1x1xf32) - multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) - del concat_3, hardsigmoid_1 - - # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_10, parameter_563, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_10, parameter_563 - - # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_562, - parameter_561, - parameter_560, - parameter_559, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_39, parameter_559, parameter_560, parameter_561, parameter_562 - - # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) - swish_28 = paddle._C_ops.swish(batch_norm__222) - del batch_norm__222 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_28, parameter_558, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_558 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_557, - parameter_556, - parameter_555, - parameter_554, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_40, parameter_554, parameter_555, parameter_556, parameter_557 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__228) - del batch_norm__228 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_29, parameter_553, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_553 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_552, - parameter_551, - parameter_550, - parameter_549, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_41, parameter_549, parameter_550, parameter_551, parameter_552 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__234) - del batch_norm__234 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_29, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_548, swish_29 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - 
batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_547, - parameter_546, - parameter_545, - parameter_544, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_42, parameter_544, parameter_545, parameter_546, parameter_547 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__240) - del batch_norm__240 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_31, parameter_543, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_543 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_542, - parameter_541, - parameter_540, - parameter_539, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_43, parameter_539, parameter_540, parameter_541, parameter_542 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__246) - del batch_norm__246 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_32, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_538 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_537, - parameter_536, - parameter_535, - parameter_534, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_44, parameter_534, parameter_535, parameter_536, parameter_537 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_32, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_533, swish_32 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_532, - parameter_531, - parameter_530, - parameter_529, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_45, parameter_529, parameter_530, parameter_531, parameter_532 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del batch_norm__258, data_9 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_20 
= paddle._C_ops.add(batch_norm__252, multiply_11) - del batch_norm__252, multiply_11 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(add_20) - del add_20 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_21 = paddle._C_ops.add(swish_31, swish_33) - del swish_31, swish_33 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_528 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_527, - parameter_526, - parameter_525, - parameter_524, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_46, parameter_524, parameter_525, parameter_526, parameter_527 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(batch_norm__264) - del batch_norm__264 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_34, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_523 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_522, - parameter_521, - parameter_520, - parameter_519, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_47, parameter_519, parameter_520, parameter_521, parameter_522 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_34, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_518, swish_34 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_517, - parameter_516, - parameter_515, - parameter_514, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_48, parameter_514, parameter_515, parameter_516, parameter_517 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) - del batch_norm__276, data_10 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) - del batch_norm__270, multiply_12 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(add_22) - del add_22 - - # pd_op.add: 
(-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_23 = paddle._C_ops.add(add_21, swish_35) - del add_21, swish_35 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_513 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_512, - parameter_511, - parameter_510, - parameter_509, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_49, parameter_509, parameter_510, parameter_511, parameter_512 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(batch_norm__282) - del batch_norm__282 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_36, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_50, parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_36, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503, swish_36 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_51, parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) - del batch_norm__294, data_11 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) - del batch_norm__288, multiply_13 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(add_24) - del add_24 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_25 = paddle._C_ops.add(add_23, swish_37) - del add_23, swish_37 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_52 = 
paddle._C_ops.conv2d( - add_25, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_52, parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(batch_norm__300) - del batch_norm__300 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_38, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_493 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_492, - parameter_491, - parameter_490, - parameter_489, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_53, parameter_489, parameter_490, parameter_491, parameter_492 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_38, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_488, swish_38 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_487, - parameter_486, - parameter_485, - parameter_484, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_54, parameter_484, parameter_485, parameter_486, parameter_487 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) - del batch_norm__312, data_12 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) - del batch_norm__306, multiply_14 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_39 = paddle._C_ops.swish(add_26) - del add_26 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_27 = paddle._C_ops.add(add_25, swish_39) - del add_25, swish_39 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_483 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 
192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_482, - parameter_481, - parameter_480, - parameter_479, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_55, parameter_479, parameter_480, parameter_481, parameter_482 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_40 = paddle._C_ops.swish(batch_norm__318) - del batch_norm__318 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_40, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_478 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_477, - parameter_476, - parameter_475, - parameter_474, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_56, parameter_474, parameter_475, parameter_476, parameter_477 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_40, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_473, swish_40 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_472, - parameter_471, - parameter_470, - parameter_469, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_57, parameter_469, parameter_470, parameter_471, parameter_472 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) - del batch_norm__330, data_13 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) - del batch_norm__324, multiply_15 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_41 = paddle._C_ops.swish(add_28) - del add_28 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_29 = paddle._C_ops.add(add_27, swish_41) - del add_27, swish_41 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_468 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - 
parameter_467, - parameter_466, - parameter_465, - parameter_464, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_58, parameter_464, parameter_465, parameter_466, parameter_467 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(batch_norm__336) - del batch_norm__336 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_42, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_463 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_462, - parameter_461, - parameter_460, - parameter_459, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_59, parameter_459, parameter_460, parameter_461, parameter_462 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_42, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_458, swish_42 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_457, - parameter_456, - parameter_455, - parameter_454, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_60, parameter_454, parameter_455, parameter_456, parameter_457 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) - del batch_norm__348, data_14 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) - del batch_norm__342, multiply_16 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(add_30) - del add_30 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_31 = paddle._C_ops.add(add_29, swish_43) - del add_29, swish_43 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - combine_2 = [swish_30, add_31] - del add_31, swish_30 - - # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) - - # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_453, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_2, parameter_453 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_452, 
full_int_array_1) - del parameter_452 - - # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - del conv2d_61, reshape_2 - - # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) - multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) - del concat_4, hardsigmoid_2 - - # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_17, parameter_451, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_17, parameter_451 - - # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_450, - parameter_449, - parameter_448, - parameter_447, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_62, parameter_447, parameter_448, parameter_449, parameter_450 - - # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) - swish_44 = paddle._C_ops.swish(batch_norm__354) - del batch_norm__354 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x512x-1x-1xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_44, parameter_446, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_446 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_445, - parameter_444, - parameter_443, - parameter_442, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_63, parameter_442, parameter_443, parameter_444, parameter_445 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__360) - del batch_norm__360 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_45, parameter_441, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_441 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_440, - parameter_439, - parameter_438, - parameter_437, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_64, parameter_437, parameter_438, parameter_439, parameter_440 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__366) - del batch_norm__366 - - # pd_op.conv2d: 
(-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_45, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_436, swish_45 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_435, - parameter_434, - parameter_433, - parameter_432, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_65, parameter_432, parameter_433, parameter_434, parameter_435 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__372) - del batch_norm__372 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_47, parameter_431, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_431 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_430, - parameter_429, - parameter_428, - parameter_427, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_66, parameter_427, parameter_428, parameter_429, parameter_430 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__378) - del batch_norm__378 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_48, parameter_426, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_426 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_425, - parameter_424, - parameter_423, - parameter_422, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_67, parameter_422, parameter_423, parameter_424, parameter_425 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_48, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_421, swish_48 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_420, - parameter_419, - parameter_418, - parameter_417, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: 
out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_68, parameter_417, parameter_418, parameter_419, parameter_420 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) - multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) - del batch_norm__390, data_15 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) - del batch_norm__384, multiply_18 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_49 = paddle._C_ops.swish(add_33) - del add_33 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_34 = paddle._C_ops.add(swish_47, swish_49) - del swish_47, swish_49 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_416 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_415, - parameter_414, - parameter_413, - parameter_412, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_69, parameter_412, parameter_413, parameter_414, parameter_415 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_50 = paddle._C_ops.swish(batch_norm__396) - del batch_norm__396 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_50, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_411 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_410, - parameter_409, - parameter_408, - parameter_407, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_70, parameter_407, parameter_408, parameter_409, parameter_410 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_50, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_406, swish_50 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_405, - parameter_404, - parameter_403, - parameter_402, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_71, parameter_402, parameter_403, parameter_404, parameter_405 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, 
-1x384x-1x-1xf32) - multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) - del batch_norm__408, data_16 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) - del batch_norm__402, multiply_19 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_51 = paddle._C_ops.swish(add_35) - del add_35 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_36 = paddle._C_ops.add(add_34, swish_51) - del add_34, swish_51 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_401 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_400, - parameter_399, - parameter_398, - parameter_397, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_72, parameter_397, parameter_398, parameter_399, parameter_400 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_52 = paddle._C_ops.swish(batch_norm__414) - del batch_norm__414 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_52, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_396 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_395, - parameter_394, - parameter_393, - parameter_392, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_73, parameter_392, parameter_393, parameter_394, parameter_395 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_52, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_391, swish_52 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_390, - parameter_389, - parameter_388, - parameter_387, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_74, parameter_387, parameter_388, parameter_389, parameter_390 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) - multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) - del batch_norm__426, data_17 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_37 = 
paddle._C_ops.add(batch_norm__420, multiply_20) - del batch_norm__420, multiply_20 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_53 = paddle._C_ops.swish(add_37) - del add_37 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_38 = paddle._C_ops.add(add_36, swish_53) - del add_36, swish_53 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_3 = [swish_46, add_38] - del add_38, swish_46 - - # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) - del full_int_array_0 - - # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_386, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_3, parameter_386 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_385, full_int_array_1) - del parameter_385 - - # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - del conv2d_75, reshape_3 - - # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) - multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) - del concat_5, hardsigmoid_3 - - # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x768x-1x-1xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_21, parameter_384, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_21, parameter_384 - - # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_383, - parameter_382, - parameter_381, - parameter_380, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_76, parameter_380, parameter_381, parameter_382, parameter_383 - - # pd_op.swish: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) - swish_54 = paddle._C_ops.swish(batch_norm__432) - del batch_norm__432 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - swish_54, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_379 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_378, - parameter_377, - parameter_376, - parameter_375, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 - - # 
pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_55 = paddle._C_ops.swish(batch_norm__438) - del batch_norm__438 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - swish_54, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_374, swish_54 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_373, - parameter_372, - parameter_371, - parameter_370, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__444) - del batch_norm__444 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_369, swish_56 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_368, - parameter_367, - parameter_366, - parameter_365, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_57 = paddle._C_ops.swish(batch_norm__450) - del batch_norm__450 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_364 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_363, - parameter_362, - parameter_361, - parameter_360, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_359, swish_57 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, 
- parameter_358, - parameter_357, - parameter_356, - parameter_355, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_40 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - del batch_norm__456, batch_norm__462 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_58 = paddle._C_ops.swish(add_40) - del add_40 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_354, swish_58 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_353, - parameter_352, - parameter_351, - parameter_350, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_59 = paddle._C_ops.swish(batch_norm__468) - del batch_norm__468 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_349 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_348, - parameter_347, - parameter_346, - parameter_345, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_344, swish_59 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_343, - parameter_342, - parameter_341, - parameter_340, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_41 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - del batch_norm__474, batch_norm__480 - - # pd_op.swish: 
(-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_60 = paddle._C_ops.swish(add_41) - del add_41 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] - - # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_60, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_60, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_3 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_60, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_4 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_4 = [swish_60, pool2d_0, pool2d_1, pool2d_2] - del pool2d_0, pool2d_1, pool2d_2, swish_60 - - # pd_op.concat: (-1x1536x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1536x-1x-1xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_6, parameter_339 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_338, - parameter_337, - parameter_336, - parameter_335, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_61 = paddle._C_ops.swish(batch_norm__486) - del batch_norm__486 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_334, swish_61 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_333, - parameter_332, - parameter_331, - parameter_330, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_62 = paddle._C_ops.swish(batch_norm__492) - del batch_norm__492 - - # 
pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_329 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_328, - parameter_327, - parameter_326, - parameter_325, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324, swish_62 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_42 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - del batch_norm__498, batch_norm__504 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_63 = paddle._C_ops.swish(add_42) - del add_42 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_5 = [swish_55, swish_63] - del swish_55, swish_63 - - # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_7, parameter_319 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_64 = paddle._C_ops.swish(batch_norm__510) - del batch_norm__510 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_314 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_65 = paddle._C_ops.swish(batch_norm__516) - del batch_norm__516 - - # pd_op.nearest_interp: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_65, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_65 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x512x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x512x-1x-1xf32) - combine_6 = [nearest_interp_0, swish_44] - del nearest_interp_0, swish_44 - - # pd_op.concat: (-1x896x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x512x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_66 = paddle._C_ops.swish(batch_norm__522) - del batch_norm__522 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_8, parameter_304 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_67 = paddle._C_ops.swish(batch_norm__528) - del batch_norm__528 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_67, parameter_299, [1, 
1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299, swish_67 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_68 = paddle._C_ops.swish(batch_norm__534) - del batch_norm__534 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289, swish_68 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_43 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - del batch_norm__540, batch_norm__546 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_69 = paddle._C_ops.swish(add_43) - del add_43 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284, swish_69 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - True, - float("0.9"), - float("1e-05"), - 
"NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_70 = paddle._C_ops.swish(batch_norm__552) - del batch_norm__552 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274, swish_70 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_44 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - del batch_norm__558, batch_norm__564 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_71 = paddle._C_ops.swish(add_44) - del add_44 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269, swish_71 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_72 = paddle._C_ops.swish(batch_norm__570) - del batch_norm__570 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259, swish_72 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_45 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - del batch_norm__576, batch_norm__582 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_73 = paddle._C_ops.swish(add_45) - del add_45 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - combine_7 = [swish_66, swish_73] - del swish_66, swish_73 - - # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_9, parameter_254 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_74 = paddle._C_ops.swish(batch_norm__588) - del batch_norm__588 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - 
batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_75 = paddle._C_ops.swish(batch_norm__594) - del batch_norm__594 - - # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_75, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_75 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) - combine_8 = [nearest_interp_1, swish_28] - del nearest_interp_1, swish_28 - - # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_76 = paddle._C_ops.swish(batch_norm__600) - del batch_norm__600 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_10, parameter_239 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_77 = paddle._C_ops.swish(batch_norm__606) - del batch_norm__606 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234, swish_77 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 
96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_78 = paddle._C_ops.swish(batch_norm__612) - del batch_norm__612 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224, swish_78 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_46 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - del batch_norm__618, batch_norm__624 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_79 = paddle._C_ops.swish(add_46) - del add_46 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219, swish_79 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.swish: 
(-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_80 = paddle._C_ops.swish(batch_norm__630) - del batch_norm__630 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209, swish_80 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_47 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - del batch_norm__636, batch_norm__642 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_81 = paddle._C_ops.swish(add_47) - del add_47 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204, swish_81 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_82 = paddle._C_ops.swish(batch_norm__648) - del batch_norm__648 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - 
batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194, swish_82 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_48 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - del batch_norm__654, batch_norm__660 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_83 = paddle._C_ops.swish(add_48) - del add_48 - - # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - combine_9 = [swish_76, swish_83] - del swish_76, swish_83 - - # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_11, parameter_189 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_84 = paddle._C_ops.swish(batch_norm__666) - del batch_norm__666 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - 
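The deleted neck code above expands one unit many times: a 3x3 Conv-BN-Swish, followed by a 3x3 Conv-BN branch summed with a 1x1 Conv-BN branch and a final Swish; a stack of such units sits between two 1x1 projections of a concatenated input, and the two branches are concatenated back and fused by another 1x1 conv (e.g. 96 + 96 -> 192, 192 + 192 -> 384 in this hunk). A rough re-expression with the public `paddle.nn` API; the class names and the block count below are illustrative, not taken from the patch:

    import paddle
    import paddle.nn as nn
    import paddle.nn.functional as F

    class ConvBN(nn.Layer):
        # Conv2D + BatchNorm2D, optionally followed by Swish.
        def __init__(self, in_ch, out_ch, k, stride=1, act=True):
            super().__init__()
            self.conv = nn.Conv2D(in_ch, out_ch, k, stride=stride, padding=k // 2, bias_attr=False)
            self.bn = nn.BatchNorm2D(out_ch)
            self.act = act

        def forward(self, x):
            y = self.bn(self.conv(x))
            return F.swish(y) if self.act else y

    class RepStyleBlock(nn.Layer):
        # 3x3 Conv-BN-Swish, then 3x3 and 1x1 Conv-BN branches added and activated.
        def __init__(self, ch):
            super().__init__()
            self.pre = ConvBN(ch, ch, 3)
            self.branch3x3 = ConvBN(ch, ch, 3, act=False)
            self.branch1x1 = ConvBN(ch, ch, 1, act=False)

        def forward(self, x):
            t = self.pre(x)
            return F.swish(self.branch3x3(t) + self.branch1x1(t))

    class CSPStage(nn.Layer):
        # Two 1x1 projections of the concatenated input; a stack of blocks on
        # one branch; channel concat; 1x1 fusion conv.
        def __init__(self, in_ch, mid_ch, num_blocks=3):
            super().__init__()
            self.short = ConvBN(in_ch, mid_ch, 1)
            self.main = ConvBN(in_ch, mid_ch, 1)
            self.blocks = nn.Sequential(*[RepStyleBlock(mid_ch) for _ in range(num_blocks)])
            self.fuse = ConvBN(2 * mid_ch, 2 * mid_ch, 1)

        def forward(self, x):
            return self.fuse(paddle.concat([self.short(x), self.blocks(self.main(x))], axis=1))

The stage output width is twice the branch width, which matches the concat shapes annotated in the deleted ops.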
False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_85 = paddle._C_ops.swish(batch_norm__672) - del batch_norm__672 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) - combine_10 = [swish_85, swish_74] - del swish_74, swish_85 - - # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_86 = paddle._C_ops.swish(batch_norm__678) - del batch_norm__678 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_118 = paddle._C_ops.conv2d( - concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_12, parameter_174 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_87 = paddle._C_ops.swish(batch_norm__684) - del batch_norm__684 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169, swish_87 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.swish: 
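Between these stages, the deleted code wires a top-down and a bottom-up path: a nearest-neighbour 2x upsample feeds a channel concat on the way down (the 192 + 256 -> 448 concat), and a stride-[2, 2] 3x3 conv feeds a concat on the way back up (the 192 + 384 -> 576 concat above). A compact sketch of those two merge steps; the function names and the `downsample` callable are illustrative stand-ins:

    import paddle
    import paddle.nn.functional as F

    def top_down_merge(deep_feat, lateral_feat):
        # Nearest-neighbour 2x upsample of the deeper feature, then channel
        # concat with the higher-resolution lateral feature (the
        # nearest_interp + concat steps in the deleted code).
        up = F.interpolate(deep_feat, scale_factor=2, mode="nearest")
        return paddle.concat([up, lateral_feat], axis=1)

    def bottom_up_merge(shallow_feat, pyramid_feat, downsample):
        # `downsample` stands in for the generated stride-[2, 2] 3x3 conv +
        # BN + swish; its output is concatenated with the stored top-down feature.
        return paddle.concat([downsample(shallow_feat), pyramid_feat], axis=1)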
(-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_88 = paddle._C_ops.swish(batch_norm__690) - del batch_norm__690 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159, swish_88 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_121, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_49 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - del batch_norm__696, batch_norm__702 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_89 = paddle._C_ops.swish(add_49) - del add_49 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf64) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - del parameter_154, swish_89 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("14"), paddle.float64, paddle.core.CPUPlace() ) - del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_90 = paddle._C_ops.swish(batch_norm__708) - del batch_norm__708 - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_90, parameter_149, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() ) - del parameter_149 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + # pd_op.arange: (14xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144, swish_90 + # pd_op.cast: (14xf32) <- (14xi64) + cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) + del arange_0 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_50 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - del batch_norm__714, batch_norm__720 + # pd_op.scale: (14xf32) <- (14xf32, 1xf32) + scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) + del cast_0 - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_91 = paddle._C_ops.swish(add_50) - del add_50 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139, swish_91 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__730, - batch_norm__731, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("32"), paddle.float32, paddle.core.CPUPlace() ) - del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_92 = 
paddle._C_ops.swish(batch_norm__726) - del batch_norm__726 - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 + # pd_op.scale: (14xf32) <- (14xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + # builtin.combine: ([14xf32, 14xf32]) <- (14xf32, 14xf32) + combine_0 = [scale_1, scale_1] + del scale_1 - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129, swish_92 + # pd_op.meshgrid: ([14x14xf32, 14x14xf32]) <- ([14xf32, 14xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # builtin.split: (14x14xf32, 14x14xf32) <- ([14x14xf32, 14x14xf32]) ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_51 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - del batch_norm__732, batch_norm__738 + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_93 = paddle._C_ops.swish(add_51) - del add_51 + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - combine_11 = [swish_86, swish_93] - del swish_86, swish_93 + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_13, parameter_124 + # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) + scale_5 = 
paddle._C_ops.scale(split_0, full_3, float("80"), True) - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_128, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + # builtin.combine: ([14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32]) <- (14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_94 = paddle._C_ops.swish(batch_norm__744) - del batch_norm__744 + # pd_op.stack: (14x14x4xf32) <- ([14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32]) + stack_0 = paddle._C_ops.stack(combine_1, -1) + del combine_1 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 + # builtin.combine: ([14x14xf32, 14x14xf32]) <- (14x14xf32, 14x14xf32) + combine_2 = [split_1, split_0] + del split_0, split_1 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + # pd_op.stack: (14x14x2xf32) <- ([14x14xf32, 14x14xf32]) + stack_1 = paddle._C_ops.stack(combine_2, -1) + del combine_2 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_95 = paddle._C_ops.swish(batch_norm__750) - del batch_norm__750 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [-1, 4] - # builtin.combine: ([-1x384x-1x-1xf32, -1x768x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x768x-1x-1xf32) - combine_12 = [swish_95, swish_64] - del swish_64, swish_95 + # pd_op.reshape: (196x4xf32) <- (14x14x4xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) + del stack_0 - # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x768x-1x-1xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 2] - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 + # pd_op.reshape: (196x2xf32) <- (14x14x2xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) + del stack_1 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - 
batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (196x1xf32) <- () + full_5 = paddle._C_ops.full( + [196, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), ) - del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_96 = paddle._C_ops.swish(batch_norm__756) - del batch_norm__756 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("28"), paddle.float64, paddle.core.CPUPlace() ) - del concat_14, parameter_109 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + # pd_op.arange: (28xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_97 = paddle._C_ops.swish(batch_norm__762) - del batch_norm__762 + # pd_op.cast: (28xf32) <- (28xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104, swish_97 + # pd_op.scale: (28xf32) <- (28xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("16"), paddle.float32, paddle.core.CPUPlace() ) - del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_98 = paddle._C_ops.swish(batch_norm__768) - del batch_norm__768 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - 
del parameter_99 + # pd_op.scale: (28xf32) <- (28xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + # builtin.combine: ([28xf32, 28xf32]) <- (28xf32, 28xf32) + combine_3 = [scale_7, scale_7] + del scale_7 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94, swish_98 + # pd_op.meshgrid: ([28x28xf32, 28x28xf32]) <- ([28xf32, 28xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # builtin.split: (28x28xf32, 28x28xf32) <- ([28x28xf32, 28x28xf32]) ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_52 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - del batch_norm__774, batch_norm__780 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_99 = paddle._C_ops.swish(add_52) - del add_52 + split_2, + split_3, + ) = meshgrid_1 + del meshgrid_1 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89, swish_99 + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_100 = paddle._C_ops.swish(batch_norm__786) - del batch_norm__786 + # 
pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 + # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + # builtin.combine: ([28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32]) <- (28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79, swish_100 + # pd_op.stack: (28x28x4xf32) <- ([28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + # builtin.combine: ([28x28xf32, 28x28xf32]) <- (28x28xf32, 28x28xf32) + combine_5 = [split_3, split_2] + del split_2, split_3 - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_53 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - del batch_norm__792, batch_norm__798 + # pd_op.stack: (28x28x2xf32) <- ([28x28xf32, 28x28xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 + + # pd_op.reshape: (784x4xf32) <- (28x28x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_101 = paddle._C_ops.swish(add_53) - del add_53 + # pd_op.reshape: (784x2xf32) <- (28x28x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (784x1xf32) <- () + full_8 = paddle._C_ops.full( + [784, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), ) - del parameter_74, swish_101 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 
384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_138, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("56"), paddle.float64, paddle.core.CPUPlace() ) - del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_102 = paddle._C_ops.swish(batch_norm__804) - del batch_norm__804 + # pd_op.arange: (56xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 + # pd_op.cast: (56xf32) <- (56xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + # pd_op.scale: (56xf32) <- (56xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("8"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_64, swish_102 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_140, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + # pd_op.scale: (56xf32) <- (56xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_54 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - del batch_norm__810, batch_norm__816 + # builtin.combine: ([56xf32, 56xf32]) <- (56xf32, 56xf32) + combine_6 = [scale_13, scale_13] + del scale_13 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_103 = 
paddle._C_ops.swish(add_54) - del add_54 + # pd_op.meshgrid: ([56x56xf32, 56x56xf32]) <- ([56xf32, 56xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_13 = [swish_96, swish_103] - del swish_103, swish_96 + # builtin.split: (56x56xf32, 56x56xf32) <- ([56x56xf32, 56x56xf32]) + ( + split_4, + split_5, + ) = meshgrid_2 + del meshgrid_2 - # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_15 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_15, parameter_59 + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_104 = paddle._C_ops.swish(batch_norm__822) - del batch_norm__822 + # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 - # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(swish_104) + # builtin.combine: ([56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32]) <- (56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [0] + # pd_op.stack: (56x56x4xf32) <- ([56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [1] + # builtin.combine: ([56x56xf32, 56x56xf32]) <- (56x56xf32, 56x56xf32) + combine_8 = [split_5, split_4] + del split_4, split_5 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_0 + # pd_op.stack: (56x56x2xf32) <- ([56x56xf32, 56x56xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 - # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) - shape64_1 = paddle._C_ops.shape64(swish_104) + # pd_op.reshape: (3136x4xf32) <- (56x56x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [2] + # pd_op.reshape: (3136x2xf32) <- (56x56x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del 
full_int_array_1, stack_5 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_8 = [3] + # pd_op.full: (3136x1xf32) <- () + full_11 = paddle._C_ops.full( + [3136, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), + ) - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - del shape64_1 - # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) - shape64_2 = paddle._C_ops.shape64(swish_104) + # builtin.combine: ([196x4xf32, 784x4xf32, 3136x4xf32]) <- (196x4xf32, 784x4xf32, 3136x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] + + # pd_op.concat: (4116x4xf32) <- ([196x4xf32, 784x4xf32, 3136x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_9 = [4] + # builtin.combine: ([196x2xf32, 784x2xf32, 3136x2xf32]) <- (196x2xf32, 784x2xf32, 3136x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] + del reshape_1, reshape_3, reshape_5 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] - ) - del shape64_2 + # pd_op.concat: (4116x2xf32) <- ([196x2xf32, 784x2xf32, 3136x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 + + # builtin.combine: ([196x1xf32, 784x1xf32, 3136x1xf32]) <- (196x1xf32, 784x1xf32, 3136x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_22 = paddle._C_ops.multiply(slice_1, slice_2) - del slice_1, slice_2 + # pd_op.concat: (4116x1xf32) <- ([196x1xf32, 784x1xf32, 3136x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 # pd_op.full_int_array: (2xi64) <- () - full_int_array_10 = [1, 1] + full_int_array_2 = [1, 1] - # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) - pool2d_3 = paddle._C_ops.pool2d( - swish_104, - full_int_array_10, + # pd_op.assign: (2xi64) <- (2xi64) + assign_0 = full_int_array_2 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_1 = full_int_array_2 + + # pd_op.pool2d: (8x768x1x1xf32) <- (8x768x14x14xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + data_0, + full_int_array_2, [1, 1], [0, 0], False, @@ -6555,253 +394,175 @@ def forward( "EXPLICIT", ) - # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) - conv2d_142 = paddle._C_ops.conv2d( - pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_54 + del parameter_53 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [1, -1, 1, 1] # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) - del parameter_53 + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) + del parameter_52 - # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) - add_55 = paddle._C_ops.add(conv2d_142, reshape_4) - del conv2d_142, reshape_4 + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) - sigmoid_0 = 
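The newly added ops above build, per feature level, a grid of cell centres at (i + 0.5) * stride plus a fixed anchor box of +/- 2.5 cells around each centre (hence the 80/40/20 offsets for strides 32/16/8), then concatenate the 196 + 784 + 3136 = 4116 rows of all three levels. An equivalent sketch in the public Paddle API; `make_level_anchors` and its arguments are illustrative names, not part of the patch:

    import paddle

    def make_level_anchors(size, stride, half_cells=2.5):
        # Cell centres at (i + 0.5) * stride; box corners at +/- half_cells * stride.
        coords = (paddle.arange(size, dtype="float32") + 0.5) * stride
        y, x = paddle.meshgrid(coords, coords)  # 'ij' indexing, like the generated ops
        half = half_cells * stride
        anchors = paddle.stack([x - half, y - half, x + half, y + half], axis=-1).reshape([-1, 4])
        points = paddle.stack([x, y], axis=-1).reshape([-1, 2])
        strides = paddle.full([size * size, 1], float(stride), dtype="float32")
        return anchors, points, strides

    levels = [make_level_anchors(s, st) for s, st in [(14, 32), (28, 16), (56, 8)]]
    anchor_boxes = paddle.concat([lvl[0] for lvl in levels])   # (4116, 4)
    centers = paddle.concat([lvl[1] for lvl in levels])        # (4116, 2)
    stride_tensor = paddle.concat([lvl[2] for lvl in levels])  # (4116, 1)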
paddle._C_ops.sigmoid(add_55) - del add_55 + # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_0) + del add_0 - # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) - multiply_23 = paddle._C_ops.multiply(swish_104, sigmoid_0) - del sigmoid_0 + # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_143 = paddle._C_ops.conv2d( - multiply_23, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del multiply_23, parameter_52 + del parameter_51 - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( - batch_norm__828, - batch_norm__829, - batch_norm__830, - batch_norm__831, - batch_norm__832, - batch_norm__833, + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_143, - parameter_51, + conv2d_1, parameter_50, parameter_49, parameter_48, - True, + parameter_47, + False, float("0.9"), float("1e-05"), "NCHW", - True, + False, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + del parameter_47, parameter_48, parameter_49, parameter_50 - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_105 = paddle._C_ops.swish(batch_norm__828) - del batch_norm__828 + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) - # pd_op.add: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x-1x-1xf32) - add_56 = paddle._C_ops.add(swish_105, swish_104) - del swish_105 + # pd_op.add: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x14x14xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) - # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x768x-1x-1xf32, 4x768x3x3xf32) - conv2d_144 = paddle._C_ops.conv2d( - add_56, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x4x14x14xf32) <- (8x768x14x14xf32, 4x768x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del add_56, parameter_47 + del parameter_46 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) - del parameter_46 + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) + del parameter_45 - # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) - add_57 = paddle._C_ops.add(conv2d_144, reshape_5) - del conv2d_144, reshape_5 + # pd_op.add: (8x4x14x14xf32) <- (8x4x14x14xf32, 1x4x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) - conv2d_145 = paddle._C_ops.conv2d( - pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) + conv2d_3 = paddle._C_ops.conv2d( + pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
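The added head code applies the same channel gate twice to the incoming 8x768x14x14 feature: a global average pool, a 1x1 conv with bias, a sigmoid, and an element-wise multiply, once in front of the regression branch above and once in front of the classification branch below. A rough `paddle.nn` sketch of that gate (class and attribute names are illustrative):

    import paddle
    import paddle.nn as nn
    import paddle.nn.functional as F

    class ChannelGate(nn.Layer):
        def __init__(self, channels=768):
            super().__init__()
            # 1x1 conv with bias, matching the conv2d + reshaped-bias add in the hunk.
            self.fc = nn.Conv2D(channels, channels, 1)

        def forward(self, feat):
            pooled = feat.mean(axis=[2, 3], keepdim=True)  # global average pool to 1x1
            return feat * F.sigmoid(self.fc(pooled))

In the generated code the pooled tensor (pool2d_0) is computed once and shared by both gates, so a faithful port would hoist the pooling out of the per-branch gate.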
"NCHW" ) - del parameter_45, pool2d_3 + del parameter_44 # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) - del parameter_44 + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) + del parameter_43 - # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) - add_58 = paddle._C_ops.add(conv2d_145, reshape_6) - del conv2d_145, reshape_6 + # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_58) - del add_58 + # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_3) + del add_3 - # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) - multiply_24 = paddle._C_ops.multiply(swish_104, sigmoid_1) - del sigmoid_1, swish_104 + # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_146 = paddle._C_ops.conv2d( - multiply_24, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del multiply_24, parameter_43 + del parameter_42 - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( - batch_norm__834, - batch_norm__835, - batch_norm__836, - batch_norm__837, - batch_norm__838, - batch_norm__839, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_146, - parameter_42, + conv2d_4, parameter_41, parameter_40, parameter_39, - True, + parameter_38, + False, float("0.9"), float("1e-05"), "NCHW", - True, + False, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + del parameter_38, parameter_39, parameter_40, parameter_41 - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_106 = paddle._C_ops.swish(batch_norm__834) - del batch_norm__834 + # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) - # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x768x-1x-1xf32, 68x768x3x3xf32) - conv2d_147 = paddle._C_ops.conv2d( - swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x68x14x14xf32) <- (8x768x14x14xf32, 68x768x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_38, swish_106 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) del parameter_37 - # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) - add_59 = paddle._C_ops.add(conv2d_147, reshape_7) - del conv2d_147, reshape_7 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = 
paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_3 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_14 = [full_1, full_2, full_3, multiply_22] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_14, 0) - del combine_14 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_59, stack_0) - del add_59, stack_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) - del reshape_8 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_0 = paddle._C_ops.softmax(transpose_0, 1) - del transpose_0 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_148 = paddle._C_ops.conv2d( - softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_6) - del conv2d_148 - - # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_57) - del add_57 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_15 = [full_1, full_2, multiply_22] - del multiply_22 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_15, 0) - del combine_15 - - # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) - reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) - del sigmoid_2, stack_1 - - # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) - shape64_3 = paddle._C_ops.shape64(swish_94) + # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) + del parameter_36 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_3 + # pd_op.add: (8x68x14x14xf32) <- (8x68x14x14xf32, 1x68x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) - shape64_4 = paddle._C_ops.shape64(swish_94) + # pd_op.sigmoid: (8x4x14x14xf32) <- (8x4x14x14xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_2) + del add_2 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] - ) - del shape64_4 + # pd_op.flatten: (8x4x196xf32) <- (8x4x14x14xf32) + flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) - shape64_5 = paddle._C_ops.shape64(swish_94) + # pd_op.transpose: (8x196x4xf32) <- (8x4x196xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] - ) - del shape64_5 + # pd_op.flatten: (8x68x196xf32) <- (8x68x14x14xf32) + flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_25 = paddle._C_ops.multiply(slice_4, slice_5) - del slice_4, slice_5 + # pd_op.transpose: (8x196x68xf32) <- (8x68x196xf32) + transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) + del flatten_1 - # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_4 = paddle._C_ops.pool2d( - 
swish_94, - full_int_array_10, + # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x28x28xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + data_1, + full_int_array_2, [1, 1], [0, 0], False, @@ -6813,238 +574,172 @@ def forward( "EXPLICIT", ) - # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) - conv2d_149 = paddle._C_ops.conv2d( - pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_35 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) del parameter_34 - # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) - add_60 = paddle._C_ops.add(conv2d_149, reshape_10) - del conv2d_149, reshape_10 + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_60) - del add_60 + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_5) + del add_5 - # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) - multiply_26 = paddle._C_ops.multiply(swish_94, sigmoid_3) - del sigmoid_3 + # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_150 = paddle._C_ops.conv2d( - multiply_26, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del multiply_26, parameter_33 + del parameter_33 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( - batch_norm__840, - batch_norm__841, - batch_norm__842, - batch_norm__843, - batch_norm__844, - batch_norm__845, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_150, + conv2d_7, parameter_32, parameter_31, parameter_30, parameter_29, - True, + False, float("0.9"), float("1e-05"), "NCHW", - True, + False, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + del parameter_29, parameter_30, parameter_31, parameter_32 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_107 = paddle._C_ops.swish(batch_norm__840) - del batch_norm__840 + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_61 = paddle._C_ops.add(swish_107, swish_94) - del swish_107 + # pd_op.add: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x28x28xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) - # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x384x-1x-1xf32, 4x384x3x3xf32) - conv2d_151 = 
paddle._C_ops.conv2d( - add_61, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x4x28x28xf32) <- (8x384x28x28xf32, 4x384x3x3xf32) + conv2d_8 = paddle._C_ops.conv2d( + add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del add_61, parameter_28 + del parameter_28 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) del parameter_27 - # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) - add_62 = paddle._C_ops.add(conv2d_151, reshape_11) - del conv2d_151, reshape_11 + # pd_op.add: (8x4x28x28xf32) <- (8x4x28x28xf32, 1x4x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) - conv2d_152 = paddle._C_ops.conv2d( - pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) + conv2d_9 = paddle._C_ops.conv2d( + pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_26, pool2d_4 + del parameter_26 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) del parameter_25 - # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) - add_63 = paddle._C_ops.add(conv2d_152, reshape_12) - del conv2d_152, reshape_12 + # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_63) - del add_63 + # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_8) + del add_8 - # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) - multiply_27 = paddle._C_ops.multiply(swish_94, sigmoid_4) - del sigmoid_4, swish_94 + # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_153 = paddle._C_ops.conv2d( - multiply_27, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) + conv2d_10 = paddle._C_ops.conv2d( + multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del multiply_27, parameter_24 + del parameter_24 - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( - batch_norm__846, - batch_norm__847, - batch_norm__848, - batch_norm__849, - batch_norm__850, - batch_norm__851, + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_153, + conv2d_10, parameter_23, parameter_22, parameter_21, parameter_20, - True, + False, float("0.9"), float("1e-05"), "NCHW", - True, + False, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + del parameter_20, parameter_21, parameter_22, 
parameter_23 - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_108 = paddle._C_ops.swish(batch_norm__846) - del batch_norm__846 + # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) - # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x384x-1x-1xf32, 68x384x3x3xf32) - conv2d_154 = paddle._C_ops.conv2d( - swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x68x28x28xf32) <- (8x384x28x28xf32, 68x384x3x3xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_19, swish_108 + del parameter_19 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) del parameter_18 - # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) - add_64 = paddle._C_ops.add(conv2d_154, reshape_13) - del conv2d_154, reshape_13 + # pd_op.add: (8x68x28x28xf32) <- (8x68x28x28xf32, 1x68x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_16 = [full_1, full_2, full_3, multiply_25] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_16, 0) - del combine_16 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_64, stack_2) - del add_64, stack_2 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) - del reshape_14 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_1 = paddle._C_ops.softmax(transpose_1, 1) - del transpose_1 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_155 = paddle._C_ops.conv2d( - softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_1 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_6) - del conv2d_155 - - # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_62) - del add_62 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_17 = [full_1, full_2, multiply_25] - del multiply_25 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_3 = paddle._C_ops.stack(combine_17, 0) - del combine_17 - - # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) - del sigmoid_5, stack_3 - - # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) - shape64_6 = paddle._C_ops.shape64(swish_84) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del full_int_array_5, shape64_6 - - # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) - shape64_7 = paddle._C_ops.shape64(swish_84) + # pd_op.sigmoid: (8x4x28x28xf32) <- (8x4x28x28xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_7) + del add_7 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - shape64_7, [0], full_int_array_7, full_int_array_8, [1], [0] - ) - del full_int_array_7, shape64_7 + # pd_op.flatten: (8x4x784xf32) <- (8x4x28x28xf32) + flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) - shape64_8 = paddle._C_ops.shape64(swish_84) + # pd_op.transpose: (8x784x4xf32) <- 
(8x4x784xf32) + transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) + del flatten_2 - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] - ) - del full_int_array_8, full_int_array_9, shape64_8 + # pd_op.flatten: (8x68x784xf32) <- (8x68x28x28xf32) + flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_28 = paddle._C_ops.multiply(slice_7, slice_8) - del slice_7, slice_8 + # pd_op.transpose: (8x784x68xf32) <- (8x68x784xf32) + transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) + del flatten_3 - # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) - pool2d_5 = paddle._C_ops.pool2d( - swish_84, - full_int_array_10, + # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x56x56xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + data_2, + full_int_array_2, [1, 1], [0, 0], False, @@ -7055,225 +750,301 @@ def forward( True, "EXPLICIT", ) - del full_int_array_10 - # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) - conv2d_156 = paddle._C_ops.conv2d( - pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_17 # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) del parameter_16 - # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) - add_65 = paddle._C_ops.add(conv2d_156, reshape_16) - del conv2d_156, reshape_16 + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_65) - del add_65 + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_10) + del add_10 - # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) - multiply_29 = paddle._C_ops.multiply(swish_84, sigmoid_6) - del sigmoid_6 + # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_157 = paddle._C_ops.conv2d( - multiply_29, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x192x56x56xf32) <- (8x192x56x56xf32, 192x192x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del multiply_29, parameter_15 + del parameter_15 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - batch_norm__852, - batch_norm__853, - batch_norm__854, - batch_norm__855, - batch_norm__856, - batch_norm__857, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_157, + conv2d_13, parameter_14, parameter_13, parameter_12, parameter_11, - True, + False, float("0.9"), float("1e-05"), "NCHW", - True, + False, False, ), lambda out: out if 
isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + del parameter_11, parameter_12, parameter_13, parameter_14 - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_109 = paddle._C_ops.swish(batch_norm__852) - del batch_norm__852 + # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_66 = paddle._C_ops.add(swish_109, swish_84) - del swish_109 + # pd_op.add: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x56x56xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) - # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x192x-1x-1xf32, 4x192x3x3xf32) - conv2d_158 = paddle._C_ops.conv2d( - add_66, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x4x56x56xf32) <- (8x192x56x56xf32, 4x192x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del add_66, parameter_10 + del parameter_10 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) del parameter_9 - # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) - add_67 = paddle._C_ops.add(conv2d_158, reshape_17) - del conv2d_158, reshape_17 + # pd_op.add: (8x4x56x56xf32) <- (8x4x56x56xf32, 1x4x1x1xf32) + add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) - conv2d_159 = paddle._C_ops.conv2d( - pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_8, pool2d_5 + del parameter_8 # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) del parameter_7 - # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) - add_68 = paddle._C_ops.add(conv2d_159, reshape_18) - del conv2d_159, reshape_18 + # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_68) - del add_68 + # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_13) + del add_13 - # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) - multiply_30 = paddle._C_ops.multiply(swish_84, sigmoid_7) - del sigmoid_7, swish_84 + # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_160 = paddle._C_ops.conv2d( - multiply_30, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x192x56x56xf32) <- (8x192x56x56xf32, 192x192x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del multiply_30, parameter_6 + del parameter_6 - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # 
pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - batch_norm__858, - batch_norm__859, - batch_norm__860, - batch_norm__861, - batch_norm__862, - batch_norm__863, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_160, + conv2d_16, parameter_5, parameter_4, parameter_3, parameter_2, - True, + False, float("0.9"), float("1e-05"), "NCHW", - True, + False, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + del parameter_2, parameter_3, parameter_4, parameter_5 - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_110 = paddle._C_ops.swish(batch_norm__858) - del batch_norm__858 + # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) - # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x192x-1x-1xf32, 68x192x3x3xf32) - conv2d_161 = paddle._C_ops.conv2d( - swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (8x68x56x56xf32) <- (8x192x56x56xf32, 68x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_1, swish_110 + del parameter_1 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) - del full_int_array_1, parameter_0 - - # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) - add_69 = paddle._C_ops.add(conv2d_161, reshape_19) - del conv2d_161, reshape_19 + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_18 = [full_1, full_2, full_3, multiply_28] - del full_3 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_18, 0) - del combine_18 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(add_69, stack_4) - del add_69, stack_4 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) - del reshape_20 + # pd_op.add: (8x68x56x56xf32) <- (8x68x56x56xf32, 1x68x1x1xf32) + add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_2 = paddle._C_ops.softmax(transpose_2, 1) - del transpose_2 + # pd_op.sigmoid: (8x4x56x56xf32) <- (8x4x56x56xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_12) + del add_12 - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_162 = paddle._C_ops.conv2d( - softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_36, softmax_2 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_6) - del conv2d_162, full_int_array_6 + # pd_op.flatten: (8x4x3136xf32) <- (8x4x56x56xf32) + flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_67) - del add_67 + # pd_op.transpose: (8x3136x4xf32) <- (8x4x3136xf32) + transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) + del flatten_4 - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_19 
= [full_1, full_2, multiply_28] - del full_1, full_2, multiply_28 + # pd_op.flatten: (8x68x3136xf32) <- (8x68x56x56xf32) + flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_19, 0) - del combine_19 - - # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) - reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) - del sigmoid_8, stack_5 + # pd_op.transpose: (8x3136x68xf32) <- (8x68x3136xf32) + transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) + del flatten_5 # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + full_13 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) - combine_20 = [reshape_9, reshape_15, reshape_21] - del reshape_15, reshape_21, reshape_9 - - # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_20, full_4) - del combine_20, full_4 + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_13 - # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) - combine_21 = [squeeze_0, squeeze_1, squeeze_2] - del squeeze_0, squeeze_1, squeeze_2 + # builtin.combine: ([8x196x4xf32, 8x784x4xf32, 8x3136x4xf32]) <- (8x196x4xf32, 8x784x4xf32, 8x3136x4xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_21, full_0) - del combine_21, full_0 + # pd_op.concat: (8x4116x4xf32) <- ([8x196x4xf32, 8x784x4xf32, 8x3136x4xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 - return concat_0, concat_1 + # builtin.combine: ([8x196x68xf32, 8x784x68xf32, 8x3136x68xf32]) <- (8x196x68xf32, 8x784x68xf32, 8x3136x68xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] + + # pd_op.concat: (8x4116x68xf32) <- ([8x196x68xf32, 8x784x68xf32, 8x3136x68xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) + del ( + add_1, + add_11, + add_14, + add_4, + add_6, + add_9, + assign_0, + assign_1, + assign_2, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__11, + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + batch_norm__18, + batch_norm__19, + batch_norm__2, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + batch_norm__3, + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + batch_norm__4, + batch_norm__5, + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + combine_13, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_13, + full_int_array_2, + multiply_0, + multiply_1, + multiply_2, + multiply_3, + multiply_4, + multiply_5, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_14, + reshape_15, + reshape_16, + reshape_17, + reshape_2, + reshape_4, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + sigmoid_0, + 
sigmoid_1, + sigmoid_2, + sigmoid_3, + sigmoid_4, + sigmoid_5, + sigmoid_6, + sigmoid_7, + sigmoid_8, + swish_0, + swish_1, + swish_2, + swish_3, + swish_4, + swish_5, + transpose_0, + transpose_1, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + ) + + return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py index 86b97343c..42b452682 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_5/weight_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [68] dtype = "float32" - min_val = float("-0.00996713") - max_val = float("0.0296165") - mean = float("1.85551e-07") - std = float("0.00658747") + min_val = float("-0.00994345") + max_val = float("0.0295467") + mean = float("1.85188e-07") + std = float("0.00657184") data = None @@ -13,10 +13,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [68, 192, 3, 3] dtype = "float32" - min_val = float("-0.132365") - max_val = float("0.153351") - mean = float("5.83823e-08") - std = float("0.0069927") + min_val = float("-0.132052") + max_val = float("0.152993") + mean = float("5.82659e-08") + std = float("0.00697617") data = None @@ -24,10 +24,10 @@ class Program_weight_tensor_parameter_2: name = "parameter_2" shape = [192] dtype = "float32" - min_val = float("-0.0440964") - max_val = float("0.204248") - mean = float("0.0505266") - std = float("0.0396607") + min_val = float("-0.0439922") + max_val = float("0.203765") + mean = float("0.0504072") + std = float("0.0395669") data = None @@ -35,10 +35,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [192] dtype = "float32" - min_val = float("0.854628") - max_val = float("1.623") - mean = float("1.22222") - std = float("0.143373") + min_val = float("0.852608") + max_val = float("1.61916") + mean = float("1.21933") + std = float("0.143034") data = None @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [192] dtype = "float32" - min_val = float("0.000124474") - max_val = float("0.00268664") - mean = float("0.000430455") - std = float("0.00032986") + min_val = float("0.000117627") + max_val = float("0.0026124") + mean = float("0.000404459") + std = float("0.000312994") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [192] dtype = "float32" - min_val = float("-0.0352297") - max_val = float("0.0298138") - mean = float("-0.00349452") - std = float("0.0106524") + min_val = float("-0.0352285") + max_val = float("0.029767") + mean = float("-0.0034756") + std = float("0.0106549") data = None @@ -68,10 +68,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0520579") - max_val = float("0.075326") - mean = float("-0.00011553") - std = float("0.00541501") + min_val = float("-0.0519368") + max_val = float("0.075145") + mean = float("-0.000115247") + std = float("0.00540221") data = None @@ -79,10 +79,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [192] dtype = "float32" - min_val = float("-0.00468623") - max_val = float("0.00853988") - mean = float("3.18341e-05") - std = float("0.00259726") + min_val = float("-0.00467515") + max_val = float("0.00851991") + mean = float("3.17583e-05") + std = 
float("0.00259113") data = None @@ -90,10 +90,10 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.00533172") - max_val = float("0.00945863") - mean = float("-9.35044e-05") - std = float("0.00138579") + min_val = float("-0.00531914") + max_val = float("0.00943649") + mean = float("-9.32844e-05") + std = float("0.00138252") data = None @@ -110,10 +110,6 @@ class Program_weight_tensor_parameter_10: name = "parameter_10" shape = [4, 192, 3, 3] dtype = "float32" - min_val = float("-8.0039e-06") - max_val = float("0.00022716") - mean = float("1.07103e-05") - std = float("1.95264e-05") data = None @@ -121,10 +117,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [192] dtype = "float32" - min_val = float("-0.327873") - max_val = float("0.892505") - mean = float("0.35844") - std = float("0.270005") + min_val = float("-0.327098") + max_val = float("0.890395") + mean = float("0.357592") + std = float("0.269366") data = None @@ -132,10 +128,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [192] dtype = "float32" - min_val = float("1.0197") - max_val = float("1.7745") - mean = float("1.31881") - std = float("0.141386") + min_val = float("1.01729") + max_val = float("1.7703") + mean = float("1.31569") + std = float("0.141051") data = None @@ -143,10 +139,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [192] dtype = "float32" - min_val = float("0.000191765") - max_val = float("0.00418178") - mean = float("0.00073364") - std = float("0.000566711") + min_val = float("0.000191938") + max_val = float("0.00371493") + mean = float("0.00070204") + std = float("0.000534277") data = None @@ -154,10 +150,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [192] dtype = "float32" - min_val = float("-0.171816") - max_val = float("0.0388429") - mean = float("-0.024799") - std = float("0.0310049") + min_val = float("-0.172109") + max_val = float("0.0388642") + mean = float("-0.0248015") + std = float("0.0310403") data = None @@ -165,10 +161,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0758177") - max_val = float("0.0688388") - mean = float("-0.000505692") - std = float("0.0065208") + min_val = float("-0.0756384") + max_val = float("0.068676") + mean = float("-0.0005045") + std = float("0.00650538") data = None @@ -176,10 +172,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [192] dtype = "float32" - min_val = float("-0.0046322") - max_val = float("0.00955243") - mean = float("-0.0001086") - std = float("0.00181395") + min_val = float("-0.00462125") + max_val = float("0.00952984") + mean = float("-0.000108344") + std = float("0.00180966") data = None @@ -187,10 +183,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0166797") - max_val = float("0.0150515") - mean = float("-1.40907e-05") - std = float("0.00152728") + min_val = float("-0.0166403") + max_val = float("0.015016") + mean = float("-1.40574e-05") + std = float("0.00152367") data = None @@ -198,10 +194,10 @@ class Program_weight_tensor_parameter_18: name = "parameter_18" shape = [68] dtype = "float32" - min_val = float("-0.00414429") - max_val = float("0.0248245") - mean = float("1.70752e-07") - std = float("0.00516597") + min_val = float("-0.00413441") + max_val = 
float("0.0247658") + mean = float("1.70403e-07") + std = float("0.00515374") data = None @@ -209,10 +205,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [68, 384, 3, 3] dtype = "float32" - min_val = float("-0.0881466") - max_val = float("0.115817") - mean = float("3.10683e-08") - std = float("0.00468137") + min_val = float("-0.0879382") + max_val = float("0.115543") + mean = float("3.09883e-08") + std = float("0.0046703") data = None @@ -220,10 +216,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [384] dtype = "float32" - min_val = float("-0.00505458") - max_val = float("0.0679229") - mean = float("0.025356") - std = float("0.0129557") + min_val = float("-0.00504264") + max_val = float("0.0677623") + mean = float("0.025296") + std = float("0.012925") data = None @@ -231,10 +227,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [384] dtype = "float32" - min_val = float("1.00103") - max_val = float("1.23541") - mean = float("1.10699") - std = float("0.0406781") + min_val = float("0.998659") + max_val = float("1.23249") + mean = float("1.10437") + std = float("0.040582") data = None @@ -242,10 +238,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [384] dtype = "float32" - min_val = float("6.77016e-05") - max_val = float("0.00289487") - mean = float("0.00031114") - std = float("0.00031604") + min_val = float("6.8383e-05") + max_val = float("0.00279866") + mean = float("0.000303046") + std = float("0.000306114") data = None @@ -253,10 +249,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [384] dtype = "float32" - min_val = float("-0.0401932") - max_val = float("0.0131579") - mean = float("-0.00637696") - std = float("0.00739159") + min_val = float("-0.0400502") + max_val = float("0.0131297") + mean = float("-0.00632822") + std = float("0.00736343") data = None @@ -264,10 +260,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0495062") - max_val = float("0.06518") - mean = float("-8.72216e-05") - std = float("0.00262797") + min_val = float("-0.0493892") + max_val = float("0.0650265") + mean = float("-8.70135e-05") + std = float("0.00262175") data = None @@ -275,10 +271,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [384] dtype = "float32" - min_val = float("-0.00259419") - max_val = float("0.00557747") - mean = float("9.36863e-05") - std = float("0.00147316") + min_val = float("-0.00258806") + max_val = float("0.00556431") + mean = float("9.34648e-05") + std = float("0.00146967") data = None @@ -286,10 +282,10 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.00175705") - max_val = float("0.00490764") - mean = float("1.06471e-05") - std = float("0.000587436") + min_val = float("-0.00175289") + max_val = float("0.00489605") + mean = float("1.06219e-05") + std = float("0.000586047") data = None @@ -306,10 +302,6 @@ class Program_weight_tensor_parameter_28: name = "parameter_28" shape = [4, 384, 3, 3] dtype = "float32" - min_val = float("-2.18712e-06") - max_val = float("5.28604e-05") - mean = float("1.78963e-06") - std = float("3.72888e-06") data = None @@ -317,10 +309,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [384] dtype = "float32" - min_val = float("-0.150383") - max_val = float("0.452458") - mean = float("0.229981") - 
std = float("0.0998846") + min_val = float("-0.150027") + max_val = float("0.451389") + mean = float("0.229437") + std = float("0.0996485") data = None @@ -328,10 +320,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [384] dtype = "float32" - min_val = float("1.00536") - max_val = float("1.40175") - mean = float("1.18904") - std = float("0.0599888") + min_val = float("1.00298") + max_val = float("1.39843") + mean = float("1.18623") + std = float("0.059847") data = None @@ -339,10 +331,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [384] dtype = "float32" - min_val = float("0.000159485") - max_val = float("0.0036339") - mean = float("0.000711029") - std = float("0.000576938") + min_val = float("0.000149666") + max_val = float("0.00370625") + mean = float("0.000713075") + std = float("0.000590199") data = None @@ -350,10 +342,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [384] dtype = "float32" - min_val = float("-0.108726") - max_val = float("0.0565601") - mean = float("-0.0264711") - std = float("0.0221625") + min_val = float("-0.108532") + max_val = float("0.0565238") + mean = float("-0.0264124") + std = float("0.0221279") data = None @@ -361,10 +353,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0481454") - max_val = float("0.0448686") - mean = float("-0.000359909") - std = float("0.00296123") + min_val = float("-0.0480316") + max_val = float("0.0447625") + mean = float("-0.000359058") + std = float("0.00295423") data = None @@ -372,10 +364,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [384] dtype = "float32" - min_val = float("-0.00204396") - max_val = float("0.0090588") - mean = float("-3.67064e-06") - std = float("0.000961893") + min_val = float("-0.00203913") + max_val = float("0.00903738") + mean = float("-3.66197e-06") + std = float("0.000959619") data = None @@ -383,41 +375,43 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.00523821") - max_val = float("0.00886045") - mean = float("-4.69801e-06") - std = float("0.000620937") + min_val = float("-0.00522582") + max_val = float("0.0088395") + mean = float("-4.68691e-06") + std = float("0.000619469") data = None class Program_weight_tensor_parameter_36: name = "parameter_36" - shape = [1, 17, 1, 1] + shape = [68] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-0.00290222") + max_val = float("0.0101817") + mean = float("1.30633e-07") + std = float("0.00299044") data = None class Program_weight_tensor_parameter_37: name = "parameter_37" - shape = [68] + shape = [68, 768, 3, 3] dtype = "float32" - min_val = float("-0.00290911") - max_val = float("0.0102058") - mean = float("1.30967e-07") - std = float("0.00299753") + min_val = float("-0.0411349") + max_val = float("0.0738933") + mean = float("1.4159e-08") + std = float("0.00274115") data = None class Program_weight_tensor_parameter_38: name = "parameter_38" - shape = [68, 768, 3, 3] + shape = [768] dtype = "float32" - min_val = float("-0.0412324") - max_val = float("0.0740684") - mean = float("1.4179e-08") - std = float("0.00274764") + min_val = float("-0.0141815") + max_val = float("0.0470838") + mean = float("0.0110249") + std = float("0.0102353") data = None @@ -425,10 +419,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [768] 
dtype = "float32" - min_val = float("-0.0142152") - max_val = float("0.0471953") - mean = float("0.011051") - std = float("0.0102595") + min_val = float("1.00835") + max_val = float("1.19911") + mean = float("1.06458") + std = float("0.0222771") data = None @@ -436,10 +430,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [768] dtype = "float32" - min_val = float("1.01074") - max_val = float("1.20195") - mean = float("1.0671") - std = float("0.0223299") + min_val = float("3.80062e-05") + max_val = float("0.00131862") + mean = float("0.000152402") + std = float("0.000108701") data = None @@ -447,74 +441,70 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [768] dtype = "float32" - min_val = float("3.69016e-05") - max_val = float("0.00134973") - mean = float("0.000155962") - std = float("0.00011097") + min_val = float("-0.0236698") + max_val = float("0.00795434") + mean = float("-0.00383744") + std = float("0.00338712") data = None class Program_weight_tensor_parameter_42: name = "parameter_42" - shape = [768] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0238644") - max_val = float("0.00811534") - mean = float("-0.00384896") - std = float("0.00342466") + min_val = float("-0.0354011") + max_val = float("0.0311472") + mean = float("-3.50875e-05") + std = float("0.0011905") data = None class Program_weight_tensor_parameter_43: name = "parameter_43" - shape = [768, 768, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.035485") - max_val = float("0.0312209") - mean = float("-3.51706e-05") - std = float("0.00119332") + min_val = float("-0.00350695") + max_val = float("0.00217249") + mean = float("0.000104712") + std = float("0.000668859") data = None class Program_weight_tensor_parameter_44: name = "parameter_44" - shape = [768] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.00351526") - max_val = float("0.00217768") - mean = float("0.00010496") - std = float("0.000670444") + min_val = float("-0.00236527") + max_val = float("0.00288539") + mean = float("2.74168e-05") + std = float("0.000209731") data = None class Program_weight_tensor_parameter_45: name = "parameter_45" - shape = [768, 768, 1, 1] + shape = [4] dtype = "float32" - min_val = float("-0.00237088") - max_val = float("0.00289222") - mean = float("2.74817e-05") - std = float("0.000210228") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_46: name = "parameter_46" - shape = [4] + shape = [4, 768, 3, 3] dtype = "float32" - min_val = float("0") - max_val = float("0.5") data = None class Program_weight_tensor_parameter_47: name = "parameter_47" - shape = [4, 768, 3, 3] + shape = [768] dtype = "float32" - min_val = float("-1.14922e-05") - max_val = float("0.000123784") - mean = float("4.64809e-06") - std = float("9.90396e-06") + min_val = float("-0.109319") + max_val = float("0.200294") + mean = float("0.0936331") + std = float("0.0420139") data = None @@ -522,10 +512,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [768] dtype = "float32" - min_val = float("-0.109578") - max_val = float("0.200768") - mean = float("0.093855") - std = float("0.0421135") + min_val = float("1.00715") + max_val = float("1.25105") + mean = float("1.07838") + std = float("0.0259236") data = None @@ -533,10 +523,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [768] dtype = "float32" - min_val = float("1.00954") - max_val = float("1.25402") - mean = 
float("1.08094") - std = float("0.0259851") + min_val = float("9.94571e-05") + max_val = float("0.00338121") + mean = float("0.000633121") + std = float("0.000467813") data = None @@ -544,7618 +534,41 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [768] dtype = "float32" - min_val = float("9.71614e-05") - max_val = float("0.00324689") - mean = float("0.000620876") - std = float("0.000449642") + min_val = float("-0.0501712") + max_val = float("0.0941121") + mean = float("-0.0191505") + std = float("0.0110933") data = None class Program_weight_tensor_parameter_51: name = "parameter_51" - shape = [768] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0505123") - max_val = float("0.0945691") - mean = float("-0.0192848") - std = float("0.0111623") + min_val = float("-0.0485631") + max_val = float("0.0317378") + mean = float("-0.000183678") + std = float("0.00129677") data = None class Program_weight_tensor_parameter_52: name = "parameter_52" - shape = [768, 768, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.0486782") - max_val = float("0.031813") - mean = float("-0.000184113") - std = float("0.00129984") + min_val = float("-0.00522906") + max_val = float("0.00428608") + mean = float("1.59338e-05") + std = float("0.000442491") data = None class Program_weight_tensor_parameter_53: name = "parameter_53" - shape = [768] - dtype = "float32" - min_val = float("-0.00524145") - max_val = float("0.00429624") - mean = float("1.59716e-05") - std = float("0.00044354") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0191194") - max_val = float("0.0353741") - mean = float("5.41618e-06") - std = float("0.000293837") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [768] - dtype = "float32" - min_val = float("-0.175306") - max_val = float("0.211594") - mean = float("0.0847743") - std = float("0.0563497") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [768] - dtype = "float32" - min_val = float("0.939778") - max_val = float("1.2976") - mean = float("1.06639") - std = float("0.0311293") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [768] - dtype = "float32" - min_val = float("0.00100313") - max_val = float("0.03162") - mean = float("0.00343704") - std = float("0.00260456") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [768] - dtype = "float32" - min_val = float("-0.150191") - max_val = float("0.100073") - mean = float("-0.0236812") - std = float("0.0231298") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.048459") - max_val = float("0.0301639") - mean = float("-0.000116837") - std = float("0.00198315") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.138639") - max_val = float("0.0301922") - mean = float("-0.0181041") - std = float("0.0228687") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.949936") - max_val = float("1.04659") - mean = float("0.989067") - std = float("0.0104758") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = 
[384] - dtype = "float32" - min_val = float("0.000424965") - max_val = float("0.00769354") - mean = float("0.00208629") - std = float("0.00118364") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - min_val = float("-0.0576822") - max_val = float("0.0495397") - mean = float("0.000801726") - std = float("0.0154888") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.030027") - max_val = float("0.0156948") - mean = float("1.51312e-07") - std = float("0.00151823") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.138639") - max_val = float("0.0301921") - mean = float("-0.0181041") - std = float("0.0228687") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - min_val = float("0.969953") - max_val = float("1.13144") - mean = float("1.01743") - std = float("0.0170265") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val = float("0.00123184") - max_val = float("0.0151822") - mean = float("0.00352376") - std = float("0.00156689") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-0.113443") - max_val = float("0.119366") - mean = float("-0.0249755") - std = float("0.0249942") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0252708") - max_val = float("0.0260981") - mean = float("-4.55984e-05") - std = float("0.00102241") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [384] - dtype = "float32" - min_val = float("-0.167348") - max_val = float("0.0194716") - mean = float("-0.0338105") - std = float("0.0272605") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [384] - dtype = "float32" - min_val = float("0.977422") - max_val = float("1.12785") - mean = float("1.01688") - std = float("0.0237162") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [384] - dtype = "float32" - min_val = float("0.0030012") - max_val = float("0.0356644") - mean = float("0.00889859") - std = float("0.00464026") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [384] - dtype = "float32" - min_val = float("-0.187143") - max_val = float("0.21034") - mean = float("-0.0263546") - std = float("0.034034") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0247522") - max_val = float("0.0417196") - mean = float("-4.63696e-05") - std = float("0.00115897") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [384] - dtype = "float32" - min_val = float("-0.102213") - max_val = float("0.0125531") - mean = float("-0.0345887") - std = float("0.0187949") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [384] - dtype = "float32" - min_val = float("0.949338") - max_val = float("1.04659") - mean = float("0.991003") - std = float("0.00965035") - data = None - - -class 
Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [384] - dtype = "float32" - min_val = float("0.000650704") - max_val = float("0.00768371") - mean = float("0.00277456") - std = float("0.00139097") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [384] - dtype = "float32" - min_val = float("-0.068838") - max_val = float("0.0330406") - mean = float("-0.00344055") - std = float("0.0126807") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0239432") - max_val = float("0.0196078") - mean = float("-6.64687e-05") - std = float("0.00158521") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [384] - dtype = "float32" - min_val = float("-0.102213") - max_val = float("0.0125531") - mean = float("-0.0345887") - std = float("0.0187949") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [384] - dtype = "float32" - min_val = float("0.961887") - max_val = float("1.10634") - mean = float("1.01796") - std = float("0.0175095") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [384] - dtype = "float32" - min_val = float("0.0018589") - max_val = float("0.0216184") - mean = float("0.00498132") - std = float("0.00238917") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [384] - dtype = "float32" - min_val = float("-0.113641") - max_val = float("0.132971") - mean = float("-0.0350247") - std = float("0.0275334") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0306037") - max_val = float("0.039106") - mean = float("-6.35244e-05") - std = float("0.00105171") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [384] - dtype = "float32" - min_val = float("-0.087894") - max_val = float("0.0187151") - mean = float("-0.035007") - std = float("0.0190606") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [384] - dtype = "float32" - min_val = float("0.937686") - max_val = float("1.1153") - mean = float("1.01357") - std = float("0.0260763") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [384] - dtype = "float32" - min_val = float("0.00329362") - max_val = float("0.0370326") - mean = float("0.00917159") - std = float("0.004535") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [384] - dtype = "float32" - min_val = float("-0.116448") - max_val = float("0.0690271") - mean = float("-0.0174781") - std = float("0.0305536") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0356642") - max_val = float("0.0369726") - mean = float("-4.09478e-05") - std = float("0.0012155") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [384] - dtype = "float32" - min_val = float("-0.113264") - max_val = float("0.0139328") - mean = float("-0.0360276") - std = float("0.0194684") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [384] - dtype = "float32" - min_val = float("0.932607") - max_val = float("1.02926") - mean = 
float("0.989413") - std = float("0.0107521") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [384] - dtype = "float32" - min_val = float("0.000850876") - max_val = float("0.00768634") - mean = float("0.00328168") - std = float("0.00126507") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [384] - dtype = "float32" - min_val = float("-0.0328926") - max_val = float("0.0412224") - mean = float("-0.00745687") - std = float("0.010839") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0246626") - max_val = float("0.0246294") - mean = float("-0.000134739") - std = float("0.00162451") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [384] - dtype = "float32" - min_val = float("-0.113264") - max_val = float("0.0139328") - mean = float("-0.0360276") - std = float("0.0194684") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [384] - dtype = "float32" - min_val = float("0.983995") - max_val = float("1.10634") - mean = float("1.02014") - std = float("0.0217389") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [384] - dtype = "float32" - min_val = float("0.00264665") - max_val = float("0.023121") - mean = float("0.00648167") - std = float("0.00274806") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [384] - dtype = "float32" - min_val = float("-0.123387") - max_val = float("0.0727253") - mean = float("-0.0176321") - std = float("0.0264909") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0270575") - max_val = float("0.0462384") - mean = float("-3.55445e-05") - std = float("0.00113617") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [384] - dtype = "float32" - min_val = float("-0.104816") - max_val = float("0.0223762") - mean = float("-0.0364198") - std = float("0.0208798") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [384] - dtype = "float32" - min_val = float("0.94874") - max_val = float("1.1165") - mean = float("1.01377") - std = float("0.0272987") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [384] - dtype = "float32" - min_val = float("0.00343716") - max_val = float("0.0376123") - mean = float("0.0090883") - std = float("0.00464666") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [384] - dtype = "float32" - min_val = float("-0.10852") - max_val = float("0.127486") - mean = float("-0.0306965") - std = float("0.0372253") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0198386") - max_val = float("0.030936") - mean = float("-5.54577e-05") - std = float("0.00127916") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [384] - dtype = "float32" - min_val = float("-0.104081") - max_val = float("0.0447507") - mean = float("-0.0253876") - std = float("0.0148924") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [384] - dtype = 
"float32" - min_val = float("0.976825") - max_val = float("1.08842") - mean = float("1.01085") - std = float("0.0167894") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [384] - dtype = "float32" - min_val = float("0.00194344") - max_val = float("0.0134659") - mean = float("0.00358093") - std = float("0.00122632") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [384] - dtype = "float32" - min_val = float("-0.0628121") - max_val = float("0.0450731") - mean = float("-0.0156591") - std = float("0.0180011") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0512204") - max_val = float("0.054272") - mean = float("-7.40727e-05") - std = float("0.00193913") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [384] - dtype = "float32" - min_val = float("-0.0410536") - max_val = float("0.01587") - mean = float("-0.0085433") - std = float("0.00822821") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [384] - dtype = "float32" - min_val = float("0.964874") - max_val = float("1.05256") - mean = float("1.00968") - std = float("0.0113002") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [384] - dtype = "float32" - min_val = float("0.00148058") - max_val = float("0.0172515") - mean = float("0.00279498") - std = float("0.00125745") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [384] - dtype = "float32" - min_val = float("-0.0613616") - max_val = float("0.0450121") - mean = float("-0.0183066") - std = float("0.0175678") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0416152") - max_val = float("0.034011") - mean = float("-8.95819e-05") - std = float("0.00171404") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [384] - dtype = "float32" - min_val = float("-0.0517357") - max_val = float("0.00607691") - mean = float("-0.0160479") - std = float("0.00958104") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [384] - dtype = "float32" - min_val = float("0.990579") - max_val = float("1.10515") - mean = float("1.02122") - std = float("0.0166168") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [384] - dtype = "float32" - min_val = float("0.00250794") - max_val = float("0.0303554") - mean = float("0.00898847") - std = float("0.00426596") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [384] - dtype = "float32" - min_val = float("-0.256873") - max_val = float("0.206661") - mean = float("-0.0286502") - std = float("0.0560866") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0249441") - max_val = float("0.0277039") - mean = float("-1.94354e-05") - std = float("0.00104416") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [384] - dtype = "float32" - min_val = float("-0.222759") - max_val = float("0.492934") - mean = float("0.21931") - std = float("0.123919") - data = None - - 
-class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [384] - dtype = "float32" - min_val = float("0.926931") - max_val = float("1.47785") - mean = float("1.14418") - std = float("0.0736893") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [384] - dtype = "float32" - min_val = float("0.00264483") - max_val = float("0.0974121") - mean = float("0.00798104") - std = float("0.00654548") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [384] - dtype = "float32" - min_val = float("-0.159565") - max_val = float("0.121315") - mean = float("-0.0255922") - std = float("0.03015") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0876303") - max_val = float("0.0602537") - mean = float("-0.000242493") - std = float("0.00453291") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.164436") - max_val = float("0.0462449") - mean = float("-0.0239848") - std = float("0.0389306") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.846263") - max_val = float("1.05376") - mean = float("0.97547") - std = float("0.0236593") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.000579722") - max_val = float("0.0224778") - mean = float("0.00388016") - std = float("0.00265473") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.0510712") - max_val = float("0.0759894") - mean = float("-0.00386027") - std = float("0.0154973") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.050302") - max_val = float("0.0287473") - mean = float("-0.000136137") - std = float("0.00336423") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.164436") - max_val = float("0.0462449") - mean = float("-0.0239848") - std = float("0.0389306") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.736017") - max_val = float("1.12247") - mean = float("1.02412") - std = float("0.0366284") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.00276112") - max_val = float("0.0194419") - mean = float("0.0066911") - std = float("0.00265579") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.16004") - max_val = float("0.0998512") - mean = float("-0.0225358") - std = float("0.0339102") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0444225") - max_val = float("0.0402511") - mean = float("-7.11892e-05") - std = float("0.00226777") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [192] - dtype = "float32" - min_val = float("-0.188768") - max_val = 
float("0.0421971") - mean = float("-0.0567874") - std = float("0.0481367") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [192] - dtype = "float32" - min_val = float("0.900639") - max_val = float("1.18283") - mean = float("1.01749") - std = float("0.0479958") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [192] - dtype = "float32" - min_val = float("0.00535257") - max_val = float("0.0763801") - mean = float("0.0170376") - std = float("0.0105864") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [192] - dtype = "float32" - min_val = float("-0.240955") - max_val = float("0.297589") - mean = float("-0.0264239") - std = float("0.0412806") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0580625") - max_val = float("0.0640828") - mean = float("-9.47042e-05") - std = float("0.00257609") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [192] - dtype = "float32" - min_val = float("-0.188722") - max_val = float("0.00866455") - mean = float("-0.0625057") - std = float("0.0326874") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [192] - dtype = "float32" - min_val = float("0.925735") - max_val = float("1.04838") - mean = float("0.976103") - std = float("0.0176867") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [192] - dtype = "float32" - min_val = float("0.00136816") - max_val = float("0.0126086") - mean = float("0.00472105") - std = float("0.00208163") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [192] - dtype = "float32" - min_val = float("-0.0491501") - max_val = float("0.0347549") - mean = float("-0.0081364") - std = float("0.0128259") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0432741") - max_val = float("0.0265813") - mean = float("-0.0003395") - std = float("0.0033694") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [192] - dtype = "float32" - min_val = float("-0.188722") - max_val = float("0.00866455") - mean = float("-0.0625057") - std = float("0.0326874") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [192] - dtype = "float32" - min_val = float("0.972045") - max_val = float("1.15056") - mean = float("1.02587") - std = float("0.028794") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [192] - dtype = "float32" - min_val = float("0.00273726") - max_val = float("0.035953") - mean = float("0.00833297") - std = float("0.004788") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [192] - dtype = "float32" - min_val = float("-0.106149") - max_val = float("0.117302") - mean = float("-0.0227699") - std = float("0.0288669") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0431991") - max_val = float("0.0568112") - mean = float("-8.61466e-05") - std = float("0.00241602") - data = None - - -class Program_weight_tensor_parameter_150: - name = 
"parameter_150" - shape = [192] - dtype = "float32" - min_val = float("-0.186994") - max_val = float("0.0596707") - mean = float("-0.0739558") - std = float("0.0398551") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [192] - dtype = "float32" - min_val = float("0.884207") - max_val = float("1.2133") - mean = float("1.0167") - std = float("0.0500698") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [192] - dtype = "float32" - min_val = float("0.00600039") - max_val = float("0.0445964") - mean = float("0.0130705") - std = float("0.00628092") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [192] - dtype = "float32" - min_val = float("-0.0739679") - max_val = float("0.0406422") - mean = float("-0.0181695") - std = float("0.022831") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0454997") - max_val = float("0.0702924") - mean = float("-8.84971e-05") - std = float("0.0027379") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [192] - dtype = "float32" - min_val = float("-0.223812") - max_val = float("-0.011848") - mean = float("-0.0808429") - std = float("0.0411614") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [192] - dtype = "float32" - min_val = float("0.905395") - max_val = float("1.02866") - mean = float("0.977772") - std = float("0.0224939") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [192] - dtype = "float32" - min_val = float("0.00182472") - max_val = float("0.0140011") - mean = float("0.00488018") - std = float("0.00160896") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [192] - dtype = "float32" - min_val = float("-0.0421578") - max_val = float("0.0455086") - mean = float("-0.0102449") - std = float("0.0184866") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0390054") - max_val = float("0.0688211") - mean = float("-0.000451601") - std = float("0.00386161") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [192] - dtype = "float32" - min_val = float("-0.223812") - max_val = float("-0.011848") - mean = float("-0.0808429") - std = float("0.0411614") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [192] - dtype = "float32" - min_val = float("0.949933") - max_val = float("1.11112") - mean = float("1.02292") - std = float("0.030073") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [192] - dtype = "float32" - min_val = float("0.00467925") - max_val = float("0.0479479") - mean = float("0.0111132") - std = float("0.00613812") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [192] - dtype = "float32" - min_val = float("-0.101233") - max_val = float("0.0755529") - mean = float("-0.0174453") - std = float("0.0302045") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0474729") - max_val = float("0.0539855") - mean = float("-7.31038e-05") - std = 
float("0.00259765") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [192] - dtype = "float32" - min_val = float("-0.228542") - max_val = float("0.0777608") - mean = float("-0.0924101") - std = float("0.0451672") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [192] - dtype = "float32" - min_val = float("0.890182") - max_val = float("1.19896") - mean = float("1.01861") - std = float("0.0532403") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [192] - dtype = "float32" - min_val = float("0.00579067") - max_val = float("0.0572229") - mean = float("0.0147046") - std = float("0.0085496") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [192] - dtype = "float32" - min_val = float("-0.147872") - max_val = float("0.114807") - mean = float("-0.0323967") - std = float("0.0333687") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0419213") - max_val = float("0.0860108") - mean = float("-0.000108934") - std = float("0.0030019") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [192] - dtype = "float32" - min_val = float("-0.19326") - max_val = float("0.0143295") - mean = float("-0.0644048") - std = float("0.0302824") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [192] - dtype = "float32" - min_val = float("0.927945") - max_val = float("1.15109") - mean = float("1.015") - std = float("0.0376052") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [192] - dtype = "float32" - min_val = float("0.00306597") - max_val = float("0.0203842") - mean = float("0.00643735") - std = float("0.00249811") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [192] - dtype = "float32" - min_val = float("-0.0752295") - max_val = float("0.0935841") - mean = float("-0.020241") - std = float("0.0237735") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0570792") - max_val = float("0.0685417") - mean = float("-0.000178261") - std = float("0.0043567") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [192] - dtype = "float32" - min_val = float("-0.0981279") - max_val = float("0.0362243") - mean = float("-0.0135197") - std = float("0.0200556") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [192] - dtype = "float32" - min_val = float("0.92722") - max_val = float("1.19339") - mean = float("1.00446") - std = float("0.0253754") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [192] - dtype = "float32" - min_val = float("0.00242938") - max_val = float("0.0286347") - mean = float("0.00506947") - std = float("0.00262691") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [192] - dtype = "float32" - min_val = float("-0.0575542") - max_val = float("0.0378027") - mean = float("-0.0131132") - std = float("0.0183883") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = 
float("-0.0970742") - max_val = float("0.0665634") - mean = float("-0.000110766") - std = float("0.00377498") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [192] - dtype = "float32" - min_val = float("-0.156035") - max_val = float("-0.00048574") - mean = float("-0.0380548") - std = float("0.0212619") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [192] - dtype = "float32" - min_val = float("0.923345") - max_val = float("1.24736") - mean = float("1.00888") - std = float("0.029844") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [192] - dtype = "float32" - min_val = float("0.00440107") - max_val = float("0.0576727") - mean = float("0.013745") - std = float("0.00731603") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [192] - dtype = "float32" - min_val = float("-0.431982") - max_val = float("0.576694") - mean = float("-0.0311932") - std = float("0.102866") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0482483") - max_val = float("0.0395703") - mean = float("-2.80417e-05") - std = float("0.00235958") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [192] - dtype = "float32" - min_val = float("-0.549518") - max_val = float("1.15667") - mean = float("0.361587") - std = float("0.347055") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [192] - dtype = "float32" - min_val = float("0.547838") - max_val = float("1.57375") - mean = float("1.15649") - std = float("0.184225") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [192] - dtype = "float32" - min_val = float("0.00263969") - max_val = float("0.178377") - mean = float("0.0146839") - std = float("0.0148158") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [192] - dtype = "float32" - min_val = float("-0.206901") - max_val = float("0.201284") - mean = float("-0.0220776") - std = float("0.0538491") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.122958") - max_val = float("0.0975332") - mean = float("-0.000422197") - std = float("0.00948692") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [96] - dtype = "float32" - min_val = float("-0.45597") - max_val = float("0.225384") - mean = float("-0.00899429") - std = float("0.142447") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [96] - dtype = "float32" - min_val = float("0.768038") - max_val = float("1.23793") - mean = float("0.951894") - std = float("0.070433") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [96] - dtype = "float32" - min_val = float("0.001789") - max_val = float("0.0296245") - mean = float("0.00790415") - std = float("0.00529182") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [96] - dtype = "float32" - min_val = float("-0.0397087") - max_val = float("0.0776004") - mean = float("-0.00745076") - std = float("0.0174977") - data = None - - -class Program_weight_tensor_parameter_194: - name = 
"parameter_194" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0690797") - max_val = float("0.0479575") - mean = float("-0.000703745") - std = float("0.00708779") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [96] - dtype = "float32" - min_val = float("-0.45597") - max_val = float("0.225384") - mean = float("-0.00899429") - std = float("0.142447") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [96] - dtype = "float32" - min_val = float("0.511673") - max_val = float("1.27365") - mean = float("1.03139") - std = float("0.0951454") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [96] - dtype = "float32" - min_val = float("0.00447103") - max_val = float("0.0391091") - mean = float("0.0147757") - std = float("0.00849336") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [96] - dtype = "float32" - min_val = float("-0.301622") - max_val = float("0.115808") - mean = float("-0.0166235") - std = float("0.0543423") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.074843") - max_val = float("0.0811793") - mean = float("-3.78438e-05") - std = float("0.00502658") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [96] - dtype = "float32" - min_val = float("-0.699659") - max_val = float("0.489136") - mean = float("-0.110588") - std = float("0.19522") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [96] - dtype = "float32" - min_val = float("0.729744") - max_val = float("1.6979") - mean = float("0.998907") - std = float("0.132252") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [96] - dtype = "float32" - min_val = float("0.00619322") - max_val = float("0.0753382") - mean = float("0.019023") - std = float("0.0147592") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [96] - dtype = "float32" - min_val = float("-0.164466") - max_val = float("0.123973") - mean = float("-0.0167661") - std = float("0.0517777") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0904058") - max_val = float("0.0674916") - mean = float("-0.000269681") - std = float("0.00563526") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [96] - dtype = "float32" - min_val = float("-0.357406") - max_val = float("0.180909") - mean = float("-0.135683") - std = float("0.0938867") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [96] - dtype = "float32" - min_val = float("0.633758") - max_val = float("1.02328") - mean = float("0.910489") - std = float("0.054703") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [96] - dtype = "float32" - min_val = float("0.00226766") - max_val = float("0.0117187") - mean = float("0.00655901") - std = float("0.00211374") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [96] - dtype = "float32" - min_val = float("-0.0398978") - max_val = float("0.0295094") - mean = float("-0.00629756") - std = float("0.0138696") - data = 
None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0530557") - max_val = float("0.0589347") - mean = float("-0.000721103") - std = float("0.00726949") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [96] - dtype = "float32" - min_val = float("-0.357406") - max_val = float("0.180909") - mean = float("-0.135683") - std = float("0.0938867") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [96] - dtype = "float32" - min_val = float("0.8186") - max_val = float("1.15653") - mean = float("1.02469") - std = float("0.059396") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [96] - dtype = "float32" - min_val = float("0.00540755") - max_val = float("0.0561564") - mean = float("0.0139166") - std = float("0.00841941") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [96] - dtype = "float32" - min_val = float("-0.0899866") - max_val = float("0.0314018") - mean = float("-0.0208995") - std = float("0.025033") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0688236") - max_val = float("0.0708707") - mean = float("-0.000262969") - std = float("0.00532725") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [96] - dtype = "float32" - min_val = float("-0.480249") - max_val = float("0.160466") - mean = float("-0.163804") - std = float("0.129028") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [96] - dtype = "float32" - min_val = float("0.78143") - max_val = float("1.29526") - mean = float("0.966656") - std = float("0.0977077") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [96] - dtype = "float32" - min_val = float("0.0048265") - max_val = float("0.0582232") - mean = float("0.0117158") - std = float("0.00796343") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [96] - dtype = "float32" - min_val = float("-0.130681") - max_val = float("0.0398643") - mean = float("0.00105047") - std = float("0.0297261") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0838623") - max_val = float("0.0655946") - mean = float("-0.0002875") - std = float("0.00621097") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [96] - dtype = "float32" - min_val = float("-0.482468") - max_val = float("0.0678707") - mean = float("-0.164086") - std = float("0.113081") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [96] - dtype = "float32" - min_val = float("0.729445") - max_val = float("1.00356") - mean = float("0.921722") - std = float("0.0524543") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [96] - dtype = "float32" - min_val = float("0.00433347") - max_val = float("0.0204491") - mean = float("0.00997535") - std = float("0.00321772") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [96] - dtype = "float32" - min_val = float("-0.0446457") - max_val = float("0.0320629") - 
mean = float("-0.0167648") - std = float("0.0172211") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0830632") - max_val = float("0.0609676") - mean = float("-0.00159531") - std = float("0.00857298") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [96] - dtype = "float32" - min_val = float("-0.482468") - max_val = float("0.0678707") - mean = float("-0.164086") - std = float("0.113081") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [96] - dtype = "float32" - min_val = float("0.771572") - max_val = float("1.15355") - mean = float("0.986257") - std = float("0.0571516") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [96] - dtype = "float32" - min_val = float("0.00988202") - max_val = float("0.116393") - mean = float("0.0221825") - std = float("0.0148292") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [96] - dtype = "float32" - min_val = float("-0.11947") - max_val = float("0.0604604") - mean = float("-0.0128069") - std = float("0.0343308") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0988696") - max_val = float("0.0787178") - mean = float("-0.000160458") - std = float("0.00610999") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [96] - dtype = "float32" - min_val = float("-0.55586") - max_val = float("0.346647") - mean = float("-0.174731") - std = float("0.169548") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [96] - dtype = "float32" - min_val = float("0.757047") - max_val = float("1.34058") - mean = float("0.957428") - std = float("0.110982") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [96] - dtype = "float32" - min_val = float("0.0072924") - max_val = float("0.0565885") - mean = float("0.0158637") - std = float("0.0105653") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [96] - dtype = "float32" - min_val = float("-0.114767") - max_val = float("0.17063") - mean = float("-0.00772215") - std = float("0.0609158") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.117416") - max_val = float("0.106153") - mean = float("-0.000121096") - std = float("0.00709328") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [96] - dtype = "float32" - min_val = float("-0.61179") - max_val = float("0.578869") - mean = float("-0.0791862") - std = float("0.248136") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [96] - dtype = "float32" - min_val = float("0.660131") - max_val = float("1.23003") - mean = float("0.871774") - std = float("0.112507") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [96] - dtype = "float32" - min_val = float("0.00710491") - max_val = float("0.0445585") - mean = float("0.0147412") - std = float("0.00680549") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [96] - dtype = "float32" - 
min_val = float("-0.0931176") - max_val = float("0.0541358") - mean = float("-0.0149245") - std = float("0.0312047") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.131264") - max_val = float("0.116267") - mean = float("-0.000328445") - std = float("0.009384") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [96] - dtype = "float32" - min_val = float("-0.0948697") - max_val = float("0.224511") - mean = float("0.0617959") - std = float("0.0539032") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [96] - dtype = "float32" - min_val = float("0.713603") - max_val = float("1.12367") - mean = float("0.935296") - std = float("0.0628703") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [96] - dtype = "float32" - min_val = float("0.00170399") - max_val = float("0.0269514") - mean = float("0.00620616") - std = float("0.00275772") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [96] - dtype = "float32" - min_val = float("-0.062198") - max_val = float("0.114507") - mean = float("-0.0207577") - std = float("0.0255889") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.0878207") - max_val = float("0.0793284") - mean = float("-0.000117661") - std = float("0.006259") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.290627") - max_val = float("0.197489") - mean = float("-0.0642084") - std = float("0.0682552") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("0.677585") - max_val = float("1.44589") - mean = float("0.889482") - std = float("0.0775531") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.00615469") - max_val = float("0.0653019") - mean = float("0.013452") - std = float("0.00596706") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-0.148841") - max_val = float("0.0453696") - mean = float("-0.0305179") - std = float("0.0258069") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.103954") - max_val = float("0.0889274") - mean = float("-0.000488273") - std = float("0.00677572") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [384] - dtype = "float32" - min_val = float("-0.197184") - max_val = float("0.235944") - mean = float("-0.0649902") - std = float("0.0407184") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [384] - dtype = "float32" - min_val = float("0.87395") - max_val = float("1.53027") - mean = float("1.02089") - std = float("0.0620815") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [384] - dtype = "float32" - min_val = float("0.00549855") - max_val = float("0.0533697") - mean = float("0.00979429") - std = float("0.00454056") - data = None - - -class 
Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [384] - dtype = "float32" - min_val = float("-0.248796") - max_val = float("0.131533") - mean = float("-0.0383995") - std = float("0.0378875") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.155662") - max_val = float("0.0954612") - mean = float("-0.000485357") - std = float("0.00632631") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [192] - dtype = "float32" - min_val = float("-0.172982") - max_val = float("0.00602343") - mean = float("-0.0635736") - std = float("0.031811") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [192] - dtype = "float32" - min_val = float("0.888688") - max_val = float("0.993558") - mean = float("0.952478") - std = float("0.0161577") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [192] - dtype = "float32" - min_val = float("0.00348684") - max_val = float("0.0168184") - mean = float("0.00628357") - std = float("0.00200719") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [192] - dtype = "float32" - min_val = float("-0.0659639") - max_val = float("0.0508781") - mean = float("-0.0199445") - std = float("0.0244522") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0503466") - max_val = float("0.0313236") - mean = float("-0.000610377") - std = float("0.0047421") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [192] - dtype = "float32" - min_val = float("-0.172982") - max_val = float("0.00602343") - mean = float("-0.0635736") - std = float("0.031811") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [192] - dtype = "float32" - min_val = float("0.947546") - max_val = float("1.03225") - mean = float("0.990347") - std = float("0.0162233") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [192] - dtype = "float32" - min_val = float("0.00995711") - max_val = float("0.0401164") - mean = float("0.017634") - std = float("0.00573009") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [192] - dtype = "float32" - min_val = float("-0.140772") - max_val = float("0.138155") - mean = float("-0.0239356") - std = float("0.0422014") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0482809") - max_val = float("0.0734527") - mean = float("-7.90712e-05") - std = float("0.0027116") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [192] - dtype = "float32" - min_val = float("-0.211375") - max_val = float("-0.00263112") - mean = float("-0.0715416") - std = float("0.0343984") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [192] - dtype = "float32" - min_val = float("0.941718") - max_val = float("1.15116") - mean = float("1.03082") - std = float("0.0423415") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [192] - dtype = "float32" - min_val = float("0.0211078") - max_val = 
float("0.140703") - mean = float("0.0359457") - std = float("0.0113705") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [192] - dtype = "float32" - min_val = float("-0.167852") - max_val = float("0.256733") - mean = float("-0.0421418") - std = float("0.0506957") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0667117") - max_val = float("0.059304") - mean = float("-0.000107451") - std = float("0.00324108") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [192] - dtype = "float32" - min_val = float("-0.190843") - max_val = float("-0.00892811") - mean = float("-0.0684056") - std = float("0.030824") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [192] - dtype = "float32" - min_val = float("0.946948") - max_val = float("1.04539") - mean = float("0.990344") - std = float("0.013334") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [192] - dtype = "float32" - min_val = float("0.00204439") - max_val = float("0.0101563") - mean = float("0.00350592") - std = float("0.000926001") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [192] - dtype = "float32" - min_val = float("-0.0741431") - max_val = float("0.0383866") - mean = float("-0.0189034") - std = float("0.0168987") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0319038") - max_val = float("0.0417245") - mean = float("-0.000600213") - std = float("0.00487987") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [192] - dtype = "float32" - min_val = float("-0.190843") - max_val = float("-0.00892811") - mean = float("-0.0684056") - std = float("0.030824") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [192] - dtype = "float32" - min_val = float("0.956509") - max_val = float("1.11411") - mean = float("1.00672") - std = float("0.0257334") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [192] - dtype = "float32" - min_val = float("0.00625705") - max_val = float("0.0226068") - mean = float("0.0103997") - std = float("0.00287058") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [192] - dtype = "float32" - min_val = float("-0.141039") - max_val = float("0.0702292") - mean = float("-0.0298093") - std = float("0.0313043") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0504256") - max_val = float("0.0761871") - mean = float("-0.000102443") - std = float("0.0027184") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [192] - dtype = "float32" - min_val = float("-0.224642") - max_val = float("-0.0187006") - mean = float("-0.0912059") - std = float("0.0387278") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [192] - dtype = "float32" - min_val = float("0.950234") - max_val = float("1.19239") - mean = float("1.02577") - std = float("0.0451849") - data = None - - -class Program_weight_tensor_parameter_282: - name = 
"parameter_282" - shape = [192] - dtype = "float32" - min_val = float("0.0202728") - max_val = float("0.0768753") - mean = float("0.0352012") - std = float("0.0111367") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [192] - dtype = "float32" - min_val = float("-0.22549") - max_val = float("0.0963683") - mean = float("-0.0586879") - std = float("0.0627008") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0629141") - max_val = float("0.0883468") - mean = float("-0.000127578") - std = float("0.00336756") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [192] - dtype = "float32" - min_val = float("-0.150226") - max_val = float("-0.00319821") - mean = float("-0.0660682") - std = float("0.0225506") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [192] - dtype = "float32" - min_val = float("0.935894") - max_val = float("1.07288") - mean = float("1.0007") - std = float("0.0211788") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [192] - dtype = "float32" - min_val = float("0.00178607") - max_val = float("0.00643779") - mean = float("0.00311503") - std = float("0.000840167") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [192] - dtype = "float32" - min_val = float("-0.0642588") - max_val = float("0.0879255") - mean = float("-0.009792") - std = float("0.0179078") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.029514") - max_val = float("0.043096") - mean = float("-0.000329408") - std = float("0.00549588") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [192] - dtype = "float32" - min_val = float("-0.150226") - max_val = float("-0.00319822") - mean = float("-0.0660682") - std = float("0.0225506") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [192] - dtype = "float32" - min_val = float("0.939778") - max_val = float("1.11505") - mean = float("0.995075") - std = float("0.0251252") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [192] - dtype = "float32" - min_val = float("0.00642307") - max_val = float("0.0266081") - mean = float("0.0114071") - std = float("0.00308351") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [192] - dtype = "float32" - min_val = float("-0.18069") - max_val = float("0.0972039") - mean = float("-0.0325111") - std = float("0.0330231") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0382342") - max_val = float("0.0664391") - mean = float("-0.000128083") - std = float("0.00267217") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [192] - dtype = "float32" - min_val = float("-0.282528") - max_val = float("0.0109662") - mean = float("-0.106158") - std = float("0.0388174") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [192] - dtype = "float32" - min_val = float("0.946051") - max_val = float("1.24915") - mean = float("1.02799") - std = 
float("0.0411221") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [192] - dtype = "float32" - min_val = float("0.00896292") - max_val = float("0.0393411") - mean = float("0.0158995") - std = float("0.00471727") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [192] - dtype = "float32" - min_val = float("-0.220858") - max_val = float("0.10809") - mean = float("-0.0372746") - std = float("0.044431") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0533745") - max_val = float("0.0594756") - mean = float("-0.000147957") - std = float("0.00371064") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [192] - dtype = "float32" - min_val = float("-0.250169") - max_val = float("-0.0169445") - mean = float("-0.118185") - std = float("0.0428781") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [192] - dtype = "float32" - min_val = float("0.916471") - max_val = float("1.13189") - mean = float("1.02572") - std = float("0.0416176") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [192] - dtype = "float32" - min_val = float("0.00405995") - max_val = float("0.0116058") - mean = float("0.00629149") - std = float("0.00151752") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [192] - dtype = "float32" - min_val = float("-0.117604") - max_val = float("0.0642248") - mean = float("0.0107736") - std = float("0.0236389") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0597477") - max_val = float("0.0854183") - mean = float("-0.000166658") - std = float("0.00511791") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [192] - dtype = "float32" - min_val = float("-0.174475") - max_val = float("0.20701") - mean = float("-0.00689085") - std = float("0.0496763") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [192] - dtype = "float32" - min_val = float("0.951544") - max_val = float("1.21569") - mean = float("1.05632") - std = float("0.0494344") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [192] - dtype = "float32" - min_val = float("0.00423262") - max_val = float("0.0403318") - mean = float("0.00759489") - std = float("0.00311219") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [192] - dtype = "float32" - min_val = float("-0.0961858") - max_val = float("0.0406769") - mean = float("-0.00802736") - std = float("0.0245313") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0579338") - max_val = float("0.208244") - mean = float("-0.000163925") - std = float("0.00519386") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-0.243733") - max_val = float("-0.0546405") - mean = float("-0.121279") - std = float("0.0329438") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - 
min_val = float("0.821465") - max_val = float("1.01492") - mean = float("0.913551") - std = float("0.0255274") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.00632715") - max_val = float("0.0367869") - mean = float("0.0101591") - std = float("0.00340929") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-0.0871009") - max_val = float("0.0863362") - mean = float("-0.0276529") - std = float("0.0233351") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0365944") - max_val = float("0.0462394") - mean = float("-0.000227368") - std = float("0.00408063") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [768] - dtype = "float32" - min_val = float("-0.101787") - max_val = float("0.0665855") - mean = float("-0.0546584") - std = float("0.0146864") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [768] - dtype = "float32" - min_val = float("0.95621") - max_val = float("1.13825") - mean = float("1.02215") - std = float("0.0205763") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [768] - dtype = "float32" - min_val = float("0.0043328") - max_val = float("0.027335") - mean = float("0.00718712") - std = float("0.00194442") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [768] - dtype = "float32" - min_val = float("-0.0981102") - max_val = float("0.0940955") - mean = float("-0.0370968") - std = float("0.0222538") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0461237") - max_val = float("0.0869437") - mean = float("-0.000256912") - std = float("0.00342022") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.151767") - max_val = float("0.0695731") - mean = float("-0.0384536") - std = float("0.0201375") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("0.894036") - max_val = float("1.07238") - mean = float("0.984632") - std = float("0.0127924") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.00290649") - max_val = float("0.0408954") - mean = float("0.00666489") - std = float("0.00326729") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-0.0653166") - max_val = float("0.0519308") - mean = float("-0.00623302") - std = float("0.0155997") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0309181") - max_val = float("0.0533571") - mean = float("-7.98848e-05") - std = float("0.00292507") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [384] - dtype = "float32" - min_val = float("-0.151767") - max_val = float("0.0695731") - mean = float("-0.0384536") - std = float("0.0201375") - data = None - - -class 
Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [384] - dtype = "float32" - min_val = float("0.888673") - max_val = float("1.0755") - mean = float("0.996161") - std = float("0.0119175") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [384] - dtype = "float32" - min_val = float("0.0142738") - max_val = float("0.311691") - mean = float("0.0422161") - std = float("0.0212825") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [384] - dtype = "float32" - min_val = float("-0.214832") - max_val = float("0.109983") - mean = float("-0.0665168") - std = float("0.0509291") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0333794") - max_val = float("0.041775") - mean = float("-0.000107745") - std = float("0.00109587") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [384] - dtype = "float32" - min_val = float("-0.0782042") - max_val = float("0.112772") - mean = float("-0.0178359") - std = float("0.0153444") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [384] - dtype = "float32" - min_val = float("0.921308") - max_val = float("1.1661") - mean = float("1.01649") - std = float("0.0243616") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [384] - dtype = "float32" - min_val = float("0.0102295") - max_val = float("0.115044") - mean = float("0.0305011") - std = float("0.0124013") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [384] - dtype = "float32" - min_val = float("-0.153753") - max_val = float("0.147071") - mean = float("-0.0372654") - std = float("0.0461481") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0226589") - max_val = float("0.036417") - mean = float("-6.21237e-05") - std = float("0.00142638") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [384] - dtype = "float32" - min_val = float("-0.069438") - max_val = float("0.0210057") - mean = float("-0.0221755") - std = float("0.0130288") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [384] - dtype = "float32" - min_val = float("0.948363") - max_val = float("1.1661") - mean = float("1.01607") - std = float("0.0267976") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [384] - dtype = "float32" - min_val = float("0.0303298") - max_val = float("0.235692") - mean = float("0.0873822") - std = float("0.0294964") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [384] - dtype = "float32" - min_val = float("-1.41616") - max_val = float("1.37834") - mean = float("-0.0672685") - std = float("0.446602") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-0.0365696") - max_val = float("0.0477869") - mean = float("3.75444e-05") - std = float("0.00244321") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [384] - dtype = "float32" - min_val = float("-0.0175498") - max_val = float("0.0243298") - mean = 
float("-0.00128851") - std = float("0.0065282") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [384] - dtype = "float32" - min_val = float("0.972045") - max_val = float("1.06093") - mean = float("0.99618") - std = float("0.01202") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [384] - dtype = "float32" - min_val = float("0.0015358") - max_val = float("0.00696368") - mean = float("0.00325261") - std = float("0.000776971") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [384] - dtype = "float32" - min_val = float("-0.0833909") - max_val = float("0.0596304") - mean = float("-0.028885") - std = float("0.0177319") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0289284") - max_val = float("0.0278605") - mean = float("-0.000360372") - std = float("0.00262503") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [384] - dtype = "float32" - min_val = float("-0.0175498") - max_val = float("0.0243298") - mean = float("-0.00128851") - std = float("0.0065282") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [384] - dtype = "float32" - min_val = float("0.974435") - max_val = float("1.08363") - mean = float("1.00583") - std = float("0.017741") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [384] - dtype = "float32" - min_val = float("0.00723957") - max_val = float("0.0375603") - mean = float("0.0161577") - std = float("0.00499723") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [384] - dtype = "float32" - min_val = float("-0.252151") - max_val = float("0.0737095") - mean = float("-0.08036") - std = float("0.0422289") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0230068") - max_val = float("0.0485732") - mean = float("-0.000124469") - std = float("0.00111462") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [384] - dtype = "float32" - min_val = float("-0.0466225") - max_val = float("0.00898686") - mean = float("-0.00779507") - std = float("0.00746548") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [384] - dtype = "float32" - min_val = float("0.958003") - max_val = float("1.12965") - mean = float("1.01422") - std = float("0.0192962") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [384] - dtype = "float32" - min_val = float("0.0315362") - max_val = float("0.132781") - mean = float("0.069239") - std = float("0.0176957") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [384] - dtype = "float32" - min_val = float("-0.817765") - max_val = float("0.618692") - mean = float("-0.19556") - std = float("0.181151") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.022563") - max_val = float("0.038276") - mean = float("-0.000122085") - std = float("0.00133203") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [384] - 
dtype = "float32" - min_val = float("-0.033857") - max_val = float("0.0139088") - mean = float("-0.006985") - std = float("0.00755489") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [384] - dtype = "float32" - min_val = float("0.986684") - max_val = float("1.03523") - mean = float("1.0021") - std = float("0.00693134") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [384] - dtype = "float32" - min_val = float("0.0010937") - max_val = float("0.00473797") - mean = float("0.0019152") - std = float("0.000506383") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [384] - dtype = "float32" - min_val = float("-0.0582268") - max_val = float("0.108843") - mean = float("-0.0163738") - std = float("0.0197979") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.020788") - max_val = float("0.0286466") - mean = float("-0.000218693") - std = float("0.00228777") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [384] - dtype = "float32" - min_val = float("-0.033857") - max_val = float("0.0139088") - mean = float("-0.006985") - std = float("0.00755489") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [384] - dtype = "float32" - min_val = float("0.984593") - max_val = float("1.06746") - mean = float("1.00655") - std = float("0.0121664") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [384] - dtype = "float32" - min_val = float("0.00450449") - max_val = float("0.0227935") - mean = float("0.00957872") - std = float("0.00312672") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [384] - dtype = "float32" - min_val = float("-0.144327") - max_val = float("0.265646") - mean = float("-0.0565659") - std = float("0.048812") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0124244") - max_val = float("0.0276773") - mean = float("-9.55709e-05") - std = float("0.00094902") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [384] - dtype = "float32" - min_val = float("-0.0512307") - max_val = float("0.00385635") - mean = float("-0.0195493") - std = float("0.00836547") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [384] - dtype = "float32" - min_val = float("0.978618") - max_val = float("1.08483") - mean = float("1.01365") - std = float("0.0153886") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [384] - dtype = "float32" - min_val = float("0.0063934") - max_val = float("0.0246998") - mean = float("0.0111198") - std = float("0.00257045") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [384] - dtype = "float32" - min_val = float("-0.120871") - max_val = float("0.132915") - mean = float("-0.0260459") - std = float("0.0299784") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0142981") - max_val = float("0.024141") - mean = float("-4.79187e-05") - std = float("0.00131278") - data = None - 
- -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [384] - dtype = "float32" - min_val = float("-0.0676664") - max_val = float("0.0208998") - mean = float("-0.0318731") - std = float("0.0122307") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [384] - dtype = "float32" - min_val = float("0.984256") - max_val = float("1.05794") - mean = float("1.01498") - std = float("0.0106765") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [384] - dtype = "float32" - min_val = float("0.00788932") - max_val = float("0.0348657") - mean = float("0.0128533") - std = float("0.00302232") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [384] - dtype = "float32" - min_val = float("-0.101719") - max_val = float("0.172283") - mean = float("-0.0367191") - std = float("0.0276733") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0172054") - max_val = float("0.0376208") - mean = float("-0.000183184") - std = float("0.00259433") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [384] - dtype = "float32" - min_val = float("-0.0230121") - max_val = float("0.0211028") - mean = float("5.7334e-05") - std = float("0.00782956") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [384] - dtype = "float32" - min_val = float("0.99505") - max_val = float("1.08363") - mean = float("1.04199") - std = float("0.0135379") - data = None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [384] - dtype = "float32" - min_val = float("0.0124186") - max_val = float("0.025482") - mean = float("0.0160088") - std = float("0.00200221") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [384] - dtype = "float32" - min_val = float("-0.0886053") - max_val = float("0.033751") - mean = float("-0.0450013") - std = float("0.0179425") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0428553") - max_val = float("0.0454061") - mean = float("-0.000216885") - std = float("0.00310941") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [1024] - dtype = "float32" - min_val = float("-3.76748") - max_val = float("-0.735718") - mean = float("-2.19173") - std = float("0.429764") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [1024] - dtype = "float32" - min_val = float("1.62261") - max_val = float("4.45166") - mean = float("3.08738") - std = float("0.25481") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [1024] - dtype = "float32" - min_val = float("0.00255178") - max_val = float("0.0174347") - mean = float("0.00504102") - std = float("0.00123753") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [1024] - dtype = "float32" - min_val = float("-0.0981028") - max_val = float("0.109197") - mean = float("-0.0429288") - std = float("0.0200304") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0759075") - max_val = 
float("0.0960025") - mean = float("-0.000303492") - std = float("0.00327529") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [768] - dtype = "float32" - min_val = float("-0.0164547") - max_val = float("0.00136048") - mean = float("-0.000808882") - std = float("0.00234169") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0859031") - max_val = float("0.147876") - mean = float("-0.000294646") - std = float("0.00174956") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [384] - dtype = "float32" - min_val = float("-1.77797") - max_val = float("0.31184") - mean = float("-0.312311") - std = float("0.291454") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [384] - dtype = "float32" - min_val = float("0.188879") - max_val = float("1.81779") - mean = float("0.610184") - std = float("0.262228") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [384] - dtype = "float32" - min_val = float("4.15295e-05") - max_val = float("0.00124149") - mean = float("0.00018635") - std = float("0.000117554") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [384] - dtype = "float32" - min_val = float("-0.0894214") - max_val = float("0.0525955") - mean = float("0.0172684") - std = float("0.0154183") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0239132") - max_val = float("0.0254034") - mean = float("-0.000294387") - std = float("0.00236383") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [384] - dtype = "float32" - min_val = float("-1.77797") - max_val = float("0.31184") - mean = float("-0.312311") - std = float("0.291454") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [384] - dtype = "float32" - min_val = float("0.332455") - max_val = float("2.60228") - mean = float("1.02626") - std = float("0.289998") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [384] - dtype = "float32" - min_val = float("0.000319043") - max_val = float("0.00414738") - mean = float("0.00107632") - std = float("0.000507674") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [384] - dtype = "float32" - min_val = float("-0.186705") - max_val = float("0.0905628") - mean = float("0.0170662") - std = float("0.021429") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0184804") - max_val = float("0.0310221") - mean = float("-3.75571e-05") - std = float("0.00148755") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [384] - dtype = "float32" - min_val = float("-2.58674") - max_val = float("0.0349821") - mean = float("-1.57085") - std = float("0.417307") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [384] - dtype = "float32" - min_val = float("0.525383") - max_val = float("1.6495") - mean = float("1.13854") - std = float("0.149789") - data = None - - -class Program_weight_tensor_parameter_399: - name = 
"parameter_399" - shape = [384] - dtype = "float32" - min_val = float("0.0256519") - max_val = float("0.110068") - mean = float("0.0524846") - std = float("0.0116704") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [384] - dtype = "float32" - min_val = float("-0.778566") - max_val = float("0.344438") - mean = float("-0.221917") - std = float("0.103427") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0296622") - max_val = float("0.0537224") - mean = float("-0.000158945") - std = float("0.00197052") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [384] - dtype = "float32" - min_val = float("-1.94349") - max_val = float("0.641156") - mean = float("-0.576479") - std = float("0.359073") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [384] - dtype = "float32" - min_val = float("0.164837") - max_val = float("2.07047") - mean = float("0.56295") - std = float("0.227775") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [384] - dtype = "float32" - min_val = float("6.5426e-05") - max_val = float("0.00116989") - mean = float("0.00025892") - std = float("0.000131517") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [384] - dtype = "float32" - min_val = float("-0.034319") - max_val = float("0.060842") - mean = float("0.0169333") - std = float("0.0124885") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0211277") - max_val = float("0.0256464") - mean = float("-0.000316836") - std = float("0.00224938") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [384] - dtype = "float32" - min_val = float("-1.94349") - max_val = float("0.641156") - mean = float("-0.576479") - std = float("0.359073") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [384] - dtype = "float32" - min_val = float("0.582152") - max_val = float("2.16129") - mean = float("1.08524") - std = float("0.256663") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [384] - dtype = "float32" - min_val = float("0.00078451") - max_val = float("0.00519898") - mean = float("0.00162415") - std = float("0.000518067") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [384] - dtype = "float32" - min_val = float("-0.0628209") - max_val = float("0.0895942") - mean = float("0.0228312") - std = float("0.0198838") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0187271") - max_val = float("0.0312371") - mean = float("-6.33411e-05") - std = float("0.00157788") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [384] - dtype = "float32" - min_val = float("-2.4015") - max_val = float("0.848053") - mean = float("-1.4071") - std = float("0.361673") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [384] - dtype = "float32" - min_val = float("0.461972") - max_val = float("1.92206") - mean = float("1.16949") - std = float("0.148563") 
- data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [384] - dtype = "float32" - min_val = float("0.0223589") - max_val = float("0.0650232") - mean = float("0.034113") - std = float("0.00657291") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [384] - dtype = "float32" - min_val = float("-0.609242") - max_val = float("0.728225") - mean = float("-0.148877") - std = float("0.0847971") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0203116") - max_val = float("0.0405905") - mean = float("-0.000153735") - std = float("0.00198218") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [384] - dtype = "float32" - min_val = float("-1.88015") - max_val = float("0.45251") - mean = float("-0.486805") - std = float("0.376833") - data = None - - -class Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [384] - dtype = "float32" - min_val = float("0.0772688") - max_val = float("2.12425") - mean = float("0.442764") - std = float("0.218251") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [384] - dtype = "float32" - min_val = float("5.14732e-05") - max_val = float("0.00175808") - mean = float("0.000313916") - std = float("0.00017621") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [384] - dtype = "float32" - min_val = float("-0.0350628") - max_val = float("0.0623597") - mean = float("0.0217881") - std = float("0.0146826") - data = None - - -class Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0144766") - max_val = float("0.0264016") - mean = float("-0.000421792") - std = float("0.0019438") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [384] - dtype = "float32" - min_val = float("-1.88015") - max_val = float("0.45251") - mean = float("-0.486805") - std = float("0.376833") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [384] - dtype = "float32" - min_val = float("0.518812") - max_val = float("2.23061") - mean = float("1.05416") - std = float("0.261303") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [384] - dtype = "float32" - min_val = float("0.000907154") - max_val = float("0.00473234") - mean = float("0.00216236") - std = float("0.000621055") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [384] - dtype = "float32" - min_val = float("-0.156848") - max_val = float("0.0998884") - mean = float("0.02917") - std = float("0.023776") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0200188") - max_val = float("0.0377429") - mean = float("-6.77733e-05") - std = float("0.00166655") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [384] - dtype = "float32" - min_val = float("-2.1601") - max_val = float("0.422034") - mean = float("-1.36858") - std = float("0.278385") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [384] - dtype = "float32" - min_val = float("0.71356") - max_val 
= float("1.64114") - mean = float("1.14618") - std = float("0.102016") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [384] - dtype = "float32" - min_val = float("0.015836") - max_val = float("0.0669088") - mean = float("0.0268458") - std = float("0.00709364") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [384] - dtype = "float32" - min_val = float("-0.604472") - max_val = float("0.189662") - mean = float("-0.105803") - std = float("0.069063") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0259247") - max_val = float("0.0434116") - mean = float("-0.000126202") - std = float("0.00187571") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [384] - dtype = "float32" - min_val = float("-2.9286") - max_val = float("1.66241") - mean = float("-0.761307") - std = float("0.644444") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [384] - dtype = "float32" - min_val = float("0.953521") - max_val = float("2.92614") - mean = float("1.86813") - std = float("0.276938") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [384] - dtype = "float32" - min_val = float("0.00145196") - max_val = float("0.00602303") - mean = float("0.00286041") - std = float("0.000620991") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [384] - dtype = "float32" - min_val = float("-0.206077") - max_val = float("0.108202") - mean = float("0.0475519") - std = float("0.0246364") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0455732") - max_val = float("0.0357705") - mean = float("-0.000554005") - std = float("0.00431649") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [384] - dtype = "float32" - min_val = float("-2.25212") - max_val = float("0.68194") - mean = float("-0.778669") - std = float("0.473843") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [384] - dtype = "float32" - min_val = float("0.966069") - max_val = float("2.89985") - mean = float("2.1016") - std = float("0.306322") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [384] - dtype = "float32" - min_val = float("0.000400888") - max_val = float("0.00504261") - mean = float("0.000981843") - std = float("0.000289437") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [384] - dtype = "float32" - min_val = float("-0.0495509") - max_val = float("0.0680366") - mean = float("0.0216031") - std = float("0.0123324") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.145612") - max_val = float("0.0575213") - mean = float("-0.00024107") - std = float("0.00300636") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [768] - dtype = "float32" - min_val = float("-2.40748") - max_val = float("0.642802") - mean = float("-0.909529") - std = float("0.33999") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - 
shape = [768] - dtype = "float32" - min_val = float("0.529868") - max_val = float("1.91302") - mean = float("0.921944") - std = float("0.149362") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [768] - dtype = "float32" - min_val = float("0.00474618") - max_val = float("0.0390067") - mean = float("0.00819721") - std = float("0.00242367") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [768] - dtype = "float32" - min_val = float("-0.182419") - max_val = float("0.16049") - mean = float("0.0278199") - std = float("0.0398523") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [768, 512, 3, 3] - dtype = "float32" - min_val = float("-0.059099") - max_val = float("0.0444559") - mean = float("-7.08532e-05") - std = float("0.00199255") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [512] - dtype = "float32" - min_val = float("-3.3982") - max_val = float("1.66922") - mean = float("-1.1631") - std = float("0.514405") - data = None - - -class Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [512] - dtype = "float32" - min_val = float("0.523518") - max_val = float("1.6758") - mean = float("1.11208") - std = float("0.148137") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [512] - dtype = "float32" - min_val = float("0.000933487") - max_val = float("0.00746954") - mean = float("0.00344664") - std = float("0.000801527") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = [512] - dtype = "float32" - min_val = float("-0.110012") - max_val = float("0.0714297") - mean = float("-0.0365401") - std = float("0.0293225") - data = None - - -class Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.330875") - max_val = float("0.172913") - mean = float("-0.000459854") - std = float("0.00662096") - data = None - - -class Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [384] - dtype = "float32" - min_val = float("-0.0108223") - max_val = float("0.000907801") - mean = float("-0.0031106") - std = float("0.00227253") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.223828") - max_val = float("0.20505") - mean = float("-0.00218766") - std = float("0.00489559") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [192] - dtype = "float32" - min_val = float("-1.97815") - max_val = float("0.402075") - mean = float("-0.350999") - std = float("0.333852") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [192] - dtype = "float32" - min_val = float("0.0527639") - max_val = float("2.1601") - mean = float("0.580979") - std = float("0.418931") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [192] - dtype = "float32" - min_val = float("5.97182e-05") - max_val = float("0.00113282") - mean = float("0.000337135") - std = float("0.00017036") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [192] - dtype = "float32" - min_val = float("-0.0272331") - max_val = float("0.0426344") - mean = float("0.00406202") - std = float("0.0112711") - data = 
None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0211821") - max_val = float("0.0576226") - mean = float("-0.000295795") - std = float("0.00359937") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [192] - dtype = "float32" - min_val = float("-1.97815") - max_val = float("0.402075") - mean = float("-0.350999") - std = float("0.333852") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [192] - dtype = "float32" - min_val = float("0.372396") - max_val = float("2.6943") - mean = float("1.20241") - std = float("0.492555") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [192] - dtype = "float32" - min_val = float("0.000778601") - max_val = float("0.00986122") - mean = float("0.0028243") - std = float("0.0011229") - data = None - - -class Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [192] - dtype = "float32" - min_val = float("-0.0718662") - max_val = float("0.098836") - mean = float("0.0132633") - std = float("0.0284276") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0267941") - max_val = float("0.0380043") - mean = float("-0.000114945") - std = float("0.00270079") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [192] - dtype = "float32" - min_val = float("-2.89627") - max_val = float("-0.181006") - mean = float("-1.31578") - std = float("0.40203") - data = None - - -class Program_weight_tensor_parameter_465: - name = "parameter_465" - shape = [192] - dtype = "float32" - min_val = float("0.695389") - max_val = float("2.10154") - mean = float("1.18272") - std = float("0.170823") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [192] - dtype = "float32" - min_val = float("0.0380002") - max_val = float("0.186937") - mean = float("0.0783337") - std = float("0.0246352") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [192] - dtype = "float32" - min_val = float("-1.80338") - max_val = float("1.39476") - mean = float("-0.158506") - std = float("0.279166") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0376485") - max_val = float("0.0442722") - mean = float("-0.000156605") - std = float("0.00324367") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [192] - dtype = "float32" - min_val = float("-1.94409") - max_val = float("0.506739") - mean = float("-0.280964") - std = float("0.32113") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [192] - dtype = "float32" - min_val = float("0.0470323") - max_val = float("1.77439") - mean = float("0.444469") - std = float("0.306448") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [192] - dtype = "float32" - min_val = float("5.53183e-05") - max_val = float("0.00145499") - mean = float("0.000343414") - std = float("0.000218973") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [192] - dtype = "float32" - min_val = float("-0.0324414") - max_val = 
float("0.0424277") - mean = float("0.0078867") - std = float("0.0106231") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0232793") - max_val = float("0.0340463") - mean = float("-0.000372018") - std = float("0.00343686") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [192] - dtype = "float32" - min_val = float("-1.94409") - max_val = float("0.506739") - mean = float("-0.280964") - std = float("0.32113") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [192] - dtype = "float32" - min_val = float("0.486392") - max_val = float("2.27602") - mean = float("1.14015") - std = float("0.376136") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [192] - dtype = "float32" - min_val = float("0.001572") - max_val = float("0.00624474") - mean = float("0.00321651") - std = float("0.000843905") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [192] - dtype = "float32" - min_val = float("-0.0611836") - max_val = float("0.0803488") - mean = float("0.0262682") - std = float("0.0244502") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.021333") - max_val = float("0.031939") - mean = float("-0.000148173") - std = float("0.00289116") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [192] - dtype = "float32" - min_val = float("-2.51265") - max_val = float("-0.125796") - mean = float("-1.28955") - std = float("0.445077") - data = None - - -class Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [192] - dtype = "float32" - min_val = float("0.655675") - max_val = float("1.67878") - mean = float("1.2032") - std = float("0.16689") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [192] - dtype = "float32" - min_val = float("0.0291112") - max_val = float("0.117157") - mean = float("0.0521028") - std = float("0.0142132") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [192] - dtype = "float32" - min_val = float("-1.75423") - max_val = float("0.269628") - mean = float("-0.0616626") - std = float("0.184866") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0358087") - max_val = float("0.0447245") - mean = float("-0.000190853") - std = float("0.00335683") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [192] - dtype = "float32" - min_val = float("-1.76065") - max_val = float("0.463281") - mean = float("-0.263631") - std = float("0.335459") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [192] - dtype = "float32" - min_val = float("0.00363281") - max_val = float("1.68356") - mean = float("0.352056") - std = float("0.252441") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [192] - dtype = "float32" - min_val = float("9.9945e-07") - max_val = float("0.00178321") - mean = float("0.000315486") - std = float("0.000233695") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape 
= [192] - dtype = "float32" - min_val = float("-0.0321653") - max_val = float("0.0434796") - mean = float("0.0100317") - std = float("0.010429") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0333151") - max_val = float("0.028244") - mean = float("-0.000424522") - std = float("0.00328881") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [192] - dtype = "float32" - min_val = float("-1.76065") - max_val = float("0.463281") - mean = float("-0.263631") - std = float("0.335459") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [192] - dtype = "float32" - min_val = float("0.407314") - max_val = float("1.98401") - mean = float("1.06799") - std = float("0.335081") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [192] - dtype = "float32" - min_val = float("0.00143898") - max_val = float("0.00771346") - mean = float("0.00340617") - std = float("0.000937101") - data = None - - -class Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [192] - dtype = "float32" - min_val = float("-0.0318019") - max_val = float("0.0790559") - mean = float("0.0270601") - std = float("0.019367") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0285572") - max_val = float("0.0385687") - mean = float("-0.000143999") - std = float("0.00298657") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [192] - dtype = "float32" - min_val = float("-2.50189") - max_val = float("0.137771") - mean = float("-1.24368") - std = float("0.425346") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [192] - dtype = "float32" - min_val = float("0.657082") - max_val = float("1.81751") - mean = float("1.17096") - std = float("0.166144") - data = None - - -class Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [192] - dtype = "float32" - min_val = float("0.0188017") - max_val = float("0.0700368") - mean = float("0.0347428") - std = float("0.00799167") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [192] - dtype = "float32" - min_val = float("-1.21798") - max_val = float("0.267509") - mean = float("-0.0357914") - std = float("0.12809") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.03732") - max_val = float("0.0499939") - mean = float("-0.000197441") - std = float("0.00341168") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [192] - dtype = "float32" - min_val = float("-2.08361") - max_val = float("0.526166") - mean = float("-0.273402") - std = float("0.375102") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [192] - dtype = "float32" - min_val = float("0.000539323") - max_val = float("0.733627") - mean = float("0.211797") - std = float("0.136369") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [192] - dtype = "float32" - min_val = float("5.73491e-08") - max_val = float("0.000709717") - mean = float("0.000185769") - std = float("0.000106476") - data = None - 
- -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [192] - dtype = "float32" - min_val = float("-0.024762") - max_val = float("0.0297538") - mean = float("0.00669963") - std = float("0.00867654") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0223003") - max_val = float("0.0285012") - mean = float("-0.00027288") - std = float("0.00290769") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [192] - dtype = "float32" - min_val = float("-2.08361") - max_val = float("0.526166") - mean = float("-0.273402") - std = float("0.375102") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [192] - dtype = "float32" - min_val = float("0.395992") - max_val = float("1.96799") - mean = float("0.961194") - std = float("0.304605") - data = None - - -class Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [192] - dtype = "float32" - min_val = float("0.00143872") - max_val = float("0.00758554") - mean = float("0.00340905") - std = float("0.000995444") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [192] - dtype = "float32" - min_val = float("-0.0308625") - max_val = float("0.0974639") - mean = float("0.0355014") - std = float("0.0243965") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0300099") - max_val = float("0.0331466") - mean = float("-0.000174018") - std = float("0.00307819") - data = None - - -class Program_weight_tensor_parameter_509: - name = "parameter_509" - shape = [192] - dtype = "float32" - min_val = float("-2.74569") - max_val = float("-0.0811876") - mean = float("-1.23738") - std = float("0.435119") - data = None - - -class Program_weight_tensor_parameter_510: - name = "parameter_510" - shape = [192] - dtype = "float32" - min_val = float("0.763204") - max_val = float("1.6262") - mean = float("1.15469") - std = float("0.143358") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [192] - dtype = "float32" - min_val = float("0.0159479") - max_val = float("0.0420721") - mean = float("0.0253839") - std = float("0.00548298") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [192] - dtype = "float32" - min_val = float("-0.985135") - max_val = float("0.209935") - mean = float("-0.0465764") - std = float("0.111494") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0517747") - max_val = float("0.0533624") - mean = float("-0.000217609") - std = float("0.00340238") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [192] - dtype = "float32" - min_val = float("-1.21509") - max_val = float("0.443206") - mean = float("-0.23316") - std = float("0.338913") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [192] - dtype = "float32" - min_val = float("-0.000141425") - max_val = float("0.676871") - mean = float("0.191956") - std = float("0.120826") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [192] - dtype = "float32" - min_val = float("1.88795e-10") - max_val = 
float("0.00060921") - mean = float("0.00019844") - std = float("0.000122163") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [192] - dtype = "float32" - min_val = float("-0.0415572") - max_val = float("0.0356041") - mean = float("0.00645752") - std = float("0.0102765") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0362203") - max_val = float("0.0384156") - mean = float("-0.000250814") - std = float("0.00300878") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [192] - dtype = "float32" - min_val = float("-1.21509") - max_val = float("0.443206") - mean = float("-0.23316") - std = float("0.338913") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [192] - dtype = "float32" - min_val = float("0.38485") - max_val = float("1.57063") - mean = float("0.854584") - std = float("0.261022") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [192] - dtype = "float32" - min_val = float("0.00126636") - max_val = float("0.00625458") - mean = float("0.00353727") - std = float("0.000960266") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [192] - dtype = "float32" - min_val = float("-0.0709334") - max_val = float("0.0992781") - mean = float("0.0299011") - std = float("0.022397") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0303355") - max_val = float("0.033651") - mean = float("-0.000135556") - std = float("0.00307625") - data = None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [192] - dtype = "float32" - min_val = float("-2.49114") - max_val = float("-0.133527") - mean = float("-1.25068") - std = float("0.419307") - data = None - - -class Program_weight_tensor_parameter_525: - name = "parameter_525" - shape = [192] - dtype = "float32" - min_val = float("0.68941") - max_val = float("1.52402") - mean = float("1.1287") - std = float("0.135387") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [192] - dtype = "float32" - min_val = float("0.0102917") - max_val = float("0.0340277") - mean = float("0.0181781") - std = float("0.00444353") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [192] - dtype = "float32" - min_val = float("-0.598499") - max_val = float("0.205069") - mean = float("-0.0404677") - std = float("0.0908447") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0539394") - max_val = float("0.0542177") - mean = float("-0.000198971") - std = float("0.00340909") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [192] - dtype = "float32" - min_val = float("-1.22047") - max_val = float("0.496178") - mean = float("-0.168848") - std = float("0.293103") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [192] - dtype = "float32" - min_val = float("0.00876153") - max_val = float("1.5288") - mean = float("0.237756") - std = float("0.211674") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - 
shape = [192] - dtype = "float32" - min_val = float("1.74591e-05") - max_val = float("0.00624593") - mean = float("0.00045268") - std = float("0.000588624") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [192] - dtype = "float32" - min_val = float("-0.065087") - max_val = float("0.0770018") - mean = float("0.00835075") - std = float("0.0148227") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0660848") - max_val = float("0.0277484") - mean = float("-0.000371825") - std = float("0.00364693") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [192] - dtype = "float32" - min_val = float("-1.22047") - max_val = float("0.496178") - mean = float("-0.168848") - std = float("0.293103") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [192] - dtype = "float32" - min_val = float("0.355908") - max_val = float("1.45471") - mean = float("0.758883") - std = float("0.217186") - data = None - - -class Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [192] - dtype = "float32" - min_val = float("0.00268518") - max_val = float("0.0127648") - mean = float("0.0059621") - std = float("0.00184013") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [192] - dtype = "float32" - min_val = float("-0.0754021") - max_val = float("0.0955923") - mean = float("0.0355705") - std = float("0.0315093") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0700958") - max_val = float("0.056488") - mean = float("-0.000170134") - std = float("0.00301629") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [192] - dtype = "float32" - min_val = float("-1.88075") - max_val = float("-0.211975") - mean = float("-1.14723") - std = float("0.326349") - data = None - - -class Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [192] - dtype = "float32" - min_val = float("0.79192") - max_val = float("1.60588") - mean = float("1.12491") - std = float("0.130114") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [192] - dtype = "float32" - min_val = float("0.00790317") - max_val = float("0.0430673") - mean = float("0.0158827") - std = float("0.00476539") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [192] - dtype = "float32" - min_val = float("-0.455479") - max_val = float("0.211552") - mean = float("-0.0369992") - std = float("0.0813136") - data = None - - -class Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0610284") - max_val = float("0.0703754") - mean = float("-0.000151637") - std = float("0.00331491") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [192] - dtype = "float32" - min_val = float("-2.86758") - max_val = float("1.58079") - mean = float("-0.0285524") - std = float("0.747555") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [192] - dtype = "float32" - min_val = float("0.4869") - max_val = float("2.086") - mean = float("0.902744") - std = float("0.233413") - data = None 
- - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [192] - dtype = "float32" - min_val = float("0.00633974") - max_val = float("0.0416042") - mean = float("0.0149685") - std = float("0.00593254") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [192] - dtype = "float32" - min_val = float("-0.196625") - max_val = float("0.272012") - mean = float("-0.0347194") - std = float("0.049849") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.090327") - max_val = float("0.0781438") - mean = float("-0.00049126") - std = float("0.00703695") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [192] - dtype = "float32" - min_val = float("-2.97514") - max_val = float("1.66537") - mean = float("0.0963337") - std = float("0.664688") - data = None - - -class Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [192] - dtype = "float32" - min_val = float("0.833701") - max_val = float("5.56786") - mean = float("1.91679") - std = float("0.933226") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [192] - dtype = "float32" - min_val = float("0.00262273") - max_val = float("0.0469049") - mean = float("0.00988616") - std = float("0.00423222") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [192] - dtype = "float32" - min_val = float("-0.122926") - max_val = float("0.108141") - mean = float("-0.0158569") - std = float("0.0435084") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0689058") - max_val = float("0.112599") - mean = float("-0.000328781") - std = float("0.0065748") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [384] - dtype = "float32" - min_val = float("-2.92973") - max_val = float("1.33102") - mean = float("-0.301138") - std = float("0.56416") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [384] - dtype = "float32" - min_val = float("0.639938") - max_val = float("2.47799") - mean = float("1.16309") - std = float("0.258012") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [384] - dtype = "float32" - min_val = float("0.00577942") - max_val = float("0.0604073") - mean = float("0.0143328") - std = float("0.00717715") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [384] - dtype = "float32" - min_val = float("-0.172991") - max_val = float("0.184791") - mean = float("0.019209") - std = float("0.0533994") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0651152") - max_val = float("0.063191") - mean = float("-7.21353e-05") - std = float("0.00350596") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [256] - dtype = "float32" - min_val = float("-2.04896") - max_val = float("1.29277") - mean = float("-0.925662") - std = float("0.541886") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [256] - dtype = "float32" - min_val = float("0.52945") - max_val = float("1.69731") - 
mean = float("1.05619") - std = float("0.17661") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [256] - dtype = "float32" - min_val = float("0.000674469") - max_val = float("0.0100212") - mean = float("0.00272823") - std = float("0.00122462") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [256] - dtype = "float32" - min_val = float("-0.164943") - max_val = float("0.107017") - mean = float("-0.0346008") - std = float("0.0479287") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.166132") - max_val = float("0.120738") - mean = float("-0.000624717") - std = float("0.0111393") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [192] - dtype = "float32" - min_val = float("-0.0132669") - max_val = float("0.00100077") - mean = float("-0.00489103") - std = float("0.0031796") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.309648") - max_val = float("0.197925") - mean = float("-0.00390845") - std = float("0.00946233") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [96] - dtype = "float32" - min_val = float("-1.92138") - max_val = float("0.528173") - mean = float("-0.211283") - std = float("0.434644") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [96] - dtype = "float32" - min_val = float("0.140912") - max_val = float("3.21894") - mean = float("0.636059") - std = float("0.66684") - data = None - - -class Program_weight_tensor_parameter_568: - name = "parameter_568" - shape = [96] - dtype = "float32" - min_val = float("7.25601e-05") - max_val = float("0.00138037") - mean = float("0.00035803") - std = float("0.000250121") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - shape = [96] - dtype = "float32" - min_val = float("-0.0383932") - max_val = float("0.0476881") - mean = float("0.00524749") - std = float("0.0168741") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.042012") - max_val = float("0.0802529") - mean = float("-0.000564225") - std = float("0.0064658") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [96] - dtype = "float32" - min_val = float("-1.92138") - max_val = float("0.528173") - mean = float("-0.211283") - std = float("0.434644") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [96] - dtype = "float32" - min_val = float("0.34997") - max_val = float("5.4603") - mean = float("1.08725") - std = float("0.880273") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [96] - dtype = "float32" - min_val = float("0.000393061") - max_val = float("0.00600073") - mean = float("0.00206613") - std = float("0.00106655") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [96] - dtype = "float32" - min_val = float("-0.0914739") - max_val = float("0.110662") - mean = float("0.0155218") - std = float("0.0385942") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [96, 96, 3, 3] - 
dtype = "float32" - min_val = float("-0.0397242") - max_val = float("0.0585133") - mean = float("-0.000194232") - std = float("0.00474942") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [96] - dtype = "float32" - min_val = float("-2.46962") - max_val = float("-0.0207386") - mean = float("-1.22629") - std = float("0.444824") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [96] - dtype = "float32" - min_val = float("0.533115") - max_val = float("1.64651") - mean = float("0.94968") - std = float("0.173477") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [96] - dtype = "float32" - min_val = float("0.0258755") - max_val = float("0.1224") - mean = float("0.0531712") - std = float("0.0215289") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [96] - dtype = "float32" - min_val = float("-2.50049") - max_val = float("1.19932") - mean = float("-0.168452") - std = float("0.381676") - data = None - - -class Program_weight_tensor_parameter_580: - name = "parameter_580" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.173549") - max_val = float("0.0871168") - mean = float("-0.000260514") - std = float("0.00589301") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [96] - dtype = "float32" - min_val = float("-1.39137") - max_val = float("0.557076") - mean = float("-0.134021") - std = float("0.346667") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [96] - dtype = "float32" - min_val = float("0.0456926") - max_val = float("1.87") - mean = float("0.460917") - std = float("0.367799") - data = None - - -class Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [96] - dtype = "float32" - min_val = float("7.8985e-05") - max_val = float("0.00257956") - mean = float("0.000633262") - std = float("0.000529372") - data = None - - -class Program_weight_tensor_parameter_584: - name = "parameter_584" - shape = [96] - dtype = "float32" - min_val = float("-0.0294464") - max_val = float("0.0390531") - mean = float("0.00618468") - std = float("0.0144829") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0499002") - max_val = float("0.03712") - mean = float("-0.000516494") - std = float("0.00601108") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [96] - dtype = "float32" - min_val = float("-1.39137") - max_val = float("0.557076") - mean = float("-0.134021") - std = float("0.346667") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [96] - dtype = "float32" - min_val = float("0.369434") - max_val = float("2.33578") - mean = float("0.904761") - std = float("0.427621") - data = None - - -class Program_weight_tensor_parameter_588: - name = "parameter_588" - shape = [96] - dtype = "float32" - min_val = float("0.0013064") - max_val = float("0.0114625") - mean = float("0.00366985") - std = float("0.00182431") - data = None - - -class Program_weight_tensor_parameter_589: - name = "parameter_589" - shape = [96] - dtype = "float32" - min_val = float("-0.0674013") - max_val = float("0.106024") - mean = float("0.0235788") - std = float("0.0301432") - data = None - - -class Program_weight_tensor_parameter_590: - 
name = "parameter_590" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0544699") - max_val = float("0.0432893") - mean = float("-0.000227051") - std = float("0.00481142") - data = None - - -class Program_weight_tensor_parameter_591: - name = "parameter_591" - shape = [96] - dtype = "float32" - min_val = float("-3.32769") - max_val = float("0.362732") - mean = float("-1.17761") - std = float("0.557882") - data = None - - -class Program_weight_tensor_parameter_592: - name = "parameter_592" - shape = [96] - dtype = "float32" - min_val = float("0.470895") - max_val = float("1.98413") - mean = float("1.04365") - std = float("0.239424") - data = None - - -class Program_weight_tensor_parameter_593: - name = "parameter_593" - shape = [96] - dtype = "float32" - min_val = float("0.0172223") - max_val = float("0.0742288") - mean = float("0.0330012") - std = float("0.0104026") - data = None - - -class Program_weight_tensor_parameter_594: - name = "parameter_594" - shape = [96] - dtype = "float32" - min_val = float("-0.649399") - max_val = float("0.431953") - mean = float("-0.0718373") - std = float("0.18395") - data = None - - -class Program_weight_tensor_parameter_595: - name = "parameter_595" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.136311") - max_val = float("0.143453") - mean = float("-0.000302539") - std = float("0.0058329") - data = None - - -class Program_weight_tensor_parameter_596: - name = "parameter_596" - shape = [96] - dtype = "float32" - min_val = float("-1.25274") - max_val = float("0.579736") - mean = float("-0.110157") - std = float("0.290873") - data = None - - -class Program_weight_tensor_parameter_597: - name = "parameter_597" - shape = [96] - dtype = "float32" - min_val = float("0.0246593") - max_val = float("1.28142") - mean = float("0.323963") - std = float("0.193926") - data = None - - -class Program_weight_tensor_parameter_598: - name = "parameter_598" - shape = [96] - dtype = "float32" - min_val = float("3.03979e-05") - max_val = float("0.0033039") - mean = float("0.000564813") - std = float("0.000505444") - data = None - - -class Program_weight_tensor_parameter_599: - name = "parameter_599" - shape = [96] - dtype = "float32" - min_val = float("-0.0377552") - max_val = float("0.049578") - mean = float("0.0038562") - std = float("0.0144143") - data = None - - -class Program_weight_tensor_parameter_600: - name = "parameter_600" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0412896") - max_val = float("0.0431426") - mean = float("-0.000341595") - std = float("0.00616412") - data = None - - -class Program_weight_tensor_parameter_601: - name = "parameter_601" - shape = [96] - dtype = "float32" - min_val = float("-1.25274") - max_val = float("0.579736") - mean = float("-0.110157") - std = float("0.290873") - data = None - - -class Program_weight_tensor_parameter_602: - name = "parameter_602" - shape = [96] - dtype = "float32" - min_val = float("0.317522") - max_val = float("1.6746") - mean = float("0.750723") - std = float("0.258265") - data = None - - -class Program_weight_tensor_parameter_603: - name = "parameter_603" - shape = [96] - dtype = "float32" - min_val = float("0.00138595") - max_val = float("0.0112807") - mean = float("0.00427388") - std = float("0.00172965") - data = None - - -class Program_weight_tensor_parameter_604: - name = "parameter_604" - shape = [96] - dtype = "float32" - min_val = float("-0.0693035") - max_val = float("0.106113") - mean = float("0.0156645") - std = float("0.029297") - data = 
None - - -class Program_weight_tensor_parameter_605: - name = "parameter_605" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0707344") - max_val = float("0.0533381") - mean = float("-0.000195671") - std = float("0.00486483") - data = None - - -class Program_weight_tensor_parameter_606: - name = "parameter_606" - shape = [96] - dtype = "float32" - min_val = float("-3.5906") - max_val = float("0.291524") - mean = float("-1.12744") - std = float("0.574031") - data = None - - -class Program_weight_tensor_parameter_607: - name = "parameter_607" - shape = [96] - dtype = "float32" - min_val = float("0.519079") - max_val = float("2.19595") - mean = float("1.05638") - std = float("0.238992") - data = None - - -class Program_weight_tensor_parameter_608: - name = "parameter_608" - shape = [96] - dtype = "float32" - min_val = float("0.0152112") - max_val = float("0.0436628") - mean = float("0.0246659") - std = float("0.00537498") - data = None - - -class Program_weight_tensor_parameter_609: - name = "parameter_609" - shape = [96] - dtype = "float32" - min_val = float("-0.688628") - max_val = float("0.554991") - mean = float("-0.0233944") - std = float("0.159928") - data = None - - -class Program_weight_tensor_parameter_610: - name = "parameter_610" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0863777") - max_val = float("0.123252") - mean = float("-0.000287898") - std = float("0.0059129") - data = None - - -class Program_weight_tensor_parameter_611: - name = "parameter_611" - shape = [96] - dtype = "float32" - min_val = float("-0.894065") - max_val = float("0.528462") - mean = float("-0.160914") - std = float("0.280775") - data = None - - -class Program_weight_tensor_parameter_612: - name = "parameter_612" - shape = [96] - dtype = "float32" - min_val = float("0.0198977") - max_val = float("1.40929") - mean = float("0.324417") - std = float("0.214346") - data = None - - -class Program_weight_tensor_parameter_613: - name = "parameter_613" - shape = [96] - dtype = "float32" - min_val = float("2.3179e-05") - max_val = float("0.00304799") - mean = float("0.000599645") - std = float("0.000483623") - data = None - - -class Program_weight_tensor_parameter_614: - name = "parameter_614" - shape = [96] - dtype = "float32" - min_val = float("-0.0235075") - max_val = float("0.0488833") - mean = float("0.00764636") - std = float("0.0134397") - data = None - - -class Program_weight_tensor_parameter_615: - name = "parameter_615" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0516664") - max_val = float("0.0403338") - mean = float("-0.000650817") - std = float("0.006249") - data = None - - -class Program_weight_tensor_parameter_616: - name = "parameter_616" - shape = [96] - dtype = "float32" - min_val = float("-0.894065") - max_val = float("0.528462") - mean = float("-0.160914") - std = float("0.280775") - data = None - - -class Program_weight_tensor_parameter_617: - name = "parameter_617" - shape = [96] - dtype = "float32" - min_val = float("0.177671") - max_val = float("1.78574") - mean = float("0.712956") - std = float("0.285068") - data = None - - -class Program_weight_tensor_parameter_618: - name = "parameter_618" - shape = [96] - dtype = "float32" - min_val = float("0.000855722") - max_val = float("0.011962") - mean = float("0.00441699") - std = float("0.00176161") - data = None - - -class Program_weight_tensor_parameter_619: - name = "parameter_619" - shape = [96] - dtype = "float32" - min_val = float("-0.046195") - max_val = float("0.101491") - mean 
= float("0.0248708") - std = float("0.0287099") - data = None - - -class Program_weight_tensor_parameter_620: - name = "parameter_620" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0617188") - max_val = float("0.0545246") - mean = float("-0.000236251") - std = float("0.00488926") - data = None - - -class Program_weight_tensor_parameter_621: - name = "parameter_621" - shape = [96] - dtype = "float32" - min_val = float("-2.66323") - max_val = float("0.0623296") - mean = float("-1.06373") - std = float("0.489342") - data = None - - -class Program_weight_tensor_parameter_622: - name = "parameter_622" - shape = [96] - dtype = "float32" - min_val = float("0.516127") - max_val = float("1.74272") - mean = float("1.01959") - std = float("0.194249") - data = None - - -class Program_weight_tensor_parameter_623: - name = "parameter_623" - shape = [96] - dtype = "float32" - min_val = float("0.00966188") - max_val = float("0.0337769") - mean = float("0.0185695") - std = float("0.00483648") - data = None - - -class Program_weight_tensor_parameter_624: - name = "parameter_624" - shape = [96] - dtype = "float32" - min_val = float("-0.574086") - max_val = float("0.395306") - mean = float("-0.0490517") - std = float("0.150869") - data = None - - -class Program_weight_tensor_parameter_625: - name = "parameter_625" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0819348") - max_val = float("0.110116") - mean = float("-0.000329675") - std = float("0.0058075") - data = None - - -class Program_weight_tensor_parameter_626: - name = "parameter_626" - shape = [96] - dtype = "float32" - min_val = float("-0.980112") - max_val = float("0.483047") - mean = float("-0.136611") - std = float("0.277228") - data = None - - -class Program_weight_tensor_parameter_627: - name = "parameter_627" - shape = [96] - dtype = "float32" - min_val = float("0.0468125") - max_val = float("1.14578") - mean = float("0.29425") - std = float("0.17328") - data = None - - -class Program_weight_tensor_parameter_628: - name = "parameter_628" - shape = [96] - dtype = "float32" - min_val = float("0.000143071") - max_val = float("0.00493424") - mean = float("0.000913103") - std = float("0.000688244") - data = None - - -class Program_weight_tensor_parameter_629: - name = "parameter_629" - shape = [96] - dtype = "float32" - min_val = float("-0.0363086") - max_val = float("0.0523069") - mean = float("0.00479237") - std = float("0.0159931") - data = None - - -class Program_weight_tensor_parameter_630: - name = "parameter_630" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0661086") - max_val = float("0.0594125") - mean = float("-0.00059754") - std = float("0.00705215") - data = None - - -class Program_weight_tensor_parameter_631: - name = "parameter_631" - shape = [96] - dtype = "float32" - min_val = float("-0.980112") - max_val = float("0.483046") - mean = float("-0.136611") - std = float("0.277228") - data = None - - -class Program_weight_tensor_parameter_632: - name = "parameter_632" - shape = [96] - dtype = "float32" - min_val = float("0.245629") - max_val = float("1.70476") - mean = float("0.608342") - std = float("0.228754") - data = None - - -class Program_weight_tensor_parameter_633: - name = "parameter_633" - shape = [96] - dtype = "float32" - min_val = float("0.00269671") - max_val = float("0.0179759") - mean = float("0.00695992") - std = float("0.00256464") - data = None - - -class Program_weight_tensor_parameter_634: - name = "parameter_634" - shape = [96] - dtype = "float32" - 
min_val = float("-0.0419227") - max_val = float("0.111328") - mean = float("0.0162502") - std = float("0.0308395") - data = None - - -class Program_weight_tensor_parameter_635: - name = "parameter_635" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.070362") - max_val = float("0.0475107") - mean = float("-0.000231982") - std = float("0.0049307") - data = None - - -class Program_weight_tensor_parameter_636: - name = "parameter_636" - shape = [96] - dtype = "float32" - min_val = float("-3.47468") - max_val = float("0.198878") - mean = float("-1.00518") - std = float("0.549489") - data = None - - -class Program_weight_tensor_parameter_637: - name = "parameter_637" - shape = [96] - dtype = "float32" - min_val = float("0.68523") - max_val = float("2.51384") - mean = float("1.07866") - std = float("0.21278") - data = None - - -class Program_weight_tensor_parameter_638: - name = "parameter_638" - shape = [96] - dtype = "float32" - min_val = float("0.0073715") - max_val = float("0.0337206") - mean = float("0.01499") - std = float("0.00482926") - data = None - - -class Program_weight_tensor_parameter_639: - name = "parameter_639" - shape = [96] - dtype = "float32" - min_val = float("-0.403211") - max_val = float("0.240247") - mean = float("-0.0331757") - std = float("0.129637") - data = None - - -class Program_weight_tensor_parameter_640: - name = "parameter_640" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0707057") - max_val = float("0.0823979") - mean = float("-0.000250044") - std = float("0.00594881") - data = None - - -class Program_weight_tensor_parameter_641: - name = "parameter_641" - shape = [96] - dtype = "float32" - min_val = float("-0.626965") - max_val = float("0.448835") - mean = float("-0.0820382") - std = float("0.255843") - data = None - - -class Program_weight_tensor_parameter_642: - name = "parameter_642" - shape = [96] - dtype = "float32" - min_val = float("0.0906111") - max_val = float("1.29038") - mean = float("0.307307") - std = float("0.194816") - data = None - - -class Program_weight_tensor_parameter_643: - name = "parameter_643" - shape = [96] - dtype = "float32" - min_val = float("0.000315305") - max_val = float("0.0148723") - mean = float("0.00305807") - std = float("0.00245914") - data = None - - -class Program_weight_tensor_parameter_644: - name = "parameter_644" - shape = [96] - dtype = "float32" - min_val = float("-0.0371389") - max_val = float("0.0183386") - mean = float("0.000249639") - std = float("0.00910407") - data = None - - -class Program_weight_tensor_parameter_645: - name = "parameter_645" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0911381") - max_val = float("0.0639953") - mean = float("-0.000941367") - std = float("0.00812654") - data = None - - -class Program_weight_tensor_parameter_646: - name = "parameter_646" - shape = [96] - dtype = "float32" - min_val = float("-0.626965") - max_val = float("0.448835") - mean = float("-0.0820382") - std = float("0.255843") - data = None - - -class Program_weight_tensor_parameter_647: - name = "parameter_647" - shape = [96] - dtype = "float32" - min_val = float("0.209511") - max_val = float("1.43917") - mean = float("0.531943") - std = float("0.259075") - data = None - - -class Program_weight_tensor_parameter_648: - name = "parameter_648" - shape = [96] - dtype = "float32" - min_val = float("0.00462531") - max_val = float("0.0495976") - mean = float("0.0190222") - std = float("0.00948176") - data = None - - -class Program_weight_tensor_parameter_649: - 
name = "parameter_649" - shape = [96] - dtype = "float32" - min_val = float("-0.108167") - max_val = float("0.0628188") - mean = float("-0.00562213") - std = float("0.0290345") - data = None - - -class Program_weight_tensor_parameter_650: - name = "parameter_650" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0925723") - max_val = float("0.056426") - mean = float("-0.000299437") - std = float("0.00485596") - data = None - - -class Program_weight_tensor_parameter_651: - name = "parameter_651" - shape = [96] - dtype = "float32" - min_val = float("-2.41584") - max_val = float("0.51672") - mean = float("-0.829512") - std = float("0.467964") - data = None - - -class Program_weight_tensor_parameter_652: - name = "parameter_652" - shape = [96] - dtype = "float32" - min_val = float("0.858214") - max_val = float("2.18042") - mean = float("1.27928") - std = float("0.209066") - data = None - - -class Program_weight_tensor_parameter_653: - name = "parameter_653" - shape = [96] - dtype = "float32" - min_val = float("0.00573305") - max_val = float("0.0283505") - mean = float("0.0129242") - std = float("0.0047369") - data = None - - -class Program_weight_tensor_parameter_654: - name = "parameter_654" - shape = [96] - dtype = "float32" - min_val = float("-0.468758") - max_val = float("0.237107") - mean = float("-0.0457111") - std = float("0.116263") - data = None - - -class Program_weight_tensor_parameter_655: - name = "parameter_655" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.140288") - max_val = float("0.144017") - mean = float("-0.000168599") - std = float("0.00619743") - data = None - - -class Program_weight_tensor_parameter_656: - name = "parameter_656" - shape = [96] - dtype = "float32" - min_val = float("-3.17591") - max_val = float("1.88794") - mean = float("0.50081") - std = float("0.862147") - data = None - - -class Program_weight_tensor_parameter_657: - name = "parameter_657" - shape = [96] - dtype = "float32" - min_val = float("0.218511") - max_val = float("2.64172") - mean = float("0.557308") - std = float("0.321222") - data = None - - -class Program_weight_tensor_parameter_658: - name = "parameter_658" - shape = [96] - dtype = "float32" - min_val = float("0.0059309") - max_val = float("0.0854273") - mean = float("0.0187079") - std = float("0.0133142") - data = None - - -class Program_weight_tensor_parameter_659: - name = "parameter_659" - shape = [96] - dtype = "float32" - min_val = float("-0.21823") - max_val = float("0.243022") - mean = float("-0.0238119") - std = float("0.0717645") - data = None - - -class Program_weight_tensor_parameter_660: - name = "parameter_660" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.159229") - max_val = float("0.164707") - mean = float("-0.000489464") - std = float("0.0123153") - data = None - - -class Program_weight_tensor_parameter_661: - name = "parameter_661" - shape = [96] - dtype = "float32" - min_val = float("-4.93686") - max_val = float("1.57224") - mean = float("0.382249") - std = float("1.05007") - data = None - - -class Program_weight_tensor_parameter_662: - name = "parameter_662" - shape = [96] - dtype = "float32" - min_val = float("0.408469") - max_val = float("6.77488") - mean = float("1.6992") - std = float("1.3056") - data = None - - -class Program_weight_tensor_parameter_663: - name = "parameter_663" - shape = [96] - dtype = "float32" - min_val = float("0.0027471") - max_val = float("0.101892") - mean = float("0.0161575") - std = float("0.0144822") - data = None - - -class 
Program_weight_tensor_parameter_664: - name = "parameter_664" - shape = [96] - dtype = "float32" - min_val = float("-0.113828") - max_val = float("0.255763") - mean = float("0.0304151") - std = float("0.0726062") - data = None - - -class Program_weight_tensor_parameter_665: - name = "parameter_665" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0865442") - max_val = float("0.13143") - mean = float("0.000256689") - std = float("0.0111353") - data = None - - -class Program_weight_tensor_parameter_666: - name = "parameter_666" - shape = [192] - dtype = "float32" - min_val = float("-2.27841") - max_val = float("1.74989") - mean = float("-0.126437") - std = float("0.740487") - data = None - - -class Program_weight_tensor_parameter_667: - name = "parameter_667" - shape = [192] - dtype = "float32" - min_val = float("0.633239") - max_val = float("2.97753") - mean = float("1.09234") - std = float("0.284333") - data = None - - -class Program_weight_tensor_parameter_668: - name = "parameter_668" - shape = [192] - dtype = "float32" - min_val = float("0.00605626") - max_val = float("0.124183") - mean = float("0.0225335") - std = float("0.0173782") - data = None - - -class Program_weight_tensor_parameter_669: - name = "parameter_669" - shape = [192] - dtype = "float32" - min_val = float("-0.363149") - max_val = float("0.203569") - mean = float("-0.0509608") - std = float("0.0932024") - data = None - - -class Program_weight_tensor_parameter_670: - name = "parameter_670" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.0732508") - max_val = float("0.0817381") - mean = float("-0.000189264") - std = float("0.00581775") - data = None - - -class Program_weight_tensor_parameter_671: - name = "parameter_671" - shape = [128] - dtype = "float32" - min_val = float("-2.81739") - max_val = float("1.95963") - mean = float("-0.71131") - std = float("0.648704") - data = None - - -class Program_weight_tensor_parameter_672: - name = "parameter_672" - shape = [128] - dtype = "float32" - min_val = float("0.305831") - max_val = float("2.87595") - mean = float("1.02519") - std = float("0.278769") - data = None - - -class Program_weight_tensor_parameter_673: - name = "parameter_673" - shape = [128] - dtype = "float32" - min_val = float("0.000296253") - max_val = float("0.00655884") - mean = float("0.00182291") - std = float("0.000942289") - data = None - - -class Program_weight_tensor_parameter_674: - name = "parameter_674" - shape = [128] - dtype = "float32" - min_val = float("-0.238123") - max_val = float("0.218266") - mean = float("0.0102315") - std = float("0.0613663") - data = None - - -class Program_weight_tensor_parameter_675: - name = "parameter_675" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.147384") - max_val = float("0.149703") - mean = float("-0.00110619") - std = float("0.0174083") - data = None - - -class Program_weight_tensor_parameter_676: - name = "parameter_676" - shape = [96] - dtype = "float32" - min_val = float("-0.0166051") - max_val = float("-0.00197342") - mean = float("-0.00771776") - std = float("0.0036883") - data = None - - -class Program_weight_tensor_parameter_677: - name = "parameter_677" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.244486") - max_val = float("0.115264") - mean = float("-0.0084264") - std = float("0.0163364") - data = None - - -class Program_weight_tensor_parameter_678: - name = "parameter_678" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - 
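The weight_meta entries above record only summary statistics (shape, dtype, min/max, and, where present, mean/std) with data = None, so replaying a captured subgraph requires synthesizing placeholder tensors from those statistics. Below is a minimal sketch of one way to do that; make_placeholder is a hypothetical helper, not part of this patch, and it simply assumes a normal draw clipped to the recorded range, falling back to a uniform draw when mean/std are absent (as in the [48]-shaped buffers such as parameter_678 above).

import numpy as np
import paddle


def make_placeholder(meta):
    # Hypothetical helper: build a tensor matching a Program_weight_tensor_* metadata class.
    shape = list(meta.shape)
    lo, hi = float(meta.min_val), float(meta.max_val)
    mean = getattr(meta, "mean", None)
    std = getattr(meta, "std", None)
    if mean is not None and std is not None:
        # Draw from the recorded normal statistics, then clip to the recorded range.
        arr = np.random.normal(float(mean), float(std), size=shape)
        arr = np.clip(arr, lo, hi)
    else:
        # Only min/max were recorded (e.g. the 0..0.5 buffers of shape [48]): use a uniform draw.
        arr = np.random.uniform(lo, hi, size=shape)
    return paddle.to_tensor(arr.astype(meta.dtype))


# Usage sketch with one of the metadata classes in this file (illustrative only):
# weight = make_placeholder(Program_weight_tensor_parameter_678)  # shape [48], range [0, 0.5]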
- -class Program_weight_tensor_parameter_679: - name = "parameter_679" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_680: - name = "parameter_680" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_681: - name = "parameter_681" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_682: - name = "parameter_682" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0451306") - max_val = float("0.0516016") - mean = float("-0.00123331") - std = float("0.0108119") - data = None - - -class Program_weight_tensor_parameter_683: - name = "parameter_683" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_684: - name = "parameter_684" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_685: - name = "parameter_685" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_686: - name = "parameter_686" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_687: - name = "parameter_687" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0522823") - max_val = float("0.061342") - mean = float("-0.000213753") - std = float("0.00862639") - data = None - - -class Program_weight_tensor_parameter_688: - name = "parameter_688" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_689: - name = "parameter_689" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_690: - name = "parameter_690" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_691: - name = "parameter_691" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_692: - name = "parameter_692" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.070824") - max_val = float("0.0787998") - mean = float("-0.000460596") - std = float("0.00955491") - data = None - - -class Program_weight_tensor_parameter_693: - name = "parameter_693" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_694: - name = "parameter_694" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_695: - name = "parameter_695" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_696: - name = "parameter_696" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_697: - name = "parameter_697" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0664729") - max_val = float("0.0674885") - mean = float("-0.000911561") - std = float("0.0112967") - data = 
None - - -class Program_weight_tensor_parameter_698: - name = "parameter_698" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_699: - name = "parameter_699" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_700: - name = "parameter_700" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_701: - name = "parameter_701" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_702: - name = "parameter_702" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0618099") - max_val = float("0.0462764") - mean = float("-0.000494121") - std = float("0.00848653") - data = None - - -class Program_weight_tensor_parameter_703: - name = "parameter_703" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_704: - name = "parameter_704" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_705: - name = "parameter_705" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_706: - name = "parameter_706" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_707: - name = "parameter_707" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0903042") - max_val = float("0.0707569") - mean = float("-0.000342113") - std = float("0.00974196") - data = None - - -class Program_weight_tensor_parameter_708: - name = "parameter_708" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_709: - name = "parameter_709" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_710: - name = "parameter_710" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_711: - name = "parameter_711" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_712: - name = "parameter_712" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0793648") - max_val = float("0.0587558") - mean = float("-0.00142765") - std = float("0.0137979") - data = None - - -class Program_weight_tensor_parameter_713: - name = "parameter_713" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_714: - name = "parameter_714" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_715: - name = "parameter_715" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_716: - name = "parameter_716" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_717: - name = 
"parameter_717" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.057566") - max_val = float("0.0709009") - mean = float("-0.000249497") - std = float("0.00895742") - data = None - - -class Program_weight_tensor_parameter_718: - name = "parameter_718" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_719: - name = "parameter_719" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_720: - name = "parameter_720" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_721: - name = "parameter_721" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_722: - name = "parameter_722" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.10442") - max_val = float("0.0691325") - mean = float("-0.000258872") - std = float("0.0104529") - data = None - - -class Program_weight_tensor_parameter_723: - name = "parameter_723" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_724: - name = "parameter_724" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_725: - name = "parameter_725" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_726: - name = "parameter_726" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_727: - name = "parameter_727" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.136744") - max_val = float("0.10005") - mean = float("-0.00171348") - std = float("0.0182976") - data = None - - -class Program_weight_tensor_parameter_728: - name = "parameter_728" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_729: - name = "parameter_729" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_730: - name = "parameter_730" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_731: - name = "parameter_731" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_732: - name = "parameter_732" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.101881") - max_val = float("0.143004") - mean = float("-0.000443039") - std = float("0.0175817") - data = None - - -class Program_weight_tensor_parameter_733: - name = "parameter_733" - shape = [96] - dtype = "float32" - min_val = float("-3.42322") - max_val = float("3.28108") - mean = float("0.327995") - std = float("1.1473") - data = None - - -class Program_weight_tensor_parameter_734: - name = "parameter_734" - shape = [96] - dtype = "float32" - min_val = float("0.874948") - max_val = float("4.92491") - mean = float("1.92226") - std = float("0.754909") - data = None - - -class Program_weight_tensor_parameter_735: - name = "parameter_735" - shape = [96] - dtype = 
"float32" - min_val = float("0.349668") - max_val = float("14.7849") - mean = float("1.53931") - std = float("1.73025") - data = None - - -class Program_weight_tensor_parameter_736: - name = "parameter_736" - shape = [96] - dtype = "float32" - min_val = float("-1.04846") - max_val = float("1.45518") - mean = float("-0.203391") - std = float("0.490156") - data = None - - -class Program_weight_tensor_parameter_737: - name = "parameter_737" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.112555") - max_val = float("0.100752") - mean = float("-0.000270096") - std = float("0.0100051") - data = None - - -class Program_weight_tensor_parameter_738: - name = "parameter_738" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_739: - name = "parameter_739" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_740: - name = "parameter_740" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_741: - name = "parameter_741" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_742: - name = "parameter_742" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.126906") - max_val = float("0.131715") - mean = float("-0.000428282") - std = float("0.0154308") - data = None - - -class Program_weight_tensor_parameter_743: - name = "parameter_743" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_744: - name = "parameter_744" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_745: - name = "parameter_745" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_746: - name = "parameter_746" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_747: - name = "parameter_747" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-0.2537") - max_val = float("0.156204") - mean = float("-0.000164754") - std = float("0.0200964") - data = None - - -class Program_weight_tensor_parameter_748: - name = "parameter_748" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_749: - name = "parameter_749" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_750: - name = "parameter_750" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_751: - name = "parameter_751" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_752: - name = "parameter_752" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-0.247045") - max_val = float("0.224081") - mean = float("-0.00143914") - std = float("0.0546739") + min_val = float("-0.0190742") + max_val = float("0.0352904") + mean = float("5.40338e-06") + std = float("0.000293143") data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt deleted file mode 100644 index d15259c49..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -a6658fa2fb342f963e549f08c1b73729d662eba241da80438af7d5534dbd6003 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py deleted file mode 100644 index 2ae07efe1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/model.py +++ /dev/null @@ -1,247 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): - # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) - multiply_0 = paddle._C_ops.multiply(data_2, data_0) - del data_2 - - # pd_op.flatten: (8x4116xf32) <- (8x1x4116xf32) - flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) - - # pd_op.flatten: (8x27xi64) <- (8x1x27xi64) - flatten_1 = paddle._C_ops.flatten(data_1, 0, 1) - del data_1 - - # pd_op.index_sample: (8x27xf32) <- (8x4116xf32, 8x27xi64) - index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) - del flatten_0, flatten_1 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [8, 1, -1] - - # pd_op.reshape: (8x1x27xf32) <- (8x27xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(index_sample_0, full_int_array_0) - del full_int_array_0, index_sample_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] - - # pd_op.mean: (8x1x1xf32) <- (8x1x27xf32, 1xi64) - mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_1, True) - - # pd_op.subtract: (8x1x27xf32) <- (8x1x27xf32, 8x1x1xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) - - # pd_op.pow: (8x1x27xf32) <- (8x1x27xf32) - pow_0 = paddle._C_ops.pow(subtract_0, float("2")) - del subtract_0 - - # pd_op.sum: (8x1x1xf32) <- (8x1x27xf32, 1xi64) - sum_0 = paddle._C_ops.sum(pow_0, full_int_array_1, paddle.float32, True) - del pow_0 - - # pd_op.numel: (xi64) <- (8x1x27xf32) - numel_0 = paddle._C_ops.numel(reshape_0) - del reshape_0 - - # pd_op.cast: (xi64) <- (xi64) - cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) - del numel_0 - - # pd_op.numel: (xi64) <- (8x1x1xf32) - numel_1 = paddle._C_ops.numel(sum_0) - - # pd_op.cast: (xi64) <- (xi64) - cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) - del numel_1 - - # pd_op.cast: (xf32) <- (xi64) - cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) - del cast_0 - - # pd_op.cast: (xf32) <- (xi64) - cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) - del cast_1 - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(cast_2, cast_3) - del cast_2, cast_3 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(divide_0, full_0, float("-1"), True) - del divide_0, full_0 - - # 
pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (xf32) <- (xf32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - scale_0, full_1, paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.maximum: (xf32) <- (xf32, xf32) - maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) - del full_like_0, scale_0 - - # pd_op.divide: (8x1x1xf32) <- (8x1x1xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_0, maximum_0) - del maximum_0, sum_0 - - # pd_op.sqrt: (8x1x1xf32) <- (8x1x1xf32) - sqrt_0 = paddle._C_ops.sqrt(divide_1) - del divide_1 - - # pd_op.add: (8x1x1xf32) <- (8x1x1xf32, 8x1x1xf32) - add_0 = paddle._C_ops.add(mean_0, sqrt_0) - del mean_0, sqrt_0 - - # pd_op.greater_than: (8x1x4116xb) <- (8x1x4116xf32, 8x1x1xf32) - greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) - del add_0, multiply_0 - - # pd_op.full_like: (8x1x4116xf32) <- (8x1x4116xf32, 1xf32) - full_like_1 = paddle._C_ops.full_like( - data_0, full_1, paddle.float32, paddle.framework._current_expected_place() - ) - del full_1 - - # pd_op.where: (8x1x4116xf32) <- (8x1x4116xb, 8x1x4116xf32, 8x1x4116xf32) - where_0 = paddle._C_ops.where(greater_than_1, data_0, full_like_1) - del data_0, full_like_1, greater_than_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [0, 1] - - # pd_op.unsqueeze: (1x1x4116x2xf32) <- (4116x2xf32, 2xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) - del data_3, full_int_array_2 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x4116x1xf32, 1x1x4116x1xf32]) <- (1x1x4116x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_2) - del unsqueeze_0 - - # builtin.split: (1x1x4116x1xf32, 1x1x4116x1xf32) <- ([1x1x4116x1xf32, 1x1x4116x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] - - # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_3) - del data_4, full_int_array_3 - - # pd_op.split_with_num: ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) <- (8x1x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_2) - del full_2, unsqueeze_1 - - # builtin.split: (8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32) <- ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (8x1x4116x1xf32) <- (1x1x4116x1xf32, 8x1x1x1xf32) - subtract_1 = paddle._C_ops.subtract(split_0, split_2) - del split_2 - - # pd_op.subtract: (8x1x4116x1xf32) <- (1x1x4116x1xf32, 8x1x1x1xf32) - subtract_2 = paddle._C_ops.subtract(split_1, split_3) - del split_3 - - # pd_op.subtract: (8x1x4116x1xf32) <- (8x1x1x1xf32, 1x1x4116x1xf32) - subtract_3 = paddle._C_ops.subtract(split_4, split_0) - del split_0, split_4 - - # pd_op.subtract: (8x1x4116x1xf32) <- (8x1x1x1xf32, 1x1x4116x1xf32) - subtract_4 = paddle._C_ops.subtract(split_5, split_1) - del split_1, split_5 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32]) <- (8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32) - combine_0 = 
[subtract_1, subtract_2, subtract_3, subtract_4] - del subtract_1, subtract_2, subtract_3, subtract_4 - - # pd_op.concat: (8x1x4116x4xf32) <- ([8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32, 8x1x4116x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_3) - del combine_0, full_3 - - # pd_op.min: (8x1x4116xf32) <- (8x1x4116x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_1, False) - del concat_0, full_int_array_1 - - # pd_op.full: (xf32) <- () - full_4 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (8x1x4116xb) <- (8x1x4116xf32, xf32) - greater_than_2 = paddle._C_ops.greater_than(min_0, full_4) - del full_4, min_0 - - # pd_op.cast: (8x1x4116xf32) <- (8x1x4116xb) - cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) - del greater_than_2 - - # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x4116xf32) - multiply_1 = paddle._C_ops.multiply(where_0, cast_4) - del cast_4, where_0 - - # pd_op.multiply: (8x1x4116xf32) <- (8x1x4116xf32, 8x1x1xf32) - multiply_2 = paddle._C_ops.multiply(multiply_1, data_5) - del data_5, multiply_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-2] - - # pd_op.sum: (8x4116xf32) <- (8x1x4116xf32, 1xi64) - sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, False) - del full_int_array_4 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_5 = [] - - # pd_op.max: (xf32) <- (8x4116xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_1, full_int_array_5, False) - del full_int_array_5 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_5) - del full_5, max_0, multiply_2, sum_1 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt deleted file mode 100644 index 896fa94fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py deleted file mode 100644 index fc3dcee91..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/input_meta.py +++ /dev/null @@ -1,38 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 8400, 4] - dtype = "float32" - min_val = float("0.0512187") - max_val = float("14.5679") - mean = float("6.78357") - std = float("2.69712") - data = 
None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8400, 2] - dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8400, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 2] - dtype = "float32" - data = [1.6, 2.397, 2.64463, 1.6] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py deleted file mode 100644 index 561c0c35b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/model.py +++ /dev/null @@ -1,94 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del data_0, full_0 - - # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) - del full_1, split_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_0 = paddle._C_ops.add(scale_0, data_1) - del scale_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_1 = paddle._C_ops.add(split_1, data_1) - del data_1, split_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) - combine_0 = [add_0, add_1] - del add_0, add_1 - - # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) - del data_3, full_3 - - # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) - ( - split_2, - split_3, - ) = split_with_num_1 - del split_with_num_1 - - # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] - del split_2, split_3 - - # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] - - # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 - - # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 
2x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 - - return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_7/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt deleted file mode 100644 index 88f716dff..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py deleted file mode 100644 index 8ca9a2697..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/model.py +++ /dev/null @@ -1,34 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0): - # pd_op.full: (xi32) <- () - full_0 = paddle._C_ops.full( - [], float("4"), paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (8x4116xb) <- (8x4116xi32, xi32) - not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) - del data_0, full_0 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_0 = [] - - # pd_op.sum: (xi64) <- (8x4116xb, 0xi64) - sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) - del full_int_array_0 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xi64, xi64) - greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) - del full_1, not_equal_0, sum_0 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt deleted file mode 100644 index efa879b36..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -e98172b16f1b0a5022e17341958c7348d3c38f47fbbd435acea6cb34167725f0 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json deleted file mode 100644 index 32219c0fa..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} 
\ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py deleted file mode 100644 index e7cef1f03..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/input_meta.py +++ /dev/null @@ -1,31 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [8, 768, 14, 14] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("8.94092") - mean = float("0.271046") - std = float("0.622828") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8, 384, 28, 28] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("11.2275") - mean = float("0.375277") - std = float("0.700531") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8, 192, 56, 56] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("12.1434") - mean = float("0.487172") - std = float("0.770854") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py deleted file mode 100644 index f2c6c745a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/model.py +++ /dev/null @@ -1,1050 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - ): - # pd_op.full: (1xf64) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("14"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (14xi64) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") - del full_1 - - # pd_op.cast: (14xf32) <- (14xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (14xf32) <- (14xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (14xf32) <- (14xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) - del full_4, scale_0 - - # builtin.combine: ([14xf32, 14xf32]) <- (14xf32, 14xf32) - combine_0 = [scale_1, scale_1] - del 
scale_1 - - # pd_op.meshgrid: ([14x14xf32, 14x14xf32]) <- ([14xf32, 14xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (14x14xf32, 14x14xf32) <- ([14x14xf32, 14x14xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) - scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - - # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) - scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - - # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - - # pd_op.scale: (14x14xf32) <- (14x14xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - - # builtin.combine: ([14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32]) <- (14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32) - combine_1 = [scale_2, scale_3, scale_4, scale_5] - del scale_2, scale_3, scale_4, scale_5 - - # pd_op.stack: (14x14x4xf32) <- ([14x14xf32, 14x14xf32, 14x14xf32, 14x14xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([14x14xf32, 14x14xf32]) <- (14x14xf32, 14x14xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (14x14x2xf32) <- ([14x14xf32, 14x14xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (196x4xf32) <- (14x14x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (196x2xf32) <- (14x14x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.full: (196x1xf32) <- () - full_5 = paddle._C_ops.full( - [196, 1], - float("32"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("28"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (28xi64) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") - del full_6 - - # pd_op.cast: (28xf32) <- (28xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (28xf32) <- (28xf32, 1xf32) - scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) - del cast_1 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (28xf32) <- (28xf32, 1xf32) - scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) - del full_7, scale_6 - - # builtin.combine: ([28xf32, 28xf32]) <- (28xf32, 28xf32) - combine_3 = [scale_7, scale_7] - del scale_7 - - # pd_op.meshgrid: ([28x28xf32, 28x28xf32]) <- ([28xf32, 28xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_3) - del combine_3 - - # builtin.split: (28x28xf32, 28x28xf32) <- ([28x28xf32, 28x28xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) - scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - - # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) - scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) - - # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) - scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - - # pd_op.scale: (28x28xf32) <- (28x28xf32, 1xf32) - scale_11 = paddle._C_ops.scale(split_2, full_3, 
float("40"), True) - - # builtin.combine: ([28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32]) <- (28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32) - combine_4 = [scale_8, scale_9, scale_10, scale_11] - del scale_10, scale_11, scale_8, scale_9 - - # pd_op.stack: (28x28x4xf32) <- ([28x28xf32, 28x28xf32, 28x28xf32, 28x28xf32]) - stack_2 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # builtin.combine: ([28x28xf32, 28x28xf32]) <- (28x28xf32, 28x28xf32) - combine_5 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (28x28x2xf32) <- ([28x28xf32, 28x28xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # pd_op.reshape: (784x4xf32) <- (28x28x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) - del stack_2 - - # pd_op.reshape: (784x2xf32) <- (28x28x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) - del stack_3 - - # pd_op.full: (784x1xf32) <- () - full_8 = paddle._C_ops.full( - [784, 1], - float("16"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_9 = paddle._C_ops.full( - [1], float("56"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (56xi64) <- (1xf64, 1xf64, 1xf64) - arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") - del full_0, full_2, full_9 - - # pd_op.cast: (56xf32) <- (56xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (56xf32) <- (56xf32, 1xf32) - scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (56xf32) <- (56xf32, 1xf32) - scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) - del full_10, scale_12 - - # builtin.combine: ([56xf32, 56xf32]) <- (56xf32, 56xf32) - combine_6 = [scale_13, scale_13] - del scale_13 - - # pd_op.meshgrid: ([56x56xf32, 56x56xf32]) <- ([56xf32, 56xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_6) - del combine_6 - - # builtin.split: (56x56xf32, 56x56xf32) <- ([56x56xf32, 56x56xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) - - # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) - - # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) - scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) - - # pd_op.scale: (56x56xf32) <- (56x56xf32, 1xf32) - scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) - del full_3 - - # builtin.combine: ([56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32]) <- (56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32) - combine_7 = [scale_14, scale_15, scale_16, scale_17] - del scale_14, scale_15, scale_16, scale_17 - - # pd_op.stack: (56x56x4xf32) <- ([56x56xf32, 56x56xf32, 56x56xf32, 56x56xf32]) - stack_4 = paddle._C_ops.stack(combine_7, -1) - del combine_7 - - # builtin.combine: ([56x56xf32, 56x56xf32]) <- (56x56xf32, 56x56xf32) - combine_8 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (56x56x2xf32) <- ([56x56xf32, 56x56xf32]) - stack_5 = paddle._C_ops.stack(combine_8, -1) - del combine_8 - - # pd_op.reshape: (3136x4xf32) <- (56x56x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) - del full_int_array_0, stack_4 - - # pd_op.reshape: (3136x2xf32) <- (56x56x2xf32, 2xi64) 
- reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) - del full_int_array_1, stack_5 - - # pd_op.full: (3136x1xf32) <- () - full_11 = paddle._C_ops.full( - [3136, 1], - float("8"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([196x4xf32, 784x4xf32, 3136x4xf32]) <- (196x4xf32, 784x4xf32, 3136x4xf32) - combine_9 = [reshape_0, reshape_2, reshape_4] - - # pd_op.concat: (4116x4xf32) <- ([196x4xf32, 784x4xf32, 3136x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_9, full_12) - del combine_9 - - # builtin.combine: ([196x2xf32, 784x2xf32, 3136x2xf32]) <- (196x2xf32, 784x2xf32, 3136x2xf32) - combine_10 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (4116x2xf32) <- ([196x2xf32, 784x2xf32, 3136x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_10, full_12) - del combine_10 - - # builtin.combine: ([196x1xf32, 784x1xf32, 3136x1xf32]) <- (196x1xf32, 784x1xf32, 3136x1xf32) - combine_11 = [full_5, full_8, full_11] - del full_11, full_5, full_8 - - # pd_op.concat: (4116x1xf32) <- ([196x1xf32, 784x1xf32, 3136x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_11, full_12) - del combine_11, full_12 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_2 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_2 - - # pd_op.pool2d: (8x768x1x1xf32) <- (8x768x14x14xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_0, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_3 = [1, -1, 1, 1] - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) - del parameter_52 - - # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - - # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (8x768x14x14xf32) <- (8x768x14x14xf32, 
8x768x14x14xf32) - add_1 = paddle._C_ops.add(swish_0, data_0) - - # pd_op.conv2d: (8x4x14x14xf32) <- (8x768x14x14xf32, 4x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) - del parameter_45 - - # pd_op.add: (8x4x14x14xf32) <- (8x4x14x14xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (8x768x1x1xf32) <- (8x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) - del parameter_43 - - # pd_op.add: (8x768x1x1xf32) <- (8x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (8x768x1x1xf32) <- (8x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (8x768x14x14xf32) <- (8x768x14x14xf32, 8x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) - del data_0 - - # pd_op.conv2d: (8x768x14x14xf32) <- (8x768x14x14xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (8x768x14x14xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (8x768x14x14xf32) <- (8x768x14x14xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (8x68x14x14xf32) <- (8x768x14x14xf32, 68x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) - del parameter_36 - - # pd_op.add: (8x68x14x14xf32) <- (8x68x14x14xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (8x4x14x14xf32) <- (8x4x14x14xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (8x4x196xf32) <- (8x4x14x14xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (8x196x4xf32) <- (8x4x196xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (8x68x196xf32) <- (8x68x14x14xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (8x196x68xf32) <- (8x68x196xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (8x384x1x1xf32) <- (8x384x28x28xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_1, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - 
pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) - del parameter_34 - - # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) - - # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x28x28xf32) - add_6 = paddle._C_ops.add(swish_2, data_1) - - # pd_op.conv2d: (8x4x28x28xf32) <- (8x384x28x28xf32, 4x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) - del parameter_27 - - # pd_op.add: (8x4x28x28xf32) <- (8x4x28x28xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (8x384x1x1xf32) <- (8x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) - del parameter_25 - - # pd_op.add: (8x384x1x1xf32) <- (8x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (8x384x1x1xf32) <- (8x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (8x384x28x28xf32) <- (8x384x28x28xf32, 8x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) - del data_1 - - # pd_op.conv2d: (8x384x28x28xf32) <- (8x384x28x28xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (8x384x28x28xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, 
tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (8x384x28x28xf32) <- (8x384x28x28xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (8x68x28x28xf32) <- (8x384x28x28xf32, 68x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) - del parameter_18 - - # pd_op.add: (8x68x28x28xf32) <- (8x68x28x28xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (8x4x28x28xf32) <- (8x4x28x28xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (8x4x784xf32) <- (8x4x28x28xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (8x784x4xf32) <- (8x4x784xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (8x68x784xf32) <- (8x68x28x28xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (8x784x68xf32) <- (8x68x784xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (8x192x1x1xf32) <- (8x192x56x56xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_2, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) - del parameter_16 - - # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) - - # pd_op.conv2d: (8x192x56x56xf32) <- (8x192x56x56xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x56x56xf32) - add_11 = paddle._C_ops.add(swish_4, data_2) - - # pd_op.conv2d: (8x4x56x56xf32) <- (8x192x56x56xf32, 4x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = 
paddle._C_ops.reshape(parameter_9, full_int_array_3) - del parameter_9 - - # pd_op.add: (8x4x56x56xf32) <- (8x4x56x56xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (8x192x1x1xf32) <- (8x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) - del parameter_7 - - # pd_op.add: (8x192x1x1xf32) <- (8x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (8x192x1x1xf32) <- (8x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (8x192x56x56xf32) <- (8x192x56x56xf32, 8x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) - del data_2 - - # pd_op.conv2d: (8x192x56x56xf32) <- (8x192x56x56xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (8x192x56x56xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (8x192x56x56xf32) <- (8x192x56x56xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (8x68x56x56xf32) <- (8x192x56x56xf32, 68x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) - del full_int_array_3, parameter_0 - - # pd_op.add: (8x68x56x56xf32) <- (8x68x56x56xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (8x4x56x56xf32) <- (8x4x56x56xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (8x4x3136xf32) <- (8x4x56x56xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - - # pd_op.transpose: (8x3136x4xf32) <- (8x4x3136xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 - - # pd_op.flatten: (8x68x3136xf32) <- (8x68x56x56xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - - # pd_op.transpose: (8x3136x68xf32) <- (8x68x3136xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_13 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_13 - - # builtin.combine: ([8x196x4xf32, 8x784x4xf32, 8x3136x4xf32]) <- (8x196x4xf32, 8x784x4xf32, 8x3136x4xf32) - combine_12 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (8x4116x4xf32) <- ([8x196x4xf32, 8x784x4xf32, 8x3136x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_12, full_13) - del combine_12 - - # builtin.combine: ([8x196x68xf32, 8x784x68xf32, 8x3136x68xf32]) <- (8x196x68xf32, 8x784x68xf32, 8x3136x68xf32) - 
combine_13 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (8x4116x68xf32) <- ([8x196x68xf32, 8x784x68xf32, 8x3136x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_13, full_13) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_13, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_13, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_2, - reshape_4, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt deleted file mode 100644 index 896fa94fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py deleted file mode 100644 index 472007b37..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/input_meta.py +++ /dev/null @@ -1,38 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 8400, 4] - dtype = "float32" - min_val = float("0.0746284") - max_val = float("15.1421") - mean = float("6.30923") - std = float("2.75071") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8400, 2] - dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8400, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 2] - dtype = "float32" - data = [1.6, 2.397, 2.64463, 1.6] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py deleted file mode 100644 index 561c0c35b..000000000 --- 
a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/model.py +++ /dev/null @@ -1,94 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del data_0, full_0 - - # builtin.split: (2x8400x2xf32, 2x8400x2xf32) <- ([2x8400x2xf32, 2x8400x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x8400x2xf32) <- (2x8400x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) - del full_1, split_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_0 = paddle._C_ops.add(scale_0, data_1) - del scale_0 - - # pd_op.add: (2x8400x2xf32) <- (2x8400x2xf32, 8400x2xf32) - add_1 = paddle._C_ops.add(split_1, data_1) - del data_1, split_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([2x8400x2xf32, 2x8400x2xf32]) <- (2x8400x2xf32, 2x8400x2xf32) - combine_0 = [add_0, add_1] - del add_0, add_1 - - # pd_op.concat: (2x8400x4xf32) <- ([2x8400x2xf32, 2x8400x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.multiply: (2x8400x4xf32) <- (2x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([2x1xf32, 2x1xf32]) <- (2x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) - del data_3, full_3 - - # builtin.split: (2x1xf32, 2x1xf32) <- ([2x1xf32, 2x1xf32]) - ( - split_2, - split_3, - ) = split_with_num_1 - del split_with_num_1 - - # builtin.combine: ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32]) <- (2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] - del split_2, split_3 - - # pd_op.concat: (2x4xf32) <- ([2x1xf32, 2x1xf32, 2x1xf32, 2x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] - - # pd_op.reshape: (2x1x4xf32) <- (2x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 - - # pd_op.divide: (2x8400x4xf32) <- (2x8400x4xf32, 2x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 - - return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt deleted file mode 100644 index 0eb7b765d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ 
-87f1c9d15791927678923354dcfc589bd225484f29e377f70153217772641dce \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py deleted file mode 100644 index 880246b79..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/input_meta.py +++ /dev/null @@ -1,76 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [8, 1, 10164] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000676407") - std = float("0.025999") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8, 1, 1] - dtype = "int32" - data = [0, 0, 0, 0, 0, 0, 0, 3] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8, 10164] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000676407") - std = float("0.025999") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [8, 1, 4] - dtype = "float32" - data = [ - 174.715, - 140.8, - 405.956, - 457.6, - 375.985, - 345.193, - 411.639, - 372.906, - 317.49, - 292.0, - 450.008, - 388.0, - 287.439, - 452.211, - 340.211, - 490.947, - 352.0, - 296.267, - 584.17, - 384.267, - 222.933, - 194.723, - 332.444, - 275.609, - 80.8974, - 117.694, - 116.531, - 143.688, - 124.847, - 201.813, - 433.498, - 633.6, - ] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [8, 10164, 4] - dtype = "float32" - min_val = float("-271.994") - max_val = float("993.136") - mean = float("352.517") - std = float("213.87") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py deleted file mode 100644 index b0b1964b8..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/model.py +++ /dev/null @@ -1,287 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (8x10164xi64) <- (8x1x10164xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) - del full_0 - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("8"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") - del full_1, full_2, full_3 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - 
[1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) - del unsqueeze_0 - - # pd_op.cast: (8x1xi64) <- (8x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (8x10164xi64) <- (8x10164xi64, 8x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_0) - del argmax_0, cast_0 - - # pd_op.flatten: (8xi32) <- (8x1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) - del data_1 - - # pd_op.flatten: (81312xi64) <- (8x10164xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (81312xi32) <- (8xi32, 81312xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [8, 10164] - - # pd_op.reshape: (8x10164xi32) <- (81312xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) - del full_int_array_1, gather_0 - - # pd_op.full: (xf32) <- () - full_6 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (8x10164xb) <- (8x10164xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) - del data_2, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("4"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (8x10164xi32) <- (8x10164xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() - ) - del full_7 - - # pd_op.where: (8x10164xi32) <- (8x10164xb, 8x10164xi32, 8x10164xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) - del full_int_array_2 - - # pd_op.gather: (81312x4xf32) <- (8x4xf32, 81312xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) - del flatten_1, full_5, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_3 = [8, 10164, 4] - - # pd_op.reshape: (8x10164x4xf32) <- (81312x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) - del full_int_array_3, gather_1 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (8x10164x5xf32) <- (8x10164xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_8, where_0.dtype), full_8 - ) - del full_8 - - # pd_op.full: (4xi64) <- () - full_9 = paddle._C_ops.full( - [4], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (4xi64) <- (4xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_9, - [4], - paddle.int64, - [float("0"), float("1"), float("2"), float("3")], - paddle.framework._current_expected_place(), - ) - del full_9 - - # pd_op.index_select: (8x10164x4xf32) <- (8x10164x5xf32, 4xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2] - - # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) - unsqueeze_1 = 
paddle._C_ops.unsqueeze(data_3, full_int_array_4) - del data_3 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [1] - - # pd_op.unsqueeze: (8x1x10164x4xf32) <- (8x10164x4xf32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) - del data_4, full_int_array_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [0] - - # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [2147483647] - - # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] - ) - del unsqueeze_1 - - # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] - ) - del full_int_array_6 - - # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] - ) - del full_int_array_4, full_int_array_7, unsqueeze_2 - - # pd_op.maximum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_11 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) - del subtract_0 - - # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) - del clip_0 - - # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) - del subtract_1 - - # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) - del clip_1 - - # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) - del full_10, full_11, subtract_2 - - # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) - del clip_2 - - # pd_op.add: (8x1x10164xf32) <- (8x1x1xf32, 8x1x10164xf32) - add_1 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) - subtract_3 = paddle._C_ops.subtract(add_1, prod_0) - del add_1 - - # pd_op.scale: (8x1x10164xf32) <- (8x1x10164xf32, 1xf32) - scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) - del full_4, subtract_3 - - # pd_op.divide: (8x1x10164xf32) <- (8x1x10164xf32, 
8x1x10164xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_1) - del prod_0, scale_1 - - # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) - multiply_1 = paddle._C_ops.multiply(divide_0, data_0) - del data_0, divide_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_8 = [-2] - - # pd_op.max: (8x10164xf32) <- (8x1x10164xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) - del full_int_array_8, multiply_1 - - # pd_op.unsqueeze: (8x10164x1xf32) <- (8x10164xf32, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) - del full_int_array_0, max_0 - - # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) - del index_select_0, unsqueeze_3, where_0 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_11/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt deleted file mode 100644 index 82d83ca0b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py deleted file mode 100644 index fb1f72140..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/input_meta.py +++ /dev/null @@ -1,19 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "float32" - data = [0.136167] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "float32" - data = [0.78639] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "float32" - data = [3.99608] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py deleted file mode 100644 index 4cccb2b8e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/model.py +++ /dev/null @@ -1,43 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) - del data_0 - - # pd_op.add: 
(xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) - del data_1 - - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 - - return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_13/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt deleted file mode 100644 index d15259c49..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -a6658fa2fb342f963e549f08c1b73729d662eba241da80438af7d5534dbd6003 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py deleted file mode 100644 index 51b356c21..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/model.py +++ /dev/null @@ -1,247 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5): - # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) - multiply_0 = paddle._C_ops.multiply(data_2, data_0) - del data_2 - - # pd_op.flatten: (8x10164xf32) <- (8x1x10164xf32) - flatten_0 = paddle._C_ops.flatten(multiply_0, 0, 1) - - # pd_op.flatten: (8x27xi64) <- (8x1x27xi64) - flatten_1 = paddle._C_ops.flatten(data_1, 0, 1) - del data_1 - - # pd_op.index_sample: (8x27xf32) <- (8x10164xf32, 8x27xi64) - index_sample_0 = paddle._C_ops.index_sample(flatten_0, flatten_1) - del flatten_0, flatten_1 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [8, 1, -1] - - # pd_op.reshape: (8x1x27xf32) <- (8x27xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(index_sample_0, full_int_array_0) - del full_int_array_0, index_sample_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] - - # pd_op.mean: (8x1x1xf32) <- (8x1x27xf32, 1xi64) - mean_0 = paddle._C_ops.mean(reshape_0, full_int_array_1, True) - - # pd_op.subtract: (8x1x27xf32) <- (8x1x27xf32, 8x1x1xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, mean_0) - - # pd_op.pow: (8x1x27xf32) <- (8x1x27xf32) - pow_0 = paddle._C_ops.pow(subtract_0, float("2")) - del subtract_0 - - # pd_op.sum: (8x1x1xf32) <- (8x1x27xf32, 1xi64) - sum_0 = paddle._C_ops.sum(pow_0, full_int_array_1, paddle.float32, True) - del pow_0 - - # pd_op.numel: (xi64) <- (8x1x27xf32) - numel_0 = paddle._C_ops.numel(reshape_0) - del reshape_0 - - # pd_op.cast: (xi64) <- (xi64) - cast_0 = 
paddle._C_ops.cast(numel_0, paddle.int64) - del numel_0 - - # pd_op.numel: (xi64) <- (8x1x1xf32) - numel_1 = paddle._C_ops.numel(sum_0) - - # pd_op.cast: (xi64) <- (xi64) - cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) - del numel_1 - - # pd_op.cast: (xf32) <- (xi64) - cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) - del cast_0 - - # pd_op.cast: (xf32) <- (xi64) - cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) - del cast_1 - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(cast_2, cast_3) - del cast_2, cast_3 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(divide_0, full_0, float("-1"), True) - del divide_0, full_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (xf32) <- (xf32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - scale_0, full_1, paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.maximum: (xf32) <- (xf32, xf32) - maximum_0 = paddle._C_ops.maximum(scale_0, full_like_0) - del full_like_0, scale_0 - - # pd_op.divide: (8x1x1xf32) <- (8x1x1xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_0, maximum_0) - del maximum_0, sum_0 - - # pd_op.sqrt: (8x1x1xf32) <- (8x1x1xf32) - sqrt_0 = paddle._C_ops.sqrt(divide_1) - del divide_1 - - # pd_op.add: (8x1x1xf32) <- (8x1x1xf32, 8x1x1xf32) - add_0 = paddle._C_ops.add(mean_0, sqrt_0) - del mean_0, sqrt_0 - - # pd_op.greater_than: (8x1x10164xb) <- (8x1x10164xf32, 8x1x1xf32) - greater_than_1 = paddle._C_ops.greater_than(multiply_0, add_0) - del add_0, multiply_0 - - # pd_op.full_like: (8x1x10164xf32) <- (8x1x10164xf32, 1xf32) - full_like_1 = paddle._C_ops.full_like( - data_0, full_1, paddle.float32, paddle.framework._current_expected_place() - ) - del full_1 - - # pd_op.where: (8x1x10164xf32) <- (8x1x10164xb, 8x1x10164xf32, 8x1x10164xf32) - where_0 = paddle._C_ops.where(greater_than_1, data_0, full_like_1) - del data_0, full_like_1, greater_than_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [0, 1] - - # pd_op.unsqueeze: (1x1x10164x2xf32) <- (10164x2xf32, 2xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_3, full_int_array_2) - del data_3, full_int_array_2 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x10164x1xf32, 1x1x10164x1xf32]) <- (1x1x10164x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_0, 2, full_2) - del unsqueeze_0 - - # builtin.split: (1x1x10164x1xf32, 1x1x10164x1xf32) <- ([1x1x10164x1xf32, 1x1x10164x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] - - # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_4, full_int_array_3) - del data_4, full_int_array_3 - - # pd_op.split_with_num: ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) <- (8x1x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_1, 4, full_2) - del full_2, unsqueeze_1 - - # builtin.split: (8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32) <- ([8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32, 8x1x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (8x1x10164x1xf32) <- 
(1x1x10164x1xf32, 8x1x1x1xf32) - subtract_1 = paddle._C_ops.subtract(split_0, split_2) - del split_2 - - # pd_op.subtract: (8x1x10164x1xf32) <- (1x1x10164x1xf32, 8x1x1x1xf32) - subtract_2 = paddle._C_ops.subtract(split_1, split_3) - del split_3 - - # pd_op.subtract: (8x1x10164x1xf32) <- (8x1x1x1xf32, 1x1x10164x1xf32) - subtract_3 = paddle._C_ops.subtract(split_4, split_0) - del split_0, split_4 - - # pd_op.subtract: (8x1x10164x1xf32) <- (8x1x1x1xf32, 1x1x10164x1xf32) - subtract_4 = paddle._C_ops.subtract(split_5, split_1) - del split_1, split_5 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32]) <- (8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32) - combine_0 = [subtract_1, subtract_2, subtract_3, subtract_4] - del subtract_1, subtract_2, subtract_3, subtract_4 - - # pd_op.concat: (8x1x10164x4xf32) <- ([8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32, 8x1x10164x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_3) - del combine_0, full_3 - - # pd_op.min: (8x1x10164xf32) <- (8x1x10164x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_1, False) - del concat_0, full_int_array_1 - - # pd_op.full: (xf32) <- () - full_4 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (8x1x10164xb) <- (8x1x10164xf32, xf32) - greater_than_2 = paddle._C_ops.greater_than(min_0, full_4) - del full_4, min_0 - - # pd_op.cast: (8x1x10164xf32) <- (8x1x10164xb) - cast_4 = paddle._C_ops.cast(greater_than_2, paddle.float32) - del greater_than_2 - - # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) - multiply_1 = paddle._C_ops.multiply(where_0, cast_4) - del cast_4, where_0 - - # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x1xf32) - multiply_2 = paddle._C_ops.multiply(multiply_1, data_5) - del data_5, multiply_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-2] - - # pd_op.sum: (8x10164xf32) <- (8x1x10164xf32, 1xi64) - sum_1 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, False) - del full_int_array_4 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_5 = [] - - # pd_op.max: (xf32) <- (8x10164xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_1, full_int_array_5, False) - del full_int_array_5 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_5) - del full_5, max_0, multiply_2, sum_1 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt deleted file mode 100644 index bcf85f70d..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -79d36f8b3b83773115a0eec5c7e5e317486cffc279d107ab5b11f2f7c791fb82 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json 
b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py deleted file mode 100644 index 530823f43..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/model.py +++ /dev/null @@ -1,5806 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - 
parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - 
parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - 
parameter_587, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - data_11, - data_12, - ): - # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x3x-1x-1xf32, 24x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_12, parameter_587, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_12, parameter_587 - - # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_586, - parameter_585, - parameter_584, - parameter_583, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_0, parameter_583, parameter_584, parameter_585, parameter_586 - - # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - del batch_norm__0 - - # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_582, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_582, swish_0 - - # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_581, - parameter_580, - parameter_579, - parameter_578, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_1, parameter_578, parameter_579, parameter_580, parameter_581 - - # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - del batch_norm__6 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_1, parameter_577, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_577, swish_1 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_576, - parameter_575, - parameter_574, - parameter_573, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_2, parameter_573, parameter_574, parameter_575, parameter_576 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - del batch_norm__12 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x48x-1x-1xf32, 72x48x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_2, parameter_572, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_572, swish_2 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_3, - parameter_571, - parameter_570, - parameter_569, - parameter_568, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_3, parameter_568, parameter_569, parameter_570, parameter_571 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - del batch_norm__18 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x72x-1x-1xf32, 36x72x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_3, parameter_567, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_567 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_566, - parameter_565, - parameter_564, - parameter_563, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_4, parameter_563, parameter_564, parameter_565, parameter_566 - - # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - del batch_norm__24 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x72x-1x-1xf32, 36x72x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_3, parameter_562, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_562, swish_3 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_561, - parameter_560, - parameter_559, - parameter_558, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_5, parameter_558, parameter_559, parameter_560, parameter_561 - - # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - del batch_norm__30 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_5, parameter_557, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_557 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_556, - parameter_555, - parameter_554, - parameter_553, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_6, parameter_553, parameter_554, parameter_555, parameter_556 - - # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__36) - del batch_norm__36 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_6, parameter_552, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" - ) - del parameter_552 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_551, - parameter_550, - parameter_549, - parameter_548, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_7, parameter_548, parameter_549, parameter_550, parameter_551 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_6, parameter_547, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_547, swish_6 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_546, - parameter_545, - parameter_544, - parameter_543, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_8, parameter_543, parameter_544, parameter_545, parameter_546 - - # pd_op.multiply: (-1x36x-1x-1xf32) <- (1xf32, -1x36x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del batch_norm__48, data_0 - - # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) - del batch_norm__42, multiply_0 - - # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) - swish_7 = paddle._C_ops.swish(add_0) - del add_0 - - # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_5, swish_7) - del swish_5, swish_7 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_542, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_542 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_541, - parameter_540, - parameter_539, - parameter_538, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_9, parameter_538, parameter_539, parameter_540, parameter_541 - - # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) - swish_8 = paddle._C_ops.swish(batch_norm__54) - del batch_norm__54 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_8, parameter_537, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_537 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_10, - parameter_536, - parameter_535, - parameter_534, - parameter_533, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_10, parameter_533, parameter_534, parameter_535, parameter_536 - - # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_8, parameter_532, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_532, swish_8 - - # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_531, - parameter_530, - parameter_529, - parameter_528, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_11, parameter_528, parameter_529, parameter_530, parameter_531 - - # pd_op.multiply: (-1x36x-1x-1xf32) <- (1xf32, -1x36x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del batch_norm__66, data_1 - - # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) - add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) - del batch_norm__60, multiply_1 - - # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) - swish_9 = paddle._C_ops.swish(add_2) - del add_2 - - # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_9) - del add_1, swish_9 - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x36x-1x-1xf32, -1x36x-1x-1xf32]) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) - combine_0 = [swish_4, add_3] - del add_3, swish_4 - - # pd_op.concat: (-1x72x-1x-1xf32) <- ([-1x36x-1x-1xf32, -1x36x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.mean: (-1x72x1x1xf32) <- (-1x72x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (-1x72x1x1xf32) <- (-1x72x1x1xf32, 72x72x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - mean_0, parameter_527, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_0, parameter_527 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x72x1x1xf32) <- (72xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_526, full_int_array_1) - del parameter_526 - - # pd_op.add: (-1x72x1x1xf32) <- (-1x72x1x1xf32, 1x72x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_12, reshape_0) - del conv2d_12, reshape_0 - - # pd_op.hardsigmoid: (-1x72x1x1xf32) <- (-1x72x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_4, float("0.166667"), float("0.5") - ) - del add_4 - - # pd_op.multiply: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x1x1xf32) - multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) - del concat_2, hardsigmoid_0 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x72x-1x-1xf32, 96x72x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_2, parameter_525, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_2, parameter_525 - - # pd_op.batch_norm_: 
(-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_524, - parameter_523, - parameter_522, - parameter_521, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_13, parameter_521, parameter_522, parameter_523, parameter_524 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_10 = paddle._C_ops.swish(batch_norm__72) - del batch_norm__72 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x96x-1x-1xf32, 144x96x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_10, parameter_520, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_520, swish_10 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_519, - parameter_518, - parameter_517, - parameter_516, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_14, parameter_516, parameter_517, parameter_518, parameter_519 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_11 = paddle._C_ops.swish(batch_norm__78) - del batch_norm__78 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x144x-1x-1xf32, 72x144x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - swish_11, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_515 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_15, - parameter_514, - parameter_513, - parameter_512, - parameter_511, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_15, parameter_511, parameter_512, parameter_513, parameter_514 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_12 = paddle._C_ops.swish(batch_norm__84) - del batch_norm__84 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x144x-1x-1xf32, 72x144x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - swish_11, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_510, swish_11 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_509, - parameter_508, - parameter_507, - parameter_506, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_16, parameter_506, parameter_507, parameter_508, 
parameter_509 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - del batch_norm__90 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_13, parameter_505, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_505 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_504, - parameter_503, - parameter_502, - parameter_501, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_17, parameter_501, parameter_502, parameter_503, parameter_504 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - del batch_norm__96 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_500, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_500 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_499, - parameter_498, - parameter_497, - parameter_496, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_18, parameter_496, parameter_497, parameter_498, parameter_499 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_495, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_495, swish_14 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_494, - parameter_493, - parameter_492, - parameter_491, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_19, parameter_491, parameter_492, parameter_493, parameter_494 - - # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) - del batch_norm__108, data_2 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) - del batch_norm__102, multiply_3 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_15 = paddle._C_ops.swish(add_5) - del add_5 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_13, swish_15) - del swish_13, swish_15 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - add_6, parameter_490, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_490 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_489, - parameter_488, - parameter_487, - parameter_486, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_20, parameter_486, parameter_487, parameter_488, parameter_489 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__114) - del batch_norm__114 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_16, parameter_485, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_485 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_484, - parameter_483, - parameter_482, - parameter_481, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_21, parameter_481, parameter_482, parameter_483, parameter_484 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_16, parameter_480, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_480, swish_16 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_479, - parameter_478, - parameter_477, - parameter_476, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_22, parameter_476, parameter_477, parameter_478, parameter_479 - - # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) - multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del batch_norm__126, data_3 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) - del batch_norm__120, multiply_4 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_17 = paddle._C_ops.swish(add_7) - del add_7 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_8 = paddle._C_ops.add(add_6, swish_17) - del add_6, swish_17 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_475, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_475 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - 
batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_474, - parameter_473, - parameter_472, - parameter_471, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_23, parameter_471, parameter_472, parameter_473, parameter_474 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_18 = paddle._C_ops.swish(batch_norm__132) - del batch_norm__132 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_470, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_470 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_469, - parameter_468, - parameter_467, - parameter_466, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_24, parameter_466, parameter_467, parameter_468, parameter_469 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_18, parameter_465, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_465, swish_18 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_464, - parameter_463, - parameter_462, - parameter_461, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_25, parameter_461, parameter_462, parameter_463, parameter_464 - - # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) - multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) - del batch_norm__144, data_4 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) - del batch_norm__138, multiply_5 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_19 = paddle._C_ops.swish(add_9) - del add_9 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_19) - del add_8, swish_19 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_460, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_460 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_459, - parameter_458, - parameter_457, - parameter_456, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, 
None, None, None, None, None), - ) - del conv2d_26, parameter_456, parameter_457, parameter_458, parameter_459 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_20 = paddle._C_ops.swish(batch_norm__150) - del batch_norm__150 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_20, parameter_455, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_455 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_454, - parameter_453, - parameter_452, - parameter_451, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_27, parameter_451, parameter_452, parameter_453, parameter_454 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_20, parameter_450, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_450, swish_20 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_449, - parameter_448, - parameter_447, - parameter_446, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_28, parameter_446, parameter_447, parameter_448, parameter_449 - - # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) - multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del batch_norm__162, data_5 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) - del batch_norm__156, multiply_6 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_21 = paddle._C_ops.swish(add_11) - del add_11 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_21) - del add_10, swish_21 - - # builtin.combine: ([-1x72x-1x-1xf32, -1x72x-1x-1xf32]) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - combine_1 = [swish_12, add_12] - del add_12, swish_12 - - # pd_op.concat: (-1x144x-1x-1xf32) <- ([-1x72x-1x-1xf32, -1x72x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (-1x144x1x1xf32) <- (-1x144x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) - conv2d_29 = paddle._C_ops.conv2d( - mean_1, parameter_445, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_1, parameter_445 - - # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_444, full_int_array_1) - del parameter_444 - - # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_29, reshape_1) - del conv2d_29, reshape_1 - - # pd_op.hardsigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) - hardsigmoid_1 = 
paddle._C_ops.hardsigmoid( - add_13, float("0.166667"), float("0.5") - ) - del add_13 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) - multiply_7 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) - del concat_3, hardsigmoid_1 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x144x-1x-1xf32, 192x144x1x1xf32) - conv2d_30 = paddle._C_ops.conv2d( - multiply_7, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_7, parameter_443 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_442, - parameter_441, - parameter_440, - parameter_439, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_30, parameter_439, parameter_440, parameter_441, parameter_442 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_22 = paddle._C_ops.swish(batch_norm__168) - del batch_norm__168 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x192x-1x-1xf32, 288x192x3x3xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_22, parameter_438, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_438 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_437, - parameter_436, - parameter_435, - parameter_434, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_31, parameter_434, parameter_435, parameter_436, parameter_437 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_23 = paddle._C_ops.swish(batch_norm__174) - del batch_norm__174 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) - conv2d_32 = paddle._C_ops.conv2d( - swish_23, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_433 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_432, - parameter_431, - parameter_430, - parameter_429, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_32, parameter_429, parameter_430, parameter_431, parameter_432 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_24 = paddle._C_ops.swish(batch_norm__180) - del batch_norm__180 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_23, parameter_428, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_428, swish_23 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 
144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_427, - parameter_426, - parameter_425, - parameter_424, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_33, parameter_424, parameter_425, parameter_426, parameter_427 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) - del batch_norm__186 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_423 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_422, - parameter_421, - parameter_420, - parameter_419, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_34, parameter_419, parameter_420, parameter_421, parameter_422 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_26 = paddle._C_ops.swish(batch_norm__192) - del batch_norm__192 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - swish_26, parameter_418, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_418 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_417, - parameter_416, - parameter_415, - parameter_414, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_35, parameter_414, parameter_415, parameter_416, parameter_417 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_26, parameter_413, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_413, swish_26 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_412, - parameter_411, - parameter_410, - parameter_409, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_36, parameter_409, parameter_410, parameter_411, parameter_412 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) - multiply_8 = 
paddle._C_ops.multiply(data_6, batch_norm__204) - del batch_norm__204, data_6 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_14 = paddle._C_ops.add(batch_norm__198, multiply_8) - del batch_norm__198, multiply_8 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_27 = paddle._C_ops.swish(add_14) - del add_14 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_15 = paddle._C_ops.add(swish_25, swish_27) - del swish_25, swish_27 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_37 = paddle._C_ops.conv2d( - add_15, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_408 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_407, - parameter_406, - parameter_405, - parameter_404, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_37, parameter_404, parameter_405, parameter_406, parameter_407 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_28 = paddle._C_ops.swish(batch_norm__210) - del batch_norm__210 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_38 = paddle._C_ops.conv2d( - swish_28, parameter_403, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_403 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_38, - parameter_402, - parameter_401, - parameter_400, - parameter_399, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_38, parameter_399, parameter_400, parameter_401, parameter_402 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - swish_28, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_398, swish_28 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_397, - parameter_396, - parameter_395, - parameter_394, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_39, parameter_394, parameter_395, parameter_396, parameter_397 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) - multiply_9 = paddle._C_ops.multiply(data_7, batch_norm__222) - del batch_norm__222, data_7 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_16 = paddle._C_ops.add(batch_norm__216, multiply_9) - del 
batch_norm__216, multiply_9 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_29 = paddle._C_ops.swish(add_16) - del add_16 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_17 = paddle._C_ops.add(add_15, swish_29) - del add_15, swish_29 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - add_17, parameter_393, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_393 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_392, - parameter_391, - parameter_390, - parameter_389, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_40, parameter_389, parameter_390, parameter_391, parameter_392 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - del batch_norm__228 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_388, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_388 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_387, - parameter_386, - parameter_385, - parameter_384, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_41, parameter_384, parameter_385, parameter_386, parameter_387 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_383, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_383, swish_30 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_382, - parameter_381, - parameter_380, - parameter_379, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_42, parameter_379, parameter_380, parameter_381, parameter_382 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) - multiply_10 = paddle._C_ops.multiply(data_8, batch_norm__240) - del batch_norm__240, data_8 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_18 = paddle._C_ops.add(batch_norm__234, multiply_10) - del batch_norm__234, multiply_10 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_31 = paddle._C_ops.swish(add_18) - del add_18 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_19 = 
paddle._C_ops.add(add_17, swish_31) - del add_17, swish_31 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - add_19, parameter_378, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_378 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_377, - parameter_376, - parameter_375, - parameter_374, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_43, parameter_374, parameter_375, parameter_376, parameter_377 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__246) - del batch_norm__246 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_32, parameter_373, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_373 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_372, - parameter_371, - parameter_370, - parameter_369, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_44, parameter_369, parameter_370, parameter_371, parameter_372 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_32, parameter_368, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_368, swish_32 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_367, - parameter_366, - parameter_365, - parameter_364, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_45, parameter_364, parameter_365, parameter_366, parameter_367 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) - multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del batch_norm__258, data_9 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) - del batch_norm__252, multiply_11 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_33 = paddle._C_ops.swish(add_20) - del add_20 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_21 = paddle._C_ops.add(add_19, swish_33) - del add_19, swish_33 - - # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - combine_2 = [swish_24, add_21] - del add_21, swish_24 - - # 
pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (-1x288x1x1xf32) <- (-1x288x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) - - # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) - conv2d_46 = paddle._C_ops.conv2d( - mean_2, parameter_363, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_2, parameter_363 - - # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_362, full_int_array_1) - del parameter_362 - - # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) - add_22 = paddle._C_ops.add(conv2d_46, reshape_2) - del conv2d_46, reshape_2 - - # pd_op.hardsigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_22, float("0.166667"), float("0.5") - ) - del add_22 - - # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) - multiply_12 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) - del concat_4, hardsigmoid_2 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x288x-1x-1xf32, 384x288x1x1xf32) - conv2d_47 = paddle._C_ops.conv2d( - multiply_12, parameter_361, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_12, parameter_361 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_360, - parameter_359, - parameter_358, - parameter_357, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_47, parameter_357, parameter_358, parameter_359, parameter_360 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_34 = paddle._C_ops.swish(batch_norm__264) - del batch_norm__264 - - # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x384x-1x-1xf32, 576x384x3x3xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_34, parameter_356, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_356 - - # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_355, - parameter_354, - parameter_353, - parameter_352, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_48, parameter_352, parameter_353, parameter_354, parameter_355 - - # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) - swish_35 = paddle._C_ops.swish(batch_norm__270) - del batch_norm__270 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) - conv2d_49 = paddle._C_ops.conv2d( - swish_35, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_351 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - 
batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_350, - parameter_349, - parameter_348, - parameter_347, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_49, parameter_347, parameter_348, parameter_349, parameter_350 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_36 = paddle._C_ops.swish(batch_norm__276) - del batch_norm__276 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_35, parameter_346, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_346, swish_35 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_345, - parameter_344, - parameter_343, - parameter_342, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_50, parameter_342, parameter_343, parameter_344, parameter_345 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) - del batch_norm__282 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_341 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_340, - parameter_339, - parameter_338, - parameter_337, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_51, parameter_337, parameter_338, parameter_339, parameter_340 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_38 = paddle._C_ops.swish(batch_norm__288) - del batch_norm__288 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - swish_38, parameter_336, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_336 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_335, - parameter_334, - parameter_333, - parameter_332, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_52, parameter_332, parameter_333, parameter_334, parameter_335 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_53 = paddle._C_ops.conv2d( - 
swish_38, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_331, swish_38 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_330, - parameter_329, - parameter_328, - parameter_327, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_53, parameter_327, parameter_328, parameter_329, parameter_330 - - # pd_op.multiply: (-1x288x-1x-1xf32) <- (1xf32, -1x288x-1x-1xf32) - multiply_13 = paddle._C_ops.multiply(data_10, batch_norm__300) - del batch_norm__300, data_10 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_23 = paddle._C_ops.add(batch_norm__294, multiply_13) - del batch_norm__294, multiply_13 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_39 = paddle._C_ops.swish(add_23) - del add_23 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_24 = paddle._C_ops.add(swish_37, swish_39) - del swish_37, swish_39 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_54 = paddle._C_ops.conv2d( - add_24, parameter_326, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_326 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_325, - parameter_324, - parameter_323, - parameter_322, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_54, parameter_322, parameter_323, parameter_324, parameter_325 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_40 = paddle._C_ops.swish(batch_norm__306) - del batch_norm__306 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - swish_40, parameter_321, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_321 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_320, - parameter_319, - parameter_318, - parameter_317, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_55, parameter_317, parameter_318, parameter_319, parameter_320 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_40, parameter_316, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_316, swish_40 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 
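# NOTE: this is the repeated residual unit of the block: a 3x3 conv, then a
# parallel 3x3 conv and 1x1 conv whose batch-normalized output is scaled by a
# 1-element input tensor (data_10 / data_11) before the add and swish; add_24 /
# add_26 then add the unit output back onto the unit input.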
288xf32, 288xf32, 288xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_315, - parameter_314, - parameter_313, - parameter_312, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_56, parameter_312, parameter_313, parameter_314, parameter_315 - - # pd_op.multiply: (-1x288x-1x-1xf32) <- (1xf32, -1x288x-1x-1xf32) - multiply_14 = paddle._C_ops.multiply(data_11, batch_norm__318) - del batch_norm__318, data_11 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_25 = paddle._C_ops.add(batch_norm__312, multiply_14) - del batch_norm__312, multiply_14 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_41 = paddle._C_ops.swish(add_25) - del add_25 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_26 = paddle._C_ops.add(add_24, swish_41) - del add_24, swish_41 - - # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - combine_3 = [swish_36, add_26] - del add_26, swish_36 - - # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (-1x576x1x1xf32) <- (-1x576x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) - del full_int_array_0 - - # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - mean_3, parameter_311, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_3, parameter_311 - - # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_310, full_int_array_1) - del parameter_310 - - # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) - add_27 = paddle._C_ops.add(conv2d_57, reshape_3) - del conv2d_57, reshape_3 - - # pd_op.hardsigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_27, float("0.166667"), float("0.5") - ) - del add_27 - - # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) - multiply_15 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) - del concat_5, hardsigmoid_3 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x576x-1x-1xf32, 768x576x1x1xf32) - conv2d_58 = paddle._C_ops.conv2d( - multiply_15, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_15, parameter_309 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_58, parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_42 = paddle._C_ops.swish(batch_norm__324) - del batch_norm__324 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x768x-1x-1xf32, 288x768x1x1xf32) - conv2d_59 = 
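# NOTE: combine_3/concat_5 above merge the two 288-channel branches back into a
# 576-channel map (CSP-style), and mean_3 -> conv2d_57 (plus broadcast bias) ->
# hardsigmoid_3 -> multiply_15 apply an SE-style channel reweighting to it
# before conv2d_58 expands the result to 768 channels for the next stage.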
paddle._C_ops.conv2d( - swish_42, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_304 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_59, parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_43 = paddle._C_ops.swish(batch_norm__330) - del batch_norm__330 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x768x-1x-1xf32, 288x768x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_42, parameter_299, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299, swish_42 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_60, parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_44 = paddle._C_ops.swish(batch_norm__336) - del batch_norm__336 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_61 = paddle._C_ops.conv2d( - swish_44, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294, swish_44 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_61, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_61, parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__342) - del batch_norm__342 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_62 = paddle._C_ops.conv2d( - swish_45, parameter_289, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - True, - float("0.9"), - 
float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_62, parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284, swish_45 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_63, parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_28 = paddle._C_ops.add(batch_norm__348, batch_norm__354) - del batch_norm__348, batch_norm__354 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_46 = paddle._C_ops.swish(add_28) - del add_28 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [5, 5] - - # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_46, - full_int_array_2, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [9, 9] - - # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_46, - full_int_array_3, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_3 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [13, 13] - - # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_46, - full_int_array_4, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_4 - - # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32) - combine_4 = [swish_46, pool2d_0, pool2d_1, pool2d_2] - del pool2d_0, pool2d_1, pool2d_2, swish_46 - - # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_4, full_0) - del combine_4 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x1152x-1x-1xf32, 288x1152x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - concat_6, parameter_279, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_6, parameter_279 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_64, parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__360) - del batch_norm__360 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_47, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274, swish_47 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_65, parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__366) - del batch_norm__366 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_66, parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_48, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264, swish_48 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_67, parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_29 = paddle._C_ops.add(batch_norm__372, batch_norm__378) - del batch_norm__372, batch_norm__378 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_49 = paddle._C_ops.swish(add_29) - del add_29 - - # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - combine_5 = [swish_43, swish_49] - del swish_43, swish_49 - - # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, 
-1x288x-1x-1xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - concat_7, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_7, parameter_259 - - # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_68, parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) - swish_50 = paddle._C_ops.swish(batch_norm__384) - del batch_norm__384 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) - conv2d_69 = paddle._C_ops.conv2d( - swish_50, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_254 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_69, parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_51 = paddle._C_ops.swish(batch_norm__390) - del batch_norm__390 - - # pd_op.nearest_interp: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_51, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_51 - - # builtin.combine: ([-1x288x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x384x-1x-1xf32) - combine_6 = [nearest_interp_0, swish_34] - del nearest_interp_0, swish_34 - - # pd_op.concat: (-1x672x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x672x-1x-1xf32, 144x672x1x1xf32) - conv2d_70 = paddle._C_ops.conv2d( - concat_8, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_70, parameter_245, parameter_246, parameter_247, 
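# NOTE: nearest_interp_0 upsamples the 288-channel swish_51 by 2x and concat_8
# joins it with the 384-channel lateral feature swish_34 (top-down, FPN-style
# fusion); conv2d_70 and conv2d_71 then split the 672-channel result into two
# parallel 144-channel branches.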
parameter_248 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_52 = paddle._C_ops.swish(batch_norm__396) - del batch_norm__396 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x672x-1x-1xf32, 144x672x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - concat_8, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_8, parameter_244 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_71, parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_53 = paddle._C_ops.swish(batch_norm__402) - del batch_norm__402 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - swish_53, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_239, swish_53 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_72, parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_54 = paddle._C_ops.swish(batch_norm__408) - del batch_norm__408 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_54, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_73, parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_54, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229, swish_54 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_74, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_74, parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_30 = paddle._C_ops.add(batch_norm__414, batch_norm__420) - del batch_norm__414, batch_norm__420 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_55 = paddle._C_ops.swish(add_30) - del add_30 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_75 = paddle._C_ops.conv2d( - swish_55, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224, swish_55 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_75, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_75, parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__426) - del batch_norm__426 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_76 = paddle._C_ops.conv2d( - swish_56, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_76, parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - swish_56, parameter_214, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214, swish_56 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_77, parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_31 = paddle._C_ops.add(batch_norm__432, batch_norm__438) - del batch_norm__432, 
batch_norm__438 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_57 = paddle._C_ops.swish(add_31) - del add_31 - - # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - combine_7 = [swish_52, swish_57] - del swish_52, swish_57 - - # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - concat_9, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_9, parameter_209 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_78, parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_58 = paddle._C_ops.swish(batch_norm__444) - del batch_norm__444 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_58, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_79, parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_59 = paddle._C_ops.swish(batch_norm__450) - del batch_norm__450 - - # pd_op.nearest_interp: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_59, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_59 - - # builtin.combine: ([-1x144x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x192x-1x-1xf32) - combine_8 = [nearest_interp_1, swish_22] - del nearest_interp_1, swish_22 - - # pd_op.concat: (-1x336x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x336x-1x-1xf32, 72x336x1x1xf32) - conv2d_80 = paddle._C_ops.conv2d( - concat_10, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - 
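# NOTE: nearest_interp_1 repeats the top-down step one level finer: the
# 144-channel swish_59 is upsampled 2x and concat_10 joins it with the
# 192-channel lateral feature swish_22 (336 channels), which conv2d_80 and
# conv2d_81 split into two 72-channel branches.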
paddle._C_ops.batch_norm( - conv2d_80, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_80, parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_60 = paddle._C_ops.swish(batch_norm__456) - del batch_norm__456 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x336x-1x-1xf32, 72x336x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - concat_10, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_10, parameter_194 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_81, parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_61 = paddle._C_ops.swish(batch_norm__462) - del batch_norm__462 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_61, parameter_189, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189, swish_61 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_82, parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_62 = paddle._C_ops.swish(batch_norm__468) - del batch_norm__468 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_62, parameter_184, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_83, parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_62, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179, swish_62 - - # 
pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_84, parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_32 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - del batch_norm__474, batch_norm__480 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_63 = paddle._C_ops.swish(add_32) - del add_32 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_85 = paddle._C_ops.conv2d( - swish_63, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174, swish_63 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_85, parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_64 = paddle._C_ops.swish(batch_norm__486) - del batch_norm__486 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_64, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_86, parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_64, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164, swish_64 - - # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
conv2d_87, parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - add_33 = paddle._C_ops.add(batch_norm__492, batch_norm__498) - del batch_norm__492, batch_norm__498 - - # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) - swish_65 = paddle._C_ops.swish(add_33) - del add_33 - - # builtin.combine: ([-1x72x-1x-1xf32, -1x72x-1x-1xf32]) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) - combine_9 = [swish_60, swish_65] - del swish_60, swish_65 - - # pd_op.concat: (-1x144x-1x-1xf32) <- ([-1x72x-1x-1xf32, -1x72x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - concat_11, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_11, parameter_159 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_88, parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_66 = paddle._C_ops.swish(batch_norm__504) - del batch_norm__504 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_89 = paddle._C_ops.conv2d( - swish_66, parameter_154, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_89, parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_67 = paddle._C_ops.swish(batch_norm__510) - del batch_norm__510 - - # builtin.combine: ([-1x144x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x288x-1x-1xf32) - combine_10 = [swish_67, swish_58] - del swish_58, swish_67 - - # pd_op.concat: (-1x432x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x432x-1x-1xf32, 144x432x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - concat_12, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - 
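# NOTE: conv2d_88/swish_66 fuse the two 72-channel branches into the 144-channel
# finest-level output; the stride-2 conv2d_89 then downsamples it and concat_12
# joins it with the 288-channel mid-level feature swish_58 (432 channels), i.e.
# the first bottom-up, PAN-style aggregation step.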
parameter_148, - parameter_147, - parameter_146, - parameter_145, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_90, parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_68 = paddle._C_ops.swish(batch_norm__516) - del batch_norm__516 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x432x-1x-1xf32, 144x432x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_12, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_12, parameter_144 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_91, parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_69 = paddle._C_ops.swish(batch_norm__522) - del batch_norm__522 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_92 = paddle._C_ops.conv2d( - swish_69, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139, swish_69 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_92, parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_70 = paddle._C_ops.swish(batch_norm__528) - del batch_norm__528 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_70, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_93, parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_70, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129, swish_70 - - # 
pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_94, parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_34 = paddle._C_ops.add(batch_norm__534, batch_norm__540) - del batch_norm__534, batch_norm__540 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_71 = paddle._C_ops.swish(add_34) - del add_34 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_71, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124, swish_71 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_95, parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_72 = paddle._C_ops.swish(batch_norm__546) - del batch_norm__546 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_72, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_96, parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_72, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114, swish_72 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) 
- else (out, None, None, None, None, None), - ) - del conv2d_97, parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_35 = paddle._C_ops.add(batch_norm__552, batch_norm__558) - del batch_norm__552, batch_norm__558 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_73 = paddle._C_ops.swish(add_35) - del add_35 - - # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - combine_11 = [swish_68, swish_73] - del swish_68, swish_73 - - # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - concat_13, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_13, parameter_109 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_98, parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_74 = paddle._C_ops.swish(batch_norm__564) - del batch_norm__564 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_74, parameter_104, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_99, parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_75 = paddle._C_ops.swish(batch_norm__570) - del batch_norm__570 - - # builtin.combine: ([-1x288x-1x-1xf32, -1x576x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x576x-1x-1xf32) - combine_12 = [swish_75, swish_50] - del swish_50, swish_75 - - # pd_op.concat: (-1x864x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x576x-1x-1xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x864x-1x-1xf32, 288x864x1x1xf32) - conv2d_100 = paddle._C_ops.conv2d( - concat_14, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda 
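# NOTE: conv2d_98/swish_74 fuse the two 144-channel branches into the
# 288-channel mid-level output; the stride-2 conv2d_99 downsamples it and
# concat_14 joins it with the 576-channel swish_50 (864 channels) for the
# coarsest-level bottom-up aggregation.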
x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_100, parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_76 = paddle._C_ops.swish(batch_norm__576) - del batch_norm__576 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x864x-1x-1xf32, 288x864x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - concat_14, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_14, parameter_94 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_101, parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_77 = paddle._C_ops.swish(batch_norm__582) - del batch_norm__582 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_102 = paddle._C_ops.conv2d( - swish_77, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89, swish_77 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_102, parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_78 = paddle._C_ops.swish(batch_norm__588) - del batch_norm__588 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_78, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_103, parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - swish_78, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_79, swish_78 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_104, parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_36 = paddle._C_ops.add(batch_norm__594, batch_norm__600) - del batch_norm__594, batch_norm__600 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_79 = paddle._C_ops.swish(add_36) - del add_36 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_105 = paddle._C_ops.conv2d( - swish_79, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74, swish_79 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_105, parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_80 = paddle._C_ops.swish(batch_norm__606) - del batch_norm__606 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_80, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_106, parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_80, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64, swish_80 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, 
tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_107, parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_37 = paddle._C_ops.add(batch_norm__612, batch_norm__618) - del batch_norm__612, batch_norm__618 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_81 = paddle._C_ops.swish(add_37) - del add_37 - - # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - combine_13 = [swish_76, swish_81] - del swish_76, swish_81 - - # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) - concat_15 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_15, parameter_59 - - # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_108, parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) - swish_82 = paddle._C_ops.swish(batch_norm__624) - del batch_norm__624 - - # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(swish_82) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [1] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_0 - - # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) - shape64_1 = paddle._C_ops.shape64(swish_82) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [2] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_8 = [3] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] - ) - del shape64_1 - - # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) - shape64_2 = paddle._C_ops.shape64(swish_82) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_9 = [4] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] - ) - del shape64_2 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_16 = paddle._C_ops.multiply(slice_1, slice_2) - del slice_1, slice_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_10 = [1, 1] - - # pd_op.pool2d: (-1x576x1x1xf32) <- (-1x576x-1x-1xf32, 2xi64) - pool2d_3 = paddle._C_ops.pool2d( - swish_82, - full_int_array_10, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) - conv2d_109 = paddle._C_ops.conv2d( - pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) 
- reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) - del parameter_53 - - # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) - add_38 = paddle._C_ops.add(conv2d_109, reshape_4) - del conv2d_109, reshape_4 - - # pd_op.sigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_38) - del add_38 - - # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) - multiply_17 = paddle._C_ops.multiply(swish_82, sigmoid_0) - del sigmoid_0 - - # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) - conv2d_110 = paddle._C_ops.conv2d( - multiply_17, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_17, parameter_52 - - # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_110, parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) - swish_83 = paddle._C_ops.swish(batch_norm__630) - del batch_norm__630 - - # pd_op.add: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x-1x-1xf32) - add_39 = paddle._C_ops.add(swish_83, swish_82) - del swish_83 - - # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x576x-1x-1xf32, 4x576x3x3xf32) - conv2d_111 = paddle._C_ops.conv2d( - add_39, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_39, parameter_47 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) - del parameter_46 - - # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) - add_40 = paddle._C_ops.add(conv2d_111, reshape_5) - del conv2d_111, reshape_5 - - # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) - conv2d_112 = paddle._C_ops.conv2d( - pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_45, pool2d_3 - - # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) - del parameter_44 - - # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) - add_41 = paddle._C_ops.add(conv2d_112, reshape_6) - del conv2d_112, reshape_6 - - # pd_op.sigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_41) - del add_41 - - # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) - multiply_18 = paddle._C_ops.multiply(swish_82, sigmoid_1) - del sigmoid_1, swish_82 - - # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) - conv2d_113 = paddle._C_ops.conv2d( - multiply_18, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_18, parameter_43 - - # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_42, - parameter_41, - parameter_40, - 
parameter_39, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_113, parameter_39, parameter_40, parameter_41, parameter_42 - - # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) - swish_84 = paddle._C_ops.swish(batch_norm__636) - del batch_norm__636 - - # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x576x-1x-1xf32, 68x576x3x3xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_84, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_38, swish_84 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) - del parameter_37 - - # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) - add_42 = paddle._C_ops.add(conv2d_114, reshape_7) - del conv2d_114, reshape_7 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_3 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_14 = [full_1, full_2, full_3, multiply_16] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_14, 0) - del combine_14 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_42, stack_0) - del add_42, stack_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) - del reshape_8 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_0 = paddle._C_ops.softmax(transpose_0, 1) - del transpose_0 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_115, full_int_array_6) - del conv2d_115 - - # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_40) - del add_40 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_15 = [full_1, full_2, multiply_16] - del multiply_16 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_15, 0) - del combine_15 - - # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) - reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) - del sigmoid_2, stack_1 - - # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) - shape64_3 = paddle._C_ops.shape64(swish_74) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_3 - - # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) - shape64_4 = paddle._C_ops.shape64(swish_74) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] - ) - del shape64_4 - - # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) - shape64_5 = paddle._C_ops.shape64(swish_74) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] - ) 
- del shape64_5 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_19 = paddle._C_ops.multiply(slice_4, slice_5) - del slice_4, slice_5 - - # pd_op.pool2d: (-1x288x1x1xf32) <- (-1x288x-1x-1xf32, 2xi64) - pool2d_4 = paddle._C_ops.pool2d( - swish_74, - full_int_array_10, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) - conv2d_116 = paddle._C_ops.conv2d( - pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) - del parameter_34 - - # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) - add_43 = paddle._C_ops.add(conv2d_116, reshape_10) - del conv2d_116, reshape_10 - - # pd_op.sigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_43) - del add_43 - - # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) - multiply_20 = paddle._C_ops.multiply(swish_74, sigmoid_3) - del sigmoid_3 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - multiply_20, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_20, parameter_33 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_117, parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_85 = paddle._C_ops.swish(batch_norm__642) - del batch_norm__642 - - # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) - add_44 = paddle._C_ops.add(swish_85, swish_74) - del swish_85 - - # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x288x-1x-1xf32, 4x288x3x3xf32) - conv2d_118 = paddle._C_ops.conv2d( - add_44, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_44, parameter_28 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) - del parameter_27 - - # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) - add_45 = paddle._C_ops.add(conv2d_118, reshape_11) - del conv2d_118, reshape_11 - - # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) - conv2d_119 = paddle._C_ops.conv2d( - pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26, pool2d_4 - - # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) - del parameter_25 - - # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) - add_46 = paddle._C_ops.add(conv2d_119, reshape_12) - del conv2d_119, reshape_12 - - # pd_op.sigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_46) - del add_46 - - # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) - multiply_21 = paddle._C_ops.multiply(swish_74, sigmoid_4) - 
del sigmoid_4, swish_74 - - # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_120 = paddle._C_ops.conv2d( - multiply_21, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_21, parameter_24 - - # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_120, parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) - swish_86 = paddle._C_ops.swish(batch_norm__648) - del batch_norm__648 - - # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x288x-1x-1xf32, 68x288x3x3xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_86, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19, swish_86 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) - del parameter_18 - - # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) - add_47 = paddle._C_ops.add(conv2d_121, reshape_13) - del conv2d_121, reshape_13 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_16 = [full_1, full_2, full_3, multiply_19] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_16, 0) - del combine_16 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_47, stack_2) - del add_47, stack_2 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) - del reshape_14 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_1 = paddle._C_ops.softmax(transpose_1, 1) - del transpose_1 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_122 = paddle._C_ops.conv2d( - softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_1 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze(conv2d_122, full_int_array_6) - del conv2d_122 - - # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_45) - del add_45 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_17 = [full_1, full_2, multiply_19] - del multiply_19 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_3 = paddle._C_ops.stack(combine_17, 0) - del combine_17 - - # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(sigmoid_5, stack_3) - del sigmoid_5, stack_3 - - # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) - shape64_6 = paddle._C_ops.shape64(swish_66) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del full_int_array_5, shape64_6 - - # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) - shape64_7 = paddle._C_ops.shape64(swish_66) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - shape64_7, [0], full_int_array_7, 
full_int_array_8, [1], [0] - ) - del full_int_array_7, shape64_7 - - # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) - shape64_8 = paddle._C_ops.shape64(swish_66) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] - ) - del full_int_array_8, full_int_array_9, shape64_8 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_22 = paddle._C_ops.multiply(slice_7, slice_8) - del slice_7, slice_8 - - # pd_op.pool2d: (-1x144x1x1xf32) <- (-1x144x-1x-1xf32, 2xi64) - pool2d_5 = paddle._C_ops.pool2d( - swish_66, - full_int_array_10, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - del full_int_array_10 - - # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) - conv2d_123 = paddle._C_ops.conv2d( - pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) - del parameter_16 - - # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) - add_48 = paddle._C_ops.add(conv2d_123, reshape_16) - del conv2d_123, reshape_16 - - # pd_op.sigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_48) - del add_48 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) - multiply_23 = paddle._C_ops.multiply(swish_66, sigmoid_6) - del sigmoid_6 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - multiply_23, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_23, parameter_15 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_124, parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_87 = paddle._C_ops.swish(batch_norm__654) - del batch_norm__654 - - # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) - add_49 = paddle._C_ops.add(swish_87, swish_66) - del swish_87 - - # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x144x-1x-1xf32, 4x144x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - add_49, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_49, parameter_10 - - # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) - del parameter_9 - - # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) - add_50 = paddle._C_ops.add(conv2d_125, reshape_17) - del conv2d_125, reshape_17 - - # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) - conv2d_126 = paddle._C_ops.conv2d( - pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8, pool2d_5 - - # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) - del parameter_7 - - # pd_op.add: (-1x144x1x1xf32) <- 
(-1x144x1x1xf32, 1x144x1x1xf32) - add_51 = paddle._C_ops.add(conv2d_126, reshape_18) - del conv2d_126, reshape_18 - - # pd_op.sigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_51) - del add_51 - - # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) - multiply_24 = paddle._C_ops.multiply(swish_66, sigmoid_7) - del sigmoid_7, swish_66 - - # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - multiply_24, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_24, parameter_6 - - # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_127, parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) - swish_88 = paddle._C_ops.swish(batch_norm__660) - del batch_norm__660 - - # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x144x-1x-1xf32, 68x144x3x3xf32) - conv2d_128 = paddle._C_ops.conv2d( - swish_88, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1, swish_88 - - # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) - del full_int_array_1, parameter_0 - - # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) - add_52 = paddle._C_ops.add(conv2d_128, reshape_19) - del conv2d_128, reshape_19 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_18 = [full_1, full_2, full_3, multiply_22] - del full_3 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_18, 0) - del combine_18 - - # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(add_52, stack_4) - del add_52, stack_4 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) - transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) - del reshape_20 - - # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) - softmax_2 = paddle._C_ops.softmax(transpose_2, 1) - del transpose_2 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_129 = paddle._C_ops.conv2d( - softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_36, softmax_2 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_2 = paddle._C_ops.squeeze(conv2d_129, full_int_array_6) - del conv2d_129, full_int_array_6 - - # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_50) - del add_50 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_19 = [full_1, full_2, multiply_22] - del full_1, full_2, multiply_22 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_19, 0) - del combine_19 - - # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) - reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) - del sigmoid_8, stack_5 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - 
[1], float("-1"), paddle.int32, paddle.core.CPUPlace()
-        )
-
-        # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32)
-        combine_20 = [reshape_9, reshape_15, reshape_21]
-        del reshape_15, reshape_21, reshape_9
-
-        # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32)
-        concat_0 = paddle._C_ops.concat(combine_20, full_4)
-        del combine_20, full_4
-
-        # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32)
-        combine_21 = [squeeze_0, squeeze_1, squeeze_2]
-        del squeeze_0, squeeze_1, squeeze_2
-
-        # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32)
-        concat_1 = paddle._C_ops.concat(combine_21, full_0)
-        del combine_21, full_0
-
-        return concat_0, concat_1
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py
deleted file mode 100644
index 49282087c..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/weight_meta.py
+++ /dev/null
@@ -1,6370 +0,0 @@
-class Program_weight_tensor_parameter_0:
-    name = "parameter_0"
-    shape = [68]
-    dtype = "float32"
-    min_val = float("-0.0141635")
-    max_val = float("0.0241976")
-    mean = float("6.52944e-08")
-    std = float("0.00671931")
-    data = None
-
-
-class Program_weight_tensor_parameter_1:
-    name = "parameter_1"
-    shape = [68, 144, 3, 3]
-    dtype = "float32"
-    min_val = float("-0.15978")
-    max_val = float("0.188116")
-    mean = float("6.16128e-08")
-    std = float("0.00828307")
-    data = None
-
-
-class Program_weight_tensor_parameter_2:
-    name = "parameter_2"
-    shape = [144]
-    dtype = "float32"
-    min_val = float("-0.104364")
-    max_val = float("0.335598")
-    mean = float("0.0805429")
-    std = float("0.0951253")
-    data = None
-
-
-class Program_weight_tensor_parameter_3:
-    name = "parameter_3"
-    shape = [144]
-    dtype = "float32"
-    min_val = float("0.83568")
-    max_val = float("2.14576")
-    mean = float("1.40531")
-    std = float("0.259813")
-    data = None
-
-
-class Program_weight_tensor_parameter_4:
-    name = "parameter_4"
-    shape = [144]
-    dtype = "float32"
-    min_val = float("0.000157802")
-    max_val = float("0.00220136")
-    mean = float("0.000572424")
-    std = float("0.000355381")
-    data = None
-
-
-class Program_weight_tensor_parameter_5:
-    name = "parameter_5"
-    shape = [144]
-    dtype = "float32"
-    min_val = float("-0.0503245")
-    max_val = float("0.0384794")
-    mean = float("-0.00745528")
-    std = float("0.0176331")
-    data = None
-
-
-class Program_weight_tensor_parameter_6:
-    name = "parameter_6"
-    shape = [144, 144, 1, 1]
-    dtype = "float32"
-    min_val = float("-0.0726671")
-    max_val = float("0.0967547")
-    mean = float("-0.000265342")
-    std = float("0.00739699")
-    data = None
-
-
-class Program_weight_tensor_parameter_7:
-    name = "parameter_7"
-    shape = [144]
-    dtype = "float32"
-    min_val = float("-0.00637615")
-    max_val = float("0.0068")
-    mean = float("-0.000174127")
-    std = float("0.00321334")
-    data = None
-
-
-class Program_weight_tensor_parameter_8:
-    name = "parameter_8"
-    shape = [144, 144, 1, 1]
-    dtype = "float32"
-    min_val = float("-0.0118268")
-    max_val = float("0.0153714")
-    mean = float("-0.000124547")
-    std = float("0.00221024")
-    data = None
-
-
-class Program_weight_tensor_parameter_9:
-    name = "parameter_9"
-    shape = [4]
-    dtype = "float32"
-    min_val = float("0")
-    max_val = float("0.5")
-    data = None
-
-
-class Program_weight_tensor_parameter_10:
-    name = "parameter_10"
-    shape = [4, 144, 3, 3]
-
dtype = "float32" - min_val = float("-6.03644e-06") - max_val = float("0.000338216") - mean = float("1.1862e-05") - std = float("2.42884e-05") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [144] - dtype = "float32" - min_val = float("-0.6431") - max_val = float("1.5172") - mean = float("0.437678") - std = float("0.39753") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [144] - dtype = "float32" - min_val = float("0.91498") - max_val = float("2.11591") - mean = float("1.39192") - std = float("0.197821") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [144] - dtype = "float32" - min_val = float("0.000201827") - max_val = float("0.0033487") - mean = float("0.000776001") - std = float("0.000505157") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [144] - dtype = "float32" - min_val = float("-0.246557") - max_val = float("0.035997") - mean = float("-0.0278908") - std = float("0.0405564") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0650974") - max_val = float("0.0803623") - mean = float("-0.000600887") - std = float("0.00887669") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [144] - dtype = "float32" - min_val = float("-0.0055726") - max_val = float("0.00565327") - mean = float("-0.000280367") - std = float("0.00215197") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0293837") - max_val = float("0.0538049") - mean = float("-5.73599e-05") - std = float("0.00246484") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [68] - dtype = "float32" - min_val = float("-0.00497141") - max_val = float("0.0272935") - mean = float("6.13218e-08") - std = float("0.00562603") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [68, 288, 3, 3] - dtype = "float32" - min_val = float("-0.11143") - max_val = float("0.129486") - mean = float("3.29019e-08") - std = float("0.00574142") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [288] - dtype = "float32" - min_val = float("-0.017394") - max_val = float("0.147115") - mean = float("0.0536437") - std = float("0.0321882") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [288] - dtype = "float32" - min_val = float("1.01671") - max_val = float("1.45294") - mean = float("1.22922") - std = float("0.0813549") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [288] - dtype = "float32" - min_val = float("9.58753e-05") - max_val = float("0.0041103") - mean = float("0.000468789") - std = float("0.000456147") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [288] - dtype = "float32" - min_val = float("-0.0518176") - max_val = float("0.0162465") - mean = float("-0.00788198") - std = float("0.00923439") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0595991") - max_val = float("0.0737716") - mean = float("-0.000137555") - std = float("0.00365117") - data = None - 
- -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [288] - dtype = "float32" - min_val = float("-0.00315414") - max_val = float("0.00622371") - mean = float("2.49002e-05") - std = float("0.00195236") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.00389382") - max_val = float("0.008433") - mean = float("-1.88398e-05") - std = float("0.000934231") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [4, 288, 3, 3] - dtype = "float32" - min_val = float("-5.34524e-06") - max_val = float("0.000115912") - mean = float("5.31803e-06") - std = float("7.70699e-06") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [288] - dtype = "float32" - min_val = float("-0.2707") - max_val = float("0.772625") - mean = float("0.312276") - std = float("0.172178") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [288] - dtype = "float32" - min_val = float("0.993855") - max_val = float("1.7254") - mean = float("1.25878") - std = float("0.0948141") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [288] - dtype = "float32" - min_val = float("0.000222055") - max_val = float("0.00541883") - mean = float("0.000812938") - std = float("0.000640229") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [288] - dtype = "float32" - min_val = float("-0.1308") - max_val = float("0.0697218") - mean = float("-0.0266256") - std = float("0.0288337") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0481293") - max_val = float("0.0594145") - mean = float("-0.000431989") - std = float("0.0042605") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [288] - dtype = "float32" - min_val = float("-0.00286415") - max_val = float("0.00722174") - mean = float("-7.35274e-05") - std = float("0.00117538") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0122917") - max_val = float("0.016227") - mean = float("-1.72956e-05") - std = float("0.000997845") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [1, 17, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [68] - dtype = "float32" - min_val = float("-0.00366283") - max_val = float("0.0136174") - mean = float("3.45317e-08") - std = float("0.00377369") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [68, 576, 3, 3] - dtype = "float32" - min_val = float("-0.066466") - max_val = float("0.0676466") - mean = float("1.75132e-08") - std = float("0.00360721") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [576] - dtype = "float32" - min_val = float("-0.0422788") - max_val = float("0.113782") - mean = float("0.0223079") - std = float("0.0258955") - data = None - - -class 
Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [576] - dtype = "float32" - min_val = float("1.05316") - max_val = float("1.39794") - mean = float("1.15071") - std = float("0.0429545") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [576] - dtype = "float32" - min_val = float("4.91637e-05") - max_val = float("0.00266511") - mean = float("0.000246075") - std = float("0.000221109") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [576] - dtype = "float32" - min_val = float("-0.0345042") - max_val = float("0.0202052") - mean = float("-0.00576832") - std = float("0.00545338") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0386702") - max_val = float("0.0418189") - mean = float("-6.13284e-05") - std = float("0.00176803") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [576] - dtype = "float32" - min_val = float("-0.00435722") - max_val = float("0.00342043") - mean = float("0.000100629") - std = float("0.00100298") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.00356092") - max_val = float("0.00418283") - mean = float("2.84068e-05") - std = float("0.000365604") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [4] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [4, 576, 3, 3] - dtype = "float32" - min_val = float("-6.95821e-06") - max_val = float("0.000100905") - mean = float("3.36451e-06") - std = float("6.30444e-06") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [576] - dtype = "float32" - min_val = float("-0.248813") - max_val = float("0.372082") - mean = float("0.155726") - std = float("0.0836817") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [576] - dtype = "float32" - min_val = float("1.02627") - max_val = float("1.42841") - mean = float("1.1361") - std = float("0.0516175") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [576] - dtype = "float32" - min_val = float("0.000120346") - max_val = float("0.00280838") - mean = float("0.000713572") - std = float("0.000507142") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [576] - dtype = "float32" - min_val = float("-0.0736708") - max_val = float("0.0822341") - mean = float("-0.0216779") - std = float("0.0158763") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0600565") - max_val = float("0.0356134") - mean = float("-0.00023781") - std = float("0.00193273") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [576] - dtype = "float32" - min_val = float("-0.00774628") - max_val = float("0.00637093") - mean = float("-2.65663e-05") - std = float("0.000686488") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0266525") - max_val = float("0.0453807") - mean = float("-1.04982e-06") - std 
= float("0.000525074") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [576] - dtype = "float32" - min_val = float("-0.363147") - max_val = float("0.450423") - mean = float("0.129446") - std = float("0.123275") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [576] - dtype = "float32" - min_val = float("0.962108") - max_val = float("1.62765") - mean = float("1.1062") - std = float("0.0594285") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [576] - dtype = "float32" - min_val = float("0.00128466") - max_val = float("0.0464107") - mean = float("0.00458397") - std = float("0.00374471") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [576] - dtype = "float32" - min_val = float("-0.177259") - max_val = float("0.103565") - mean = float("-0.0218799") - std = float("0.0230091") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0611952") - max_val = float("0.04003") - mean = float("-0.000170338") - std = float("0.00295543") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [288] - dtype = "float32" - min_val = float("-0.27771") - max_val = float("0.0488218") - mean = float("-0.0555783") - std = float("0.0592335") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [288] - dtype = "float32" - min_val = float("0.915284") - max_val = float("1.07407") - mean = float("0.969917") - std = float("0.0228204") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [288] - dtype = "float32" - min_val = float("0.000953466") - max_val = float("0.0142773") - mean = float("0.00304707") - std = float("0.00176714") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [288] - dtype = "float32" - min_val = float("-0.04362") - max_val = float("0.0544441") - mean = float("0.00710198") - std = float("0.0174711") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0344584") - max_val = float("0.0245762") - mean = float("8.69469e-05") - std = float("0.00219113") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [288] - dtype = "float32" - min_val = float("-0.27771") - max_val = float("0.0488218") - mean = float("-0.0555783") - std = float("0.0592335") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [288] - dtype = "float32" - min_val = float("0.981008") - max_val = float("1.21689") - mean = float("1.04938") - std = float("0.0359279") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [288] - dtype = "float32" - min_val = float("0.00172209") - max_val = float("0.0250926") - mean = float("0.00470188") - std = float("0.00208766") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [288] - dtype = "float32" - min_val = float("-0.12407") - max_val = float("0.0767199") - mean = float("-0.029451") - std = float("0.022194") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0377338") - max_val = 
float("0.0461348") - mean = float("-8.57263e-05") - std = float("0.00163106") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [288] - dtype = "float32" - min_val = float("-0.385862") - max_val = float("0.0653291") - mean = float("-0.105618") - std = float("0.0696428") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [288] - dtype = "float32" - min_val = float("0.900938") - max_val = float("1.32086") - mean = float("1.04193") - std = float("0.0560555") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [288] - dtype = "float32" - min_val = float("0.00421839") - max_val = float("0.0367987") - mean = float("0.0103437") - std = float("0.00489499") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [288] - dtype = "float32" - min_val = float("-0.153433") - max_val = float("0.0932305") - mean = float("-0.0112633") - std = float("0.0256948") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0364099") - max_val = float("0.0523836") - mean = float("-8.24632e-05") - std = float("0.00184979") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [288] - dtype = "float32" - min_val = float("-0.299402") - max_val = float("0.0134731") - mean = float("-0.109532") - std = float("0.0597187") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [288] - dtype = "float32" - min_val = float("0.890481") - max_val = float("1.0932") - mean = float("0.968348") - std = float("0.0257197") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [288] - dtype = "float32" - min_val = float("0.00100979") - max_val = float("0.00878796") - mean = float("0.00444441") - std = float("0.00149939") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [288] - dtype = "float32" - min_val = float("-0.0520888") - max_val = float("0.049478") - mean = float("0.00820149") - std = float("0.0145604") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0322537") - max_val = float("0.029478") - mean = float("1.23234e-05") - std = float("0.00229431") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [288] - dtype = "float32" - min_val = float("-0.299402") - max_val = float("0.0134731") - mean = float("-0.109532") - std = float("0.0597187") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [288] - dtype = "float32" - min_val = float("0.962862") - max_val = float("1.20254") - mean = float("1.04567") - std = float("0.0391825") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [288] - dtype = "float32" - min_val = float("0.00284955") - max_val = float("0.0225437") - mean = float("0.007316") - std = float("0.00273474") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [288] - dtype = "float32" - min_val = float("-0.140963") - max_val = float("0.0369856") - mean = float("-0.0354432") - std = float("0.0220809") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [288, 288, 3, 3] - dtype 
= "float32" - min_val = float("-0.0384327") - max_val = float("0.0469147") - mean = float("-0.000103927") - std = float("0.00179042") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [288] - dtype = "float32" - min_val = float("-0.29697") - max_val = float("0.139867") - mean = float("-0.0979302") - std = float("0.0600107") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [288] - dtype = "float32" - min_val = float("0.857317") - max_val = float("1.30652") - mean = float("1.03469") - std = float("0.0730064") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [288] - dtype = "float32" - min_val = float("0.00405577") - max_val = float("0.0443345") - mean = float("0.0130506") - std = float("0.00639368") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [288] - dtype = "float32" - min_val = float("-0.133397") - max_val = float("0.122273") - mean = float("-0.0433752") - std = float("0.036443") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0411107") - max_val = float("0.0539263") - mean = float("-0.000111625") - std = float("0.00199295") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [288] - dtype = "float32" - min_val = float("-0.215301") - max_val = float("0.173622") - mean = float("-0.0676104") - std = float("0.0466325") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [288] - dtype = "float32" - min_val = float("0.91523") - max_val = float("1.17267") - mean = float("1.02621") - std = float("0.0431361") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [288] - dtype = "float32" - min_val = float("0.00251899") - max_val = float("0.0117666") - mean = float("0.00474816") - std = float("0.00149593") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [288] - dtype = "float32" - min_val = float("-0.0692205") - max_val = float("0.053793") - mean = float("-0.0160558") - std = float("0.0210432") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [288, 864, 1, 1] - dtype = "float32" - min_val = float("-0.0745047") - max_val = float("0.0842196") - mean = float("-0.00010905") - std = float("0.00289749") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [288] - dtype = "float32" - min_val = float("-0.0916511") - max_val = float("0.0302142") - mean = float("-0.0278605") - std = float("0.0201815") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [288] - dtype = "float32" - min_val = float("0.899444") - max_val = float("1.09798") - mean = float("1.01228") - std = float("0.0255773") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [288] - dtype = "float32" - min_val = float("0.00233029") - max_val = float("0.0161175") - mean = float("0.00376379") - std = float("0.00141808") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [288] - dtype = "float32" - min_val = float("-0.0681009") - max_val = float("0.0535") - mean = float("-0.0176997") - std = float("0.018982") - data = None - - -class Program_weight_tensor_parameter_99: - 
name = "parameter_99" - shape = [288, 864, 1, 1] - dtype = "float32" - min_val = float("-0.0500682") - max_val = float("0.0412844") - mean = float("-0.000136185") - std = float("0.00251502") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [288] - dtype = "float32" - min_val = float("-0.140589") - max_val = float("0.00421123") - mean = float("-0.0496541") - std = float("0.0252022") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [288] - dtype = "float32" - min_val = float("0.944857") - max_val = float("1.21748") - mean = float("1.05083") - std = float("0.0346128") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [288] - dtype = "float32" - min_val = float("0.00540006") - max_val = float("0.0523987") - mean = float("0.0137401") - std = float("0.00670132") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [288] - dtype = "float32" - min_val = float("-0.369448") - max_val = float("0.183087") - mean = float("-0.0467212") - std = float("0.0761257") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.029027") - max_val = float("0.0392117") - mean = float("-4.01833e-05") - std = float("0.00156935") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [288] - dtype = "float32" - min_val = float("-0.703153") - max_val = float("0.889582") - mean = float("0.279296") - std = float("0.234385") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [288] - dtype = "float32" - min_val = float("0.665579") - max_val = float("1.52345") - mean = float("1.16577") - std = float("0.119601") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [288] - dtype = "float32" - min_val = float("0.00263439") - max_val = float("0.0673288") - mean = float("0.00986936") - std = float("0.00642903") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [288] - dtype = "float32" - min_val = float("-0.172453") - max_val = float("0.213078") - mean = float("-0.0233247") - std = float("0.0347766") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.10595") - max_val = float("0.0861042") - mean = float("-0.000322342") - std = float("0.00641596") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [144] - dtype = "float32" - min_val = float("-0.348757") - max_val = float("0.143868") - mean = float("-0.0670231") - std = float("0.092848") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [144] - dtype = "float32" - min_val = float("0.826843") - max_val = float("1.10694") - mean = float("0.932296") - std = float("0.0366632") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [144] - dtype = "float32" - min_val = float("0.00141229") - max_val = float("0.0167029") - mean = float("0.00556534") - std = float("0.0030144") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [144] - dtype = "float32" - min_val = float("-0.0528065") - max_val = float("0.0511357") - mean = float("-0.00205834") - std = 
float("0.0130524") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0580646") - max_val = float("0.0259261") - mean = float("-0.000238707") - std = float("0.004453") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [144] - dtype = "float32" - min_val = float("-0.348757") - max_val = float("0.143868") - mean = float("-0.0670231") - std = float("0.092848") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [144] - dtype = "float32" - min_val = float("0.69241") - max_val = float("1.27544") - mean = float("1.06354") - std = float("0.0790744") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [144] - dtype = "float32" - min_val = float("0.00383033") - max_val = float("0.0240594") - mean = float("0.0089051") - std = float("0.00329963") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [144] - dtype = "float32" - min_val = float("-0.116614") - max_val = float("0.0942855") - mean = float("-0.0189062") - std = float("0.0284546") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0533475") - max_val = float("0.0528243") - mean = float("-0.000100816") - std = float("0.00343967") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [144] - dtype = "float32" - min_val = float("-0.439905") - max_val = float("0.120748") - mean = float("-0.165545") - std = float("0.123755") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [144] - dtype = "float32" - min_val = float("0.853433") - max_val = float("1.29875") - mean = float("1.03371") - std = float("0.0909163") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [144] - dtype = "float32" - min_val = float("0.00423297") - max_val = float("0.0438086") - mean = float("0.013521") - std = float("0.00655558") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [144] - dtype = "float32" - min_val = float("-0.139") - max_val = float("0.0849961") - mean = float("-0.00217177") - std = float("0.0237809") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0544763") - max_val = float("0.0689249") - mean = float("-0.000164816") - std = float("0.00383836") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [144] - dtype = "float32" - min_val = float("-0.441804") - max_val = float("0.028665") - mean = float("-0.192994") - std = float("0.0888286") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [144] - dtype = "float32" - min_val = float("0.704647") - max_val = float("1.06451") - mean = float("0.920355") - std = float("0.0516476") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [144] - dtype = "float32" - min_val = float("0.00221389") - max_val = float("0.0126339") - mean = float("0.00634418") - std = float("0.00185267") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [144] - dtype = "float32" - min_val = 
float("-0.037796") - max_val = float("0.0319353") - mean = float("0.00816013") - std = float("0.0133567") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0502432") - max_val = float("0.053736") - mean = float("-0.000306758") - std = float("0.00495078") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [144] - dtype = "float32" - min_val = float("-0.441804") - max_val = float("0.028665") - mean = float("-0.192994") - std = float("0.0888286") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [144] - dtype = "float32" - min_val = float("0.767985") - max_val = float("1.24616") - mean = float("1.05535") - std = float("0.0556624") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [144] - dtype = "float32" - min_val = float("0.00524932") - max_val = float("0.0354973") - mean = float("0.0124491") - std = float("0.00528304") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [144] - dtype = "float32" - min_val = float("-0.104281") - max_val = float("0.0433549") - mean = float("-0.017271") - std = float("0.0183086") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0537528") - max_val = float("0.0682326") - mean = float("-0.000146146") - std = float("0.00381018") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [144] - dtype = "float32" - min_val = float("-0.509343") - max_val = float("0.25213") - mean = float("-0.219535") - std = float("0.129933") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [144] - dtype = "float32" - min_val = float("0.785015") - max_val = float("1.53418") - mean = float("1.02864") - std = float("0.12374") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [144] - dtype = "float32" - min_val = float("0.00862567") - max_val = float("0.0571773") - mean = float("0.0178791") - std = float("0.00917033") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [144] - dtype = "float32" - min_val = float("-0.111979") - max_val = float("0.0136057") - mean = float("-0.0389999") - std = float("0.0203047") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0671493") - max_val = float("0.0824849") - mean = float("-0.000218676") - std = float("0.00431301") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [144] - dtype = "float32" - min_val = float("-0.598583") - max_val = float("0.0570557") - mean = float("-0.155009") - std = float("0.0803566") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [144] - dtype = "float32" - min_val = float("0.873451") - max_val = float("1.41532") - mean = float("1.02822") - std = float("0.071914") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [144] - dtype = "float32" - min_val = float("0.00392816") - max_val = float("0.0241202") - mean = float("0.00791989") - std = float("0.00288505") - data = None - - -class 
Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [144] - dtype = "float32" - min_val = float("-0.0850175") - max_val = float("0.063316") - mean = float("-0.0234895") - std = float("0.0233601") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [144, 432, 1, 1] - dtype = "float32" - min_val = float("-0.0664166") - max_val = float("0.0782398") - mean = float("-0.000296481") - std = float("0.00608418") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [144] - dtype = "float32" - min_val = float("-0.146243") - max_val = float("0.075235") - mean = float("-0.026676") - std = float("0.0383535") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [144] - dtype = "float32" - min_val = float("0.888091") - max_val = float("1.40929") - mean = float("0.996461") - std = float("0.0558093") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [144] - dtype = "float32" - min_val = float("0.00247041") - max_val = float("0.0278519") - mean = float("0.00613474") - std = float("0.00320836") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [144] - dtype = "float32" - min_val = float("-0.0513007") - max_val = float("0.0419876") - mean = float("-0.0117934") - std = float("0.0193503") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [144, 432, 1, 1] - dtype = "float32" - min_val = float("-0.0642335") - max_val = float("0.0683001") - mean = float("-0.00013801") - std = float("0.00524856") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [144] - dtype = "float32" - min_val = float("-0.258062") - max_val = float("0.0158585") - mean = float("-0.0907321") - std = float("0.0530183") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [144] - dtype = "float32" - min_val = float("0.818043") - max_val = float("1.19478") - mean = float("1.02377") - std = float("0.056563") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [144] - dtype = "float32" - min_val = float("0.0060931") - max_val = float("0.0623377") - mean = float("0.0181503") - std = float("0.0094012") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [144] - dtype = "float32" - min_val = float("-0.571436") - max_val = float("0.34374") - mean = float("-0.0108407") - std = float("0.126187") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0410201") - max_val = float("0.048983") - mean = float("-1.72048e-05") - std = float("0.00338842") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [144] - dtype = "float32" - min_val = float("-0.793878") - max_val = float("2.02014") - mean = float("0.408754") - std = float("0.537855") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [144] - dtype = "float32" - min_val = float("0.642031") - max_val = float("1.86401") - mean = float("1.10314") - std = float("0.254887") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [144] - dtype = "float32" - min_val = float("0.00315688") - max_val = float("0.0651796") - 
mean = float("0.0177255") - std = float("0.0123227") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [144] - dtype = "float32" - min_val = float("-0.211434") - max_val = float("0.214478") - mean = float("-0.0150602") - std = float("0.062693") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.18355") - max_val = float("0.101318") - mean = float("-0.000559139") - std = float("0.0130426") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [72] - dtype = "float32" - min_val = float("-0.596492") - max_val = float("0.496629") - mean = float("0.00880207") - std = float("0.280008") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [72] - dtype = "float32" - min_val = float("0.544805") - max_val = float("1.28262") - mean = float("0.828695") - std = float("0.105308") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [72] - dtype = "float32" - min_val = float("0.00121121") - max_val = float("0.0154097") - mean = float("0.00657741") - std = float("0.00405399") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [72] - dtype = "float32" - min_val = float("-0.0445254") - max_val = float("0.0643771") - mean = float("-0.00597889") - std = float("0.0159114") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [72, 72, 1, 1] - dtype = "float32" - min_val = float("-0.079712") - max_val = float("0.0572723") - mean = float("-0.000917905") - std = float("0.00853453") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [72] - dtype = "float32" - min_val = float("-0.596492") - max_val = float("0.496629") - mean = float("0.00880207") - std = float("0.280008") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [72] - dtype = "float32" - min_val = float("0.692696") - max_val = float("1.58855") - mean = float("1.06707") - std = float("0.139152") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [72] - dtype = "float32" - min_val = float("0.00325206") - max_val = float("0.0341689") - mean = float("0.0125694") - std = float("0.00721053") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [72] - dtype = "float32" - min_val = float("-0.202431") - max_val = float("0.069641") - mean = float("-0.0138793") - std = float("0.0479398") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0886597") - max_val = float("0.0870701") - mean = float("-0.000129493") - std = float("0.00706853") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [72] - dtype = "float32" - min_val = float("-0.788096") - max_val = float("0.657236") - mean = float("-0.317593") - std = float("0.294445") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [72] - dtype = "float32" - min_val = float("0.319105") - max_val = float("2.17325") - mean = float("0.870637") - std = float("0.238971") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [72] - dtype = "float32" - 
min_val = float("0.00358424") - max_val = float("0.0266004") - mean = float("0.0101194") - std = float("0.00504532") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [72] - dtype = "float32" - min_val = float("-0.0783159") - max_val = float("0.0807353") - mean = float("0.0185967") - std = float("0.0324074") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.114122") - max_val = float("0.0910359") - mean = float("-0.00051978") - std = float("0.00807761") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [72] - dtype = "float32" - min_val = float("-0.529687") - max_val = float("0.199379") - mean = float("-0.259751") - std = float("0.171947") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [72] - dtype = "float32" - min_val = float("0.599551") - max_val = float("0.970551") - mean = float("0.792683") - std = float("0.0733557") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [72] - dtype = "float32" - min_val = float("0.00301817") - max_val = float("0.0147212") - mean = float("0.00748466") - std = float("0.00212384") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [72] - dtype = "float32" - min_val = float("-0.0505147") - max_val = float("0.0345987") - mean = float("0.00396601") - std = float("0.0159843") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [72, 72, 1, 1] - dtype = "float32" - min_val = float("-0.0719846") - max_val = float("0.0561072") - mean = float("-0.00174883") - std = float("0.010313") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [72] - dtype = "float32" - min_val = float("-0.529687") - max_val = float("0.199379") - mean = float("-0.259751") - std = float("0.171947") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [72] - dtype = "float32" - min_val = float("0.706439") - max_val = float("1.22884") - mean = float("0.981247") - std = float("0.110846") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [72] - dtype = "float32" - min_val = float("0.00767675") - max_val = float("0.0447227") - mean = float("0.0168985") - std = float("0.0076087") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [72] - dtype = "float32" - min_val = float("-0.130446") - max_val = float("0.0730463") - mean = float("0.00326599") - std = float("0.0351423") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0927827") - max_val = float("0.118671") - mean = float("-0.000436366") - std = float("0.00816753") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [72] - dtype = "float32" - min_val = float("-0.978319") - max_val = float("0.910199") - mean = float("-0.373348") - std = float("0.382498") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [72] - dtype = "float32" - min_val = float("0.598562") - max_val = float("1.22465") - mean = float("0.867317") - std = float("0.120439") - data = None - - -class Program_weight_tensor_parameter_187: - 
name = "parameter_187" - shape = [72] - dtype = "float32" - min_val = float("0.00336641") - max_val = float("0.0470094") - mean = float("0.00884736") - std = float("0.00728432") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [72] - dtype = "float32" - min_val = float("-0.241371") - max_val = float("0.2411") - mean = float("-0.027551") - std = float("0.0896444") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0886619") - max_val = float("0.0880968") - mean = float("-0.00027759") - std = float("0.0091381") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [72] - dtype = "float32" - min_val = float("-1.03643") - max_val = float("0.87465") - mean = float("-0.149865") - std = float("0.508874") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [72] - dtype = "float32" - min_val = float("0.313694") - max_val = float("1.10515") - mean = float("0.655033") - std = float("0.174056") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [72] - dtype = "float32" - min_val = float("0.0053502") - max_val = float("0.0455901") - mean = float("0.0132061") - std = float("0.00705897") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [72] - dtype = "float32" - min_val = float("-0.14339") - max_val = float("0.123691") - mean = float("-0.0055304") - std = float("0.0504267") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [72, 336, 1, 1] - dtype = "float32" - min_val = float("-0.14681") - max_val = float("0.127726") - mean = float("-0.000474756") - std = float("0.0119385") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [72] - dtype = "float32" - min_val = float("-0.120348") - max_val = float("0.412377") - mean = float("0.165721") - std = float("0.110684") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [72] - dtype = "float32" - min_val = float("0.659788") - max_val = float("1.34775") - mean = float("0.850032") - std = float("0.109302") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [72] - dtype = "float32" - min_val = float("0.00159057") - max_val = float("0.0317446") - mean = float("0.00599932") - std = float("0.00340858") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [72] - dtype = "float32" - min_val = float("-0.0880301") - max_val = float("0.074714") - mean = float("-0.00829577") - std = float("0.030976") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [72, 336, 1, 1] - dtype = "float32" - min_val = float("-0.108096") - max_val = float("0.107109") - mean = float("7.87305e-05") - std = float("0.00828471") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [144] - dtype = "float32" - min_val = float("-0.476764") - max_val = float("0.168049") - mean = float("-0.0838514") - std = float("0.124575") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [144] - dtype = "float32" - min_val = float("0.617689") - max_val = float("1.54284") - mean = float("0.785115") - std = float("0.113178") - data = 
None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [144] - dtype = "float32" - min_val = float("0.00602933") - max_val = float("0.0596091") - mean = float("0.0124482") - std = float("0.00635021") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [144] - dtype = "float32" - min_val = float("-0.109409") - max_val = float("0.0390456") - mean = float("-0.0261988") - std = float("0.0242817") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [144, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0819936") - max_val = float("0.0914399") - mean = float("-0.000602675") - std = float("0.00882576") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [288] - dtype = "float32" - min_val = float("-0.43398") - max_val = float("0.195139") - mean = float("-0.113323") - std = float("0.0837459") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [288] - dtype = "float32" - min_val = float("0.801149") - max_val = float("1.51552") - mean = float("1.01244") - std = float("0.0942505") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [288] - dtype = "float32" - min_val = float("0.00697899") - max_val = float("0.0426641") - mean = float("0.0125514") - std = float("0.00524946") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [288] - dtype = "float32" - min_val = float("-0.199677") - max_val = float("0.152914") - mean = float("-0.0401824") - std = float("0.042326") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.105612") - max_val = float("0.118139") - mean = float("-0.000716065") - std = float("0.00840771") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [144] - dtype = "float32" - min_val = float("-0.413817") - max_val = float("0.0254389") - mean = float("-0.106277") - std = float("0.066947") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [144] - dtype = "float32" - min_val = float("0.691501") - max_val = float("0.955612") - mean = float("0.877305") - std = float("0.0375328") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [144] - dtype = "float32" - min_val = float("0.00387808") - max_val = float("0.028644") - mean = float("0.00817049") - std = float("0.00276292") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [144] - dtype = "float32" - min_val = float("-0.0443968") - max_val = float("0.042331") - mean = float("-0.0131527") - std = float("0.0192302") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0418187") - max_val = float("0.0336758") - mean = float("-0.000853057") - std = float("0.00590709") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [144] - dtype = "float32" - min_val = float("-0.413817") - max_val = float("0.0254389") - mean = float("-0.106277") - std = float("0.066947") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [144] - dtype = "float32" - min_val = float("0.871658") - max_val 
= float("1.14399") - mean = float("0.991674") - std = float("0.0442735") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [144] - dtype = "float32" - min_val = float("0.00847572") - max_val = float("0.037261") - mean = float("0.0170862") - std = float("0.00540559") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [144] - dtype = "float32" - min_val = float("-0.0858501") - max_val = float("0.0752316") - mean = float("-0.0215803") - std = float("0.0267547") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0708304") - max_val = float("0.126419") - mean = float("-0.000129981") - std = float("0.00418282") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [144] - dtype = "float32" - min_val = float("-0.503198") - max_val = float("-0.00723544") - mean = float("-0.215277") - std = float("0.107333") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [144] - dtype = "float32" - min_val = float("0.837897") - max_val = float("1.40392") - mean = float("1.05848") - std = float("0.0870235") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [144] - dtype = "float32" - min_val = float("0.0204089") - max_val = float("0.114905") - mean = float("0.0339913") - std = float("0.0111585") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [144] - dtype = "float32" - min_val = float("-0.115567") - max_val = float("0.074236") - mean = float("-0.0297392") - std = float("0.0292001") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0638722") - max_val = float("0.094548") - mean = float("-0.00028317") - std = float("0.00489057") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [144] - dtype = "float32" - min_val = float("-0.443227") - max_val = float("0.0300293") - mean = float("-0.20916") - std = float("0.0836857") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [144] - dtype = "float32" - min_val = float("0.796667") - max_val = float("1.16669") - mean = float("0.944169") - std = float("0.0541301") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [144] - dtype = "float32" - min_val = float("0.0023532") - max_val = float("0.0139004") - mean = float("0.00492937") - std = float("0.00150275") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [144] - dtype = "float32" - min_val = float("-0.0396228") - max_val = float("0.0357555") - mean = float("-0.00883661") - std = float("0.0111889") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0446649") - max_val = float("0.0619167") - mean = float("-0.000724838") - std = float("0.00745171") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [144] - dtype = "float32" - min_val = float("-0.443227") - max_val = float("0.0300293") - mean = float("-0.20916") - std = float("0.0836857") - data = None - - -class Program_weight_tensor_parameter_231: - name = 
"parameter_231" - shape = [144] - dtype = "float32" - min_val = float("0.855523") - max_val = float("1.20493") - mean = float("1.00232") - std = float("0.0650819") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [144] - dtype = "float32" - min_val = float("0.00909786") - max_val = float("0.0362024") - mean = float("0.0145287") - std = float("0.00368455") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [144] - dtype = "float32" - min_val = float("-0.0654417") - max_val = float("0.0471346") - mean = float("-0.0150238") - std = float("0.0228131") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0569246") - max_val = float("0.0681235") - mean = float("-0.000190838") - std = float("0.00429518") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [144] - dtype = "float32" - min_val = float("-0.619341") - max_val = float("-0.0112904") - mean = float("-0.271335") - std = float("0.107403") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [144] - dtype = "float32" - min_val = float("0.887194") - max_val = float("1.60529") - mean = float("1.03008") - std = float("0.0832458") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [144] - dtype = "float32" - min_val = float("0.0111345") - max_val = float("0.0421569") - mean = float("0.0185696") - std = float("0.00509881") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [144] - dtype = "float32" - min_val = float("-0.196773") - max_val = float("0.0810831") - mean = float("-0.0379885") - std = float("0.0331759") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0581918") - max_val = float("0.0721246") - mean = float("-0.000276431") - std = float("0.00532616") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [144] - dtype = "float32" - min_val = float("-0.67915") - max_val = float("0.300043") - mean = float("-0.249268") - std = float("0.140088") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [144] - dtype = "float32" - min_val = float("0.820156") - max_val = float("1.25926") - mean = float("1.02103") - std = float("0.0835064") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [144] - dtype = "float32" - min_val = float("0.00487426") - max_val = float("0.0195417") - mean = float("0.00798593") - std = float("0.00214001") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [144] - dtype = "float32" - min_val = float("-0.0685193") - max_val = float("0.111677") - mean = float("0.012203") - std = float("0.0263654") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [144, 672, 1, 1] - dtype = "float32" - min_val = float("-0.0518257") - max_val = float("0.0801678") - mean = float("-0.000299396") - std = float("0.00706162") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [144] - dtype = "float32" - min_val = float("-0.219529") - max_val = float("0.482816") - mean = float("0.00625415") - std = 
float("0.0996225") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [144] - dtype = "float32" - min_val = float("0.943085") - max_val = float("1.31189") - mean = float("1.06741") - std = float("0.0759901") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [144] - dtype = "float32" - min_val = float("0.00430579") - max_val = float("0.0498874") - mean = float("0.00849452") - std = float("0.00419529") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [144] - dtype = "float32" - min_val = float("-0.0734484") - max_val = float("0.0501055") - mean = float("-0.00422856") - std = float("0.0238347") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [144, 672, 1, 1] - dtype = "float32" - min_val = float("-0.283127") - max_val = float("0.126053") - mean = float("-0.000223953") - std = float("0.00673773") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [288] - dtype = "float32" - min_val = float("-0.471007") - max_val = float("-0.0768897") - mean = float("-0.24229") - std = float("0.058604") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [288] - dtype = "float32" - min_val = float("0.693872") - max_val = float("1.04898") - mean = float("0.819337") - std = float("0.0537956") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [288] - dtype = "float32" - min_val = float("0.00693365") - max_val = float("0.0444148") - mean = float("0.0112387") - std = float("0.0036165") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [288] - dtype = "float32" - min_val = float("-0.0895123") - max_val = float("0.0744079") - mean = float("-0.0247192") - std = float("0.0210024") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [288, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0678711") - max_val = float("0.0563575") - mean = float("-0.000356836") - std = float("0.00561004") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [576] - dtype = "float32" - min_val = float("-0.22392") - max_val = float("0.236737") - mean = float("-0.12655") - std = float("0.0408273") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [576] - dtype = "float32" - min_val = float("0.899743") - max_val = float("1.38442") - mean = float("1.04482") - std = float("0.0445486") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [576] - dtype = "float32" - min_val = float("0.00534322") - max_val = float("0.0242086") - mean = float("0.00935106") - std = float("0.00235956") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [576] - dtype = "float32" - min_val = float("-0.11956") - max_val = float("0.0966195") - mean = float("-0.0385181") - std = float("0.0241664") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0794723") - max_val = float("0.109769") - mean = float("-0.00038882") - std = float("0.00481347") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [288] - dtype = "float32" - min_val = 
float("-0.23913") - max_val = float("0.268967") - mean = float("-0.0788435") - std = float("0.0530956") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [288] - dtype = "float32" - min_val = float("0.782606") - max_val = float("1.06451") - mean = float("0.949247") - std = float("0.0301924") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [288] - dtype = "float32" - min_val = float("0.00297758") - max_val = float("0.0373329") - mean = float("0.00987717") - std = float("0.0043303") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [288] - dtype = "float32" - min_val = float("-0.0628018") - max_val = float("0.0495571") - mean = float("-0.00912493") - std = float("0.0163036") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0359925") - max_val = float("0.0349088") - mean = float("-0.000152439") - std = float("0.00410565") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [288] - dtype = "float32" - min_val = float("-0.23913") - max_val = float("0.268967") - mean = float("-0.0788435") - std = float("0.0530956") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [288] - dtype = "float32" - min_val = float("0.873184") - max_val = float("1.24138") - mean = float("1.00729") - std = float("0.0416184") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [288] - dtype = "float32" - min_val = float("0.0144312") - max_val = float("0.267786") - mean = float("0.0465309") - std = float("0.0213648") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [288] - dtype = "float32" - min_val = float("-0.278993") - max_val = float("0.0513671") - mean = float("-0.0773057") - std = float("0.0501911") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0445249") - max_val = float("0.067184") - mean = float("-0.000174577") - std = float("0.00185068") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [288] - dtype = "float32" - min_val = float("-0.154725") - max_val = float("0.176405") - mean = float("-0.0451395") - std = float("0.0405051") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [288] - dtype = "float32" - min_val = float("0.906089") - max_val = float("1.22107") - mean = float("1.03843") - std = float("0.0584222") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [288] - dtype = "float32" - min_val = float("0.012856") - max_val = float("0.161846") - mean = float("0.0405644") - std = float("0.0172627") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [288] - dtype = "float32" - min_val = float("-0.188737") - max_val = float("0.116279") - mean = float("-0.0533434") - std = float("0.0531372") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0349189") - max_val = float("0.0583932") - mean = float("-0.000119797") - std = float("0.00228585") - data = None - - -class 
Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [288] - dtype = "float32" - min_val = float("-0.22913") - max_val = float("0.0550435") - mean = float("-0.0679392") - std = float("0.0398991") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [288] - dtype = "float32" - min_val = float("0.901237") - max_val = float("1.30891") - mean = float("1.04301") - std = float("0.0700643") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [288] - dtype = "float32" - min_val = float("0.0398981") - max_val = float("0.306777") - mean = float("0.105215") - std = float("0.0367725") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [288] - dtype = "float32" - min_val = float("-1.6438") - max_val = float("1.76347") - mean = float("-0.0742338") - std = float("0.501869") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [288, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0739741") - max_val = float("0.0690574") - mean = float("4.74459e-05") - std = float("0.00375253") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [288] - dtype = "float32" - min_val = float("-0.104232") - max_val = float("0.0384892") - mean = float("-0.0070986") - std = float("0.0181476") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [288] - dtype = "float32" - min_val = float("0.900639") - max_val = float("1.15952") - mean = float("0.963662") - std = float("0.0263273") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [288] - dtype = "float32" - min_val = float("0.00287171") - max_val = float("0.0106048") - mean = float("0.0049299") - std = float("0.00108551") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [288] - dtype = "float32" - min_val = float("-0.0988844") - max_val = float("0.0789783") - mean = float("-0.0485279") - std = float("0.0238655") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0304843") - max_val = float("0.0399859") - mean = float("-0.000813718") - std = float("0.00406643") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [288] - dtype = "float32" - min_val = float("-0.104232") - max_val = float("0.0384892") - mean = float("-0.0070986") - std = float("0.0181476") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [288] - dtype = "float32" - min_val = float("0.925735") - max_val = float("1.3113") - mean = float("1.02745") - std = float("0.0619514") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [288] - dtype = "float32" - min_val = float("0.00770713") - max_val = float("0.0389126") - mean = float("0.0204063") - std = float("0.00536096") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [288] - dtype = "float32" - min_val = float("-0.234289") - max_val = float("0.10658") - mean = float("-0.109382") - std = float("0.0469286") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0337794") - max_val = float("0.0422064") - 
mean = float("-0.000230922") - std = float("0.00195527") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [288] - dtype = "float32" - min_val = float("-0.223666") - max_val = float("0.0286354") - mean = float("-0.0472094") - std = float("0.0307449") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [288] - dtype = "float32" - min_val = float("0.929918") - max_val = float("1.3633") - mean = float("1.05315") - std = float("0.0557061") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [288] - dtype = "float32" - min_val = float("0.011915") - max_val = float("0.0576644") - mean = float("0.0215698") - std = float("0.00513117") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [288] - dtype = "float32" - min_val = float("-0.424449") - max_val = float("0.4419") - mean = float("-0.106397") - std = float("0.0754344") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.028501") - max_val = float("0.0366931") - mean = float("-0.000232809") - std = float("0.00238709") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [288] - dtype = "float32" - min_val = float("-0.21661") - max_val = float("0.119111") - mean = float("-0.0631797") - std = float("0.0519387") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [288] - dtype = "float32" - min_val = float("0.987879") - max_val = float("1.24617") - mean = float("1.05608") - std = float("0.0335288") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [288] - dtype = "float32" - min_val = float("0.0167741") - max_val = float("0.063874") - mean = float("0.0243677") - std = float("0.00554264") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [288] - dtype = "float32" - min_val = float("-0.171478") - max_val = float("0.180474") - mean = float("-0.0617349") - std = float("0.0437105") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [288, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0315675") - max_val = float("0.0606433") - mean = float("-0.000353396") - std = float("0.00416745") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [288] - dtype = "float32" - min_val = float("-0.0812952") - max_val = float("0.0354981") - mean = float("-0.00934253") - std = float("0.0173255") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [288] - dtype = "float32" - min_val = float("1.01432") - max_val = float("1.1416") - mean = float("1.08017") - std = float("0.0240114") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [288] - dtype = "float32" - min_val = float("0.0168412") - max_val = float("0.0350895") - mean = float("0.0225807") - std = float("0.00281515") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [288] - dtype = "float32" - min_val = float("-0.136888") - max_val = float("0.0259121") - mean = float("-0.0590415") - std = float("0.025695") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [288, 768, 1, 1] - dtype = 
"float32" - min_val = float("-0.0220127") - max_val = float("0.0395105") - mean = float("-0.000346125") - std = float("0.00412748") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [768] - dtype = "float32" - min_val = float("-4.17201") - max_val = float("-0.103231") - mean = float("-2.23559") - std = float("0.546417") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [768] - dtype = "float32" - min_val = float("1.67639") - max_val = float("4.7074") - mean = float("3.3138") - std = float("0.317348") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [768] - dtype = "float32" - min_val = float("0.0031352") - max_val = float("0.0144649") - mean = float("0.00530109") - std = float("0.00129942") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [768] - dtype = "float32" - min_val = float("-0.102364") - max_val = float("0.140331") - mean = float("-0.0380092") - std = float("0.0222492") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [768, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0579352") - max_val = float("0.0710566") - mean = float("-0.00047174") - std = float("0.00454072") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [576] - dtype = "float32" - min_val = float("-0.0194227") - max_val = float("0.00104465") - mean = float("-0.00108579") - std = float("0.00283685") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [576, 576, 1, 1] - dtype = "float32" - min_val = float("-0.181942") - max_val = float("0.183689") - mean = float("-0.000333769") - std = float("0.00240042") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [288] - dtype = "float32" - min_val = float("-2.05493") - max_val = float("0.978") - mean = float("-0.272319") - std = float("0.353617") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [288] - dtype = "float32" - min_val = float("0.135081") - max_val = float("2.13114") - mean = float("0.537166") - std = float("0.306899") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [288] - dtype = "float32" - min_val = float("7.69798e-05") - max_val = float("0.00216471") - mean = float("0.000305136") - std = float("0.000236517") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [288] - dtype = "float32" - min_val = float("-0.0302303") - max_val = float("0.0570347") - mean = float("0.0184598") - std = float("0.0154999") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0362218") - max_val = float("0.039556") - mean = float("-0.000416452") - std = float("0.00309243") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [288] - dtype = "float32" - min_val = float("-2.05493") - max_val = float("0.978") - mean = float("-0.272319") - std = float("0.353617") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [288] - dtype = "float32" - min_val = float("0.485427") - max_val = float("2.77924") - mean = float("1.1636") - std = float("0.366821") - data = None - - -class 
Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [288] - dtype = "float32" - min_val = float("0.000702807") - max_val = float("0.00951581") - mean = float("0.00190549") - std = float("0.00103207") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [288] - dtype = "float32" - min_val = float("-0.222082") - max_val = float("0.100345") - mean = float("0.0213653") - std = float("0.0261282") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0292711") - max_val = float("0.035843") - mean = float("-7.17498e-05") - std = float("0.00239497") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [288] - dtype = "float32" - min_val = float("-3.04804") - max_val = float("0.902721") - mean = float("-1.62463") - std = float("0.522032") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [288] - dtype = "float32" - min_val = float("0.412288") - max_val = float("1.85924") - mean = float("1.14297") - std = float("0.183586") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [288] - dtype = "float32" - min_val = float("0.0227719") - max_val = float("0.185075") - mean = float("0.0456612") - std = float("0.0139034") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [288] - dtype = "float32" - min_val = float("-1.05487") - max_val = float("0.350092") - mean = float("-0.153281") - std = float("0.112535") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0310577") - max_val = float("0.0584148") - mean = float("-0.00021026") - std = float("0.00299679") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [288] - dtype = "float32" - min_val = float("-2.14217") - max_val = float("1.60455") - mean = float("-0.41596") - std = float("0.434273") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [288] - dtype = "float32" - min_val = float("0.0226806") - max_val = float("2.20193") - mean = float("0.427501") - std = float("0.286031") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [288] - dtype = "float32" - min_val = float("1.90422e-05") - max_val = float("0.00251891") - mean = float("0.000473033") - std = float("0.00032871") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [288] - dtype = "float32" - min_val = float("-0.0304152") - max_val = float("0.0734532") - mean = float("0.0241604") - std = float("0.0155045") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0253061") - max_val = float("0.0316181") - mean = float("-0.000587413") - std = float("0.00266057") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [288] - dtype = "float32" - min_val = float("-2.14217") - max_val = float("1.60455") - mean = float("-0.41596") - std = float("0.434273") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [288] - dtype = "float32" - min_val = float("0.469583") - max_val = float("2.46723") - mean = 
float("1.14325") - std = float("0.313639") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [288] - dtype = "float32" - min_val = float("0.00155497") - max_val = float("0.0075944") - mean = float("0.00342237") - std = float("0.00109591") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [288] - dtype = "float32" - min_val = float("-0.166374") - max_val = float("0.142145") - mean = float("0.0385124") - std = float("0.0284128") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0300473") - max_val = float("0.0566634") - mean = float("-0.000115715") - std = float("0.00253983") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [288] - dtype = "float32" - min_val = float("-2.64172") - max_val = float("0.380203") - mean = float("-1.38767") - std = float("0.385623") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [288] - dtype = "float32" - min_val = float("0.558043") - max_val = float("1.71262") - mean = float("1.12622") - std = float("0.13931") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [288] - dtype = "float32" - min_val = float("0.0148874") - max_val = float("0.0607992") - mean = float("0.0260852") - std = float("0.00787152") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [288] - dtype = "float32" - min_val = float("-0.824043") - max_val = float("0.12343") - mean = float("-0.10388") - std = float("0.0750312") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [288, 288, 3, 3] - dtype = "float32" - min_val = float("-0.0416163") - max_val = float("0.0562858") - mean = float("-0.000193988") - std = float("0.00280354") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [288] - dtype = "float32" - min_val = float("-3.62765") - max_val = float("2.97036") - mean = float("-0.711269") - std = float("0.761329") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [288] - dtype = "float32" - min_val = float("0.890678") - max_val = float("2.92256") - mean = float("1.6374") - std = float("0.311721") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [288] - dtype = "float32" - min_val = float("0.00239225") - max_val = float("0.00831343") - mean = float("0.00407401") - std = float("0.000948454") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [288] - dtype = "float32" - min_val = float("-0.182535") - max_val = float("0.115867") - mean = float("0.0486434") - std = float("0.031021") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [288, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0933941") - max_val = float("0.0753658") - mean = float("-0.000787655") - std = float("0.00567589") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [288] - dtype = "float32" - min_val = float("-2.8628") - max_val = float("0.479748") - mean = float("-0.682305") - std = float("0.57709") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [288] - dtype = "float32" - 
min_val = float("0.975929") - max_val = float("3.52368") - mean = float("1.80201") - std = float("0.36815") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [288] - dtype = "float32" - min_val = float("0.000804451") - max_val = float("0.00292711") - mean = float("0.00143956") - std = float("0.000325738") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [288] - dtype = "float32" - min_val = float("-0.0342743") - max_val = float("0.0590277") - mean = float("0.0195141") - std = float("0.0151883") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [288, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0373152") - max_val = float("0.0841779") - mean = float("-0.00034827") - std = float("0.004295") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [576] - dtype = "float32" - min_val = float("-2.52101") - max_val = float("0.857915") - mean = float("-0.842205") - std = float("0.39859") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [576] - dtype = "float32" - min_val = float("0.488026") - max_val = float("1.95305") - mean = float("0.895161") - std = float("0.179359") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [576] - dtype = "float32" - min_val = float("0.00558504") - max_val = float("0.0454555") - mean = float("0.0102845") - std = float("0.00312323") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [576] - dtype = "float32" - min_val = float("-0.170477") - max_val = float("0.187667") - mean = float("0.0352537") - std = float("0.0479894") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [576, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0260577") - max_val = float("0.0532348") - mean = float("-0.0001041") - std = float("0.00269908") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [384] - dtype = "float32" - min_val = float("-2.62379") - max_val = float("1.27783") - mean = float("-1.09735") - std = float("0.546546") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [384] - dtype = "float32" - min_val = float("0.408486") - max_val = float("1.56047") - mean = float("1.0748") - std = float("0.155167") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [384] - dtype = "float32" - min_val = float("0.00140498") - max_val = float("0.00887108") - mean = float("0.00364747") - std = float("0.000973317") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [384] - dtype = "float32" - min_val = float("-0.173561") - max_val = float("0.131263") - mean = float("-0.0405442") - std = float("0.0419344") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [384, 288, 1, 1] - dtype = "float32" - min_val = float("-0.315079") - max_val = float("0.114758") - mean = float("-0.000694619") - std = float("0.00882253") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [288] - dtype = "float32" - min_val = float("-0.0129129") - max_val = float("0.00094621") - mean = float("-0.00331667") - std = float("0.00268758") - data = None - - -class 
Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [288, 288, 1, 1] - dtype = "float32" - min_val = float("-0.299495") - max_val = float("0.215047") - mean = float("-0.00235806") - std = float("0.0062383") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [144] - dtype = "float32" - min_val = float("-1.89031") - max_val = float("0.682555") - mean = float("-0.250602") - std = float("0.42457") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [144] - dtype = "float32" - min_val = float("-3.39574e-06") - max_val = float("2.27363") - mean = float("0.459077") - std = float("0.465494") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [144] - dtype = "float32" - min_val = float("4.41383e-12") - max_val = float("0.00139051") - mean = float("0.000354333") - std = float("0.000234562") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [144] - dtype = "float32" - min_val = float("-0.0453964") - max_val = float("0.0337442") - mean = float("0.00432955") - std = float("0.0128423") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0348046") - max_val = float("0.0712641") - mean = float("-0.000374649") - std = float("0.00443917") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [144] - dtype = "float32" - min_val = float("-1.89031") - max_val = float("0.682555") - mean = float("-0.250602") - std = float("0.42457") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [144] - dtype = "float32" - min_val = float("0.418716") - max_val = float("3.73401") - mean = float("1.30895") - std = float("0.607907") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [144] - dtype = "float32" - min_val = float("0.00100253") - max_val = float("0.00898137") - mean = float("0.00451763") - std = float("0.00157334") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [144] - dtype = "float32" - min_val = float("-0.12794") - max_val = float("0.101272") - mean = float("0.0247504") - std = float("0.0355174") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0346061") - max_val = float("0.0470164") - mean = float("-0.000215707") - std = float("0.0041935") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [144] - dtype = "float32" - min_val = float("-2.66323") - max_val = float("0.362254") - mean = float("-1.26569") - std = float("0.512716") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [144] - dtype = "float32" - min_val = float("0.576539") - max_val = float("1.97695") - mean = float("1.18023") - std = float("0.187262") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [144] - dtype = "float32" - min_val = float("0.0377575") - max_val = float("0.21017") - mean = float("0.0696304") - std = float("0.0235545") - data = None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [144] - dtype = "float32" - min_val = float("-2.48186") - max_val = float("1.76252") - mean = 
float("-0.157181") - std = float("0.322131") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0497725") - max_val = float("0.0492881") - mean = float("-0.000281602") - std = float("0.00478039") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [144] - dtype = "float32" - min_val = float("-1.73734") - max_val = float("0.692917") - mean = float("-0.18539") - std = float("0.413661") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [144] - dtype = "float32" - min_val = float("0.000376458") - max_val = float("2.82934") - mean = float("0.328185") - std = float("0.356839") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [144] - dtype = "float32" - min_val = float("2.39692e-08") - max_val = float("0.0041016") - mean = float("0.000455802") - std = float("0.000479833") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [144] - dtype = "float32" - min_val = float("-0.030728") - max_val = float("0.0415527") - mean = float("0.00831477") - std = float("0.0125762") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.051179") - max_val = float("0.0576014") - mean = float("-0.000493177") - std = float("0.00427804") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [144] - dtype = "float32" - min_val = float("-1.73734") - max_val = float("0.692917") - mean = float("-0.18539") - std = float("0.413661") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [144] - dtype = "float32" - min_val = float("0.375666") - max_val = float("3.03489") - mean = float("1.09356") - std = float("0.399713") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [144] - dtype = "float32" - min_val = float("0.00208899") - max_val = float("0.00974547") - mean = float("0.00520382") - std = float("0.0013558") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [144] - dtype = "float32" - min_val = float("-0.0355158") - max_val = float("0.0761709") - mean = float("0.0259472") - std = float("0.0220464") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0387272") - max_val = float("0.0410833") - mean = float("-0.000193716") - std = float("0.00436167") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [144] - dtype = "float32" - min_val = float("-3.02892") - max_val = float("0.118332") - mean = float("-1.27789") - std = float("0.573834") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [144] - dtype = "float32" - min_val = float("0.676134") - max_val = float("1.94767") - mean = float("1.17746") - std = float("0.203827") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [144] - dtype = "float32" - min_val = float("0.0206076") - max_val = float("0.0888994") - mean = float("0.0394175") - std = float("0.0100858") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [144] - dtype = 
"float32" - min_val = float("-0.408893") - max_val = float("0.265643") - mean = float("-0.0431433") - std = float("0.13118") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0561149") - max_val = float("0.0750932") - mean = float("-0.00029987") - std = float("0.00486889") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [144] - dtype = "float32" - min_val = float("-1.35133") - max_val = float("0.821835") - mean = float("-0.13373") - std = float("0.357825") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [144] - dtype = "float32" - min_val = float("-1.05123e-08") - max_val = float("1.49414") - mean = float("0.18472") - std = float("0.153441") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [144] - dtype = "float32" - min_val = float("4.16705e-17") - max_val = float("0.00240365") - mean = float("0.000251688") - std = float("0.000227807") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [144] - dtype = "float32" - min_val = float("-0.047473") - max_val = float("0.0646974") - mean = float("0.00888849") - std = float("0.0132203") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0355803") - max_val = float("0.0307658") - mean = float("-0.000399692") - std = float("0.00394002") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [144] - dtype = "float32" - min_val = float("-1.35133") - max_val = float("0.821835") - mean = float("-0.13373") - std = float("0.357825") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [144] - dtype = "float32" - min_val = float("0.305448") - max_val = float("1.84848") - mean = float("0.89377") - std = float("0.301052") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [144] - dtype = "float32" - min_val = float("0.00208198") - max_val = float("0.00965758") - mean = float("0.00489852") - std = float("0.00141206") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [144] - dtype = "float32" - min_val = float("-0.0282734") - max_val = float("0.114052") - mean = float("0.0403195") - std = float("0.0275993") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0392374") - max_val = float("0.0493932") - mean = float("-0.000231543") - std = float("0.00438483") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [144] - dtype = "float32" - min_val = float("-2.73135") - max_val = float("0.0567831") - mean = float("-1.27362") - std = float("0.5112") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [144] - dtype = "float32" - min_val = float("0.62038") - max_val = float("1.55808") - mean = float("1.10846") - std = float("0.164403") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" - shape = [144] - dtype = "float32" - min_val = float("0.0113622") - max_val = float("0.0371877") - mean = float("0.0225402") - std = float("0.00518884") - data = None - - -class 
Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [144] - dtype = "float32" - min_val = float("-0.55178") - max_val = float("0.193574") - mean = float("-0.0453446") - std = float("0.110957") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0546125") - max_val = float("0.0718452") - mean = float("-0.000310657") - std = float("0.00493932") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [144] - dtype = "float32" - min_val = float("-1.92079") - max_val = float("0.644614") - mean = float("-0.125228") - std = float("0.358111") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [144] - dtype = "float32" - min_val = float("6.93466e-11") - max_val = float("1.7663") - mean = float("0.238668") - std = float("0.271207") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [144] - dtype = "float32" - min_val = float("3.34966e-19") - max_val = float("0.0106171") - mean = float("0.000715393") - std = float("0.00128468") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [144] - dtype = "float32" - min_val = float("-0.0415258") - max_val = float("0.130452") - mean = float("0.0113087") - std = float("0.0225185") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.0911586") - max_val = float("0.0543513") - mean = float("-0.000545815") - std = float("0.00558185") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [144] - dtype = "float32" - min_val = float("-1.92079") - max_val = float("0.644614") - mean = float("-0.125228") - std = float("0.358111") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [144] - dtype = "float32" - min_val = float("0.30759") - max_val = float("1.61305") - mean = float("0.74094") - std = float("0.255891") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [144] - dtype = "float32" - min_val = float("0.00349199") - max_val = float("0.0167988") - mean = float("0.00831531") - std = float("0.00253196") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [144] - dtype = "float32" - min_val = float("-0.0930515") - max_val = float("0.160494") - mean = float("0.0426139") - std = float("0.0448304") - data = None - - -class Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.0865275") - max_val = float("0.0693328") - mean = float("-0.000270166") - std = float("0.00433576") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [144] - dtype = "float32" - min_val = float("-2.46604") - max_val = float("0.311463") - mean = float("-1.11261") - std = float("0.444526") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [144] - dtype = "float32" - min_val = float("0.645192") - max_val = float("1.43977") - mean = float("1.10398") - std = float("0.149059") - data = None - - -class Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [144] - dtype = "float32" - min_val = float("0.00792923") - max_val = float("0.0410344") - 
mean = float("0.017982") - std = float("0.00594959") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [144] - dtype = "float32" - min_val = float("-0.402604") - max_val = float("0.181192") - mean = float("-0.0350284") - std = float("0.0901153") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [144, 144, 3, 3] - dtype = "float32" - min_val = float("-0.12551") - max_val = float("0.134437") - mean = float("-0.000230808") - std = float("0.00507475") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [144] - dtype = "float32" - min_val = float("-1.63695") - max_val = float("1.61843") - mean = float("0.00914071") - std = float("0.773499") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [144] - dtype = "float32" - min_val = float("0.446498") - max_val = float("1.44574") - mean = float("0.791631") - std = float("0.199487") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [144] - dtype = "float32" - min_val = float("0.00803846") - max_val = float("0.0483941") - mean = float("0.0185568") - std = float("0.00763975") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [144] - dtype = "float32" - min_val = float("-0.220237") - max_val = float("0.262194") - mean = float("-0.0333374") - std = float("0.0733832") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [144, 288, 1, 1] - dtype = "float32" - min_val = float("-0.131175") - max_val = float("0.101205") - mean = float("-0.0007374") - std = float("0.00944354") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [144] - dtype = "float32" - min_val = float("-3.94913") - max_val = float("1.48833") - mean = float("0.184232") - std = float("0.812302") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [144] - dtype = "float32" - min_val = float("0.632046") - max_val = float("5.81404") - mean = float("1.61523") - std = float("1.07746") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [144] - dtype = "float32" - min_val = float("0.0042963") - max_val = float("0.0490893") - mean = float("0.0122438") - std = float("0.00606181") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [144] - dtype = "float32" - min_val = float("-0.166662") - max_val = float("0.120288") - mean = float("-0.00996138") - std = float("0.0632725") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [144, 288, 1, 1] - dtype = "float32" - min_val = float("-0.0767773") - max_val = float("0.128459") - mean = float("-0.000375959") - std = float("0.00898229") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [288] - dtype = "float32" - min_val = float("-3.32769") - max_val = float("1.68356") - mean = float("-0.218033") - std = float("0.689316") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [288] - dtype = "float32" - min_val = float("0.644859") - max_val = float("3.56312") - mean = float("1.11815") - std = float("0.319838") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [288] - dtype = "float32" - 
min_val = float("0.00576419") - max_val = float("0.0664151") - mean = float("0.0156741") - std = float("0.00811449") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [288] - dtype = "float32" - min_val = float("-0.326028") - max_val = float("0.217421") - mean = float("0.0293076") - std = float("0.0862963") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [288, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0789327") - max_val = float("0.0726046") - mean = float("-0.000113775") - std = float("0.00477469") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [192] - dtype = "float32" - min_val = float("-2.19954") - max_val = float("1.29755") - mean = float("-0.833953") - std = float("0.661352") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [192] - dtype = "float32" - min_val = float("0.376407") - max_val = float("1.58497") - mean = float("0.954824") - std = float("0.21826") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [192] - dtype = "float32" - min_val = float("0.00123879") - max_val = float("0.0100177") - mean = float("0.00334585") - std = float("0.00120554") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [192] - dtype = "float32" - min_val = float("-0.305203") - max_val = float("0.270228") - mean = float("-0.0509251") - std = float("0.0850592") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - shape = [192, 144, 1, 1] - dtype = "float32" - min_val = float("-0.147457") - max_val = float("0.125776") - mean = float("-0.000856162") - std = float("0.0147595") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [144] - dtype = "float32" - min_val = float("-0.0128446") - max_val = float("0.00187189") - mean = float("-0.00516629") - std = float("0.00361178") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [144, 144, 1, 1] - dtype = "float32" - min_val = float("-0.231297") - max_val = float("0.222024") - mean = float("-0.0042663") - std = float("0.0116858") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [72] - dtype = "float32" - min_val = float("-1.59453") - max_val = float("0.880938") - mean = float("-0.105095") - std = float("0.484514") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [72] - dtype = "float32" - min_val = float("0.0969578") - max_val = float("2.4756") - mean = float("0.437551") - std = float("0.391975") - data = None - - -class Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [72] - dtype = "float32" - min_val = float("0.000120925") - max_val = float("0.00213528") - mean = float("0.000683241") - std = float("0.000440938") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [72] - dtype = "float32" - min_val = float("-0.0372676") - max_val = float("0.0322531") - mean = float("0.000230328") - std = float("0.0144331") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = [72, 72, 1, 1] - dtype = "float32" - min_val = float("-0.0558609") - max_val = float("0.0913162") - mean = float("-0.000461775") - std = float("0.00827768") - data = None - - -class 
Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [72] - dtype = "float32" - min_val = float("-1.59453") - max_val = float("0.880938") - mean = float("-0.105095") - std = float("0.484514") - data = None - - -class Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [72] - dtype = "float32" - min_val = float("0.351618") - max_val = float("4.88449") - mean = float("1.06806") - std = float("0.665872") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [72] - dtype = "float32" - min_val = float("0.00145915") - max_val = float("0.02274") - mean = float("0.00638047") - std = float("0.00330097") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [72] - dtype = "float32" - min_val = float("-0.0780665") - max_val = float("0.116729") - mean = float("0.0070792") - std = float("0.0386135") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0823999") - max_val = float("0.109201") - mean = float("-0.000361883") - std = float("0.00694661") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [72] - dtype = "float32" - min_val = float("-3.9802") - max_val = float("-0.178539") - mean = float("-1.14275") - std = float("0.563232") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [72] - dtype = "float32" - min_val = float("0.761114") - max_val = float("2.01918") - mean = float("1.00695") - std = float("0.201531") - data = None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [72] - dtype = "float32" - min_val = float("0.0233433") - max_val = float("0.211234") - mean = float("0.0493984") - std = float("0.0278563") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [72] - dtype = "float32" - min_val = float("-3.35838") - max_val = float("0.64805") - mean = float("-0.172059") - std = float("0.442294") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0675785") - max_val = float("0.0898198") - mean = float("-0.000478771") - std = float("0.00806884") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [72] - dtype = "float32" - min_val = float("-1.40989") - max_val = float("0.81614") - mean = float("-0.0675083") - std = float("0.400837") - data = None - - -class Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [72] - dtype = "float32" - min_val = float("0.0959584") - max_val = float("1.55629") - mean = float("0.346688") - std = float("0.249308") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [72] - dtype = "float32" - min_val = float("0.000134639") - max_val = float("0.0029982") - mean = float("0.000643173") - std = float("0.000523054") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [72] - dtype = "float32" - min_val = float("-0.0562932") - max_val = float("0.0718023") - mean = float("0.00880714") - std = float("0.0284848") - data = None - - -class Program_weight_tensor_parameter_465: - name = "parameter_465" - shape = [72, 72, 1, 1] - dtype = "float32" - min_val = float("-0.059374") - max_val = float("0.0468202") - mean = float("-0.00060072") - 
std = float("0.00841895") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [72] - dtype = "float32" - min_val = float("-1.40989") - max_val = float("0.81614") - mean = float("-0.0675083") - std = float("0.400837") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [72] - dtype = "float32" - min_val = float("0.277615") - max_val = float("1.97277") - mean = float("0.861421") - std = float("0.350217") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [72] - dtype = "float32" - min_val = float("0.00246978") - max_val = float("0.0141959") - mean = float("0.00528593") - std = float("0.0022058") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [72] - dtype = "float32" - min_val = float("-0.214044") - max_val = float("0.181833") - mean = float("0.0221895") - std = float("0.0597059") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0447981") - max_val = float("0.040427") - mean = float("-0.000399935") - std = float("0.00701009") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [72] - dtype = "float32" - min_val = float("-2.73732") - max_val = float("1.92796") - mean = float("-1.14581") - std = float("0.582464") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [72] - dtype = "float32" - min_val = float("0.270445") - max_val = float("1.8443") - mean = float("0.891096") - std = float("0.213621") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [72] - dtype = "float32" - min_val = float("0.0120423") - max_val = float("0.0631383") - mean = float("0.0225737") - std = float("0.00801368") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [72] - dtype = "float32" - min_val = float("-0.570576") - max_val = float("0.529122") - mean = float("-0.0639033") - std = float("0.165627") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.066849") - max_val = float("0.0819528") - mean = float("-0.000501507") - std = float("0.00812549") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [72] - dtype = "float32" - min_val = float("-1.42901") - max_val = float("0.650624") - mean = float("-0.067694") - std = float("0.355642") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [72] - dtype = "float32" - min_val = float("0.0695595") - max_val = float("1.96318") - mean = float("0.298781") - std = float("0.2518") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [72] - dtype = "float32" - min_val = float("0.000165661") - max_val = float("0.00254699") - mean = float("0.000671496") - std = float("0.000381215") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [72] - dtype = "float32" - min_val = float("-0.0852135") - max_val = float("0.0739923") - mean = float("0.0145054") - std = float("0.0282282") - data = None - - -class Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [72, 72, 1, 1] - dtype = "float32" - min_val = float("-0.061587") - max_val 
= float("0.0555039") - mean = float("-0.00111145") - std = float("0.00915049") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [72] - dtype = "float32" - min_val = float("-1.42901") - max_val = float("0.650624") - mean = float("-0.067694") - std = float("0.355642") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [72] - dtype = "float32" - min_val = float("0.228885") - max_val = float("2.65725") - mean = float("0.654562") - std = float("0.342676") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [72] - dtype = "float32" - min_val = float("0.00221666") - max_val = float("0.0113837") - mean = float("0.00528644") - std = float("0.00192438") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [72] - dtype = "float32" - min_val = float("-0.0689213") - max_val = float("0.134873") - mean = float("0.0202346") - std = float("0.0478195") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0502643") - max_val = float("0.0385429") - mean = float("-0.000381721") - std = float("0.00699717") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [72] - dtype = "float32" - min_val = float("-1.72935") - max_val = float("1.81323") - mean = float("-0.953972") - std = float("0.484557") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape = [72] - dtype = "float32" - min_val = float("0.291338") - max_val = float("1.67102") - mean = float("0.890264") - std = float("0.154972") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [72] - dtype = "float32" - min_val = float("0.00650387") - max_val = float("0.0314521") - mean = float("0.0153011") - std = float("0.00489433") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [72] - dtype = "float32" - min_val = float("-0.371426") - max_val = float("0.24566") - mean = float("-0.0388813") - std = float("0.125483") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0883656") - max_val = float("0.0854602") - mean = float("-0.000434419") - std = float("0.00817123") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [72] - dtype = "float32" - min_val = float("-0.72078") - max_val = float("0.53883") - mean = float("-0.0116229") - std = float("0.315004") - data = None - - -class Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [72] - dtype = "float32" - min_val = float("0.0630193") - max_val = float("1.13622") - mean = float("0.304108") - std = float("0.18516") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [72] - dtype = "float32" - min_val = float("0.000596299") - max_val = float("0.0104308") - mean = float("0.00238262") - std = float("0.00179682") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [72] - dtype = "float32" - min_val = float("-0.0220883") - max_val = float("0.0767462") - mean = float("0.00800126") - std = float("0.0185227") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [72, 72, 1, 1] - 
dtype = "float32" - min_val = float("-0.10925") - max_val = float("0.0549459") - mean = float("-0.00157008") - std = float("0.0103222") - data = None - - -class Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [72] - dtype = "float32" - min_val = float("-0.72078") - max_val = float("0.53883") - mean = float("-0.0116229") - std = float("0.315004") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [72] - dtype = "float32" - min_val = float("0.195354") - max_val = float("1.27903") - mean = float("0.580385") - std = float("0.250193") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [72] - dtype = "float32" - min_val = float("0.00628037") - max_val = float("0.0320368") - mean = float("0.0150653") - std = float("0.00543441") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [72] - dtype = "float32" - min_val = float("-0.173879") - max_val = float("0.126666") - mean = float("0.0195401") - std = float("0.0459446") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.0489714") - max_val = float("0.0583292") - mean = float("-0.000497285") - std = float("0.00710222") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [72] - dtype = "float32" - min_val = float("-2.83412") - max_val = float("0.704945") - mean = float("-0.732679") - std = float("0.560659") - data = None - - -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [72] - dtype = "float32" - min_val = float("0.470798") - max_val = float("3.05163") - mean = float("1.0155") - std = float("0.305246") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [72] - dtype = "float32" - min_val = float("0.00353406") - max_val = float("0.0255573") - mean = float("0.0103341") - std = float("0.00485984") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [72] - dtype = "float32" - min_val = float("-0.318927") - max_val = float("0.403846") - mean = float("-0.0481203") - std = float("0.111077") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [72, 72, 3, 3] - dtype = "float32" - min_val = float("-0.140292") - max_val = float("0.149074") - mean = float("-0.000235178") - std = float("0.00845115") - data = None - - -class Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [72] - dtype = "float32" - min_val = float("-3.74716") - max_val = float("2.00205") - mean = float("0.294505") - std = float("0.800208") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [72] - dtype = "float32" - min_val = float("0.251599") - max_val = float("2.95243") - mean = float("0.505656") - std = float("0.340486") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [72] - dtype = "float32" - min_val = float("0.00662379") - max_val = float("0.0404531") - mean = float("0.0152965") - std = float("0.00696086") - data = None - - -class Program_weight_tensor_parameter_509: - name = "parameter_509" - shape = [72] - dtype = "float32" - min_val = float("-0.337189") - max_val = float("0.326696") - mean = float("-0.0315403") - std = float("0.110722") - data = None - - -class Program_weight_tensor_parameter_510: - name 
= "parameter_510" - shape = [72, 144, 1, 1] - dtype = "float32" - min_val = float("-0.140141") - max_val = float("0.0942677") - mean = float("-0.00109115") - std = float("0.0156176") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [72] - dtype = "float32" - min_val = float("-5.40533") - max_val = float("2.19476") - mean = float("0.480097") - std = float("1.18405") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [72] - dtype = "float32" - min_val = float("0.407901") - max_val = float("7.10951") - mean = float("1.68326") - std = float("1.34462") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [72] - dtype = "float32" - min_val = float("0.00341379") - max_val = float("0.0737263") - mean = float("0.0131538") - std = float("0.0117571") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [72] - dtype = "float32" - min_val = float("-0.254707") - max_val = float("0.243852") - mean = float("0.0114279") - std = float("0.121176") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [72, 144, 1, 1] - dtype = "float32" - min_val = float("-0.156741") - max_val = float("0.189555") - mean = float("-0.000167284") - std = float("0.0152826") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [144] - dtype = "float32" - min_val = float("-2.47799") - max_val = float("2.78512") - mean = float("-0.0256035") - std = float("0.855982") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [144] - dtype = "float32" - min_val = float("0.475945") - max_val = float("3.78899") - mean = float("0.948111") - std = float("0.382821") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [144] - dtype = "float32" - min_val = float("0.00505316") - max_val = float("0.222987") - mean = float("0.0272953") - std = float("0.0270536") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [144] - dtype = "float32" - min_val = float("-0.374849") - max_val = float("0.421126") - mean = float("-0.040238") - std = float("0.109009") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [144, 96, 3, 3] - dtype = "float32" - min_val = float("-0.107682") - max_val = float("0.0970517") - mean = float("-0.000288181") - std = float("0.00781599") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [96] - dtype = "float32" - min_val = float("-2.31785") - max_val = float("1.2856") - mean = float("-0.536223") - std = float("0.66831") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [96] - dtype = "float32" - min_val = float("0.363377") - max_val = float("2.87595") - mean = float("0.945544") - std = float("0.309798") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [96] - dtype = "float32" - min_val = float("0.000401939") - max_val = float("0.0066261") - mean = float("0.00193929") - std = float("0.00108543") - data = None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [96] - dtype = "float32" - min_val = float("-0.276558") - max_val = float("0.293873") - mean = float("0.032138") - std = float("0.0824672") - data = None - - -class 
Program_weight_tensor_parameter_525: - name = "parameter_525" - shape = [96, 72, 1, 1] - dtype = "float32" - min_val = float("-0.220262") - max_val = float("0.154706") - mean = float("-0.000674063") - std = float("0.0231789") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [72] - dtype = "float32" - min_val = float("-0.0194254") - max_val = float("-0.00185158") - mean = float("-0.00904749") - std = float("0.00476033") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [72, 72, 1, 1] - dtype = "float32" - min_val = float("-0.365295") - max_val = float("0.215913") - mean = float("-0.0105827") - std = float("0.0212646") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [36, 36, 1, 1] - dtype = "float32" - min_val = float("-0.13914") - max_val = float("0.0710278") - mean = float("-0.00122495") - std = float("0.0155312") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [36, 36, 3, 3] - dtype = "float32" - min_val = float("-0.0864069") - max_val = float("0.0715444") - mean = float("-0.000368309") - std = float("0.0121821") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [36, 36, 3, 3] - dtype = "float32" - min_val = float("-0.139936") - max_val = float("0.126155") - mean = float("-0.000357288") - std = float("0.0135344") - data = None - - -class 
Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [36, 36, 1, 1] - dtype = "float32" - min_val = float("-0.101119") - max_val = float("0.078908") - mean = float("-0.00143404") - std = float("0.0191301") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [36, 36, 3, 3] - dtype = "float32" - min_val = float("-0.0981736") - max_val = float("0.072635") - mean = float("-0.000893312") - std = float("0.0128138") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [36, 36, 3, 3] - dtype = "float32" - min_val = float("-0.138589") - max_val = float("0.124213") - mean = float("-0.000371635") - std = float("0.0148638") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [36, 72, 
1, 1] - dtype = "float32" - min_val = float("-0.174523") - max_val = float("0.146394") - mean = float("-0.0022875") - std = float("0.0249619") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [36] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [36, 72, 1, 1] - dtype = "float32" - min_val = float("-0.141728") - max_val = float("0.14303") - mean = float("-0.000661605") - std = float("0.0238487") - data = None - - -class Program_weight_tensor_parameter_568: - name = "parameter_568" - shape = [72] - dtype = "float32" - min_val = float("-1.42602") - max_val = float("3.23089") - mean = float("0.695169") - std = float("1.23395") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - shape = [72] - dtype = "float32" - min_val = float("1.06748") - max_val = float("4.16245") - mean = float("1.98371") - std = float("0.773369") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [72] - dtype = "float32" - min_val = float("0.463827") - max_val = float("20.7924") - mean = float("2.40174") - std = float("2.50279") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [72] - dtype = "float32" - min_val = float("-1.94543") - max_val = float("3.15499") - mean = float("-0.190338") - std = float("0.839007") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [72, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0880297") - max_val = float("0.132734") - mean = float("-0.000246572") - std = float("0.0130906") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [48, 24, 3, 3] - dtype = "float32" - min_val = float("-0.190405") - max_val = float("0.141206") - mean = float("-0.000379777") - std = float("0.021606") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_580: - name = "parameter_580" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [24, 24, 3, 3] - dtype = "float32" - min_val = float("-0.259253") - max_val = float("0.269373") - mean = float("-0.000405256") - std = float("0.03018") - data = None - - -class Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_584: - name = "parameter_584" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [24] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [24, 3, 3, 3] - dtype = "float32" - min_val = float("-0.195438") - max_val = float("0.245061") - mean = float("-0.000197873") - std = float("0.0621767") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt deleted file mode 100644 index 5a3b9f807..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -844261737f8230902d017b25798482fe873277e37f04757e32206af29ccc9250 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py deleted file mode 100644 index bc3dd4f0e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/model.py +++ /dev/null @@ -1,158 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2): - # 
pd_op.divide: (10164x2xf32) <- (10164x2xf32, 10164x1xf32) - divide_0 = paddle._C_ops.divide(data_1, data_2) - del data_1 - - # pd_op.shape64: (3xi64) <- (8x10164x68xf32) - shape64_0 = paddle._C_ops.shape64(data_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (8x10164x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_0, stack_0) - del data_0, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x10164x2xf32) <- (-1x-1x2xf32, 10164x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x10164x2xf32) <- (-1x-1x2xf32, 10164x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: 
([-1x10164x2xf32, -1x10164x2xf32]) <- (-1x10164x2xf32, -1x10164x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x10164x4xf32) <- ([-1x10164x2xf32, -1x10164x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.share_data_: (-1x10164x4xf32) <- (-1x10164x4xf32) - share_data__0 = concat_0.detach() - - # pd_op.multiply: (-1x10164x4xf32) <- (-1x10164x4xf32, 10164x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__0, data_2) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_2, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__0, - softmax_0, - split_1, - transpose_0, - ) - - return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py deleted file mode 100644 index 28198680e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/weight_meta.py +++ /dev/null @@ -1,7 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [1, 17, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt index 27bd82e0e..0eb7b765d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/graph_hash.txt @@ -1 +1 @@ -8d319f5ec2a187a0cbeb6b629bf54f2df054e934514d4a5042fa9c577597355f \ No newline at end of file +87f1c9d15791927678923354dcfc589bd225484f29e377f70153217772641dce \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py index 42b68473c..880246b79 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/input_meta.py @@ -1,68 +1,76 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [8, 10164] - dtype = "bool" - min_val = 0 - max_val = 2 + shape = [8, 1, 10164] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000676407") + std = float("0.025999") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [8, 10164, 4] - dtype = "float32" - min_val = float("-8.4998") - max_val = float("93.297") - mean = float("38.2803") - std = float("25.8039") - data = None + shape = [8, 1, 1] + dtype = "int32" + data = [0, 0, 0, 0, 0, 0, 0, 3] class Program_weight_tensor_data_2: name = "data_2" - shape = [8, 10164, 4] + shape = [8, 10164] dtype = "float32" - min_val = float("2.52804") - max_val = float("79.2") - mean = float("34.6196") - std = float("18.1525") + max_val = float("1.0") + mean = float("0.000676407") + std = float("0.025999") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [8, 10164, 4] + shape = [8, 1, 4] dtype = "float32" - max_val = float("0.941962") - mean = float("0.000134148") - std = float("0.0107789") - data = None + data = [ + 174.715, + 140.8, + 405.956, + 457.6, + 375.985, + 345.193, + 411.639, + 372.906, + 317.49, + 292.0, + 450.008, + 388.0, + 287.439, + 452.211, + 340.211, + 490.947, + 352.0, + 296.267, + 584.17, + 384.267, + 222.933, + 194.723, + 332.444, + 275.609, + 80.8974, + 117.694, + 116.531, + 143.688, + 124.847, + 201.813, + 433.498, + 633.6, + ] class Program_weight_tensor_data_4: name = "data_4" - shape = [] - dtype = "float32" - data = [43.6314] - - 
-class Program_weight_tensor_data_5: - name = "data_5" - shape = [8, 10164, 68] - dtype = "float32" - min_val = float("-7.06467") - max_val = float("15.2111") - mean = float("2.71498e-05") - std = float("1.60341") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [10164, 2] + shape = [8, 10164, 4] dtype = "float32" - min_val = float("0.5") - max_val = float("87.5") - mean = float("38.2381") - std = float("25.2012") + min_val = float("-271.994") + max_val = float("993.136") + mean = float("352.517") + std = float("213.87") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py index adbc73554..b0b1964b8 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_2/model.py @@ -5,505 +5,283 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (8x10164xi32) <- (8x10164xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.unsqueeze: (8x10164x1xi32) <- (8x10164xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] - - # pd_op.tile: (8x10164x4xi32) <- (8x10164x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 - - # pd_op.cast: (8x10164x4xb) <- (8x10164x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 - - # pd_op.masked_select: (-1xf32) <- (8x10164x4xf32, 8x10164x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) - - # pd_op.masked_select: (-1xf32) <- (8x10164x4xf32, 8x10164x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 - - # pd_op.sum: (8x10164xf32) <- (8x10164x4xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 - - # pd_op.masked_select: (-1xf32) <- (8x10164xf32, 8x10164xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 - - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) - - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) - - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) - - # pd_op.full: (1xi32) <- () + def forward(self, data_0, data_1, data_2, data_3, data_4): + # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, 
-1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + # pd_op.argmax: (8x10164xi64) <- (8x1x10164xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, split_4) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) - - # pd_op.full: (1xf32) <- () + # pd_op.full: (1xf64) <- () full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 - - # pd_op.full: (1xf32) <- () + # pd_op.full: (1xf64) <- () full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + [1], float("8"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 + # pd_op.arange: (8xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="int32") + del full_1, full_2, full_3 - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] - # pd_op.subtract: 
(-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + # pd_op.unsqueeze: (8x1xi32) <- (8xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(arange_0, full_int_array_0) + del arange_0 # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( + full_4 = paddle._C_ops.full( [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 + # pd_op.scale: (8x1xi32) <- (8x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(unsqueeze_0, full_4, float("0"), True) + del unsqueeze_0 - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 + # pd_op.cast: (8x1xi64) <- (8x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 + # pd_op.add: (8x10164xi64) <- (8x10164xi64, 8x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + # pd_op.flatten: (8xi32) <- (8x1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_1, 0, 2) + del data_1 - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) + # pd_op.flatten: (81312xi64) <- (8x10164xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) + # pd_op.gather: (81312xi32) <- (8xi32, 81312xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_5) + del flatten_0 - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [8, 10164] - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + # pd_op.reshape: (8x10164xi32) <- (81312xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_1) + del full_int_array_1, gather_0 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + # pd_op.full: (xf32) <- () + full_6 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + # pd_op.greater_than: (8x10164xb) <- (8x10164xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_2, full_6) + del data_2, full_6 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("4"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + # pd_op.full_like: (8x10164xi32) <- (8x10164xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_7, paddle.int32, paddle.framework._current_expected_place() + ) + del full_7 - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + # 
pd_op.where: (8x10164xi32) <- (8x10164xb, 8x10164xi32, 8x10164xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.reshape: (8x4xf32) <- (8x1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_3, full_int_array_2) + del full_int_array_2 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 + # pd_op.gather: (81312x4xf32) <- (8x4xf32, 81312xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_5) + del flatten_1, full_5, reshape_2 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_3 = [8, 10164, 4] - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + # pd_op.reshape: (8x10164x4xf32) <- (81312x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_3) + del full_int_array_3, gather_1 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("5"), paddle.int32, paddle.core.CPUPlace() + ) - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 + # pd_op.one_hot: (8x10164x5xf32) <- (8x10164xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_8, where_0.dtype), full_8 + ) + del full_8 - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + # pd_op.full: (4xi64) <- () + full_9 = paddle._C_ops.full( + [4], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (4xi64) <- (4xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_9, + [4], + paddle.int64, + [float("0"), float("1"), float("2"), float("3")], + paddle.framework._current_expected_place(), + ) + del full_9 - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) + # pd_op.index_select: (8x10164x4xf32) <- (8x10164x5xf32, 4xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 - # pd_op.unsqueeze: (8x10164x1xb) <- (8x10164xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] - # pd_op.cast: (8x10164x1xi32) <- (8x10164x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 + # pd_op.unsqueeze: (8x1x1x4xf32) <- (8x1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_3, full_int_array_4) + del data_3 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 68] + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [1] - # pd_op.tile: (8x10164x68xi32) <- (8x10164x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 + # pd_op.unsqueeze: (8x1x10164x4xf32) <- (8x10164x4xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_4, full_int_array_5) + del data_4, full_int_array_5 - # pd_op.cast: (8x10164x68xb) <- (8x10164x68xi32) - cast_3 
= paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [0] - # pd_op.masked_select: (-1xf32) <- (8x10164x68xf32, 8x10164x68xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_6, full_int_array_4, [1], [] + ) - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 17] + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] - # pd_op.reshape: (-1x4x17xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 + # pd_op.slice: (8x1x1x2xf32) <- (8x1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del unsqueeze_1 - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() + # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_6, full_int_array_4, [1], [] ) + del full_int_array_6 - # pd_op.split_with_num: ([8x10164x2xf32, 8x10164x2xf32]) <- (8x10164x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 + # pd_op.slice: (8x1x10164x2xf32) <- (8x1x10164x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_2, [3], full_int_array_4, full_int_array_7, [1], [] + ) + del full_int_array_4, full_int_array_7, unsqueeze_2 - # builtin.split: (8x10164x2xf32, 8x10164x2xf32) <- ([8x10164x2xf32, 8x10164x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 + # pd_op.maximum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - # pd_op.subtract: (8x10164x2xf32) <- (10164x2xf32, 8x10164x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 + # pd_op.minimum: (8x1x10164x2xf32) <- (8x1x1x2xf32, 8x1x10164x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - # pd_op.subtract: (8x10164x2xf32) <- (8x10164x2xf32, 10164x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 + # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - # builtin.combine: ([8x10164x2xf32, 8x10164x2xf32]) <- (8x10164x2xf32, 8x10164x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 - - # pd_op.concat: (8x10164x4xf32) <- ([8x10164x2xf32, 8x10164x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("15.99"), paddle.float32, paddle.core.CPUPlace() + full_11 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.clip: (8x10164x4xf32) <- (8x10164x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_1, full_7) - del concat_0, full_7 + # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_10, full_11) + del subtract_0 - # 
pd_op.masked_select: (-1xf32) <- (8x10164x4xf32, 8x10164x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 + # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_0, False, False) + del clip_0 - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 + # pd_op.subtract: (8x1x1x2xf32) <- (8x1x1x2xf32, 8x1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) + # pd_op.clip: (8x1x1x2xf32) <- (8x1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_10, full_11) + del subtract_1 - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 + # pd_op.prod: (8x1x1xf32) <- (8x1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_0, False, False) + del clip_1 - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + # pd_op.subtract: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 8x1x10164x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + # pd_op.clip: (8x1x10164x2xf32) <- (8x1x10164x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_10, full_11) + del full_10, full_11, subtract_2 - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 + # pd_op.prod: (8x1x10164xf32) <- (8x1x10164x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_0, False, False) + del clip_2 - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + # pd_op.add: (8x1x10164xf32) <- (8x1x1xf32, 8x1x10164xf32) + add_1 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("0"), True) - del cast_4 + # pd_op.subtract: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + subtract_3 = paddle._C_ops.subtract(add_1, prod_0) + del add_1 - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 + # pd_op.scale: (8x1x10164xf32) <- (8x1x10164xf32, 1xf32) + scale_1 = paddle._C_ops.scale(subtract_3, full_4, float("1e-09"), True) + del full_4, subtract_3 - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) + # pd_op.divide: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_1) + del prod_0, scale_1 - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 - ) + # pd_op.multiply: (8x1x10164xf32) <- (8x1x10164xf32, 8x1x10164xf32) + multiply_1 = paddle._C_ops.multiply(divide_0, data_0) + del data_0, divide_0 - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = 
paddle._C_ops.multiply(squeeze_0, subtract_14) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("0"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # pd_op.cross_entropy_with_softmax: (-1x4x17xf32, -1x4x1xf32) <- (-1x4x17xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [-2] - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) + # pd_op.max: (8x10164xf32) <- (8x1x10164xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_8, False) + del full_int_array_8, multiply_1 - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( - abs_0, - add_0, - add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) + # pd_op.unsqueeze: (8x10164x1xf32) <- (8x10164xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(max_0, full_int_array_0) + del full_int_array_0, max_0 - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) + # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_3) + del index_select_0, unsqueeze_3, where_0 + + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt index cf9cecf24..bcf85f70d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/graph_hash.txt @@ -1 +1 @@ 
-b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file +79d36f8b3b83773115a0eec5c7e5e317486cffc279d107ab5b11f2f7c791fb82 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py index d14444825..2ac13c044 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/input_meta.py @@ -1,73 +1,92 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [22] + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "int64" - data = [22] + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_2: name = "data_2" - shape = [] - dtype = "int64" - data = [44] + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_3: name = "data_3" - shape = [] - dtype = "int64" - data = [44] + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_4: name = "data_4" - shape = [] - dtype = "int64" - data = [88] + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_5: name = "data_5" - shape = [] - dtype = "int64" - data = [88] + shape = [1] + dtype = "float32" + data = [1.00237] class Program_weight_tensor_data_6: name = "data_6" - shape = [8, 576, 22, 22] + shape = [1] dtype = "float32" - min_val = float("-0.278465") - max_val = float("11.3441") - mean = float("0.308556") - std = float("0.659097") - data = None + data = [1.00237] class Program_weight_tensor_data_7: name = "data_7" - shape = [8, 288, 44, 44] + shape = [1] dtype = "float32" - min_val = float("-0.278465") - max_val = float("12.5041") - mean = float("0.419507") - std = float("0.74104") - data = None + data = [1.00237] class Program_weight_tensor_data_8: name = "data_8" - shape = [8, 144, 88, 88] + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [1.00237] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [2, 3, 640, 640] dtype = "float32" - min_val = float("-0.278465") - max_val = float("17.9308") - mean = float("0.521464") - std = float("0.797879") + max_val = float("1.0") + mean = float("0.471598") + std = float("0.270715") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py index 4f3efca14..530823f43 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/model.py @@ -61,6 +61,540 @@ def forward( parameter_51, parameter_52, parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + 
parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + 
parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + 
parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, data_0, data_1, data_2, @@ -70,413 +604,4473 @@ def forward( data_6, data_7, data_8, + data_9, + data_10, + data_11, + data_12, ): - # pd_op.full: (1xi64) <- () + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x3x-1x-1xf32, 24x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_12, parameter_587, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_12, parameter_587 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_586, + parameter_585, + parameter_584, + parameter_583, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_0, parameter_583, parameter_584, parameter_585, parameter_586 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 + + # pd_op.conv2d: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32, 24x24x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_0, parameter_582, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_582, swish_0 + + # pd_op.batch_norm_: (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32, -1xui8) <- (-1x24x-1x-1xf32, 24xf32, 24xf32, 24xf32, 24xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_581, + parameter_580, + parameter_579, + parameter_578, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_1, parameter_578, parameter_579, parameter_580, parameter_581 + + # pd_op.swish: (-1x24x-1x-1xf32) <- (-1x24x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 + 
+ # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x24x-1x-1xf32, 48x24x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_1, parameter_577, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_577, swish_1 + + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_576, + parameter_575, + parameter_574, + parameter_573, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_2, parameter_573, parameter_574, parameter_575, parameter_576 + + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x48x-1x-1xf32, 72x48x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_2, parameter_572, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_572, swish_2 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_571, + parameter_570, + parameter_569, + parameter_568, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_3, parameter_568, parameter_569, parameter_570, parameter_571 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x72x-1x-1xf32, 36x72x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_567, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_567 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_566, + parameter_565, + parameter_564, + parameter_563, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_4, parameter_563, parameter_564, parameter_565, parameter_566 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x72x-1x-1xf32, 36x72x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_3, parameter_562, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_562, swish_3 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_561, + parameter_560, + parameter_559, + parameter_558, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_5, parameter_558, parameter_559, parameter_560, parameter_561 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_557, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_557 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_556, + parameter_555, + parameter_554, + parameter_553, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_6, parameter_553, parameter_554, parameter_555, parameter_556 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_6, parameter_552, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_552 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_551, + parameter_550, + parameter_549, + parameter_548, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_7, parameter_548, parameter_549, parameter_550, parameter_551 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_6, parameter_547, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_547, swish_6 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_546, + parameter_545, + parameter_544, + parameter_543, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_8, parameter_543, parameter_544, parameter_545, parameter_546 + + # pd_op.multiply: (-1x36x-1x-1xf32) <- (1xf32, -1x36x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del batch_norm__48, data_0 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, 
swish_7 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_542, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_542 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_541, + parameter_540, + parameter_539, + parameter_538, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_9, parameter_538, parameter_539, parameter_540, parameter_541 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_537, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_537 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_536, + parameter_535, + parameter_534, + parameter_533, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_10, parameter_533, parameter_534, parameter_535, parameter_536 + + # pd_op.conv2d: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, 36x36x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_532, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_532, swish_8 + + # pd_op.batch_norm_: (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32, -1xui8) <- (-1x36x-1x-1xf32, 36xf32, 36xf32, 36xf32, 36xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_531, + parameter_530, + parameter_529, + parameter_528, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_11, parameter_528, parameter_529, parameter_530, parameter_531 + + # pd_op.multiply: (-1x36x-1x-1xf32) <- (1xf32, -1x36x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del batch_norm__66, data_1 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 + + # pd_op.swish: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 + + # pd_op.add: (-1x36x-1x-1xf32) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x36x-1x-1xf32, -1x36x-1x-1xf32]) <- (-1x36x-1x-1xf32, -1x36x-1x-1xf32) + combine_0 = [swish_4, add_3] + del add_3, 
swish_4 + + # pd_op.concat: (-1x72x-1x-1xf32) <- ([-1x36x-1x-1xf32, -1x36x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.mean: (-1x72x1x1xf32) <- (-1x72x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (-1x72x1x1xf32) <- (-1x72x1x1xf32, 72x72x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + mean_0, parameter_527, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_0, parameter_527 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x72x1x1xf32) <- (72xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_526, full_int_array_1) + del parameter_526 + + # pd_op.add: (-1x72x1x1xf32) <- (-1x72x1x1xf32, 1x72x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_12, reshape_0) + del conv2d_12, reshape_0 + + # pd_op.hardsigmoid: (-1x72x1x1xf32) <- (-1x72x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_4, float("0.166667"), float("0.5") + ) + del add_4 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x1x1xf32) + multiply_2 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x72x-1x-1xf32, 96x72x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + multiply_2, parameter_525, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_2, parameter_525 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_524, + parameter_523, + parameter_522, + parameter_521, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_13, parameter_521, parameter_522, parameter_523, parameter_524 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x96x-1x-1xf32, 144x96x3x3xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_10, parameter_520, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_520, swish_10 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_519, + parameter_518, + parameter_517, + parameter_516, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_14, parameter_516, parameter_517, parameter_518, parameter_519 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__78) + del batch_norm__78 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x144x-1x-1xf32, 72x144x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + swish_11, parameter_515, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_515 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 
72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_514, + parameter_513, + parameter_512, + parameter_511, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_15, parameter_511, parameter_512, parameter_513, parameter_514 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__84) + del batch_norm__84 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x144x-1x-1xf32, 72x144x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_11, parameter_510, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_510, swish_11 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_509, + parameter_508, + parameter_507, + parameter_506, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_16, parameter_506, parameter_507, parameter_508, parameter_509 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_505, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_505 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_504, + parameter_503, + parameter_502, + parameter_501, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_17, parameter_501, parameter_502, parameter_503, parameter_504 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_500, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_500 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_499, + parameter_498, + parameter_497, + parameter_496, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_18, parameter_496, parameter_497, parameter_498, parameter_499 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_19 = 
paddle._C_ops.conv2d( + swish_14, parameter_495, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_495, swish_14 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_494, + parameter_493, + parameter_492, + parameter_491, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_19, parameter_491, parameter_492, parameter_493, parameter_494 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_2, batch_norm__108) + del batch_norm__108, data_2 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_5 = paddle._C_ops.add(batch_norm__102, multiply_3) + del batch_norm__102, multiply_3 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_15 = paddle._C_ops.swish(add_5) + del add_5 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_6 = paddle._C_ops.add(swish_13, swish_15) + del swish_13, swish_15 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + add_6, parameter_490, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_490 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_489, + parameter_488, + parameter_487, + parameter_486, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_20, parameter_486, parameter_487, parameter_488, parameter_489 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_485, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_485 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_484, + parameter_483, + parameter_482, + parameter_481, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_21, parameter_481, parameter_482, parameter_483, parameter_484 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_16, parameter_480, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_480, swish_16 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__126, + 
batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_479, + parameter_478, + parameter_477, + parameter_476, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_22, parameter_476, parameter_477, parameter_478, parameter_479 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del batch_norm__126, data_3 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_8 = paddle._C_ops.add(add_6, swish_17) + del add_6, swish_17 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_475, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_475 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_474, + parameter_473, + parameter_472, + parameter_471, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_23, parameter_471, parameter_472, parameter_473, parameter_474 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_470, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_470 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_469, + parameter_468, + parameter_467, + parameter_466, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_24, parameter_466, parameter_467, parameter_468, parameter_469 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_18, parameter_465, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_465, swish_18 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_464, + parameter_463, + parameter_462, + parameter_461, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + 
False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_25, parameter_461, parameter_462, parameter_463, parameter_464 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del batch_norm__144, data_4 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_460, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_460 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_459, + parameter_458, + parameter_457, + parameter_456, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_26, parameter_456, parameter_457, parameter_458, parameter_459 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_20, parameter_455, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_455 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_454, + parameter_453, + parameter_452, + parameter_451, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_27, parameter_451, parameter_452, parameter_453, parameter_454 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_20, parameter_450, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_450, swish_20 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_449, + parameter_448, + parameter_447, + parameter_446, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_28, parameter_446, parameter_447, parameter_448, parameter_449 + + # pd_op.multiply: (-1x72x-1x-1xf32) <- (1xf32, -1x72x-1x-1xf32) + multiply_6 = 
paddle._C_ops.multiply(data_5, batch_norm__162) + del batch_norm__162, data_5 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 + + # builtin.combine: ([-1x72x-1x-1xf32, -1x72x-1x-1xf32]) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + combine_1 = [swish_12, add_12] + del add_12, swish_12 + + # pd_op.concat: (-1x144x-1x-1xf32) <- ([-1x72x-1x-1xf32, -1x72x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (-1x144x1x1xf32) <- (-1x144x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) + conv2d_29 = paddle._C_ops.conv2d( + mean_1, parameter_445, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_1, parameter_445 + + # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_444, full_int_array_1) + del parameter_444 + + # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) + add_13 = paddle._C_ops.add(conv2d_29, reshape_1) + del conv2d_29, reshape_1 + + # pd_op.hardsigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_13, float("0.166667"), float("0.5") + ) + del add_13 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) + multiply_7 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x144x-1x-1xf32, 192x144x1x1xf32) + conv2d_30 = paddle._C_ops.conv2d( + multiply_7, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_7, parameter_443 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_30, parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x192x-1x-1xf32, 288x192x3x3xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_22, parameter_438, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_437, + parameter_436, + parameter_435, + parameter_434, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, 
None, None, None), + ) + del conv2d_31, parameter_434, parameter_435, parameter_436, parameter_437 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__174) + del batch_norm__174 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) + conv2d_32 = paddle._C_ops.conv2d( + swish_23, parameter_433, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_433 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_432, + parameter_431, + parameter_430, + parameter_429, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_32, parameter_429, parameter_430, parameter_431, parameter_432 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__180) + del batch_norm__180 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_23, parameter_428, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_428, swish_23 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_427, + parameter_426, + parameter_425, + parameter_424, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_33, parameter_424, parameter_425, parameter_426, parameter_427 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_423, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_423 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_422, + parameter_421, + parameter_420, + parameter_419, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_34, parameter_419, parameter_420, parameter_421, parameter_422 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__192) + del batch_norm__192 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_26, parameter_418, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_418 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 
144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_417, + parameter_416, + parameter_415, + parameter_414, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_35, parameter_414, parameter_415, parameter_416, parameter_417 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_26, parameter_413, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_413, swish_26 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_412, + parameter_411, + parameter_410, + parameter_409, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_36, parameter_409, parameter_410, parameter_411, parameter_412 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_6, batch_norm__204) + del batch_norm__204, data_6 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_14 = paddle._C_ops.add(batch_norm__198, multiply_8) + del batch_norm__198, multiply_8 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_14) + del add_14 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_15 = paddle._C_ops.add(swish_25, swish_27) + del swish_25, swish_27 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_37 = paddle._C_ops.conv2d( + add_15, parameter_408, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_408 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_407, + parameter_406, + parameter_405, + parameter_404, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_37, parameter_404, parameter_405, parameter_406, parameter_407 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__210) + del batch_norm__210 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_38 = paddle._C_ops.conv2d( + swish_28, parameter_403, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_403 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + 
parameter_402, + parameter_401, + parameter_400, + parameter_399, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_38, parameter_399, parameter_400, parameter_401, parameter_402 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + swish_28, parameter_398, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_398, swish_28 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_397, + parameter_396, + parameter_395, + parameter_394, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_39, parameter_394, parameter_395, parameter_396, parameter_397 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_7, batch_norm__222) + del batch_norm__222, data_7 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_16 = paddle._C_ops.add(batch_norm__216, multiply_9) + del batch_norm__216, multiply_9 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_29 = paddle._C_ops.swish(add_16) + del add_16 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_17 = paddle._C_ops.add(add_15, swish_29) + del add_15, swish_29 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + add_17, parameter_393, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_393 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_392, + parameter_391, + parameter_390, + parameter_389, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_40, parameter_389, parameter_390, parameter_391, parameter_392 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_388, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_388 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_387, + parameter_386, + parameter_385, + parameter_384, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, 
None), + ) + del conv2d_41, parameter_384, parameter_385, parameter_386, parameter_387 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_383, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_383, swish_30 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_382, + parameter_381, + parameter_380, + parameter_379, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_42, parameter_379, parameter_380, parameter_381, parameter_382 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_10 = paddle._C_ops.multiply(data_8, batch_norm__240) + del batch_norm__240, data_8 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_18 = paddle._C_ops.add(batch_norm__234, multiply_10) + del batch_norm__234, multiply_10 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_31 = paddle._C_ops.swish(add_18) + del add_18 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_19 = paddle._C_ops.add(add_17, swish_31) + del add_17, swish_31 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + add_19, parameter_378, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_378 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_377, + parameter_376, + parameter_375, + parameter_374, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_43, parameter_374, parameter_375, parameter_376, parameter_377 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_32, parameter_373, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_373 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_372, + parameter_371, + parameter_370, + parameter_369, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_44, parameter_369, parameter_370, parameter_371, parameter_372 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_32, parameter_368, [1, 
1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_368, swish_32 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_367, + parameter_366, + parameter_365, + parameter_364, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_45, parameter_364, parameter_365, parameter_366, parameter_367 + + # pd_op.multiply: (-1x144x-1x-1xf32) <- (1xf32, -1x144x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del batch_norm__258, data_9 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_21 = paddle._C_ops.add(add_19, swish_33) + del add_19, swish_33 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + combine_2 = [swish_24, add_21] + del add_21, swish_24 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (-1x288x1x1xf32) <- (-1x288x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) + + # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) + conv2d_46 = paddle._C_ops.conv2d( + mean_2, parameter_363, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_2, parameter_363 + + # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_362, full_int_array_1) + del parameter_362 + + # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) + add_22 = paddle._C_ops.add(conv2d_46, reshape_2) + del conv2d_46, reshape_2 + + # pd_op.hardsigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_22, float("0.166667"), float("0.5") + ) + del add_22 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) + multiply_12 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x288x-1x-1xf32, 384x288x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + multiply_12, parameter_361, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_12, parameter_361 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_360, + parameter_359, + parameter_358, + parameter_357, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_47, parameter_357, parameter_358, parameter_359, parameter_360 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + 
swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x384x-1x-1xf32, 576x384x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_34, parameter_356, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_356 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_355, + parameter_354, + parameter_353, + parameter_352, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_48, parameter_352, parameter_353, parameter_354, parameter_355 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__270) + del batch_norm__270 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) + conv2d_49 = paddle._C_ops.conv2d( + swish_35, parameter_351, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_351 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_350, + parameter_349, + parameter_348, + parameter_347, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_49, parameter_347, parameter_348, parameter_349, parameter_350 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__276) + del batch_norm__276 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_35, parameter_346, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_346, swish_35 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_345, + parameter_344, + parameter_343, + parameter_342, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_50, parameter_342, parameter_343, parameter_344, parameter_345 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_341, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_341 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda 
x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_340, + parameter_339, + parameter_338, + parameter_337, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_51, parameter_337, parameter_338, parameter_339, parameter_340 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__288) + del batch_norm__288 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_38, parameter_336, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_336 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_335, + parameter_334, + parameter_333, + parameter_332, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_52, parameter_332, parameter_333, parameter_334, parameter_335 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_38, parameter_331, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_331, swish_38 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_330, + parameter_329, + parameter_328, + parameter_327, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_53, parameter_327, parameter_328, parameter_329, parameter_330 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (1xf32, -1x288x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_10, batch_norm__300) + del batch_norm__300, data_10 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_23 = paddle._C_ops.add(batch_norm__294, multiply_13) + del batch_norm__294, multiply_13 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_23) + del add_23 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_24 = paddle._C_ops.add(swish_37, swish_39) + del swish_37, swish_39 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_54 = paddle._C_ops.conv2d( + add_24, parameter_326, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_326 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_325, + parameter_324, + parameter_323, + parameter_322, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if 
isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_54, parameter_322, parameter_323, parameter_324, parameter_325 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__306) + del batch_norm__306 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_40, parameter_321, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_321 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_320, + parameter_319, + parameter_318, + parameter_317, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_55, parameter_317, parameter_318, parameter_319, parameter_320 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_40, parameter_316, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_316, swish_40 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_315, + parameter_314, + parameter_313, + parameter_312, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_56, parameter_312, parameter_313, parameter_314, parameter_315 + + # pd_op.multiply: (-1x288x-1x-1xf32) <- (1xf32, -1x288x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_11, batch_norm__318) + del batch_norm__318, data_11 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_25 = paddle._C_ops.add(batch_norm__312, multiply_14) + del batch_norm__312, multiply_14 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_25) + del add_25 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_26 = paddle._C_ops.add(add_24, swish_41) + del add_24, swish_41 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_3 = [swish_36, add_26] + del add_26, swish_36 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (-1x576x1x1xf32) <- (-1x576x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 + + # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + mean_3, parameter_311, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del mean_3, parameter_311 + + # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_310, full_int_array_1) + del parameter_310 + + # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) + add_27 = paddle._C_ops.add(conv2d_57, 
reshape_3) + del conv2d_57, reshape_3 + + # pd_op.hardsigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_27, float("0.166667"), float("0.5") + ) + del add_27 + + # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) + multiply_15 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x576x-1x-1xf32, 768x576x1x1xf32) + conv2d_58 = paddle._C_ops.conv2d( + multiply_15, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_15, parameter_309 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_58, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__324) + del batch_norm__324 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x768x-1x-1xf32, 288x768x1x1xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_42, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_59, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__330) + del batch_norm__330 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x768x-1x-1xf32, 288x768x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_42, parameter_299, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_42 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_60, parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + swish_44, parameter_294, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294, swish_44 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_61, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__342) + del batch_norm__342 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_45, parameter_289, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_62, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_45 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_63, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__348, batch_norm__354) + del batch_norm__348, batch_norm__354 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_46 = paddle._C_ops.swish(add_28) + del add_28 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [5, 5] + + # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_46, + full_int_array_2, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [9, 9] + + # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_46, + full_int_array_3, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_3 + + # pd_op.full_int_array: (2xi64) <- 
() + full_int_array_4 = [13, 13] + + # pd_op.pool2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_46, + full_int_array_4, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_4 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_4 = [swish_46, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_46 + + # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_4, full_0) + del combine_4 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x1152x-1x-1xf32, 288x1152x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_6, parameter_279, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_279 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_64, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_47, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_47 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_65, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + 
del conv2d_66, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_48, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264, swish_48 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_67, parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_29 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + del batch_norm__372, batch_norm__378 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_29) + del add_29 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_5 = [swish_43, swish_49] + del swish_43, swish_49 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + concat_7, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_259 + + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_68, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__384) + del batch_norm__384 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x576x-1x-1xf32, 288x576x1x1xf32) + conv2d_69 = paddle._C_ops.conv2d( + swish_50, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_69, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__390) + del 
batch_norm__390 + + # pd_op.nearest_interp: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_51, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_51 + + # builtin.combine: ([-1x288x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x384x-1x-1xf32) + combine_6 = [nearest_interp_0, swish_34] + del nearest_interp_0, swish_34 + + # pd_op.concat: (-1x672x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x672x-1x-1xf32, 144x672x1x1xf32) + conv2d_70 = paddle._C_ops.conv2d( + concat_8, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_70, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x672x-1x-1xf32, 144x672x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + concat_8, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_244 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_71, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__402) + del batch_norm__402 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + swish_53, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239, swish_53 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_72, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (-1x144x-1x-1xf32) <- 
(-1x144x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__408) + del batch_norm__408 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_54, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_234 - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_73, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_54, parameter_229, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229, swish_54 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_74, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__414, batch_norm__420) + del batch_norm__414, batch_norm__420 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_55 = paddle._C_ops.swish(add_30) + del add_30 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_75 = paddle._C_ops.conv2d( + swish_55, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_55 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_75, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_75, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__426) + del batch_norm__426 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_76 = paddle._C_ops.conv2d( + swish_56, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- 
(-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + swish_56, parameter_214, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214, swish_56 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_31 = paddle._C_ops.add(batch_norm__432, batch_norm__438) + del batch_norm__432, batch_norm__438 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_57 = paddle._C_ops.swish(add_31) + del add_31 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + combine_7 = [swish_52, swish_57] + del swish_52, swish_57 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + concat_9, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_209 + + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_58 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x288x-1x-1xf32, 144x288x1x1xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_58, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_79, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.nearest_interp: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_59, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_59 + + # builtin.combine: ([-1x144x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x192x-1x-1xf32) + combine_8 = [nearest_interp_1, swish_22] + del nearest_interp_1, swish_22 + + # pd_op.concat: (-1x336x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x336x-1x-1xf32, 72x336x1x1xf32) + conv2d_80 = paddle._C_ops.conv2d( + concat_10, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_60 = paddle._C_ops.swish(batch_norm__456) + del batch_norm__456 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x336x-1x-1xf32, 72x336x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + concat_10, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_194 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__462) + del batch_norm__462 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_61, parameter_189, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189, swish_61 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_62, parameter_184, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_62, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179, swish_62 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_32 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_32) + del add_32 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x3x3xf32) + conv2d_85 = paddle._C_ops.conv2d( + swish_63, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174, swish_63 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 
72x72x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_64, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.conv2d: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, 72x72x1x1xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_64, parameter_164, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164, swish_64 + + # pd_op.batch_norm_: (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32, -1xui8) <- (-1x72x-1x-1xf32, 72xf32, 72xf32, 72xf32, 72xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.add: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__492, batch_norm__498) + del batch_norm__492, batch_norm__498 + + # pd_op.swish: (-1x72x-1x-1xf32) <- (-1x72x-1x-1xf32) + swish_65 = paddle._C_ops.swish(add_33) + del add_33 + + # builtin.combine: ([-1x72x-1x-1xf32, -1x72x-1x-1xf32]) <- (-1x72x-1x-1xf32, -1x72x-1x-1xf32) + combine_9 = [swish_60, swish_65] + del swish_60, swish_65 + + # pd_op.concat: (-1x144x-1x-1xf32) <- ([-1x72x-1x-1xf32, -1x72x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + concat_11, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_159 + + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_88, parameter_155, parameter_156, parameter_157, parameter_158 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__504) + del batch_norm__504 - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( 
- [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_89 = paddle._C_ops.conv2d( + swish_66, parameter_154, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_154 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_89, parameter_150, parameter_151, parameter_152, parameter_153 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 + # builtin.combine: ([-1x144x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x288x-1x-1xf32) + combine_10 = [swish_67, swish_58] + del swish_58, swish_67 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 + # pd_op.concat: (-1x432x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x432x-1x-1xf32, 144x432x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + concat_12, parameter_149, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_145, parameter_146, parameter_147, parameter_148 - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x432x-1x-1xf32, 144x432x1x1xf32) + conv2d_91 = 
paddle._C_ops.conv2d( + concat_12, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_144 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_140, parameter_141, parameter_142, parameter_143 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_69 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_92 = paddle._C_ops.conv2d( + swish_69, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_69 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_135, parameter_136, parameter_137, parameter_138 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_70, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda 
out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_130, parameter_131, parameter_132, parameter_133 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_70, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_70 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_125, parameter_126, parameter_127, parameter_128 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_34 = paddle._C_ops.add(batch_norm__534, batch_norm__540) + del batch_norm__534, batch_norm__540 - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_34) + del add_34 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_71, parameter_124, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124, swish_71 - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_120, parameter_121, parameter_122, parameter_123 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__546) + del batch_norm__546 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_72, parameter_119, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + 
batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_115, parameter_116, parameter_117, parameter_118 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_72, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del shape64_0 + del parameter_114, swish_72 - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_97, parameter_110, parameter_111, parameter_112, parameter_113 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__552, batch_norm__558) + del batch_norm__552, batch_norm__558 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_35) + del add_35 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 + # builtin.combine: ([-1x144x-1x-1xf32, -1x144x-1x-1xf32]) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + combine_11 = [swish_68, swish_73] + del swish_68, swish_73 + + # pd_op.concat: (-1x288x-1x-1xf32) <- ([-1x144x-1x-1xf32, -1x144x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + concat_13, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_3, stack_2 + del concat_13, parameter_109 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, 
parameter_105, parameter_106, parameter_107, parameter_108 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__564) + del batch_norm__564 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_74, parameter_104, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del conv2d_99, parameter_100, parameter_101, parameter_102, parameter_103 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 + # builtin.combine: ([-1x288x-1x-1xf32, -1x576x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x576x-1x-1xf32) + combine_12 = [swish_75, swish_50] + del swish_50, swish_75 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 + # pd_op.concat: (-1x864x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x576x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x864x-1x-1xf32, 288x864x1x1xf32) + conv2d_100 = paddle._C_ops.conv2d( + concat_14, parameter_99, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_95, parameter_96, parameter_97, parameter_98 - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__576) + del 
batch_norm__576 - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x864x-1x-1xf32, 288x864x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + concat_14, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_94 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_90, parameter_91, parameter_92, parameter_93 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__582) + del batch_norm__582 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_102 = paddle._C_ops.conv2d( + swish_77, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_77 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_85, parameter_86, parameter_87, parameter_88 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_78, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + 
batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_80, parameter_81, parameter_82, parameter_83 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + swish_78, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_78 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_75, parameter_76, parameter_77, parameter_78 - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_36 = paddle._C_ops.add(batch_norm__594, batch_norm__600) + del batch_norm__594, batch_norm__600 - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_36) + del add_36 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x3x3xf32) + conv2d_105 = paddle._C_ops.conv2d( + swish_79, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_79 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) - del shape64_1 + del conv2d_105, parameter_70, parameter_71, parameter_72, parameter_73 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 
288x288x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_80, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) - del full_5, stack_5 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 + del conv2d_106, parameter_65, parameter_66, parameter_67, parameter_68 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_80, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_64, swish_80 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_60, parameter_61, parameter_62, parameter_63 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, 
full_2, float("-20"), True) + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__612, batch_norm__618) + del batch_norm__612, batch_norm__618 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_37) + del add_37 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 + # builtin.combine: ([-1x288x-1x-1xf32, -1x288x-1x-1xf32]) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + combine_13 = [swish_76, swish_81] + del swish_76, swish_81 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x288x-1x-1xf32, -1x288x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_55, parameter_56, parameter_57, parameter_58 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__624) + del batch_norm__624 - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 + # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_82) - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [0] - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [1] - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_5, full_int_array_6, [1], [0] ) - del full_int_array_2, full_int_array_3, shape64_2 + del shape64_0 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - 
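The (lambda x, f: f(x)) wrapper that follows every pd_op.batch_norm_ call above exists only to normalize the op's return value so that the six-way unpack always succeeds, whether the builder hands back the full output tuple or a bare tensor. A minimal sketch of that idiom, written here purely for illustration; the helper name is an assumption and is not part of the generated graph:

    def _unpack_batch_norm(result):
        # The op may return the full six-element tuple (roughly: output,
        # running mean/variance, saved mean/variance, reserve space) or just
        # the output tensor; pad the latter with None so callers can always
        # unpack six values, exactly as the inline lambda above does.
        if isinstance(result, (list, tuple)):
            return result
        return (result, None, None, None, None, None)

    # usage mirroring the generated pattern:
    #   out, _, _, _, _, _ = _unpack_batch_norm(paddle._C_ops.batch_norm(...))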
del full_4 + # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_82) - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2] - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_8 = [3] - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_7, full_int_array_8, [1], [0] ) + del shape64_1 - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 + # pd_op.shape64: (4xi64) <- (-1x576x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_82) - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [4] - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_2 - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_16 = paddle._C_ops.multiply(slice_1, slice_2) + del slice_1, slice_2 # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] + full_int_array_10 = [1, 1] - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (8x576x1x1xf32) <- (8x576x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, + # pd_op.pool2d: (-1x576x1x1xf32) <- (-1x576x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_82, + full_int_array_10, [1, 1], [0, 0], False, @@ -488,175 +5082,253 @@ def forward( "EXPLICIT", ) - # pd_op.conv2d: (8x576x1x1xf32) <- (8x576x1x1xf32, 576x576x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) + conv2d_109 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] + del parameter_54 # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 + reshape_4 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 - # pd_op.add: 
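The sequence just above that reshapes the 68-channel map to (-1, 4, 17, H*W), takes a softmax over the 17 bins, and then applies a 1x17x1x1 convolution is the usual GFL/DFL-style integral: in heads of this kind the conv weight normally holds the bin indices 0..16, so the op yields the expected offset for each of the four box sides. A small self-contained sketch of the same computation with public Paddle APIs; the function name, bin count, and shapes are illustrative assumptions, not read from the graph:

    import paddle
    import paddle.nn.functional as F

    def dfl_expectation(reg_map, num_bins=17):
        # reg_map: (N, 4 * num_bins, H, W) raw regression logits.
        n, _, h, w = reg_map.shape
        x = reg_map.reshape([n, 4, num_bins, h * w])   # (N, 4, 17, H*W)
        x = x.transpose([0, 2, 3, 1])                  # (N, 17, H*W, 4)
        x = F.softmax(x, axis=1)                       # distribution over the bins
        # Expectation over the bins; this plays the role of the fixed
        # 1x17x1x1 conv (parameter_36) in the generated code, assuming its
        # weight is the usual bin-index vector [0, 1, ..., 16].
        bins = paddle.arange(num_bins, dtype=x.dtype).reshape([1, num_bins, 1, 1])
        return (x * bins).sum(axis=1)                  # (N, H*W, 4) expected offsets

The parallel 4-channel map is simply passed through a sigmoid and reshaped with the same dynamically computed H*W (the shape64 / slice / multiply chain above).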
(8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) + # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) + add_38 = paddle._C_ops.add(conv2d_109, reshape_4) + del conv2d_109, reshape_4 - # pd_op.sigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 + # pd_op.sigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_38) + del add_38 - # pd_op.multiply: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 8x576x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) + multiply_17 = paddle._C_ops.multiply(swish_82, sigmoid_0) + del sigmoid_0 - # pd_op.conv2d: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 576x576x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_110 = paddle._C_ops.conv2d( + multiply_17, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_51 + del multiply_17, parameter_52 - # pd_op.batch_norm_: (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_1, + conv2d_110, + parameter_51, parameter_50, parameter_49, parameter_48, - parameter_47, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_47, parameter_48, parameter_49, parameter_50 + del conv2d_110, parameter_48, parameter_49, parameter_50, parameter_51 - # pd_op.swish: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_83 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 - # pd_op.add: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 8x576x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) + # pd_op.add: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x-1x-1xf32) + add_39 = paddle._C_ops.add(swish_83, swish_82) + del swish_83 - # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x576x-1x-1xf32, 4x576x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x576x-1x-1xf32, 4x576x3x3xf32) + conv2d_111 = paddle._C_ops.conv2d( + add_39, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_46 + del add_39, parameter_47 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 + reshape_5 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 - # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_40 = paddle._C_ops.add(conv2d_111, reshape_5) + del conv2d_111, reshape_5 - # pd_op.conv2d: (8x576x1x1xf32) <- 
(8x576x1x1xf32, 576x576x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 576x576x1x1xf32) + conv2d_112 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_44 + del parameter_45, pool2d_3 # pd_op.reshape: (1x576x1x1xf32) <- (576xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 + reshape_6 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 - # pd_op.add: (8x576x1x1xf32) <- (8x576x1x1xf32, 1x576x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) + # pd_op.add: (-1x576x1x1xf32) <- (-1x576x1x1xf32, 1x576x1x1xf32) + add_41 = paddle._C_ops.add(conv2d_112, reshape_6) + del conv2d_112, reshape_6 - # pd_op.sigmoid: (8x576x1x1xf32) <- (8x576x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 + # pd_op.sigmoid: (-1x576x1x1xf32) <- (-1x576x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_41) + del add_41 - # pd_op.multiply: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 8x576x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 + # pd_op.multiply: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, -1x576x1x1xf32) + multiply_18 = paddle._C_ops.multiply(swish_82, sigmoid_1) + del sigmoid_1, swish_82 - # pd_op.conv2d: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32, 576x576x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32, 576x576x1x1xf32) + conv2d_113 = paddle._C_ops.conv2d( + multiply_18, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_42 + del multiply_18, parameter_43 - # pd_op.batch_norm_: (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (8x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) + # pd_op.batch_norm_: (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32, -1xui8) <- (-1x576x-1x-1xf32, 576xf32, 576xf32, 576xf32, 576xf32) ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_4, + conv2d_113, + parameter_42, parameter_41, parameter_40, parameter_39, - parameter_38, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_38, parameter_39, parameter_40, parameter_41 + del conv2d_113, parameter_39, parameter_40, parameter_41, parameter_42 - # pd_op.swish: (8x576x-1x-1xf32) <- (8x576x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) + # pd_op.swish: (-1x576x-1x-1xf32) <- (-1x576x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__636) + del batch_norm__636 - # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x576x-1x-1xf32, 68x576x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x576x-1x-1xf32, 68x576x3x3xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_84, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_37 + del parameter_38, swish_84 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del 
parameter_36 + reshape_7 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del parameter_37 - # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_42 = paddle._C_ops.add(conv2d_114, reshape_7) + del conv2d_114, reshape_7 - # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_14 = [full_1, full_2, full_3, multiply_16] - # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_14, 0) + del combine_14 - # pd_op.pool2d: (8x288x1x1xf32) <- (8x288x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_42, stack_0) + del add_42, stack_0 + + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_0 = paddle._C_ops.transpose(reshape_8, [0, 2, 3, 1]) + del reshape_8 + + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_0 = paddle._C_ops.softmax(transpose_0, 1) + del transpose_0 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + softmax_0, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_0 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_115, full_int_array_6) + del conv2d_115 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_40) + del add_40 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_15 = [full_1, full_2, multiply_16] + del multiply_16 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_9 = paddle._C_ops.reshape(sigmoid_2, stack_1) + del sigmoid_2, stack_1 + + # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) + shape64_3 = paddle._C_ops.shape64(swish_74) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_3 + + # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) + shape64_4 = paddle._C_ops.shape64(swish_74) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del shape64_4 + + # pd_op.shape64: (4xi64) <- (-1x288x-1x-1xf32) + shape64_5 = paddle._C_ops.shape64(swish_74) + + # 
pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del shape64_5 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_19 = paddle._C_ops.multiply(slice_4, slice_5) + del slice_4, slice_5 + + # pd_op.pool2d: (-1x288x1x1xf32) <- (-1x288x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_74, + full_int_array_10, [1, 1], [0, 0], False, @@ -668,172 +5340,238 @@ def forward( "EXPLICIT", ) - # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) + conv2d_116 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_35 # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_1) del parameter_34 - # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) + add_43 = paddle._C_ops.add(conv2d_116, reshape_10) + del conv2d_116, reshape_10 - # pd_op.sigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 + # pd_op.sigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_43) + del add_43 - # pd_op.multiply: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 8x288x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) + multiply_20 = paddle._C_ops.multiply(swish_74, sigmoid_3) + del sigmoid_3 - # pd_op.conv2d: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + multiply_20, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_33 + del multiply_20, parameter_33 - # pd_op.batch_norm_: (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_7, + conv2d_117, parameter_32, parameter_31, parameter_30, parameter_29, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_29, parameter_30, parameter_31, parameter_32 + del conv2d_117, parameter_29, parameter_30, parameter_31, parameter_32 - # pd_op.swish: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__642) + del batch_norm__642 - # pd_op.add: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 8x288x-1x-1xf32) - add_6 = 
paddle._C_ops.add(swish_2, data_7) + # pd_op.add: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x-1x-1xf32) + add_44 = paddle._C_ops.add(swish_85, swish_74) + del swish_85 - # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x288x-1x-1xf32, 4x288x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x288x-1x-1xf32, 4x288x3x3xf32) + conv2d_118 = paddle._C_ops.conv2d( + add_44, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_28 + del add_44, parameter_28 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_1) del parameter_27 - # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_45 = paddle._C_ops.add(conv2d_118, reshape_11) + del conv2d_118, reshape_11 - # pd_op.conv2d: (8x288x1x1xf32) <- (8x288x1x1xf32, 288x288x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 288x288x1x1xf32) + conv2d_119 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_26 + del parameter_26, pool2d_4 # pd_op.reshape: (1x288x1x1xf32) <- (288xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_1) del parameter_25 - # pd_op.add: (8x288x1x1xf32) <- (8x288x1x1xf32, 1x288x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + # pd_op.add: (-1x288x1x1xf32) <- (-1x288x1x1xf32, 1x288x1x1xf32) + add_46 = paddle._C_ops.add(conv2d_119, reshape_12) + del conv2d_119, reshape_12 - # pd_op.sigmoid: (8x288x1x1xf32) <- (8x288x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 + # pd_op.sigmoid: (-1x288x1x1xf32) <- (-1x288x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_46) + del add_46 - # pd_op.multiply: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 8x288x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 + # pd_op.multiply: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, -1x288x1x1xf32) + multiply_21 = paddle._C_ops.multiply(swish_74, sigmoid_4) + del sigmoid_4, swish_74 - # pd_op.conv2d: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32, 288x288x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32, 288x288x1x1xf32) + conv2d_120 = paddle._C_ops.conv2d( + multiply_21, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_24 + del multiply_21, parameter_24 - # pd_op.batch_norm_: (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (8x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) + # pd_op.batch_norm_: (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32, -1xui8) <- (-1x288x-1x-1xf32, 288xf32, 288xf32, 288xf32, 288xf32) ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_10, + conv2d_120, parameter_23, parameter_22, parameter_21, parameter_20, - False, + True, float("0.9"), 
float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_20, parameter_21, parameter_22, parameter_23 + del conv2d_120, parameter_20, parameter_21, parameter_22, parameter_23 - # pd_op.swish: (8x288x-1x-1xf32) <- (8x288x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) + # pd_op.swish: (-1x288x-1x-1xf32) <- (-1x288x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 - # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x288x-1x-1xf32, 68x288x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x288x-1x-1xf32, 68x288x3x3xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_86, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_19 + del parameter_19, swish_86 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_1) del parameter_18 - # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_47 = paddle._C_ops.add(conv2d_121, reshape_13) + del conv2d_121, reshape_13 - # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_16 = [full_1, full_2, full_3, multiply_19] - # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 - # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_47, stack_2) + del add_47, stack_2 - # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_1 = paddle._C_ops.transpose(reshape_14, [0, 2, 3, 1]) + del reshape_14 - # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_1 = paddle._C_ops.softmax(transpose_1, 1) + del transpose_1 - # pd_op.pool2d: (8x144x1x1xf32) <- (8x144x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_122 = paddle._C_ops.conv2d( + softmax_1, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_1 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_122, full_int_array_6) + del conv2d_122 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_45) + del add_45 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_17 = [full_1, full_2, multiply_19] + del multiply_19 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_15 = 
paddle._C_ops.reshape(sigmoid_5, stack_3) + del sigmoid_5, stack_3 + + # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) + shape64_6 = paddle._C_ops.shape64(swish_66) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, shape64_6 + + # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_66) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_7, full_int_array_8, [1], [0] + ) + del full_int_array_7, shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x144x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_66) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_8, full_int_array_9, [1], [0] + ) + del full_int_array_8, full_int_array_9, shape64_8 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_22 = paddle._C_ops.multiply(slice_7, slice_8) + del slice_7, slice_8 + + # pd_op.pool2d: (-1x144x1x1xf32) <- (-1x144x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_66, + full_int_array_10, [1, 1], [0, 0], False, @@ -844,301 +5582,225 @@ def forward( True, "EXPLICIT", ) + del full_int_array_10 - # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) + conv2d_123 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_17 # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + reshape_16 = paddle._C_ops.reshape(parameter_16, full_int_array_1) del parameter_16 - # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) + add_48 = paddle._C_ops.add(conv2d_123, reshape_16) + del conv2d_123, reshape_16 - # pd_op.sigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 + # pd_op.sigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_48) + del add_48 - # pd_op.multiply: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 8x144x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) + multiply_23 = paddle._C_ops.multiply(swish_66, sigmoid_6) + del sigmoid_6 - # pd_op.conv2d: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + multiply_23, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_15 + del multiply_23, parameter_15 - # pd_op.batch_norm_: (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + 
batch_norm__658, + batch_norm__659, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_13, + conv2d_124, parameter_14, parameter_13, parameter_12, parameter_11, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_11, parameter_12, parameter_13, parameter_14 + del conv2d_124, parameter_11, parameter_12, parameter_13, parameter_14 - # pd_op.swish: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__654) + del batch_norm__654 - # pd_op.add: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 8x144x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) + # pd_op.add: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x-1x-1xf32) + add_49 = paddle._C_ops.add(swish_87, swish_66) + del swish_87 - # pd_op.conv2d: (8x4x-1x-1xf32) <- (8x144x-1x-1xf32, 4x144x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x4x-1x-1xf32) <- (-1x144x-1x-1xf32, 4x144x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + add_49, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_10 + del add_49, parameter_10 # pd_op.reshape: (1x4x1x1xf32) <- (4xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + reshape_17 = paddle._C_ops.reshape(parameter_9, full_int_array_1) del parameter_9 - # pd_op.add: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32, 1x4x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) + # pd_op.add: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32, 1x4x1x1xf32) + add_50 = paddle._C_ops.add(conv2d_125, reshape_17) + del conv2d_125, reshape_17 - # pd_op.conv2d: (8x144x1x1xf32) <- (8x144x1x1xf32, 144x144x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 144x144x1x1xf32) + conv2d_126 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_8 + del parameter_8, pool2d_5 # pd_op.reshape: (1x144x1x1xf32) <- (144xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + reshape_18 = paddle._C_ops.reshape(parameter_7, full_int_array_1) del parameter_7 - # pd_op.add: (8x144x1x1xf32) <- (8x144x1x1xf32, 1x144x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + # pd_op.add: (-1x144x1x1xf32) <- (-1x144x1x1xf32, 1x144x1x1xf32) + add_51 = paddle._C_ops.add(conv2d_126, reshape_18) + del conv2d_126, reshape_18 - # pd_op.sigmoid: (8x144x1x1xf32) <- (8x144x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 + # pd_op.sigmoid: (-1x144x1x1xf32) <- (-1x144x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_51) + del add_51 - # pd_op.multiply: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 8x144x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 + # pd_op.multiply: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, -1x144x1x1xf32) + multiply_24 = paddle._C_ops.multiply(swish_66, sigmoid_7) + del sigmoid_7, swish_66 - # pd_op.conv2d: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32, 144x144x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32, 144x144x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + multiply_24, 
parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_6 + del multiply_24, parameter_6 - # pd_op.batch_norm_: (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (8x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) + # pd_op.batch_norm_: (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32, -1xui8) <- (-1x144x-1x-1xf32, 144xf32, 144xf32, 144xf32, 144xf32) ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( - conv2d_16, + conv2d_127, parameter_5, parameter_4, parameter_3, parameter_2, - False, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_2, parameter_3, parameter_4, parameter_5 + del conv2d_127, parameter_2, parameter_3, parameter_4, parameter_5 - # pd_op.swish: (8x144x-1x-1xf32) <- (8x144x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) + # pd_op.swish: (-1x144x-1x-1xf32) <- (-1x144x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__660) + del batch_norm__660 - # pd_op.conv2d: (8x68x-1x-1xf32) <- (8x144x-1x-1xf32, 68x144x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x68x-1x-1xf32) <- (-1x144x-1x-1xf32, 68x144x3x3xf32) + conv2d_128 = paddle._C_ops.conv2d( + swish_88, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_1 + del parameter_1, swish_88 # pd_op.reshape: (1x68x1x1xf32) <- (68xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 + reshape_19 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 - # pd_op.add: (8x68x-1x-1xf32) <- (8x68x-1x-1xf32, 1x68x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + # pd_op.add: (-1x68x-1x-1xf32) <- (-1x68x-1x-1xf32, 1x68x1x1xf32) + add_52 = paddle._C_ops.add(conv2d_128, reshape_19) + del conv2d_128, reshape_19 - # pd_op.sigmoid: (8x4x-1x-1xf32) <- (8x4x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_18 = [full_1, full_2, full_3, multiply_22] + del full_3 - # pd_op.flatten: (8x4x-1xf32) <- (8x4x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 - # pd_op.transpose: (8x-1x4xf32) <- (8x4x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 + # pd_op.reshape: (-1x4x17x-1xf32) <- (-1x68x-1x-1xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(add_52, stack_4) + del add_52, stack_4 - # pd_op.flatten: (8x68x-1xf32) <- (8x68x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x4x17x-1xf32) + transpose_2 = paddle._C_ops.transpose(reshape_20, [0, 2, 3, 1]) + del reshape_20 - # pd_op.transpose: (8x-1x68xf32) <- (8x68x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 + # pd_op.softmax: (-1x17x-1x4xf32) <- (-1x17x-1x4xf32) + softmax_2 = paddle._C_ops.softmax(transpose_2, 1) + del transpose_2 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_129 = 
paddle._C_ops.conv2d( + softmax_2, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_2 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_129, full_int_array_6) + del conv2d_129, full_int_array_6 + + # pd_op.sigmoid: (-1x4x-1x-1xf32) <- (-1x4x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_50) + del add_50 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_19 = [full_1, full_2, multiply_22] + del full_1, full_2, multiply_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x-1xf32) <- (-1x4x-1x-1xf32, 3xi64) + reshape_21 = paddle._C_ops.reshape(sigmoid_8, stack_5) + del sigmoid_8, stack_5 # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 + # builtin.combine: ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32]) <- (-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32) + combine_20 = [reshape_9, reshape_15, reshape_21] + del reshape_15, reshape_21, reshape_9 - # builtin.combine: ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32]) <- (8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] + # pd_op.concat: (-1x4x-1xf32) <- ([-1x4x-1xf32, -1x4x-1xf32, -1x4x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_20, full_4) + del combine_20, full_4 - # pd_op.concat: (8x-1x4xf32) <- ([8x-1x4xf32, 8x-1x4xf32, 8x-1x4xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_21 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 - # builtin.combine: ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32]) <- (8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (8x-1x68xf32) <- ([8x-1x68xf32, 8x-1x68xf32, 8x-1x68xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - 
sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) - - return concat_0, concat_1, concat_2, concat_3, concat_4 + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_21, full_0) + del combine_21, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py index 0fbfee683..49282087c 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_4/weight_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [68] dtype = "float32" - min_val = float("-0.0141298") - max_val = float("0.0241404") - mean = float("6.51635e-08") - std = float("0.00670344") + min_val = float("-0.0141635") + max_val = float("0.0241976") + mean = float("6.52944e-08") + std = float("0.00671931") data = None @@ -13,10 +13,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [68, 144, 3, 3] dtype = "float32" - min_val = float("-0.159403") - max_val = float("0.18767") - mean = float("6.14746e-08") - std = float("0.00826349") + min_val = float("-0.15978") + max_val = float("0.188116") + mean = float("6.16128e-08") + std = float("0.00828307") data = None @@ -24,10 +24,10 @@ class Program_weight_tensor_parameter_2: name = "parameter_2" shape = [144] dtype = "float32" - min_val = float("-0.104117") - max_val = float("0.334805") - mean = float("0.0803525") - std = float("0.0949004") + min_val = float("-0.104364") + max_val = float("0.335598") + mean = float("0.0805429") + std = float("0.0951253") data = None @@ -35,10 +35,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [144] dtype = "float32" - min_val = float("0.833704") - max_val = float("2.14069") - mean = float("1.40199") - std = float("0.259198") + min_val = float("0.83568") + max_val = float("2.14576") + mean = float("1.40531") + std = float("0.259813") data = None @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [144] dtype = "float32" - min_val = float("0.000157642") - max_val = float("0.00219112") - mean = float("0.000569831") - std = float("0.000354466") + min_val = float("0.000157802") + max_val = float("0.00220136") + mean = float("0.000572424") + std = float("0.000355381") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [144] dtype = "float32" - min_val = float("-0.0501844") - max_val = float("0.038398") - mean = float("-0.00743265") - std = float("0.0175874") + min_val = float("-0.0503245") + max_val = float("0.0384794") + mean = float("-0.00745528") + std = float("0.0176331") data = None @@ -68,10 +68,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [144, 144, 1, 1] dtype = "float32" - min_val = float("-0.0724953") - max_val = float("0.0965259") - mean = float("-0.000264722") - std = float("0.00737951") + min_val = float("-0.0726671") + max_val = float("0.0967547") + mean = float("-0.000265342") + std = float("0.00739699") data = None @@ -79,10 +79,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [144] dtype = "float32" - min_val = 
float("-0.00636108") - max_val = float("0.00678391") - mean = float("-0.000173716") - std = float("0.00320574") + min_val = float("-0.00637615") + max_val = float("0.0068") + mean = float("-0.000174127") + std = float("0.00321334") data = None @@ -90,10 +90,10 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [144, 144, 1, 1] dtype = "float32" - min_val = float("-0.0117989") - max_val = float("0.015335") - mean = float("-0.000124252") - std = float("0.00220502") + min_val = float("-0.0118268") + max_val = float("0.0153714") + mean = float("-0.000124547") + std = float("0.00221024") data = None @@ -110,10 +110,10 @@ class Program_weight_tensor_parameter_10: name = "parameter_10" shape = [4, 144, 3, 3] dtype = "float32" - min_val = float("-7.87108e-06") - max_val = float("0.000443999") - mean = float("1.5363e-05") - std = float("3.18915e-05") + min_val = float("-6.03644e-06") + max_val = float("0.000338216") + mean = float("1.1862e-05") + std = float("2.42884e-05") data = None @@ -121,10 +121,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [144] dtype = "float32" - min_val = float("-0.64158") - max_val = float("1.51361") - mean = float("0.436643") - std = float("0.396591") + min_val = float("-0.6431") + max_val = float("1.5172") + mean = float("0.437678") + std = float("0.39753") data = None @@ -132,10 +132,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [144] dtype = "float32" - min_val = float("0.912817") - max_val = float("2.11091") - mean = float("1.38863") - std = float("0.197354") + min_val = float("0.91498") + max_val = float("2.11591") + mean = float("1.39192") + std = float("0.197821") data = None @@ -143,10 +143,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [144] dtype = "float32" - min_val = float("0.000201053") - max_val = float("0.00337522") - mean = float("0.000771889") - std = float("0.000504667") + min_val = float("0.000201827") + max_val = float("0.0033487") + mean = float("0.000776001") + std = float("0.000505157") data = None @@ -154,10 +154,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [144] dtype = "float32" - min_val = float("-0.2459") - max_val = float("0.0358757") - mean = float("-0.0278203") - std = float("0.0404512") + min_val = float("-0.246557") + max_val = float("0.035997") + mean = float("-0.0278908") + std = float("0.0405564") data = None @@ -165,10 +165,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [144, 144, 1, 1] dtype = "float32" - min_val = float("-0.0649436") - max_val = float("0.0801723") - mean = float("-0.000599465") - std = float("0.0088557") + min_val = float("-0.0650974") + max_val = float("0.0803623") + mean = float("-0.000600887") + std = float("0.00887669") data = None @@ -176,10 +176,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [144] dtype = "float32" - min_val = float("-0.00555943") - max_val = float("0.0056399") - mean = float("-0.000279704") - std = float("0.00214688") + min_val = float("-0.0055726") + max_val = float("0.00565327") + mean = float("-0.000280367") + std = float("0.00215197") data = None @@ -187,10 +187,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [144, 144, 1, 1] dtype = "float32" - min_val = float("-0.0293142") - max_val = float("0.0536777") - mean = float("-5.72243e-05") - std = float("0.00245901") + min_val = float("-0.0293837") + max_val = float("0.0538049") + mean = 
float("-5.73599e-05") + std = float("0.00246484") data = None @@ -198,10 +198,10 @@ class Program_weight_tensor_parameter_18: name = "parameter_18" shape = [68] dtype = "float32" - min_val = float("-0.0049597") - max_val = float("0.0272289") - mean = float("6.11326e-08") - std = float("0.00561272") + min_val = float("-0.00497141") + max_val = float("0.0272935") + mean = float("6.13218e-08") + std = float("0.00562603") data = None @@ -209,10 +209,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [68, 288, 3, 3] dtype = "float32" - min_val = float("-0.111167") - max_val = float("0.12918") - mean = float("3.28073e-08") - std = float("0.00572785") + min_val = float("-0.11143") + max_val = float("0.129486") + mean = float("3.29019e-08") + std = float("0.00574142") data = None @@ -220,10 +220,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [288] dtype = "float32" - min_val = float("-0.0173529") - max_val = float("0.146767") - mean = float("0.0535169") - std = float("0.0321121") + min_val = float("-0.017394") + max_val = float("0.147115") + mean = float("0.0536437") + std = float("0.0321882") data = None @@ -231,10 +231,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [288] dtype = "float32" - min_val = float("1.01431") - max_val = float("1.4495") - mean = float("1.22632") - std = float("0.0811625") + min_val = float("1.01671") + max_val = float("1.45294") + mean = float("1.22922") + std = float("0.0813549") data = None @@ -242,10 +242,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [288] dtype = "float32" - min_val = float("9.53404e-05") - max_val = float("0.00409233") - mean = float("0.000467458") - std = float("0.000455701") + min_val = float("9.58753e-05") + max_val = float("0.0041103") + mean = float("0.000468789") + std = float("0.000456147") data = None @@ -253,10 +253,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [288] dtype = "float32" - min_val = float("-0.0517031") - max_val = float("0.0162222") - mean = float("-0.00786597") - std = float("0.00921386") + min_val = float("-0.0518176") + max_val = float("0.0162465") + mean = float("-0.00788198") + std = float("0.00923439") data = None @@ -264,10 +264,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [288, 288, 1, 1] dtype = "float32" - min_val = float("-0.0594582") - max_val = float("0.0735971") - mean = float("-0.00013723") - std = float("0.00364254") + min_val = float("-0.0595991") + max_val = float("0.0737716") + mean = float("-0.000137555") + std = float("0.00365117") data = None @@ -275,10 +275,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [288] dtype = "float32" - min_val = float("-0.00314669") - max_val = float("0.00620898") - mean = float("2.48414e-05") - std = float("0.00194774") + min_val = float("-0.00315414") + max_val = float("0.00622371") + mean = float("2.49002e-05") + std = float("0.00195236") data = None @@ -286,10 +286,10 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [288, 288, 1, 1] dtype = "float32" - min_val = float("-0.00388461") - max_val = float("0.00841306") - mean = float("-1.87953e-05") - std = float("0.000932022") + min_val = float("-0.00389382") + max_val = float("0.008433") + mean = float("-1.88398e-05") + std = float("0.000934231") data = None @@ -306,10 +306,10 @@ class Program_weight_tensor_parameter_28: name = "parameter_28" shape = [4, 288, 3, 3] dtype = "float32" - 
min_val = float("-6.48766e-06") - max_val = float("0.000140856") - mean = float("6.52221e-06") - std = float("9.39411e-06") + min_val = float("-5.34524e-06") + max_val = float("0.000115912") + mean = float("5.31803e-06") + std = float("7.70699e-06") data = None @@ -317,10 +317,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [288] dtype = "float32" - min_val = float("-0.27006") - max_val = float("0.770798") - mean = float("0.311538") - std = float("0.171771") + min_val = float("-0.2707") + max_val = float("0.772625") + mean = float("0.312276") + std = float("0.172178") data = None @@ -328,10 +328,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [288] dtype = "float32" - min_val = float("0.991505") - max_val = float("1.72132") - mean = float("1.2558") - std = float("0.0945899") + min_val = float("0.993855") + max_val = float("1.7254") + mean = float("1.25878") + std = float("0.0948141") data = None @@ -339,10 +339,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [288] dtype = "float32" - min_val = float("0.000219746") - max_val = float("0.0054385") - mean = float("0.000809585") - std = float("0.000639851") + min_val = float("0.000222055") + max_val = float("0.00541883") + mean = float("0.000812938") + std = float("0.000640229") data = None @@ -350,10 +350,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [288] dtype = "float32" - min_val = float("-0.130478") - max_val = float("0.0695712") - mean = float("-0.0265584") - std = float("0.0287591") + min_val = float("-0.1308") + max_val = float("0.0697218") + mean = float("-0.0266256") + std = float("0.0288337") data = None @@ -361,10 +361,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [288, 288, 1, 1] dtype = "float32" - min_val = float("-0.0480156") - max_val = float("0.0592741") - mean = float("-0.000430968") - std = float("0.00425043") + min_val = float("-0.0481293") + max_val = float("0.0594145") + mean = float("-0.000431989") + std = float("0.0042605") data = None @@ -372,10 +372,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [288] dtype = "float32" - min_val = float("-0.00285738") - max_val = float("0.00720467") - mean = float("-7.33536e-05") - std = float("0.0011726") + min_val = float("-0.00286415") + max_val = float("0.00722174") + mean = float("-7.35274e-05") + std = float("0.00117538") data = None @@ -383,43 +383,41 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [288, 288, 1, 1] dtype = "float32" - min_val = float("-0.0122627") - max_val = float("0.0161886") - mean = float("-1.72547e-05") - std = float("0.000995486") + min_val = float("-0.0122917") + max_val = float("0.016227") + mean = float("-1.72956e-05") + std = float("0.000997845") data = None class Program_weight_tensor_parameter_36: name = "parameter_36" - shape = [68] + shape = [1, 17, 1, 1] dtype = "float32" - min_val = float("-0.00365417") - max_val = float("0.0135851") - mean = float("3.44444e-08") - std = float("0.00376477") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_37: name = "parameter_37" - shape = [68, 576, 3, 3] + shape = [68] dtype = "float32" - min_val = float("-0.0663088") - max_val = float("0.0674867") - mean = float("1.74732e-08") - std = float("0.00359868") + min_val = float("-0.00366283") + max_val = float("0.0136174") + mean = float("3.45317e-08") + std = float("0.00377369") data = None class 
Program_weight_tensor_parameter_38: name = "parameter_38" - shape = [576] + shape = [68, 576, 3, 3] dtype = "float32" - min_val = float("-0.0421788") - max_val = float("0.113513") - mean = float("0.0222551") - std = float("0.0258343") + min_val = float("-0.066466") + max_val = float("0.0676466") + mean = float("1.75132e-08") + std = float("0.00360721") data = None @@ -427,10 +425,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [576] dtype = "float32" - min_val = float("1.05067") - max_val = float("1.39463") - mean = float("1.14799") - std = float("0.042853") + min_val = float("-0.0422788") + max_val = float("0.113782") + mean = float("0.0223079") + std = float("0.0258955") data = None @@ -438,10 +436,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [576] dtype = "float32" - min_val = float("4.89493e-05") - max_val = float("0.00264842") - mean = float("0.000245751") - std = float("0.000220246") + min_val = float("1.05316") + max_val = float("1.39794") + mean = float("1.15071") + std = float("0.0429545") data = None @@ -449,74 +447,74 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [576] dtype = "float32" - min_val = float("-0.0344271") - max_val = float("0.0201663") - mean = float("-0.00575569") - std = float("0.00544377") + min_val = float("4.91637e-05") + max_val = float("0.00266511") + mean = float("0.000246075") + std = float("0.000221109") data = None class Program_weight_tensor_parameter_42: name = "parameter_42" - shape = [576, 576, 1, 1] + shape = [576] dtype = "float32" - min_val = float("-0.0385787") - max_val = float("0.0417201") - mean = float("-6.1183e-05") - std = float("0.00176385") + min_val = float("-0.0345042") + max_val = float("0.0202052") + mean = float("-0.00576832") + std = float("0.00545338") data = None class Program_weight_tensor_parameter_43: name = "parameter_43" - shape = [576] + shape = [576, 576, 1, 1] dtype = "float32" - min_val = float("-0.00434691") - max_val = float("0.00341236") - mean = float("0.000100391") - std = float("0.00100061") + min_val = float("-0.0386702") + max_val = float("0.0418189") + mean = float("-6.13284e-05") + std = float("0.00176803") data = None class Program_weight_tensor_parameter_44: name = "parameter_44" - shape = [576, 576, 1, 1] + shape = [576] dtype = "float32" - min_val = float("-0.0035525") - max_val = float("0.00417295") - mean = float("2.83396e-05") - std = float("0.00036474") + min_val = float("-0.00435722") + max_val = float("0.00342043") + mean = float("0.000100629") + std = float("0.00100298") data = None class Program_weight_tensor_parameter_45: name = "parameter_45" - shape = [4] + shape = [576, 576, 1, 1] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-0.00356092") + max_val = float("0.00418283") + mean = float("2.84068e-05") + std = float("0.000365604") data = None class Program_weight_tensor_parameter_46: name = "parameter_46" - shape = [4, 576, 3, 3] + shape = [4] dtype = "float32" - min_val = float("-8.78041e-06") - max_val = float("0.000132008") - mean = float("4.36014e-06") - std = float("8.07886e-06") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_47: name = "parameter_47" - shape = [576] + shape = [4, 576, 3, 3] dtype = "float32" - min_val = float("-0.248224") - max_val = float("0.371203") - mean = float("0.155358") - std = float("0.0834839") + min_val = float("-6.95821e-06") + max_val = float("0.000100905") + mean = 
float("3.36451e-06") + std = float("6.30444e-06") data = None @@ -524,10 +522,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [576] dtype = "float32" - min_val = float("1.02385") - max_val = float("1.42504") - mean = float("1.13342") - std = float("0.0514955") + min_val = float("-0.248813") + max_val = float("0.372082") + mean = float("0.155726") + std = float("0.0836817") data = None @@ -535,10 +533,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [576] dtype = "float32" - min_val = float("0.000119442") - max_val = float("0.00279863") - mean = float("0.000709705") - std = float("0.00050333") + min_val = float("1.02627") + max_val = float("1.42841") + mean = float("1.1361") + std = float("0.0516175") data = None @@ -546,41 +544,5827 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [576] dtype = "float32" - min_val = float("-0.0735115") - max_val = float("0.0820593") - mean = float("-0.0216337") - std = float("0.0158423") + min_val = float("0.000120346") + max_val = float("0.00280838") + mean = float("0.000713572") + std = float("0.000507142") data = None class Program_weight_tensor_parameter_51: name = "parameter_51" - shape = [576, 576, 1, 1] + shape = [576] dtype = "float32" - min_val = float("-0.0599145") - max_val = float("0.0355292") - mean = float("-0.000237247") - std = float("0.00192816") + min_val = float("-0.0736708") + max_val = float("0.0822341") + mean = float("-0.0216779") + std = float("0.0158763") data = None class Program_weight_tensor_parameter_52: name = "parameter_52" - shape = [576] + shape = [576, 576, 1, 1] dtype = "float32" - min_val = float("-0.00772796") - max_val = float("0.00635587") - mean = float("-2.65035e-05") - std = float("0.000684865") + min_val = float("-0.0600565") + max_val = float("0.0356134") + mean = float("-0.00023781") + std = float("0.00193273") data = None class Program_weight_tensor_parameter_53: name = "parameter_53" + shape = [576] + dtype = "float32" + min_val = float("-0.00774628") + max_val = float("0.00637093") + mean = float("-2.65663e-05") + std = float("0.000686488") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" shape = [576, 576, 1, 1] dtype = "float32" - min_val = float("-0.0265895") - max_val = float("0.0452734") - mean = float("-1.04734e-06") - std = float("0.000523833") + min_val = float("-0.0266525") + max_val = float("0.0453807") + mean = float("-1.04982e-06") + std = float("0.000525074") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [576] + dtype = "float32" + min_val = float("-0.363147") + max_val = float("0.450423") + mean = float("0.129446") + std = float("0.123275") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [576] + dtype = "float32" + min_val = float("0.962108") + max_val = float("1.62765") + mean = float("1.1062") + std = float("0.0594285") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [576] + dtype = "float32" + min_val = float("0.00128466") + max_val = float("0.0464107") + mean = float("0.00458397") + std = float("0.00374471") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [576] + dtype = "float32" + min_val = float("-0.177259") + max_val = float("0.103565") + mean = float("-0.0218799") + std = float("0.0230091") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape 
= [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0611952") + max_val = float("0.04003") + mean = float("-0.000170338") + std = float("0.00295543") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [288] + dtype = "float32" + min_val = float("-0.27771") + max_val = float("0.0488218") + mean = float("-0.0555783") + std = float("0.0592335") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [288] + dtype = "float32" + min_val = float("0.915284") + max_val = float("1.07407") + mean = float("0.969917") + std = float("0.0228204") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [288] + dtype = "float32" + min_val = float("0.000953466") + max_val = float("0.0142773") + mean = float("0.00304707") + std = float("0.00176714") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [288] + dtype = "float32" + min_val = float("-0.04362") + max_val = float("0.0544441") + mean = float("0.00710198") + std = float("0.0174711") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0344584") + max_val = float("0.0245762") + mean = float("8.69469e-05") + std = float("0.00219113") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [288] + dtype = "float32" + min_val = float("-0.27771") + max_val = float("0.0488218") + mean = float("-0.0555783") + std = float("0.0592335") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [288] + dtype = "float32" + min_val = float("0.981008") + max_val = float("1.21689") + mean = float("1.04938") + std = float("0.0359279") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [288] + dtype = "float32" + min_val = float("0.00172209") + max_val = float("0.0250926") + mean = float("0.00470188") + std = float("0.00208766") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [288] + dtype = "float32" + min_val = float("-0.12407") + max_val = float("0.0767199") + mean = float("-0.029451") + std = float("0.022194") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0377338") + max_val = float("0.0461348") + mean = float("-8.57263e-05") + std = float("0.00163106") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [288] + dtype = "float32" + min_val = float("-0.385862") + max_val = float("0.0653291") + mean = float("-0.105618") + std = float("0.0696428") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [288] + dtype = "float32" + min_val = float("0.900938") + max_val = float("1.32086") + mean = float("1.04193") + std = float("0.0560555") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [288] + dtype = "float32" + min_val = float("0.00421839") + max_val = float("0.0367987") + mean = float("0.0103437") + std = float("0.00489499") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [288] + dtype = "float32" + min_val = float("-0.153433") + max_val = float("0.0932305") + mean = float("-0.0112633") + std = float("0.0256948") + data = None + + +class 
Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0364099") + max_val = float("0.0523836") + mean = float("-8.24632e-05") + std = float("0.00184979") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [288] + dtype = "float32" + min_val = float("-0.299402") + max_val = float("0.0134731") + mean = float("-0.109532") + std = float("0.0597187") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [288] + dtype = "float32" + min_val = float("0.890481") + max_val = float("1.0932") + mean = float("0.968348") + std = float("0.0257197") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [288] + dtype = "float32" + min_val = float("0.00100979") + max_val = float("0.00878796") + mean = float("0.00444441") + std = float("0.00149939") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [288] + dtype = "float32" + min_val = float("-0.0520888") + max_val = float("0.049478") + mean = float("0.00820149") + std = float("0.0145604") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0322537") + max_val = float("0.029478") + mean = float("1.23234e-05") + std = float("0.00229431") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [288] + dtype = "float32" + min_val = float("-0.299402") + max_val = float("0.0134731") + mean = float("-0.109532") + std = float("0.0597187") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [288] + dtype = "float32" + min_val = float("0.962862") + max_val = float("1.20254") + mean = float("1.04567") + std = float("0.0391825") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [288] + dtype = "float32" + min_val = float("0.00284955") + max_val = float("0.0225437") + mean = float("0.007316") + std = float("0.00273474") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [288] + dtype = "float32" + min_val = float("-0.140963") + max_val = float("0.0369856") + mean = float("-0.0354432") + std = float("0.0220809") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0384327") + max_val = float("0.0469147") + mean = float("-0.000103927") + std = float("0.00179042") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [288] + dtype = "float32" + min_val = float("-0.29697") + max_val = float("0.139867") + mean = float("-0.0979302") + std = float("0.0600107") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [288] + dtype = "float32" + min_val = float("0.857317") + max_val = float("1.30652") + mean = float("1.03469") + std = float("0.0730064") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [288] + dtype = "float32" + min_val = float("0.00405577") + max_val = float("0.0443345") + mean = float("0.0130506") + std = float("0.00639368") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [288] + dtype = "float32" + min_val = float("-0.133397") + max_val = float("0.122273") + mean = 
float("-0.0433752") + std = float("0.036443") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0411107") + max_val = float("0.0539263") + mean = float("-0.000111625") + std = float("0.00199295") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [288] + dtype = "float32" + min_val = float("-0.215301") + max_val = float("0.173622") + mean = float("-0.0676104") + std = float("0.0466325") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [288] + dtype = "float32" + min_val = float("0.91523") + max_val = float("1.17267") + mean = float("1.02621") + std = float("0.0431361") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [288] + dtype = "float32" + min_val = float("0.00251899") + max_val = float("0.0117666") + mean = float("0.00474816") + std = float("0.00149593") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [288] + dtype = "float32" + min_val = float("-0.0692205") + max_val = float("0.053793") + mean = float("-0.0160558") + std = float("0.0210432") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [288, 864, 1, 1] + dtype = "float32" + min_val = float("-0.0745047") + max_val = float("0.0842196") + mean = float("-0.00010905") + std = float("0.00289749") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [288] + dtype = "float32" + min_val = float("-0.0916511") + max_val = float("0.0302142") + mean = float("-0.0278605") + std = float("0.0201815") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [288] + dtype = "float32" + min_val = float("0.899444") + max_val = float("1.09798") + mean = float("1.01228") + std = float("0.0255773") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [288] + dtype = "float32" + min_val = float("0.00233029") + max_val = float("0.0161175") + mean = float("0.00376379") + std = float("0.00141808") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [288] + dtype = "float32" + min_val = float("-0.0681009") + max_val = float("0.0535") + mean = float("-0.0176997") + std = float("0.018982") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [288, 864, 1, 1] + dtype = "float32" + min_val = float("-0.0500682") + max_val = float("0.0412844") + mean = float("-0.000136185") + std = float("0.00251502") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [288] + dtype = "float32" + min_val = float("-0.140589") + max_val = float("0.00421123") + mean = float("-0.0496541") + std = float("0.0252022") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [288] + dtype = "float32" + min_val = float("0.944857") + max_val = float("1.21748") + mean = float("1.05083") + std = float("0.0346128") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [288] + dtype = "float32" + min_val = float("0.00540006") + max_val = float("0.0523987") + mean = float("0.0137401") + std = float("0.00670132") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [288] + dtype = "float32" + 
min_val = float("-0.369448") + max_val = float("0.183087") + mean = float("-0.0467212") + std = float("0.0761257") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.029027") + max_val = float("0.0392117") + mean = float("-4.01833e-05") + std = float("0.00156935") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [288] + dtype = "float32" + min_val = float("-0.703153") + max_val = float("0.889582") + mean = float("0.279296") + std = float("0.234385") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [288] + dtype = "float32" + min_val = float("0.665579") + max_val = float("1.52345") + mean = float("1.16577") + std = float("0.119601") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [288] + dtype = "float32" + min_val = float("0.00263439") + max_val = float("0.0673288") + mean = float("0.00986936") + std = float("0.00642903") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [288] + dtype = "float32" + min_val = float("-0.172453") + max_val = float("0.213078") + mean = float("-0.0233247") + std = float("0.0347766") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.10595") + max_val = float("0.0861042") + mean = float("-0.000322342") + std = float("0.00641596") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [144] + dtype = "float32" + min_val = float("-0.348757") + max_val = float("0.143868") + mean = float("-0.0670231") + std = float("0.092848") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [144] + dtype = "float32" + min_val = float("0.826843") + max_val = float("1.10694") + mean = float("0.932296") + std = float("0.0366632") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [144] + dtype = "float32" + min_val = float("0.00141229") + max_val = float("0.0167029") + mean = float("0.00556534") + std = float("0.0030144") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [144] + dtype = "float32" + min_val = float("-0.0528065") + max_val = float("0.0511357") + mean = float("-0.00205834") + std = float("0.0130524") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0580646") + max_val = float("0.0259261") + mean = float("-0.000238707") + std = float("0.004453") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [144] + dtype = "float32" + min_val = float("-0.348757") + max_val = float("0.143868") + mean = float("-0.0670231") + std = float("0.092848") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [144] + dtype = "float32" + min_val = float("0.69241") + max_val = float("1.27544") + mean = float("1.06354") + std = float("0.0790744") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [144] + dtype = "float32" + min_val = float("0.00383033") + max_val = float("0.0240594") + mean = float("0.0089051") + std = float("0.00329963") + data = None + + +class 
Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [144] + dtype = "float32" + min_val = float("-0.116614") + max_val = float("0.0942855") + mean = float("-0.0189062") + std = float("0.0284546") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0533475") + max_val = float("0.0528243") + mean = float("-0.000100816") + std = float("0.00343967") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [144] + dtype = "float32" + min_val = float("-0.439905") + max_val = float("0.120748") + mean = float("-0.165545") + std = float("0.123755") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [144] + dtype = "float32" + min_val = float("0.853433") + max_val = float("1.29875") + mean = float("1.03371") + std = float("0.0909163") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [144] + dtype = "float32" + min_val = float("0.00423297") + max_val = float("0.0438086") + mean = float("0.013521") + std = float("0.00655558") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [144] + dtype = "float32" + min_val = float("-0.139") + max_val = float("0.0849961") + mean = float("-0.00217177") + std = float("0.0237809") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0544763") + max_val = float("0.0689249") + mean = float("-0.000164816") + std = float("0.00383836") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [144] + dtype = "float32" + min_val = float("-0.441804") + max_val = float("0.028665") + mean = float("-0.192994") + std = float("0.0888286") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [144] + dtype = "float32" + min_val = float("0.704647") + max_val = float("1.06451") + mean = float("0.920355") + std = float("0.0516476") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [144] + dtype = "float32" + min_val = float("0.00221389") + max_val = float("0.0126339") + mean = float("0.00634418") + std = float("0.00185267") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [144] + dtype = "float32" + min_val = float("-0.037796") + max_val = float("0.0319353") + mean = float("0.00816013") + std = float("0.0133567") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0502432") + max_val = float("0.053736") + mean = float("-0.000306758") + std = float("0.00495078") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [144] + dtype = "float32" + min_val = float("-0.441804") + max_val = float("0.028665") + mean = float("-0.192994") + std = float("0.0888286") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [144] + dtype = "float32" + min_val = float("0.767985") + max_val = float("1.24616") + mean = float("1.05535") + std = float("0.0556624") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [144] + dtype = "float32" + min_val = float("0.00524932") + max_val = float("0.0354973") 
+ mean = float("0.0124491") + std = float("0.00528304") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [144] + dtype = "float32" + min_val = float("-0.104281") + max_val = float("0.0433549") + mean = float("-0.017271") + std = float("0.0183086") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0537528") + max_val = float("0.0682326") + mean = float("-0.000146146") + std = float("0.00381018") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [144] + dtype = "float32" + min_val = float("-0.509343") + max_val = float("0.25213") + mean = float("-0.219535") + std = float("0.129933") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [144] + dtype = "float32" + min_val = float("0.785015") + max_val = float("1.53418") + mean = float("1.02864") + std = float("0.12374") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [144] + dtype = "float32" + min_val = float("0.00862567") + max_val = float("0.0571773") + mean = float("0.0178791") + std = float("0.00917033") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [144] + dtype = "float32" + min_val = float("-0.111979") + max_val = float("0.0136057") + mean = float("-0.0389999") + std = float("0.0203047") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0671493") + max_val = float("0.0824849") + mean = float("-0.000218676") + std = float("0.00431301") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [144] + dtype = "float32" + min_val = float("-0.598583") + max_val = float("0.0570557") + mean = float("-0.155009") + std = float("0.0803566") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [144] + dtype = "float32" + min_val = float("0.873451") + max_val = float("1.41532") + mean = float("1.02822") + std = float("0.071914") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [144] + dtype = "float32" + min_val = float("0.00392816") + max_val = float("0.0241202") + mean = float("0.00791989") + std = float("0.00288505") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [144] + dtype = "float32" + min_val = float("-0.0850175") + max_val = float("0.063316") + mean = float("-0.0234895") + std = float("0.0233601") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [144, 432, 1, 1] + dtype = "float32" + min_val = float("-0.0664166") + max_val = float("0.0782398") + mean = float("-0.000296481") + std = float("0.00608418") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [144] + dtype = "float32" + min_val = float("-0.146243") + max_val = float("0.075235") + mean = float("-0.026676") + std = float("0.0383535") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [144] + dtype = "float32" + min_val = float("0.888091") + max_val = float("1.40929") + mean = float("0.996461") + std = float("0.0558093") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [144] + 
dtype = "float32" + min_val = float("0.00247041") + max_val = float("0.0278519") + mean = float("0.00613474") + std = float("0.00320836") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [144] + dtype = "float32" + min_val = float("-0.0513007") + max_val = float("0.0419876") + mean = float("-0.0117934") + std = float("0.0193503") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [144, 432, 1, 1] + dtype = "float32" + min_val = float("-0.0642335") + max_val = float("0.0683001") + mean = float("-0.00013801") + std = float("0.00524856") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [144] + dtype = "float32" + min_val = float("-0.258062") + max_val = float("0.0158585") + mean = float("-0.0907321") + std = float("0.0530183") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [144] + dtype = "float32" + min_val = float("0.818043") + max_val = float("1.19478") + mean = float("1.02377") + std = float("0.056563") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [144] + dtype = "float32" + min_val = float("0.0060931") + max_val = float("0.0623377") + mean = float("0.0181503") + std = float("0.0094012") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [144] + dtype = "float32" + min_val = float("-0.571436") + max_val = float("0.34374") + mean = float("-0.0108407") + std = float("0.126187") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0410201") + max_val = float("0.048983") + mean = float("-1.72048e-05") + std = float("0.00338842") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [144] + dtype = "float32" + min_val = float("-0.793878") + max_val = float("2.02014") + mean = float("0.408754") + std = float("0.537855") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [144] + dtype = "float32" + min_val = float("0.642031") + max_val = float("1.86401") + mean = float("1.10314") + std = float("0.254887") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [144] + dtype = "float32" + min_val = float("0.00315688") + max_val = float("0.0651796") + mean = float("0.0177255") + std = float("0.0123227") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [144] + dtype = "float32" + min_val = float("-0.211434") + max_val = float("0.214478") + mean = float("-0.0150602") + std = float("0.062693") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.18355") + max_val = float("0.101318") + mean = float("-0.000559139") + std = float("0.0130426") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [72] + dtype = "float32" + min_val = float("-0.596492") + max_val = float("0.496629") + mean = float("0.00880207") + std = float("0.280008") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [72] + dtype = "float32" + min_val = float("0.544805") + max_val = float("1.28262") + mean = float("0.828695") + std = float("0.105308") + data = None + + +class 
Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [72] + dtype = "float32" + min_val = float("0.00121121") + max_val = float("0.0154097") + mean = float("0.00657741") + std = float("0.00405399") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [72] + dtype = "float32" + min_val = float("-0.0445254") + max_val = float("0.0643771") + mean = float("-0.00597889") + std = float("0.0159114") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.079712") + max_val = float("0.0572723") + mean = float("-0.000917905") + std = float("0.00853453") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [72] + dtype = "float32" + min_val = float("-0.596492") + max_val = float("0.496629") + mean = float("0.00880207") + std = float("0.280008") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [72] + dtype = "float32" + min_val = float("0.692696") + max_val = float("1.58855") + mean = float("1.06707") + std = float("0.139152") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [72] + dtype = "float32" + min_val = float("0.00325206") + max_val = float("0.0341689") + mean = float("0.0125694") + std = float("0.00721053") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [72] + dtype = "float32" + min_val = float("-0.202431") + max_val = float("0.069641") + mean = float("-0.0138793") + std = float("0.0479398") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0886597") + max_val = float("0.0870701") + mean = float("-0.000129493") + std = float("0.00706853") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [72] + dtype = "float32" + min_val = float("-0.788096") + max_val = float("0.657236") + mean = float("-0.317593") + std = float("0.294445") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [72] + dtype = "float32" + min_val = float("0.319105") + max_val = float("2.17325") + mean = float("0.870637") + std = float("0.238971") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [72] + dtype = "float32" + min_val = float("0.00358424") + max_val = float("0.0266004") + mean = float("0.0101194") + std = float("0.00504532") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [72] + dtype = "float32" + min_val = float("-0.0783159") + max_val = float("0.0807353") + mean = float("0.0185967") + std = float("0.0324074") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.114122") + max_val = float("0.0910359") + mean = float("-0.00051978") + std = float("0.00807761") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [72] + dtype = "float32" + min_val = float("-0.529687") + max_val = float("0.199379") + mean = float("-0.259751") + std = float("0.171947") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [72] + dtype = "float32" + min_val = float("0.599551") + max_val = float("0.970551") + mean = 
float("0.792683") + std = float("0.0733557") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [72] + dtype = "float32" + min_val = float("0.00301817") + max_val = float("0.0147212") + mean = float("0.00748466") + std = float("0.00212384") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [72] + dtype = "float32" + min_val = float("-0.0505147") + max_val = float("0.0345987") + mean = float("0.00396601") + std = float("0.0159843") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0719846") + max_val = float("0.0561072") + mean = float("-0.00174883") + std = float("0.010313") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [72] + dtype = "float32" + min_val = float("-0.529687") + max_val = float("0.199379") + mean = float("-0.259751") + std = float("0.171947") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [72] + dtype = "float32" + min_val = float("0.706439") + max_val = float("1.22884") + mean = float("0.981247") + std = float("0.110846") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [72] + dtype = "float32" + min_val = float("0.00767675") + max_val = float("0.0447227") + mean = float("0.0168985") + std = float("0.0076087") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [72] + dtype = "float32" + min_val = float("-0.130446") + max_val = float("0.0730463") + mean = float("0.00326599") + std = float("0.0351423") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0927827") + max_val = float("0.118671") + mean = float("-0.000436366") + std = float("0.00816753") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [72] + dtype = "float32" + min_val = float("-0.978319") + max_val = float("0.910199") + mean = float("-0.373348") + std = float("0.382498") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [72] + dtype = "float32" + min_val = float("0.598562") + max_val = float("1.22465") + mean = float("0.867317") + std = float("0.120439") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [72] + dtype = "float32" + min_val = float("0.00336641") + max_val = float("0.0470094") + mean = float("0.00884736") + std = float("0.00728432") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [72] + dtype = "float32" + min_val = float("-0.241371") + max_val = float("0.2411") + mean = float("-0.027551") + std = float("0.0896444") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0886619") + max_val = float("0.0880968") + mean = float("-0.00027759") + std = float("0.0091381") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [72] + dtype = "float32" + min_val = float("-1.03643") + max_val = float("0.87465") + mean = float("-0.149865") + std = float("0.508874") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [72] + dtype = "float32" + min_val = 
float("0.313694") + max_val = float("1.10515") + mean = float("0.655033") + std = float("0.174056") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [72] + dtype = "float32" + min_val = float("0.0053502") + max_val = float("0.0455901") + mean = float("0.0132061") + std = float("0.00705897") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [72] + dtype = "float32" + min_val = float("-0.14339") + max_val = float("0.123691") + mean = float("-0.0055304") + std = float("0.0504267") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [72, 336, 1, 1] + dtype = "float32" + min_val = float("-0.14681") + max_val = float("0.127726") + mean = float("-0.000474756") + std = float("0.0119385") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [72] + dtype = "float32" + min_val = float("-0.120348") + max_val = float("0.412377") + mean = float("0.165721") + std = float("0.110684") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [72] + dtype = "float32" + min_val = float("0.659788") + max_val = float("1.34775") + mean = float("0.850032") + std = float("0.109302") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [72] + dtype = "float32" + min_val = float("0.00159057") + max_val = float("0.0317446") + mean = float("0.00599932") + std = float("0.00340858") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [72] + dtype = "float32" + min_val = float("-0.0880301") + max_val = float("0.074714") + mean = float("-0.00829577") + std = float("0.030976") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [72, 336, 1, 1] + dtype = "float32" + min_val = float("-0.108096") + max_val = float("0.107109") + mean = float("7.87305e-05") + std = float("0.00828471") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [144] + dtype = "float32" + min_val = float("-0.476764") + max_val = float("0.168049") + mean = float("-0.0838514") + std = float("0.124575") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [144] + dtype = "float32" + min_val = float("0.617689") + max_val = float("1.54284") + mean = float("0.785115") + std = float("0.113178") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [144] + dtype = "float32" + min_val = float("0.00602933") + max_val = float("0.0596091") + mean = float("0.0124482") + std = float("0.00635021") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [144] + dtype = "float32" + min_val = float("-0.109409") + max_val = float("0.0390456") + mean = float("-0.0261988") + std = float("0.0242817") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0819936") + max_val = float("0.0914399") + mean = float("-0.000602675") + std = float("0.00882576") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [288] + dtype = "float32" + min_val = float("-0.43398") + max_val = float("0.195139") + mean = float("-0.113323") + std = float("0.0837459") + data = None + + +class Program_weight_tensor_parameter_206: + name = 
"parameter_206" + shape = [288] + dtype = "float32" + min_val = float("0.801149") + max_val = float("1.51552") + mean = float("1.01244") + std = float("0.0942505") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [288] + dtype = "float32" + min_val = float("0.00697899") + max_val = float("0.0426641") + mean = float("0.0125514") + std = float("0.00524946") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [288] + dtype = "float32" + min_val = float("-0.199677") + max_val = float("0.152914") + mean = float("-0.0401824") + std = float("0.042326") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.105612") + max_val = float("0.118139") + mean = float("-0.000716065") + std = float("0.00840771") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [144] + dtype = "float32" + min_val = float("-0.413817") + max_val = float("0.0254389") + mean = float("-0.106277") + std = float("0.066947") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [144] + dtype = "float32" + min_val = float("0.691501") + max_val = float("0.955612") + mean = float("0.877305") + std = float("0.0375328") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [144] + dtype = "float32" + min_val = float("0.00387808") + max_val = float("0.028644") + mean = float("0.00817049") + std = float("0.00276292") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [144] + dtype = "float32" + min_val = float("-0.0443968") + max_val = float("0.042331") + mean = float("-0.0131527") + std = float("0.0192302") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0418187") + max_val = float("0.0336758") + mean = float("-0.000853057") + std = float("0.00590709") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [144] + dtype = "float32" + min_val = float("-0.413817") + max_val = float("0.0254389") + mean = float("-0.106277") + std = float("0.066947") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [144] + dtype = "float32" + min_val = float("0.871658") + max_val = float("1.14399") + mean = float("0.991674") + std = float("0.0442735") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [144] + dtype = "float32" + min_val = float("0.00847572") + max_val = float("0.037261") + mean = float("0.0170862") + std = float("0.00540559") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [144] + dtype = "float32" + min_val = float("-0.0858501") + max_val = float("0.0752316") + mean = float("-0.0215803") + std = float("0.0267547") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0708304") + max_val = float("0.126419") + mean = float("-0.000129981") + std = float("0.00418282") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [144] + dtype = "float32" + min_val = float("-0.503198") + max_val = float("-0.00723544") + mean = float("-0.215277") + std = 
float("0.107333") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [144] + dtype = "float32" + min_val = float("0.837897") + max_val = float("1.40392") + mean = float("1.05848") + std = float("0.0870235") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [144] + dtype = "float32" + min_val = float("0.0204089") + max_val = float("0.114905") + mean = float("0.0339913") + std = float("0.0111585") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [144] + dtype = "float32" + min_val = float("-0.115567") + max_val = float("0.074236") + mean = float("-0.0297392") + std = float("0.0292001") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0638722") + max_val = float("0.094548") + mean = float("-0.00028317") + std = float("0.00489057") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [144] + dtype = "float32" + min_val = float("-0.443227") + max_val = float("0.0300293") + mean = float("-0.20916") + std = float("0.0836857") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [144] + dtype = "float32" + min_val = float("0.796667") + max_val = float("1.16669") + mean = float("0.944169") + std = float("0.0541301") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [144] + dtype = "float32" + min_val = float("0.0023532") + max_val = float("0.0139004") + mean = float("0.00492937") + std = float("0.00150275") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [144] + dtype = "float32" + min_val = float("-0.0396228") + max_val = float("0.0357555") + mean = float("-0.00883661") + std = float("0.0111889") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0446649") + max_val = float("0.0619167") + mean = float("-0.000724838") + std = float("0.00745171") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [144] + dtype = "float32" + min_val = float("-0.443227") + max_val = float("0.0300293") + mean = float("-0.20916") + std = float("0.0836857") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [144] + dtype = "float32" + min_val = float("0.855523") + max_val = float("1.20493") + mean = float("1.00232") + std = float("0.0650819") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [144] + dtype = "float32" + min_val = float("0.00909786") + max_val = float("0.0362024") + mean = float("0.0145287") + std = float("0.00368455") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [144] + dtype = "float32" + min_val = float("-0.0654417") + max_val = float("0.0471346") + mean = float("-0.0150238") + std = float("0.0228131") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0569246") + max_val = float("0.0681235") + mean = float("-0.000190838") + std = float("0.00429518") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [144] + dtype = "float32" + min_val = 
float("-0.619341") + max_val = float("-0.0112904") + mean = float("-0.271335") + std = float("0.107403") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [144] + dtype = "float32" + min_val = float("0.887194") + max_val = float("1.60529") + mean = float("1.03008") + std = float("0.0832458") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [144] + dtype = "float32" + min_val = float("0.0111345") + max_val = float("0.0421569") + mean = float("0.0185696") + std = float("0.00509881") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [144] + dtype = "float32" + min_val = float("-0.196773") + max_val = float("0.0810831") + mean = float("-0.0379885") + std = float("0.0331759") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0581918") + max_val = float("0.0721246") + mean = float("-0.000276431") + std = float("0.00532616") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [144] + dtype = "float32" + min_val = float("-0.67915") + max_val = float("0.300043") + mean = float("-0.249268") + std = float("0.140088") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [144] + dtype = "float32" + min_val = float("0.820156") + max_val = float("1.25926") + mean = float("1.02103") + std = float("0.0835064") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [144] + dtype = "float32" + min_val = float("0.00487426") + max_val = float("0.0195417") + mean = float("0.00798593") + std = float("0.00214001") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [144] + dtype = "float32" + min_val = float("-0.0685193") + max_val = float("0.111677") + mean = float("0.012203") + std = float("0.0263654") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [144, 672, 1, 1] + dtype = "float32" + min_val = float("-0.0518257") + max_val = float("0.0801678") + mean = float("-0.000299396") + std = float("0.00706162") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [144] + dtype = "float32" + min_val = float("-0.219529") + max_val = float("0.482816") + mean = float("0.00625415") + std = float("0.0996225") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [144] + dtype = "float32" + min_val = float("0.943085") + max_val = float("1.31189") + mean = float("1.06741") + std = float("0.0759901") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [144] + dtype = "float32" + min_val = float("0.00430579") + max_val = float("0.0498874") + mean = float("0.00849452") + std = float("0.00419529") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [144] + dtype = "float32" + min_val = float("-0.0734484") + max_val = float("0.0501055") + mean = float("-0.00422856") + std = float("0.0238347") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [144, 672, 1, 1] + dtype = "float32" + min_val = float("-0.283127") + max_val = float("0.126053") + mean = float("-0.000223953") + std = float("0.00673773") + data = None + + +class 
Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [288] + dtype = "float32" + min_val = float("-0.471007") + max_val = float("-0.0768897") + mean = float("-0.24229") + std = float("0.058604") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [288] + dtype = "float32" + min_val = float("0.693872") + max_val = float("1.04898") + mean = float("0.819337") + std = float("0.0537956") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [288] + dtype = "float32" + min_val = float("0.00693365") + max_val = float("0.0444148") + mean = float("0.0112387") + std = float("0.0036165") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [288] + dtype = "float32" + min_val = float("-0.0895123") + max_val = float("0.0744079") + mean = float("-0.0247192") + std = float("0.0210024") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0678711") + max_val = float("0.0563575") + mean = float("-0.000356836") + std = float("0.00561004") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [576] + dtype = "float32" + min_val = float("-0.22392") + max_val = float("0.236737") + mean = float("-0.12655") + std = float("0.0408273") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [576] + dtype = "float32" + min_val = float("0.899743") + max_val = float("1.38442") + mean = float("1.04482") + std = float("0.0445486") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [576] + dtype = "float32" + min_val = float("0.00534322") + max_val = float("0.0242086") + mean = float("0.00935106") + std = float("0.00235956") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [576] + dtype = "float32" + min_val = float("-0.11956") + max_val = float("0.0966195") + mean = float("-0.0385181") + std = float("0.0241664") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0794723") + max_val = float("0.109769") + mean = float("-0.00038882") + std = float("0.00481347") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [288] + dtype = "float32" + min_val = float("-0.23913") + max_val = float("0.268967") + mean = float("-0.0788435") + std = float("0.0530956") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [288] + dtype = "float32" + min_val = float("0.782606") + max_val = float("1.06451") + mean = float("0.949247") + std = float("0.0301924") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [288] + dtype = "float32" + min_val = float("0.00297758") + max_val = float("0.0373329") + mean = float("0.00987717") + std = float("0.0043303") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [288] + dtype = "float32" + min_val = float("-0.0628018") + max_val = float("0.0495571") + mean = float("-0.00912493") + std = float("0.0163036") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0359925") + max_val = float("0.0349088") 
+ mean = float("-0.000152439") + std = float("0.00410565") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [288] + dtype = "float32" + min_val = float("-0.23913") + max_val = float("0.268967") + mean = float("-0.0788435") + std = float("0.0530956") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [288] + dtype = "float32" + min_val = float("0.873184") + max_val = float("1.24138") + mean = float("1.00729") + std = float("0.0416184") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [288] + dtype = "float32" + min_val = float("0.0144312") + max_val = float("0.267786") + mean = float("0.0465309") + std = float("0.0213648") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [288] + dtype = "float32" + min_val = float("-0.278993") + max_val = float("0.0513671") + mean = float("-0.0773057") + std = float("0.0501911") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0445249") + max_val = float("0.067184") + mean = float("-0.000174577") + std = float("0.00185068") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [288] + dtype = "float32" + min_val = float("-0.154725") + max_val = float("0.176405") + mean = float("-0.0451395") + std = float("0.0405051") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [288] + dtype = "float32" + min_val = float("0.906089") + max_val = float("1.22107") + mean = float("1.03843") + std = float("0.0584222") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [288] + dtype = "float32" + min_val = float("0.012856") + max_val = float("0.161846") + mean = float("0.0405644") + std = float("0.0172627") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [288] + dtype = "float32" + min_val = float("-0.188737") + max_val = float("0.116279") + mean = float("-0.0533434") + std = float("0.0531372") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0349189") + max_val = float("0.0583932") + mean = float("-0.000119797") + std = float("0.00228585") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [288] + dtype = "float32" + min_val = float("-0.22913") + max_val = float("0.0550435") + mean = float("-0.0679392") + std = float("0.0398991") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [288] + dtype = "float32" + min_val = float("0.901237") + max_val = float("1.30891") + mean = float("1.04301") + std = float("0.0700643") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [288] + dtype = "float32" + min_val = float("0.0398981") + max_val = float("0.306777") + mean = float("0.105215") + std = float("0.0367725") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [288] + dtype = "float32" + min_val = float("-1.6438") + max_val = float("1.76347") + mean = float("-0.0742338") + std = float("0.501869") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [288, 1152, 1, 1] + dtype = 
"float32" + min_val = float("-0.0739741") + max_val = float("0.0690574") + mean = float("4.74459e-05") + std = float("0.00375253") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [288] + dtype = "float32" + min_val = float("-0.104232") + max_val = float("0.0384892") + mean = float("-0.0070986") + std = float("0.0181476") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [288] + dtype = "float32" + min_val = float("0.900639") + max_val = float("1.15952") + mean = float("0.963662") + std = float("0.0263273") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [288] + dtype = "float32" + min_val = float("0.00287171") + max_val = float("0.0106048") + mean = float("0.0049299") + std = float("0.00108551") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [288] + dtype = "float32" + min_val = float("-0.0988844") + max_val = float("0.0789783") + mean = float("-0.0485279") + std = float("0.0238655") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0304843") + max_val = float("0.0399859") + mean = float("-0.000813718") + std = float("0.00406643") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [288] + dtype = "float32" + min_val = float("-0.104232") + max_val = float("0.0384892") + mean = float("-0.0070986") + std = float("0.0181476") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [288] + dtype = "float32" + min_val = float("0.925735") + max_val = float("1.3113") + mean = float("1.02745") + std = float("0.0619514") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [288] + dtype = "float32" + min_val = float("0.00770713") + max_val = float("0.0389126") + mean = float("0.0204063") + std = float("0.00536096") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [288] + dtype = "float32" + min_val = float("-0.234289") + max_val = float("0.10658") + mean = float("-0.109382") + std = float("0.0469286") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0337794") + max_val = float("0.0422064") + mean = float("-0.000230922") + std = float("0.00195527") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [288] + dtype = "float32" + min_val = float("-0.223666") + max_val = float("0.0286354") + mean = float("-0.0472094") + std = float("0.0307449") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [288] + dtype = "float32" + min_val = float("0.929918") + max_val = float("1.3633") + mean = float("1.05315") + std = float("0.0557061") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [288] + dtype = "float32" + min_val = float("0.011915") + max_val = float("0.0576644") + mean = float("0.0215698") + std = float("0.00513117") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [288] + dtype = "float32" + min_val = float("-0.424449") + max_val = float("0.4419") + mean = float("-0.106397") + std = float("0.0754344") + data = None + + +class 
Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.028501") + max_val = float("0.0366931") + mean = float("-0.000232809") + std = float("0.00238709") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [288] + dtype = "float32" + min_val = float("-0.21661") + max_val = float("0.119111") + mean = float("-0.0631797") + std = float("0.0519387") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [288] + dtype = "float32" + min_val = float("0.987879") + max_val = float("1.24617") + mean = float("1.05608") + std = float("0.0335288") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [288] + dtype = "float32" + min_val = float("0.0167741") + max_val = float("0.063874") + mean = float("0.0243677") + std = float("0.00554264") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [288] + dtype = "float32" + min_val = float("-0.171478") + max_val = float("0.180474") + mean = float("-0.0617349") + std = float("0.0437105") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [288, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0315675") + max_val = float("0.0606433") + mean = float("-0.000353396") + std = float("0.00416745") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [288] + dtype = "float32" + min_val = float("-0.0812952") + max_val = float("0.0354981") + mean = float("-0.00934253") + std = float("0.0173255") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [288] + dtype = "float32" + min_val = float("1.01432") + max_val = float("1.1416") + mean = float("1.08017") + std = float("0.0240114") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [288] + dtype = "float32" + min_val = float("0.0168412") + max_val = float("0.0350895") + mean = float("0.0225807") + std = float("0.00281515") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [288] + dtype = "float32" + min_val = float("-0.136888") + max_val = float("0.0259121") + mean = float("-0.0590415") + std = float("0.025695") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [288, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0220127") + max_val = float("0.0395105") + mean = float("-0.000346125") + std = float("0.00412748") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [768] + dtype = "float32" + min_val = float("-4.17201") + max_val = float("-0.103231") + mean = float("-2.23559") + std = float("0.546417") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [768] + dtype = "float32" + min_val = float("1.67639") + max_val = float("4.7074") + mean = float("3.3138") + std = float("0.317348") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [768] + dtype = "float32" + min_val = float("0.0031352") + max_val = float("0.0144649") + mean = float("0.00530109") + std = float("0.00129942") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [768] + dtype = "float32" + min_val = float("-0.102364") + max_val = float("0.140331") + mean 
= float("-0.0380092") + std = float("0.0222492") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [768, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0579352") + max_val = float("0.0710566") + mean = float("-0.00047174") + std = float("0.00454072") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [576] + dtype = "float32" + min_val = float("-0.0194227") + max_val = float("0.00104465") + mean = float("-0.00108579") + std = float("0.00283685") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.181942") + max_val = float("0.183689") + mean = float("-0.000333769") + std = float("0.00240042") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [288] + dtype = "float32" + min_val = float("-2.05493") + max_val = float("0.978") + mean = float("-0.272319") + std = float("0.353617") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [288] + dtype = "float32" + min_val = float("0.135081") + max_val = float("2.13114") + mean = float("0.537166") + std = float("0.306899") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [288] + dtype = "float32" + min_val = float("7.69798e-05") + max_val = float("0.00216471") + mean = float("0.000305136") + std = float("0.000236517") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [288] + dtype = "float32" + min_val = float("-0.0302303") + max_val = float("0.0570347") + mean = float("0.0184598") + std = float("0.0154999") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0362218") + max_val = float("0.039556") + mean = float("-0.000416452") + std = float("0.00309243") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [288] + dtype = "float32" + min_val = float("-2.05493") + max_val = float("0.978") + mean = float("-0.272319") + std = float("0.353617") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [288] + dtype = "float32" + min_val = float("0.485427") + max_val = float("2.77924") + mean = float("1.1636") + std = float("0.366821") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [288] + dtype = "float32" + min_val = float("0.000702807") + max_val = float("0.00951581") + mean = float("0.00190549") + std = float("0.00103207") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [288] + dtype = "float32" + min_val = float("-0.222082") + max_val = float("0.100345") + mean = float("0.0213653") + std = float("0.0261282") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0292711") + max_val = float("0.035843") + mean = float("-7.17498e-05") + std = float("0.00239497") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [288] + dtype = "float32" + min_val = float("-3.04804") + max_val = float("0.902721") + mean = float("-1.62463") + std = float("0.522032") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = 
[288] + dtype = "float32" + min_val = float("0.412288") + max_val = float("1.85924") + mean = float("1.14297") + std = float("0.183586") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [288] + dtype = "float32" + min_val = float("0.0227719") + max_val = float("0.185075") + mean = float("0.0456612") + std = float("0.0139034") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [288] + dtype = "float32" + min_val = float("-1.05487") + max_val = float("0.350092") + mean = float("-0.153281") + std = float("0.112535") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0310577") + max_val = float("0.0584148") + mean = float("-0.00021026") + std = float("0.00299679") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [288] + dtype = "float32" + min_val = float("-2.14217") + max_val = float("1.60455") + mean = float("-0.41596") + std = float("0.434273") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [288] + dtype = "float32" + min_val = float("0.0226806") + max_val = float("2.20193") + mean = float("0.427501") + std = float("0.286031") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [288] + dtype = "float32" + min_val = float("1.90422e-05") + max_val = float("0.00251891") + mean = float("0.000473033") + std = float("0.00032871") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [288] + dtype = "float32" + min_val = float("-0.0304152") + max_val = float("0.0734532") + mean = float("0.0241604") + std = float("0.0155045") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0253061") + max_val = float("0.0316181") + mean = float("-0.000587413") + std = float("0.00266057") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [288] + dtype = "float32" + min_val = float("-2.14217") + max_val = float("1.60455") + mean = float("-0.41596") + std = float("0.434273") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [288] + dtype = "float32" + min_val = float("0.469583") + max_val = float("2.46723") + mean = float("1.14325") + std = float("0.313639") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [288] + dtype = "float32" + min_val = float("0.00155497") + max_val = float("0.0075944") + mean = float("0.00342237") + std = float("0.00109591") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [288] + dtype = "float32" + min_val = float("-0.166374") + max_val = float("0.142145") + mean = float("0.0385124") + std = float("0.0284128") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0300473") + max_val = float("0.0566634") + mean = float("-0.000115715") + std = float("0.00253983") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [288] + dtype = "float32" + min_val = float("-2.64172") + max_val = float("0.380203") + mean = float("-1.38767") + std = float("0.385623") + data = None + + +class 
Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [288] + dtype = "float32" + min_val = float("0.558043") + max_val = float("1.71262") + mean = float("1.12622") + std = float("0.13931") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [288] + dtype = "float32" + min_val = float("0.0148874") + max_val = float("0.0607992") + mean = float("0.0260852") + std = float("0.00787152") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [288] + dtype = "float32" + min_val = float("-0.824043") + max_val = float("0.12343") + mean = float("-0.10388") + std = float("0.0750312") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [288, 288, 3, 3] + dtype = "float32" + min_val = float("-0.0416163") + max_val = float("0.0562858") + mean = float("-0.000193988") + std = float("0.00280354") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [288] + dtype = "float32" + min_val = float("-3.62765") + max_val = float("2.97036") + mean = float("-0.711269") + std = float("0.761329") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [288] + dtype = "float32" + min_val = float("0.890678") + max_val = float("2.92256") + mean = float("1.6374") + std = float("0.311721") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [288] + dtype = "float32" + min_val = float("0.00239225") + max_val = float("0.00831343") + mean = float("0.00407401") + std = float("0.000948454") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [288] + dtype = "float32" + min_val = float("-0.182535") + max_val = float("0.115867") + mean = float("0.0486434") + std = float("0.031021") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0933941") + max_val = float("0.0753658") + mean = float("-0.000787655") + std = float("0.00567589") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [288] + dtype = "float32" + min_val = float("-2.8628") + max_val = float("0.479748") + mean = float("-0.682305") + std = float("0.57709") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [288] + dtype = "float32" + min_val = float("0.975929") + max_val = float("3.52368") + mean = float("1.80201") + std = float("0.36815") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [288] + dtype = "float32" + min_val = float("0.000804451") + max_val = float("0.00292711") + mean = float("0.00143956") + std = float("0.000325738") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [288] + dtype = "float32" + min_val = float("-0.0342743") + max_val = float("0.0590277") + mean = float("0.0195141") + std = float("0.0151883") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [288, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0373152") + max_val = float("0.0841779") + mean = float("-0.00034827") + std = float("0.004295") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [576] + dtype = "float32" + min_val = float("-2.52101") + max_val = float("0.857915") + mean = 
float("-0.842205") + std = float("0.39859") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [576] + dtype = "float32" + min_val = float("0.488026") + max_val = float("1.95305") + mean = float("0.895161") + std = float("0.179359") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [576] + dtype = "float32" + min_val = float("0.00558504") + max_val = float("0.0454555") + mean = float("0.0102845") + std = float("0.00312323") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [576] + dtype = "float32" + min_val = float("-0.170477") + max_val = float("0.187667") + mean = float("0.0352537") + std = float("0.0479894") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [576, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0260577") + max_val = float("0.0532348") + mean = float("-0.0001041") + std = float("0.00269908") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("-2.62379") + max_val = float("1.27783") + mean = float("-1.09735") + std = float("0.546546") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("0.408486") + max_val = float("1.56047") + mean = float("1.0748") + std = float("0.155167") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [384] + dtype = "float32" + min_val = float("0.00140498") + max_val = float("0.00887108") + mean = float("0.00364747") + std = float("0.000973317") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.173561") + max_val = float("0.131263") + mean = float("-0.0405442") + std = float("0.0419344") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384, 288, 1, 1] + dtype = "float32" + min_val = float("-0.315079") + max_val = float("0.114758") + mean = float("-0.000694619") + std = float("0.00882253") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [288] + dtype = "float32" + min_val = float("-0.0129129") + max_val = float("0.00094621") + mean = float("-0.00331667") + std = float("0.00268758") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.299495") + max_val = float("0.215047") + mean = float("-0.00235806") + std = float("0.0062383") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [144] + dtype = "float32" + min_val = float("-1.89031") + max_val = float("0.682555") + mean = float("-0.250602") + std = float("0.42457") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [144] + dtype = "float32" + min_val = float("-3.39574e-06") + max_val = float("2.27363") + mean = float("0.459077") + std = float("0.465494") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [144] + dtype = "float32" + min_val = float("4.41383e-12") + max_val = float("0.00139051") + mean = float("0.000354333") + std = float("0.000234562") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [144] + dtype = 
"float32" + min_val = float("-0.0453964") + max_val = float("0.0337442") + mean = float("0.00432955") + std = float("0.0128423") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0348046") + max_val = float("0.0712641") + mean = float("-0.000374649") + std = float("0.00443917") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [144] + dtype = "float32" + min_val = float("-1.89031") + max_val = float("0.682555") + mean = float("-0.250602") + std = float("0.42457") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [144] + dtype = "float32" + min_val = float("0.418716") + max_val = float("3.73401") + mean = float("1.30895") + std = float("0.607907") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [144] + dtype = "float32" + min_val = float("0.00100253") + max_val = float("0.00898137") + mean = float("0.00451763") + std = float("0.00157334") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [144] + dtype = "float32" + min_val = float("-0.12794") + max_val = float("0.101272") + mean = float("0.0247504") + std = float("0.0355174") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0346061") + max_val = float("0.0470164") + mean = float("-0.000215707") + std = float("0.0041935") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [144] + dtype = "float32" + min_val = float("-2.66323") + max_val = float("0.362254") + mean = float("-1.26569") + std = float("0.512716") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [144] + dtype = "float32" + min_val = float("0.576539") + max_val = float("1.97695") + mean = float("1.18023") + std = float("0.187262") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [144] + dtype = "float32" + min_val = float("0.0377575") + max_val = float("0.21017") + mean = float("0.0696304") + std = float("0.0235545") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [144] + dtype = "float32" + min_val = float("-2.48186") + max_val = float("1.76252") + mean = float("-0.157181") + std = float("0.322131") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0497725") + max_val = float("0.0492881") + mean = float("-0.000281602") + std = float("0.00478039") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [144] + dtype = "float32" + min_val = float("-1.73734") + max_val = float("0.692917") + mean = float("-0.18539") + std = float("0.413661") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [144] + dtype = "float32" + min_val = float("0.000376458") + max_val = float("2.82934") + mean = float("0.328185") + std = float("0.356839") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [144] + dtype = "float32" + min_val = float("2.39692e-08") + max_val = float("0.0041016") + mean = float("0.000455802") + std = float("0.000479833") + data = None + + +class 
Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [144] + dtype = "float32" + min_val = float("-0.030728") + max_val = float("0.0415527") + mean = float("0.00831477") + std = float("0.0125762") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.051179") + max_val = float("0.0576014") + mean = float("-0.000493177") + std = float("0.00427804") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [144] + dtype = "float32" + min_val = float("-1.73734") + max_val = float("0.692917") + mean = float("-0.18539") + std = float("0.413661") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [144] + dtype = "float32" + min_val = float("0.375666") + max_val = float("3.03489") + mean = float("1.09356") + std = float("0.399713") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [144] + dtype = "float32" + min_val = float("0.00208899") + max_val = float("0.00974547") + mean = float("0.00520382") + std = float("0.0013558") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [144] + dtype = "float32" + min_val = float("-0.0355158") + max_val = float("0.0761709") + mean = float("0.0259472") + std = float("0.0220464") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0387272") + max_val = float("0.0410833") + mean = float("-0.000193716") + std = float("0.00436167") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [144] + dtype = "float32" + min_val = float("-3.02892") + max_val = float("0.118332") + mean = float("-1.27789") + std = float("0.573834") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [144] + dtype = "float32" + min_val = float("0.676134") + max_val = float("1.94767") + mean = float("1.17746") + std = float("0.203827") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [144] + dtype = "float32" + min_val = float("0.0206076") + max_val = float("0.0888994") + mean = float("0.0394175") + std = float("0.0100858") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [144] + dtype = "float32" + min_val = float("-0.408893") + max_val = float("0.265643") + mean = float("-0.0431433") + std = float("0.13118") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0561149") + max_val = float("0.0750932") + mean = float("-0.00029987") + std = float("0.00486889") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [144] + dtype = "float32" + min_val = float("-1.35133") + max_val = float("0.821835") + mean = float("-0.13373") + std = float("0.357825") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [144] + dtype = "float32" + min_val = float("-1.05123e-08") + max_val = float("1.49414") + mean = float("0.18472") + std = float("0.153441") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [144] + dtype = "float32" + min_val = float("4.16705e-17") + max_val = float("0.00240365") + mean = 
float("0.000251688") + std = float("0.000227807") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [144] + dtype = "float32" + min_val = float("-0.047473") + max_val = float("0.0646974") + mean = float("0.00888849") + std = float("0.0132203") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0355803") + max_val = float("0.0307658") + mean = float("-0.000399692") + std = float("0.00394002") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [144] + dtype = "float32" + min_val = float("-1.35133") + max_val = float("0.821835") + mean = float("-0.13373") + std = float("0.357825") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [144] + dtype = "float32" + min_val = float("0.305448") + max_val = float("1.84848") + mean = float("0.89377") + std = float("0.301052") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [144] + dtype = "float32" + min_val = float("0.00208198") + max_val = float("0.00965758") + mean = float("0.00489852") + std = float("0.00141206") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [144] + dtype = "float32" + min_val = float("-0.0282734") + max_val = float("0.114052") + mean = float("0.0403195") + std = float("0.0275993") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0392374") + max_val = float("0.0493932") + mean = float("-0.000231543") + std = float("0.00438483") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [144] + dtype = "float32" + min_val = float("-2.73135") + max_val = float("0.0567831") + mean = float("-1.27362") + std = float("0.5112") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [144] + dtype = "float32" + min_val = float("0.62038") + max_val = float("1.55808") + mean = float("1.10846") + std = float("0.164403") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [144] + dtype = "float32" + min_val = float("0.0113622") + max_val = float("0.0371877") + mean = float("0.0225402") + std = float("0.00518884") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [144] + dtype = "float32" + min_val = float("-0.55178") + max_val = float("0.193574") + mean = float("-0.0453446") + std = float("0.110957") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0546125") + max_val = float("0.0718452") + mean = float("-0.000310657") + std = float("0.00493932") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [144] + dtype = "float32" + min_val = float("-1.92079") + max_val = float("0.644614") + mean = float("-0.125228") + std = float("0.358111") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [144] + dtype = "float32" + min_val = float("6.93466e-11") + max_val = float("1.7663") + mean = float("0.238668") + std = float("0.271207") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [144] + dtype = 
"float32" + min_val = float("3.34966e-19") + max_val = float("0.0106171") + mean = float("0.000715393") + std = float("0.00128468") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [144] + dtype = "float32" + min_val = float("-0.0415258") + max_val = float("0.130452") + mean = float("0.0113087") + std = float("0.0225185") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0911586") + max_val = float("0.0543513") + mean = float("-0.000545815") + std = float("0.00558185") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [144] + dtype = "float32" + min_val = float("-1.92079") + max_val = float("0.644614") + mean = float("-0.125228") + std = float("0.358111") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [144] + dtype = "float32" + min_val = float("0.30759") + max_val = float("1.61305") + mean = float("0.74094") + std = float("0.255891") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [144] + dtype = "float32" + min_val = float("0.00349199") + max_val = float("0.0167988") + mean = float("0.00831531") + std = float("0.00253196") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [144] + dtype = "float32" + min_val = float("-0.0930515") + max_val = float("0.160494") + mean = float("0.0426139") + std = float("0.0448304") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.0865275") + max_val = float("0.0693328") + mean = float("-0.000270166") + std = float("0.00433576") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [144] + dtype = "float32" + min_val = float("-2.46604") + max_val = float("0.311463") + mean = float("-1.11261") + std = float("0.444526") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [144] + dtype = "float32" + min_val = float("0.645192") + max_val = float("1.43977") + mean = float("1.10398") + std = float("0.149059") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [144] + dtype = "float32" + min_val = float("0.00792923") + max_val = float("0.0410344") + mean = float("0.017982") + std = float("0.00594959") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [144] + dtype = "float32" + min_val = float("-0.402604") + max_val = float("0.181192") + mean = float("-0.0350284") + std = float("0.0901153") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [144, 144, 3, 3] + dtype = "float32" + min_val = float("-0.12551") + max_val = float("0.134437") + mean = float("-0.000230808") + std = float("0.00507475") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [144] + dtype = "float32" + min_val = float("-1.63695") + max_val = float("1.61843") + mean = float("0.00914071") + std = float("0.773499") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [144] + dtype = "float32" + min_val = float("0.446498") + max_val = float("1.44574") + mean = float("0.791631") + std = float("0.199487") + data = None + + +class 
Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [144] + dtype = "float32" + min_val = float("0.00803846") + max_val = float("0.0483941") + mean = float("0.0185568") + std = float("0.00763975") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [144] + dtype = "float32" + min_val = float("-0.220237") + max_val = float("0.262194") + mean = float("-0.0333374") + std = float("0.0733832") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.131175") + max_val = float("0.101205") + mean = float("-0.0007374") + std = float("0.00944354") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [144] + dtype = "float32" + min_val = float("-3.94913") + max_val = float("1.48833") + mean = float("0.184232") + std = float("0.812302") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [144] + dtype = "float32" + min_val = float("0.632046") + max_val = float("5.81404") + mean = float("1.61523") + std = float("1.07746") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [144] + dtype = "float32" + min_val = float("0.0042963") + max_val = float("0.0490893") + mean = float("0.0122438") + std = float("0.00606181") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [144] + dtype = "float32" + min_val = float("-0.166662") + max_val = float("0.120288") + mean = float("-0.00996138") + std = float("0.0632725") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [144, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0767773") + max_val = float("0.128459") + mean = float("-0.000375959") + std = float("0.00898229") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [288] + dtype = "float32" + min_val = float("-3.32769") + max_val = float("1.68356") + mean = float("-0.218033") + std = float("0.689316") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [288] + dtype = "float32" + min_val = float("0.644859") + max_val = float("3.56312") + mean = float("1.11815") + std = float("0.319838") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [288] + dtype = "float32" + min_val = float("0.00576419") + max_val = float("0.0664151") + mean = float("0.0156741") + std = float("0.00811449") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [288] + dtype = "float32" + min_val = float("-0.326028") + max_val = float("0.217421") + mean = float("0.0293076") + std = float("0.0862963") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [288, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0789327") + max_val = float("0.0726046") + mean = float("-0.000113775") + std = float("0.00477469") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-2.19954") + max_val = float("1.29755") + mean = float("-0.833953") + std = float("0.661352") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.376407") + max_val = float("1.58497") + mean = 
float("0.954824") + std = float("0.21826") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("0.00123879") + max_val = float("0.0100177") + mean = float("0.00334585") + std = float("0.00120554") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-0.305203") + max_val = float("0.270228") + mean = float("-0.0509251") + std = float("0.0850592") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 144, 1, 1] + dtype = "float32" + min_val = float("-0.147457") + max_val = float("0.125776") + mean = float("-0.000856162") + std = float("0.0147595") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [144] + dtype = "float32" + min_val = float("-0.0128446") + max_val = float("0.00187189") + mean = float("-0.00516629") + std = float("0.00361178") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.231297") + max_val = float("0.222024") + mean = float("-0.0042663") + std = float("0.0116858") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [72] + dtype = "float32" + min_val = float("-1.59453") + max_val = float("0.880938") + mean = float("-0.105095") + std = float("0.484514") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [72] + dtype = "float32" + min_val = float("0.0969578") + max_val = float("2.4756") + mean = float("0.437551") + std = float("0.391975") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [72] + dtype = "float32" + min_val = float("0.000120925") + max_val = float("0.00213528") + mean = float("0.000683241") + std = float("0.000440938") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [72] + dtype = "float32" + min_val = float("-0.0372676") + max_val = float("0.0322531") + mean = float("0.000230328") + std = float("0.0144331") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.0558609") + max_val = float("0.0913162") + mean = float("-0.000461775") + std = float("0.00827768") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [72] + dtype = "float32" + min_val = float("-1.59453") + max_val = float("0.880938") + mean = float("-0.105095") + std = float("0.484514") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [72] + dtype = "float32" + min_val = float("0.351618") + max_val = float("4.88449") + mean = float("1.06806") + std = float("0.665872") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [72] + dtype = "float32" + min_val = float("0.00145915") + max_val = float("0.02274") + mean = float("0.00638047") + std = float("0.00330097") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [72] + dtype = "float32" + min_val = float("-0.0780665") + max_val = float("0.116729") + mean = float("0.0070792") + std = float("0.0386135") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [72, 72, 3, 3] + dtype 
= "float32" + min_val = float("-0.0823999") + max_val = float("0.109201") + mean = float("-0.000361883") + std = float("0.00694661") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [72] + dtype = "float32" + min_val = float("-3.9802") + max_val = float("-0.178539") + mean = float("-1.14275") + std = float("0.563232") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [72] + dtype = "float32" + min_val = float("0.761114") + max_val = float("2.01918") + mean = float("1.00695") + std = float("0.201531") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [72] + dtype = "float32" + min_val = float("0.0233433") + max_val = float("0.211234") + mean = float("0.0493984") + std = float("0.0278563") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [72] + dtype = "float32" + min_val = float("-3.35838") + max_val = float("0.64805") + mean = float("-0.172059") + std = float("0.442294") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0675785") + max_val = float("0.0898198") + mean = float("-0.000478771") + std = float("0.00806884") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [72] + dtype = "float32" + min_val = float("-1.40989") + max_val = float("0.81614") + mean = float("-0.0675083") + std = float("0.400837") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [72] + dtype = "float32" + min_val = float("0.0959584") + max_val = float("1.55629") + mean = float("0.346688") + std = float("0.249308") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [72] + dtype = "float32" + min_val = float("0.000134639") + max_val = float("0.0029982") + mean = float("0.000643173") + std = float("0.000523054") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [72] + dtype = "float32" + min_val = float("-0.0562932") + max_val = float("0.0718023") + mean = float("0.00880714") + std = float("0.0284848") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.059374") + max_val = float("0.0468202") + mean = float("-0.00060072") + std = float("0.00841895") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [72] + dtype = "float32" + min_val = float("-1.40989") + max_val = float("0.81614") + mean = float("-0.0675083") + std = float("0.400837") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [72] + dtype = "float32" + min_val = float("0.277615") + max_val = float("1.97277") + mean = float("0.861421") + std = float("0.350217") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [72] + dtype = "float32" + min_val = float("0.00246978") + max_val = float("0.0141959") + mean = float("0.00528593") + std = float("0.0022058") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [72] + dtype = "float32" + min_val = float("-0.214044") + max_val = float("0.181833") + mean = float("0.0221895") + std = float("0.0597059") + data = None + + +class Program_weight_tensor_parameter_470: + name 
= "parameter_470" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0447981") + max_val = float("0.040427") + mean = float("-0.000399935") + std = float("0.00701009") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [72] + dtype = "float32" + min_val = float("-2.73732") + max_val = float("1.92796") + mean = float("-1.14581") + std = float("0.582464") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [72] + dtype = "float32" + min_val = float("0.270445") + max_val = float("1.8443") + mean = float("0.891096") + std = float("0.213621") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [72] + dtype = "float32" + min_val = float("0.0120423") + max_val = float("0.0631383") + mean = float("0.0225737") + std = float("0.00801368") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [72] + dtype = "float32" + min_val = float("-0.570576") + max_val = float("0.529122") + mean = float("-0.0639033") + std = float("0.165627") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.066849") + max_val = float("0.0819528") + mean = float("-0.000501507") + std = float("0.00812549") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [72] + dtype = "float32" + min_val = float("-1.42901") + max_val = float("0.650624") + mean = float("-0.067694") + std = float("0.355642") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [72] + dtype = "float32" + min_val = float("0.0695595") + max_val = float("1.96318") + mean = float("0.298781") + std = float("0.2518") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [72] + dtype = "float32" + min_val = float("0.000165661") + max_val = float("0.00254699") + mean = float("0.000671496") + std = float("0.000381215") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [72] + dtype = "float32" + min_val = float("-0.0852135") + max_val = float("0.0739923") + mean = float("0.0145054") + std = float("0.0282282") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.061587") + max_val = float("0.0555039") + mean = float("-0.00111145") + std = float("0.00915049") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [72] + dtype = "float32" + min_val = float("-1.42901") + max_val = float("0.650624") + mean = float("-0.067694") + std = float("0.355642") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [72] + dtype = "float32" + min_val = float("0.228885") + max_val = float("2.65725") + mean = float("0.654562") + std = float("0.342676") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [72] + dtype = "float32" + min_val = float("0.00221666") + max_val = float("0.0113837") + mean = float("0.00528644") + std = float("0.00192438") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [72] + dtype = "float32" + min_val = float("-0.0689213") + max_val = float("0.134873") + mean = float("0.0202346") + std = float("0.0478195") + data = None 
+ + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0502643") + max_val = float("0.0385429") + mean = float("-0.000381721") + std = float("0.00699717") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [72] + dtype = "float32" + min_val = float("-1.72935") + max_val = float("1.81323") + mean = float("-0.953972") + std = float("0.484557") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [72] + dtype = "float32" + min_val = float("0.291338") + max_val = float("1.67102") + mean = float("0.890264") + std = float("0.154972") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [72] + dtype = "float32" + min_val = float("0.00650387") + max_val = float("0.0314521") + mean = float("0.0153011") + std = float("0.00489433") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [72] + dtype = "float32" + min_val = float("-0.371426") + max_val = float("0.24566") + mean = float("-0.0388813") + std = float("0.125483") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0883656") + max_val = float("0.0854602") + mean = float("-0.000434419") + std = float("0.00817123") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [72] + dtype = "float32" + min_val = float("-0.72078") + max_val = float("0.53883") + mean = float("-0.0116229") + std = float("0.315004") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [72] + dtype = "float32" + min_val = float("0.0630193") + max_val = float("1.13622") + mean = float("0.304108") + std = float("0.18516") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [72] + dtype = "float32" + min_val = float("0.000596299") + max_val = float("0.0104308") + mean = float("0.00238262") + std = float("0.00179682") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [72] + dtype = "float32" + min_val = float("-0.0220883") + max_val = float("0.0767462") + mean = float("0.00800126") + std = float("0.0185227") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.10925") + max_val = float("0.0549459") + mean = float("-0.00157008") + std = float("0.0103222") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [72] + dtype = "float32" + min_val = float("-0.72078") + max_val = float("0.53883") + mean = float("-0.0116229") + std = float("0.315004") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [72] + dtype = "float32" + min_val = float("0.195354") + max_val = float("1.27903") + mean = float("0.580385") + std = float("0.250193") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [72] + dtype = "float32" + min_val = float("0.00628037") + max_val = float("0.0320368") + mean = float("0.0150653") + std = float("0.00543441") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [72] + dtype = "float32" + min_val = float("-0.173879") + max_val = float("0.126666") + mean = 
float("0.0195401") + std = float("0.0459446") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.0489714") + max_val = float("0.0583292") + mean = float("-0.000497285") + std = float("0.00710222") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [72] + dtype = "float32" + min_val = float("-2.83412") + max_val = float("0.704945") + mean = float("-0.732679") + std = float("0.560659") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [72] + dtype = "float32" + min_val = float("0.470798") + max_val = float("3.05163") + mean = float("1.0155") + std = float("0.305246") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [72] + dtype = "float32" + min_val = float("0.00353406") + max_val = float("0.0255573") + mean = float("0.0103341") + std = float("0.00485984") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [72] + dtype = "float32" + min_val = float("-0.318927") + max_val = float("0.403846") + mean = float("-0.0481203") + std = float("0.111077") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [72, 72, 3, 3] + dtype = "float32" + min_val = float("-0.140292") + max_val = float("0.149074") + mean = float("-0.000235178") + std = float("0.00845115") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [72] + dtype = "float32" + min_val = float("-3.74716") + max_val = float("2.00205") + mean = float("0.294505") + std = float("0.800208") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [72] + dtype = "float32" + min_val = float("0.251599") + max_val = float("2.95243") + mean = float("0.505656") + std = float("0.340486") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [72] + dtype = "float32" + min_val = float("0.00662379") + max_val = float("0.0404531") + mean = float("0.0152965") + std = float("0.00696086") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [72] + dtype = "float32" + min_val = float("-0.337189") + max_val = float("0.326696") + mean = float("-0.0315403") + std = float("0.110722") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [72, 144, 1, 1] + dtype = "float32" + min_val = float("-0.140141") + max_val = float("0.0942677") + mean = float("-0.00109115") + std = float("0.0156176") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [72] + dtype = "float32" + min_val = float("-5.40533") + max_val = float("2.19476") + mean = float("0.480097") + std = float("1.18405") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [72] + dtype = "float32" + min_val = float("0.407901") + max_val = float("7.10951") + mean = float("1.68326") + std = float("1.34462") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [72] + dtype = "float32" + min_val = float("0.00341379") + max_val = float("0.0737263") + mean = float("0.0131538") + std = float("0.0117571") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [72] + dtype = "float32" + min_val = 
float("-0.254707") + max_val = float("0.243852") + mean = float("0.0114279") + std = float("0.121176") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [72, 144, 1, 1] + dtype = "float32" + min_val = float("-0.156741") + max_val = float("0.189555") + mean = float("-0.000167284") + std = float("0.0152826") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [144] + dtype = "float32" + min_val = float("-2.47799") + max_val = float("2.78512") + mean = float("-0.0256035") + std = float("0.855982") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [144] + dtype = "float32" + min_val = float("0.475945") + max_val = float("3.78899") + mean = float("0.948111") + std = float("0.382821") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [144] + dtype = "float32" + min_val = float("0.00505316") + max_val = float("0.222987") + mean = float("0.0272953") + std = float("0.0270536") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [144] + dtype = "float32" + min_val = float("-0.374849") + max_val = float("0.421126") + mean = float("-0.040238") + std = float("0.109009") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [144, 96, 3, 3] + dtype = "float32" + min_val = float("-0.107682") + max_val = float("0.0970517") + mean = float("-0.000288181") + std = float("0.00781599") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [96] + dtype = "float32" + min_val = float("-2.31785") + max_val = float("1.2856") + mean = float("-0.536223") + std = float("0.66831") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [96] + dtype = "float32" + min_val = float("0.363377") + max_val = float("2.87595") + mean = float("0.945544") + std = float("0.309798") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [96] + dtype = "float32" + min_val = float("0.000401939") + max_val = float("0.0066261") + mean = float("0.00193929") + std = float("0.00108543") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [96] + dtype = "float32" + min_val = float("-0.276558") + max_val = float("0.293873") + mean = float("0.032138") + std = float("0.0824672") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [96, 72, 1, 1] + dtype = "float32" + min_val = float("-0.220262") + max_val = float("0.154706") + mean = float("-0.000674063") + std = float("0.0231789") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [72] + dtype = "float32" + min_val = float("-0.0194254") + max_val = float("-0.00185158") + mean = float("-0.00904749") + std = float("0.00476033") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [72, 72, 1, 1] + dtype = "float32" + min_val = float("-0.365295") + max_val = float("0.215913") + mean = float("-0.0105827") + std = float("0.0212646") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [36] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [36, 36, 1, 1] + dtype = "float32" + min_val = float("-0.13914") + max_val = float("0.0710278") + mean = float("-0.00122495") + std = float("0.0155312") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.0864069") + max_val = float("0.0715444") + mean = float("-0.000368309") + std = float("0.0121821") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.139936") + max_val = float("0.126155") + mean = float("-0.000357288") + std = float("0.0135344") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [36, 36, 1, 1] + dtype = "float32" + min_val = float("-0.101119") + max_val = float("0.078908") + mean = float("-0.00143404") + std = float("0.0191301") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [36] + dtype = "float32" + min_val = 
float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.0981736") + max_val = float("0.072635") + mean = float("-0.000893312") + std = float("0.0128138") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [36, 36, 3, 3] + dtype = "float32" + min_val = float("-0.138589") + max_val = float("0.124213") + mean = float("-0.000371635") + std = float("0.0148638") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [36, 72, 1, 1] + dtype = "float32" + min_val = float("-0.174523") + max_val = float("0.146394") + mean = float("-0.0022875") + std = float("0.0249619") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [36] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [36, 72, 1, 1] + dtype = "float32" + min_val = float("-0.141728") + max_val = float("0.14303") + mean = 
float("-0.000661605") + std = float("0.0238487") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [72] + dtype = "float32" + min_val = float("-1.42602") + max_val = float("3.23089") + mean = float("0.695169") + std = float("1.23395") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [72] + dtype = "float32" + min_val = float("1.06748") + max_val = float("4.16245") + mean = float("1.98371") + std = float("0.773369") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [72] + dtype = "float32" + min_val = float("0.463827") + max_val = float("20.7924") + mean = float("2.40174") + std = float("2.50279") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [72] + dtype = "float32" + min_val = float("-1.94543") + max_val = float("3.15499") + mean = float("-0.190338") + std = float("0.839007") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [72, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0880297") + max_val = float("0.132734") + mean = float("-0.000246572") + std = float("0.0130906") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [48, 24, 3, 3] + dtype = "float32" + min_val = float("-0.190405") + max_val = float("0.141206") + mean = float("-0.000379777") + std = float("0.021606") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [24, 24, 3, 3] + dtype = "float32" + min_val = float("-0.259253") + max_val = float("0.269373") + mean = float("-0.000405256") + std = float("0.03018") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [24] + dtype = "float32" 
+ min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [24] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [24, 3, 3, 3] + dtype = "float32" + min_val = float("-0.195438") + max_val = float("0.245061") + mean = float("-0.000197873") + std = float("0.0621767") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/graph_hash.txt similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/graph_hash.txt rename to paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/graph_hash.txt diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/graph_net.json similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_10/graph_net.json rename to paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/graph_net.json diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/model.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_16/model.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/model.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_15/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_5/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt index 21b305551..0f1417140 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/graph_hash.txt @@ -1 +1 @@ -700c99cae481b4de7b4ae0500e225ae03e4708238020ea1d75b3fa409c1ef3e9 \ No newline at end of file +4e12c05fa6e6a5e1e133bd3dbc35b49e9bfa6622e2eadfc18e6c6100e8b158af \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py index 23b4d80c0..cca2497dd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/input_meta.py @@ -1,28 +1,38 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [8, 10164, 4] - dtype = "float32" - min_val = float("0.01") - max_val = float("0.01") - mean = float("0.01") - std = float("2.79397e-09") - data = None + shape = [] + dtype = "int64" + data = [1] class Program_weight_tensor_data_1: name = "data_1" - shape = [8, 10164] - dtype = "int32" - min_val = 0 - max_val = 4 + shape = [8, 10164, 68] + dtype = "float32" + min_val = float("-7.58872") + max_val = float("15.3673") + mean = float("2.68906e-05") + std = float("1.599") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [8, 10164, 4] + shape = [10164, 
2] + dtype = "float32" + min_val = float("4.0") + max_val = float("700.0") + mean = float("352.0") + std = float("203.197") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [10164, 1] dtype = "float32" - max_val = float("0.941962") - mean = float("0.000134148") - std = float("0.0107789") + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py index dbaa99912..2991b2954 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/model.py @@ -5,106 +5,184 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("5"), paddle.int32, paddle.core.CPUPlace() - ) + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 - # pd_op.one_hot: (8x10164x5xf32) <- (8x10164xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - data_1 % paddle.cast(full_0, data_1.dtype), full_0 - ) - del data_1, full_0 + # pd_op.shape64: (3xi64) <- (8x-1x68xf32) + shape64_0 = paddle._C_ops.shape64(data_1) # pd_op.full_int_array: (1xi64) <- () full_int_array_0 = [0] # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + full_int_array_1 = [1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 - # pd_op.slice: (8x10164x4xf32) <- (8x10164x5xf32, 1xi64, 1xi64) + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] ) - del full_int_array_0, full_int_array_1, one_hot_0 + del full_int_array_0 - # pd_op.pow: (8x10164x4xf32) <- (8x10164x4xf32) - pow_0 = paddle._C_ops.pow(data_0, float("2")) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [2] - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] ) - # pd_op.scale: (8x10164x4xf32) <- (8x10164x4xf32, 1xf32) - scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) - del pow_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [3] - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] ) + del full_int_array_2, full_int_array_3, shape64_0 - # pd_op.scale: (8x10164x4xf32) <- (8x10164x4xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) - del full_2 + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) - multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) - multiply_1 = 
paddle._C_ops.multiply(data_2, slice_0) - del slice_0 + # pd_op.full: (xi64) <- () + full_2 = paddle._C_ops.full( + [], float("17"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.add: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) - add_0 = paddle._C_ops.add(multiply_0, multiply_1) + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 - # pd_op.bce_loss: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) - bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) - del data_0 + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 - # pd_op.multiply: (8x10164x4xf32) <- (8x10164x4xf32, 8x10164x4xf32) - multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + # pd_op.reshape: (-1x-1x4x17xf32) <- (8x-1x68xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_2 = [] + # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 - # pd_op.sum: (xf32) <- (8x10164x4xf32, 0xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - # pd_op.sum: (xf32) <- (8x10164x4xf32, 0xi64) - sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) - del data_2 + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 - # pd_op.full: (1xf32) <- () + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 + + # pd_op.full: (1xi32) <- () full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [1], float("2"), paddle.int32, paddle.core.CPUPlace() ) + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 + + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + # pd_op.full: (1xf32) <- () full_4 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) + + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) - del full_3, full_4, sum_1 + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] + + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("30"), paddle.int64, 
paddle.framework._current_expected_place() + ) + + # pd_op.less_than: (xb) <- (xi64, xi64) + less_than_0 = paddle._C_ops.less_than(data_0, full_6) + del data_0, full_6 + + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) + del less_than_0 + + # pd_op.full: (xi64) <- () + full_7 = paddle._C_ops.full( + [], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) + del cast_0 + + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 + + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(cast_1, full_7) + del cast_1, full_7 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__0 = concat_0.detach() - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_0, clip_0) + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__0, data_3) del ( add_0, - bce_loss_0, - clip_0, - full_1, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, scale_0, - scale_1, - sum_0, + share_data__0, + softmax_0, + split_1, + transpose_0, ) - return divide_0 + return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py index 8b1378917..28198680e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_6/weight_meta.py @@ -1 +1,7 @@ - +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 17, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt deleted file mode 100644 index 0f1417140..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -4e12c05fa6e6a5e1e133bd3dbc35b49e9bfa6622e2eadfc18e6c6100e8b158af \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py deleted file mode 100644 index cca2497dd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/input_meta.py +++ /dev/null @@ -1,38 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [1] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8, 10164, 68] - dtype = "float32" - min_val = float("-7.58872") - max_val = float("15.3673") - mean = float("2.68906e-05") - std = float("1.599") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [10164, 2] - dtype = "float32" - min_val = float("4.0") - max_val = float("700.0") - mean = float("352.0") - std = float("203.197") - 
data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [10164, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py deleted file mode 100644 index 2991b2954..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/model.py +++ /dev/null @@ -1,188 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2, data_3): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_2, data_3) - del data_2 - - # pd_op.shape64: (3xi64) <- (8x-1x68xf32) - shape64_0 = paddle._C_ops.shape64(data_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("17"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x17xf32) <- (8x-1x68xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_1, stack_0) - del data_1, stack_0 - - # pd_op.softmax: (-1x-1x4x17xf32) <- (-1x-1x4x17xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x17x-1x4xf32) <- (-1x-1x4x17xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x17x-1x4xf32, 1x17x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - 
del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("30"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.less_than: (xb) <- (xi64, xi64) - less_than_0 = paddle._C_ops.less_than(data_0, full_6) - del data_0, full_6 - - # pd_op.cast: (xi64) <- (xb) - cast_0 = paddle._C_ops.cast(less_than_0, paddle.int64) - del less_than_0 - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(cast_0, full_7) - del cast_0 - - # pd_op.cast: (xi64) <- (xb) - cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) - del not_equal_0 - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_0 = paddle._C_ops.equal(cast_1, full_7) - del cast_1, full_7 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__0 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__0, data_3) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_3, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__0, - softmax_0, - split_1, - transpose_0, - ) - - return multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt deleted file mode 100644 index 88f716dff..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -839bed95f06a549ca0a6c49aa3c1a018fbd7c4f0023cedf35760437922761076 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json deleted file mode 100644 index 1c6cb32da..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus-M", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py deleted file mode 100644 index d07b6549b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/model.py +++ /dev/null @@ -1,34 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0): - # pd_op.full: (xi32) <- () - full_0 = paddle._C_ops.full( - [], float("4"), paddle.int32, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: 
(8x10164xb) <- (8x10164xi32, xi32) - not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) - del data_0, full_0 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_0 = [] - - # pd_op.sum: (xi64) <- (8x10164xb, 0xi64) - sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) - del full_int_array_0 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xi64, xi64) - greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) - del full_1, not_equal_0, sum_0 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_16/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-L/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_8/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-L/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_7/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-M/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_9/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-M/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-M/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_8/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_1/shape_patches_PP-YOLOE_plus-M/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-L/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_6/input_meta.py rename to 
paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-L/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-M/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_14/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-M/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-M/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_11/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_16/shape_patches_PP-YOLOE_plus-M/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/shape_patches_PP-YOLOE_plus_SOD-S/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/shape_patches_PP-YOLOE_plus_SOD-S/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/shape_patches_PP-YOLOE_plus_SOD-S/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_17/shape_patches_PP-YOLOE_plus_SOD-S/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/input_meta.py new file mode 100644 index 000000000..8b3874e61 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 768, 14, 14] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.38042") + mean = float("0.198977") + std = float("0.492361") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 384, 28, 28] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.58115") + mean = float("0.212227") + std = float("0.589868") + data = None + + +class Program_weight_tensor_data_8: + 
name = "data_8" + shape = [2, 192, 56, 56] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("8.27989") + mean = float("0.302059") + std = float("0.556663") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/weight_meta.py new file mode 100644 index 000000000..e37ea3f3b --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_human/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0120063") + max_val = float("0.0335161") + mean = float("1.74565e-07") + std = float("0.00787881") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 192, 3, 3] + dtype = "float32" + min_val = float("-0.168682") + max_val = float("0.167259") + mean = float("7.71688e-08") + std = float("0.00779909") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.128229") + max_val = float("0.242306") + mean = float("0.03158") + std = float("0.0650427") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.74995") + max_val = float("1.60753") + mean = float("1.16227") + std = float("0.142938") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000266157") + max_val = float("0.0040105") + mean = float("0.0010684") + std = float("0.000708224") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0377588") + max_val = float("0.0284686") + mean = float("-0.00327812") + std = float("0.0104515") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.08141") + max_val = float("0.0877749") + mean = float("-0.00031094") + std = float("0.00761982") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00811822") + max_val = float("0.0146164") + mean = float("-0.000174084") + std = float("0.00426814") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0116131") + max_val = float("0.0197689") + mean = float("-0.000103125") + std = float("0.00169022") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [1, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0678163") + max_val = float("0.0273811") + mean = float("-0.000125764") + std = float("0.00819254") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.461256") + max_val = float("0.536796") + mean = float("0.114829") + std = float("0.141718") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("0.859577") + max_val = 
float("1.50378") + mean = float("1.09021") + std = float("0.0873597") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000431253") + max_val = float("0.0137074") + mean = float("0.00230506") + std = float("0.00181221") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.15411") + max_val = float("0.0351219") + mean = float("-0.0393248") + std = float("0.0376471") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0575188") + max_val = float("0.0739459") + mean = float("-0.00110288") + std = float("0.0076522") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.00397738") + max_val = float("0.0119464") + mean = float("-0.000221189") + std = float("0.00218333") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0524122") + max_val = float("0.0693195") + mean = float("-9.53701e-05") + std = float("0.00173109") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.00573793") + max_val = float("0.0217784") + mean = float("1.54891e-07") + std = float("0.00541763") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0981024") + max_val = float("0.124946") + mean = float("4.94838e-08") + std = float("0.00515546") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.0868715") + max_val = float("0.0669849") + mean = float("0.0121538") + std = float("0.0210256") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.90122") + max_val = float("1.2108") + mean = float("1.07141") + std = float("0.0473601") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000115831") + max_val = float("0.00541574") + mean = float("0.000753911") + std = float("0.000632097") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0423513") + max_val = float("0.0117177") + mean = float("-0.00494504") + std = float("0.00661737") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0495158") + max_val = float("0.0695034") + mean = float("-0.000125359") + std = float("0.00350255") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00583976") + max_val = float("0.00760424") + mean = float("1.90588e-05") + std = float("0.0021531") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00305361") + max_val = float("0.00596843") + mean = float("-3.93664e-05") + std = float("0.000601011") + data = None + + +class 
Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [1, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0633952") + max_val = float("0.0250402") + mean = float("-0.000143176") + std = float("0.00571183") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.167044") + max_val = float("0.241629") + mean = float("0.0667936") + std = float("0.0667169") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.949533") + max_val = float("1.31274") + mean = float("1.04702") + std = float("0.0484413") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000357813") + max_val = float("0.020638") + mean = float("0.00383275") + std = float("0.00340836") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0843473") + max_val = float("0.02507") + mean = float("-0.027169") + std = float("0.0211633") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0580925") + max_val = float("0.0555854") + mean = float("-0.000603587") + std = float("0.00353173") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00347224") + max_val = float("0.014402") + mean = float("-4.23883e-05") + std = float("0.0014551") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0163252") + max_val = float("0.0101556") + mean = float("-4.84765e-05") + std = float("0.000584142") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00369389") + max_val = float("0.00674372") + mean = float("1.39786e-07") + std = float("0.00287983") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0366324") + max_val = float("0.0462164") + mean = float("-3.25235e-09") + std = float("0.00290201") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0688838") + max_val = float("0.0553698") + mean = float("-0.000947437") + std = float("0.0161684") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("0.964207") + max_val = float("1.26838") + mean = float("1.05105") + std = float("0.0346631") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.12347e-05") + max_val = float("0.00203003") + mean = float("0.000380075") + std = float("0.00026984") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0153051") + max_val = float("0.0154829") + mean = float("-0.00229315") + std = float("0.00345522") + 
data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0373301") + max_val = float("0.0386579") + mean = float("-3.68871e-05") + std = float("0.00154547") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00311382") + max_val = float("0.00471731") + mean = float("1.99403e-05") + std = float("0.00111448") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.003459") + max_val = float("0.00512325") + mean = float("-2.05348e-05") + std = float("0.000302259") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [1, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0278575") + max_val = float("0.0206799") + mean = float("0.000226758") + std = float("0.00242893") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.270176") + max_val = float("0.251616") + mean = float("0.00962775") + std = float("0.0522785") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("0.914105") + max_val = float("1.28201") + mean = float("1.03121") + std = float("0.0519879") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("6.12283e-05") + max_val = float("0.00858465") + mean = float("0.00112119") + std = float("0.000817386") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0881042") + max_val = float("0.0255774") + mean = float("-0.0239762") + std = float("0.0175392") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0500488") + max_val = float("0.0289173") + mean = float("-0.000294642") + std = float("0.0016182") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.0131155") + max_val = float("0.00722008") + mean = float("-3.7653e-05") + std = float("0.000847186") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.026325") + max_val = float("0.0506588") + mean = float("7.82541e-06") + std = float("0.000366702") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/input_meta.py new file mode 100644 index 000000000..7bd7807fe --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [14] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] 
+ dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [28] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [56] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 768, 14, 14] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.1806") + mean = float("0.192012") + std = float("0.569649") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 384, 28, 28] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("25.4887") + mean = float("0.232567") + std = float("0.845376") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 192, 56, 56] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("28.7635") + mean = float("0.345133") + std = float("0.91854") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py similarity index 52% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py index 42b452682..c7e0aa45d 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_9/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-L_vehicle/weight_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [68] dtype = "float32" - min_val = float("-0.00994345") - max_val = float("0.0295467") - mean = float("1.85188e-07") - std = float("0.00657184") + min_val = float("-0.00925576") + max_val = float("0.0373948") + mean = float("8.72824e-08") + std = float("0.00862349") data = None @@ -13,10 +13,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [68, 192, 3, 3] dtype = "float32" - min_val = float("-0.132052") - max_val = float("0.152993") - mean = float("5.82659e-08") - std = float("0.00697617") + min_val = float("-0.143827") + max_val = float("0.168304") + mean = float("5.68543e-08") + std = float("0.00711119") data = None @@ -24,10 +24,10 @@ class Program_weight_tensor_parameter_2: name = "parameter_2" shape = [192] dtype = "float32" - min_val = float("-0.0439922") - max_val = float("0.203765") - mean = float("0.0504072") - std = float("0.0395669") + min_val = float("-0.0740156") + max_val = float("0.254426") + mean = float("0.0633438") + std = float("0.0574767") data = None @@ -35,10 +35,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [192] dtype = "float32" - min_val = float("0.852608") - max_val = float("1.61916") - mean = float("1.21933") - std = float("0.143034") + min_val = float("0.838238") + max_val = float("1.78552") + mean = float("1.2918") + std = float("0.191195") data = None @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [192] dtype = "float32" - min_val = float("0.000117627") - max_val = float("0.0026124") - mean = float("0.000404459") - std = float("0.000312994") + min_val = float("0.000822901") + max_val = float("0.063123") + mean = float("0.0112077") + std = float("0.00842708") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [192] dtype = 
"float32" - min_val = float("-0.0352285") - max_val = float("0.029767") - mean = float("-0.0034756") - std = float("0.0106549") + min_val = float("-0.115789") + max_val = float("0.122016") + mean = float("-0.00147854") + std = float("0.0384486") data = None @@ -68,10 +68,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0519368") - max_val = float("0.075145") - mean = float("-0.000115247") - std = float("0.00540221") + min_val = float("-0.0602355") + max_val = float("0.0853349") + mean = float("-0.000554639") + std = float("0.00702224") data = None @@ -79,10 +79,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [192] dtype = "float32" - min_val = float("-0.00467515") - max_val = float("0.00851991") - mean = float("3.17583e-05") - std = float("0.00259113") + min_val = float("-0.00462773") + max_val = float("0.0060156") + mean = float("4.24223e-05") + std = float("0.0019278") data = None @@ -90,10 +90,10 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.00531914") - max_val = float("0.00943649") - mean = float("-9.32844e-05") - std = float("0.00138252") + min_val = float("-0.0100771") + max_val = float("0.01135") + mean = float("-7.39524e-06") + std = float("0.00128718") data = None @@ -110,6 +110,10 @@ class Program_weight_tensor_parameter_10: name = "parameter_10" shape = [4, 192, 3, 3] dtype = "float32" + min_val = float("-0.344983") + max_val = float("0.0528926") + mean = float("-0.0184469") + std = float("0.0468232") data = None @@ -117,10 +121,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [192] dtype = "float32" - min_val = float("-0.327098") - max_val = float("0.890395") - mean = float("0.357592") - std = float("0.269366") + min_val = float("-0.445875") + max_val = float("1.50051") + mean = float("0.404315") + std = float("0.335424") data = None @@ -128,10 +132,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [192] dtype = "float32" - min_val = float("1.01729") - max_val = float("1.7703") - mean = float("1.31569") - std = float("0.141051") + min_val = float("0.959458") + max_val = float("2.22908") + mean = float("1.3733") + std = float("0.176915") data = None @@ -139,10 +143,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [192] dtype = "float32" - min_val = float("0.000191938") - max_val = float("0.00371493") - mean = float("0.00070204") - std = float("0.000534277") + min_val = float("0.00434037") + max_val = float("26.8554") + mean = float("0.524987") + std = float("2.35391") data = None @@ -150,10 +154,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [192] dtype = "float32" - min_val = float("-0.172109") - max_val = float("0.0388642") - mean = float("-0.0248015") - std = float("0.0310403") + min_val = float("-0.548344") + max_val = float("2.67378") + mean = float("0.0899683") + std = float("0.32715") data = None @@ -161,10 +165,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0756384") - max_val = float("0.068676") - mean = float("-0.0005045") - std = float("0.00650538") + min_val = float("-0.725619") + max_val = float("0.454136") + mean = float("0.00160697") + std = float("0.0255239") data = None @@ -172,10 +176,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [192] 
dtype = "float32" - min_val = float("-0.00462125") - max_val = float("0.00952984") - mean = float("-0.000108344") - std = float("0.00180966") + min_val = float("-0.0117797") + max_val = float("0.0160663") + mean = float("-8.90954e-05") + std = float("0.00280426") data = None @@ -183,10 +187,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0166403") - max_val = float("0.015016") - mean = float("-1.40574e-05") - std = float("0.00152367") + min_val = float("-0.0350776") + max_val = float("0.0312972") + mean = float("2.94677e-05") + std = float("0.00183097") data = None @@ -194,10 +198,10 @@ class Program_weight_tensor_parameter_18: name = "parameter_18" shape = [68] dtype = "float32" - min_val = float("-0.00413441") - max_val = float("0.0247658") - mean = float("1.70403e-07") - std = float("0.00515374") + min_val = float("-0.00469375") + max_val = float("0.0194489") + mean = float("6.58692e-08") + std = float("0.00452283") data = None @@ -205,10 +209,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [68, 384, 3, 3] dtype = "float32" - min_val = float("-0.0879382") - max_val = float("0.115543") - mean = float("3.09883e-08") - std = float("0.0046703") + min_val = float("-0.082678") + max_val = float("0.109438") + mean = float("3.68454e-08") + std = float("0.00410717") data = None @@ -216,10 +220,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [384] dtype = "float32" - min_val = float("-0.00504264") - max_val = float("0.0677623") - mean = float("0.025296") - std = float("0.012925") + min_val = float("-0.0150875") + max_val = float("0.107487") + mean = float("0.0329244") + std = float("0.0178576") data = None @@ -227,10 +231,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [384] dtype = "float32" - min_val = float("0.998659") - max_val = float("1.23249") - mean = float("1.10437") - std = float("0.040582") + min_val = float("1.00854") + max_val = float("1.29001") + mean = float("1.14543") + std = float("0.0510595") data = None @@ -238,10 +242,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [384] dtype = "float32" - min_val = float("6.8383e-05") - max_val = float("0.00279866") - mean = float("0.000303046") - std = float("0.000306114") + min_val = float("0.000224402") + max_val = float("0.554167") + mean = float("0.0237249") + std = float("0.0506673") data = None @@ -249,10 +253,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [384] dtype = "float32" - min_val = float("-0.0400502") - max_val = float("0.0131297") - mean = float("-0.00632822") - std = float("0.00736343") + min_val = float("-0.10498") + max_val = float("0.125234") + mean = float("-0.0096529") + std = float("0.0315707") data = None @@ -260,10 +264,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0493892") - max_val = float("0.0650265") - mean = float("-8.70135e-05") - std = float("0.00262175") + min_val = float("-0.046203") + max_val = float("0.0533854") + mean = float("-0.00024935") + std = float("0.00299432") data = None @@ -271,10 +275,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [384] dtype = "float32" - min_val = float("-0.00258806") - max_val = float("0.00556431") - mean = float("9.34648e-05") - std = float("0.00146967") + min_val = float("-0.00255311") + max_val = float("0.00302266") + 
mean = float("9.24001e-05") + std = float("0.00102546") data = None @@ -282,10 +286,10 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.00175289") - max_val = float("0.00489605") - mean = float("1.06219e-05") - std = float("0.000586047") + min_val = float("-0.00218781") + max_val = float("0.00368412") + mean = float("2.14576e-05") + std = float("0.000454436") data = None @@ -302,6 +306,10 @@ class Program_weight_tensor_parameter_28: name = "parameter_28" shape = [4, 384, 3, 3] dtype = "float32" + min_val = float("-0.408866") + max_val = float("0.0192085") + mean = float("-0.0556575") + std = float("0.0692171") data = None @@ -309,10 +317,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [384] dtype = "float32" - min_val = float("-0.150027") - max_val = float("0.451389") - mean = float("0.229437") - std = float("0.0996485") + min_val = float("-0.232909") + max_val = float("0.549512") + mean = float("0.280237") + std = float("0.126665") data = None @@ -320,10 +328,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [384] dtype = "float32" - min_val = float("1.00298") - max_val = float("1.39843") - mean = float("1.18623") - std = float("0.059847") + min_val = float("0.99194") + max_val = float("1.51118") + mean = float("1.23481") + std = float("0.0724215") data = None @@ -331,10 +339,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [384] dtype = "float32" - min_val = float("0.000149666") - max_val = float("0.00370625") - mean = float("0.000713075") - std = float("0.000590199") + min_val = float("0.00162732") + max_val = float("157.489") + mean = float("2.37771") + std = float("10.0765") data = None @@ -342,10 +350,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [384] dtype = "float32" - min_val = float("-0.108532") - max_val = float("0.0565238") - mean = float("-0.0264124") - std = float("0.0221279") + min_val = float("-3.4158") + max_val = float("2.26802") + mean = float("0.0219363") + std = float("0.412554") data = None @@ -353,10 +361,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0480316") - max_val = float("0.0447625") - mean = float("-0.000359058") - std = float("0.00295423") + min_val = float("-0.303292") + max_val = float("0.19556") + mean = float("0.000605287") + std = float("0.0144313") data = None @@ -364,10 +372,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [384] dtype = "float32" - min_val = float("-0.00203913") - max_val = float("0.00903738") - mean = float("-3.66197e-06") - std = float("0.000959619") + min_val = float("-0.00209029") + max_val = float("0.00798783") + mean = float("1.7015e-06") + std = float("0.000935296") data = None @@ -375,10 +383,10 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.00522582") - max_val = float("0.0088395") - mean = float("-4.68691e-06") - std = float("0.000619469") + min_val = float("-0.00535335") + max_val = float("0.0106302") + mean = float("3.37412e-06") + std = float("0.000515341") data = None @@ -386,10 +394,10 @@ class Program_weight_tensor_parameter_36: name = "parameter_36" shape = [68] dtype = "float32" - min_val = float("-0.00290222") - max_val = float("0.0101817") - mean = float("1.30633e-07") - std = float("0.00299044") + min_val = 
float("-0.00525521") + max_val = float("0.0145446") + mean = float("1.87138e-08") + std = float("0.00475") data = None @@ -397,10 +405,10 @@ class Program_weight_tensor_parameter_37: name = "parameter_37" shape = [68, 768, 3, 3] dtype = "float32" - min_val = float("-0.0411349") - max_val = float("0.0738933") - mean = float("1.4159e-08") - std = float("0.00274115") + min_val = float("-0.0328719") + max_val = float("0.0658116") + mean = float("1.06011e-08") + std = float("0.00223125") data = None @@ -408,10 +416,10 @@ class Program_weight_tensor_parameter_38: name = "parameter_38" shape = [768] dtype = "float32" - min_val = float("-0.0141815") - max_val = float("0.0470838") - mean = float("0.0110249") - std = float("0.0102353") + min_val = float("-0.0159444") + max_val = float("0.0716664") + mean = float("0.0156827") + std = float("0.0141851") data = None @@ -419,10 +427,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [768] dtype = "float32" - min_val = float("1.00835") - max_val = float("1.19911") - mean = float("1.06458") - std = float("0.0222771") + min_val = float("1.02746") + max_val = float("1.22316") + mean = float("1.09258") + std = float("0.0262997") data = None @@ -430,10 +438,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [768] dtype = "float32" - min_val = float("3.80062e-05") - max_val = float("0.00131862") - mean = float("0.000152402") - std = float("0.000108701") + min_val = float("9.27059e-05") + max_val = float("0.0491799") + mean = float("0.00131356") + std = float("0.00220594") data = None @@ -441,10 +449,10 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [768] dtype = "float32" - min_val = float("-0.0236698") - max_val = float("0.00795434") - mean = float("-0.00383744") - std = float("0.00338712") + min_val = float("-0.118335") + max_val = float("0.0502021") + mean = float("-0.00579341") + std = float("0.0133184") data = None @@ -452,10 +460,10 @@ class Program_weight_tensor_parameter_42: name = "parameter_42" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0354011") - max_val = float("0.0311472") - mean = float("-3.50875e-05") - std = float("0.0011905") + min_val = float("-0.0245855") + max_val = float("0.0259771") + mean = float("-9.1685e-05") + std = float("0.00109901") data = None @@ -463,10 +471,10 @@ class Program_weight_tensor_parameter_43: name = "parameter_43" shape = [768] dtype = "float32" - min_val = float("-0.00350695") - max_val = float("0.00217249") - mean = float("0.000104712") - std = float("0.000668859") + min_val = float("-0.00228367") + max_val = float("0.0017984") + mean = float("9.02642e-05") + std = float("0.000464238") data = None @@ -474,10 +482,10 @@ class Program_weight_tensor_parameter_44: name = "parameter_44" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.00236527") - max_val = float("0.00288539") - mean = float("2.74168e-05") - std = float("0.000209731") + min_val = float("-0.00190114") + max_val = float("0.00166296") + mean = float("2.82487e-05") + std = float("0.000154769") data = None @@ -494,6 +502,10 @@ class Program_weight_tensor_parameter_46: name = "parameter_46" shape = [4, 768, 3, 3] dtype = "float32" + min_val = float("-0.398112") + max_val = float("0.0341335") + mean = float("-0.0183825") + std = float("0.0458581") data = None @@ -501,10 +513,10 @@ class Program_weight_tensor_parameter_47: name = "parameter_47" shape = [768] dtype = "float32" - min_val = float("-0.109319") - max_val = float("0.200294") - mean 
= float("0.0936331") - std = float("0.0420139") + min_val = float("-0.149591") + max_val = float("0.25563") + mean = float("0.127271") + std = float("0.0546892") data = None @@ -512,10 +524,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [768] dtype = "float32" - min_val = float("1.00715") - max_val = float("1.25105") - mean = float("1.07838") - std = float("0.0259236") + min_val = float("1.01586") + max_val = float("1.35031") + mean = float("1.10996") + std = float("0.0353314") data = None @@ -523,10 +535,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [768] dtype = "float32" - min_val = float("9.94571e-05") - max_val = float("0.00338121") - mean = float("0.000633121") - std = float("0.000467813") + min_val = float("7.61599e-05") + max_val = float("7.24991") + mean = float("0.129052") + std = float("0.546235") data = None @@ -534,10 +546,10 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [768] dtype = "float32" - min_val = float("-0.0501712") - max_val = float("0.0941121") - mean = float("-0.0191505") - std = float("0.0110933") + min_val = float("-0.787154") + max_val = float("0.440341") + mean = float("-0.0324434") + std = float("0.112186") data = None @@ -545,10 +557,10 @@ class Program_weight_tensor_parameter_51: name = "parameter_51" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0485631") - max_val = float("0.0317378") - mean = float("-0.000183678") - std = float("0.00129677") + min_val = float("-0.0434972") + max_val = float("0.028497") + mean = float("-0.00050544") + std = float("0.0033397") data = None @@ -556,10 +568,10 @@ class Program_weight_tensor_parameter_52: name = "parameter_52" shape = [768] dtype = "float32" - min_val = float("-0.00522906") - max_val = float("0.00428608") - mean = float("1.59338e-05") - std = float("0.000442491") + min_val = float("-0.0036321") + max_val = float("0.00249458") + mean = float("1.83682e-05") + std = float("0.000350058") data = None @@ -567,8 +579,8 @@ class Program_weight_tensor_parameter_53: name = "parameter_53" shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.0190742") - max_val = float("0.0352904") - mean = float("5.40338e-06") - std = float("0.000293143") + min_val = float("-0.0129388") + max_val = float("0.040846") + mean = float("7.13241e-06") + std = float("0.000222922") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_human/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_human/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_human/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_15/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_human/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/input_meta.py rename to 
paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_13/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-L/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-L/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_15/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/input_meta.py new file mode 100644 index 000000000..d14444825 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [22] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [22] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [44] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [44] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [88] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [88] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [8, 576, 22, 22] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("11.3441") + mean = float("0.308556") + std = float("0.659097") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [8, 288, 44, 44] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("12.5041") + mean = float("0.419507") + std = float("0.74104") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [8, 144, 88, 88] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("17.9308") + mean = float("0.521464") + std = float("0.797879") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/weight_meta.py new file mode 100644 index 000000000..0fbfee683 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus-M/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [68] + dtype = "float32" + min_val = float("-0.0141298") + max_val = float("0.0241404") + mean = float("6.51635e-08") + std = float("0.00670344") 
+ data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [68, 144, 3, 3] + dtype = "float32" + min_val = float("-0.159403") + max_val = float("0.18767") + mean = float("6.14746e-08") + std = float("0.00826349") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [144] + dtype = "float32" + min_val = float("-0.104117") + max_val = float("0.334805") + mean = float("0.0803525") + std = float("0.0949004") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [144] + dtype = "float32" + min_val = float("0.833704") + max_val = float("2.14069") + mean = float("1.40199") + std = float("0.259198") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [144] + dtype = "float32" + min_val = float("0.000157642") + max_val = float("0.00219112") + mean = float("0.000569831") + std = float("0.000354466") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [144] + dtype = "float32" + min_val = float("-0.0501844") + max_val = float("0.038398") + mean = float("-0.00743265") + std = float("0.0175874") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0724953") + max_val = float("0.0965259") + mean = float("-0.000264722") + std = float("0.00737951") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [144] + dtype = "float32" + min_val = float("-0.00636108") + max_val = float("0.00678391") + mean = float("-0.000173716") + std = float("0.00320574") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0117989") + max_val = float("0.015335") + mean = float("-0.000124252") + std = float("0.00220502") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [4, 144, 3, 3] + dtype = "float32" + min_val = float("-7.87108e-06") + max_val = float("0.000443999") + mean = float("1.5363e-05") + std = float("3.18915e-05") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [144] + dtype = "float32" + min_val = float("-0.64158") + max_val = float("1.51361") + mean = float("0.436643") + std = float("0.396591") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [144] + dtype = "float32" + min_val = float("0.912817") + max_val = float("2.11091") + mean = float("1.38863") + std = float("0.197354") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [144] + dtype = "float32" + min_val = float("0.000201053") + max_val = float("0.00337522") + mean = float("0.000771889") + std = float("0.000504667") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [144] + dtype = "float32" + min_val = float("-0.2459") + max_val = float("0.0358757") + mean = float("-0.0278203") + std = float("0.0404512") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0649436") + max_val = float("0.0801723") + mean = float("-0.000599465") + std = 
float("0.0088557") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [144] + dtype = "float32" + min_val = float("-0.00555943") + max_val = float("0.0056399") + mean = float("-0.000279704") + std = float("0.00214688") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [144, 144, 1, 1] + dtype = "float32" + min_val = float("-0.0293142") + max_val = float("0.0536777") + mean = float("-5.72243e-05") + std = float("0.00245901") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [68] + dtype = "float32" + min_val = float("-0.0049597") + max_val = float("0.0272289") + mean = float("6.11326e-08") + std = float("0.00561272") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [68, 288, 3, 3] + dtype = "float32" + min_val = float("-0.111167") + max_val = float("0.12918") + mean = float("3.28073e-08") + std = float("0.00572785") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [288] + dtype = "float32" + min_val = float("-0.0173529") + max_val = float("0.146767") + mean = float("0.0535169") + std = float("0.0321121") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [288] + dtype = "float32" + min_val = float("1.01431") + max_val = float("1.4495") + mean = float("1.22632") + std = float("0.0811625") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [288] + dtype = "float32" + min_val = float("9.53404e-05") + max_val = float("0.00409233") + mean = float("0.000467458") + std = float("0.000455701") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [288] + dtype = "float32" + min_val = float("-0.0517031") + max_val = float("0.0162222") + mean = float("-0.00786597") + std = float("0.00921386") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0594582") + max_val = float("0.0735971") + mean = float("-0.00013723") + std = float("0.00364254") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [288] + dtype = "float32" + min_val = float("-0.00314669") + max_val = float("0.00620898") + mean = float("2.48414e-05") + std = float("0.00194774") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.00388461") + max_val = float("0.00841306") + mean = float("-1.87953e-05") + std = float("0.000932022") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [4, 288, 3, 3] + dtype = "float32" + min_val = float("-6.48766e-06") + max_val = float("0.000140856") + mean = float("6.52221e-06") + std = float("9.39411e-06") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [288] + dtype = "float32" + min_val = float("-0.27006") + max_val = float("0.770798") + mean = float("0.311538") + std = float("0.171771") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [288] + dtype = "float32" + min_val = float("0.991505") + max_val = float("1.72132") 
+ mean = float("1.2558") + std = float("0.0945899") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [288] + dtype = "float32" + min_val = float("0.000219746") + max_val = float("0.0054385") + mean = float("0.000809585") + std = float("0.000639851") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [288] + dtype = "float32" + min_val = float("-0.130478") + max_val = float("0.0695712") + mean = float("-0.0265584") + std = float("0.0287591") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0480156") + max_val = float("0.0592741") + mean = float("-0.000430968") + std = float("0.00425043") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [288] + dtype = "float32" + min_val = float("-0.00285738") + max_val = float("0.00720467") + mean = float("-7.33536e-05") + std = float("0.0011726") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [288, 288, 1, 1] + dtype = "float32" + min_val = float("-0.0122627") + max_val = float("0.0161886") + mean = float("-1.72547e-05") + std = float("0.000995486") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [68] + dtype = "float32" + min_val = float("-0.00365417") + max_val = float("0.0135851") + mean = float("3.44444e-08") + std = float("0.00376477") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [68, 576, 3, 3] + dtype = "float32" + min_val = float("-0.0663088") + max_val = float("0.0674867") + mean = float("1.74732e-08") + std = float("0.00359868") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [576] + dtype = "float32" + min_val = float("-0.0421788") + max_val = float("0.113513") + mean = float("0.0222551") + std = float("0.0258343") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [576] + dtype = "float32" + min_val = float("1.05067") + max_val = float("1.39463") + mean = float("1.14799") + std = float("0.042853") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [576] + dtype = "float32" + min_val = float("4.89493e-05") + max_val = float("0.00264842") + mean = float("0.000245751") + std = float("0.000220246") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [576] + dtype = "float32" + min_val = float("-0.0344271") + max_val = float("0.0201663") + mean = float("-0.00575569") + std = float("0.00544377") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0385787") + max_val = float("0.0417201") + mean = float("-6.1183e-05") + std = float("0.00176385") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [576] + dtype = "float32" + min_val = float("-0.00434691") + max_val = float("0.00341236") + mean = float("0.000100391") + std = float("0.00100061") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0035525") + max_val = float("0.00417295") + mean = float("2.83396e-05") + std = float("0.00036474") + data = None + + +class Program_weight_tensor_parameter_45: + 
name = "parameter_45" + shape = [4] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [4, 576, 3, 3] + dtype = "float32" + min_val = float("-8.78041e-06") + max_val = float("0.000132008") + mean = float("4.36014e-06") + std = float("8.07886e-06") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [576] + dtype = "float32" + min_val = float("-0.248224") + max_val = float("0.371203") + mean = float("0.155358") + std = float("0.0834839") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [576] + dtype = "float32" + min_val = float("1.02385") + max_val = float("1.42504") + mean = float("1.13342") + std = float("0.0514955") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [576] + dtype = "float32" + min_val = float("0.000119442") + max_val = float("0.00279863") + mean = float("0.000709705") + std = float("0.00050333") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [576] + dtype = "float32" + min_val = float("-0.0735115") + max_val = float("0.0820593") + mean = float("-0.0216337") + std = float("0.0158423") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0599145") + max_val = float("0.0355292") + mean = float("-0.000237247") + std = float("0.00192816") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [576] + dtype = "float32" + min_val = float("-0.00772796") + max_val = float("0.00635587") + mean = float("-2.65035e-05") + std = float("0.000684865") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [576, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0265895") + max_val = float("0.0452734") + mean = float("-1.04734e-06") + std = float("0.000523833") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py new file mode 100644 index 000000000..36ef80eba --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [21] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [21] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [42] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [42] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [84] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [84] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 768, 21, 21] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("16.1843") + mean = float("0.147783") + std = float("0.640828") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 384, 42, 42] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("27.7299") + mean = float("0.204054") + std = float("0.900832") + data 
= None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 192, 84, 84] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("499.504") + mean = float("12.5639") + std = float("18.7273") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py new file mode 100644 index 000000000..7ebfc8bcf --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py @@ -0,0 +1,580 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [40, 192, 3, 3] + dtype = "float32" + min_val = float("-0.200034") + max_val = float("0.204555") + mean = float("1.49957e-08") + std = float("0.0116846") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.049655") + max_val = float("0.233417") + mean = float("0.0551577") + std = float("0.0442585") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.836511") + max_val = float("1.62756") + mean = float("1.22136") + std = float("0.145365") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.00534518") + max_val = float("5.22517") + mean = float("0.466305") + std = float("0.726268") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-8.55821") + max_val = float("10.0832") + mean = float("0.104165") + std = float("2.83033") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.101586") + max_val = float("0.138874") + mean = float("-0.000767801") + std = float("0.0120829") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00860419") + max_val = float("0.0156884") + mean = float("-0.000140933") + std = float("0.00406783") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0102778") + max_val = float("0.0182908") + mean = float("-0.000259715") + std = float("0.00201379") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-1151.38") + max_val = float("133.782") + mean = float("-21.9826") + std = float("92.2655") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-83.505") + max_val = float("85.9246") + mean = float("2.58206") + std = float("26.6954") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("-14.1722") + max_val = float("24.9711") + mean = float("-0.553052") + std 
= float("5.93472") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("2.32866") + max_val = float("16618600.0") + mean = float("525947.0") + std = float("1853320.0") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-12504.7") + max_val = float("7911.06") + mean = float("-492.832") + std = float("2441.54") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-138.688") + max_val = float("100.601") + mean = float("-0.0990701") + std = float("4.16445") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-10.9738") + max_val = float("7.09472") + mean = float("-0.172259") + std = float("1.7704") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-21.7725") + max_val = float("14.0694") + mean = float("-0.0616887") + std = float("0.957234") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [40, 384, 3, 3] + dtype = "float32" + min_val = float("-0.124724") + max_val = float("0.129603") + mean = float("3.40515e-09") + std = float("0.00677564") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00274099") + max_val = float("0.100798") + mean = float("0.03273") + std = float("0.0175032") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.999002") + max_val = float("1.24047") + mean = float("1.10664") + std = float("0.0410217") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.00338575") + max_val = float("0.497954") + mean = float("0.0471292") + std = float("0.05553") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.202557") + max_val = float("0.162712") + mean = float("-0.0211337") + std = float("0.0479677") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0592913") + max_val = float("0.0683296") + mean = float("-0.000516701") + std = float("0.00403998") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00282189") + max_val = float("0.00800835") + mean = float("4.07712e-05") + std = float("0.00164155") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00191414") + max_val = float("0.00571394") + mean = float("-3.68985e-05") + std = float("0.00061666") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: 
+ name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-4.35504") + max_val = float("0.450249") + mean = float("-0.170375") + std = float("0.297706") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.129636") + max_val = float("0.537228") + mean = float("0.252657") + std = float("0.116675") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("0.994039") + max_val = float("1.41192") + mean = float("1.16982") + std = float("0.0589143") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.189816") + max_val = float("625.421") + mean = float("13.2176") + std = float("39.326") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-6.46967") + max_val = float("2.53183") + mean = float("-0.259474") + std = float("0.862378") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.372426") + max_val = float("0.933267") + mean = float("-0.00500754") + std = float("0.034042") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.168065") + max_val = float("0.0324648") + mean = float("0.000330264") + std = float("0.0172051") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.137078") + max_val = float("0.0270712") + mean = float("-0.000467459") + std = float("0.00704374") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [40] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [40, 768, 3, 3] + dtype = "float32" + min_val = float("-0.0663824") + max_val = float("0.0420303") + mean = float("4.07454e-10") + std = float("0.00430679") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0211256") + max_val = float("0.0557602") + mean = float("0.00994946") + std = float("0.0117441") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.0067") + max_val = float("1.19979") + mean = float("1.06442") + std = float("0.0224675") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("0.00543727") + max_val = float("4.44191") + mean = float("0.147009") + std = float("0.218322") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.239306") + max_val = float("0.308633") + mean = float("-0.0116159") + std = float("0.0711555") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0315101") + max_val = float("0.0300214") + mean = float("-0.00021905") + std = float("0.00273367") + data = None + + +class Program_weight_tensor_parameter_43: + name = 
"parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00399512") + max_val = float("0.00318684") + mean = float("6.30584e-05") + std = float("0.00082816") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00263886") + max_val = float("0.00246544") + mean = float("4.12622e-06") + std = float("0.000248146") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-3.66048") + max_val = float("0.4728") + mean = float("-0.108391") + std = float("0.234778") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.093079") + max_val = float("0.260714") + mean = float("0.112569") + std = float("0.0586991") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("0.977893") + max_val = float("1.22745") + mean = float("1.06571") + std = float("0.0280202") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("0.0580549") + max_val = float("178.552") + mean = float("7.98546") + std = float("16.2805") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-3.133") + max_val = float("2.19298") + mean = float("-0.123927") + std = float("0.472723") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.352391") + max_val = float("0.641069") + mean = float("-0.00191347") + std = float("0.0277056") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.0409679") + max_val = float("0.024778") + mean = float("-2.53984e-05") + std = float("0.00726461") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0172154") + max_val = float("0.0204702") + mean = float("-6.95149e-05") + std = float("0.00176879") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/input_meta.py new file mode 100644 index 000000000..84df7ef74 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [15] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [15] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [30] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [30] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [60] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [60] + + 
+class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 384, 15, 15] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.73963") + mean = float("0.347025") + std = float("0.708791") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [2, 192, 30, 30] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.0545") + mean = float("0.469318") + std = float("0.798413") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [2, 96, 60, 60] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("14.5401") + mean = float("0.60308") + std = float("0.85045") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/weight_meta.py similarity index 50% rename from paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/weight_meta.py index a80fe5856..cd9bb1db0 100644 --- a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_14/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-S/weight_meta.py @@ -1,22 +1,20 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" - shape = [68] + shape = [40] dtype = "float32" - min_val = float("-0.0172485") - max_val = float("0.027465") - mean = float("1.46232e-07") - std = float("0.00758165") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_1: name = "parameter_1" - shape = [68, 96, 3, 3] + shape = [40, 96, 3, 3] dtype = "float32" - min_val = float("-0.193407") - max_val = float("0.203896") - mean = float("4.08909e-08") - std = float("0.0115569") + min_val = float("-0.220386") + max_val = float("0.222309") + mean = float("2.62808e-08") + std = float("0.0161374") data = None @@ -24,10 +22,10 @@ class Program_weight_tensor_parameter_2: name = "parameter_2" shape = [96] dtype = "float32" - min_val = float("-0.149226") - max_val = float("0.348802") - mean = float("0.0836582") - std = float("0.116186") + min_val = float("-0.127121") + max_val = float("0.345832") + mean = float("0.108268") + std = float("0.11199") data = None @@ -35,10 +33,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [96] dtype = "float32" - min_val = float("0.92059") - max_val = float("2.01352") - mean = float("1.39698") - std = float("0.216217") + min_val = float("0.947191") + max_val = float("2.26117") + mean = float("1.5267") + std = float("0.270308") data = None @@ -46,10 +44,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [96] dtype = "float32" - min_val = float("0.000218703") - max_val = float("0.00374521") - mean = float("0.000878038") - std = float("0.00058106") + min_val = float("0.000653841") + max_val = float("0.0512872") + mean = float("0.00630045") + std = float("0.00803373") data = None @@ -57,10 +55,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [96] dtype = "float32" - min_val = float("-0.0825809") - max_val = float("0.0417536") - mean = float("-0.00846266") - std = float("0.0198982") + min_val = float("-0.1806") + max_val = float("0.105559") + mean = float("-0.0118148") + std = float("0.0471293") data = None @@ -68,10 +66,10 @@ class Program_weight_tensor_parameter_6: name = "parameter_6" shape = [96, 96, 1, 1] dtype = "float32" - min_val = 
float("-0.0948878") - max_val = float("0.109061") - mean = float("-0.000797905") - std = float("0.0137305") + min_val = float("-0.16617") + max_val = float("0.150574") + mean = float("-0.0015204") + std = float("0.018482") data = None @@ -79,10 +77,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [96] dtype = "float32" - min_val = float("-0.012275") - max_val = float("0.0115959") - mean = float("-0.000356062") - std = float("0.00537855") + min_val = float("-0.01695") + max_val = float("0.0144876") + mean = float("-0.000590983") + std = float("0.0062921") data = None @@ -90,16 +88,16 @@ class Program_weight_tensor_parameter_8: name = "parameter_8" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0233111") - max_val = float("0.0248623") - mean = float("-0.000113331") - std = float("0.0035026") + min_val = float("-0.0289852") + max_val = float("0.0383979") + mean = float("-0.00056367") + std = float("0.00473631") data = None class Program_weight_tensor_parameter_9: name = "parameter_9" - shape = [1] + shape = [10] dtype = "float32" min_val = float("0") max_val = float("0.5") @@ -108,12 +106,12 @@ class Program_weight_tensor_parameter_9: class Program_weight_tensor_parameter_10: name = "parameter_10" - shape = [1, 96, 3, 3] + shape = [10, 96, 3, 3] dtype = "float32" - min_val = float("-0.0555531") - max_val = float("0.0394738") - mean = float("0.00027914") - std = float("0.0112485") + min_val = float("-0.158508") + max_val = float("0.111546") + mean = float("-0.00123997") + std = float("0.0149231") data = None @@ -121,10 +119,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [96] dtype = "float32" - min_val = float("-0.661613") - max_val = float("1.11986") - mean = float("0.208505") - std = float("0.335963") + min_val = float("-1.00246") + max_val = float("1.70494") + mean = float("0.553674") + std = float("0.526983") data = None @@ -132,10 +130,10 @@ class Program_weight_tensor_parameter_12: name = "parameter_12" shape = [96] dtype = "float32" - min_val = float("0.773318") - max_val = float("1.56281") - mean = float("1.11195") - std = float("0.138849") + min_val = float("0.751697") + max_val = float("2.08213") + mean = float("1.46982") + std = float("0.2376") data = None @@ -143,10 +141,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [96] dtype = "float32" - min_val = float("0.000158442") - max_val = float("0.00563483") - mean = float("0.00119669") - std = float("0.0010054") + min_val = float("0.000680279") + max_val = float("0.0389719") + mean = float("0.00525898") + std = float("0.00550929") data = None @@ -154,10 +152,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [96] dtype = "float32" - min_val = float("-0.217371") - max_val = float("0.0873086") - mean = float("-0.0303735") - std = float("0.050933") + min_val = float("-0.302926") + max_val = float("0.242336") + mean = float("0.0338812") + std = float("0.0731476") data = None @@ -165,10 +163,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.113703") - max_val = float("0.0885765") - mean = float("-0.00122357") - std = float("0.0140255") + min_val = float("-0.0920552") + max_val = float("0.101403") + mean = float("-0.000391196") + std = float("0.016093") data = None @@ -176,10 +174,10 @@ class Program_weight_tensor_parameter_16: name = "parameter_16" shape = [96] dtype = "float32" - min_val = float("-0.00655335") - max_val = 
float("0.00788225") - mean = float("-0.000702387") - std = float("0.00315771") + min_val = float("-0.00790501") + max_val = float("0.0118839") + mean = float("-0.000616206") + std = float("0.00334672") data = None @@ -187,32 +185,30 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0695131") - max_val = float("0.0989064") - mean = float("-0.000394188") - std = float("0.00429104") + min_val = float("-0.0354745") + max_val = float("0.0393783") + mean = float("-0.000415455") + std = float("0.00398812") data = None class Program_weight_tensor_parameter_18: name = "parameter_18" - shape = [68] + shape = [40] dtype = "float32" - min_val = float("-0.00653538") - max_val = float("0.0248622") - mean = float("1.52999e-07") - std = float("0.00624483") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_19: name = "parameter_19" - shape = [68, 192, 3, 3] + shape = [40, 192, 3, 3] dtype = "float32" - min_val = float("-0.155994") - max_val = float("0.17862") - mean = float("-1.00845e-08") - std = float("0.00807346") + min_val = float("-0.15995") + max_val = float("0.172865") + mean = float("7.42875e-09") + std = float("0.00878877") data = None @@ -220,10 +216,10 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [192] dtype = "float32" - min_val = float("-0.111521") - max_val = float("0.136793") - mean = float("0.050372") - std = float("0.0428473") + min_val = float("-0.0211182") + max_val = float("0.16802") + mean = float("0.0783479") + std = float("0.0391695") data = None @@ -231,10 +227,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [192] dtype = "float32" - min_val = float("0.941879") - max_val = float("1.4895") - mean = float("1.20932") - std = float("0.101229") + min_val = float("1.07811") + max_val = float("1.51568") + mean = float("1.30274") + std = float("0.0876299") data = None @@ -242,10 +238,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [192] dtype = "float32" - min_val = float("0.000166872") - max_val = float("0.00497783") - mean = float("0.00083062") - std = float("0.000691699") + min_val = float("0.000349327") + max_val = float("0.0216234") + mean = float("0.00330979") + std = float("0.00396173") data = None @@ -253,10 +249,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [192] dtype = "float32" - min_val = float("-0.0352484") - max_val = float("0.0207761") - mean = float("-0.00483897") - std = float("0.00870871") + min_val = float("-0.124726") + max_val = float("0.0534715") + mean = float("-0.0102394") + std = float("0.0264137") data = None @@ -264,10 +260,10 @@ class Program_weight_tensor_parameter_24: name = "parameter_24" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0620273") - max_val = float("0.101258") - mean = float("-0.000222798") - std = float("0.00681063") + min_val = float("-0.0792181") + max_val = float("0.107778") + mean = float("-0.000368823") + std = float("0.00729713") data = None @@ -275,10 +271,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [192] dtype = "float32" - min_val = float("-0.0102195") - max_val = float("0.0101765") - mean = float("-0.000115416") - std = float("0.00396591") + min_val = float("-0.00746441") + max_val = float("0.00722328") + mean = float("-8.13383e-05") + std = float("0.00296425") data = None @@ -286,16 +282,16 @@ class Program_weight_tensor_parameter_26: 
name = "parameter_26" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.00892386") - max_val = float("0.0199459") - mean = float("-0.000135272") - std = float("0.00154918") + min_val = float("-0.00795566") + max_val = float("0.0110585") + mean = float("-0.000113663") + std = float("0.00156623") data = None class Program_weight_tensor_parameter_27: name = "parameter_27" - shape = [1] + shape = [10] dtype = "float32" min_val = float("0") max_val = float("0.5") @@ -304,12 +300,12 @@ class Program_weight_tensor_parameter_27: class Program_weight_tensor_parameter_28: name = "parameter_28" - shape = [1, 192, 3, 3] + shape = [10, 192, 3, 3] dtype = "float32" - min_val = float("-0.0670605") - max_val = float("0.0310249") - mean = float("0.000294137") - std = float("0.00757806") + min_val = float("-0.0899103") + max_val = float("0.0751371") + mean = float("-0.000593556") + std = float("0.00682701") data = None @@ -317,10 +313,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [192] dtype = "float32" - min_val = float("-0.290544") - max_val = float("0.608277") - mean = float("0.147622") - std = float("0.158959") + min_val = float("-0.291965") + max_val = float("1.00507") + mean = float("0.404706") + std = float("0.237409") data = None @@ -328,10 +324,10 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [192] dtype = "float32" - min_val = float("0.913214") - max_val = float("1.4959") - mean = float("1.08724") - std = float("0.0815711") + min_val = float("1.0436") + max_val = float("1.84744") + mean = float("1.34394") + std = float("0.127482") data = None @@ -339,10 +335,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [192] dtype = "float32" - min_val = float("0.000173535") - max_val = float("0.00980167") - mean = float("0.00157464") - std = float("0.00163303") + min_val = float("0.000363284") + max_val = float("0.0104766") + mean = float("0.00183808") + std = float("0.00170966") data = None @@ -350,10 +346,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [192] dtype = "float32" - min_val = float("-0.1722") - max_val = float("0.0293933") - mean = float("-0.0362368") - std = float("0.0320849") + min_val = float("-0.160158") + max_val = float("0.11803") + mean = float("-0.00116506") + std = float("0.0399602") data = None @@ -361,10 +357,10 @@ class Program_weight_tensor_parameter_33: name = "parameter_33" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0771266") - max_val = float("0.0600578") - mean = float("-0.00104893") - std = float("0.00708841") + min_val = float("-0.0517684") + max_val = float("0.0545506") + mean = float("-0.000237345") + std = float("0.00623955") data = None @@ -372,10 +368,10 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [192] dtype = "float32" - min_val = float("-0.00520655") - max_val = float("0.0122276") - mean = float("-0.000186298") - std = float("0.00206584") + min_val = float("-0.00367397") + max_val = float("0.00885849") + mean = float("-0.000153054") + std = float("0.00155494") data = None @@ -383,32 +379,30 @@ class Program_weight_tensor_parameter_35: name = "parameter_35" shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0221196") - max_val = float("0.0288162") - mean = float("-0.000192599") - std = float("0.00153376") + min_val = float("-0.0153384") + max_val = float("0.0338853") + mean = float("-0.000100079") + std = float("0.00136892") data = None class 
Program_weight_tensor_parameter_36: name = "parameter_36" - shape = [68] + shape = [40] dtype = "float32" - min_val = float("-0.00618645") - max_val = float("0.0126918") - mean = float("1.55094e-07") - std = float("0.0052247") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_37: name = "parameter_37" - shape = [68, 384, 3, 3] + shape = [40, 384, 3, 3] dtype = "float32" - min_val = float("-0.0898313") - max_val = float("0.115939") - mean = float("1.97397e-08") - std = float("0.00562912") + min_val = float("-0.02606") + max_val = float("0.0317561") + mean = float("9.52241e-10") + std = float("0.0020846") data = None @@ -416,10 +410,10 @@ class Program_weight_tensor_parameter_38: name = "parameter_38" shape = [384] dtype = "float32" - min_val = float("-0.0751669") - max_val = float("0.111426") - mean = float("0.0119129") - std = float("0.0354094") + min_val = float("-0.0246739") + max_val = float("0.152016") + mean = float("0.0408553") + std = float("0.032015") data = None @@ -427,10 +421,10 @@ class Program_weight_tensor_parameter_39: name = "parameter_39" shape = [384] dtype = "float32" - min_val = float("0.969448") - max_val = float("1.49376") - mean = float("1.16935") - std = float("0.0775516") + min_val = float("1.05705") + max_val = float("1.41825") + mean = float("1.2191") + std = float("0.0542166") data = None @@ -438,10 +432,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [384] dtype = "float32" - min_val = float("9.22385e-05") - max_val = float("0.00462362") - mean = float("0.000541906") - std = float("0.000444355") + min_val = float("0.000103167") + max_val = float("0.00446044") + mean = float("0.000448355") + std = float("0.000443999") data = None @@ -449,10 +443,10 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [384] dtype = "float32" - min_val = float("-0.0402063") - max_val = float("0.0136726") - mean = float("-0.00386345") - std = float("0.00617741") + min_val = float("-0.0274058") + max_val = float("0.0126066") + mean = float("-0.00688192") + std = float("0.00609138") data = None @@ -460,10 +454,10 @@ class Program_weight_tensor_parameter_42: name = "parameter_42" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0664712") - max_val = float("0.0691916") - mean = float("-0.000151613") - std = float("0.00377438") + min_val = float("-0.0464693") + max_val = float("0.0548612") + mean = float("-0.000100111") + std = float("0.00295451") data = None @@ -471,10 +465,10 @@ class Program_weight_tensor_parameter_43: name = "parameter_43" shape = [384] dtype = "float32" - min_val = float("-0.00514094") - max_val = float("0.00626889") - mean = float("-6.43167e-05") - std = float("0.00278132") + min_val = float("-0.00639402") + max_val = float("0.00421888") + mean = float("3.54671e-05") + std = float("0.00171694") data = None @@ -482,16 +476,16 @@ class Program_weight_tensor_parameter_44: name = "parameter_44" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0303347") - max_val = float("0.0123028") - mean = float("-5.00562e-05") - std = float("0.000943714") + min_val = float("-0.00654664") + max_val = float("0.00763939") + mean = float("-4.30022e-06") + std = float("0.000719087") data = None class Program_weight_tensor_parameter_45: name = "parameter_45" - shape = [1] + shape = [10] dtype = "float32" min_val = float("0") max_val = float("0.5") @@ -500,12 +494,12 @@ class Program_weight_tensor_parameter_45: class Program_weight_tensor_parameter_46: name = 
"parameter_46" - shape = [1, 384, 3, 3] + shape = [10, 384, 3, 3] dtype = "float32" - min_val = float("-0.0481817") - max_val = float("0.0280031") - mean = float("0.000415518") - std = float("0.0047328") + min_val = float("-0.0276653") + max_val = float("0.0223204") + mean = float("-0.000443044") + std = float("0.00213442") data = None @@ -513,10 +507,10 @@ class Program_weight_tensor_parameter_47: name = "parameter_47" shape = [384] dtype = "float32" - min_val = float("-0.369118") - max_val = float("0.494762") - mean = float("0.0350664") - std = float("0.121505") + min_val = float("-0.409858") + max_val = float("0.610632") + mean = float("0.212569") + std = float("0.109879") data = None @@ -524,10 +518,10 @@ class Program_weight_tensor_parameter_48: name = "parameter_48" shape = [384] dtype = "float32" - min_val = float("0.883076") - max_val = float("1.55393") - mean = float("1.05789") - std = float("0.0835119") + min_val = float("1.05961") + max_val = float("1.46695") + mean = float("1.20196") + std = float("0.0657315") data = None @@ -535,10 +529,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [384] dtype = "float32" - min_val = float("0.000182895") - max_val = float("0.00777303") - mean = float("0.001135") - std = float("0.00109732") + min_val = float("7.46256e-05") + max_val = float("0.0054246") + mean = float("0.000793045") + std = float("0.00065768") data = None @@ -546,10 +540,10 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [384] dtype = "float32" - min_val = float("-0.130947") - max_val = float("0.0318047") - mean = float("-0.0288516") - std = float("0.0238519") + min_val = float("-0.0807872") + max_val = float("0.0680079") + mean = float("-0.0131062") + std = float("0.0222074") data = None @@ -557,10 +551,10 @@ class Program_weight_tensor_parameter_51: name = "parameter_51" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0496468") - max_val = float("0.0518443") - mean = float("-0.000562622") - std = float("0.0041021") + min_val = float("-0.0661697") + max_val = float("0.0355423") + mean = float("-0.00020161") + std = float("0.00342025") data = None @@ -568,10 +562,10 @@ class Program_weight_tensor_parameter_52: name = "parameter_52" shape = [384] dtype = "float32" - min_val = float("-0.0141813") - max_val = float("0.0110761") - mean = float("-0.000147647") - std = float("0.00165558") + min_val = float("-0.00360413") + max_val = float("0.00538468") + mean = float("-8.84011e-05") + std = float("0.000952451") data = None @@ -579,8 +573,8 @@ class Program_weight_tensor_parameter_53: name = "parameter_53" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.109289") - max_val = float("0.057067") - mean = float("-4.32392e-05") - std = float("0.00113413") + min_val = float("-0.0169924") + max_val = float("0.0292524") + mean = float("-2.04535e-05") + std = float("0.000783345") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py new file mode 100644 index 000000000..0f83461ca --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py @@ -0,0 +1,73 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [] + dtype = "int64" + data = [48] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = 
[48] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [] + dtype = "int64" + data = [96] + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [192] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [] + dtype = "int64" + data = [192] + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 768, 48, 48] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("6.21279") + mean = float("0.263701") + std = float("0.615333") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1, 384, 96, 96] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("9.39853") + mean = float("0.366505") + std = float("0.697682") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 192, 192, 192] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("13.9944") + mean = float("0.442546") + std = float("0.692429") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py new file mode 100644 index 000000000..8e33e8e3a --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_3/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py @@ -0,0 +1,586 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [88] + dtype = "float32" + min_val = float("0.825624") + max_val = float("0.846159") + mean = float("0.828073") + std = float("0.00356405") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [88, 192, 3, 3] + dtype = "float32" + min_val = float("-0.120061") + max_val = float("0.122726") + mean = float("1.20344e-08") + std = float("0.00589807") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [192] + dtype = "float32" + min_val = float("-0.0433758") + max_val = float("0.207094") + mean = float("0.0514628") + std = float("0.0402492") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [192] + dtype = "float32" + min_val = float("0.850872") + max_val = float("1.63127") + mean = float("1.22454") + std = float("0.145326") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [192] + dtype = "float32" + min_val = float("0.000193476") + max_val = float("0.00844475") + mean = float("0.00135281") + std = float("0.00118991") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [192] + dtype = "float32" + min_val = float("-0.0754706") + max_val = float("0.0319285") + mean = float("-0.0124719") + std = float("0.0176434") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0734168") + max_val = float("0.108512") + mean = float("-0.000444445") + std = float("0.00763924") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [192] + dtype = "float32" + min_val = float("-0.00613384") + max_val = float("0.00922103") + mean = float("-7.86384e-05") + std = float("0.00341788") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [192, 
192, 1, 1] + dtype = "float32" + min_val = float("-0.00662369") + max_val = float("0.0119822") + mean = float("-0.000127685") + std = float("0.00177164") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [10, 192, 3, 3] + dtype = "float32" + min_val = float("-0.175474") + max_val = float("0.055091") + mean = float("-0.00147039") + std = float("0.00842565") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [192] + dtype = "float32" + min_val = float("-0.329693") + max_val = float("0.892228") + mean = float("0.356694") + std = float("0.271228") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [192] + dtype = "float32" + min_val = float("1.01538") + max_val = float("1.77428") + mean = float("1.31556") + std = float("0.143187") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [192] + dtype = "float32" + min_val = float("0.000400596") + max_val = float("0.0113603") + mean = float("0.00180946") + std = float("0.00164804") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [192] + dtype = "float32" + min_val = float("-0.164541") + max_val = float("0.127897") + mean = float("-0.00423752") + std = float("0.0391887") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0620339") + max_val = float("0.0578527") + mean = float("-0.000609344") + std = float("0.0074149") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [192] + dtype = "float32" + min_val = float("-0.0053139") + max_val = float("0.0128966") + mean = float("-0.000148839") + std = float("0.00226529") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0108365") + max_val = float("0.0180944") + mean = float("-7.70563e-05") + std = float("0.0014898") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [88] + dtype = "float32" + min_val = float("0.826359") + max_val = float("0.837586") + mean = float("0.828071") + std = float("0.00217956") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [88, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0854456") + max_val = float("0.0873423") + mean = float("4.34375e-09") + std = float("0.0031191") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.00526138") + max_val = float("0.0696216") + mean = float("0.0259227") + std = float("0.0132331") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.99865") + max_val = float("1.23747") + mean = float("1.1069") + std = float("0.0410692") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("8.35707e-05") + max_val = float("0.00597629") + mean = float("0.000723624") + std = float("0.000763811") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + 
shape = [384] + dtype = "float32" + min_val = float("-0.0442923") + max_val = float("0.00827867") + mean = float("-0.00966063") + std = float("0.00931597") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0461624") + max_val = float("0.063746") + mean = float("-0.000138651") + std = float("0.00306682") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.00274369") + max_val = float("0.00511827") + mean = float("6.10625e-05") + std = float("0.00152895") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00184752") + max_val = float("0.0048019") + mean = float("3.76045e-06") + std = float("0.000597256") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [10, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0721721") + max_val = float("0.0404456") + mean = float("-0.000999238") + std = float("0.00397589") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("-0.152983") + max_val = float("0.452749") + mean = float("0.229344") + std = float("0.100245") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("1.00417") + max_val = float("1.40261") + mean = float("1.1866") + std = float("0.0603403") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.000148108") + max_val = float("0.00569047") + mean = float("0.000833938") + std = float("0.000746994") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-0.0987288") + max_val = float("0.0644616") + mean = float("-0.0146565") + std = float("0.0228018") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0528263") + max_val = float("0.037782") + mean = float("-0.000246446") + std = float("0.00296888") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("-0.00198673") + max_val = float("0.0108277") + mean = float("-1.89144e-05") + std = float("0.00104636") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.00490867") + max_val = float("0.00769719") + mean = float("-1.63033e-05") + std = float("0.00053086") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [88] + dtype = "float32" + min_val = float("0.827794") + max_val = float("0.828556") + mean = float("0.828072") + std = float("0.000199979") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [88, 768, 3, 3] + dtype = "float32" + min_val = float("-0.00645056") + max_val = float("0.0120405") + mean = float("4.87489e-10") + std = float("0.000843696") + data = None + + +class 
Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [768] + dtype = "float32" + min_val = float("-0.0143031") + max_val = float("0.0478323") + mean = float("0.0113513") + std = float("0.0104705") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [768] + dtype = "float32" + min_val = float("1.00867") + max_val = float("1.20113") + mean = float("1.06607") + std = float("0.0224781") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [768] + dtype = "float32" + min_val = float("3.23648e-05") + max_val = float("0.00119716") + mean = float("0.00013138") + std = float("9.50346e-05") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [768] + dtype = "float32" + min_val = float("-0.0210089") + max_val = float("0.00349173") + mean = float("-0.00472837") + std = float("0.00312975") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.034299") + max_val = float("0.0337449") + mean = float("-4.66048e-05") + std = float("0.00140342") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [768] + dtype = "float32" + min_val = float("-0.00390782") + max_val = float("0.00260919") + mean = float("7.12606e-05") + std = float("0.000831057") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.00261376") + max_val = float("0.00228504") + mean = float("1.77784e-05") + std = float("0.000252276") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [10] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [10, 768, 3, 3] + dtype = "float32" + min_val = float("-0.015813") + max_val = float("0.00965711") + mean = float("-0.000546702") + std = float("0.00140653") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [768] + dtype = "float32" + min_val = float("-0.110932") + max_val = float("0.199913") + mean = float("0.0934025") + std = float("0.0422427") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [768] + dtype = "float32" + min_val = float("1.00786") + max_val = float("1.25519") + mean = float("1.07879") + std = float("0.0261974") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [768] + dtype = "float32" + min_val = float("7.25456e-05") + max_val = float("0.00245344") + mean = float("0.000534885") + std = float("0.000318044") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [768] + dtype = "float32" + min_val = float("-0.0725999") + max_val = float("0.0495439") + mean = float("-0.0155315") + std = float("0.0150006") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0538655") + max_val = float("0.0252785") + mean = float("-0.000146665") + std = float("0.00153735") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [768] + dtype = "float32" + min_val = float("-0.00117685") + max_val = float("0.00393971") + mean = float("3.40689e-06") + std = 
float("0.000473697") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0124531") + max_val = float("0.02256") + mean = float("2.28217e-06") + std = float("0.00024775") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/input_meta.py new file mode 100644 index 000000000..dce0b815d --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [2, 5376, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("9.31323e-10") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [2, 5376] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [2, 5376, 4] + dtype = "float32" + max_val = float("0.945922") + mean = float("0.00102082") + std = float("0.0260199") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_16/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L/input_meta.py new file mode 100644 index 000000000..fd9b56629 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L/input_meta.py @@ -0,0 +1,27 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 4116, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 4116] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 4116, 4] + dtype = "float32" + max_val = float("0.947339") + mean = float("0.000280391") + std = float("0.0157366") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_17/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M/input_meta.py new file mode 100644 index 000000000..23b4d80c0 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M/input_meta.py @@ -0,0 +1,28 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("0.01") + max_val = float("0.01") + mean = float("0.01") + std = float("2.79397e-09") + data = None + + +class 
Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 10164] + dtype = "int32" + min_val = 0 + max_val = 4 + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 10164, 4] + dtype = "float32" + max_val = float("0.941962") + mean = float("0.000134148") + std = float("0.0107789") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_7/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_4/shape_patches_PP-YOLOE_plus-M/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_15/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_5/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-L_human/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_human/subgraph_9/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-L_human/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-L_human/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_8/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-L_human/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human/input_meta.py similarity index 50% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human/input_meta.py index 59f05b8d6..201ee0397 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human/input_meta.py @@ -1,6 +1,6 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1, 24276] + shape = [2, 8400] dtype = "bool" min_val = 0 max_val = 2 @@ -9,33 +9,32 @@ class Program_weight_tensor_data_0: class Program_weight_tensor_data_1: name = "data_1" - shape = [1, 24276, 4] + shape = [2, 8400, 4] dtype = "float32" - min_val = float("-8.51911") - max_val = float("141.539") - mean = float("59.1174") - std = float("39.293") + min_val = float("-8.72054") + max_val = float("86.5058") + mean = float("34.8123") + std = float("23.6174") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [1, 24276, 4] + shape = [2, 8400, 4] dtype = 
"float32" - min_val = float("11.9244") - max_val = float("131.375") - mean = float("50.6359") - std = float("16.0055") + max_val = float("80.0") + mean = float("34.7665") + std = float("25.0051") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [1, 24276, 10] + shape = [2, 8400, 1] dtype = "float32" - max_val = float("0.981539") - mean = float("0.000708745") - std = float("0.0222415") + max_val = float("0.911359") + mean = float("0.00967398") + std = float("0.0703979") data = None @@ -43,26 +42,26 @@ class Program_weight_tensor_data_4: name = "data_4" shape = [] dtype = "float32" - data = [172.055] + data = [162.523] class Program_weight_tensor_data_5: name = "data_5" - shape = [1, 24276, 88] + shape = [2, 8400, 68] dtype = "float32" - min_val = float("-3.36394") - max_val = float("13.3402") - mean = float("0.828078") - std = float("1.48853") + min_val = float("-7.01895") + max_val = float("14.5228") + mean = float("8.74382e-06") + std = float("1.67872") data = None class Program_weight_tensor_data_6: name = "data_6" - shape = [24276, 2] + shape = [8400, 2] dtype = "float32" min_val = float("0.5") - max_val = float("135.5") - mean = float("59.0952") - std = float("38.9487") + max_val = float("79.5") + mean = float("34.7619") + std = float("22.9098") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_human/subgraph_16/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_human/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_vehicle/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_vehicle/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_11/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE-S_vehicle/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L/input_meta.py new file mode 100644 index 000000000..d6af4f831 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L/input_meta.py @@ -0,0 +1,67 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 4116] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 4116, 4] + dtype = "float32" + min_val = float("-9.93527") + max_val = float("63.9265") + mean = float("24.3499") + std = float("17.1828") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 4116, 4] + dtype = "float32" + max_val = float("55.2727") + mean = float("24.1964") + std = float("14.7865") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 4116, 4] + dtype = "float32" + max_val = float("0.947339") + mean = 
float("0.000280391") + std = float("0.0157366") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [36.9308] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 4116, 68] + dtype = "float32" + min_val = float("-6.52177") + max_val = float("13.8275") + mean = float("2.55256e-05") + std = float("1.49822") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [4116, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("55.5") + mean = float("24.3333") + std = float("16.0356") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_12/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M/input_meta.py new file mode 100644 index 000000000..42b68473c --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M/input_meta.py @@ -0,0 +1,68 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [8, 10164] + dtype = "bool" + min_val = 0 + max_val = 2 + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("-8.4998") + max_val = float("93.297") + mean = float("38.2803") + std = float("25.8039") + data = None + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [8, 10164, 4] + dtype = "float32" + min_val = float("2.52804") + max_val = float("79.2") + mean = float("34.6196") + std = float("18.1525") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [8, 10164, 4] + dtype = "float32" + max_val = float("0.941962") + mean = float("0.000134148") + std = float("0.0107789") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "float32" + data = [43.6314] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [8, 10164, 68] + dtype = "float32" + min_val = float("-7.06467") + max_val = float("15.2111") + mean = float("2.71498e-05") + std = float("1.60341") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [10164, 2] + dtype = "float32" + min_val = float("0.5") + max_val = float("87.5") + mean = float("38.2381") + std = float("25.2012") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_14/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_6/shape_patches_PP-YOLOE_plus-M/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-L/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_12/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-L/input_meta.py diff --git 
a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_10/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-M/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_17/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-M/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-M/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_7/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus-S/subgraph_9/shape_patches_PP-YOLOE_plus-M/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt index cf9cecf24..f33996b4a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file +331d31c12329e180f8072f92c095e5fa3ed0d1dbf984c2e334eff6b5b3862c64 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py index 36ef80eba..8361f35e8 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/input_meta.py @@ -1,73 +1,84 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [21] + shape = [2, 3, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00124008") + std = float("0.0351929") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "int64" - data = [21] + shape = [2, 1] + dtype = "int32" + data = [0, 1] class Program_weight_tensor_data_2: name = "data_2" - shape = [] - dtype = "int64" - data = [42] + shape = [2, 3, 1] + dtype = "int32" + data = [4, 3, 3, 3, 1, 0] class Program_weight_tensor_data_3: name = "data_3" - shape = [] - dtype = "int64" - data = [42] + shape = [2, 5376] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00372024") + std = float("0.0608802") + data = None class Program_weight_tensor_data_4: name = "data_4" - shape = [] - dtype = "int64" - data = [84] + shape = [2, 3, 4] + dtype = "float32" + data = [ + 270.791, + 234.887, + 332.231, + 356.289, + 38.6844, + 240.165, + 99.3659, + 448.66, + 476.35, + 311.423, + 512.0, + 504.082, + 2.03175, + 161.292, + 9.34603, + 181.971, + 39.619, + 61.622, + 40.8381, + 69.0662, + 0.0, + 0.0, + 0.0, + 0.0, + ] class Program_weight_tensor_data_5: name = "data_5" - shape = [] - dtype = "int64" - data = [84] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 768, 21, 21] + shape = [2, 3, 5376] dtype = "float32" - min_val = float("-0.278465") - max_val = float("16.1843") - mean = float("0.147783") - std = float("0.640828") + max_val = 
float("0.00886879") + mean = float("8.72339e-07") + std = float("6.13309e-05") data = None -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 384, 42, 42] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("27.7299") - mean = float("0.204054") - std = float("0.900832") - data = None - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 192, 84, 84] +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 3, 5376] dtype = "float32" - min_val = float("-0.278465") - max_val = float("499.504") - mean = float("12.5639") - std = float("18.7273") + max_val = float("0.712006") + mean = float("0.00169782") + std = float("0.0189379") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py index 84b06b995..c7c749882 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/model.py @@ -5,1140 +5,188 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - ): + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 + # pd_op.argmax: (2x5376xi64) <- (2x3x5376xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + full_1 = paddle._C_ops.full( + [1], float("3"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), 
True) + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) del scale_0 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 + # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 - ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - 
arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") + # pd_op.flatten: (6xi32) <- (2x3x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) del data_2 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 - ) - del full_5, stack_5 - - # 
pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - del full_4 - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = 
paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 + # pd_op.flatten: (10752xi64) <- (2x5376xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( + full_2 = paddle._C_ops.full( [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x768x-1x-1xf32) <- 
(2x768x-1x-1xf32, 2x768x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) - - # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x768x-1x-1xf32, 10x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 - - # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 - - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 - - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x768x-1x-1xf32, 40x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 - - # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + # pd_op.gather: (10752xi32) <- (6xi32, 10752xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) del flatten_0 - # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - 
False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) - del parameter_34 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) - - # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x384x-1x-1xf32, 10x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_28 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) - del parameter_27 - - # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) - del parameter_25 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - 
parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 5376] - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) + # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 - # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x384x-1x-1xf32, 40x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_19 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) - del parameter_18 - - # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) + # pd_op.greater_than: (2x5376xb) <- (2x5376xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 - # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) - del parameter_16 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + del full_4 - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 + # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 
0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 + # pd_op.reshape: (6x4xf32) <- (2x3x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 + # pd_op.gather: (10752x4xf32) <- (6x4xf32, 10752xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [2, 5376, 4] - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) + # pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 - # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x192x-1x-1xf32, 10x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_10 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) - del parameter_9 - # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.one_hot: (2x5376x11xf32) <- (2x5376xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 ) - del parameter_8 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) - del parameter_7 + del full_5 - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (2x5376x10xf32) <- (2x5376x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 - # 
pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 + # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) + # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 - # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x192x-1x-1xf32, 40x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_1 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 - - # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + # pd_op.scale: (2x3x1xf32) <- (2x3x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 - # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 + # pd_op.divide: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 - # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 - - # builtin.combine: ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32]) <- (2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] - # 
pd_op.concat: (2x-1x10xf32) <- ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 + # pd_op.max: (2x5376xf32) <- (2x3x5376xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 - # builtin.combine: ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32]) <- (2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] + # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 - # pd_op.concat: (2x-1x40xf32) <- ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) + # pd_op.multiply: (2x5376x10xf32) <- (2x5376x10xf32, 2x5376x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 - return concat_0, concat_1, concat_2, concat_3, concat_4 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py index 7ebfc8bcf..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_1/weight_meta.py @@ -1,580 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [40, 192, 3, 3] - dtype = "float32" - min_val = float("-0.200034") - max_val = float("0.204555") - mean = float("1.49957e-08") - std = float("0.0116846") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [192] - dtype = "float32" - min_val = float("-0.049655") - max_val = float("0.233417") - 
mean = float("0.0551577") - std = float("0.0442585") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [192] - dtype = "float32" - min_val = float("0.836511") - max_val = float("1.62756") - mean = float("1.22136") - std = float("0.145365") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [192] - dtype = "float32" - min_val = float("0.00534518") - max_val = float("5.22517") - mean = float("0.466305") - std = float("0.726268") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [192] - dtype = "float32" - min_val = float("-8.55821") - max_val = float("10.0832") - mean = float("0.104165") - std = float("2.83033") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.101586") - max_val = float("0.138874") - mean = float("-0.000767801") - std = float("0.0120829") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [192] - dtype = "float32" - min_val = float("-0.00860419") - max_val = float("0.0156884") - mean = float("-0.000140933") - std = float("0.00406783") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0102778") - max_val = float("0.0182908") - mean = float("-0.000259715") - std = float("0.00201379") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [10, 192, 3, 3] - dtype = "float32" - min_val = float("-1151.38") - max_val = float("133.782") - mean = float("-21.9826") - std = float("92.2655") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [192] - dtype = "float32" - min_val = float("-83.505") - max_val = float("85.9246") - mean = float("2.58206") - std = float("26.6954") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [192] - dtype = "float32" - min_val = float("-14.1722") - max_val = float("24.9711") - mean = float("-0.553052") - std = float("5.93472") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [192] - dtype = "float32" - min_val = float("2.32866") - max_val = float("16618600.0") - mean = float("525947.0") - std = float("1853320.0") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [192] - dtype = "float32" - min_val = float("-12504.7") - max_val = float("7911.06") - mean = float("-492.832") - std = float("2441.54") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-138.688") - max_val = float("100.601") - mean = float("-0.0990701") - std = float("4.16445") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [192] - dtype = "float32" - min_val = float("-10.9738") - max_val = float("7.09472") - mean = float("-0.172259") - std = float("1.7704") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-21.7725") - max_val = float("14.0694") - mean = float("-0.0616887") - std = float("0.957234") - 
data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [40, 384, 3, 3] - dtype = "float32" - min_val = float("-0.124724") - max_val = float("0.129603") - mean = float("3.40515e-09") - std = float("0.00677564") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.00274099") - max_val = float("0.100798") - mean = float("0.03273") - std = float("0.0175032") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.999002") - max_val = float("1.24047") - mean = float("1.10664") - std = float("0.0410217") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.00338575") - max_val = float("0.497954") - mean = float("0.0471292") - std = float("0.05553") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.202557") - max_val = float("0.162712") - mean = float("-0.0211337") - std = float("0.0479677") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0592913") - max_val = float("0.0683296") - mean = float("-0.000516701") - std = float("0.00403998") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.00282189") - max_val = float("0.00800835") - mean = float("4.07712e-05") - std = float("0.00164155") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.00191414") - max_val = float("0.00571394") - mean = float("-3.68985e-05") - std = float("0.00061666") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [10, 384, 3, 3] - dtype = "float32" - min_val = float("-4.35504") - max_val = float("0.450249") - mean = float("-0.170375") - std = float("0.297706") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.129636") - max_val = float("0.537228") - mean = float("0.252657") - std = float("0.116675") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("0.994039") - max_val = float("1.41192") - mean = float("1.16982") - std = float("0.0589143") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.189816") - max_val = float("625.421") - mean = float("13.2176") - std = float("39.326") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("-6.46967") - max_val = float("2.53183") - mean = float("-0.259474") - std = float("0.862378") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = 
[384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.372426") - max_val = float("0.933267") - mean = float("-0.00500754") - std = float("0.034042") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384] - dtype = "float32" - min_val = float("-0.168065") - max_val = float("0.0324648") - mean = float("0.000330264") - std = float("0.0172051") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.137078") - max_val = float("0.0270712") - mean = float("-0.000467459") - std = float("0.00704374") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [40, 768, 3, 3] - dtype = "float32" - min_val = float("-0.0663824") - max_val = float("0.0420303") - mean = float("4.07454e-10") - std = float("0.00430679") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [768] - dtype = "float32" - min_val = float("-0.0211256") - max_val = float("0.0557602") - mean = float("0.00994946") - std = float("0.0117441") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("1.0067") - max_val = float("1.19979") - mean = float("1.06442") - std = float("0.0224675") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [768] - dtype = "float32" - min_val = float("0.00543727") - max_val = float("4.44191") - mean = float("0.147009") - std = float("0.218322") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [768] - dtype = "float32" - min_val = float("-0.239306") - max_val = float("0.308633") - mean = float("-0.0116159") - std = float("0.0711555") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0315101") - max_val = float("0.0300214") - mean = float("-0.00021905") - std = float("0.00273367") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [768] - dtype = "float32" - min_val = float("-0.00399512") - max_val = float("0.00318684") - mean = float("6.30584e-05") - std = float("0.00082816") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.00263886") - max_val = float("0.00246544") - mean = float("4.12622e-06") - std = float("0.000248146") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [10, 768, 3, 3] - dtype = "float32" - min_val = float("-3.66048") - max_val = float("0.4728") - mean = float("-0.108391") - std = float("0.234778") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [768] - dtype = "float32" - min_val = float("-0.093079") - max_val = float("0.260714") - mean = float("0.112569") - std = float("0.0586991") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [768] - dtype = "float32" - min_val = 
float("0.977893") - max_val = float("1.22745") - mean = float("1.06571") - std = float("0.0280202") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [768] - dtype = "float32" - min_val = float("0.0580549") - max_val = float("178.552") - mean = float("7.98546") - std = float("16.2805") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [768] - dtype = "float32" - min_val = float("-3.133") - max_val = float("2.19298") - mean = float("-0.123927") - std = float("0.472723") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.352391") - max_val = float("0.641069") - mean = float("-0.00191347") - std = float("0.0277056") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [768] - dtype = "float32" - min_val = float("-0.0409679") - max_val = float("0.024778") - mean = float("-2.53984e-05") - std = float("0.00726461") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0172154") - max_val = float("0.0204702") - mean = float("-6.95149e-05") - std = float("0.00176879") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt deleted file mode 100644 index 896fa94fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py deleted file mode 100644 index 3c170c64e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/input_meta.py +++ /dev/null @@ -1,38 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [1, 8400, 4] - dtype = "float32" - min_val = float("0.00768409") - max_val = float("2.99242") - mean = float("1.93544") - std = float("0.747544") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [8400, 2] - dtype = "float32" - min_val = float("0.5") - max_val = float("79.5") - mean = float("34.7619") - std = float("22.9098") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [8400, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [1, 2] - dtype = "float32" - data = [0.836601, 0.470588] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py deleted file mode 100644 index 5d4c5e86c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/model.py +++ /dev/null @@ -1,94 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del 
data_0, full_0
-
-        # builtin.split: (1x8400x2xf32, 1x8400x2xf32) <- ([1x8400x2xf32, 1x8400x2xf32])
-        (
-            split_0,
-            split_1,
-        ) = split_with_num_0
-        del split_with_num_0
-
-        # pd_op.full: (1xf32) <- ()
-        full_1 = paddle._C_ops.full(
-            [1], float("-1"), paddle.float32, paddle.core.CPUPlace()
-        )
-
-        # pd_op.scale: (1x8400x2xf32) <- (1x8400x2xf32, 1xf32)
-        scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True)
-        del full_1, split_0
-
-        # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32)
-        add_0 = paddle._C_ops.add(scale_0, data_1)
-        del scale_0
-
-        # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32)
-        add_1 = paddle._C_ops.add(split_1, data_1)
-        del data_1, split_1
-
-        # pd_op.full: (1xi32) <- ()
-        full_2 = paddle._C_ops.full(
-            [1], float("-1"), paddle.int32, paddle.core.CPUPlace()
-        )
-
-        # builtin.combine: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x2xf32, 1x8400x2xf32)
-        combine_0 = [add_0, add_1]
-        del add_0, add_1
-
-        # pd_op.concat: (1x8400x4xf32) <- ([1x8400x2xf32, 1x8400x2xf32], 1xi32)
-        concat_0 = paddle._C_ops.concat(combine_0, full_2)
-        del combine_0
-
-        # pd_op.multiply: (1x8400x4xf32) <- (1x8400x4xf32, 8400x1xf32)
-        multiply_0 = paddle._C_ops.multiply(concat_0, data_2)
-        del concat_0, data_2
-
-        # pd_op.full: (1xi32) <- ()
-        full_3 = paddle._C_ops.full(
-            [1], float("1"), paddle.int32, paddle.core.CPUPlace()
-        )
-
-        # pd_op.split_with_num: ([1x1xf32, 1x1xf32]) <- (1x2xf32, 1xi32)
-        split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3)
-        del data_3, full_3
-
-        # builtin.split: (1x1xf32, 1x1xf32) <- ([1x1xf32, 1x1xf32])
-        (
-            split_2,
-            split_3,
-        ) = split_with_num_1
-        del split_with_num_1
-
-        # builtin.combine: ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32]) <- (1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32)
-        combine_1 = [split_3, split_2, split_3, split_2]
-        del split_2, split_3
-
-        # pd_op.concat: (1x4xf32) <- ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32], 1xi32)
-        concat_1 = paddle._C_ops.concat(combine_1, full_2)
-        del combine_1, full_2
-
-        # pd_op.full_int_array: (3xi64) <- ()
-        full_int_array_0 = [-1, 1, 4]
-
-        # pd_op.reshape: (1x1x4xf32) <- (1x4xf32, 3xi64)
-        reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0)
-        del concat_1, full_int_array_0
-
-        # pd_op.divide: (1x8400x4xf32) <- (1x8400x4xf32, 1x1x4xf32)
-        divide_0 = paddle._C_ops.divide(multiply_0, reshape_0)
-        del multiply_0, reshape_0
-
-        return divide_0
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py
deleted file mode 100644
index 8b1378917..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/weight_meta.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt
deleted file mode 100644
index d7d509a2f..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_hash.txt
+++ /dev/null
@@ -1 +0,0 @@
-2474d5c0140e3ca8b342671eaefb285e6ee8c6c96b775962f701e40c7ee40211
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json
deleted file mode 100644
index 8b4fccfd1..000000000
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/graph_net.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "framework": "paddle",
-    "model_name": "PP-YOLOE_plus_SOD-L",
-    "num_devices_required": 1,
-    "num_nodes_required": 1
-}
\ No
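# Reference sketch (illustrative, not from the repository): the deleted subgraph_10 above is the
# PP-YOLOE-style box decoder. Predicted (left, top, right, bottom) distances are turned into
# corner boxes around the anchor points, scaled back to input-image pixels by each point's
# stride, then divided by the resize scale factor. Function and argument names below are mine,
# and the (scale_y, scale_x) ordering of data_3 is assumed from the usual PaddleDetection
# convention.
import numpy as np

def decode_boxes(pred_dist, anchor_points, stride_tensor, scale_factor):
    # pred_dist:     (1, 8400, 4) predicted (l, t, r, b) offsets        -> data_0
    # anchor_points: (8400, 2)    grid-cell centers                     -> data_1
    # stride_tensor: (8400, 1)    FPN stride of each point              -> data_2
    # scale_factor:  (1, 2)       (scale_y, scale_x) from preprocessing -> data_3
    lt, rb = np.split(pred_dist, 2, axis=-1)
    x1y1 = anchor_points - lt                     # scale(-1) + add in the graph
    x2y2 = anchor_points + rb
    boxes = np.concatenate([x1y1, x2y2], axis=-1) * stride_tensor
    scale_y, scale_x = scale_factor[:, 0:1], scale_factor[:, 1:2]
    rescale = np.concatenate([scale_x, scale_y, scale_x, scale_y], axis=-1).reshape(-1, 1, 4)
    return boxes / rescale                        # boxes in original-image coordinates

# Example with the shapes and scale factor recorded in input_meta.py above:
# decode_boxes(np.zeros((1, 8400, 4), "float32"), np.zeros((8400, 2), "float32"),
#              np.full((8400, 1), 8.0, "float32"), np.array([[0.836601, 0.470588]], "float32"))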
newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py deleted file mode 100644 index f4ab533fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/input_meta.py +++ /dev/null @@ -1,64 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [2, 3549] - dtype = "float32" - max_val = float("26.0") - mean = float("0.0874894") - std = float("0.880638") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [2, 49, 3549] - dtype = "float32" - max_val = float("0.980323") - mean = float("0.000678688") - std = float("0.0200074") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 49, 3549] - dtype = "float32" - max_val = float("1.0") - mean = float("0.0017855") - std = float("0.0422174") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 49, 1] - dtype = "int32" - min_val = 0 - max_val = 8 - data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 49, 4] - dtype = "float32" - max_val = float("408.482") - mean = float("110.196") - std = float("133.414") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 49, 3549] - dtype = "float32" - max_val = float("0.795764") - mean = float("4.47944e-05") - std = float("0.00452556") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py deleted file mode 100644 index 2cc272861..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/model.py +++ /dev/null @@ -1,244 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 49, 1] - - # pd_op.tile: (2x49x3549xb) <- (2x1x3549xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) - del full_int_array_1, greater_than_0 - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - multiply_1 = paddle._C_ops.multiply(data_1, data_2) - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) - del multiply_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("49"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x3549x49xf32) <- (2x3549xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 - ) - del argmax_0, full_2 - - # pd_op.transpose: (2x49x3549xf32) 
<- (2x3549x49xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) - del data_2, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - - # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) - del full_1 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("49"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) - del data_3, full_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_0) - del argmax_1, cast_0 - - # pd_op.flatten: (98xi32) <- (2x49x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (7098xi64) <- (2x3549xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (7098xi32) <- (98xi32, 7098xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [2, 3549] - - # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) - del full_int_array_3, gather_0 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("10"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - del full_6 - - # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [-1, 4] - - # pd_op.reshape: (98x4xf32) <- (2x49x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) - del data_5, full_int_array_4 - - # pd_op.gather: (7098x4xf32) <- (98x4xf32, 7098xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) - del flatten_1, full_4, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [2, 3549, 4] - - # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) - del full_int_array_5, gather_1 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x3549x11xf32) <- (2x3549xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_7, where_1.dtype), full_7 - ) - del full_7 - - # pd_op.full: (10xi64) <- () - 
full_8 = paddle._C_ops.full( - [10], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (10xi64) <- (10xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_8, - [10], - paddle.int64, - [ - float("0"), - float("1"), - float("2"), - float("3"), - float("4"), - float("5"), - float("6"), - float("7"), - float("8"), - float("9"), - ], - paddle.framework._current_expected_place(), - ) - del full_8 - - # pd_op.index_select: (2x3549x10xf32) <- (2x3549x11xf32, 10xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - multiply_2 = paddle._C_ops.multiply(data_6, where_0) - del data_6 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [-1] - - # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - multiply_3 = paddle._C_ops.multiply(data_1, where_0) - del data_1, where_0 - - # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) - del multiply_3 - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x49x1xf32) <- (2x49x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) - del full_9, max_0 - - # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_1) - del multiply_2, scale_1 - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (2x3549xf32) <- (2x49x3549xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) - del full_int_array_2, multiply_4 - - # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) - del full_int_array_6, max_2 - - # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_11/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt deleted file mode 100644 index ef13ddd7e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -5a0efce50442936c174003f6338fd05f5366aa6cf630787a35d4c3d9fd30bc22 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json deleted file mode 100644 index 8b4fccfd1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git 
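# Reference sketch (illustrative, not taken from the repository): the deleted subgraph_11 above
# is the tail end of a TaskAlignedAssigner-style label assigner. Its first half resolves anchors
# claimed by several ground-truth boxes (greater_than / tile / argmax / one_hot / where); the
# second half gathers the matched labels and boxes per anchor, one-hot encodes the labels with a
# background slot (49 GT slots, 10 classes, background id 10 in this dump), and rescales the soft
# classification targets. The NumPy sketch below covers that second half; all names are mine, and
# which dumped input (data_1 vs. data_6) holds the IoUs versus the alignment metric is not
# recoverable from the graph alone.
import numpy as np

def gather_assigned_targets(mask_positive, gt_labels, gt_bboxes, align_metric, ious, num_classes=10):
    # mask_positive: (B, n_gt, A) 0/1 anchor-to-gt assignment after conflict resolution
    # gt_labels:     (B, n_gt)    integer class ids
    # gt_bboxes:     (B, n_gt, 4) ground-truth boxes
    # align_metric:  (B, n_gt, A) task-alignment metric; ious: (B, n_gt, A) predicted IoUs
    B, n_gt, A = mask_positive.shape
    assigned_gt = mask_positive.argmax(axis=1)                   # which gt each anchor belongs to
    flat = assigned_gt + np.arange(B)[:, None] * n_gt            # same flatten + gather as the graph
    labels = gt_labels.reshape(-1)[flat]                         # (B, A)
    bboxes = gt_bboxes.reshape(-1, 4)[flat]                      # (B, A, 4)

    # unmatched anchors get the background id, then one-hot and drop the background column
    labels = np.where(mask_positive.sum(axis=1) > 0, labels, num_classes)
    scores = np.eye(num_classes + 1)[labels][..., :num_classes]  # (B, A, num_classes)

    # rescale the soft targets: per-gt normalized metric times the per-gt peak IoU
    metric_m = align_metric * mask_positive
    iou_m = ious * mask_positive
    norm = metric_m / (metric_m.max(axis=-1, keepdims=True) + 1e-9) * iou_m.max(axis=-1, keepdims=True)
    return bboxes, scores * norm.max(axis=1)[..., None]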
a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py deleted file mode 100644 index 38a68a034..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/input_meta.py +++ /dev/null @@ -1,222 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [1] - dtype = "float32" - data = [0.699884] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [1] - dtype = "float32" - data = [0.667963] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [1] - dtype = "float32" - data = [0.675792] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [1] - dtype = "float32" - data = [0.676071] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [1] - dtype = "float32" - data = [0.658719] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [1] - dtype = "float32" - data = [0.620637] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1] - dtype = "float32" - data = [0.637685] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1] - dtype = "float32" - data = [0.619238] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1] - dtype = "float32" - data = [0.773168] - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [1] - dtype = "float32" - data = [0.635316] - - -class Program_weight_tensor_data_10: - name = "data_10" - shape = [1] - dtype = "float32" - data = [0.623672] - - -class Program_weight_tensor_data_11: - name = "data_11" - shape = [1] - dtype = "float32" - data = [0.620323] - - -class Program_weight_tensor_data_12: - name = "data_12" - shape = [1] - dtype = "float32" - data = [0.621219] - - -class Program_weight_tensor_data_13: - name = "data_13" - shape = [1] - dtype = "float32" - data = [0.624329] - - -class Program_weight_tensor_data_14: - name = "data_14" - shape = [1] - dtype = "float32" - data = [0.733117] - - -class Program_weight_tensor_data_15: - name = "data_15" - shape = [1] - dtype = "float32" - data = [0.557224] - - -class Program_weight_tensor_data_16: - name = "data_16" - shape = [1] - dtype = "float32" - data = [0.579909] - - -class Program_weight_tensor_data_17: - name = "data_17" - shape = [1] - dtype = "float32" - data = [0.70327] - - -class Program_weight_tensor_data_18: - name = "data_18" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0319247") - max_val = float("0.0317725") - mean = float("-5.10487e-06") - std = float("0.0176379") - data = None - - -class Program_weight_tensor_data_19: - name = "data_19" - shape = [3072] - dtype = "float32" - min_val = float("-0.000610453") - max_val = float("0.000772214") - mean = float("-4.76504e-06") - std = float("0.000159152") - data = None - - -class Program_weight_tensor_data_20: - name = "data_20" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0310115") - max_val = float("0.0310403") - mean = float("-4.13224e-06") - std = float("0.0176369") - data = None - - -class Program_weight_tensor_data_21: - name = "data_21" - shape = [3072] - dtype = "float32" - min_val = float("-0.000442001") - max_val = float("0.0003857") - mean = float("-8.27006e-07") - std = float("0.000101626") - data = None - - -class Program_weight_tensor_data_22: - name = "data_22" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0310211") - max_val = float("0.0309302") - mean = float("-4.07149e-06") - std = float("0.0176364") - data = None - - -class 
Program_weight_tensor_data_23: - name = "data_23" - shape = [3072] - dtype = "float32" - min_val = float("-0.00027446") - max_val = float("0.000292125") - mean = float("-1.05896e-07") - std = float("6.90506e-05") - data = None - - -class Program_weight_tensor_data_24: - name = "data_24" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.030934") - max_val = float("0.0309738") - mean = float("-4.04736e-06") - std = float("0.017636") - data = None - - -class Program_weight_tensor_data_25: - name = "data_25" - shape = [3072] - dtype = "float32" - min_val = float("-0.000277695") - max_val = float("0.000255924") - mean = float("3.36681e-07") - std = float("5.71105e-05") - data = None - - -class Program_weight_tensor_data_26: - name = "data_26" - shape = [2, 3, 416, 416] - dtype = "float32" - max_val = float("1.0") - mean = float("0.333385") - std = float("0.180644") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py deleted file mode 100644 index 0e4271fc6..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/model.py +++ /dev/null @@ -1,8874 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - 
parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - 
parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - 
parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - parameter_698, - parameter_699, - parameter_700, - parameter_701, - parameter_702, - parameter_703, - parameter_704, - parameter_705, - parameter_706, - parameter_707, - parameter_708, - parameter_709, - parameter_710, - parameter_711, - parameter_712, - parameter_713, - parameter_714, - parameter_715, - parameter_716, - parameter_717, - parameter_718, - parameter_719, - parameter_720, - parameter_721, - parameter_722, - parameter_723, - parameter_724, - parameter_725, - parameter_726, - parameter_727, - parameter_728, - parameter_729, - parameter_730, - parameter_731, - parameter_732, - parameter_733, - parameter_734, - parameter_735, - parameter_736, - parameter_737, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - data_11, - data_12, - data_13, - data_14, - data_15, - data_16, - data_17, - data_18, - data_19, - data_20, - data_21, - data_22, - data_23, - data_24, - data_25, - data_26, - ): - # pd_op.conv2d: (2x32x208x208xf32) <- (2x3x416x416xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_26, parameter_737, [2, 2], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_26, parameter_737 - - # pd_op.batch_norm_: (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_736, - parameter_735, - parameter_734, - parameter_733, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_733, parameter_734, parameter_735, parameter_736 - - # pd_op.swish: (2x32x208x208xf32) <- (2x32x208x208xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (2x32x208x208xf32) <- (2x32x208x208xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_732 - - # pd_op.batch_norm_: (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_731, - parameter_730, - parameter_729, - parameter_728, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_728, parameter_729, parameter_730, parameter_731 - - # pd_op.swish: (2x32x208x208xf32) <- (2x32x208x208xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x64x208x208xf32) <- (2x32x208x208xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_727 - - # pd_op.batch_norm_: (2x64x208x208xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x208x208xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_726, - parameter_725, - parameter_724, - parameter_723, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_723, parameter_724, parameter_725, parameter_726 - - # pd_op.swish: (2x64x208x208xf32) <- (2x64x208x208xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.conv2d: (2x96x104x104xf32) <- (2x64x208x208xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_722, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_722 - - # pd_op.batch_norm_: (2x96x104x104xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x104x104xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_721, - parameter_720, - parameter_719, - parameter_718, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_718, parameter_719, parameter_720, parameter_721 - - # pd_op.swish: (2x96x104x104xf32) <- 
(2x96x104x104xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x96x104x104xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_717 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_716, - parameter_715, - parameter_714, - parameter_713, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_713, parameter_714, parameter_715, parameter_716 - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x96x104x104xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_712 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_711, - parameter_710, - parameter_709, - parameter_708, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_708, parameter_709, parameter_710, parameter_711 - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_707 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_706, - parameter_705, - parameter_704, - parameter_703, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_703, parameter_704, parameter_705, parameter_706 - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_702 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_701, - parameter_700, - parameter_699, - parameter_698, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - 
False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_698, parameter_699, parameter_700, parameter_701 - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_697 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_696, - parameter_695, - parameter_694, - parameter_693, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_693, parameter_694, parameter_695, parameter_696 - - # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) - multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del data_0 - - # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) - add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_8 = paddle._C_ops.swish(add_0) - - # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_692 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_691, - parameter_690, - parameter_689, - parameter_688, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_688, parameter_689, parameter_690, parameter_691 - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_9, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_687 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_686, - parameter_685, - parameter_684, - parameter_683, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_683, parameter_684, parameter_685, parameter_686 - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_682 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 
48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_681, - parameter_680, - parameter_679, - parameter_678, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_678, parameter_679, parameter_680, parameter_681 - - # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) - multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del data_1 - - # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) - add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_10 = paddle._C_ops.swish(add_2) - - # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) - add_3 = paddle._C_ops.add(add_1, swish_10) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_677 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_676, - parameter_675, - parameter_674, - parameter_673, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_673, parameter_674, parameter_675, parameter_676 - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_11 = paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_11, parameter_672, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_672 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_671, - parameter_670, - parameter_669, - parameter_668, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_668, parameter_669, parameter_670, parameter_671 - - # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_667, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_667 - - # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_666, - parameter_665, - parameter_664, - parameter_663, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_663, parameter_664, parameter_665, parameter_666 - - # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) - multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) - del data_2 - - # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) - add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) - - # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) - swish_12 = paddle._C_ops.swish(add_4) - - # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) - add_5 = paddle._C_ops.add(add_3, swish_12) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_3 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_4 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_5 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_6 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_7 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_0 - - # builtin.combine: ([2x48x104x104xf32, 2x48x104x104xf32]) <- (2x48x104x104xf32, 2x48x104x104xf32) - combine_0 = [swish_5, add_5] - - # pd_op.concat: (2x96x104x104xf32) <- ([2x48x104x104xf32, 2x48x104x104xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_13 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_14 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_15 = full_int_array_0 - - # pd_op.mean: (2x96x1x1xf32) <- (2x96x104x104xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_662, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_662 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_661, full_int_array_1) - del parameter_661 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (2x96x104x104xf32) <- (2x96x104x104xf32, 2x96x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (2x128x104x104xf32) <- (2x96x104x104xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_3, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_660 - - # pd_op.batch_norm_: (2x128x104x104xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x104x104xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, 
f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_659, - parameter_658, - parameter_657, - parameter_656, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_656, parameter_657, parameter_658, parameter_659 - - # pd_op.swish: (2x128x104x104xf32) <- (2x128x104x104xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - - # pd_op.conv2d: (2x192x52x52xf32) <- (2x128x104x104xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_13, parameter_655, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_655 - - # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_654, - parameter_653, - parameter_652, - parameter_651, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_651, parameter_652, parameter_653, parameter_654 - - # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x192x52x52xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_650, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_650 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_649, - parameter_648, - parameter_647, - parameter_646, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_646, parameter_647, parameter_648, parameter_649 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_15 = paddle._C_ops.swish(batch_norm__102) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x192x52x52xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_645 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_644, - parameter_643, - parameter_642, - parameter_641, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_641, parameter_642, parameter_643, parameter_644 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_16 = paddle._C_ops.swish(batch_norm__108) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_640 - - # pd_op.batch_norm_: 
(2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_639, - parameter_638, - parameter_637, - parameter_636, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_636, parameter_637, parameter_638, parameter_639 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_17, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_635 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_634, - parameter_633, - parameter_632, - parameter_631, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_631, parameter_632, parameter_633, parameter_634 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_630 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_629, - parameter_628, - parameter_627, - parameter_626, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_626, parameter_627, parameter_628, parameter_629 - - # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) - multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del data_3 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_18 = paddle._C_ops.swish(add_7) - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_8 = paddle._C_ops.add(swish_16, swish_18) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_625 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_624, - parameter_623, - parameter_622, - parameter_621, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - 
if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_621, parameter_622, parameter_623, parameter_624 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_19, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) - multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) - del data_4 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_20 = paddle._C_ops.swish(add_9) - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_10 = paddle._C_ops.add(add_8, swish_20) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_21 = paddle._C_ops.swish(batch_norm__150) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_605 - - 
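# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not produced by the exporter).
# The hunk repeats one generated pattern: conv2d -> batch_norm -> swish,
# where batch_norm is wrapped in `(lambda x, f: f(x))(...)` purely to pad
# the result to a 6-tuple so the unpack works whether the op returns a
# single tensor or a tuple.  Each residual unit then computes
# `bn(3x3(x)) + data_k * bn(1x1(x))`, applies swish, and adds an identity
# shortcut, i.e. a RepVGG-style block whose 1x1 branch is scaled by the
# per-block coefficients fed in as data_3 ... data_17.  A layer-level
# equivalent (class and attribute names are placeholders, and the scale is
# modelled here as a learnable parameter rather than an external input):
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class ConvBNSwish(nn.Layer):
    def __init__(self, in_ch, out_ch, k, stride=1):
        super().__init__()
        self.conv = nn.Conv2D(in_ch, out_ch, k, stride, padding=k // 2, bias_attr=False)
        self.bn = nn.BatchNorm2D(out_ch)

    def forward(self, x):
        return F.swish(self.bn(self.conv(x)))


class RepVggBlock(nn.Layer):
    # parallel 3x3 and 1x1 conv-BN branches; the 1x1 branch is scaled by alpha
    def __init__(self, ch):
        super().__init__()
        self.conv3 = nn.Sequential(
            nn.Conv2D(ch, ch, 3, padding=1, bias_attr=False), nn.BatchNorm2D(ch)
        )
        self.conv1 = nn.Sequential(
            nn.Conv2D(ch, ch, 1, bias_attr=False), nn.BatchNorm2D(ch)
        )
        self.alpha = self.create_parameter(
            [1], default_initializer=nn.initializer.Constant(1.0)
        )

    def forward(self, x):
        return F.swish(self.conv3(x) + self.alpha * self.conv1(x))


class BasicBlock(nn.Layer):
    # 3x3 ConvBNSwish followed by a RepVGG block, with an identity shortcut
    def __init__(self, ch):
        super().__init__()
        self.conv1 = ConvBNSwish(ch, ch, 3)
        self.conv2 = RepVggBlock(ch)

    def forward(self, x):
        return x + self.conv2(self.conv1(x))
# ---------------------------------------------------------------------------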
# pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_604, - parameter_603, - parameter_602, - parameter_601, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_601, parameter_602, parameter_603, parameter_604 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_21, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_600 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_599, - parameter_598, - parameter_597, - parameter_596, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_596, parameter_597, parameter_598, parameter_599 - - # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) - multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del data_5 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_22 = paddle._C_ops.swish(add_11) - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_12 = paddle._C_ops.add(add_10, swish_22) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_595 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_594, - parameter_593, - parameter_592, - parameter_591, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_591, parameter_592, parameter_593, parameter_594 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_590 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_589, - parameter_588, - parameter_587, - parameter_586, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - 
), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_586, parameter_587, parameter_588, parameter_589 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_23, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_585 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_584, - parameter_583, - parameter_582, - parameter_581, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_581, parameter_582, parameter_583, parameter_584 - - # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) - multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) - del data_6 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_24 = paddle._C_ops.swish(add_13) - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_14 = paddle._C_ops.add(add_12, swish_24) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_580 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_579, - parameter_578, - parameter_577, - parameter_576, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_576, parameter_577, parameter_578, parameter_579 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_575 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_574, - parameter_573, - parameter_572, - parameter_571, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_571, parameter_572, parameter_573, parameter_574 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_570 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) 
<- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_569, - parameter_568, - parameter_567, - parameter_566, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_566, parameter_567, parameter_568, parameter_569 - - # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) - multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) - del data_7 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_26 = paddle._C_ops.swish(add_15) - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_16 = paddle._C_ops.add(add_14, swish_26) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_565 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_564, - parameter_563, - parameter_562, - parameter_561, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_561, parameter_562, parameter_563, parameter_564 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_27 = paddle._C_ops.swish(batch_norm__204) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_560, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_560 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_559, - parameter_558, - parameter_557, - parameter_556, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_556, parameter_557, parameter_558, parameter_559 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_27, parameter_555, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_555 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_554, - parameter_553, - parameter_552, - parameter_551, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, 
None, None, None, None), - ) - del parameter_551, parameter_552, parameter_553, parameter_554 - - # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) - multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) - del data_8 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_28 = paddle._C_ops.swish(add_17) - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_18 = paddle._C_ops.add(add_16, swish_28) - - # builtin.combine: ([2x96x52x52xf32, 2x96x52x52xf32]) <- (2x96x52x52xf32, 2x96x52x52xf32) - combine_1 = [swish_15, add_18] - - # pd_op.concat: (2x192x52x52xf32) <- ([2x96x52x52xf32, 2x96x52x52xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (2x192x1x1xf32) <- (2x192x52x52xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_550, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_550 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_549, full_int_array_1) - del parameter_549 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (2x192x52x52xf32) <- (2x192x52x52xf32, 2x192x1x1xf32) - multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (2x256x52x52xf32) <- (2x192x52x52xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_10, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_548 - - # pd_op.batch_norm_: (2x256x52x52xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x52x52xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_547, - parameter_546, - parameter_545, - parameter_544, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_544, parameter_545, parameter_546, parameter_547 - - # pd_op.swish: (2x256x52x52xf32) <- (2x256x52x52xf32) - swish_29 = paddle._C_ops.swish(batch_norm__222) - - # pd_op.conv2d: (2x384x26x26xf32) <- (2x256x52x52xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_29, parameter_543, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_543 - - # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_542, - parameter_541, - parameter_540, - parameter_539, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_539, parameter_540, parameter_541, 
parameter_542 - - # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_538, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_538 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_537, - parameter_536, - parameter_535, - parameter_534, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_534, parameter_535, parameter_536, parameter_537 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_533 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_532, - parameter_531, - parameter_530, - parameter_529, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_529, parameter_530, parameter_531, parameter_532 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_528 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_527, - parameter_526, - parameter_525, - parameter_524, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_524, parameter_525, parameter_526, parameter_527 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_523 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_522, - 
parameter_521, - parameter_520, - parameter_519, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_519, parameter_520, parameter_521, parameter_522 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_518 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_517, - parameter_516, - parameter_515, - parameter_514, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_514, parameter_515, parameter_516, parameter_517 - - # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) - multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del data_9 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_34 = paddle._C_ops.swish(add_20) - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_21 = paddle._C_ops.add(swish_32, swish_34) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_513 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_512, - parameter_511, - parameter_510, - parameter_509, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_509, parameter_510, parameter_511, parameter_512 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_35 = paddle._C_ops.swish(batch_norm__264) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_35, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_48 = 
paddle._C_ops.conv2d( - swish_35, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) - multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) - del data_10 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_36 = paddle._C_ops.swish(add_22) - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_23 = paddle._C_ops.add(add_21, swish_36) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_37, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_493 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_492, - parameter_491, - parameter_490, - parameter_489, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_489, parameter_490, parameter_491, parameter_492 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_488 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: 
f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_487, - parameter_486, - parameter_485, - parameter_484, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_484, parameter_485, parameter_486, parameter_487 - - # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) - multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) - del data_11 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_38 = paddle._C_ops.swish(add_24) - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_25 = paddle._C_ops.add(add_23, swish_38) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_483 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_482, - parameter_481, - parameter_480, - parameter_479, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_479, parameter_480, parameter_481, parameter_482 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_39 = paddle._C_ops.swish(batch_norm__300) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_39, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_478 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_477, - parameter_476, - parameter_475, - parameter_474, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_474, parameter_475, parameter_476, parameter_477 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_39, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_473 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_472, - parameter_471, - parameter_470, - parameter_469, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_469, parameter_470, parameter_471, parameter_472 - - # pd_op.multiply: (2x192x26x26xf32) 
<- (1xf32, 2x192x26x26xf32) - multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) - del data_12 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_40 = paddle._C_ops.swish(add_26) - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_27 = paddle._C_ops.add(add_25, swish_40) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_468 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_467, - parameter_466, - parameter_465, - parameter_464, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_464, parameter_465, parameter_466, parameter_467 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_41 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_41, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_463 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_462, - parameter_461, - parameter_460, - parameter_459, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_459, parameter_460, parameter_461, parameter_462 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_41, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_458 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_457, - parameter_456, - parameter_455, - parameter_454, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_454, parameter_455, parameter_456, parameter_457 - - # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) - multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) - del data_13 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_42 = paddle._C_ops.swish(add_28) - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 
2x192x26x26xf32) - add_29 = paddle._C_ops.add(add_27, swish_42) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_453 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_452, - parameter_451, - parameter_450, - parameter_449, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_449, parameter_450, parameter_451, parameter_452 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_43 = paddle._C_ops.swish(batch_norm__336) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_43, parameter_448, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_448 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_447, - parameter_446, - parameter_445, - parameter_444, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_444, parameter_445, parameter_446, parameter_447 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_43, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_443 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_442, - parameter_441, - parameter_440, - parameter_439, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_439, parameter_440, parameter_441, parameter_442 - - # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) - multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) - del data_14 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_44 = paddle._C_ops.swish(add_30) - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_31 = paddle._C_ops.add(add_29, swish_44) - - # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) - combine_2 = [swish_31, add_31] - - # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (2x384x1x1xf32) <- 
(2x384x26x26xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_438, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_438 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_437, full_int_array_1) - del parameter_437 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (2x384x26x26xf32) <- (2x384x26x26xf32, 2x384x1x1xf32) - multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (2x512x26x26xf32) <- (2x384x26x26xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_17, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_436 - - # pd_op.batch_norm_: (2x512x26x26xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x26x26xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_435, - parameter_434, - parameter_433, - parameter_432, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_432, parameter_433, parameter_434, parameter_435 - - # pd_op.swish: (2x512x26x26xf32) <- (2x512x26x26xf32) - swish_45 = paddle._C_ops.swish(batch_norm__354) - - # pd_op.conv2d: (2x768x13x13xf32) <- (2x512x26x26xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_431, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_431 - - # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_430, - parameter_429, - parameter_428, - parameter_427, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_427, parameter_428, parameter_429, parameter_430 - - # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) - swish_46 = paddle._C_ops.swish(batch_norm__360) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_46, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_426 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_425, - parameter_424, - parameter_423, - parameter_422, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, 
None), - ) - del parameter_422, parameter_423, parameter_424, parameter_425 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_47 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_46, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_421 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_420, - parameter_419, - parameter_418, - parameter_417, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_417, parameter_418, parameter_419, parameter_420 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_48 = paddle._C_ops.swish(batch_norm__372) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_416 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_415, - parameter_414, - parameter_413, - parameter_412, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_412, parameter_413, parameter_414, parameter_415 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_49 = paddle._C_ops.swish(batch_norm__378) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_49, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_411 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_410, - parameter_409, - parameter_408, - parameter_407, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_407, parameter_408, parameter_409, parameter_410 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_49, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_406 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_405, - parameter_404, - parameter_403, - 
parameter_402, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_402, parameter_403, parameter_404, parameter_405 - - # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) - multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) - del data_15 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_50 = paddle._C_ops.swish(add_33) - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_34 = paddle._C_ops.add(swish_48, swish_50) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_401 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_400, - parameter_399, - parameter_398, - parameter_397, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_397, parameter_398, parameter_399, parameter_400 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_51 = paddle._C_ops.swish(batch_norm__396) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_51, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_396 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_395, - parameter_394, - parameter_393, - parameter_392, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_392, parameter_393, parameter_394, parameter_395 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_51, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_391 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_390, - parameter_389, - parameter_388, - parameter_387, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_387, parameter_388, parameter_389, parameter_390 - - # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) - multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) - del 
data_16 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_52 = paddle._C_ops.swish(add_35) - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_36 = paddle._C_ops.add(add_34, swish_52) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_386, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_386 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_385, - parameter_384, - parameter_383, - parameter_382, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_382, parameter_383, parameter_384, parameter_385 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_53 = paddle._C_ops.swish(batch_norm__414) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_53, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_381 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_380, - parameter_379, - parameter_378, - parameter_377, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_377, parameter_378, parameter_379, parameter_380 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_53, parameter_376, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_376 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_375, - parameter_374, - parameter_373, - parameter_372, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_372, parameter_373, parameter_374, parameter_375 - - # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) - multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) - del data_17 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_54 = paddle._C_ops.swish(add_37) - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_38 = paddle._C_ops.add(add_36, swish_54) - - # builtin.combine: ([2x384x13x13xf32, 
2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32)
-        combine_3 = [swish_47, add_38]
-
-        # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32)
-        concat_3 = paddle._C_ops.concat(combine_3, full_0)
-        del combine_3
-
-        # pd_op.mean: (2x768x1x1xf32) <- (2x768x13x13xf32, 2xi64)
-        mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True)
-
-        # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32)
-        conv2d_75 = paddle._C_ops.conv2d(
-            mean_3, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW"
-        )
-        del parameter_371
-
-        # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64)
-        reshape_3 = paddle._C_ops.reshape(parameter_370, full_int_array_1)
-        del full_int_array_1, parameter_370
-
-        # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32)
-        add_39 = paddle._C_ops.add(conv2d_75, reshape_3)
-
-        # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32)
-        hardsigmoid_3 = paddle._C_ops.hardsigmoid(
-            add_39, float("0.166667"), float("0.5")
-        )
-        del add_39
-
-        # pd_op.multiply: (2x768x13x13xf32) <- (2x768x13x13xf32, 2x768x1x1xf32)
-        multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3)
-
-        # pd_op.conv2d: (2x1024x13x13xf32) <- (2x768x13x13xf32, 1024x768x1x1xf32)
-        conv2d_76 = paddle._C_ops.conv2d(
-            multiply_21, parameter_369, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW"
-        )
-        del parameter_369
-
-        # pd_op.batch_norm_: (2x1024x13x13xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x13x13xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32)
-        (
-            batch_norm__432,
-            batch_norm__433,
-            batch_norm__434,
-            batch_norm__435,
-            batch_norm__436,
-            batch_norm__437,
-        ) = (lambda x, f: f(x))(
-            paddle._C_ops.batch_norm(
-                conv2d_76,
-                parameter_368,
-                parameter_367,
-                parameter_366,
-                parameter_365,
-                False,
-                float("0.9"),
-                float("1e-05"),
-                "NCHW",
-                False,
-                False,
-            ),
-            lambda out: out
-            if isinstance(out, (list, tuple))
-            else (out, None, None, None, None, None),
-        )
-        del parameter_365, parameter_366, parameter_367, parameter_368
-
-        # pd_op.swish: (2x1024x13x13xf32) <- (2x1024x13x13xf32)
-        swish_55 = paddle._C_ops.swish(batch_norm__432)
-
-        # pd_op.flatten: (2x1024x169xf32) <- (2x1024x13x13xf32)
-        flatten_0 = paddle._C_ops.flatten(swish_55, 2, 3)
-
-        # pd_op.transpose: (2x169x1024xf32) <- (2x1024x169xf32)
-        transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1])
-        del flatten_0
-
-        # pd_op.full: (1xf64) <- ()
-        full_1 = paddle._C_ops.full(
-            [1], float("0"), paddle.float64, paddle.core.CPUPlace()
-        )
-
-        # pd_op.full: (1xf64) <- ()
-        full_2 = paddle._C_ops.full(
-            [1], float("13"), paddle.float64, paddle.core.CPUPlace()
-        )
-
-        # pd_op.full: (1xf64) <- ()
-        full_3 = paddle._C_ops.full(
-            [1], float("1"), paddle.float64, paddle.core.CPUPlace()
-        )
-
-        # pd_op.arange: (13xf32) <- (1xf64, 1xf64, 1xf64)
-        arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32")
-        del full_2
-
-        # builtin.combine: ([13xf32, 13xf32]) <- (13xf32, 13xf32)
-        combine_4 = [arange_0, arange_0]
-        del arange_0
-
-        # pd_op.meshgrid: ([13x13xf32, 13x13xf32]) <- ([13xf32, 13xf32])
-        meshgrid_0 = paddle._C_ops.meshgrid(combine_4)
-        del combine_4
-
-        # builtin.split: (13x13xf32, 13x13xf32) <- ([13x13xf32, 13x13xf32])
-        (
-            split_0,
-            split_1,
-        ) = meshgrid_0
-        del meshgrid_0
-
-        # pd_op.full: (1xf64) <- ()
-        full_4 = paddle._C_ops.full(
-            [1], float("256"), paddle.float64, paddle.core.CPUPlace()
-        )
-
-        # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64)
-        arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32")
-        del full_1, full_3, full_4
-
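# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not produced by the exporter).
# The arange/meshgrid ops above and the sin/cos/concat ops that follow
# appear to build a fixed 2D sine-cosine position embedding for the 13x13
# feature map flattened to 169 tokens: omega = 1 / 10000**(i/256) for
# i in [0, 256) (the scale 0.00390625 is 1/256), each grid coordinate is
# multiplied against omega, and [sin, cos] of both axes are concatenated
# to 4 * 256 = 1024 channels, then added to the 2x169x1024 token tensor
# (transpose_0).  A functional equivalent (function and argument names are
# placeholders):
import paddle


def sincos_pos_embed_2d(w=13, h=13, dim=1024, temperature=10000.0):
    grid_w, grid_h = paddle.meshgrid(
        paddle.arange(w, dtype="float32"), paddle.arange(h, dtype="float32")
    )
    quarter = dim // 4
    omega = paddle.arange(quarter, dtype="float32") / quarter
    omega = 1.0 / paddle.pow(paddle.full_like(omega, temperature), omega)
    out_w = grid_w.flatten().unsqueeze(1) * omega.unsqueeze(0)  # [w*h, dim/4]
    out_h = grid_h.flatten().unsqueeze(1) * omega.unsqueeze(0)
    pos = paddle.concat(
        [paddle.sin(out_w), paddle.cos(out_w), paddle.sin(out_h), paddle.cos(out_h)],
        axis=1,
    )
    return pos.unsqueeze(0)  # [1, w*h, dim], broadcast-added to the tokens
# ---------------------------------------------------------------------------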
-        # pd_op.full: (1xf32) <- ()
-        full_5 = paddle._C_ops.full(
-            [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace()
-        )
-
-        # pd_op.scale: (256xf32) <- (256xf32, 1xf32)
-        scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True)
-        del arange_1, full_5
-
-        # pd_op.full: (256xf32) <- ()
-        full_6 = paddle._C_ops.full(
-            [256],
-            float("10000"),
-            paddle.float32,
-            paddle.framework._current_expected_place(),
-        )
-
-        # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32)
-        elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0)
-        del full_6, scale_0
-
-        # pd_op.full: (256xf32) <- ()
-        full_7 = paddle._C_ops.full(
-            [256],
-            float("1"),
-            paddle.float32,
-            paddle.framework._current_expected_place(),
-        )
-
-        # pd_op.divide: (256xf32) <- (256xf32, 256xf32)
-        divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0)
-        del elementwise_pow_0, full_7
-
-        # pd_op.flatten: (169xf32) <- (13x13xf32)
-        flatten_1 = paddle._C_ops.flatten(split_0, 0, 1)
-        del split_0
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_2 = [1]
-
-        # pd_op.unsqueeze: (169x1xf32) <- (169xf32, 1xi64)
-        unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2)
-        del flatten_1
-
-        # pd_op.full_int_array: (1xi64) <- ()
-        full_int_array_3 = [0]
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_16 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_17 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_18 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_19 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_20 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_21 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_22 = full_int_array_3
-
-        # pd_op.assign: (1xi64) <- (1xi64)
-        assign_23 = full_int_array_3
-
-        # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64)
-        unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3)
-        del divide_0
-
-        # pd_op.matmul: (169x256xf32) <- (169x1xf32, 1x256xf32)
-        matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False)
-        del unsqueeze_0
-
-        # pd_op.flatten: (169xf32) <- (13x13xf32)
-        flatten_2 = paddle._C_ops.flatten(split_1, 0, 1)
-        del split_1
-
-        # pd_op.unsqueeze: (169x1xf32) <- (169xf32, 1xi64)
-        unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2)
-        del flatten_2, full_int_array_2
-
-        # pd_op.matmul: (169x256xf32) <- (169x1xf32, 1x256xf32)
-        matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False)
-        del unsqueeze_1, unsqueeze_2
-
-        # pd_op.sin: (169x256xf32) <- (169x256xf32)
-        sin_0 = paddle._C_ops.sin(matmul_0)
-
-        # pd_op.cos: (169x256xf32) <- (169x256xf32)
-        cos_0 = paddle._C_ops.cos(matmul_0)
-        del matmul_0
-
-        # pd_op.sin: (169x256xf32) <- (169x256xf32)
-        sin_1 = paddle._C_ops.sin(matmul_1)
-
-        # pd_op.cos: (169x256xf32) <- (169x256xf32)
-        cos_1 = paddle._C_ops.cos(matmul_1)
-        del matmul_1
-
-        # builtin.combine: ([169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32]) <- (169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32)
-        combine_5 = [sin_0, cos_0, sin_1, cos_1]
-        del cos_0, cos_1, sin_0, sin_1
-
-        # pd_op.concat: (169x1024xf32) <- ([169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32], 1xi32)
-        concat_4 = paddle._C_ops.concat(combine_5, full_0)
-        del combine_5
-
-        # pd_op.unsqueeze: (1x169x1024xf32) <- (169x1024xf32, 1xi64)
-        unsqueeze_3 = paddle._C_ops.unsqueeze(concat_4, full_int_array_3)
-        del concat_4
-
-        # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32)
-        add_40 =
paddle._C_ops.add(transpose_0, unsqueeze_3) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [1024] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_24 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_25 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_26 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_27 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_28 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_29 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_30 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_31 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_32 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_33 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_34 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_35 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_36 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_37 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_38 = full_int_array_4 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - data_18, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - data_19, [0], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_2 = paddle._C_ops.matmul(add_40, slice_0, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_41 = paddle._C_ops.add(matmul_2, slice_1) - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [0, 0, 4, 256] - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) - del reshape_4 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [2048] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_39 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_40 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_41 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_42 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_43 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_44 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_45 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_46 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_47 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_48 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_49 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_50 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_51 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_52 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_53 = full_int_array_6 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - data_18, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - data_19, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: 
(2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_3 = paddle._C_ops.matmul(add_40, slice_2, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_42 = paddle._C_ops.add(matmul_3, slice_3) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) - del reshape_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [2147483647] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_54 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_55 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_56 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_57 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_58 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_59 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_60 = full_int_array_7 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - data_18, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_18 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - data_19, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_19 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_43 = paddle._C_ops.add(matmul_4, slice_5) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) - del reshape_6 - - # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) - matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) - - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_61 = full_8 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_62 = full_8 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_63 = full_8 - - # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) - scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) - del matmul_5 - - # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) - softmax_0 = paddle._C_ops.softmax(scale_1, -1) - del scale_1 - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_64 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_65 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_66 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_67 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_68 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_69 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_70 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_71 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_72 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_73 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_74 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_75 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) 
- assign_76 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_77 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_78 = full_9 - - # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_0, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) - matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) - - # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) - transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) - del matmul_6 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_8 = [0, 0, 1024] - - # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) - reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_364, False, False) - del parameter_364 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_44 = paddle._C_ops.add(matmul_7, parameter_363) - del parameter_363 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_44, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_44 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_45 = paddle._C_ops.add(transpose_0, dropout_2) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_45, parameter_362, parameter_361, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_361, parameter_362 - - # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) - matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) - del parameter_360 - - # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) - add_46 = paddle._C_ops.add(matmul_8, parameter_359) - del parameter_359 - - # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) - gelu_0 = paddle._C_ops.gelu(add_46, False) - - # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_0, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_0 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) - matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) - del parameter_358 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_47 = paddle._C_ops.add(matmul_9, parameter_357) - del parameter_357 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_47, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_47 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) - - # pd_op.layer_norm: (2x169x1024xf32, 
2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_48, parameter_356, parameter_355, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_355, parameter_356 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) - add_49 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - data_20, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - data_21, [0], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_10 = paddle._C_ops.matmul(add_49, slice_6, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_50 = paddle._C_ops.add(matmul_10, slice_7) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) - del reshape_8 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - data_20, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - data_21, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_11 = paddle._C_ops.matmul(add_49, slice_8, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_51 = paddle._C_ops.add(matmul_11, slice_9) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) - del reshape_9 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - data_20, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_20 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - data_21, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_21 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_52 = paddle._C_ops.add(matmul_12, slice_11) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) - del reshape_10 - - # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) - matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) - - # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) - scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) - del matmul_13 - - # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) - softmax_1 = paddle._C_ops.softmax(scale_2, -1) - del scale_2 - - # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) - dropout_8, 
dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) - matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) - - # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) - transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) - del matmul_14 - - # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) - reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_354, False, False) - del parameter_354 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_53 = paddle._C_ops.add(matmul_15, parameter_353) - del parameter_353 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_53, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_53 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_54, parameter_352, parameter_351, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_351, parameter_352 - - # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) - matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) - del parameter_350 - - # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) - add_55 = paddle._C_ops.add(matmul_16, parameter_349) - del parameter_349 - - # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) - gelu_1 = paddle._C_ops.gelu(add_55, False) - - # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_1, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_1 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) - matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) - del parameter_348 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_56 = paddle._C_ops.add(matmul_17, parameter_347) - del parameter_347 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_56, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_56 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_57, parameter_346, parameter_345, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - 
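# Note on the repeated pattern: each transformer encoder layer in this
# graph is post-norm. The 2D positional embedding (unsqueeze_3) is
# re-added to the layer input to form the query/key source, while the
# value projection reads the layer input directly. Q, K and V weights
# are sliced out of a fused 1024x3072 weight and 3072 bias
# (data_18/data_19, data_20/data_21, ...), reshaped into 4 heads of
# width 256, and combined by scaled dot-product attention with scale
# 0.0625 = 1/sqrt(256) and dropout p=0.1, followed by a residual add,
# LayerNorm, and a 1024 -> 2048 -> 1024 feed-forward block with GELU.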
del parameter_345, parameter_346 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) - add_58 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_12 = paddle._C_ops.slice( - data_22, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_13 = paddle._C_ops.slice( - data_23, [0], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_18 = paddle._C_ops.matmul(add_58, slice_12, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_59 = paddle._C_ops.add(matmul_18, slice_13) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) - del reshape_12 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_14 = paddle._C_ops.slice( - data_22, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_15 = paddle._C_ops.slice( - data_23, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_19 = paddle._C_ops.matmul(add_58, slice_14, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_60 = paddle._C_ops.add(matmul_19, slice_15) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) - del reshape_13 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_16 = paddle._C_ops.slice( - data_22, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_22 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_17 = paddle._C_ops.slice( - data_23, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_23 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_61 = paddle._C_ops.add(matmul_20, slice_17) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) - del reshape_14 - - # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) - matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) - - # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) - scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) - del matmul_21 - - # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) - softmax_2 = paddle._C_ops.softmax(scale_3, -1) - del scale_3 - - # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) - dropout_16, dropout_17 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_2, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) - 
matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) - - # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) - transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) - del matmul_22 - - # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_344, False, False) - del parameter_344 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_62 = paddle._C_ops.add(matmul_23, parameter_343) - del parameter_343 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_18, dropout_19 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_62, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_62 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_63, parameter_342, parameter_341, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_341, parameter_342 - - # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) - matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) - del parameter_340 - - # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) - add_64 = paddle._C_ops.add(matmul_24, parameter_339) - del parameter_339 - - # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) - gelu_2 = paddle._C_ops.gelu(add_64, False) - - # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) - dropout_20, dropout_21 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_2, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_2 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) - matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) - del parameter_338 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_65 = paddle._C_ops.add(matmul_25, parameter_337) - del parameter_337 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_22, dropout_23 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_65, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_65 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_66, parameter_336, parameter_335, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_335, parameter_336 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) - add_67 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_18 = paddle._C_ops.slice( - 
data_24, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_19 = paddle._C_ops.slice( - data_25, [0], full_int_array_3, full_int_array_4, [1], [] - ) - del full_int_array_3 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_26 = paddle._C_ops.matmul(add_67, slice_18, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_68 = paddle._C_ops.add(matmul_26, slice_19) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) - del reshape_16 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_20 = paddle._C_ops.slice( - data_24, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_21 = paddle._C_ops.slice( - data_25, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_27 = paddle._C_ops.matmul(add_67, slice_20, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_69 = paddle._C_ops.add(matmul_27, slice_21) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_5) - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) - del reshape_17 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_22 = paddle._C_ops.slice( - data_24, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_24 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_23 = paddle._C_ops.slice( - data_25, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_25 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_70 = paddle._C_ops.add(matmul_28, slice_23) - - # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_5) - del full_int_array_5 - - # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) - transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) - del reshape_18 - - # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) - matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) - - # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) - scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) - del matmul_29 - - # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) - softmax_3 = paddle._C_ops.softmax(scale_4, -1) - del scale_4 - - # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) - dropout_24, dropout_25 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_3, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) - matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) - - # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) - transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) - del matmul_30 - 
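# This is the last of the four encoder layers carried by this subgraph;
# its output is projected, dropout-regularized and layer-normalized,
# then transposed and reshaped back into a 2x1024x13x13 feature map
# (reshape_20) that feeds the convolutional neck below.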
- # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) - reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) - del full_int_array_8 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) - matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_334, False, False) - del parameter_334 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_71 = paddle._C_ops.add(matmul_31, parameter_333) - del parameter_333 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_26, dropout_27 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_71, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_71 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_72, parameter_332, parameter_331, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_331, parameter_332 - - # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) - matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) - del parameter_330 - - # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) - add_73 = paddle._C_ops.add(matmul_32, parameter_329) - del parameter_329 - - # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) - gelu_3 = paddle._C_ops.gelu(add_73, False) - - # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) - dropout_28, dropout_29 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_3, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_3 - - # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) - matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) - del parameter_328 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) - add_74 = paddle._C_ops.add(matmul_33, parameter_327) - del parameter_327 - - # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) - dropout_30, dropout_31 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_74, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_74 - - # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) - add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) - - # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) - layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_75, parameter_326, parameter_325, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_325, parameter_326 - - # pd_op.transpose: (2x1024x169xf32) <- (2x169x1024xf32) - transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) - del layer_norm_21 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_9 = [2, 1024, 13, 13] - - # pd_op.reshape: (2x1024x13x13xf32) <- (2x1024x169xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) - del full_int_array_9 - - # pd_op.conv2d: (2x384x13x13xf32) <- 
(2x1024x13x13xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - reshape_20, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_56 = paddle._C_ops.swish(batch_norm__438) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x1024x13x13xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - reshape_20, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_319 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_57 = paddle._C_ops.swish(batch_norm__444) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_58 = paddle._C_ops.swish(batch_norm__450) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_304 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_59 = paddle._C_ops.swish(add_76) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_60 = paddle._C_ops.swish(batch_norm__468) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_84, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_61 = paddle._C_ops.swish(add_77) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_10 = [5, 5] - - # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_61, - full_int_array_10, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_11 = [9, 9] - - # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_61, - full_int_array_11, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_12 = [13, 13] - - # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_61, - full_int_array_12, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32) - combine_6 = [swish_61, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (2x1536x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x1536x13x13xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_5, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_62 = paddle._C_ops.swish(batch_norm__486) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_63 = paddle._C_ops.swish(batch_norm__492) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_64 = paddle._C_ops.swish(add_78) - - # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) - combine_7 = [swish_56, swish_64] - - # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_6, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) - swish_65 = paddle._C_ops.swish(batch_norm__510) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - 
swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_66 = paddle._C_ops.swish(batch_norm__516) - - # pd_op.nearest_interp: (2x384x26x26xf32) <- (2x384x13x13xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_66, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x384x26x26xf32, 2x512x26x26xf32]) <- (2x384x26x26xf32, 2x512x26x26xf32) - combine_8 = [nearest_interp_0, swish_45] - - # pd_op.concat: (2x896x26x26xf32) <- ([2x384x26x26xf32, 2x512x26x26xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x896x26x26xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_7, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_254 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_67 = paddle._C_ops.swish(batch_norm__522) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x896x26x26xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_7, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_68 = paddle._C_ops.swish(batch_norm__528) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: 
(2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_69 = paddle._C_ops.swish(batch_norm__534) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_239 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_70 = paddle._C_ops.swish(add_79) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.swish: (2x192x26x26xf32) <- 
(2x192x26x26xf32) - swish_71 = paddle._C_ops.swish(batch_norm__552) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_72 = paddle._C_ops.swish(add_80) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_73 = paddle._C_ops.swish(batch_norm__570) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_208, - parameter_207, - parameter_206, - 
parameter_205, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_74 = paddle._C_ops.swish(add_81) - - # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) - combine_9 = [swish_67, swish_74] - - # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_8, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) - swish_75 = paddle._C_ops.swish(batch_norm__588) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_76 = paddle._C_ops.swish(batch_norm__594) - 
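# A second top-down fusion step follows: the 192-channel 26x26 lateral
# feature (swish_76) is upsampled 2x by nearest-neighbor interpolation
# and concatenated with the 256-channel 52x52 backbone feature
# (swish_29), giving a 448-channel map that the following 1x1
# convolutions split into two 96-channel branches (CSP-style) before
# further 3x3 conv + BN + swish fusion.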
- # pd_op.nearest_interp: (2x192x52x52xf32) <- (2x192x26x26xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_76, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([2x192x52x52xf32, 2x256x52x52xf32]) <- (2x192x52x52xf32, 2x256x52x52xf32) - combine_10 = [nearest_interp_1, swish_29] - - # pd_op.concat: (2x448x52x52xf32) <- ([2x192x52x52xf32, 2x256x52x52xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x448x52x52xf32, 96x448x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - concat_9, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_77 = paddle._C_ops.swish(batch_norm__600) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x448x52x52xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_9, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_78 = paddle._C_ops.swish(batch_norm__606) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_79 = paddle._C_ops.swish(batch_norm__612) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_80 = paddle._C_ops.swish(add_82) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_81 = paddle._C_ops.swish(batch_norm__630) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.conv2d: 
(2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_82 = paddle._C_ops.swish(add_83) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_83 = paddle._C_ops.swish(batch_norm__648) - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) 
- else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) - add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - - # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) - swish_84 = paddle._C_ops.swish(add_84) - - # builtin.combine: ([2x96x52x52xf32, 2x96x52x52xf32]) <- (2x96x52x52xf32, 2x96x52x52xf32) - combine_11 = [swish_77, swish_84] - - # pd_op.concat: (2x192x52x52xf32) <- ([2x96x52x52xf32, 2x96x52x52xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (2x192x52x52xf32) <- (2x192x52x52xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_10, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) - swish_85 = paddle._C_ops.swish(batch_norm__666) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x52x52xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_86 = paddle._C_ops.swish(batch_norm__672) - - # builtin.combine: ([2x192x26x26xf32, 2x384x26x26xf32]) <- (2x192x26x26xf32, 2x384x26x26xf32) - combine_12 = [swish_86, swish_75] - - # pd_op.concat: (2x576x26x26xf32) <- ([2x192x26x26xf32, 2x384x26x26xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x576x26x26xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_11, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if 
isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_87 = paddle._C_ops.swish(batch_norm__678) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x576x26x26xf32, 192x576x1x1xf32) - conv2d_118 = paddle._C_ops.conv2d( - concat_11, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_88 = paddle._C_ops.swish(batch_norm__684) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_89 = paddle._C_ops.swish(batch_norm__690) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_121, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_90 = paddle._C_ops.swish(add_85) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_91 = paddle._C_ops.swish(batch_norm__708) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_92 = paddle._C_ops.swish(add_86) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - swish_92, 
parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__730, - batch_norm__731, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_93 = paddle._C_ops.swish(batch_norm__726) - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) - add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - - # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) - swish_94 = paddle._C_ops.swish(add_87) - - # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) - combine_13 = [swish_87, swish_94] - - # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_12, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_128, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) - swish_95 = paddle._C_ops.swish(batch_norm__744) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x26x26xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_96 = paddle._C_ops.swish(batch_norm__750) - - # builtin.combine: ([2x384x13x13xf32, 2x768x13x13xf32]) <- (2x384x13x13xf32, 2x768x13x13xf32) - combine_14 = [swish_96, swish_65] - - # pd_op.concat: (2x1152x13x13xf32) <- ([2x384x13x13xf32, 2x768x13x13xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_14, full_0) - del combine_14 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x1152x13x13xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_13, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_97 = paddle._C_ops.swish(batch_norm__756) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x1152x13x13xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_13, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - 
swish_98 = paddle._C_ops.swish(batch_norm__762) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_48, - parameter_47, - parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, parameter_48 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_99 = paddle._C_ops.swish(batch_norm__768) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_39 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_100 = paddle._C_ops.swish(add_88) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - 
float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_101 = paddle._C_ops.swish(batch_norm__786) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_28, - parameter_27, - parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_102 = paddle._C_ops.swish(add_89) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_138, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_15, parameter_16, parameter_17, parameter_18 - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_103 = paddle._C_ops.swish(batch_norm__804) - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - 
batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 - - # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_140, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, parameter_8 - - # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) - add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - - # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) - swish_104 = paddle._C_ops.swish(add_90) - - # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) - combine_15 = [swish_97, swish_104] - - # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_15, full_0) - del combine_15 - - # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) - swish_0 = paddle._C_ops.swish(batch_norm__822) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_13, - add_14, - add_15, - add_16, - add_17, - add_18, - add_2, - add_20, - add_21, - add_22, - add_23, - add_24, - add_25, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_31, - add_33, - add_34, - add_35, - add_36, - add_37, - add_38, - add_4, - add_40, - add_41, - add_42, - add_43, - add_45, - add_46, - add_48, - add_49, - add_5, - add_50, - add_51, - add_52, - add_54, - add_55, - add_57, - add_58, - add_59, - add_60, - add_61, - add_63, - add_64, - add_66, - add_67, - add_68, - add_69, - add_7, - add_70, - add_72, - add_73, - add_75, - add_76, - add_77, - add_78, - add_79, - add_8, - add_80, - add_81, - add_82, - add_83, - add_84, - add_85, - add_86, - add_87, - add_88, - add_89, 
- add_9, - add_90, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_18, - assign_19, - assign_2, - assign_20, - assign_21, - assign_22, - assign_23, - assign_24, - assign_25, - assign_26, - assign_27, - assign_28, - assign_29, - assign_3, - assign_30, - assign_31, - assign_32, - assign_33, - assign_34, - assign_35, - assign_36, - assign_37, - assign_38, - assign_39, - assign_4, - assign_40, - assign_41, - assign_42, - assign_43, - assign_44, - assign_45, - assign_46, - assign_47, - assign_48, - assign_49, - assign_5, - assign_50, - assign_51, - assign_52, - assign_53, - assign_54, - assign_55, - assign_56, - assign_57, - assign_58, - assign_59, - assign_6, - assign_60, - assign_61, - assign_62, - assign_63, - assign_64, - assign_65, - assign_66, - assign_67, - assign_68, - assign_69, - assign_7, - assign_70, - assign_71, - assign_72, - assign_73, - assign_74, - assign_75, - assign_76, - assign_77, - assign_78, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - 
batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - 
batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - batch_norm__438, - batch_norm__439, - batch_norm__44, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - batch_norm__45, - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__46, - batch_norm__460, - batch_norm__461, - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - batch_norm__468, - batch_norm__469, - batch_norm__47, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - batch_norm__48, - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__49, - batch_norm__490, - batch_norm__491, - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - batch_norm__498, - batch_norm__499, - batch_norm__5, - batch_norm__50, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - batch_norm__51, - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__52, - batch_norm__520, - batch_norm__521, - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - batch_norm__528, - batch_norm__529, - batch_norm__53, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - batch_norm__54, - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__55, - batch_norm__550, - batch_norm__551, - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - 
batch_norm__557, - batch_norm__558, - batch_norm__559, - batch_norm__56, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - batch_norm__57, - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__58, - batch_norm__580, - batch_norm__581, - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - batch_norm__588, - batch_norm__589, - batch_norm__59, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - batch_norm__6, - batch_norm__60, - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__61, - batch_norm__610, - batch_norm__611, - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - batch_norm__618, - batch_norm__619, - batch_norm__62, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - batch_norm__63, - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__64, - batch_norm__640, - batch_norm__641, - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - batch_norm__648, - batch_norm__649, - batch_norm__65, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - batch_norm__66, - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__67, - batch_norm__670, - batch_norm__671, - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - batch_norm__678, - batch_norm__679, - batch_norm__68, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - batch_norm__69, - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__7, - batch_norm__70, - batch_norm__700, - batch_norm__701, - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - batch_norm__708, - batch_norm__709, - batch_norm__71, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - batch_norm__72, - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - 
batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__73, - batch_norm__730, - batch_norm__731, - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - batch_norm__738, - batch_norm__739, - batch_norm__74, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - batch_norm__75, - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__76, - batch_norm__760, - batch_norm__761, - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - batch_norm__768, - batch_norm__769, - batch_norm__77, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - batch_norm__78, - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__79, - batch_norm__790, - batch_norm__791, - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - batch_norm__798, - batch_norm__799, - batch_norm__8, - batch_norm__80, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - batch_norm__81, - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__82, - batch_norm__820, - batch_norm__821, - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_10, - concat_11, - concat_12, - concat_13, - concat_14, - concat_2, - concat_3, - concat_5, - concat_6, - concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_100, - conv2d_101, - conv2d_102, - conv2d_103, - conv2d_104, - conv2d_105, - conv2d_106, - conv2d_107, - conv2d_108, - conv2d_109, - conv2d_11, - conv2d_110, - conv2d_111, - conv2d_112, - conv2d_113, - conv2d_114, - conv2d_115, - conv2d_116, - conv2d_117, - conv2d_118, - conv2d_119, - conv2d_12, - conv2d_120, - conv2d_121, - conv2d_122, - conv2d_123, - conv2d_124, - conv2d_125, - conv2d_126, - conv2d_127, - conv2d_128, - conv2d_129, - conv2d_13, - conv2d_130, - conv2d_131, - conv2d_132, - conv2d_133, - conv2d_134, - conv2d_135, - conv2d_136, - conv2d_137, - conv2d_138, - conv2d_139, - conv2d_14, - conv2d_140, - conv2d_141, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - 
conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_76, - conv2d_77, - conv2d_78, - conv2d_79, - conv2d_8, - conv2d_80, - conv2d_81, - conv2d_82, - conv2d_83, - conv2d_84, - conv2d_85, - conv2d_86, - conv2d_87, - conv2d_88, - conv2d_89, - conv2d_9, - conv2d_90, - conv2d_91, - conv2d_92, - conv2d_93, - conv2d_94, - conv2d_95, - conv2d_96, - conv2d_97, - conv2d_98, - conv2d_99, - dropout_0, - dropout_1, - dropout_10, - dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_16, - dropout_17, - dropout_18, - dropout_19, - dropout_2, - dropout_20, - dropout_21, - dropout_22, - dropout_23, - dropout_24, - dropout_25, - dropout_26, - dropout_27, - dropout_28, - dropout_29, - dropout_3, - dropout_30, - dropout_31, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_8, - dropout_9, - full_0, - full_8, - full_9, - full_int_array_0, - full_int_array_10, - full_int_array_11, - full_int_array_12, - full_int_array_4, - full_int_array_6, - full_int_array_7, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - layer_norm_0, - layer_norm_1, - layer_norm_10, - layer_norm_11, - layer_norm_12, - layer_norm_13, - layer_norm_14, - layer_norm_15, - layer_norm_16, - layer_norm_17, - layer_norm_18, - layer_norm_19, - layer_norm_2, - layer_norm_20, - layer_norm_22, - layer_norm_23, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_10, - matmul_11, - matmul_12, - matmul_15, - matmul_16, - matmul_17, - matmul_18, - matmul_19, - matmul_2, - matmul_20, - matmul_23, - matmul_24, - matmul_25, - matmul_26, - matmul_27, - matmul_28, - matmul_3, - matmul_31, - matmul_32, - matmul_33, - matmul_4, - matmul_7, - matmul_8, - matmul_9, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_10, - multiply_11, - multiply_12, - multiply_13, - multiply_14, - multiply_15, - multiply_16, - multiply_17, - multiply_18, - multiply_19, - multiply_2, - multiply_20, - multiply_21, - multiply_3, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - multiply_8, - multiply_9, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_1, - reshape_11, - reshape_15, - reshape_19, - reshape_2, - reshape_20, - reshape_3, - reshape_7, - slice_0, - slice_1, - slice_10, - slice_11, - slice_12, - slice_13, - slice_14, - slice_15, - slice_16, - slice_17, - slice_18, - slice_19, - slice_2, - slice_20, - slice_21, - slice_22, - slice_23, - slice_3, - slice_4, - slice_5, - slice_6, - slice_7, - slice_8, - slice_9, - softmax_0, - softmax_1, - softmax_2, - softmax_3, - swish_1, - swish_10, - swish_100, - swish_101, - swish_102, - swish_103, - swish_104, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - 
swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_55, - swish_56, - swish_57, - swish_58, - swish_59, - swish_6, - swish_60, - swish_61, - swish_62, - swish_63, - swish_64, - swish_65, - swish_66, - swish_67, - swish_68, - swish_69, - swish_7, - swish_70, - swish_71, - swish_72, - swish_73, - swish_74, - swish_75, - swish_76, - swish_77, - swish_78, - swish_79, - swish_8, - swish_80, - swish_81, - swish_82, - swish_83, - swish_84, - swish_85, - swish_86, - swish_87, - swish_88, - swish_89, - swish_9, - swish_90, - swish_91, - swish_92, - swish_93, - swish_94, - swish_95, - swish_96, - swish_97, - swish_98, - swish_99, - transpose_0, - transpose_1, - transpose_10, - transpose_11, - transpose_12, - transpose_13, - transpose_14, - transpose_15, - transpose_16, - transpose_17, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - transpose_9, - unsqueeze_3, - ) - - return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py deleted file mode 100644 index 5dcac1a4a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_12/weight_meta.py +++ /dev/null @@ -1,8004 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [768] - dtype = "float32" - min_val = float("-0.175929") - max_val = float("0.21086") - mean = float("0.0834788") - std = float("0.0566121") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [768] - dtype = "float32" - min_val = float("0.939955") - max_val = float("1.29826") - mean = float("1.064") - std = float("0.031232") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [768] - dtype = "float32" - min_val = float("0.00130768") - max_val = float("0.0463603") - mean = float("0.00625725") - std = float("0.00393263") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [768] - dtype = "float32" - min_val = float("-0.127586") - max_val = float("0.0517413") - mean = float("-0.02654") - std = float("0.0270736") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0515626") - max_val = float("0.0379063") - mean = float("-0.00014164") - std = float("0.00240107") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [384] - dtype = "float32" - min_val = float("-0.141651") - max_val = float("0.0305715") - mean = float("-0.0187927") - std = float("0.0234486") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [384] - dtype = "float32" - min_val = float("0.945806") - max_val = float("1.04446") - mean = float("0.986675") - std = float("0.0105808") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [384] - dtype = "float32" - min_val = float("0.000827797") - max_val = float("0.0161073") - mean = float("0.00424179") - std = float("0.00259626") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = 
float("-0.0553707") - max_val = float("0.0596015") - mean = float("0.00271009") - std = float("0.0216885") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0308331") - max_val = float("0.0199146") - mean = float("2.09539e-05") - std = float("0.00184746") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384] - dtype = "float32" - min_val = float("-0.141651") - max_val = float("0.0305715") - mean = float("-0.0187927") - std = float("0.0234486") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [384] - dtype = "float32" - min_val = float("0.968047") - max_val = float("1.13059") - mean = float("1.01542") - std = float("0.0171839") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = "float32" - min_val = float("0.00219862") - max_val = float("0.0391499") - mean = float("0.00725225") - std = float("0.00432626") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("-0.173733") - max_val = float("0.12517") - mean = float("-0.0400312") - std = float("0.0316991") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0283367") - max_val = float("0.0330588") - mean = float("-7.3825e-05") - std = float("0.00125417") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-0.170186") - max_val = float("0.0209452") - mean = float("-0.0348788") - std = float("0.0279259") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384] - dtype = "float32" - min_val = float("0.975256") - max_val = float("1.12591") - mean = float("1.01501") - std = float("0.0240755") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("0.00658639") - max_val = float("0.20726") - mean = float("0.0257203") - std = float("0.0173571") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384] - dtype = "float32" - min_val = float("-0.245253") - max_val = float("0.416056") - mean = float("-0.0421834") - std = float("0.0522217") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0311751") - max_val = float("0.0503276") - mean = float("-6.05092e-05") - std = float("0.00141653") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.105187") - max_val = float("0.0129827") - mean = float("-0.0357886") - std = float("0.0193112") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.945568") - max_val = float("1.0451") - mean = float("0.98866") - std = float("0.00984887") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.000652651") - max_val = float("0.0220653") - mean = float("0.00339318") - std = float("0.00205907") - data = None - - -class Program_weight_tensor_parameter_23: - name = 
"parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.077936") - max_val = float("0.0409903") - mean = float("-0.0020853") - std = float("0.01599") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0260426") - max_val = float("0.0248082") - mean = float("-5.14195e-05") - std = float("0.00195167") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.105187") - max_val = float("0.0129827") - mean = float("-0.0357886") - std = float("0.0193112") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384] - dtype = "float32" - min_val = float("0.959552") - max_val = float("1.10507") - mean = float("1.01607") - std = float("0.0177491") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [384] - dtype = "float32" - min_val = float("0.0024964") - max_val = float("0.0250836") - mean = float("0.00780127") - std = float("0.00352904") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("-0.160103") - max_val = float("0.245929") - mean = float("-0.0452978") - std = float("0.0365583") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.034241") - max_val = float("0.0490696") - mean = float("-8.25271e-05") - std = float("0.00126683") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("-0.0896542") - max_val = float("0.0192769") - mean = float("-0.036069") - std = float("0.0194634") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.933175") - max_val = float("1.1146") - mean = float("1.01167") - std = float("0.0265813") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("0.00448167") - max_val = float("0.0632357") - mean = float("0.0163368") - std = float("0.00906915") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("-0.168694") - max_val = float("0.0937087") - mean = float("-0.01934") - std = float("0.0451176") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0389109") - max_val = float("0.0474721") - mean = float("-5.22495e-05") - std = float("0.00144713") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-0.116304") - max_val = float("0.016211") - mean = float("-0.0373544") - std = float("0.0201432") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384] - dtype = "float32" - min_val = float("0.929317") - max_val = float("1.02782") - mean = float("0.987068") - std = float("0.0110352") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("0.00119145") - max_val = float("0.0108677") - mean = float("0.00443524") - std = float("0.00164967") - data = None - 
- -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384] - dtype = "float32" - min_val = float("-0.0535005") - max_val = float("0.0365936") - mean = float("-0.0073878") - std = float("0.0125324") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.037048") - max_val = float("0.0266802") - mean = float("-0.000137547") - std = float("0.00195837") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("-0.116304") - max_val = float("0.016211") - mean = float("-0.0373544") - std = float("0.0201432") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384] - dtype = "float32" - min_val = float("0.98123") - max_val = float("1.10689") - mean = float("1.01832") - std = float("0.0222072") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384] - dtype = "float32" - min_val = float("0.00438825") - max_val = float("0.0317931") - mean = float("0.0111305") - std = float("0.00497164") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384] - dtype = "float32" - min_val = float("-0.158148") - max_val = float("0.0963003") - mean = float("-0.0215295") - std = float("0.032226") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0342558") - max_val = float("0.0596347") - mean = float("-4.36682e-05") - std = float("0.00131956") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [384] - dtype = "float32" - min_val = float("-0.10708") - max_val = float("0.0239013") - mean = float("-0.0375156") - std = float("0.0214475") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [384] - dtype = "float32" - min_val = float("0.944782") - max_val = float("1.11463") - mean = float("1.01186") - std = float("0.0277861") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [384] - dtype = "float32" - min_val = float("0.00510026") - max_val = float("0.073565") - mean = float("0.0136809") - std = float("0.00702878") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("-0.145833") - max_val = float("0.12243") - mean = float("-0.0419585") - std = float("0.0460457") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0265761") - max_val = float("0.0412318") - mean = float("-7.48799e-05") - std = float("0.00147167") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("-0.106796") - max_val = float("0.0466792") - mean = float("-0.0263049") - std = float("0.0154085") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384] - dtype = "float32" - min_val = float("0.973685") - max_val = float("1.08651") - mean = float("1.00904") - std = float("0.0171201") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [384] - dtype = "float32" - min_val = float("0.00211342") - max_val = float("0.0171623") - mean = 
float("0.00491392") - std = float("0.00189255") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384] - dtype = "float32" - min_val = float("-0.0938724") - max_val = float("0.0726207") - mean = float("-0.0169301") - std = float("0.0255402") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0600923") - max_val = float("0.0698518") - mean = float("-7.83959e-05") - std = float("0.00221115") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [384] - dtype = "float32" - min_val = float("-0.0425267") - max_val = float("0.0160945") - mean = float("-0.00899786") - std = float("0.00841522") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [384] - dtype = "float32" - min_val = float("0.959381") - max_val = float("1.05138") - mean = float("1.0079") - std = float("0.0115872") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [384] - dtype = "float32" - min_val = float("0.00177107") - max_val = float("0.0283498") - mean = float("0.00429961") - std = float("0.00192508") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [384] - dtype = "float32" - min_val = float("-0.101028") - max_val = float("0.097759") - mean = float("-0.0218202") - std = float("0.0240243") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0252393") - max_val = float("0.0409905") - mean = float("-0.000104687") - std = float("0.00201567") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.0530202") - max_val = float("0.00596341") - mean = float("-0.0166175") - std = float("0.00987673") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.988638") - max_val = float("1.10406") - mean = float("1.0196") - std = float("0.0169012") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [384] - dtype = "float32" - min_val = float("0.00403058") - max_val = float("0.0513783") - mean = float("0.0126843") - std = float("0.00717747") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - min_val = float("-0.365522") - max_val = float("0.198026") - mean = float("-0.0414412") - std = float("0.0639995") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0201867") - max_val = float("0.0314514") - mean = float("-3.01993e-05") - std = float("0.00114789") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.22273") - max_val = float("0.49215") - mean = float("0.217192") - std = float("0.124233") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - min_val = float("0.919294") - max_val = float("1.48063") - mean = float("1.14128") - std = float("0.0737757") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val 
= float("0.00389442") - max_val = float("0.0590123") - mean = float("0.011774") - std = float("0.00590078") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-0.153266") - max_val = float("0.0818695") - mean = float("-0.0278176") - std = float("0.0324133") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0888812") - max_val = float("0.0966329") - mean = float("-0.000338059") - std = float("0.00511266") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [192] - dtype = "float32" - min_val = float("-0.166124") - max_val = float("0.0467039") - mean = float("-0.0250411") - std = float("0.0394646") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [192] - dtype = "float32" - min_val = float("0.84107") - max_val = float("1.05105") - mean = float("0.97282") - std = float("0.0237598") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [192] - dtype = "float32" - min_val = float("0.00162489") - max_val = float("0.0270337") - mean = float("0.00519677") - std = float("0.00308321") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [192] - dtype = "float32" - min_val = float("-0.0629501") - max_val = float("0.0796146") - mean = float("-0.00468793") - std = float("0.0191205") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0448536") - max_val = float("0.0336517") - mean = float("-0.000157415") - std = float("0.00377073") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [192] - dtype = "float32" - min_val = float("-0.166124") - max_val = float("0.0467039") - mean = float("-0.0250411") - std = float("0.0394646") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [192] - dtype = "float32" - min_val = float("0.729345") - max_val = float("1.12261") - mean = float("1.02194") - std = float("0.0372571") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [192] - dtype = "float32" - min_val = float("0.00562341") - max_val = float("0.0781491") - mean = float("0.0170834") - std = float("0.0090384") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [192] - dtype = "float32" - min_val = float("-0.191415") - max_val = float("0.0874893") - mean = float("-0.0410796") - std = float("0.042934") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0346474") - max_val = float("0.0452792") - mean = float("-0.00013752") - std = float("0.00244921") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [192] - dtype = "float32" - min_val = float("-0.191424") - max_val = float("0.0441491") - mean = float("-0.0580252") - std = float("0.0490538") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [192] - dtype = "float32" - min_val = float("0.897189") - max_val = float("1.18714") - mean = float("1.01553") - std = float("0.048456") - data = None - - -class Program_weight_tensor_parameter_82: - name = 
"parameter_82" - shape = [192] - dtype = "float32" - min_val = float("0.0130131") - max_val = float("0.155912") - mean = float("0.0355598") - std = float("0.0183742") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [192] - dtype = "float32" - min_val = float("-0.322411") - max_val = float("0.461446") - mean = float("-0.0402579") - std = float("0.0600098") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0436326") - max_val = float("0.0759463") - mean = float("-0.00010151") - std = float("0.00273793") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [192] - dtype = "float32" - min_val = float("-0.191731") - max_val = float("0.00856722") - mean = float("-0.0642182") - std = float("0.033376") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [192] - dtype = "float32" - min_val = float("0.922072") - max_val = float("1.04657") - mean = float("0.97362") - std = float("0.017993") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [192] - dtype = "float32" - min_val = float("0.00120452") - max_val = float("0.0109813") - mean = float("0.00399435") - std = float("0.00161666") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [192] - dtype = "float32" - min_val = float("-0.0561207") - max_val = float("0.0353681") - mean = float("-0.00713616") - std = float("0.0144372") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0389677") - max_val = float("0.0317083") - mean = float("-0.000327359") - std = float("0.00370864") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [192] - dtype = "float32" - min_val = float("-0.191731") - max_val = float("0.00856722") - mean = float("-0.0642182") - std = float("0.033376") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [192] - dtype = "float32" - min_val = float("0.967917") - max_val = float("1.14773") - mean = float("1.02404") - std = float("0.0293683") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [192] - dtype = "float32" - min_val = float("0.00374803") - max_val = float("0.0495216") - mean = float("0.010846") - std = float("0.00639043") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [192] - dtype = "float32" - min_val = float("-0.141797") - max_val = float("0.12988") - mean = float("-0.0331827") - std = float("0.0341888") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0433361") - max_val = float("0.0521915") - mean = float("-0.000128374") - std = float("0.00248482") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [192] - dtype = "float32" - min_val = float("-0.188937") - max_val = float("0.0617064") - mean = float("-0.0755865") - std = float("0.0405704") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [192] - dtype = "float32" - min_val = float("0.88236") - max_val = float("1.21791") - mean = float("1.01474") - std = float("0.050711") - data = None - - 
-class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [192] - dtype = "float32" - min_val = float("0.00731951") - max_val = float("0.0706393") - mean = float("0.0229053") - std = float("0.0124686") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [192] - dtype = "float32" - min_val = float("-0.105024") - max_val = float("0.045449") - mean = float("-0.0227613") - std = float("0.0300699") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0453344") - max_val = float("0.0804613") - mean = float("-0.000102284") - std = float("0.00285929") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [192] - dtype = "float32" - min_val = float("-0.229338") - max_val = float("-0.0102477") - mean = float("-0.0831807") - std = float("0.0422279") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [192] - dtype = "float32" - min_val = float("0.900655") - max_val = float("1.0279") - mean = float("0.975271") - std = float("0.0229719") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [192] - dtype = "float32" - min_val = float("0.00158349") - max_val = float("0.0138258") - mean = float("0.00506183") - std = float("0.00178933") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [192] - dtype = "float32" - min_val = float("-0.0350209") - max_val = float("0.0440672") - mean = float("-0.00863895") - std = float("0.0159988") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0414024") - max_val = float("0.072312") - mean = float("-0.000424235") - std = float("0.00418287") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [192] - dtype = "float32" - min_val = float("-0.229338") - max_val = float("-0.0102477") - mean = float("-0.0831807") - std = float("0.0422279") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [192] - dtype = "float32" - min_val = float("0.947228") - max_val = float("1.11076") - mean = float("1.02102") - std = float("0.0305612") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [192] - dtype = "float32" - min_val = float("0.00636548") - max_val = float("0.0561151") - mean = float("0.0150209") - std = float("0.00712578") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [192] - dtype = "float32" - min_val = float("-0.12433") - max_val = float("0.09672") - mean = float("-0.0168594") - std = float("0.0346152") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0444124") - max_val = float("0.0493834") - mean = float("-7.49687e-05") - std = float("0.00263663") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [192] - dtype = "float32" - min_val = float("-0.234043") - max_val = float("0.0809248") - mean = float("-0.0946909") - std = float("0.0462546") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [192] - dtype = "float32" - min_val = float("0.886425") - max_val = 
float("1.20415") - mean = float("1.01671") - std = float("0.0539421") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [192] - dtype = "float32" - min_val = float("0.00720534") - max_val = float("0.0816429") - mean = float("0.0188148") - std = float("0.0102621") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [192] - dtype = "float32" - min_val = float("-0.162095") - max_val = float("0.0789193") - mean = float("-0.0380274") - std = float("0.039478") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0388947") - max_val = float("0.087211") - mean = float("-0.000134346") - std = float("0.00310298") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [192] - dtype = "float32" - min_val = float("-0.200195") - max_val = float("0.0157584") - mean = float("-0.0662765") - std = float("0.0312062") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [192] - dtype = "float32" - min_val = float("0.925347") - max_val = float("1.15235") - mean = float("1.0133") - std = float("0.0383988") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [192] - dtype = "float32" - min_val = float("0.00390106") - max_val = float("0.0290639") - mean = float("0.00837401") - std = float("0.00327435") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [192] - dtype = "float32" - min_val = float("-0.0919967") - max_val = float("0.135726") - mean = float("-0.022066") - std = float("0.0311243") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0594804") - max_val = float("0.0592757") - mean = float("-0.000196378") - std = float("0.00449942") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [192] - dtype = "float32" - min_val = float("-0.0998406") - max_val = float("0.0381397") - mean = float("-0.0139719") - std = float("0.0205426") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [192] - dtype = "float32" - min_val = float("0.92253") - max_val = float("1.19791") - mean = float("1.00313") - std = float("0.0257744") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [192] - dtype = "float32" - min_val = float("0.00364414") - max_val = float("0.0462398") - mean = float("0.0110928") - std = float("0.00672902") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [192] - dtype = "float32" - min_val = float("-0.0792517") - max_val = float("0.0537729") - mean = float("-0.0185521") - std = float("0.0235437") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0668912") - max_val = float("0.102316") - mean = float("-0.000166523") - std = float("0.00443571") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.159157") - max_val = float("-0.000957455") - mean = float("-0.0390269") - std = float("0.0217825") - data = None - - -class Program_weight_tensor_parameter_126: - name = 
"parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.921604") - max_val = float("1.24953") - mean = float("1.00821") - std = float("0.0303984") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.00642947") - max_val = float("0.0670086") - mean = float("0.0210833") - std = float("0.00953439") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.330766") - max_val = float("0.234299") - mean = float("-0.0509451") - std = float("0.0885345") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0500862") - max_val = float("0.0679449") - mean = float("-5.08306e-05") - std = float("0.0028463") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.554317") - max_val = float("1.14248") - mean = float("0.353082") - std = float("0.345155") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.546484") - max_val = float("1.57412") - mean = float("1.15038") - std = float("0.183458") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.0100657") - max_val = float("0.221031") - mean = float("0.0407891") - std = float("0.0263878") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.377989") - max_val = float("0.257639") - mean = float("-0.0649794") - std = float("0.0721023") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.153545") - max_val = float("0.134074") - mean = float("-0.00121304") - std = float("0.0133081") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [96] - dtype = "float32" - min_val = float("-0.457832") - max_val = float("0.237746") - mean = float("-0.00924267") - std = float("0.145032") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [96] - dtype = "float32" - min_val = float("0.761673") - max_val = float("1.23302") - mean = float("0.949003") - std = float("0.0714195") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [96] - dtype = "float32" - min_val = float("0.00289003") - max_val = float("0.0548184") - mean = float("0.0147543") - std = float("0.0100438") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [96] - dtype = "float32" - min_val = float("-0.0823141") - max_val = float("0.101545") - mean = float("-0.015142") - std = float("0.0287704") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0870691") - max_val = float("0.0850803") - mean = float("-0.00142159") - std = float("0.01113") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [96] - dtype = "float32" - min_val = float("-0.457832") - max_val = float("0.237746") - mean = float("-0.00924267") - std = float("0.145032") - data = 
None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [96] - dtype = "float32" - min_val = float("0.507912") - max_val = float("1.26895") - mean = float("1.02934") - std = float("0.096504") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [96] - dtype = "float32" - min_val = float("0.0126569") - max_val = float("0.170178") - mean = float("0.046809") - std = float("0.0277218") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [96] - dtype = "float32" - min_val = float("-0.311825") - max_val = float("0.120203") - mean = float("-0.0489369") - std = float("0.077116") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0902007") - max_val = float("0.0894371") - mean = float("-0.000317607") - std = float("0.0067737") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [96] - dtype = "float32" - min_val = float("-0.702428") - max_val = float("0.490659") - mean = float("-0.113325") - std = float("0.198985") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [96] - dtype = "float32" - min_val = float("0.718272") - max_val = float("1.71659") - mean = float("0.996064") - std = float("0.134561") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [96] - dtype = "float32" - min_val = float("0.0158852") - max_val = float("0.220156") - mean = float("0.0611441") - std = float("0.0432503") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [96] - dtype = "float32" - min_val = float("-0.218411") - max_val = float("0.176567") - mean = float("-0.04823") - std = float("0.0699582") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.128433") - max_val = float("0.105681") - mean = float("-0.00053974") - std = float("0.00768105") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [96] - dtype = "float32" - min_val = float("-0.365465") - max_val = float("0.189556") - mean = float("-0.138691") - std = float("0.0965007") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [96] - dtype = "float32" - min_val = float("0.628614") - max_val = float("1.02635") - mean = float("0.906997") - std = float("0.055833") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [96] - dtype = "float32" - min_val = float("0.00386648") - max_val = float("0.0250898") - mean = float("0.0113332") - std = float("0.00452204") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [96] - dtype = "float32" - min_val = float("-0.0829535") - max_val = float("0.0455066") - mean = float("-0.0107845") - std = float("0.0210116") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0826433") - max_val = float("0.0836331") - mean = float("-0.00123565") - std = float("0.0111633") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [96] - dtype = "float32" - min_val = float("-0.365465") - max_val = float("0.189556") - mean = 
float("-0.138691") - std = float("0.0965007") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [96] - dtype = "float32" - min_val = float("0.808147") - max_val = float("1.15745") - mean = float("1.02165") - std = float("0.061107") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [96] - dtype = "float32" - min_val = float("0.0105243") - max_val = float("0.146319") - mean = float("0.0374655") - std = float("0.0298762") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [96] - dtype = "float32" - min_val = float("-0.202738") - max_val = float("0.0648004") - mean = float("-0.042437") - std = float("0.0413335") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0790843") - max_val = float("0.0756688") - mean = float("-0.000522856") - std = float("0.00687379") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [96] - dtype = "float32" - min_val = float("-0.488696") - max_val = float("0.1689") - mean = float("-0.167703") - std = float("0.131642") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [96] - dtype = "float32" - min_val = float("0.775323") - max_val = float("1.29288") - mean = float("0.963604") - std = float("0.0984425") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [96] - dtype = "float32" - min_val = float("0.0130451") - max_val = float("0.138622") - mean = float("0.0336631") - std = float("0.0190887") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [96] - dtype = "float32" - min_val = float("-0.186613") - max_val = float("0.0841258") - mean = float("-5.39021e-05") - std = float("0.0459065") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.128553") - max_val = float("0.113025") - mean = float("-0.000437448") - std = float("0.00839485") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [96] - dtype = "float32" - min_val = float("-0.492552") - max_val = float("0.0643671") - mean = float("-0.168979") - std = float("0.115008") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [96] - dtype = "float32" - min_val = float("0.725079") - max_val = float("1.00348") - mean = float("0.919861") - std = float("0.0526643") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [96] - dtype = "float32" - min_val = float("0.00612442") - max_val = float("0.0365536") - mean = float("0.016419") - std = float("0.00586326") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [96] - dtype = "float32" - min_val = float("-0.0660382") - max_val = float("0.043413") - mean = float("-0.0231643") - std = float("0.0225905") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.107707") - max_val = float("0.0826476") - mean = float("-0.00240553") - std = float("0.0126403") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [96] - dtype = "float32" - min_val = 
float("-0.492552") - max_val = float("0.0643671") - mean = float("-0.168979") - std = float("0.115008") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [96] - dtype = "float32" - min_val = float("0.759592") - max_val = float("1.15371") - mean = float("0.981072") - std = float("0.058465") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [96] - dtype = "float32" - min_val = float("0.0172157") - max_val = float("0.224833") - mean = float("0.0507609") - std = float("0.0362346") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [96] - dtype = "float32" - min_val = float("-0.248811") - max_val = float("0.0998924") - mean = float("-0.0166411") - std = float("0.0490542") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.116242") - max_val = float("0.0898586") - mean = float("-0.000231126") - std = float("0.00783901") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [96] - dtype = "float32" - min_val = float("-0.567319") - max_val = float("0.348494") - mean = float("-0.179712") - std = float("0.173626") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [96] - dtype = "float32" - min_val = float("0.772527") - max_val = float("1.33704") - mean = float("0.955269") - std = float("0.110943") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [96] - dtype = "float32" - min_val = float("0.0160524") - max_val = float("0.114434") - mean = float("0.036106") - std = float("0.0195034") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [96] - dtype = "float32" - min_val = float("-0.189984") - max_val = float("0.255512") - mean = float("-0.0275046") - std = float("0.0974112") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.161713") - max_val = float("0.142438") - mean = float("-0.000316146") - std = float("0.00950798") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [96] - dtype = "float32" - min_val = float("-0.627686") - max_val = float("0.598483") - mean = float("-0.082655") - std = float("0.256323") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [96] - dtype = "float32" - min_val = float("0.653052") - max_val = float("1.22671") - mean = float("0.866558") - std = float("0.114981") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [96] - dtype = "float32" - min_val = float("0.0123443") - max_val = float("0.0857623") - mean = float("0.0307528") - std = float("0.01414") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [96] - dtype = "float32" - min_val = float("-0.117179") - max_val = float("0.0889702") - mean = float("-0.0137918") - std = float("0.041907") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.162211") - max_val = float("0.186586") - mean = float("-0.00057998") - std = float("0.0123504") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - 
shape = [96] - dtype = "float32" - min_val = float("-0.0984774") - max_val = float("0.230057") - mean = float("0.0612113") - std = float("0.0550468") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [96] - dtype = "float32" - min_val = float("0.692561") - max_val = float("1.12833") - mean = float("0.931782") - std = float("0.0640907") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [96] - dtype = "float32" - min_val = float("0.00681771") - max_val = float("0.0772474") - mean = float("0.0165796") - std = float("0.00961681") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [96] - dtype = "float32" - min_val = float("-0.133097") - max_val = float("0.161018") - mean = float("-0.0184376") - std = float("0.0386089") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.104838") - max_val = float("0.136041") - mean = float("-0.000352218") - std = float("0.00870856") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [192] - dtype = "float32" - min_val = float("-0.296963") - max_val = float("0.196688") - mean = float("-0.0669209") - std = float("0.0696946") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [192] - dtype = "float32" - min_val = float("0.672164") - max_val = float("1.45538") - mean = float("0.884399") - std = float("0.0784254") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [192] - dtype = "float32" - min_val = float("0.0110872") - max_val = float("0.127506") - mean = float("0.0262649") - std = float("0.0134352") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [192] - dtype = "float32" - min_val = float("-0.151007") - max_val = float("0.0461615") - mean = float("-0.0397354") - std = float("0.0392124") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0904828") - max_val = float("0.110909") - mean = float("-0.000644138") - std = float("0.00794554") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [384] - dtype = "float32" - min_val = float("-0.202322") - max_val = float("0.238987") - mean = float("-0.0675229") - std = float("0.0415855") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [384] - dtype = "float32" - min_val = float("0.872032") - max_val = float("1.54191") - mean = float("1.019") - std = float("0.063367") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [384] - dtype = "float32" - min_val = float("0.00721296") - max_val = float("0.101353") - mean = float("0.0168635") - std = float("0.00915068") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [384] - dtype = "float32" - min_val = float("-0.297588") - max_val = float("0.152976") - mean = float("-0.051365") - std = float("0.0447816") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.102299") - max_val = float("0.0969442") - mean = float("-0.000664132") - std = float("0.00717478") - data = None - - 
-class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [192] - dtype = "float32" - min_val = float("-0.177096") - max_val = float("0.00551918") - mean = float("-0.0655662") - std = float("0.0325016") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [192] - dtype = "float32" - min_val = float("0.884728") - max_val = float("0.992155") - mean = float("0.94926") - std = float("0.0164178") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [192] - dtype = "float32" - min_val = float("0.00441221") - max_val = float("0.0261083") - mean = float("0.01005") - std = float("0.00351877") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [192] - dtype = "float32" - min_val = float("-0.0812598") - max_val = float("0.0748845") - mean = float("-0.0233222") - std = float("0.0310809") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0495063") - max_val = float("0.0379562") - mean = float("-0.000707927") - std = float("0.00526785") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [192] - dtype = "float32" - min_val = float("-0.177096") - max_val = float("0.00551918") - mean = float("-0.0655662") - std = float("0.0325016") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [192] - dtype = "float32" - min_val = float("0.944815") - max_val = float("1.03167") - mean = float("0.987873") - std = float("0.016613") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [192] - dtype = "float32" - min_val = float("0.016531") - max_val = float("0.109259") - mean = float("0.0394289") - std = float("0.0158791") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [192] - dtype = "float32" - min_val = float("-0.21924") - max_val = float("0.183769") - mean = float("-0.0255868") - std = float("0.0652744") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0428771") - max_val = float("0.0579551") - mean = float("-7.64353e-05") - std = float("0.00287948") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [192] - dtype = "float32" - min_val = float("-0.216413") - max_val = float("-0.00156605") - mean = float("-0.0741052") - std = float("0.0353871") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [192] - dtype = "float32" - min_val = float("0.939878") - max_val = float("1.15492") - mean = float("1.02948") - std = float("0.0431484") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [192] - dtype = "float32" - min_val = float("0.0381921") - max_val = float("0.244841") - mean = float("0.0704194") - std = float("0.0260547") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [192] - dtype = "float32" - min_val = float("-0.194913") - max_val = float("0.284776") - mean = float("-0.0487736") - std = float("0.0787153") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0537036") - max_val = 
float("0.0569212") - mean = float("-0.000101544") - std = float("0.00352016") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [192] - dtype = "float32" - min_val = float("-0.196865") - max_val = float("-0.00996621") - mean = float("-0.0711692") - std = float("0.0319161") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [192] - dtype = "float32" - min_val = float("0.944171") - max_val = float("1.04842") - mean = float("0.987927") - std = float("0.0137867") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [192] - dtype = "float32" - min_val = float("0.00232538") - max_val = float("0.0114743") - mean = float("0.00426258") - std = float("0.0011511") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [192] - dtype = "float32" - min_val = float("-0.0893703") - max_val = float("0.0463746") - mean = float("-0.0214534") - std = float("0.0217776") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0308295") - max_val = float("0.0472366") - mean = float("-0.000698195") - std = float("0.00548431") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [192] - dtype = "float32" - min_val = float("-0.196865") - max_val = float("-0.00996621") - mean = float("-0.0711692") - std = float("0.0319161") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [192] - dtype = "float32" - min_val = float("0.953905") - max_val = float("1.11243") - mean = float("1.00461") - std = float("0.0264007") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [192] - dtype = "float32" - min_val = float("0.00885181") - max_val = float("0.0602316") - mean = float("0.0174647") - std = float("0.00611072") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [192] - dtype = "float32" - min_val = float("-0.214638") - max_val = float("0.0916921") - mean = float("-0.042109") - std = float("0.0445113") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0395263") - max_val = float("0.0647994") - mean = float("-0.000147334") - std = float("0.00290748") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [192] - dtype = "float32" - min_val = float("-0.23254") - max_val = float("-0.0186192") - mean = float("-0.094269") - std = float("0.0399954") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [192] - dtype = "float32" - min_val = float("0.94661") - max_val = float("1.1911") - mean = float("1.02415") - std = float("0.0459878") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [192] - dtype = "float32" - min_val = float("0.0325254") - max_val = float("0.154956") - mean = float("0.066483") - std = float("0.0222298") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [192] - dtype = "float32" - min_val = float("-0.348988") - max_val = float("0.20982") - mean = float("-0.0943644") - std = float("0.0968837") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - 
shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0523599") - max_val = float("0.0680231") - mean = float("-0.000170588") - std = float("0.00366692") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [192] - dtype = "float32" - min_val = float("-0.154829") - max_val = float("-0.00101215") - mean = float("-0.0685481") - std = float("0.0233247") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [192] - dtype = "float32" - min_val = float("0.932711") - max_val = float("1.07089") - mean = float("0.998751") - std = float("0.0218702") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [192] - dtype = "float32" - min_val = float("0.00202058") - max_val = float("0.0085046") - mean = float("0.00390999") - std = float("0.00110762") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [192] - dtype = "float32" - min_val = float("-0.0777916") - max_val = float("0.098492") - mean = float("-0.0116677") - std = float("0.020466") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0340527") - max_val = float("0.0501646") - mean = float("-0.00038914") - std = float("0.00614264") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [192] - dtype = "float32" - min_val = float("-0.15483") - max_val = float("-0.00101216") - mean = float("-0.0685481") - std = float("0.0233247") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [192] - dtype = "float32" - min_val = float("0.935817") - max_val = float("1.11381") - mean = float("0.992462") - std = float("0.0258361") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [192] - dtype = "float32" - min_val = float("0.00891321") - max_val = float("0.0478413") - mean = float("0.0181253") - std = float("0.00570544") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [192] - dtype = "float32" - min_val = float("-0.258263") - max_val = float("0.130443") - mean = float("-0.0435931") - std = float("0.0468497") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0292394") - max_val = float("0.0522577") - mean = float("-0.00017048") - std = float("0.00289304") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [192] - dtype = "float32" - min_val = float("-0.288771") - max_val = float("0.0148396") - mean = float("-0.109714") - std = float("0.0400291") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [192] - dtype = "float32" - min_val = float("0.944044") - max_val = float("1.25876") - mean = float("1.02656") - std = float("0.0419352") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [192] - dtype = "float32" - min_val = float("0.0137713") - max_val = float("0.0682612") - mean = float("0.0280618") - std = float("0.00984345") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [192] - dtype = "float32" - min_val = float("-0.354673") - max_val = float("0.124192") - mean = float("-0.0487223") - std = float("0.0583851") 
- data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0564756") - max_val = float("0.0674707") - mean = float("-0.000194109") - std = float("0.00414155") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.257095") - max_val = float("-0.01369") - mean = float("-0.121797") - std = float("0.0441852") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("0.916459") - max_val = float("1.13702") - mean = float("1.02436") - std = float("0.0422629") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.00516469") - max_val = float("0.0229026") - mean = float("0.0106646") - std = float("0.00317044") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-0.127284") - max_val = float("0.0963655") - mean = float("0.0144008") - std = float("0.029143") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0721174") - max_val = float("0.0971018") - mean = float("-0.000190287") - std = float("0.00582491") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [192] - dtype = "float32" - min_val = float("-0.177705") - max_val = float("0.21267") - mean = float("-0.00755062") - std = float("0.0506748") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [192] - dtype = "float32" - min_val = float("0.954707") - max_val = float("1.21638") - mean = float("1.05592") - std = float("0.0497891") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [192] - dtype = "float32" - min_val = float("0.00824461") - max_val = float("0.0577322") - mean = float("0.0175512") - std = float("0.00716064") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [192] - dtype = "float32" - min_val = float("-0.0769287") - max_val = float("0.0893626") - mean = float("-0.00207579") - std = float("0.0304575") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0604261") - max_val = float("0.102952") - mean = float("-0.000212686") - std = float("0.00623353") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [384] - dtype = "float32" - min_val = float("-0.249989") - max_val = float("-0.0574309") - mean = float("-0.125167") - std = float("0.0336736") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [384] - dtype = "float32" - min_val = float("0.816049") - max_val = float("1.01536") - mean = float("0.909295") - std = float("0.0258085") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [384] - dtype = "float32" - min_val = float("0.0103681") - max_val = float("0.0948348") - mean = float("0.0266136") - std = float("0.0121593") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [384] - dtype = "float32" - min_val = float("-0.16079") - 
max_val = float("0.0920586") - mean = float("-0.0398222") - std = float("0.0395335") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.03192") - max_val = float("0.0346747") - mean = float("-0.000309907") - std = float("0.00449398") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [768] - dtype = "float32" - min_val = float("-0.104731") - max_val = float("0.0725498") - mean = float("-0.0568804") - std = float("0.0152729") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [768] - dtype = "float32" - min_val = float("0.952515") - max_val = float("1.14217") - mean = float("1.02086") - std = float("0.0209603") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [768] - dtype = "float32" - min_val = float("0.00400082") - max_val = float("0.0306736") - mean = float("0.00912551") - std = float("0.00348196") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [768] - dtype = "float32" - min_val = float("-0.108131") - max_val = float("0.0840492") - mean = float("-0.0308524") - std = float("0.0256791") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0561444") - max_val = float("0.104026") - mean = float("-0.00028012") - std = float("0.00382445") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [384] - dtype = "float32" - min_val = float("-0.158352") - max_val = float("0.074486") - mean = float("-0.0400406") - std = float("0.0206674") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [384] - dtype = "float32" - min_val = float("0.888537") - max_val = float("1.07535") - mean = float("0.982149") - std = float("0.0131757") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [384] - dtype = "float32" - min_val = float("0.00629694") - max_val = float("0.0917591") - mean = float("0.0233191") - std = float("0.00930269") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [384] - dtype = "float32" - min_val = float("-0.0725783") - max_val = float("0.0597257") - mean = float("-0.0049494") - std = float("0.0256969") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0355484") - max_val = float("0.0687831") - mean = float("-5.36416e-05") - std = float("0.00327465") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [384] - dtype = "float32" - min_val = float("-0.158353") - max_val = float("0.074486") - mean = float("-0.0400406") - std = float("0.0206674") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [384] - dtype = "float32" - min_val = float("0.880933") - max_val = float("1.0776") - mean = float("0.993865") - std = float("0.0122579") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [384] - dtype = "float32" - min_val = float("0.0302136") - max_val = float("0.658467") - mean = float("0.153161") - std = float("0.0615601") - data = None - - -class Program_weight_tensor_parameter_273: - name = 
"parameter_273" - shape = [384] - dtype = "float32" - min_val = float("-0.280458") - max_val = float("0.127177") - mean = float("-0.0754248") - std = float("0.0821892") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0402524") - max_val = float("0.0446942") - mean = float("-0.000120035") - std = float("0.00122819") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [384] - dtype = "float32" - min_val = float("-0.080174") - max_val = float("0.116977") - mean = float("-0.0189992") - std = float("0.0160148") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [384] - dtype = "float32" - min_val = float("0.920426") - max_val = float("1.16701") - mean = float("1.01503") - std = float("0.0247134") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [384] - dtype = "float32" - min_val = float("0.0258173") - max_val = float("0.191023") - mean = float("0.0694403") - std = float("0.0298835") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [384] - dtype = "float32" - min_val = float("-0.231545") - max_val = float("0.20914") - mean = float("-0.0203256") - std = float("0.0747077") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.023736") - max_val = float("0.03185") - mean = float("-3.10455e-05") - std = float("0.00160831") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [384] - dtype = "float32" - min_val = float("-0.0734011") - max_val = float("0.0209518") - mean = float("-0.0234929") - std = float("0.0134643") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [384] - dtype = "float32" - min_val = float("0.946001") - max_val = float("1.1693") - mean = float("1.01467") - std = float("0.0274094") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [384] - dtype = "float32" - min_val = float("0.0574212") - max_val = float("0.36113") - mean = float("0.167754") - std = float("0.0624421") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [384] - dtype = "float32" - min_val = float("-1.55759") - max_val = float("1.75104") - mean = float("0.0310697") - std = float("0.530417") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-0.0464008") - max_val = float("0.0539612") - mean = float("8.40176e-05") - std = float("0.00279856") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [384] - dtype = "float32" - min_val = float("-0.0183405") - max_val = float("0.0258023") - mean = float("-0.00146113") - std = float("0.00679536") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [384] - dtype = "float32" - min_val = float("0.969528") - max_val = float("1.06063") - mean = float("0.993845") - std = float("0.0122858") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [384] - dtype = "float32" - min_val = float("0.00270127") - max_val = float("0.0149795") - mean = float("0.00630368") - std = 
float("0.00229884") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [384] - dtype = "float32" - min_val = float("-0.100294") - max_val = float("0.0506795") - mean = float("-0.0387404") - std = float("0.0226508") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0312313") - max_val = float("0.0414999") - mean = float("-0.000484357") - std = float("0.00306542") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [384] - dtype = "float32" - min_val = float("-0.0183405") - max_val = float("0.0258023") - mean = float("-0.00146113") - std = float("0.00679536") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [384] - dtype = "float32" - min_val = float("0.971893") - max_val = float("1.08657") - mean = float("1.00365") - std = float("0.0181767") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [384] - dtype = "float32" - min_val = float("0.0120607") - max_val = float("0.104138") - mean = float("0.0369785") - std = float("0.0161136") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [384] - dtype = "float32" - min_val = float("-0.26934") - max_val = float("0.110834") - mean = float("-0.115324") - std = float("0.0508614") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0290225") - max_val = float("0.0663413") - mean = float("-0.000179167") - std = float("0.00128493") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [384] - dtype = "float32" - min_val = float("-0.0494678") - max_val = float("0.00858064") - mean = float("-0.00839597") - std = float("0.00776335") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [384] - dtype = "float32" - min_val = float("0.95427") - max_val = float("1.13764") - mean = float("1.01254") - std = float("0.0201656") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [384] - dtype = "float32" - min_val = float("0.0686687") - max_val = float("0.339855") - mean = float("0.163513") - std = float("0.0492742") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [384] - dtype = "float32" - min_val = float("-1.18586") - max_val = float("0.819546") - mean = float("-0.222925") - std = float("0.255668") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0230308") - max_val = float("0.0510876") - mean = float("-0.000132883") - std = float("0.00152671") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [384] - dtype = "float32" - min_val = float("-0.0358263") - max_val = float("0.0138961") - mean = float("-0.00764663") - std = float("0.00787851") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [384] - dtype = "float32" - min_val = float("0.984161") - max_val = float("1.03457") - mean = float("0.999922") - std = float("0.00712994") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [384] - dtype = "float32" - min_val = 
float("0.00175415") - max_val = float("0.0108139") - mean = float("0.00364709") - std = float("0.0011719") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [384] - dtype = "float32" - min_val = float("-0.0809601") - max_val = float("0.126515") - mean = float("-0.0205001") - std = float("0.0225741") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0193951") - max_val = float("0.033031") - mean = float("-0.00027113") - std = float("0.00265482") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [384] - dtype = "float32" - min_val = float("-0.0358263") - max_val = float("0.0138961") - mean = float("-0.00764663") - std = float("0.00787851") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [384] - dtype = "float32" - min_val = float("0.981952") - max_val = float("1.06739") - mean = float("1.00455") - std = float("0.0126595") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [384] - dtype = "float32" - min_val = float("0.00890186") - max_val = float("0.0527898") - mean = float("0.0229235") - std = float("0.00801757") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [384] - dtype = "float32" - min_val = float("-0.216942") - max_val = float("0.320178") - mean = float("-0.0713609") - std = float("0.0619825") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.011273") - max_val = float("0.0330076") - mean = float("-0.000116379") - std = float("0.00107823") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-0.0530152") - max_val = float("0.00371186") - mean = float("-0.02064") - std = float("0.00869095") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - min_val = float("0.975684") - max_val = float("1.08474") - mean = float("1.01197") - std = float("0.0159573") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.0111248") - max_val = float("0.075422") - mean = float("0.0304294") - std = float("0.0099954") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-0.17639") - max_val = float("0.208749") - mean = float("-0.0371536") - std = float("0.0489761") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0145313") - max_val = float("0.0244098") - mean = float("-6.42304e-05") - std = float("0.00148685") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [384] - dtype = "float32" - min_val = float("-0.0699692") - max_val = float("0.0213726") - mean = float("-0.0333959") - std = float("0.0126416") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [384] - dtype = "float32" - min_val = float("0.981916") - max_val = float("1.05598") - mean = float("1.01336") - std = float("0.0107863") - data = None - - -class 
Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [384] - dtype = "float32" - min_val = float("0.0069754") - max_val = float("0.0280088") - mean = float("0.0131181") - std = float("0.00318771") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [384] - dtype = "float32" - min_val = float("-0.145777") - max_val = float("0.0727488") - mean = float("-0.0135891") - std = float("0.034538") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.017823") - max_val = float("0.0467678") - mean = float("-0.000196972") - std = float("0.00306745") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.0243502") - max_val = float("0.0209146") - mean = float("-0.000403346") - std = float("0.00795216") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("0.994149") - max_val = float("1.08382") - mean = float("1.04111") - std = float("0.0136566") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.0116675") - max_val = float("0.0524889") - mean = float("0.0209671") - std = float("0.00553338") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-0.150832") - max_val = float("0.146748") - mean = float("-0.00513308") - std = float("0.045991") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0386333") - max_val = float("0.0273306") - mean = float("-0.000230864") - std = float("0.00376096") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [1024] - dtype = "float32" - min_val = float("-2.92289e-10") - max_val = float("3.60219e-10") - mean = float("3.09639e-12") - std = float("8.43535e-11") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [1024] - dtype = "float32" - min_val = float("0.797367") - max_val = float("0.801926") - mean = float("0.79841") - std = float("0.000347528") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [1024] - dtype = "float32" - min_val = float("-0.0176922") - max_val = float("0.0176774") - mean = float("0.000103427") - std = float("0.0103725") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0180007") - max_val = float("0.0179557") - mean = float("-3.21152e-07") - std = float("0.01019") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [2048] - dtype = "float32" - min_val = float("-0.0249658") - max_val = float("0.0249348") - mean = float("-0.000264432") - std = float("0.0140502") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0252063") - max_val = float("0.025197") - mean = float("-1.07218e-06") - std = float("0.0144035") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [1024] - dtype = "float32" - min_val = 
float("-0.000555217") - max_val = float("0.000243272") - mean = float("1.9929e-07") - std = float("9.19554e-05") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [1024] - dtype = "float32" - min_val = float("0.7967") - max_val = float("0.802162") - mean = float("0.798413") - std = float("0.000401989") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [1024] - dtype = "float32" - min_val = float("-0.000419711") - max_val = float("0.000305357") - mean = float("3.42389e-06") - std = float("9.72847e-05") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0435604") - max_val = float("0.0434697") - mean = float("8.92987e-06") - std = float("0.0249341") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [1024] - dtype = "float32" - min_val = float("-0.0003762") - max_val = float("0.000251332") - mean = float("1.8105e-05") - std = float("9.15252e-05") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [1024] - dtype = "float32" - min_val = float("0.796851") - max_val = float("0.802025") - mean = float("0.798428") - std = float("0.00039013") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [1024] - dtype = "float32" - min_val = float("-0.0176874") - max_val = float("0.0176099") - mean = float("0.000102328") - std = float("0.0103695") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0179828") - max_val = float("0.0178747") - mean = float("-4.45837e-07") - std = float("0.01019") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [2048] - dtype = "float32" - min_val = float("-0.024962") - max_val = float("0.0249286") - mean = float("-0.000262687") - std = float("0.0140493") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0251902") - max_val = float("0.0251499") - mean = float("-1.07229e-06") - std = float("0.0144035") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [1024] - dtype = "float32" - min_val = float("-0.000350131") - max_val = float("0.000241698") - mean = float("-4.47589e-07") - std = float("8.78869e-05") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [1024] - dtype = "float32" - min_val = float("0.797093") - max_val = float("0.80173") - mean = float("0.798412") - std = float("0.000358076") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [1024] - dtype = "float32" - min_val = float("-0.000363068") - max_val = float("0.000309618") - mean = float("1.5817e-06") - std = float("9.93162e-05") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0434436") - max_val = float("0.0434242") - mean = float("8.89642e-06") - std = float("0.0249342") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [1024] - dtype = "float32" - min_val = float("-0.000420154") - max_val = float("0.00045328") - mean = float("2.54749e-05") - std = 
float("0.000129154") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [1024] - dtype = "float32" - min_val = float("0.797252") - max_val = float("0.801713") - mean = float("0.798442") - std = float("0.000356685") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [1024] - dtype = "float32" - min_val = float("-0.0177165") - max_val = float("0.0176232") - mean = float("0.000101918") - std = float("0.01036") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0179359") - max_val = float("0.0179271") - mean = float("-5.19214e-07") - std = float("0.01019") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [2048] - dtype = "float32" - min_val = float("-0.0249926") - max_val = float("0.024906") - mean = float("-0.000261807") - std = float("0.0140485") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0251321") - max_val = float("0.0251645") - mean = float("-1.07239e-06") - std = float("0.0144035") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [1024] - dtype = "float32" - min_val = float("-0.000524045") - max_val = float("0.000440655") - mean = float("-3.94412e-07") - std = float("0.000134893") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [1024] - dtype = "float32" - min_val = float("0.797309") - max_val = float("0.801401") - mean = float("0.798411") - std = float("0.000338184") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [1024] - dtype = "float32" - min_val = float("-0.000493111") - max_val = float("0.000473844") - mean = float("-1.84034e-06") - std = float("0.000140404") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.043449") - max_val = float("0.0434786") - mean = float("8.89802e-06") - std = float("0.0249343") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [1024] - dtype = "float32" - min_val = float("-0.000657995") - max_val = float("0.000731658") - mean = float("3.06102e-05") - std = float("0.000218356") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [1024] - dtype = "float32" - min_val = float("0.797424") - max_val = float("0.801436") - mean = float("0.798458") - std = float("0.000356085") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [1024] - dtype = "float32" - min_val = float("-0.0177131") - max_val = float("0.0177688") - mean = float("0.000101971") - std = float("0.0103559") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0180121") - max_val = float("0.0179253") - mean = float("-4.95311e-07") - std = float("0.0101901") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [2048] - dtype = "float32" - min_val = float("-0.024929") - max_val = float("0.0249049") - mean = float("-0.000259673") - std = float("0.0140481") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" 
- shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0251524") - max_val = float("0.0252285") - mean = float("-1.0724e-06") - std = float("0.0144035") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [1024] - dtype = "float32" - min_val = float("-0.000733503") - max_val = float("0.00075958") - mean = float("-7.75532e-07") - std = float("0.000228197") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [1024] - dtype = "float32" - min_val = float("0.797348") - max_val = float("0.801141") - mean = float("0.79841") - std = float("0.000355182") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [1024] - dtype = "float32" - min_val = float("-0.000783512") - max_val = float("0.000888767") - mean = float("-2.37833e-07") - std = float("0.000237219") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0437281") - max_val = float("0.0438114") - mean = float("8.92961e-06") - std = float("0.0249348") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [1024] - dtype = "float32" - min_val = float("-3.7594") - max_val = float("-0.734446") - mean = float("-2.18722") - std = float("0.428724") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [1024] - dtype = "float32" - min_val = float("1.61913") - max_val = float("4.44136") - mean = float("3.08039") - std = float("0.25425") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [1024] - dtype = "float32" - min_val = float("0.00445214") - max_val = float("0.0229447") - mean = float("0.00885079") - std = float("0.00174582") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [1024] - dtype = "float32" - min_val = float("-0.140092") - max_val = float("0.122679") - mean = float("-0.0555511") - std = float("0.0303054") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0427729") - max_val = float("0.0695573") - mean = float("-0.000391863") - std = float("0.00403905") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [768] - dtype = "float32" - min_val = float("-0.014467") - max_val = float("0.00131875") - mean = float("-0.000761015") - std = float("0.00204153") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0787519") - max_val = float("0.135878") - mean = float("-0.000282851") - std = float("0.0016268") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [384] - dtype = "float32" - min_val = float("-1.77402") - max_val = float("0.318654") - mean = float("-0.310798") - std = float("0.291236") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [384] - dtype = "float32" - min_val = float("0.188523") - max_val = float("1.82125") - mean = float("0.609641") - std = float("0.262607") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [384] - dtype = "float32" - min_val = float("5.24774e-05") - max_val = float("0.00104929") - mean = float("0.000233242") 
- std = float("0.000113289") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [384] - dtype = "float32" - min_val = float("-0.0915885") - max_val = float("0.074148") - mean = float("0.0209669") - std = float("0.0171605") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.020214") - max_val = float("0.0255239") - mean = float("-0.000361046") - std = float("0.00271802") - data = None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [384] - dtype = "float32" - min_val = float("-1.77402") - max_val = float("0.318949") - mean = float("-0.310739") - std = float("0.291254") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [384] - dtype = "float32" - min_val = float("0.334653") - max_val = float("2.60511") - mean = float("1.02603") - std = float("0.290253") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [384] - dtype = "float32" - min_val = float("0.000614651") - max_val = float("0.00608489") - mean = float("0.00209955") - std = float("0.000752612") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [384] - dtype = "float32" - min_val = float("-0.228713") - max_val = float("0.112503") - mean = float("0.0217264") - std = float("0.036913") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0190584") - max_val = float("0.0259183") - mean = float("-4.76047e-05") - std = float("0.0017617") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [384] - dtype = "float32" - min_val = float("-2.58225") - max_val = float("0.0329867") - mean = float("-1.56843") - std = float("0.415962") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [384] - dtype = "float32" - min_val = float("0.52002") - max_val = float("1.64429") - mean = float("1.13566") - std = float("0.149475") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [384] - dtype = "float32" - min_val = float("0.0409059") - max_val = float("0.240151") - mean = float("0.08781") - std = float("0.0241685") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [384] - dtype = "float32" - min_val = float("-0.904392") - max_val = float("0.384792") - mean = float("-0.257009") - std = float("0.123376") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0213207") - max_val = float("0.0602371") - mean = float("-0.000201951") - std = float("0.00231308") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [384] - dtype = "float32" - min_val = float("-1.93927") - max_val = float("0.644474") - mean = float("-0.574884") - std = float("0.358671") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [384] - dtype = "float32" - min_val = float("0.163873") - max_val = float("2.06585") - mean = float("0.562027") - std = float("0.227242") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [384] - dtype = "float32" - min_val = 
float("7.74518e-05") - max_val = float("0.00147627") - mean = float("0.000262123") - std = float("0.000127782") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [384] - dtype = "float32" - min_val = float("-0.0472137") - max_val = float("0.0687191") - mean = float("0.0210509") - std = float("0.0147693") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0246209") - max_val = float("0.0323191") - mean = float("-0.00038074") - std = float("0.00249603") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [384] - dtype = "float32" - min_val = float("-1.93932") - max_val = float("0.645257") - mean = float("-0.574812") - std = float("0.358742") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [384] - dtype = "float32" - min_val = float("0.58315") - max_val = float("2.15642") - mean = float("1.08405") - std = float("0.255745") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [384] - dtype = "float32" - min_val = float("0.00138889") - max_val = float("0.00913889") - mean = float("0.00296031") - std = float("0.000878211") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [384] - dtype = "float32" - min_val = float("-0.082843") - max_val = float("0.147398") - mean = float("0.0337157") - std = float("0.0397786") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.017236") - max_val = float("0.0310435") - mean = float("-8.47071e-05") - std = float("0.00189556") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [384] - dtype = "float32" - min_val = float("-2.39591") - max_val = float("0.845752") - mean = float("-1.40539") - std = float("0.360596") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [384] - dtype = "float32" - min_val = float("0.453112") - max_val = float("1.91948") - mean = float("1.16636") - std = float("0.14802") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [384] - dtype = "float32" - min_val = float("0.0310517") - max_val = float("0.140233") - mean = float("0.0619149") - std = float("0.0160952") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [384] - dtype = "float32" - min_val = float("-0.74615") - max_val = float("0.831533") - mean = float("-0.183365") - std = float("0.11049") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0259567") - max_val = float("0.0450409") - mean = float("-0.000200361") - std = float("0.00234146") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [384] - dtype = "float32" - min_val = float("-1.8762") - max_val = float("0.453243") - mean = float("-0.485339") - std = float("0.376467") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [384] - dtype = "float32" - min_val = float("0.0773354") - max_val = float("2.11925") - mean = float("0.441956") - std = float("0.217663") - data = None - - -class Program_weight_tensor_parameter_404: - 
name = "parameter_404" - shape = [384] - dtype = "float32" - min_val = float("6.06445e-05") - max_val = float("0.00132946") - mean = float("0.000308842") - std = float("0.000151133") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [384] - dtype = "float32" - min_val = float("-0.0476186") - max_val = float("0.0717926") - mean = float("0.0252544") - std = float("0.0165171") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0207296") - max_val = float("0.0301957") - mean = float("-0.000479918") - std = float("0.0021441") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [384] - dtype = "float32" - min_val = float("-1.87654") - max_val = float("0.453653") - mean = float("-0.485263") - std = float("0.376563") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [384] - dtype = "float32" - min_val = float("0.521871") - max_val = float("2.22439") - mean = float("1.05289") - std = float("0.260102") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [384] - dtype = "float32" - min_val = float("0.00183356") - max_val = float("0.00905176") - mean = float("0.00403889") - std = float("0.0012136") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [384] - dtype = "float32" - min_val = float("-0.210488") - max_val = float("0.180984") - mean = float("0.039756") - std = float("0.0449489") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0177497") - max_val = float("0.036737") - mean = float("-9.16795e-05") - std = float("0.00200706") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [384] - dtype = "float32" - min_val = float("-2.15635") - max_val = float("0.418177") - mean = float("-1.36712") - std = float("0.277468") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [384] - dtype = "float32" - min_val = float("0.706134") - max_val = float("1.6357") - mean = float("1.14301") - std = float("0.101583") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [384] - dtype = "float32" - min_val = float("0.0221089") - max_val = float("0.144688") - mean = float("0.0472828") - std = float("0.013291") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [384] - dtype = "float32" - min_val = float("-0.690683") - max_val = float("0.206204") - mean = float("-0.128898") - std = float("0.0935638") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0274071") - max_val = float("0.0448565") - mean = float("-0.000158418") - std = float("0.00223888") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [384] - dtype = "float32" - min_val = float("-2.9232") - max_val = float("1.66463") - mean = float("-0.760372") - std = float("0.643546") - data = None - - -class Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [384] - dtype = "float32" - min_val = float("0.953224") - max_val = float("2.91794") - mean = float("1.86322") - std = 
float("0.27618") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [384] - dtype = "float32" - min_val = float("0.00282756") - max_val = float("0.0125667") - mean = float("0.00523085") - std = float("0.0013343") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [384] - dtype = "float32" - min_val = float("-0.250212") - max_val = float("0.146125") - mean = float("0.0636405") - std = float("0.0327087") - data = None - - -class Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0371909") - max_val = float("0.0509187") - mean = float("-0.000727671") - std = float("0.00522845") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [384] - dtype = "float32" - min_val = float("-2.2471") - max_val = float("0.681977") - mean = float("-0.777142") - std = float("0.472903") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [384] - dtype = "float32" - min_val = float("0.965853") - max_val = float("2.89359") - mean = float("2.09705") - std = float("0.305433") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [384] - dtype = "float32" - min_val = float("0.000815531") - max_val = float("0.00405601") - mean = float("0.00200318") - std = float("0.000443934") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [384] - dtype = "float32" - min_val = float("-0.0161045") - max_val = float("0.0799797") - mean = float("0.0350115") - std = float("0.0164865") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0815437") - max_val = float("0.0646253") - mean = float("-0.000388202") - std = float("0.00359255") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [768] - dtype = "float32" - min_val = float("-2.40199") - max_val = float("0.642394") - mean = float("-0.908374") - std = float("0.339302") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [768] - dtype = "float32" - min_val = float("0.530297") - max_val = float("1.90727") - mean = float("0.919687") - std = float("0.149179") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [768] - dtype = "float32" - min_val = float("0.00640934") - max_val = float("0.0572679") - mean = float("0.0157251") - std = float("0.0047052") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [768] - dtype = "float32" - min_val = float("-0.235794") - max_val = float("0.254524") - mean = float("0.0393271") - std = float("0.0563154") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [768, 512, 3, 3] - dtype = "float32" - min_val = float("-0.0378314") - max_val = float("0.0543419") - mean = float("-9.75912e-05") - std = float("0.00233888") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [512] - dtype = "float32" - min_val = float("-3.38998") - max_val = float("1.66652") - mean = float("-1.16179") - std = float("0.513719") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [512] - dtype = "float32" - min_val = 
float("0.523767") - max_val = float("1.67712") - mean = float("1.11122") - std = float("0.148184") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [512] - dtype = "float32" - min_val = float("0.00248322") - max_val = float("0.0169425") - mean = float("0.00762328") - std = float("0.00205743") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [512] - dtype = "float32" - min_val = float("-0.172258") - max_val = float("0.0979883") - mean = float("-0.0487286") - std = float("0.0396462") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.202262") - max_val = float("0.184296") - mean = float("-0.000573477") - std = float("0.00792306") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [384] - dtype = "float32" - min_val = float("-0.0100703") - max_val = float("0.00138871") - mean = float("-0.00295173") - std = float("0.00227127") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.202729") - max_val = float("0.140205") - mean = float("-0.002055") - std = float("0.00490701") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [192] - dtype = "float32" - min_val = float("-1.97045") - max_val = float("0.409864") - mean = float("-0.348766") - std = float("0.333488") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [192] - dtype = "float32" - min_val = float("0.0528864") - max_val = float("2.15987") - mean = float("0.581255") - std = float("0.419833") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [192] - dtype = "float32" - min_val = float("9.0619e-05") - max_val = float("0.0013381") - mean = float("0.000452295") - std = float("0.000216487") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [192] - dtype = "float32" - min_val = float("-0.0346181") - max_val = float("0.054258") - mean = float("0.00535595") - std = float("0.0149315") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.023487") - max_val = float("0.0581182") - mean = float("-0.000339748") - std = float("0.0040934") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [192] - dtype = "float32" - min_val = float("-1.97037") - max_val = float("0.410702") - mean = float("-0.34863") - std = float("0.333546") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [192] - dtype = "float32" - min_val = float("0.372338") - max_val = float("2.70216") - mean = float("1.20181") - std = float("0.493699") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [192] - dtype = "float32" - min_val = float("0.00127295") - max_val = float("0.0154499") - mean = float("0.00513167") - std = float("0.00187691") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [192] - dtype = "float32" - min_val = float("-0.0977349") - max_val = float("0.146963") - mean = float("0.0204027") - std = float("0.0429259") - data = None - - -class 
Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0289902") - max_val = float("0.0378296") - mean = float("-0.000154473") - std = float("0.00313532") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [192] - dtype = "float32" - min_val = float("-2.89065") - max_val = float("-0.176734") - mean = float("-1.31453") - std = float("0.40113") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = [192] - dtype = "float32" - min_val = float("0.696524") - max_val = float("2.09454") - mean = float("1.17918") - std = float("0.169868") - data = None - - -class Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [192] - dtype = "float32" - min_val = float("0.0632461") - max_val = float("0.338318") - mean = float("0.131968") - std = float("0.0437735") - data = None - - -class Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [192] - dtype = "float32" - min_val = float("-2.50976") - max_val = float("1.70367") - mean = float("-0.20284") - std = float("0.378719") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0331927") - max_val = float("0.0456383") - mean = float("-0.000188198") - std = float("0.00374306") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [192] - dtype = "float32" - min_val = float("-1.9404") - max_val = float("0.513024") - mean = float("-0.279434") - std = float("0.321452") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [192] - dtype = "float32" - min_val = float("0.0454025") - max_val = float("1.77027") - mean = float("0.444331") - std = float("0.305722") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [192] - dtype = "float32" - min_val = float("7.46909e-05") - max_val = float("0.00134485") - mean = float("0.000400773") - std = float("0.000214016") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [192] - dtype = "float32" - min_val = float("-0.0293086") - max_val = float("0.0470179") - mean = float("0.00801703") - std = float("0.0116545") - data = None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0234926") - max_val = float("0.036738") - mean = float("-0.000377237") - std = float("0.00377417") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [192] - dtype = "float32" - min_val = float("-1.94044") - max_val = float("0.51462") - mean = float("-0.279235") - std = float("0.321666") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [192] - dtype = "float32" - min_val = float("0.483074") - max_val = float("2.27001") - mean = float("1.13833") - std = float("0.37563") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [192] - dtype = "float32" - min_val = float("0.00274472") - max_val = float("0.0142561") - mean = float("0.00601192") - std = float("0.0018096") - data = None - - -class Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [192] - dtype = "float32" - min_val = float("-0.0926083") - max_val = float("0.111934") - mean = 
float("0.0327612") - std = float("0.0355469") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0231072") - max_val = float("0.038718") - mean = float("-0.000192078") - std = float("0.00338604") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [192] - dtype = "float32" - min_val = float("-2.50828") - max_val = float("-0.123237") - mean = float("-1.28886") - std = float("0.44374") - data = None - - -class Program_weight_tensor_parameter_465: - name = "parameter_465" - shape = [192] - dtype = "float32" - min_val = float("0.65494") - max_val = float("1.66968") - mean = float("1.19938") - std = float("0.166128") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [192] - dtype = "float32" - min_val = float("0.0467958") - max_val = float("0.20027") - mean = float("0.0945377") - std = float("0.0272574") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [192] - dtype = "float32" - min_val = float("-2.14487") - max_val = float("0.410589") - mean = float("-0.110743") - std = float("0.24642") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0362254") - max_val = float("0.0508084") - mean = float("-0.000238085") - std = float("0.00389331") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [192] - dtype = "float32" - min_val = float("-1.7573") - max_val = float("0.468575") - mean = float("-0.262432") - std = float("0.335818") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [192] - dtype = "float32" - min_val = float("0.00295124") - max_val = float("1.67875") - mean = float("0.351961") - std = float("0.251699") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [192] - dtype = "float32" - min_val = float("9.28523e-07") - max_val = float("0.00191867") - mean = float("0.000359659") - std = float("0.00024946") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [192] - dtype = "float32" - min_val = float("-0.0373738") - max_val = float("0.0528657") - mean = float("0.0101716") - std = float("0.0121908") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0303466") - max_val = float("0.0356195") - mean = float("-0.000425557") - std = float("0.0036432") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [192] - dtype = "float32" - min_val = float("-1.7573") - max_val = float("0.470016") - mean = float("-0.262262") - std = float("0.336041") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [192] - dtype = "float32" - min_val = float("0.406102") - max_val = float("1.97794") - mean = float("1.06588") - std = float("0.334156") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [192] - dtype = "float32" - min_val = float("0.0026697") - max_val = float("0.0132838") - mean = float("0.00612262") - std = float("0.00179786") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [192] - dtype = "float32" - 
min_val = float("-0.0636079") - max_val = float("0.115567") - mean = float("0.035464") - std = float("0.0321331") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0321474") - max_val = float("0.0388371") - mean = float("-0.000190596") - std = float("0.00354187") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [192] - dtype = "float32" - min_val = float("-2.49735") - max_val = float("0.137985") - mean = float("-1.24334") - std = float("0.424316") - data = None - - -class Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [192] - dtype = "float32" - min_val = float("0.652126") - max_val = float("1.80991") - mean = float("1.16717") - std = float("0.165409") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [192] - dtype = "float32" - min_val = float("0.0309664") - max_val = float("0.139012") - mean = float("0.0677931") - std = float("0.0174696") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [192] - dtype = "float32" - min_val = float("-1.51706") - max_val = float("0.284541") - mean = float("-0.0982665") - std = float("0.179401") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.05013") - max_val = float("0.0656662") - mean = float("-0.000261502") - std = float("0.00399974") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [192] - dtype = "float32" - min_val = float("-2.07916") - max_val = float("0.533363") - mean = float("-0.272351") - std = float("0.375289") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [192] - dtype = "float32" - min_val = float("0.000510371") - max_val = float("0.732354") - mean = float("0.211968") - std = float("0.136272") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [192] - dtype = "float32" - min_val = float("6.2328e-08") - max_val = float("0.00079658") - mean = float("0.00024174") - std = float("0.00013494") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape = [192] - dtype = "float32" - min_val = float("-0.0197338") - max_val = float("0.031677") - mean = float("0.00620505") - std = float("0.0092414") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0202783") - max_val = float("0.036136") - mean = float("-0.000265605") - std = float("0.00319736") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [192] - dtype = "float32" - min_val = float("-2.07922") - max_val = float("0.535166") - mean = float("-0.272236") - std = float("0.375502") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [192] - dtype = "float32" - min_val = float("0.396505") - max_val = float("1.96272") - mean = float("0.958924") - std = float("0.303858") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [192] - dtype = "float32" - min_val = float("0.00305567") - max_val = float("0.014764") - mean = float("0.00641083") - std = float("0.00196591") - data = None - - -class 
Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [192] - dtype = "float32" - min_val = float("-0.0910836") - max_val = float("0.162129") - mean = float("0.0386063") - std = float("0.0345053") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0299549") - max_val = float("0.0371106") - mean = float("-0.000205046") - std = float("0.00364104") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [192] - dtype = "float32" - min_val = float("-2.74084") - max_val = float("-0.0810353") - mean = float("-1.23693") - std = float("0.434057") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [192] - dtype = "float32" - min_val = float("0.761623") - max_val = float("1.62105") - mean = float("1.15096") - std = float("0.142541") - data = None - - -class Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [192] - dtype = "float32" - min_val = float("0.0272966") - max_val = float("0.103735") - mean = float("0.0487407") - std = float("0.0115761") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [192] - dtype = "float32" - min_val = float("-1.23827") - max_val = float("0.28535") - mean = float("-0.0748347") - std = float("0.164085") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0531238") - max_val = float("0.0579085") - mean = float("-0.000268921") - std = float("0.00396934") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [192] - dtype = "float32" - min_val = float("-1.21219") - max_val = float("0.446681") - mean = float("-0.232278") - std = float("0.339349") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [192] - dtype = "float32" - min_val = float("-9.82711e-05") - max_val = float("0.677789") - mean = float("0.192032") - std = float("0.120727") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [192] - dtype = "float32" - min_val = float("2.25949e-10") - max_val = float("0.000865962") - mean = float("0.000239023") - std = float("0.000143426") - data = None - - -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [192] - dtype = "float32" - min_val = float("-0.0494718") - max_val = float("0.0374457") - mean = float("0.00677273") - std = float("0.0117019") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0342199") - max_val = float("0.0396943") - mean = float("-0.000272099") - std = float("0.00329482") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [192] - dtype = "float32" - min_val = float("-1.21223") - max_val = float("0.447751") - mean = float("-0.232181") - std = float("0.33961") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [192] - dtype = "float32" - min_val = float("0.382831") - max_val = float("1.56386") - mean = float("0.852099") - std = float("0.259991") - data = None - - -class Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [192] - dtype = "float32" - min_val = float("0.00222243") - max_val = float("0.013094") 
- mean = float("0.00622857") - std = float("0.00178192") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [192] - dtype = "float32" - min_val = float("-0.0846332") - max_val = float("0.142415") - mean = float("0.0388704") - std = float("0.03792") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0323048") - max_val = float("0.0364338") - mean = float("-0.000186547") - std = float("0.00363857") - data = None - - -class Program_weight_tensor_parameter_509: - name = "parameter_509" - shape = [192] - dtype = "float32" - min_val = float("-2.48701") - max_val = float("-0.131293") - mean = float("-1.25014") - std = float("0.418255") - data = None - - -class Program_weight_tensor_parameter_510: - name = "parameter_510" - shape = [192] - dtype = "float32" - min_val = float("0.689678") - max_val = float("1.5199") - mean = float("1.12491") - std = float("0.13482") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [192] - dtype = "float32" - min_val = float("0.0185507") - max_val = float("0.061415") - mean = float("0.0351899") - std = float("0.00879212") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [192] - dtype = "float32" - min_val = float("-0.717377") - max_val = float("0.320847") - mean = float("-0.0746543") - std = float("0.131126") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0610342") - max_val = float("0.0592016") - mean = float("-0.000277763") - std = float("0.00397261") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [192] - dtype = "float32" - min_val = float("-1.21753") - max_val = float("0.499396") - mean = float("-0.167678") - std = float("0.2936") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [192] - dtype = "float32" - min_val = float("0.00836385") - max_val = float("1.53625") - mean = float("0.238111") - std = float("0.211728") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [192] - dtype = "float32" - min_val = float("1.9816e-05") - max_val = float("0.00693944") - mean = float("0.000506424") - std = float("0.00066743") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [192] - dtype = "float32" - min_val = float("-0.0656722") - max_val = float("0.0862214") - mean = float("0.00951742") - std = float("0.0164341") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0600528") - max_val = float("0.0312537") - mean = float("-0.000425532") - std = float("0.00397123") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [192] - dtype = "float32" - min_val = float("-1.21747") - max_val = float("0.500448") - mean = float("-0.167516") - std = float("0.293818") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [192] - dtype = "float32" - min_val = float("0.354999") - max_val = float("1.44989") - mean = float("0.756941") - std = float("0.21662") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [192] - dtype = 
"float32" - min_val = float("0.00437457") - max_val = float("0.0169983") - mean = float("0.00911743") - std = float("0.0026827") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [192] - dtype = "float32" - min_val = float("-0.159743") - max_val = float("0.154142") - mean = float("0.0493949") - std = float("0.0451797") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.062497") - max_val = float("0.0530577") - mean = float("-0.000241352") - std = float("0.00357809") - data = None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [192] - dtype = "float32" - min_val = float("-1.87905") - max_val = float("-0.211382") - mean = float("-1.14643") - std = float("0.325653") - data = None - - -class Program_weight_tensor_parameter_525: - name = "parameter_525" - shape = [192] - dtype = "float32" - min_val = float("0.788784") - max_val = float("1.59753") - mean = float("1.12152") - std = float("0.12987") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [192] - dtype = "float32" - min_val = float("0.0159247") - max_val = float("0.0763614") - mean = float("0.0315734") - std = float("0.00929052") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [192] - dtype = "float32" - min_val = float("-0.690131") - max_val = float("0.284936") - mean = float("-0.0667142") - std = float("0.130814") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.062874") - max_val = float("0.076648") - mean = float("-0.000213471") - std = float("0.00383126") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [192] - dtype = "float32" - min_val = float("-2.86217") - max_val = float("1.58057") - mean = float("-0.0275412") - std = float("0.747651") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [192] - dtype = "float32" - min_val = float("0.487672") - max_val = float("2.0776") - mean = float("0.90163") - std = float("0.232007") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - shape = [192] - dtype = "float32" - min_val = float("0.00962562") - max_val = float("0.0593409") - mean = float("0.0232174") - std = float("0.00900384") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [192] - dtype = "float32" - min_val = float("-0.230196") - max_val = float("0.297365") - mean = float("-0.0377198") - std = float("0.0596344") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.108831") - max_val = float("0.0931739") - mean = float("-0.000512323") - std = float("0.00842399") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [192] - dtype = "float32" - min_val = float("-2.96764") - max_val = float("1.66844") - mean = float("0.0968476") - std = float("0.663233") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [192] - dtype = "float32" - min_val = float("0.830791") - max_val = float("5.55835") - mean = float("1.91342") - std = float("0.933379") - data = None - - -class 
Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [192] - dtype = "float32" - min_val = float("0.00601536") - max_val = float("0.0460481") - mean = float("0.0175059") - std = float("0.00564168") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [192] - dtype = "float32" - min_val = float("-0.133093") - max_val = float("0.157686") - mean = float("-0.0238439") - std = float("0.0565348") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0985625") - max_val = float("0.0941202") - mean = float("-0.000511784") - std = float("0.00783691") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [384] - dtype = "float32" - min_val = float("-2.92359") - max_val = float("1.32666") - mean = float("-0.301116") - std = float("0.563662") - data = None - - -class Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [384] - dtype = "float32" - min_val = float("0.631853") - max_val = float("2.47541") - mean = float("1.15998") - std = float("0.257348") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [384] - dtype = "float32" - min_val = float("0.0103628") - max_val = float("0.113663") - mean = float("0.0263639") - std = float("0.0126689") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [384] - dtype = "float32" - min_val = float("-0.269684") - max_val = float("0.245058") - mean = float("0.022821") - std = float("0.0693499") - data = None - - -class Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0753194") - max_val = float("0.0720032") - mean = float("-0.000103466") - std = float("0.00421781") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [256] - dtype = "float32" - min_val = float("-2.04502") - max_val = float("1.28816") - mean = float("-0.924614") - std = float("0.543015") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [256] - dtype = "float32" - min_val = float("0.517239") - max_val = float("1.68961") - mean = float("1.05432") - std = float("0.176149") - data = None - - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [256] - dtype = "float32" - min_val = float("0.00196874") - max_val = float("0.02692") - mean = float("0.00629128") - std = float("0.00300317") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [256] - dtype = "float32" - min_val = float("-0.230499") - max_val = float("0.154945") - mean = float("-0.0516552") - std = float("0.0688298") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.206154") - max_val = float("0.170783") - mean = float("-0.000884197") - std = float("0.0145162") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [192] - dtype = "float32" - min_val = float("-0.0139357") - max_val = float("0.00388361") - mean = float("-0.00495662") - std = float("0.00371291") - data = None - - -class Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.347135") - max_val = 
float("0.228777") - mean = float("-0.00389388") - std = float("0.0106293") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [96] - dtype = "float32" - min_val = float("-1.91355") - max_val = float("0.53303") - mean = float("-0.208939") - std = float("0.434311") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [96] - dtype = "float32" - min_val = float("0.142427") - max_val = float("3.22988") - mean = float("0.635833") - std = float("0.668487") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [96] - dtype = "float32" - min_val = float("7.75639e-05") - max_val = float("0.00243254") - mean = float("0.000585507") - std = float("0.000428968") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [96] - dtype = "float32" - min_val = float("-0.0546921") - max_val = float("0.0598506") - mean = float("0.0051419") - std = float("0.0215625") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0500852") - max_val = float("0.0932317") - mean = float("-0.000561284") - std = float("0.00794853") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [96] - dtype = "float32" - min_val = float("-1.91314") - max_val = float("0.534306") - mean = float("-0.208596") - std = float("0.434435") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [96] - dtype = "float32" - min_val = float("0.343774") - max_val = float("5.47118") - mean = float("1.08565") - std = float("0.88383") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [96] - dtype = "float32" - min_val = float("0.000976934") - max_val = float("0.0156948") - mean = float("0.0053425") - std = float("0.00272306") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [96] - dtype = "float32" - min_val = float("-0.137727") - max_val = float("0.212796") - mean = float("0.0123751") - std = float("0.0612578") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0398886") - max_val = float("0.0746673") - mean = float("-0.000229692") - std = float("0.00588155") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [96] - dtype = "float32" - min_val = float("-2.46605") - max_val = float("-0.0202143") - mean = float("-1.22676") - std = float("0.443304") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [96] - dtype = "float32" - min_val = float("0.542082") - max_val = float("1.6433") - mean = float("0.945634") - std = float("0.172529") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [96] - dtype = "float32" - min_val = float("0.0406212") - max_val = float("0.236841") - mean = float("0.0868745") - std = float("0.0368113") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [96] - dtype = "float32" - min_val = float("-2.80804") - max_val = float("1.61985") - mean = float("-0.194669") - std = float("0.469655") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [96, 96, 3, 3] - dtype = 
"float32" - min_val = float("-0.150203") - max_val = float("0.114223") - mean = float("-0.000376735") - std = float("0.00724688") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [96] - dtype = "float32" - min_val = float("-1.38826") - max_val = float("0.562406") - mean = float("-0.132909") - std = float("0.347394") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [96] - dtype = "float32" - min_val = float("0.0453402") - max_val = float("1.86504") - mean = float("0.460875") - std = float("0.366369") - data = None - - -class Program_weight_tensor_parameter_568: - name = "parameter_568" - shape = [96] - dtype = "float32" - min_val = float("7.68974e-05") - max_val = float("0.00276882") - mean = float("0.000760156") - std = float("0.000616821") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - shape = [96] - dtype = "float32" - min_val = float("-0.0484682") - max_val = float("0.0463877") - mean = float("0.00677392") - std = float("0.017635") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0483138") - max_val = float("0.0415922") - mean = float("-0.000498568") - std = float("0.00710731") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [96] - dtype = "float32" - min_val = float("-1.38834") - max_val = float("0.5648") - mean = float("-0.13256") - std = float("0.347894") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [96] - dtype = "float32" - min_val = float("0.370504") - max_val = float("2.32822") - mean = float("0.901933") - std = float("0.426522") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [96] - dtype = "float32" - min_val = float("0.00320483") - max_val = float("0.0242439") - mean = float("0.00920914") - std = float("0.00476152") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [96] - dtype = "float32" - min_val = float("-0.0963095") - max_val = float("0.121293") - mean = float("0.0354751") - std = float("0.0431439") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.058655") - max_val = float("0.0591114") - mean = float("-0.000356621") - std = float("0.0059174") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [96] - dtype = "float32" - min_val = float("-3.31955") - max_val = float("0.36603") - mean = float("-1.17895") - std = float("0.556023") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [96] - dtype = "float32" - min_val = float("0.473098") - max_val = float("1.98183") - mean = float("1.03911") - std = float("0.238708") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [96] - dtype = "float32" - min_val = float("0.0285476") - max_val = float("0.145477") - mean = float("0.0548077") - std = float("0.0170506") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [96] - dtype = "float32" - min_val = float("-1.25068") - max_val = float("0.505193") - mean = float("-0.0605176") - std = float("0.268142") - data = None - - -class Program_weight_tensor_parameter_580: - name = 
"parameter_580" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.147666") - max_val = float("0.152112") - mean = float("-0.000410438") - std = float("0.00711818") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [96] - dtype = "float32" - min_val = float("-1.24956") - max_val = float("0.58267") - mean = float("-0.109749") - std = float("0.291966") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [96] - dtype = "float32" - min_val = float("0.0243293") - max_val = float("1.27785") - mean = float("0.324816") - std = float("0.192866") - data = None - - -class Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [96] - dtype = "float32" - min_val = float("6.31792e-05") - max_val = float("0.00359895") - mean = float("0.000713188") - std = float("0.000575582") - data = None - - -class Program_weight_tensor_parameter_584: - name = "parameter_584" - shape = [96] - dtype = "float32" - min_val = float("-0.0383061") - max_val = float("0.050179") - mean = float("0.00405305") - std = float("0.016189") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0448708") - max_val = float("0.0573038") - mean = float("-0.000336044") - std = float("0.00726838") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [96] - dtype = "float32" - min_val = float("-1.24942") - max_val = float("0.584539") - mean = float("-0.109552") - std = float("0.292478") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [96] - dtype = "float32" - min_val = float("0.315495") - max_val = float("1.67063") - mean = float("0.747087") - std = float("0.257847") - data = None - - -class Program_weight_tensor_parameter_588: - name = "parameter_588" - shape = [96] - dtype = "float32" - min_val = float("0.00339766") - max_val = float("0.0255152") - mean = float("0.0102502") - std = float("0.00411405") - data = None - - -class Program_weight_tensor_parameter_589: - name = "parameter_589" - shape = [96] - dtype = "float32" - min_val = float("-0.0545808") - max_val = float("0.144753") - mean = float("0.0274454") - std = float("0.0383073") - data = None - - -class Program_weight_tensor_parameter_590: - name = "parameter_590" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.065253") - max_val = float("0.0583777") - mean = float("-0.000331097") - std = float("0.00602268") - data = None - - -class Program_weight_tensor_parameter_591: - name = "parameter_591" - shape = [96] - dtype = "float32" - min_val = float("-3.58296") - max_val = float("0.290726") - mean = float("-1.12856") - std = float("0.572409") - data = None - - -class Program_weight_tensor_parameter_592: - name = "parameter_592" - shape = [96] - dtype = "float32" - min_val = float("0.511106") - max_val = float("2.19165") - mean = float("1.05198") - std = float("0.238255") - data = None - - -class Program_weight_tensor_parameter_593: - name = "parameter_593" - shape = [96] - dtype = "float32" - min_val = float("0.0202502") - max_val = float("0.0763383") - mean = float("0.0399884") - std = float("0.00963234") - data = None - - -class Program_weight_tensor_parameter_594: - name = "parameter_594" - shape = [96] - dtype = "float32" - min_val = float("-0.823777") - max_val = float("0.397341") - mean = float("-0.0477408") - std = float("0.195386") - data = None - 
- -class Program_weight_tensor_parameter_595: - name = "parameter_595" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0973524") - max_val = float("0.130681") - mean = float("-0.000422376") - std = float("0.00719502") - data = None - - -class Program_weight_tensor_parameter_596: - name = "parameter_596" - shape = [96] - dtype = "float32" - min_val = float("-0.892064") - max_val = float("0.529384") - mean = float("-0.160709") - std = float("0.281574") - data = None - - -class Program_weight_tensor_parameter_597: - name = "parameter_597" - shape = [96] - dtype = "float32" - min_val = float("0.0191223") - max_val = float("1.40524") - mean = float("0.32501") - std = float("0.213327") - data = None - - -class Program_weight_tensor_parameter_598: - name = "parameter_598" - shape = [96] - dtype = "float32" - min_val = float("4.82579e-05") - max_val = float("0.00368813") - mean = float("0.000731321") - std = float("0.0005637") - data = None - - -class Program_weight_tensor_parameter_599: - name = "parameter_599" - shape = [96] - dtype = "float32" - min_val = float("-0.0327526") - max_val = float("0.0463647") - mean = float("0.00722598") - std = float("0.0145649") - data = None - - -class Program_weight_tensor_parameter_600: - name = "parameter_600" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0499906") - max_val = float("0.0448114") - mean = float("-0.000606145") - std = float("0.00724394") - data = None - - -class Program_weight_tensor_parameter_601: - name = "parameter_601" - shape = [96] - dtype = "float32" - min_val = float("-0.891955") - max_val = float("0.530721") - mean = float("-0.160571") - std = float("0.281998") - data = None - - -class Program_weight_tensor_parameter_602: - name = "parameter_602" - shape = [96] - dtype = "float32" - min_val = float("0.17446") - max_val = float("1.78047") - mean = float("0.708571") - std = float("0.284378") - data = None - - -class Program_weight_tensor_parameter_603: - name = "parameter_603" - shape = [96] - dtype = "float32" - min_val = float("0.00236192") - max_val = float("0.0258909") - mean = float("0.0102105") - std = float("0.00395084") - data = None - - -class Program_weight_tensor_parameter_604: - name = "parameter_604" - shape = [96] - dtype = "float32" - min_val = float("-0.0582992") - max_val = float("0.137218") - mean = float("0.0409603") - std = float("0.0377027") - data = None - - -class Program_weight_tensor_parameter_605: - name = "parameter_605" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.057305") - max_val = float("0.0650381") - mean = float("-0.000417143") - std = float("0.00601776") - data = None - - -class Program_weight_tensor_parameter_606: - name = "parameter_606" - shape = [96] - dtype = "float32" - min_val = float("-2.65777") - max_val = float("0.065358") - mean = float("-1.06432") - std = float("0.48826") - data = None - - -class Program_weight_tensor_parameter_607: - name = "parameter_607" - shape = [96] - dtype = "float32" - min_val = float("0.512951") - max_val = float("1.73806") - mean = float("1.01547") - std = float("0.193357") - data = None - - -class Program_weight_tensor_parameter_608: - name = "parameter_608" - shape = [96] - dtype = "float32" - min_val = float("0.0176905") - max_val = float("0.0567785") - mean = float("0.0307593") - std = float("0.00710222") - data = None - - -class Program_weight_tensor_parameter_609: - name = "parameter_609" - shape = [96] - dtype = "float32" - min_val = float("-0.762613") - max_val = float("0.609475") - mean = 
float("-0.0648606") - std = float("0.194567") - data = None - - -class Program_weight_tensor_parameter_610: - name = "parameter_610" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0738037") - max_val = float("0.125248") - mean = float("-0.000426247") - std = float("0.0069708") - data = None - - -class Program_weight_tensor_parameter_611: - name = "parameter_611" - shape = [96] - dtype = "float32" - min_val = float("-0.978262") - max_val = float("0.489992") - mean = float("-0.136691") - std = float("0.278636") - data = None - - -class Program_weight_tensor_parameter_612: - name = "parameter_612" - shape = [96] - dtype = "float32" - min_val = float("0.0498074") - max_val = float("1.1462") - mean = float("0.296075") - std = float("0.172323") - data = None - - -class Program_weight_tensor_parameter_613: - name = "parameter_613" - shape = [96] - dtype = "float32" - min_val = float("0.000185263") - max_val = float("0.00518845") - mean = float("0.00108541") - std = float("0.000730374") - data = None - - -class Program_weight_tensor_parameter_614: - name = "parameter_614" - shape = [96] - dtype = "float32" - min_val = float("-0.041706") - max_val = float("0.0562903") - mean = float("0.00553756") - std = float("0.017998") - data = None - - -class Program_weight_tensor_parameter_615: - name = "parameter_615" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0731207") - max_val = float("0.0763792") - mean = float("-0.000594618") - std = float("0.00825765") - data = None - - -class Program_weight_tensor_parameter_616: - name = "parameter_616" - shape = [96] - dtype = "float32" - min_val = float("-0.978083") - max_val = float("0.492448") - mean = float("-0.136655") - std = float("0.279122") - data = None - - -class Program_weight_tensor_parameter_617: - name = "parameter_617" - shape = [96] - dtype = "float32" - min_val = float("0.236133") - max_val = float("1.69671") - mean = float("0.603953") - std = float("0.228164") - data = None - - -class Program_weight_tensor_parameter_618: - name = "parameter_618" - shape = [96] - dtype = "float32" - min_val = float("0.00623834") - max_val = float("0.0304043") - mean = float("0.0139144") - std = float("0.00496797") - data = None - - -class Program_weight_tensor_parameter_619: - name = "parameter_619" - shape = [96] - dtype = "float32" - min_val = float("-0.0709982") - max_val = float("0.13525") - mean = float("0.0270257") - std = float("0.0460507") - data = None - - -class Program_weight_tensor_parameter_620: - name = "parameter_620" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0654835") - max_val = float("0.0522648") - mean = float("-0.00036204") - std = float("0.0060426") - data = None - - -class Program_weight_tensor_parameter_621: - name = "parameter_621" - shape = [96] - dtype = "float32" - min_val = float("-3.46434") - max_val = float("0.199609") - mean = float("-1.00527") - std = float("0.548081") - data = None - - -class Program_weight_tensor_parameter_622: - name = "parameter_622" - shape = [96] - dtype = "float32" - min_val = float("0.686506") - max_val = float("2.51291") - mean = float("1.07427") - std = float("0.212412") - data = None - - -class Program_weight_tensor_parameter_623: - name = "parameter_623" - shape = [96] - dtype = "float32" - min_val = float("0.0132607") - max_val = float("0.0547669") - mean = float("0.0263345") - std = float("0.00858541") - data = None - - -class Program_weight_tensor_parameter_624: - name = "parameter_624" - shape = [96] - dtype = "float32" - min_val = 
float("-0.483153") - max_val = float("0.528087") - mean = float("-0.0517666") - std = float("0.193156") - data = None - - -class Program_weight_tensor_parameter_625: - name = "parameter_625" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0824841") - max_val = float("0.0934753") - mean = float("-0.000357672") - std = float("0.00712731") - data = None - - -class Program_weight_tensor_parameter_626: - name = "parameter_626" - shape = [96] - dtype = "float32" - min_val = float("-0.625302") - max_val = float("0.449836") - mean = float("-0.0825559") - std = float("0.256738") - data = None - - -class Program_weight_tensor_parameter_627: - name = "parameter_627" - shape = [96] - dtype = "float32" - min_val = float("0.0910018") - max_val = float("1.30085") - mean = float("0.309049") - std = float("0.196412") - data = None - - -class Program_weight_tensor_parameter_628: - name = "parameter_628" - shape = [96] - dtype = "float32" - min_val = float("0.000380277") - max_val = float("0.0176497") - mean = float("0.00357657") - std = float("0.00282864") - data = None - - -class Program_weight_tensor_parameter_629: - name = "parameter_629" - shape = [96] - dtype = "float32" - min_val = float("-0.035932") - max_val = float("0.0300925") - mean = float("-5.114e-05") - std = float("0.0106361") - data = None - - -class Program_weight_tensor_parameter_630: - name = "parameter_630" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0925016") - max_val = float("0.0753255") - mean = float("-0.00105853") - std = float("0.00936655") - data = None - - -class Program_weight_tensor_parameter_631: - name = "parameter_631" - shape = [96] - dtype = "float32" - min_val = float("-0.625183") - max_val = float("0.450937") - mean = float("-0.082575") - std = float("0.257081") - data = None - - -class Program_weight_tensor_parameter_632: - name = "parameter_632" - shape = [96] - dtype = "float32" - min_val = float("0.210658") - max_val = float("1.42703") - mean = float("0.527208") - std = float("0.258269") - data = None - - -class Program_weight_tensor_parameter_633: - name = "parameter_633" - shape = [96] - dtype = "float32" - min_val = float("0.0103464") - max_val = float("0.0958287") - mean = float("0.0339795") - std = float("0.0172322") - data = None - - -class Program_weight_tensor_parameter_634: - name = "parameter_634" - shape = [96] - dtype = "float32" - min_val = float("-0.108648") - max_val = float("0.0906186") - mean = float("-0.00832054") - std = float("0.0379588") - data = None - - -class Program_weight_tensor_parameter_635: - name = "parameter_635" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0885375") - max_val = float("0.0525934") - mean = float("-0.000466484") - std = float("0.00584459") - data = None - - -class Program_weight_tensor_parameter_636: - name = "parameter_636" - shape = [96] - dtype = "float32" - min_val = float("-2.40893") - max_val = float("0.508421") - mean = float("-0.828862") - std = float("0.467337") - data = None - - -class Program_weight_tensor_parameter_637: - name = "parameter_637" - shape = [96] - dtype = "float32" - min_val = float("0.853968") - max_val = float("2.18309") - mean = float("1.27545") - std = float("0.208741") - data = None - - -class Program_weight_tensor_parameter_638: - name = "parameter_638" - shape = [96] - dtype = "float32" - min_val = float("0.0115478") - max_val = float("0.0463068") - mean = float("0.0216413") - std = float("0.00767502") - data = None - - -class Program_weight_tensor_parameter_639: - name = 
"parameter_639" - shape = [96] - dtype = "float32" - min_val = float("-0.571223") - max_val = float("0.473029") - mean = float("-0.053671") - std = float("0.173924") - data = None - - -class Program_weight_tensor_parameter_640: - name = "parameter_640" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.15411") - max_val = float("0.150524") - mean = float("-0.000241604") - std = float("0.00722176") - data = None - - -class Program_weight_tensor_parameter_641: - name = "parameter_641" - shape = [96] - dtype = "float32" - min_val = float("-3.16609") - max_val = float("1.88989") - mean = float("0.501666") - std = float("0.861493") - data = None - - -class Program_weight_tensor_parameter_642: - name = "parameter_642" - shape = [96] - dtype = "float32" - min_val = float("0.214988") - max_val = float("2.6299") - mean = float("0.562885") - std = float("0.31708") - data = None - - -class Program_weight_tensor_parameter_643: - name = "parameter_643" - shape = [96] - dtype = "float32" - min_val = float("0.00741665") - max_val = float("0.158098") - mean = float("0.0323603") - std = float("0.0239189") - data = None - - -class Program_weight_tensor_parameter_644: - name = "parameter_644" - shape = [96] - dtype = "float32" - min_val = float("-0.27197") - max_val = float("0.329568") - mean = float("-0.0148612") - std = float("0.0939931") - data = None - - -class Program_weight_tensor_parameter_645: - name = "parameter_645" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.186901") - max_val = float("0.225419") - mean = float("-0.000291508") - std = float("0.0156297") - data = None - - -class Program_weight_tensor_parameter_646: - name = "parameter_646" - shape = [96] - dtype = "float32" - min_val = float("-4.92284") - max_val = float("1.57998") - mean = float("0.384603") - std = float("1.04888") - data = None - - -class Program_weight_tensor_parameter_647: - name = "parameter_647" - shape = [96] - dtype = "float32" - min_val = float("0.414126") - max_val = float("6.78093") - mean = float("1.69449") - std = float("1.30795") - data = None - - -class Program_weight_tensor_parameter_648: - name = "parameter_648" - shape = [96] - dtype = "float32" - min_val = float("0.00527536") - max_val = float("0.274604") - mean = float("0.0382764") - std = float("0.0355398") - data = None - - -class Program_weight_tensor_parameter_649: - name = "parameter_649" - shape = [96] - dtype = "float32" - min_val = float("-0.171845") - max_val = float("0.443762") - mean = float("0.0466766") - std = float("0.0965758") - data = None - - -class Program_weight_tensor_parameter_650: - name = "parameter_650" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.116975") - max_val = float("0.156029") - mean = float("0.000440768") - std = float("0.0149691") - data = None - - -class Program_weight_tensor_parameter_651: - name = "parameter_651" - shape = [192] - dtype = "float32" - min_val = float("-2.27475") - max_val = float("1.75104") - mean = float("-0.126037") - std = float("0.740702") - data = None - - -class Program_weight_tensor_parameter_652: - name = "parameter_652" - shape = [192] - dtype = "float32" - min_val = float("0.632268") - max_val = float("2.97322") - mean = float("1.08733") - std = float("0.283408") - data = None - - -class Program_weight_tensor_parameter_653: - name = "parameter_653" - shape = [192] - dtype = "float32" - min_val = float("0.0110312") - max_val = float("0.234931") - mean = float("0.0439587") - std = float("0.0319644") - data = None - - -class 
Program_weight_tensor_parameter_654: - name = "parameter_654" - shape = [192] - dtype = "float32" - min_val = float("-0.578422") - max_val = float("0.269069") - mean = float("-0.0941015") - std = float("0.118583") - data = None - - -class Program_weight_tensor_parameter_655: - name = "parameter_655" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.0856428") - max_val = float("0.123627") - mean = float("-0.000225745") - std = float("0.00765725") - data = None - - -class Program_weight_tensor_parameter_656: - name = "parameter_656" - shape = [128] - dtype = "float32" - min_val = float("-2.81597") - max_val = float("1.9636") - mean = float("-0.71259") - std = float("0.647835") - data = None - - -class Program_weight_tensor_parameter_657: - name = "parameter_657" - shape = [128] - dtype = "float32" - min_val = float("0.311227") - max_val = float("2.8783") - mean = float("1.01845") - std = float("0.278722") - data = None - - -class Program_weight_tensor_parameter_658: - name = "parameter_658" - shape = [128] - dtype = "float32" - min_val = float("0.000859604") - max_val = float("0.015639") - mean = float("0.00453842") - std = float("0.00230768") - data = None - - -class Program_weight_tensor_parameter_659: - name = "parameter_659" - shape = [128] - dtype = "float32" - min_val = float("-0.237838") - max_val = float("0.261934") - mean = float("0.00314431") - std = float("0.0867318") - data = None - - -class Program_weight_tensor_parameter_660: - name = "parameter_660" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.171773") - max_val = float("0.211127") - mean = float("-0.00142636") - std = float("0.0224525") - data = None - - -class Program_weight_tensor_parameter_661: - name = "parameter_661" - shape = [96] - dtype = "float32" - min_val = float("-0.0180386") - max_val = float("3.78007e-05") - mean = float("-0.00735479") - std = float("0.00450801") - data = None - - -class Program_weight_tensor_parameter_662: - name = "parameter_662" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.30281") - max_val = float("0.123007") - mean = float("-0.00790532") - std = float("0.0180213") - data = None - - -class Program_weight_tensor_parameter_663: - name = "parameter_663" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_664: - name = "parameter_664" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_665: - name = "parameter_665" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_666: - name = "parameter_666" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_667: - name = "parameter_667" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0501789") - max_val = float("0.0563261") - mean = float("-0.00170388") - std = float("0.0129798") - data = None - - -class Program_weight_tensor_parameter_668: - name = "parameter_668" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_669: - name = "parameter_669" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_670: - name = "parameter_670" - shape = [48] - dtype = 
"float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_671: - name = "parameter_671" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_672: - name = "parameter_672" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0578676") - max_val = float("0.0799749") - mean = float("-0.000509865") - std = float("0.0110281") - data = None - - -class Program_weight_tensor_parameter_673: - name = "parameter_673" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_674: - name = "parameter_674" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_675: - name = "parameter_675" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_676: - name = "parameter_676" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_677: - name = "parameter_677" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0925274") - max_val = float("0.0949158") - mean = float("-0.00064859") - std = float("0.0123667") - data = None - - -class Program_weight_tensor_parameter_678: - name = "parameter_678" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_679: - name = "parameter_679" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_680: - name = "parameter_680" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_681: - name = "parameter_681" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_682: - name = "parameter_682" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0727088") - max_val = float("0.0782992") - mean = float("-0.00102365") - std = float("0.0139349") - data = None - - -class Program_weight_tensor_parameter_683: - name = "parameter_683" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_684: - name = "parameter_684" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_685: - name = "parameter_685" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_686: - name = "parameter_686" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_687: - name = "parameter_687" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0621898") - max_val = float("0.0692526") - mean = float("-0.000822014") - std = float("0.0111057") - data = None - - -class Program_weight_tensor_parameter_688: - name = "parameter_688" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_689: - name = "parameter_689" - shape = [48] - 
dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_690: - name = "parameter_690" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_691: - name = "parameter_691" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_692: - name = "parameter_692" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.11162") - max_val = float("0.0943574") - mean = float("-0.000368661") - std = float("0.0125785") - data = None - - -class Program_weight_tensor_parameter_693: - name = "parameter_693" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_694: - name = "parameter_694" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_695: - name = "parameter_695" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_696: - name = "parameter_696" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_697: - name = "parameter_697" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0944494") - max_val = float("0.0702451") - mean = float("-0.00185301") - std = float("0.0172184") - data = None - - -class Program_weight_tensor_parameter_698: - name = "parameter_698" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_699: - name = "parameter_699" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_700: - name = "parameter_700" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_701: - name = "parameter_701" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_702: - name = "parameter_702" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0691644") - max_val = float("0.0974384") - mean = float("-0.000506655") - std = float("0.011691") - data = None - - -class Program_weight_tensor_parameter_703: - name = "parameter_703" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_704: - name = "parameter_704" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_705: - name = "parameter_705" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_706: - name = "parameter_706" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_707: - name = "parameter_707" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.133213") - max_val = float("0.0905212") - mean = float("-0.000334254") - std = float("0.0134452") - data = None - - -class Program_weight_tensor_parameter_708: - name = "parameter_708" - shape = [48] 
- dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_709: - name = "parameter_709" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_710: - name = "parameter_710" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_711: - name = "parameter_711" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_712: - name = "parameter_712" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.17806") - max_val = float("0.14305") - mean = float("-0.00229242") - std = float("0.0246641") - data = None - - -class Program_weight_tensor_parameter_713: - name = "parameter_713" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_714: - name = "parameter_714" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_715: - name = "parameter_715" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_716: - name = "parameter_716" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_717: - name = "parameter_717" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.135012") - max_val = float("0.178483") - mean = float("-0.0004429") - std = float("0.0226955") - data = None - - -class Program_weight_tensor_parameter_718: - name = "parameter_718" - shape = [96] - dtype = "float32" - min_val = float("-3.40701") - max_val = float("3.27538") - mean = float("0.329531") - std = float("1.14502") - data = None - - -class Program_weight_tensor_parameter_719: - name = "parameter_719" - shape = [96] - dtype = "float32" - min_val = float("0.865919") - max_val = float("4.91404") - mean = float("1.91603") - std = float("0.752783") - data = None - - -class Program_weight_tensor_parameter_720: - name = "parameter_720" - shape = [96] - dtype = "float32" - min_val = float("0.704881") - max_val = float("31.7293") - mean = float("2.73326") - std = float("3.48853") - data = None - - -class Program_weight_tensor_parameter_721: - name = "parameter_721" - shape = [96] - dtype = "float32" - min_val = float("-1.47461") - max_val = float("2.59735") - mean = float("-0.288555") - std = float("0.730674") - data = None - - -class Program_weight_tensor_parameter_722: - name = "parameter_722" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.110689") - max_val = float("0.13859") - mean = float("-0.000360127") - std = float("0.0133189") - data = None - - -class Program_weight_tensor_parameter_723: - name = "parameter_723" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_724: - name = "parameter_724" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_725: - name = "parameter_725" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_726: - name = "parameter_726" - shape = [64] - 
dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_727: - name = "parameter_727" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.179264") - max_val = float("0.162144") - mean = float("-0.000679023") - std = float("0.020536") - data = None - - -class Program_weight_tensor_parameter_728: - name = "parameter_728" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_729: - name = "parameter_729" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_730: - name = "parameter_730" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_731: - name = "parameter_731" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_732: - name = "parameter_732" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-0.347786") - max_val = float("0.218964") - mean = float("-0.000199571") - std = float("0.0261033") - data = None - - -class Program_weight_tensor_parameter_733: - name = "parameter_733" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_734: - name = "parameter_734" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_735: - name = "parameter_735" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_736: - name = "parameter_736" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_737: - name = "parameter_737" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-0.317155") - max_val = float("0.280865") - mean = float("-0.00214957") - std = float("0.0702742") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt deleted file mode 100644 index f1aa15364..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -09f7308dca33192a680fa6e253963eb73d874927eb1eaddbb7fe31eeed376574 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json deleted file mode 100644 index 8b4fccfd1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py deleted file mode 100644 index c085c8396..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/model.py +++ /dev/null @@ -1,110 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xi32) <- () - full_0 = 
paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x3549x11xf32) <- (2x3549xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - data_1 % paddle.cast(full_0, data_1.dtype), full_0 - ) - del data_1, full_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] - - # pd_op.slice: (2x3549x10xf32) <- (2x3549x11xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] - ) - del full_int_array_0, full_int_array_1, one_hot_0 - - # pd_op.pow: (2x3549x10xf32) <- (2x3549x10xf32) - pow_0 = paddle._C_ops.pow(data_0, float("2")) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x3549x10xf32) <- (2x3549x10xf32, 1xf32) - scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) - del pow_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x3549x10xf32) <- (2x3549x10xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_0, full_2, float("1"), True) - del full_2 - - # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) - multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) - - # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) - multiply_1 = paddle._C_ops.multiply(data_2, slice_0) - del slice_0 - - # pd_op.add: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) - add_0 = paddle._C_ops.add(multiply_0, multiply_1) - - # pd_op.bce_loss: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) - bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) - del data_0 - - # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x10xf32) - multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_2 = [] - - # pd_op.sum: (xf32) <- (2x3549x10xf32, 0xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) - - # pd_op.sum: (xf32) <- (2x3549x10xf32, 0xi64) - sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) - del data_2 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) - del full_3, full_4, sum_1 - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_0, clip_0) - del ( - add_0, - bce_loss_0, - clip_0, - full_1, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - scale_0, - scale_1, - sum_0, - ) - - return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json deleted file mode 100644 index 8b4fccfd1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": 
"PP-YOLOE_plus_SOD-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt deleted file mode 100644 index 82d83ca0b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json deleted file mode 100644 index 8b4fccfd1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py deleted file mode 100644 index 812c05090..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/input_meta.py +++ /dev/null @@ -1,19 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "float32" - data = [0.311244] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "float32" - data = [0.695293] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "float32" - data = [0.894052] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py deleted file mode 100644 index 4cccb2b8e..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/model.py +++ /dev/null @@ -1,43 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) - del data_0 - - # pd_op.add: (xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) - del data_1 - - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 - - return add_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_18/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt index f33996b4a..6fe7297b8 
100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -331d31c12329e180f8072f92c095e5fa3ed0d1dbf984c2e334eff6b5b3862c64 \ No newline at end of file +8b11cddc56e8bf2fc7551237b756b7f8f8a4e9dd2f556be8d328af948ebf41da \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py index 8361f35e8..b94668a44 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/input_meta.py @@ -1,84 +1,134 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3, 5376] + shape = [1] dtype = "float32" - max_val = float("1.0") - mean = float("0.00124008") - std = float("0.0351929") - data = None + data = [0.699884] class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 1] - dtype = "int32" - data = [0, 1] + shape = [1] + dtype = "float32" + data = [0.667963] class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 3, 1] - dtype = "int32" - data = [4, 3, 3, 3, 1, 0] + shape = [1] + dtype = "float32" + data = [0.675792] class Program_weight_tensor_data_3: name = "data_3" - shape = [2, 5376] + shape = [1] dtype = "float32" - max_val = float("1.0") - mean = float("0.00372024") - std = float("0.0608802") - data = None + data = [0.676071] class Program_weight_tensor_data_4: name = "data_4" - shape = [2, 3, 4] - dtype = "float32" - data = [ - 270.791, - 234.887, - 332.231, - 356.289, - 38.6844, - 240.165, - 99.3659, - 448.66, - 476.35, - 311.423, - 512.0, - 504.082, - 2.03175, - 161.292, - 9.34603, - 181.971, - 39.619, - 61.622, - 40.8381, - 69.0662, - 0.0, - 0.0, - 0.0, - 0.0, - ] + shape = [1] + dtype = "float32" + data = [0.658719] class Program_weight_tensor_data_5: name = "data_5" - shape = [2, 3, 5376] + shape = [1] dtype = "float32" - max_val = float("0.00886879") - mean = float("8.72339e-07") - std = float("6.13309e-05") - data = None + data = [0.620637] class Program_weight_tensor_data_6: name = "data_6" - shape = [2, 3, 5376] + shape = [1] + dtype = "float32" + data = [0.637685] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.619238] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.773168] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.635316] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.623672] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.620323] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.621219] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.624329] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.733117] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.557224] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.579909] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.70327] + + +class Program_weight_tensor_data_18: + name = 
"data_18" + shape = [2, 3, 768, 768] dtype = "float32" - max_val = float("0.712006") - mean = float("0.00169782") - std = float("0.0189379") + max_val = float("0.933333") + mean = float("0.380665") + std = float("0.139647") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py index c7c749882..331f6a597 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/model.py @@ -5,188 +5,4036 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + 
parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + 
data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + ): + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_18, parameter_372, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_18, parameter_372 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_371, + parameter_370, + parameter_369, + parameter_368, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_368, parameter_369, parameter_370, parameter_371 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_367, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_367 + + # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_366, + parameter_365, + parameter_364, + parameter_363, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_363, parameter_364, parameter_365, parameter_366 + + # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_362 + + # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_361, + parameter_360, + parameter_359, + parameter_358, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_358, parameter_359, parameter_360, parameter_361 + + # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_357, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_357 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_356, + parameter_355, + parameter_354, 
+ parameter_353, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_353, parameter_354, parameter_355, parameter_356 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_352, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_352 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_351, + parameter_350, + parameter_349, + parameter_348, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_348, parameter_349, parameter_350, parameter_351 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_347 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_346, + parameter_345, + parameter_344, + parameter_343, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_343, parameter_344, parameter_345, parameter_346 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_342, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_342 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_341, + parameter_340, + parameter_339, + parameter_338, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_338, parameter_339, parameter_340, parameter_341 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_337 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + 
batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_336, + parameter_335, + parameter_334, + parameter_333, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_333, parameter_334, parameter_335, parameter_336 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_332, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_332 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_331, + parameter_330, + parameter_329, + parameter_328, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_328, parameter_329, parameter_330, parameter_331 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_327, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_327 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_326, + parameter_325, + parameter_324, + parameter_323, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_323, parameter_324, parameter_325, parameter_326 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_322 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_321, + parameter_320, + parameter_319, + parameter_318, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_318, parameter_319, parameter_320, parameter_321 + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 
48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_317 + + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_316, + parameter_315, + parameter_314, + parameter_313, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_313, parameter_314, parameter_315, parameter_316 + + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_312, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_312 - # pd_op.argmax: (2x5376xi64) <- (2x3x5376xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) - del full_0 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_311, + parameter_310, + parameter_309, + parameter_308, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_308, parameter_309, parameter_310, parameter_311 + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("3"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_307, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_307 - # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) - del data_1, full_1 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_306, + parameter_305, + parameter_304, + parameter_303, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_303, parameter_304, parameter_305, parameter_306 - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 + # pd_op.conv2d: (2x48x-1x-1xf32) <- 
(2x48x-1x-1xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_302, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_302 - # pd_op.add: (2x5376xi64) <- (2x5376xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_0) - del argmax_0, cast_0 + # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_301, + parameter_300, + parameter_299, + parameter_298, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_298, parameter_299, parameter_300, parameter_301 - # pd_op.flatten: (6xi32) <- (2x3x1xi32) - flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) del data_2 - # pd_op.flatten: (10752xi64) <- (2x5376xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.gather: (10752xi32) <- (6xi32, 10752xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) - del flatten_0 + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 5376] + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_3 = full_int_array_0 - # pd_op.reshape: (2x5376xi32) <- (10752xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) - del full_int_array_0, gather_0 + # pd_op.assign: (2xi64) <- (2xi64) + assign_4 = full_int_array_0 - # pd_op.full: (xf32) <- () - full_3 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() + # pd_op.assign: (2xi64) <- (2xi64) + assign_5 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_297 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_296, full_int_array_1) + del parameter_296 - # pd_op.greater_than: (2x5376xb) <- 
(2x5376xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) - del data_3, full_3 + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("10"), paddle.float32, paddle.core.CPUPlace() + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") ) + del add_6 - # pd_op.full_like: (2x5376xi32) <- (2x5376xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_295, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_4 + del parameter_295 - # pd_op.where: (2x5376xi32) <- (2x5376xb, 2x5376xi32, 2x5376xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 + # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_294, + parameter_293, + parameter_292, + parameter_291, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_291, parameter_292, parameter_293, parameter_294 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 4] + # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) - # pd_op.reshape: (6x4xf32) <- (2x3x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) - del data_4, full_int_array_1 + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_290, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_290 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_289, + parameter_288, + parameter_287, + parameter_286, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_286, parameter_287, parameter_288, parameter_289 - # pd_op.gather: (10752x4xf32) <- (6x4xf32, 10752xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) - del flatten_1, full_2, reshape_2 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) - # pd_op.full_int_array: (3xi64) <- () - full_int_array_2 = [2, 5376, 4] + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_285, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_285 - # 
pd_op.reshape: (2x5376x4xf32) <- (10752x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) - del full_int_array_2, gather_1 + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_284, + parameter_283, + parameter_282, + parameter_281, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_281, parameter_282, parameter_283, parameter_284 - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x5376x11xf32) <- (2x5376xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_5, where_0.dtype), full_5 - ) - del full_5 - - # pd_op.full: (10xi64) <- () - full_6 = paddle._C_ops.full( - [10], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (10xi64) <- (10xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_6, - [10], - paddle.int64, - [ - float("0"), - float("1"), - float("2"), - float("3"), - float("4"), - float("5"), - float("6"), - float("7"), - float("8"), - float("9"), - ], - paddle.framework._current_expected_place(), - ) - del full_6 - - # pd_op.index_select: (2x5376x10xf32) <- (2x5376x11xf32, 10xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 - - # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) - multiply_1 = paddle._C_ops.multiply(data_5, data_0) + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_280, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_280 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_279, + parameter_278, + parameter_277, + parameter_276, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_276, parameter_277, parameter_278, parameter_279 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_275, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_275 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_274, + parameter_273, + parameter_272, + parameter_271, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_271, parameter_272, parameter_273, parameter_274 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_270, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_270 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_269, + parameter_268, + parameter_267, + parameter_266, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_266, parameter_267, parameter_268, parameter_269 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_265, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_265 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_264, + parameter_263, + parameter_262, + parameter_261, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_261, parameter_262, parameter_263, parameter_264 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_260, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_260 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_259, + parameter_258, + parameter_257, + parameter_256, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_256, parameter_257, parameter_258, parameter_259 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_255, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_255 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_254, + parameter_253, + parameter_252, + parameter_251, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_251, parameter_252, parameter_253, parameter_254 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_250, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_250 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_249, + parameter_248, + parameter_247, + parameter_246, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_246, parameter_247, parameter_248, parameter_249 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_245, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_245 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_244, + parameter_243, + parameter_242, + parameter_241, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_241, parameter_242, parameter_243, parameter_244 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_240, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_240 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_239, + parameter_238, + parameter_237, + parameter_236, + False, + 
float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_236, parameter_237, parameter_238, parameter_239 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_235, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_235 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_234, + parameter_233, + parameter_232, + parameter_231, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_231, parameter_232, parameter_233, parameter_234 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) del data_5 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-1] + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_230, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_230 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_229, + parameter_228, + parameter_227, + parameter_226, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_226, parameter_227, parameter_228, parameter_229 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_225, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_225 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_224, + parameter_223, + parameter_222, + parameter_221, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_221, parameter_222, parameter_223, parameter_224 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_220, [1, 1], [0, 0], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_220 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_219, + parameter_218, + parameter_217, + parameter_216, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_216, parameter_217, parameter_218, parameter_219 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_215, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_215 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_214, + parameter_213, + parameter_212, + parameter_211, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_211, parameter_212, parameter_213, parameter_214 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_210, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_210 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_209, + parameter_208, + parameter_207, + parameter_206, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_206, parameter_207, parameter_208, parameter_209 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_205, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_205 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_204, + parameter_203, + parameter_202, + parameter_201, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_201, parameter_202, parameter_203, parameter_204 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_200, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_200 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_199, + parameter_198, + parameter_197, + parameter_196, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_196, parameter_197, parameter_198, parameter_199 + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_195, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_195 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_194, + parameter_193, + parameter_192, + parameter_191, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_191, parameter_192, parameter_193, parameter_194 + + # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_190, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_190 + + # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_189, + parameter_188, + parameter_187, + parameter_186, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_186, parameter_187, parameter_188, parameter_189 + + # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_17 = 
paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_185 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_184, full_int_array_1) + del parameter_184 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_183, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_183 + + # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_182, + parameter_181, + parameter_180, + parameter_179, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_179, parameter_180, parameter_181, parameter_182 + + # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_178, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_178 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_177, + parameter_176, + parameter_175, + parameter_174, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_174, parameter_175, parameter_176, parameter_177 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + 
conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_173, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_173 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_172, + parameter_171, + parameter_170, + parameter_169, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_169, parameter_170, parameter_171, parameter_172 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_168, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_168 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_167, + parameter_166, + parameter_165, + parameter_164, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_164, parameter_165, parameter_166, parameter_167 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_163, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_163 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_162, + parameter_161, + parameter_160, + parameter_159, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_159, parameter_160, parameter_161, parameter_162 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_158, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_158 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_157, + parameter_156, + parameter_155, + parameter_154, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, 
None, None, None, None), + ) + del parameter_154, parameter_155, parameter_156, parameter_157 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_153, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_153 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_152, + parameter_151, + parameter_150, + parameter_149, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_149, parameter_150, parameter_151, parameter_152 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_148, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_148 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_147, + parameter_146, + parameter_145, + parameter_144, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_144, parameter_145, parameter_146, parameter_147 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_143, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_143 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_142, + parameter_141, + parameter_140, + parameter_139, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_139, parameter_140, parameter_141, parameter_142 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_138, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_138 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_137, + parameter_136, + parameter_135, + parameter_134, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_134, parameter_135, parameter_136, parameter_137 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(add_22) - # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x5376xf32) - multiply_2 = paddle._C_ops.multiply(data_6, data_0) - del data_0, data_6 + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) - # pd_op.max: (2x3x1xf32) <- (2x3x5376xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - del multiply_2 + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_133, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_133 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_132, + parameter_131, + parameter_130, + parameter_129, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_129, parameter_130, parameter_131, parameter_132 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_128, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_128 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_127, + parameter_126, + parameter_125, + parameter_124, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_124, parameter_125, parameter_126, parameter_127 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_123, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_123 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + 
batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_122, + parameter_121, + parameter_120, + parameter_119, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_119, parameter_120, parameter_121, parameter_122 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_118, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_118 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_117, + parameter_116, + parameter_115, + parameter_114, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_114, parameter_115, parameter_116, parameter_117 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_113, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_113 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_112, + parameter_111, + parameter_110, + parameter_109, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_109, parameter_110, parameter_111, parameter_112 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_108, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_108 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_107, + parameter_106, + parameter_105, + parameter_104, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_104, 
parameter_105, parameter_106, parameter_107 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_103, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_103 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_102, + parameter_101, + parameter_100, + parameter_99, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_99 + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_98, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_98 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_97, + parameter_96, + parameter_95, + parameter_94, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_94, parameter_95, parameter_96, parameter_97 - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_93, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_93 - # pd_op.scale: (2x3x1xf32) <- (2x3x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) - del full_7, max_0 + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_92, + parameter_91, + parameter_90, + parameter_89, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_89, parameter_90, parameter_91, parameter_92 - # pd_op.divide: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del 
multiply_1, scale_1 + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 - # pd_op.multiply: (2x3x5376xf32) <- (2x3x5376xf32, 2x3x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-2] + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(add_28) - # pd_op.max: (2x5376xf32) <- (2x3x5376xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) - del full_int_array_4, multiply_3 + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) - # pd_op.unsqueeze: (2x5376x1xf32) <- (2x5376xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) - del full_int_array_3, max_2 + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_88, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_88 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_87, + parameter_86, + parameter_85, + parameter_84, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_84, parameter_85, parameter_86, parameter_87 - # pd_op.multiply: (2x5376x10xf32) <- (2x5376x10xf32, 2x5376x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_83, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_83 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_82, + parameter_81, + parameter_80, + parameter_79, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_79, parameter_80, parameter_81, parameter_82 + + # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_78 + + # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_77, + parameter_76, 
+ parameter_75, + parameter_74, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_74, parameter_75, parameter_76, parameter_77 + + # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_73 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_72, full_int_array_1) + del parameter_72 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_71 + + # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_70, + parameter_69, + parameter_68, + parameter_67, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_67, parameter_68, parameter_69, parameter_70 + + # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_66, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_66 + + # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_65, + parameter_64, + parameter_63, + parameter_62, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_62, parameter_63, parameter_64, parameter_65 + + # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_61 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_60, + parameter_59, + parameter_58, + parameter_57, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_57, parameter_58, parameter_59, parameter_60 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_56, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_56 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_55, + parameter_54, + parameter_53, + parameter_52, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_52, parameter_53, parameter_54, parameter_55 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_51 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_50, + parameter_49, + parameter_48, + parameter_47, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_47, parameter_48, parameter_49, parameter_50 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_46 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + 
batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_45, + parameter_44, + parameter_43, + parameter_42, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_42, parameter_43, parameter_44, parameter_45 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_41, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_41 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_40, + parameter_39, + parameter_38, + parameter_37, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_37, parameter_38, parameter_39, parameter_40 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_36, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_35, + parameter_34, + parameter_33, + parameter_32, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_32, parameter_33, parameter_34, parameter_35 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_31, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_31 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_30, + parameter_29, + parameter_28, + parameter_27, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_27, parameter_28, parameter_29, parameter_30 + 
+ # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_25, + parameter_24, + parameter_23, + parameter_22, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_22, parameter_23, parameter_24, parameter_25 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_20, + parameter_19, + parameter_18, + parameter_17, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_17, parameter_18, parameter_19, parameter_20 + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_16, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_16 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_15, + parameter_14, + parameter_13, + parameter_12, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_12, parameter_13, parameter_14, parameter_15 + + # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_11 + + # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + 
batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_10, + parameter_9, + parameter_8, + parameter_7, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_7, parameter_8, parameter_9 + + # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_6 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) + del full_int_array_1, parameter_5 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__432) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_5, + add_7, + add_8, + add_9, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + 
batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + 
batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__44, + batch_norm__45, + batch_norm__46, + 
batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_2, + concat_3, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_8, + conv2d_9, + full_0, + full_int_array_0, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + reshape_0, + reshape_1, + reshape_2, + reshape_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_6, + swish_7, + swish_8, + swish_9, + ) - return reshape_0, multiply_0 + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py similarity index 52% rename from 
paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py index 2ac13c044..62a914daf 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_15/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py @@ -2,91 +2,134 @@ class Program_weight_tensor_data_0: name = "data_0" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.724553] class Program_weight_tensor_data_1: name = "data_1" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.710696] class Program_weight_tensor_data_2: name = "data_2" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.69274] class Program_weight_tensor_data_3: name = "data_3" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.697763] class Program_weight_tensor_data_4: name = "data_4" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.67767] class Program_weight_tensor_data_5: name = "data_5" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.628229] class Program_weight_tensor_data_6: name = "data_6" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.643942] class Program_weight_tensor_data_7: name = "data_7" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.633569] class Program_weight_tensor_data_8: name = "data_8" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.801205] class Program_weight_tensor_data_9: name = "data_9" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.652613] class Program_weight_tensor_data_10: name = "data_10" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.636874] class Program_weight_tensor_data_11: name = "data_11" shape = [1] dtype = "float32" - data = [1.00237] + data = [0.631148] class Program_weight_tensor_data_12: name = "data_12" - shape = [2, 3, 640, 640] + shape = [1] + dtype = "float32" + data = [0.635341] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.640054] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.755822] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.575326] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.59257] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.72331] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1, 3, 1536, 1536] dtype = "float32" - max_val = float("1.0") - mean = float("0.471598") - std = float("0.270715") + min_val = float("0.0218883") + max_val = float("0.663022") + mean = float("0.428838") + std = float("0.0832401") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py new file mode 100644 index 000000000..a4133e248 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py @@ -0,0 +1,3989 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1024] + dtype = "float32" + min_val = float("-3.75937") + max_val = float("-0.734") + mean = float("-2.18719") + std = 
float("0.428746") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [1024] + dtype = "float32" + min_val = float("1.61944") + max_val = float("4.44114") + mean = float("3.08041") + std = float("0.254214") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [1024] + dtype = "float32" + min_val = float("0.0050978") + max_val = float("0.0274139") + mean = float("0.00876083") + std = float("0.00191502") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [1024] + dtype = "float32" + min_val = float("-0.173441") + max_val = float("0.132182") + mean = float("-0.0624446") + std = float("0.0318177") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0420016") + max_val = float("0.0672891") + mean = float("-0.000434506") + std = float("0.00419984") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [768] + dtype = "float32" + min_val = float("-0.0144958") + max_val = float("0.00204154") + mean = float("-0.000784991") + std = float("0.00208566") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0809974") + max_val = float("0.144837") + mean = float("-0.000290719") + std = float("0.0016779") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("-1.77404") + max_val = float("0.318904") + mean = float("-0.31075") + std = float("0.291253") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("0.188368") + max_val = float("1.82104") + mean = float("0.60964") + std = float("0.262596") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384] + dtype = "float32" + min_val = float("7.63933e-05") + max_val = float("0.00106861") + mean = float("0.000259708") + std = float("0.000131578") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.0655344") + max_val = float("0.0775217") + mean = float("0.0238682") + std = float("0.0176001") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020871") + max_val = float("0.0273244") + mean = float("-0.000414716") + std = float("0.00284754") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-1.77405") + max_val = float("0.319251") + mean = float("-0.310681") + std = float("0.291275") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.335122") + max_val = float("2.60483") + mean = float("1.02609") + std = float("0.290246") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.000763408") + max_val = float("0.00774847") + mean = float("0.0023501") + std = float("0.000855015") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.228802") + max_val = 
float("0.161783") + mean = float("0.0348261") + std = float("0.0422183") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0185255") + max_val = float("0.0282844") + mean = float("-7.21101e-05") + std = float("0.00183304") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("-2.58205") + max_val = float("0.0326997") + mean = float("-1.56844") + std = float("0.416017") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("0.51894") + max_val = float("1.64424") + mean = float("1.13558") + std = float("0.149427") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("0.0432612") + max_val = float("0.263912") + mean = float("0.0990143") + std = float("0.0258689") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-1.05647") + max_val = float("0.500171") + mean = float("-0.284757") + std = float("0.144218") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0217847") + max_val = float("0.0601331") + mean = float("-0.000214232") + std = float("0.00242153") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.644238") + mean = float("-0.57485") + std = float("0.358678") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("0.163976") + max_val = float("2.06584") + mean = float("0.56203") + std = float("0.227231") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("8.27966e-05") + max_val = float("0.00179396") + mean = float("0.000297678") + std = float("0.000146921") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.039417") + max_val = float("0.0723179") + mean = float("0.0222404") + std = float("0.0153684") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0311026") + max_val = float("0.039225") + mean = float("-0.000409791") + std = float("0.00262815") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("-1.9394") + max_val = float("0.644918") + mean = float("-0.574762") + std = float("0.358753") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("0.583818") + max_val = float("2.15633") + mean = float("1.08411") + std = float("0.255713") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("0.00147808") + max_val = float("0.0112958") + mean = float("0.00356599") + std = float("0.00110113") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + 
min_val = float("-0.114487") + max_val = float("0.168596") + mean = float("0.0403135") + std = float("0.0412827") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0211861") + max_val = float("0.0312284") + mean = float("-9.86606e-05") + std = float("0.00198109") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-2.39618") + max_val = float("0.845899") + mean = float("-1.40537") + std = float("0.36063") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("0.454223") + max_val = float("1.91875") + mean = float("1.16633") + std = float("0.147984") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("0.0366463") + max_val = float("0.164533") + mean = float("0.0661917") + std = float("0.0162349") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.915528") + max_val = float("0.831942") + mean = float("-0.196761") + std = float("0.117911") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0304568") + max_val = float("0.0446889") + mean = float("-0.000206096") + std = float("0.00245489") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-1.87628") + max_val = float("0.453077") + mean = float("-0.485305") + std = float("0.376481") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("0.0771953") + max_val = float("2.11917") + mean = float("0.441977") + std = float("0.217648") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("7.36916e-05") + max_val = float("0.00170445") + mean = float("0.000357372") + std = float("0.000182226") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.0529189") + max_val = float("0.0858856") + mean = float("0.0268843") + std = float("0.0175464") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0213328") + max_val = float("0.0283453") + mean = float("-0.000505242") + std = float("0.00224656") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-1.87669") + max_val = float("0.45341") + mean = float("-0.485211") + std = float("0.376586") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("0.522977") + max_val = float("2.22431") + mean = float("1.05297") + std = float("0.260052") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("0.0021093") + max_val = float("0.0103458") + mean = float("0.00457088") + std = float("0.00131851") + data = None + + +class Program_weight_tensor_parameter_45: + name = 
"parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.272542") + max_val = float("0.182129") + mean = float("0.0462809") + std = float("0.0484224") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0214852") + max_val = float("0.0348977") + mean = float("-0.000101693") + std = float("0.00210424") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-2.1565") + max_val = float("0.418538") + mean = float("-1.36711") + std = float("0.277506") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.707119") + max_val = float("1.63571") + mean = float("1.14297") + std = float("0.101612") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.027003") + max_val = float("0.119021") + mean = float("0.0524081") + std = float("0.0141785") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.735058") + max_val = float("0.211464") + mean = float("-0.135262") + std = float("0.0973352") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0300983") + max_val = float("0.05499") + mean = float("-0.000159015") + std = float("0.00235156") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-2.92344") + max_val = float("1.66439") + mean = float("-0.760407") + std = float("0.643554") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("0.953228") + max_val = float("2.9182") + mean = float("1.86309") + std = float("0.276205") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("0.00273344") + max_val = float("0.0130488") + mean = float("0.00578892") + std = float("0.00146091") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.279522") + max_val = float("0.136057") + mean = float("0.068312") + std = float("0.0329566") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0411036") + max_val = float("0.048141") + mean = float("-0.000774534") + std = float("0.00548625") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("-2.24702") + max_val = float("0.681993") + mean = float("-0.777088") + std = float("0.472908") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("0.965876") + max_val = float("2.89361") + mean = float("2.09705") + std = float("0.305445") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384] + dtype = "float32" + min_val = float("0.000836446") + max_val = float("0.0043118") + mean = float("0.00221644") + std = float("0.000544507") + data = None + + +class 
Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0181609") + max_val = float("0.0915652") + mean = float("0.0419498") + std = float("0.0183738") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0837021") + max_val = float("0.0611426") + mean = float("-0.00045084") + std = float("0.00374174") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [768] + dtype = "float32" + min_val = float("-2.40194") + max_val = float("0.642339") + mean = float("-0.908288") + std = float("0.339331") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [768] + dtype = "float32" + min_val = float("0.53146") + max_val = float("1.90712") + mean = float("0.919684") + std = float("0.149212") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [768] + dtype = "float32" + min_val = float("0.00736934") + max_val = float("0.074494") + mean = float("0.0176525") + std = float("0.00547046") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [768] + dtype = "float32" + min_val = float("-0.236448") + max_val = float("0.209185") + mean = float("0.0420968") + std = float("0.0580626") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0383779") + max_val = float("0.0519002") + mean = float("-9.93933e-05") + std = float("0.00244217") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [512] + dtype = "float32" + min_val = float("-3.39029") + max_val = float("1.66616") + mean = float("-1.16168") + std = float("0.513766") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [512] + dtype = "float32" + min_val = float("0.520928") + max_val = float("1.67546") + mean = float("1.11104") + std = float("0.148384") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [512] + dtype = "float32" + min_val = float("0.00220886") + max_val = float("0.0162899") + mean = float("0.00755366") + std = float("0.00191954") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [512] + dtype = "float32" + min_val = float("-0.159233") + max_val = float("0.0720554") + mean = float("-0.0485279") + std = float("0.0411912") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.208779") + max_val = float("0.179911") + mean = float("-0.000606249") + std = float("0.0081171") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("-0.0103559") + max_val = float("0.00155602") + mean = float("-0.00302775") + std = float("0.0023618") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.204999") + max_val = float("0.141306") + mean = float("-0.00211219") + std = float("0.00500511") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192] + dtype = "float32" + min_val = float("-1.97063") + max_val = float("0.41045") + mean = 
float("-0.348649") + std = float("0.333533") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("0.0528508") + max_val = float("2.16013") + mean = float("0.581272") + std = float("0.419844") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("9.84565e-05") + max_val = float("0.00122402") + mean = float("0.000477939") + std = float("0.000224956") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("-0.0376647") + max_val = float("0.0569873") + mean = float("0.00567798") + std = float("0.015222") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0210389") + max_val = float("0.0585363") + mean = float("-0.000352054") + std = float("0.00423892") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192] + dtype = "float32" + min_val = float("-1.97059") + max_val = float("0.411367") + mean = float("-0.348497") + std = float("0.333596") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("0.372764") + max_val = float("2.70243") + mean = float("1.20208") + std = float("0.49364") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.0014624") + max_val = float("0.0202289") + mean = float("0.00559275") + std = float("0.0020797") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("-0.115196") + max_val = float("0.163529") + mean = float("0.0192204") + std = float("0.0435021") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.031927") + max_val = float("0.0389496") + mean = float("-0.000144904") + std = float("0.00325908") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192] + dtype = "float32" + min_val = float("-2.89054") + max_val = float("-0.177595") + mean = float("-1.31446") + std = float("0.401195") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("0.695074") + max_val = float("2.09481") + mean = float("1.17912") + std = float("0.169901") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.0654421") + max_val = float("0.471484") + mean = float("0.138461") + std = float("0.0475155") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("-2.47419") + max_val = float("1.83595") + mean = float("-0.229004") + std = float("0.395047") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350379") + max_val = float("0.0468605") + mean = float("-0.000221381") + std = float("0.00388426") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") 
+ max_val = float("0.513263") + mean = float("-0.279273") + std = float("0.321486") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("0.0449424") + max_val = float("1.76947") + mean = float("0.444383") + std = float("0.305669") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("7.96339e-05") + max_val = float("0.00168176") + mean = float("0.000430774") + std = float("0.000230126") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("-0.0362367") + max_val = float("0.0459797") + mean = float("0.0087194") + std = float("0.0119612") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.02483") + max_val = float("0.0404131") + mean = float("-0.000400917") + std = float("0.00391908") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.514903") + mean = float("-0.279015") + std = float("0.321709") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("0.481654") + max_val = float("2.27026") + mean = float("1.13859") + std = float("0.375612") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.00304728") + max_val = float("0.0144724") + mean = float("0.00647186") + std = float("0.00181328") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("-0.0801327") + max_val = float("0.116547") + mean = float("0.0356733") + std = float("0.0320593") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0229799") + max_val = float("0.0371751") + mean = float("-0.000196939") + std = float("0.00352878") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192] + dtype = "float32" + min_val = float("-2.50826") + max_val = float("-0.12355") + mean = float("-1.2887") + std = float("0.443822") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("0.653803") + max_val = float("1.66962") + mean = float("1.19928") + std = float("0.166233") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.0475495") + max_val = float("0.208235") + mean = float("0.0948451") + std = float("0.0245631") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("-2.1632") + max_val = float("0.473042") + mean = float("-0.118896") + std = float("0.249139") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.038582") + max_val = float("0.0537646") + mean = float("-0.00026749") + std = float("0.0040656") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192] + 
dtype = "float32" + min_val = float("-1.75738") + max_val = float("0.468608") + mean = float("-0.262263") + std = float("0.335862") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("0.00305103") + max_val = float("1.67905") + mean = float("0.351948") + std = float("0.251703") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("1.01992e-06") + max_val = float("0.00222302") + mean = float("0.000398674") + std = float("0.000279493") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("-0.0314916") + max_val = float("0.0548995") + mean = float("0.0110299") + std = float("0.0122915") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0307534") + max_val = float("0.0384153") + mean = float("-0.00045859") + std = float("0.00377622") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192] + dtype = "float32" + min_val = float("-1.75744") + max_val = float("0.470024") + mean = float("-0.262025") + std = float("0.336099") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("0.405457") + max_val = float("1.97843") + mean = float("1.06603") + std = float("0.334153") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.00267407") + max_val = float("0.0142805") + mean = float("0.00698013") + std = float("0.0019104") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("-0.0878738") + max_val = float("0.110839") + mean = float("0.0399626") + std = float("0.0323914") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0336081") + max_val = float("0.0420323") + mean = float("-0.000205836") + std = float("0.00368544") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192] + dtype = "float32" + min_val = float("-2.49703") + max_val = float("0.138789") + mean = float("-1.24309") + std = float("0.424468") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("0.652493") + max_val = float("1.80896") + mean = float("1.16711") + std = float("0.165463") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.0303129") + max_val = float("0.14633") + mean = float("0.0670479") + std = float("0.0163216") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("-1.70247") + max_val = float("0.30536") + mean = float("-0.0862267") + std = float("0.199355") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0472912") + max_val = float("0.0583976") + mean = float("-0.000284769") + std = float("0.00417002") + data = None + + +class 
Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192] + dtype = "float32" + min_val = float("-2.07915") + max_val = float("0.533836") + mean = float("-0.272165") + std = float("0.375339") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("0.000522804") + max_val = float("0.732366") + mean = float("0.21194") + std = float("0.136205") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("5.9055e-08") + max_val = float("0.000953757") + mean = float("0.000261566") + std = float("0.000147906") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("-0.0264134") + max_val = float("0.0356786") + mean = float("0.00695978") + std = float("0.00983596") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0207564") + max_val = float("0.0335475") + mean = float("-0.000292443") + std = float("0.00332227") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192] + dtype = "float32" + min_val = float("-2.07924") + max_val = float("0.535791") + mean = float("-0.271976") + std = float("0.375569") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("0.395086") + max_val = float("1.96267") + mean = float("0.959008") + std = float("0.303814") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.00304751") + max_val = float("0.015787") + mean = float("0.00706292") + std = float("0.00213169") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("-0.078765") + max_val = float("0.118653") + mean = float("0.0428106") + std = float("0.0338285") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0340016") + max_val = float("0.0403474") + mean = float("-0.000216247") + std = float("0.00380285") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0805818") + mean = float("-1.23662") + std = float("0.434286") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("0.761952") + max_val = float("1.62053") + mean = float("1.15094") + std = float("0.142444") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.0276638") + max_val = float("0.0803679") + mean = float("0.0486605") + std = float("0.0101769") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("-1.39612") + max_val = float("0.291383") + mean = float("-0.0742001") + std = float("0.166863") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0589398") + max_val = float("0.0606418") + 
mean = float("-0.000300541") + std = float("0.00415388") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192] + dtype = "float32" + min_val = float("-1.212") + max_val = float("0.447452") + mean = float("-0.232044") + std = float("0.339385") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-9.43381e-05") + max_val = float("0.678118") + mean = float("0.192025") + std = float("0.120758") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("2.4814e-10") + max_val = float("0.000962865") + mean = float("0.000259823") + std = float("0.000158281") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("-0.0444415") + max_val = float("0.0432657") + mean = float("0.00752981") + std = float("0.0124547") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0374404") + max_val = float("0.0395949") + mean = float("-0.000292615") + std = float("0.00342625") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192] + dtype = "float32" + min_val = float("-1.21197") + max_val = float("0.448806") + mean = float("-0.231853") + std = float("0.339659") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("0.382853") + max_val = float("1.56358") + mean = float("0.852209") + std = float("0.259926") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.00286251") + max_val = float("0.0142248") + mean = float("0.00680236") + std = float("0.00188027") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("-0.0777897") + max_val = float("0.150363") + mean = float("0.0469745") + std = float("0.0370425") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368355") + max_val = float("0.0400254") + mean = float("-0.000211959") + std = float("0.00380574") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192] + dtype = "float32" + min_val = float("-2.48699") + max_val = float("-0.132487") + mean = float("-1.2498") + std = float("0.418473") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("0.689021") + max_val = float("1.51961") + mean = float("1.12491") + std = float("0.134826") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.0194344") + max_val = float("0.0647326") + mean = float("0.0353335") + std = float("0.00848713") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("-0.842031") + max_val = float("0.288259") + mean = float("-0.0809481") + std = float("0.135503") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192, 192, 3, 3] + 
dtype = "float32" + min_val = float("-0.0647608") + max_val = float("0.0671244") + mean = float("-0.000301379") + std = float("0.00415559") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192] + dtype = "float32" + min_val = float("-1.21773") + max_val = float("0.49966") + mean = float("-0.167333") + std = float("0.293611") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("0.00864435") + max_val = float("1.53701") + mean = float("0.238131") + std = float("0.21185") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("2.34858e-05") + max_val = float("0.00710491") + mean = float("0.000531262") + std = float("0.00068873") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("-0.0691024") + max_val = float("0.101541") + mean = float("0.0105168") + std = float("0.0186603") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0626678") + max_val = float("0.0382933") + mean = float("-0.000453582") + std = float("0.00413962") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192] + dtype = "float32" + min_val = float("-1.21774") + max_val = float("0.50078") + mean = float("-0.167049") + std = float("0.293829") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("0.353208") + max_val = float("1.45018") + mean = float("0.756982") + std = float("0.216639") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.00481832") + max_val = float("0.0211758") + mean = float("0.00953731") + std = float("0.00267146") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("-0.103005") + max_val = float("0.150479") + mean = float("0.0568873") + std = float("0.0497249") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0712483") + max_val = float("0.0533123") + mean = float("-0.000260747") + std = float("0.00375359") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192] + dtype = "float32" + min_val = float("-1.87984") + max_val = float("-0.210289") + mean = float("-1.14605") + std = float("0.325945") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("0.790161") + max_val = float("1.59635") + mean = float("1.12149") + std = float("0.129857") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.0174547") + max_val = float("0.0659133") + mean = float("0.031237") + std = float("0.00884456") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("-0.857208") + max_val = float("0.269781") + mean = float("-0.0676028") + std = float("0.134013") + data = None + + +class 
Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0680887") + max_val = float("0.0796042") + mean = float("-0.000244907") + std = float("0.0040245") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192] + dtype = "float32" + min_val = float("-2.86208") + max_val = float("1.58104") + mean = float("-0.027572") + std = float("0.747892") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("0.490153") + max_val = float("2.07789") + mean = float("0.900423") + std = float("0.231981") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.012085") + max_val = float("0.0729411") + mean = float("0.0254063") + std = float("0.00999328") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("-0.232877") + max_val = float("0.322739") + mean = float("-0.043425") + std = float("0.0608633") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.112904") + max_val = float("0.101906") + mean = float("-0.000605477") + std = float("0.00869645") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192] + dtype = "float32" + min_val = float("-2.96795") + max_val = float("1.66848") + mean = float("0.0967615") + std = float("0.663297") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("0.830405") + max_val = float("5.55794") + mean = float("1.91324") + std = float("0.933276") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.00638727") + max_val = float("0.0461032") + mean = float("0.0175233") + std = float("0.00555475") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("-0.14477") + max_val = float("0.154899") + mean = float("-0.0220724") + std = float("0.0559826") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100414") + max_val = float("0.0965722") + mean = float("-0.000481739") + std = float("0.00788359") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [384] + dtype = "float32" + min_val = float("-2.9234") + max_val = float("1.32689") + mean = float("-0.300856") + std = float("0.563737") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [384] + dtype = "float32" + min_val = float("0.633896") + max_val = float("2.47246") + mean = float("1.15988") + std = float("0.257349") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [384] + dtype = "float32" + min_val = float("0.0120681") + max_val = float("0.111573") + mean = float("0.027173") + std = float("0.0132211") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [384] + dtype = "float32" + min_val = float("-0.269578") + max_val = float("0.241792") + mean = 
float("0.0299257") + std = float("0.0746028") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0777711") + max_val = float("0.0733026") + mean = float("-9.30129e-05") + std = float("0.00423326") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [256] + dtype = "float32" + min_val = float("-2.04675") + max_val = float("1.2869") + mean = float("-0.92413") + std = float("0.542635") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [256] + dtype = "float32" + min_val = float("0.509654") + max_val = float("1.69024") + mean = float("1.05364") + std = float("0.177449") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [256] + dtype = "float32" + min_val = float("0.0016847") + max_val = float("0.0202013") + mean = float("0.00552268") + std = float("0.00242365") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [256] + dtype = "float32" + min_val = float("-0.247824") + max_val = float("0.180174") + mean = float("-0.0483161") + std = float("0.064182") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.211445") + max_val = float("0.154025") + mean = float("-0.00090718") + std = float("0.0139364") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("-0.0146056") + max_val = float("0.00252242") + mean = float("-0.00513018") + std = float("0.00389486") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.340895") + max_val = float("0.243469") + mean = float("-0.00395929") + std = float("0.0107136") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("-1.9141") + max_val = float("0.53448") + mean = float("-0.208812") + std = float("0.434585") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.139627") + max_val = float("3.23019") + mean = float("0.63562") + std = float("0.668608") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("9.81546e-05") + max_val = float("0.00262635") + mean = float("0.000631594") + std = float("0.000470416") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96] + dtype = "float32" + min_val = float("-0.0508496") + max_val = float("0.0645139") + mean = float("0.0073241") + std = float("0.0226978") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0529209") + max_val = float("0.0938109") + mean = float("-0.00068654") + std = float("0.00780134") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("-1.91385") + max_val = float("0.535947") + mean = float("-0.208472") + std = float("0.434758") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = 
"float32" + min_val = float("0.343945") + max_val = float("5.46861") + mean = float("1.08565") + std = float("0.883653") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("0.000857905") + max_val = float("0.0144521") + mean = float("0.00502113") + std = float("0.0025215") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96] + dtype = "float32" + min_val = float("-0.134633") + max_val = float("0.206261") + mean = float("0.0108598") + std = float("0.0610727") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0417476") + max_val = float("0.0707409") + mean = float("-0.000200496") + std = float("0.00586268") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("-2.46669") + max_val = float("-0.0188941") + mean = float("-1.22596") + std = float("0.444206") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.540095") + max_val = float("1.63859") + mean = float("0.945542") + std = float("0.172479") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("0.0347183") + max_val = float("0.227627") + mean = float("0.082417") + std = float("0.0336491") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96] + dtype = "float32" + min_val = float("-2.59922") + max_val = float("2.15076") + mean = float("-0.188655") + std = float("0.479579") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.159603") + max_val = float("0.105542") + mean = float("-0.000422661") + std = float("0.00713371") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("-1.38744") + max_val = float("0.563004") + mean = float("-0.132441") + std = float("0.347447") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + min_val = float("0.0452771") + max_val = float("1.86502") + mean = float("0.460871") + std = float("0.366358") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("7.20046e-05") + max_val = float("0.00271049") + mean = float("0.000780889") + std = float("0.000618051") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96] + dtype = "float32" + min_val = float("-0.0499407") + max_val = float("0.0480118") + mean = float("0.00767865") + std = float("0.0176588") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0484855") + max_val = float("0.0469527") + mean = float("-0.000557248") + std = float("0.00696514") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("-1.38716") + max_val = float("0.565575") + mean = float("-0.131901") + std = float("0.347951") + data = None + + +class 
Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.373276") + max_val = float("2.32827") + mean = float("0.902354") + std = float("0.426303") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("0.00300443") + max_val = float("0.0229962") + mean = float("0.00858887") + std = float("0.00415652") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96] + dtype = "float32" + min_val = float("-0.106265") + max_val = float("0.119063") + mean = float("0.0359685") + std = float("0.0431121") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0601192") + max_val = float("0.0479345") + mean = float("-0.000334461") + std = float("0.00588243") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("-3.32059") + max_val = float("0.366033") + mean = float("-1.1777") + std = float("0.556588") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.470758") + max_val = float("1.9813") + mean = float("1.03925") + std = float("0.238611") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("0.0279332") + max_val = float("0.176668") + mean = float("0.0504175") + std = float("0.0177105") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96] + dtype = "float32" + min_val = float("-1.05972") + max_val = float("0.787961") + mean = float("-0.0421876") + std = float("0.278962") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.152735") + max_val = float("0.158912") + mean = float("-0.000426001") + std = float("0.00705743") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("-1.24949") + max_val = float("0.583942") + mean = float("-0.109112") + std = float("0.292117") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0224878") + max_val = float("1.27796") + mean = float("0.324443") + std = float("0.192946") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("2.48592e-05") + max_val = float("0.00308798") + mean = float("0.000656812") + std = float("0.000490412") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-0.0398012") + max_val = float("0.0538955") + mean = float("0.00423704") + std = float("0.0172967") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0406747") + max_val = float("0.0494878") + mean = float("-0.000325615") + std = float("0.0071059") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("-1.24929") + max_val = float("0.586311") + mean = 
float("-0.108658") + std = float("0.29268") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.311326") + max_val = float("1.67043") + mean = float("0.747441") + std = float("0.257878") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("0.00302674") + max_val = float("0.0184666") + mean = float("0.00857766") + std = float("0.0033254") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-0.105385") + max_val = float("0.147591") + mean = float("0.0293962") + std = float("0.0383478") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0728298") + max_val = float("0.065903") + mean = float("-0.000300919") + std = float("0.00597289") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("-3.5826") + max_val = float("0.291706") + mean = float("-1.12744") + std = float("0.572685") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.511064") + max_val = float("2.19222") + mean = float("1.05217") + std = float("0.238287") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("0.021508") + max_val = float("0.0772456") + mean = float("0.0390789") + std = float("0.00924531") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-0.95569") + max_val = float("0.64461") + mean = float("-0.0425366") + std = float("0.216225") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0984925") + max_val = float("0.137263") + mean = float("-0.000483231") + std = float("0.00714155") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("-0.891765") + max_val = float("0.530315") + mean = float("-0.160042") + std = float("0.28168") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0202036") + max_val = float("1.40549") + mean = float("0.324747") + std = float("0.213549") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("5.1999e-05") + max_val = float("0.00308025") + mean = float("0.000681748") + std = float("0.000468256") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-0.0356116") + max_val = float("0.0543912") + mean = float("0.00763867") + std = float("0.0160098") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.050403") + max_val = float("0.0470333") + mean = float("-0.000602859") + std = float("0.00719125") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = 
float("-0.891522") + max_val = float("0.532005") + mean = float("-0.15962") + std = float("0.282144") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.170998") + max_val = float("1.78064") + mean = float("0.708933") + std = float("0.284476") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("0.00181135") + max_val = float("0.0235388") + mean = float("0.00884351") + std = float("0.00329263") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-0.0317818") + max_val = float("0.148669") + mean = float("0.0443214") + std = float("0.0385248") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0673552") + max_val = float("0.0665555") + mean = float("-0.000406403") + std = float("0.00600122") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("-2.65797") + max_val = float("0.0644665") + mean = float("-1.06329") + std = float("0.488575") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.510122") + max_val = float("1.73722") + mean = float("1.01545") + std = float("0.193669") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("0.0172563") + max_val = float("0.0595435") + mean = float("0.0301509") + std = float("0.00732214") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-0.801324") + max_val = float("0.759004") + mean = float("-0.0646748") + std = float("0.211257") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0799583") + max_val = float("0.12863") + mean = float("-0.000463251") + std = float("0.00696947") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("-0.979363") + max_val = float("0.488329") + mean = float("-0.1357") + std = float("0.278693") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("0.0499672") + max_val = float("1.15174") + mean = float("0.296075") + std = float("0.172795") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96] + dtype = "float32" + min_val = float("0.000124111") + max_val = float("0.00434228") + mean = float("0.00108239") + std = float("0.000694533") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-0.0430023") + max_val = float("0.0614512") + mean = float("0.00682349") + std = float("0.019208") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0730409") + max_val = float("0.0734237") + mean = float("-0.000668194") + std = float("0.00816827") + data = None + + +class Program_weight_tensor_parameter_251: + name = 
"parameter_251" + shape = [96] + dtype = "float32" + min_val = float("-0.979598") + max_val = float("0.490087") + mean = float("-0.135308") + std = float("0.279185") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("0.240111") + max_val = float("1.69891") + mean = float("0.604647") + std = float("0.228294") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96] + dtype = "float32" + min_val = float("0.00464956") + max_val = float("0.0447737") + mean = float("0.0124628") + std = float("0.00526411") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-0.088988") + max_val = float("0.163347") + mean = float("0.0332765") + std = float("0.0457333") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070586") + max_val = float("0.053917") + mean = float("-0.000353734") + std = float("0.00603503") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("-3.46749") + max_val = float("0.20134") + mean = float("-1.00429") + std = float("0.548683") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("0.68469") + max_val = float("2.50521") + mean = float("1.07421") + std = float("0.212064") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96] + dtype = "float32" + min_val = float("0.0128335") + max_val = float("0.0562505") + mean = float("0.0252273") + std = float("0.00835494") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [96] + dtype = "float32" + min_val = float("-0.594873") + max_val = float("0.694291") + mean = float("-0.0599848") + std = float("0.200504") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0875016") + max_val = float("0.0958638") + mean = float("-0.000393602") + std = float("0.00713622") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [96] + dtype = "float32" + min_val = float("-0.623249") + max_val = float("0.450355") + mean = float("-0.0811173") + std = float("0.25665") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [96] + dtype = "float32" + min_val = float("0.0905173") + max_val = float("1.30172") + mean = float("0.309137") + std = float("0.196898") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [96] + dtype = "float32" + min_val = float("0.000482307") + max_val = float("0.0212544") + mean = float("0.00391036") + std = float("0.00335167") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [96] + dtype = "float32" + min_val = float("-0.0380137") + max_val = float("0.0274317") + mean = float("0.000597392") + std = float("0.0117867") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0967686") + max_val = float("0.0726096") + mean = float("-0.00111676") + std = float("0.00943776") + data = None + 
+ +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [96] + dtype = "float32" + min_val = float("-0.62253") + max_val = float("0.451504") + mean = float("-0.0806935") + std = float("0.256953") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [96] + dtype = "float32" + min_val = float("0.210918") + max_val = float("1.42997") + mean = float("0.527932") + std = float("0.258611") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [96] + dtype = "float32" + min_val = float("0.0108854") + max_val = float("0.101724") + mean = float("0.0340185") + std = float("0.0173202") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.10483") + max_val = float("0.0991255") + mean = float("-0.00462957") + std = float("0.0392523") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0996365") + max_val = float("0.0540305") + mean = float("-0.00042977") + std = float("0.00592197") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [96] + dtype = "float32" + min_val = float("-2.4099") + max_val = float("0.510062") + mean = float("-0.827896") + std = float("0.467957") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [96] + dtype = "float32" + min_val = float("0.855439") + max_val = float("2.18052") + mean = float("1.27541") + std = float("0.20896") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [96] + dtype = "float32" + min_val = float("0.0103972") + max_val = float("0.0527158") + mean = float("0.0209256") + std = float("0.00862648") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-0.780321") + max_val = float("0.470817") + mean = float("-0.061274") + std = float("0.196346") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.154701") + max_val = float("0.153806") + mean = float("-0.00026052") + std = float("0.00735431") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [96] + dtype = "float32" + min_val = float("-3.15956") + max_val = float("1.89061") + mean = float("0.502181") + std = float("0.861277") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("0.209789") + max_val = float("2.62802") + mean = float("0.557131") + std = float("0.318659") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96] + dtype = "float32" + min_val = float("0.00944476") + max_val = float("0.145226") + mean = float("0.0342646") + std = float("0.0234271") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-0.271688") + max_val = float("0.303077") + mean = float("-0.0264941") + std = float("0.0868152") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.190092") + max_val = float("0.235795") + mean = float("-0.00054682") + 
std = float("0.0152601") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("-4.92412") + max_val = float("1.57941") + mean = float("0.384226") + std = float("1.04886") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("0.411425") + max_val = float("6.77791") + mean = float("1.69479") + std = float("1.30749") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96] + dtype = "float32" + min_val = float("0.0059326") + max_val = float("0.187703") + mean = float("0.0313027") + std = float("0.0270184") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-0.122136") + max_val = float("0.395194") + mean = float("0.0355431") + std = float("0.0933339") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.115428") + max_val = float("0.143096") + mean = float("0.000288353") + std = float("0.0138526") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("-2.27512") + max_val = float("1.75006") + mean = float("-0.125702") + std = float("0.740468") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.632726") + max_val = float("2.96908") + mean = float("1.08749") + std = float("0.283555") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("0.0130979") + max_val = float("0.31876") + mean = float("0.04291") + std = float("0.035214") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192] + dtype = "float32" + min_val = float("-0.47354") + max_val = float("0.278468") + mean = float("-0.0584653") + std = float("0.115063") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0811233") + max_val = float("0.11238") + mean = float("-0.000121273") + std = float("0.00716338") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [128] + dtype = "float32" + min_val = float("-2.81253") + max_val = float("1.96258") + mean = float("-0.709313") + std = float("0.64886") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [128] + dtype = "float32" + min_val = float("0.302011") + max_val = float("2.86022") + mean = float("1.01859") + std = float("0.279425") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [128] + dtype = "float32" + min_val = float("0.000689708") + max_val = float("0.0143167") + mean = float("0.00379586") + std = float("0.00196197") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [128] + dtype = "float32" + min_val = float("-0.240616") + max_val = float("0.230863") + mean = float("0.00348518") + std = float("0.0801109") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.16828") + max_val = 
float("0.191318") + mean = float("-0.00143145") + std = float("0.0216253") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("-0.0182017") + max_val = float("-0.00100735") + mean = float("-0.00761377") + std = float("0.00459165") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.297058") + max_val = float("0.124247") + mean = float("-0.00811798") + std = float("0.0180434") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0524219") + max_val = float("0.062819") + mean = float("-0.00145834") + std = float("0.0124603") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.053396") + max_val = float("0.0780475") + mean = float("-0.000432103") + std = float("0.0105215") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0907736") + max_val = float("0.0889891") + mean = float("-0.000674195") + std = float("0.0115766") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0701343") + max_val = float("0.0744403") + mean = float("-0.000969115") + std = float("0.0132523") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0625249") + max_val = float("0.0628193") + mean = float("-0.000704405") + std = float("0.010522") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.105534") + max_val = float("0.0876318") + mean = float("-0.000291303") + std = float("0.0118198") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0927544") + max_val = float("0.067179") + mean = float("-0.00167319") + std = float("0.0164656") + data = None 
+ + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0662936") + max_val = float("0.0926268") + mean = float("-0.000546134") + std = float("0.0110591") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.115861") + max_val = float("0.0843934") + mean = float("-0.000390165") + std = float("0.0126271") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.156722") + max_val = float("0.12438") + mean = float("-0.00240073") + std = float("0.0227151") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape 
= [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.133366") + max_val = float("0.190723") + mean = float("-0.000461332") + std = float("0.0215494") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [96] + dtype = "float32" + min_val = float("-3.40388") + max_val = float("3.27594") + mean = float("0.331") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [96] + dtype = "float32" + min_val = float("0.861639") + max_val = float("4.91749") + mean = float("1.91516") + std = float("0.75496") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [96] + dtype = "float32" + min_val = float("0.674644") + max_val = float("20.4484") + mean = float("2.3946") + std = float("2.42082") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [96] + dtype = "float32" + min_val = float("-1.41455") + max_val = float("1.80091") + mean = float("-0.328594") + std = float("0.607956") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.115845") + max_val = float("0.115419") + mean = float("-0.000438744") + std = float("0.0120833") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.153743") + max_val = float("0.135272") + mean = float("-0.000740633") + std = float("0.0191711") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.307002") + max_val = float("0.202588") + mean = float("-4.43961e-05") + std = float("0.025069") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + 
+class Program_weight_tensor_parameter_370:
+    name = "parameter_370"
+    shape = [32]
+    dtype = "float32"
+    min_val = float("0")
+    max_val = float("0.5")
+    data = None
+
+
+class Program_weight_tensor_parameter_371:
+    name = "parameter_371"
+    shape = [32]
+    dtype = "float32"
+    min_val = float("0")
+    max_val = float("0.5")
+    data = None
+
+
+class Program_weight_tensor_parameter_372:
+    name = "parameter_372"
+    shape = [32, 3, 3, 3]
+    dtype = "float32"
+    min_val = float("-0.297631")
+    max_val = float("0.278985")
+    mean = float("-0.00146872")
+    std = float("0.0683342")
+    data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py
index 8b1378917..f5b3eb50d 100644
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_2/weight_meta.py
@@ -1 +1,3989 @@
+class Program_weight_tensor_parameter_0:
+    name = "parameter_0"
+    shape = [1024]
+    dtype = "float32"
+    min_val = float("-3.7594")
+    max_val = float("-0.734446")
+    mean = float("-2.18722")
+    std = float("0.428724")
+    data = None
+
+class Program_weight_tensor_parameter_1:
+    name = "parameter_1"
+    shape = [1024]
+    dtype = "float32"
+    min_val = float("1.61913")
+    max_val = float("4.44136")
+    mean = float("3.08039")
+    std = float("0.25425")
+    data = None
+
+
+class Program_weight_tensor_parameter_2:
+    name = "parameter_2"
+    shape = [1024]
+    dtype = "float32"
+    min_val = float("0.00437889")
+    max_val = float("0.0223173")
+    mean = float("0.00872826")
+    std = float("0.00173091")
+    data = None
+
+
+class Program_weight_tensor_parameter_3:
+    name = "parameter_3"
+    shape = [1024]
+    dtype = "float32"
+    min_val = float("-0.140825")
+    max_val = float("0.123559")
+    mean = float("-0.0558655")
+    std = float("0.0304319")
+    data = None
+
+
+class Program_weight_tensor_parameter_4:
+    name = "parameter_4"
+    shape = [1024, 768, 1, 1]
+    dtype = "float32"
+    min_val = float("-0.0427729")
+    max_val = float("0.0695573")
+    mean = float("-0.000391863")
+    std = float("0.00403905")
+    data = None
+
+
+class Program_weight_tensor_parameter_5:
+    name = "parameter_5"
+    shape = [768]
+    dtype = "float32"
+    min_val = float("-0.014467")
+    max_val = float("0.00131875")
+    mean = float("-0.000761015")
+    std = float("0.00204153")
+    data = None
+
+
+class Program_weight_tensor_parameter_6:
+    name = "parameter_6"
+    shape = [768, 768, 1, 1]
+    dtype = "float32"
+    min_val = float("-0.0787519")
+    max_val = float("0.135878")
+    mean = float("-0.000282851")
+    std = float("0.0016268")
+    data = None
+
+
+class Program_weight_tensor_parameter_7:
+    name = "parameter_7"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("-1.77402")
+    max_val = float("0.318654")
+    mean = float("-0.310798")
+    std = float("0.291236")
+    data = None
+
+
+class Program_weight_tensor_parameter_8:
+    name = "parameter_8"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("0.188523")
+    max_val = float("1.82125")
+    mean = float("0.609641")
+    std = float("0.262607")
+    data = None
+
+
+class Program_weight_tensor_parameter_9:
+    name = "parameter_9"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("5.23505e-05")
+    max_val = float("0.00107224")
+    mean = float("0.000231574")
+    std = float("0.000114041")
+    data = None
+
+
+class Program_weight_tensor_parameter_10:
+    name = "parameter_10"
+    shape = [384]
+    dtype = "float32"
+    min_val = float("-0.0913202")
+    max_val = float("0.0738998")
+    mean = float("0.0208982")
+    std = float("0.0171143")
+    data
= None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020214") + max_val = float("0.0255239") + mean = float("-0.000361046") + std = float("0.00271802") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318949") + mean = float("-0.310739") + std = float("0.291254") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("0.334653") + max_val = float("2.60511") + mean = float("1.02603") + std = float("0.290253") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384] + dtype = "float32" + min_val = float("0.000593004") + max_val = float("0.00596735") + mean = float("0.00204404") + std = float("0.00073718") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.227759") + max_val = float("0.112191") + mean = float("0.0216656") + std = float("0.0367499") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0190584") + max_val = float("0.0259183") + mean = float("-4.76047e-05") + std = float("0.0017617") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("-2.58225") + max_val = float("0.0329867") + mean = float("-1.56843") + std = float("0.415962") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("0.52002") + max_val = float("1.64429") + mean = float("1.13566") + std = float("0.149475") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384] + dtype = "float32" + min_val = float("0.0404637") + max_val = float("0.23") + mean = float("0.0856589") + std = float("0.0233439") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.9033") + max_val = float("0.385661") + mean = float("-0.25773") + std = float("0.123402") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0213207") + max_val = float("0.0602371") + mean = float("-0.000201951") + std = float("0.00231308") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("-1.93927") + max_val = float("0.644474") + mean = float("-0.574884") + std = float("0.358671") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("0.163873") + max_val = float("2.06585") + mean = float("0.562027") + std = float("0.227242") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384] + dtype = "float32" + min_val = float("7.65946e-05") + max_val = float("0.00146603") + mean = float("0.000260801") + std = float("0.000127115") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.047045") + max_val = float("0.0685866") + mean = 
float("0.0209886") + std = float("0.0147269") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246209") + max_val = float("0.0323191") + mean = float("-0.00038074") + std = float("0.00249603") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.645257") + mean = float("-0.574812") + std = float("0.358742") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("0.58315") + max_val = float("2.15642") + mean = float("1.08405") + std = float("0.255745") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384] + dtype = "float32" + min_val = float("0.0013599") + max_val = float("0.00896475") + mean = float("0.00289759") + std = float("0.000860853") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0821017") + max_val = float("0.146645") + mean = float("0.0336192") + std = float("0.0396185") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017236") + max_val = float("0.0310435") + mean = float("-8.47071e-05") + std = float("0.00189556") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("-2.39591") + max_val = float("0.845752") + mean = float("-1.40539") + std = float("0.360596") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("0.453112") + max_val = float("1.91948") + mean = float("1.16636") + std = float("0.14802") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384] + dtype = "float32" + min_val = float("0.0300933") + max_val = float("0.138775") + mean = float("0.0607843") + std = float("0.0156745") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.749117") + max_val = float("0.836662") + mean = float("-0.184023") + std = float("0.110734") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259567") + max_val = float("0.0450409") + mean = float("-0.000200361") + std = float("0.00234146") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("-1.8762") + max_val = float("0.453243") + mean = float("-0.485339") + std = float("0.376467") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("0.0773354") + max_val = float("2.11925") + mean = float("0.441956") + std = float("0.217663") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384] + dtype = "float32" + min_val = float("6.01092e-05") + max_val = float("0.00133181") + mean = float("0.000306226") + std = float("0.000149439") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = 
float("-0.0475575") + max_val = float("0.071738") + mean = float("0.0252212") + std = float("0.0164931") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0207296") + max_val = float("0.0301957") + mean = float("-0.000479918") + std = float("0.0021441") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("-1.87654") + max_val = float("0.453653") + mean = float("-0.485263") + std = float("0.376563") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("0.521871") + max_val = float("2.22439") + mean = float("1.05289") + std = float("0.260102") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384] + dtype = "float32" + min_val = float("0.00177683") + max_val = float("0.00907934") + mean = float("0.0039468") + std = float("0.00118085") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.209845") + max_val = float("0.180608") + mean = float("0.0397265") + std = float("0.04484") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0177497") + max_val = float("0.036737") + mean = float("-9.16795e-05") + std = float("0.00200706") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("-2.15635") + max_val = float("0.418177") + mean = float("-1.36712") + std = float("0.277468") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("0.706134") + max_val = float("1.6357") + mean = float("1.14301") + std = float("0.101583") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384] + dtype = "float32" + min_val = float("0.0216198") + max_val = float("0.138164") + mean = float("0.046727") + std = float("0.0129664") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.694464") + max_val = float("0.208372") + mean = float("-0.129315") + std = float("0.0938846") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274071") + max_val = float("0.0448565") + mean = float("-0.000158418") + std = float("0.00223888") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("-2.9232") + max_val = float("1.66463") + mean = float("-0.760372") + std = float("0.643546") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("0.953224") + max_val = float("2.91794") + mean = float("1.86322") + std = float("0.27618") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384] + dtype = "float32" + min_val = float("0.00275058") + max_val = float("0.01231") + mean = float("0.00516812") + std = float("0.00130652") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + 
dtype = "float32" + min_val = float("-0.249794") + max_val = float("0.145992") + mean = float("0.0635442") + std = float("0.0326689") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0371909") + max_val = float("0.0509187") + mean = float("-0.000727671") + std = float("0.00522845") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("-2.2471") + max_val = float("0.681977") + mean = float("-0.777142") + std = float("0.472903") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("0.965853") + max_val = float("2.89359") + mean = float("2.09705") + std = float("0.305433") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384] + dtype = "float32" + min_val = float("0.000799495") + max_val = float("0.00402168") + mean = float("0.00198534") + std = float("0.0004402") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0161372") + max_val = float("0.0799072") + mean = float("0.0349754") + std = float("0.0164707") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0815437") + max_val = float("0.0646253") + mean = float("-0.000388202") + std = float("0.00359255") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [768] + dtype = "float32" + min_val = float("-2.40199") + max_val = float("0.642394") + mean = float("-0.908374") + std = float("0.339302") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [768] + dtype = "float32" + min_val = float("0.530297") + max_val = float("1.90727") + mean = float("0.919687") + std = float("0.149179") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [768] + dtype = "float32" + min_val = float("0.00625688") + max_val = float("0.056665") + mean = float("0.0153943") + std = float("0.00459564") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [768] + dtype = "float32" + min_val = float("-0.235652") + max_val = float("0.254751") + mean = float("0.0393354") + std = float("0.0563281") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0378314") + max_val = float("0.0543419") + mean = float("-9.75912e-05") + std = float("0.00233888") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [512] + dtype = "float32" + min_val = float("-3.38998") + max_val = float("1.66652") + mean = float("-1.16179") + std = float("0.513719") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [512] + dtype = "float32" + min_val = float("0.523767") + max_val = float("1.67712") + mean = float("1.11122") + std = float("0.148184") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [512] + dtype = "float32" + min_val = float("0.00233511") + max_val = float("0.0167819") + mean = float("0.00761769") + std = float("0.00204484") + data = None + + +class Program_weight_tensor_parameter_70: 
+ name = "parameter_70" + shape = [512] + dtype = "float32" + min_val = float("-0.172067") + max_val = float("0.0981938") + mean = float("-0.0487285") + std = float("0.0396677") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202262") + max_val = float("0.184296") + mean = float("-0.000573477") + std = float("0.00792306") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [384] + dtype = "float32" + min_val = float("-0.0100703") + max_val = float("0.00138871") + mean = float("-0.00295173") + std = float("0.00227127") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202729") + max_val = float("0.140205") + mean = float("-0.002055") + std = float("0.00490701") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192] + dtype = "float32" + min_val = float("-1.97045") + max_val = float("0.409864") + mean = float("-0.348766") + std = float("0.333488") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("0.0528864") + max_val = float("2.15987") + mean = float("0.581255") + std = float("0.419833") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("8.99309e-05") + max_val = float("0.00136239") + mean = float("0.00045405") + std = float("0.000217854") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("-0.0345233") + max_val = float("0.0542267") + mean = float("0.00534646") + std = float("0.0149125") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.023487") + max_val = float("0.0581182") + mean = float("-0.000339748") + std = float("0.0040934") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192] + dtype = "float32" + min_val = float("-1.97037") + max_val = float("0.410702") + mean = float("-0.34863") + std = float("0.333546") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("0.372338") + max_val = float("2.70216") + mean = float("1.20181") + std = float("0.493699") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.00122552") + max_val = float("0.0156617") + mean = float("0.00510239") + std = float("0.00188493") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("-0.097226") + max_val = float("0.146797") + mean = float("0.0203808") + std = float("0.0428675") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0289902") + max_val = float("0.0378296") + mean = float("-0.000154473") + std = float("0.00313532") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192] + dtype = "float32" + min_val = float("-2.89065") + max_val = float("-0.176734") + mean = float("-1.31453") + std = 
float("0.40113") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("0.696524") + max_val = float("2.09454") + mean = float("1.17918") + std = float("0.169868") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.0626606") + max_val = float("0.335775") + mean = float("0.130864") + std = float("0.0432639") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("-2.50771") + max_val = float("1.70173") + mean = float("-0.202725") + std = float("0.37842") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0331927") + max_val = float("0.0456383") + mean = float("-0.000188198") + std = float("0.00374306") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192] + dtype = "float32" + min_val = float("-1.9404") + max_val = float("0.513024") + mean = float("-0.279434") + std = float("0.321452") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("0.0454025") + max_val = float("1.77027") + mean = float("0.444331") + std = float("0.305722") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("7.44986e-05") + max_val = float("0.00137158") + mean = float("0.000401239") + std = float("0.000216438") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("-0.029272") + max_val = float("0.0469656") + mean = float("0.00801306") + std = float("0.0116412") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0234926") + max_val = float("0.036738") + mean = float("-0.000377237") + std = float("0.00377417") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192] + dtype = "float32" + min_val = float("-1.94044") + max_val = float("0.51462") + mean = float("-0.279235") + std = float("0.321666") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("0.483074") + max_val = float("2.27001") + mean = float("1.13833") + std = float("0.37563") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.00270072") + max_val = float("0.014292") + mean = float("0.00597179") + std = float("0.00181526") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("-0.0923253") + max_val = float("0.111642") + mean = float("0.0327645") + std = float("0.0355125") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0231072") + max_val = float("0.038718") + mean = float("-0.000192078") + std = float("0.00338604") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192] + dtype = "float32" + min_val = float("-2.50828") + max_val = float("-0.123237") 
+ mean = float("-1.28886") + std = float("0.44374") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("0.65494") + max_val = float("1.66968") + mean = float("1.19938") + std = float("0.166128") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.0463808") + max_val = float("0.199074") + mean = float("0.0939914") + std = float("0.0271592") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("-2.14238") + max_val = float("0.410379") + mean = float("-0.110821") + std = float("0.246177") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0362254") + max_val = float("0.0508084") + mean = float("-0.000238085") + std = float("0.00389331") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.468575") + mean = float("-0.262432") + std = float("0.335818") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("0.00295124") + max_val = float("1.67875") + mean = float("0.351961") + std = float("0.251699") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("9.30792e-07") + max_val = float("0.00191072") + mean = float("0.000361058") + std = float("0.000248966") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("-0.0372993") + max_val = float("0.0527515") + mean = float("0.0101454") + std = float("0.0121623") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0303466") + max_val = float("0.0356195") + mean = float("-0.000425557") + std = float("0.0036432") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.470016") + mean = float("-0.262262") + std = float("0.336041") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("0.406102") + max_val = float("1.97794") + mean = float("1.06588") + std = float("0.334156") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.00267891") + max_val = float("0.013042") + mean = float("0.00612078") + std = float("0.00178677") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("-0.0635846") + max_val = float("0.115227") + mean = float("0.0353282") + std = float("0.0320147") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0321474") + max_val = float("0.0388371") + mean = float("-0.000190596") + std = float("0.00354187") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192] + dtype = 
"float32" + min_val = float("-2.49735") + max_val = float("0.137985") + mean = float("-1.24334") + std = float("0.424316") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("0.652126") + max_val = float("1.80991") + mean = float("1.16717") + std = float("0.165409") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.0307206") + max_val = float("0.141566") + mean = float("0.0673632") + std = float("0.0172678") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("-1.51549") + max_val = float("0.284447") + mean = float("-0.0982023") + std = float("0.179232") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.05013") + max_val = float("0.0656662") + mean = float("-0.000261502") + std = float("0.00399974") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192] + dtype = "float32" + min_val = float("-2.07916") + max_val = float("0.533363") + mean = float("-0.272351") + std = float("0.375289") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("0.000510371") + max_val = float("0.732354") + mean = float("0.211968") + std = float("0.136272") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("6.27846e-08") + max_val = float("0.0007887") + mean = float("0.000243037") + std = float("0.000135288") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("-0.0197245") + max_val = float("0.0315593") + mean = float("0.00618711") + std = float("0.00922289") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0202783") + max_val = float("0.036136") + mean = float("-0.000265605") + std = float("0.00319736") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192] + dtype = "float32" + min_val = float("-2.07922") + max_val = float("0.535166") + mean = float("-0.272236") + std = float("0.375502") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("0.396505") + max_val = float("1.96272") + mean = float("0.958924") + std = float("0.303858") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.00316561") + max_val = float("0.0147681") + mean = float("0.00641687") + std = float("0.00196185") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("-0.0910185") + max_val = float("0.161293") + mean = float("0.0384701") + std = float("0.0343716") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0299549") + max_val = float("0.0371106") + mean = float("-0.000205046") + std = float("0.00364104") + data = None + + +class 
Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0810353") + mean = float("-1.23693") + std = float("0.434057") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("0.761623") + max_val = float("1.62105") + mean = float("1.15096") + std = float("0.142541") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.0268285") + max_val = float("0.102609") + mean = float("0.0483826") + std = float("0.0115209") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("-1.23693") + max_val = float("0.284811") + mean = float("-0.0747383") + std = float("0.163901") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0531238") + max_val = float("0.0579085") + mean = float("-0.000268921") + std = float("0.00396934") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192] + dtype = "float32" + min_val = float("-1.21219") + max_val = float("0.446681") + mean = float("-0.232278") + std = float("0.339349") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [192] + dtype = "float32" + min_val = float("-9.82711e-05") + max_val = float("0.677789") + mean = float("0.192032") + std = float("0.120727") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [192] + dtype = "float32" + min_val = float("2.25073e-10") + max_val = float("0.000874414") + mean = float("0.000240801") + std = float("0.000144458") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [192] + dtype = "float32" + min_val = float("-0.0493103") + max_val = float("0.0373767") + mean = float("0.00675281") + std = float("0.0116645") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0342199") + max_val = float("0.0396943") + mean = float("-0.000272099") + std = float("0.00329482") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [192] + dtype = "float32" + min_val = float("-1.21223") + max_val = float("0.447751") + mean = float("-0.232181") + std = float("0.33961") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [192] + dtype = "float32" + min_val = float("0.382831") + max_val = float("1.56386") + mean = float("0.852099") + std = float("0.259991") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [192] + dtype = "float32" + min_val = float("0.00221295") + max_val = float("0.013395") + mean = float("0.00625423") + std = float("0.00179181") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [192] + dtype = "float32" + min_val = float("-0.0844719") + max_val = float("0.142009") + mean = float("0.0387308") + std = float("0.0378044") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0323048") + max_val = float("0.0364338") + 
mean = float("-0.000186547") + std = float("0.00363857") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [192] + dtype = "float32" + min_val = float("-2.48701") + max_val = float("-0.131293") + mean = float("-1.25014") + std = float("0.418255") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [192] + dtype = "float32" + min_val = float("0.689678") + max_val = float("1.5199") + mean = float("1.12491") + std = float("0.13482") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [192] + dtype = "float32" + min_val = float("0.0183928") + max_val = float("0.0607598") + mean = float("0.0349167") + std = float("0.00875797") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [192] + dtype = "float32" + min_val = float("-0.716469") + max_val = float("0.320455") + mean = float("-0.0746187") + std = float("0.131028") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0610342") + max_val = float("0.0592016") + mean = float("-0.000277763") + std = float("0.00397261") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [192] + dtype = "float32" + min_val = float("-1.21753") + max_val = float("0.499396") + mean = float("-0.167678") + std = float("0.2936") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [192] + dtype = "float32" + min_val = float("0.00836385") + max_val = float("1.53625") + mean = float("0.238111") + std = float("0.211728") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [192] + dtype = "float32" + min_val = float("1.96593e-05") + max_val = float("0.00679754") + mean = float("0.000504925") + std = float("0.000658808") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [192] + dtype = "float32" + min_val = float("-0.0656068") + max_val = float("0.0861781") + mean = float("0.00950526") + std = float("0.0164034") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0600528") + max_val = float("0.0312537") + mean = float("-0.000425532") + std = float("0.00397123") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [192] + dtype = "float32" + min_val = float("-1.21747") + max_val = float("0.500448") + mean = float("-0.167516") + std = float("0.293818") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [192] + dtype = "float32" + min_val = float("0.354999") + max_val = float("1.44989") + mean = float("0.756941") + std = float("0.21662") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [192] + dtype = "float32" + min_val = float("0.00436972") + max_val = float("0.0168976") + mean = float("0.00908178") + std = float("0.00268591") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [192] + dtype = "float32" + min_val = float("-0.15942") + max_val = float("0.153659") + mean = float("0.0492924") + std = float("0.0450903") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [192, 192, 3, 3] + dtype = 
"float32" + min_val = float("-0.062497") + max_val = float("0.0530577") + mean = float("-0.000241352") + std = float("0.00357809") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [192] + dtype = "float32" + min_val = float("-1.87905") + max_val = float("-0.211382") + mean = float("-1.14643") + std = float("0.325653") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [192] + dtype = "float32" + min_val = float("0.788784") + max_val = float("1.59753") + mean = float("1.12152") + std = float("0.12987") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [192] + dtype = "float32" + min_val = float("0.0156512") + max_val = float("0.0757502") + mean = float("0.0313837") + std = float("0.00924696") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [192] + dtype = "float32" + min_val = float("-0.689275") + max_val = float("0.284905") + mean = float("-0.0666799") + std = float("0.130681") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062874") + max_val = float("0.076648") + mean = float("-0.000213471") + std = float("0.00383126") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [192] + dtype = "float32" + min_val = float("-2.86217") + max_val = float("1.58057") + mean = float("-0.0275412") + std = float("0.747651") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [192] + dtype = "float32" + min_val = float("0.487672") + max_val = float("2.0776") + mean = float("0.90163") + std = float("0.232007") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [192] + dtype = "float32" + min_val = float("0.00975444") + max_val = float("0.0591661") + mean = float("0.0230902") + std = float("0.00900034") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [192] + dtype = "float32" + min_val = float("-0.230409") + max_val = float("0.297734") + mean = float("-0.0377667") + std = float("0.0596951") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.108831") + max_val = float("0.0931739") + mean = float("-0.000512323") + std = float("0.00842399") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [192] + dtype = "float32" + min_val = float("-2.96764") + max_val = float("1.66844") + mean = float("0.0968476") + std = float("0.663233") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [192] + dtype = "float32" + min_val = float("0.830791") + max_val = float("5.55835") + mean = float("1.91342") + std = float("0.933379") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [192] + dtype = "float32" + min_val = float("0.00614721") + max_val = float("0.0464744") + mean = float("0.0174745") + std = float("0.00563515") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [192] + dtype = "float32" + min_val = float("-0.133354") + max_val = float("0.157826") + mean = float("-0.0239396") + std = float("0.0565686") + data = None + + +class 
Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0985625") + max_val = float("0.0941202") + mean = float("-0.000511784") + std = float("0.00783691") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [384] + dtype = "float32" + min_val = float("-2.92359") + max_val = float("1.32666") + mean = float("-0.301116") + std = float("0.563662") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [384] + dtype = "float32" + min_val = float("0.631853") + max_val = float("2.47541") + mean = float("1.15998") + std = float("0.257348") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [384] + dtype = "float32" + min_val = float("0.0104507") + max_val = float("0.111288") + mean = float("0.0262303") + std = float("0.0126363") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [384] + dtype = "float32" + min_val = float("-0.269997") + max_val = float("0.244719") + mean = float("0.0226896") + std = float("0.0692565") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0753194") + max_val = float("0.0720032") + mean = float("-0.000103466") + std = float("0.00421781") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [256] + dtype = "float32" + min_val = float("-2.04502") + max_val = float("1.28816") + mean = float("-0.924614") + std = float("0.543015") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [256] + dtype = "float32" + min_val = float("0.517239") + max_val = float("1.68961") + mean = float("1.05432") + std = float("0.176149") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [256] + dtype = "float32" + min_val = float("0.00195657") + max_val = float("0.0265577") + mean = float("0.00628818") + std = float("0.00298858") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [256] + dtype = "float32" + min_val = float("-0.230372") + max_val = float("0.154861") + mean = float("-0.0517653") + std = float("0.0687788") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.206154") + max_val = float("0.170783") + mean = float("-0.000884197") + std = float("0.0145162") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [192] + dtype = "float32" + min_val = float("-0.0139357") + max_val = float("0.00388361") + mean = float("-0.00495662") + std = float("0.00371291") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.347135") + max_val = float("0.228777") + mean = float("-0.00389388") + std = float("0.0106293") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("-1.91355") + max_val = float("0.53303") + mean = float("-0.208939") + std = float("0.434311") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.142427") + max_val = float("3.22988") 
+ mean = float("0.635833") + std = float("0.668487") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("7.85249e-05") + max_val = float("0.00245675") + mean = float("0.000588646") + std = float("0.000433443") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96] + dtype = "float32" + min_val = float("-0.0546919") + max_val = float("0.0598857") + mean = float("0.00511828") + std = float("0.0215456") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0500852") + max_val = float("0.0932317") + mean = float("-0.000561284") + std = float("0.00794853") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [96] + dtype = "float32" + min_val = float("-1.91314") + max_val = float("0.534306") + mean = float("-0.208596") + std = float("0.434435") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [96] + dtype = "float32" + min_val = float("0.343774") + max_val = float("5.47118") + mean = float("1.08565") + std = float("0.88383") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [96] + dtype = "float32" + min_val = float("0.000942165") + max_val = float("0.0158414") + mean = float("0.00535059") + std = float("0.00274901") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [96] + dtype = "float32" + min_val = float("-0.137594") + max_val = float("0.212423") + mean = float("0.0122222") + std = float("0.0611614") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0398886") + max_val = float("0.0746673") + mean = float("-0.000229692") + std = float("0.00588155") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [96] + dtype = "float32" + min_val = float("-2.46605") + max_val = float("-0.0202143") + mean = float("-1.22676") + std = float("0.443304") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [96] + dtype = "float32" + min_val = float("0.542082") + max_val = float("1.6433") + mean = float("0.945634") + std = float("0.172529") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [96] + dtype = "float32" + min_val = float("0.041446") + max_val = float("0.237068") + mean = float("0.0867119") + std = float("0.0369728") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [96] + dtype = "float32" + min_val = float("-2.80547") + max_val = float("1.61972") + mean = float("-0.194801") + std = float("0.469392") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.150203") + max_val = float("0.114223") + mean = float("-0.000376735") + std = float("0.00724688") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [96] + dtype = "float32" + min_val = float("-1.38826") + max_val = float("0.562406") + mean = float("-0.132909") + std = float("0.347394") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [96] + dtype = "float32" + 
min_val = float("0.0453402") + max_val = float("1.86504") + mean = float("0.460875") + std = float("0.366369") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [96] + dtype = "float32" + min_val = float("7.55835e-05") + max_val = float("0.00277897") + mean = float("0.000754646") + std = float("0.000610909") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [96] + dtype = "float32" + min_val = float("-0.048531") + max_val = float("0.0464578") + mean = float("0.00676756") + std = float("0.0176703") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0483138") + max_val = float("0.0415922") + mean = float("-0.000498568") + std = float("0.00710731") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [96] + dtype = "float32" + min_val = float("-1.38834") + max_val = float("0.5648") + mean = float("-0.13256") + std = float("0.347894") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [96] + dtype = "float32" + min_val = float("0.370504") + max_val = float("2.32822") + mean = float("0.901933") + std = float("0.426522") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [96] + dtype = "float32" + min_val = float("0.00323837") + max_val = float("0.0234632") + mean = float("0.00912274") + std = float("0.00470534") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [96] + dtype = "float32" + min_val = float("-0.0965735") + max_val = float("0.12145") + mean = float("0.0354439") + std = float("0.0432668") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.058655") + max_val = float("0.0591114") + mean = float("-0.000356621") + std = float("0.0059174") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [96] + dtype = "float32" + min_val = float("-3.31955") + max_val = float("0.36603") + mean = float("-1.17895") + std = float("0.556023") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [96] + dtype = "float32" + min_val = float("0.473098") + max_val = float("1.98183") + mean = float("1.03911") + std = float("0.238708") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [96] + dtype = "float32" + min_val = float("0.0282091") + max_val = float("0.14579") + mean = float("0.0545779") + std = float("0.0168222") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [96] + dtype = "float32" + min_val = float("-1.24896") + max_val = float("0.504954") + mean = float("-0.0599842") + std = float("0.268023") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.147666") + max_val = float("0.152112") + mean = float("-0.000410438") + std = float("0.00711818") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [96] + dtype = "float32" + min_val = float("-1.24956") + max_val = float("0.58267") + mean = float("-0.109749") + std = float("0.291966") + data = None + + +class Program_weight_tensor_parameter_217: + name = 
"parameter_217" + shape = [96] + dtype = "float32" + min_val = float("0.0243293") + max_val = float("1.27785") + mean = float("0.324816") + std = float("0.192866") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [96] + dtype = "float32" + min_val = float("6.35226e-05") + max_val = float("0.00358684") + mean = float("0.000713081") + std = float("0.000576864") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [96] + dtype = "float32" + min_val = float("-0.0385319") + max_val = float("0.0503831") + mean = float("0.00407058") + std = float("0.0162571") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0448708") + max_val = float("0.0573038") + mean = float("-0.000336044") + std = float("0.00726838") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [96] + dtype = "float32" + min_val = float("-1.24942") + max_val = float("0.584539") + mean = float("-0.109552") + std = float("0.292478") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [96] + dtype = "float32" + min_val = float("0.315495") + max_val = float("1.67063") + mean = float("0.747087") + std = float("0.257847") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [96] + dtype = "float32" + min_val = float("0.00338498") + max_val = float("0.0252591") + mean = float("0.0101982") + std = float("0.00406466") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [96] + dtype = "float32" + min_val = float("-0.0546488") + max_val = float("0.145267") + mean = float("0.0275953") + std = float("0.038381") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.065253") + max_val = float("0.0583777") + mean = float("-0.000331097") + std = float("0.00602268") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [96] + dtype = "float32" + min_val = float("-3.58296") + max_val = float("0.290726") + mean = float("-1.12856") + std = float("0.572409") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [96] + dtype = "float32" + min_val = float("0.511106") + max_val = float("2.19165") + mean = float("1.05198") + std = float("0.238255") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [96] + dtype = "float32" + min_val = float("0.0201149") + max_val = float("0.0748228") + mean = float("0.0399945") + std = float("0.0094601") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [96] + dtype = "float32" + min_val = float("-0.822261") + max_val = float("0.396367") + mean = float("-0.0472576") + std = float("0.195175") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0973524") + max_val = float("0.130681") + mean = float("-0.000422376") + std = float("0.00719502") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [96] + dtype = "float32" + min_val = float("-0.892064") + max_val = float("0.529384") + mean = float("-0.160709") + std = float("0.281574") + data = 
None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [96] + dtype = "float32" + min_val = float("0.0191223") + max_val = float("1.40524") + mean = float("0.32501") + std = float("0.213327") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [96] + dtype = "float32" + min_val = float("4.77273e-05") + max_val = float("0.00366742") + mean = float("0.000733131") + std = float("0.000561341") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [96] + dtype = "float32" + min_val = float("-0.0328433") + max_val = float("0.0466064") + mean = float("0.00724984") + std = float("0.0146293") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0499906") + max_val = float("0.0448114") + mean = float("-0.000606145") + std = float("0.00724394") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [96] + dtype = "float32" + min_val = float("-0.891955") + max_val = float("0.530721") + mean = float("-0.160571") + std = float("0.281998") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [96] + dtype = "float32" + min_val = float("0.17446") + max_val = float("1.78047") + mean = float("0.708571") + std = float("0.284378") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [96] + dtype = "float32" + min_val = float("0.00236768") + max_val = float("0.0256587") + mean = float("0.0102153") + std = float("0.00392496") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [96] + dtype = "float32" + min_val = float("-0.0582404") + max_val = float("0.137905") + mean = float("0.0410397") + std = float("0.0378938") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.057305") + max_val = float("0.0650381") + mean = float("-0.000417143") + std = float("0.00601776") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [96] + dtype = "float32" + min_val = float("-2.65777") + max_val = float("0.065358") + mean = float("-1.06432") + std = float("0.48826") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [96] + dtype = "float32" + min_val = float("0.512951") + max_val = float("1.73806") + mean = float("1.01547") + std = float("0.193357") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [96] + dtype = "float32" + min_val = float("0.0176967") + max_val = float("0.0574371") + mean = float("0.0307481") + std = float("0.00699669") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [96] + dtype = "float32" + min_val = float("-0.761559") + max_val = float("0.60821") + mean = float("-0.0646145") + std = float("0.194302") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0738037") + max_val = float("0.125248") + mean = float("-0.000426247") + std = float("0.0069708") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [96] + dtype = "float32" + min_val = float("-0.978262") + max_val = float("0.489992") + mean 
= float("-0.136691") + std = float("0.278636") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [96] + dtype = "float32" + min_val = float("0.0498074") + max_val = float("1.1462") + mean = float("0.296075") + std = float("0.172323") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [96] + dtype = "float32" + min_val = float("0.000180546") + max_val = float("0.00509335") + mean = float("0.00108394") + std = float("0.00072352") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [96] + dtype = "float32" + min_val = float("-0.041806") + max_val = float("0.0564684") + mean = float("0.00557215") + std = float("0.0180702") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0731207") + max_val = float("0.0763792") + mean = float("-0.000594618") + std = float("0.00825765") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [96] + dtype = "float32" + min_val = float("-0.978083") + max_val = float("0.492448") + mean = float("-0.136655") + std = float("0.279122") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [96] + dtype = "float32" + min_val = float("0.236133") + max_val = float("1.69671") + mean = float("0.603953") + std = float("0.228164") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [96] + dtype = "float32" + min_val = float("0.00612804") + max_val = float("0.0281159") + mean = float("0.0137457") + std = float("0.00478723") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [96] + dtype = "float32" + min_val = float("-0.0713362") + max_val = float("0.135694") + mean = float("0.0273635") + std = float("0.0460879") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0654835") + max_val = float("0.0522648") + mean = float("-0.00036204") + std = float("0.0060426") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [96] + dtype = "float32" + min_val = float("-3.46434") + max_val = float("0.199609") + mean = float("-1.00527") + std = float("0.548081") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [96] + dtype = "float32" + min_val = float("0.686506") + max_val = float("2.51291") + mean = float("1.07427") + std = float("0.212412") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [96] + dtype = "float32" + min_val = float("0.013513") + max_val = float("0.0542585") + mean = float("0.0262785") + std = float("0.00851322") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [96] + dtype = "float32" + min_val = float("-0.482054") + max_val = float("0.527892") + mean = float("-0.0515599") + std = float("0.192746") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0824841") + max_val = float("0.0934753") + mean = float("-0.000357672") + std = float("0.00712731") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [96] + dtype = "float32" + min_val = 
float("-0.625302") + max_val = float("0.449836") + mean = float("-0.0825559") + std = float("0.256738") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [96] + dtype = "float32" + min_val = float("0.0910018") + max_val = float("1.30085") + mean = float("0.309049") + std = float("0.196412") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [96] + dtype = "float32" + min_val = float("0.000380114") + max_val = float("0.0177806") + mean = float("0.00357894") + std = float("0.00283759") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [96] + dtype = "float32" + min_val = float("-0.0360021") + max_val = float("0.0301813") + mean = float("-5.04728e-05") + std = float("0.0106632") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0925016") + max_val = float("0.0753255") + mean = float("-0.00105853") + std = float("0.00936655") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [96] + dtype = "float32" + min_val = float("-0.625183") + max_val = float("0.450937") + mean = float("-0.082575") + std = float("0.257081") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [96] + dtype = "float32" + min_val = float("0.210658") + max_val = float("1.42703") + mean = float("0.527208") + std = float("0.258269") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [96] + dtype = "float32" + min_val = float("0.0102276") + max_val = float("0.0952331") + mean = float("0.0336542") + std = float("0.0171249") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [96] + dtype = "float32" + min_val = float("-0.108112") + max_val = float("0.0898917") + mean = float("-0.00831331") + std = float("0.0381217") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0885375") + max_val = float("0.0525934") + mean = float("-0.000466484") + std = float("0.00584459") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [96] + dtype = "float32" + min_val = float("-2.40893") + max_val = float("0.508421") + mean = float("-0.828862") + std = float("0.467337") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [96] + dtype = "float32" + min_val = float("0.853968") + max_val = float("2.18309") + mean = float("1.27545") + std = float("0.208741") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [96] + dtype = "float32" + min_val = float("0.011454") + max_val = float("0.0464459") + mean = float("0.0216361") + std = float("0.00778855") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [96] + dtype = "float32" + min_val = float("-0.570491") + max_val = float("0.473016") + mean = float("-0.0532507") + std = float("0.173783") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.15411") + max_val = float("0.150524") + mean = float("-0.000241604") + std = float("0.00722176") + data = None + + +class Program_weight_tensor_parameter_276: + name = 
"parameter_276" + shape = [96] + dtype = "float32" + min_val = float("-3.16609") + max_val = float("1.88989") + mean = float("0.501666") + std = float("0.861493") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [96] + dtype = "float32" + min_val = float("0.214988") + max_val = float("2.6299") + mean = float("0.562885") + std = float("0.31708") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [96] + dtype = "float32" + min_val = float("0.00763171") + max_val = float("0.154967") + mean = float("0.0323459") + std = float("0.0238839") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [96] + dtype = "float32" + min_val = float("-0.271678") + max_val = float("0.329389") + mean = float("-0.0147432") + std = float("0.0938868") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.186901") + max_val = float("0.225419") + mean = float("-0.000291508") + std = float("0.0156297") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [96] + dtype = "float32" + min_val = float("-4.92284") + max_val = float("1.57998") + mean = float("0.384603") + std = float("1.04888") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [96] + dtype = "float32" + min_val = float("0.414126") + max_val = float("6.78093") + mean = float("1.69449") + std = float("1.30795") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [96] + dtype = "float32" + min_val = float("0.00531954") + max_val = float("0.272953") + mean = float("0.0381955") + std = float("0.0353968") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [96] + dtype = "float32" + min_val = float("-0.17223") + max_val = float("0.443347") + mean = float("0.0466399") + std = float("0.0966002") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.116975") + max_val = float("0.156029") + mean = float("0.000440768") + std = float("0.0149691") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [192] + dtype = "float32" + min_val = float("-2.27475") + max_val = float("1.75104") + mean = float("-0.126037") + std = float("0.740702") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [192] + dtype = "float32" + min_val = float("0.632268") + max_val = float("2.97322") + mean = float("1.08733") + std = float("0.283408") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [192] + dtype = "float32" + min_val = float("0.0109431") + max_val = float("0.233442") + mean = float("0.0433778") + std = float("0.0318779") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [192] + dtype = "float32" + min_val = float("-0.576887") + max_val = float("0.269966") + mean = float("-0.0934605") + std = float("0.118655") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0856428") + max_val = float("0.123627") + mean = float("-0.000225745") + std = float("0.00765725") + data = None + + +class 
Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [128] + dtype = "float32" + min_val = float("-2.81597") + max_val = float("1.9636") + mean = float("-0.71259") + std = float("0.647835") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [128] + dtype = "float32" + min_val = float("0.311227") + max_val = float("2.8783") + mean = float("1.01845") + std = float("0.278722") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [128] + dtype = "float32" + min_val = float("0.000843216") + max_val = float("0.0154502") + mean = float("0.00453611") + std = float("0.00230447") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [128] + dtype = "float32" + min_val = float("-0.237755") + max_val = float("0.26225") + mean = float("0.00315393") + std = float("0.0867451") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.171773") + max_val = float("0.211127") + mean = float("-0.00142636") + std = float("0.0224525") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [96] + dtype = "float32" + min_val = float("-0.0180386") + max_val = float("3.78007e-05") + mean = float("-0.00735479") + std = float("0.00450801") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.30281") + max_val = float("0.123007") + mean = float("-0.00790532") + std = float("0.0180213") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0501789") + max_val = float("0.0563261") + mean = float("-0.00170388") + std = float("0.0129798") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0578676") + max_val = float("0.0799749") + mean = float("-0.000509865") + std = float("0.0110281") + data = None + + 
+class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0925274") + max_val = float("0.0949158") + mean = float("-0.00064859") + std = float("0.0123667") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0727088") + max_val = float("0.0782992") + mean = float("-0.00102365") + std = float("0.0139349") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0621898") + max_val = float("0.0692526") + mean = float("-0.000822014") + std = float("0.0111057") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape 
= [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.11162") + max_val = float("0.0943574") + mean = float("-0.000368661") + std = float("0.0125785") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0944494") + max_val = float("0.0702451") + mean = float("-0.00185301") + std = float("0.0172184") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0691644") + max_val = float("0.0974384") + mean = float("-0.000506655") + std = float("0.011691") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.133213") + max_val = float("0.0905212") + mean = float("-0.000334254") + std = float("0.0134452") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + 
shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.17806") + max_val = float("0.14305") + mean = float("-0.00229242") + std = float("0.0246641") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.135012") + max_val = float("0.178483") + mean = float("-0.0004429") + std = float("0.0226955") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [96] + dtype = "float32" + min_val = float("-3.40701") + max_val = float("3.27538") + mean = float("0.329531") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [96] + dtype = "float32" + min_val = float("0.865919") + max_val = float("4.91404") + mean = float("1.91603") + std = float("0.752783") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [96] + dtype = "float32" + min_val = float("0.705639") + max_val = float("32.368") + mean = float("2.73559") + std = float("3.53943") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [96] + dtype = "float32" + min_val = float("-1.47426") + max_val = float("2.58312") + mean = float("-0.286183") + std = float("0.722428") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.110689") + max_val = float("0.13859") + mean = float("-0.000360127") + std = float("0.0133189") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.179264") + max_val = float("0.162144") + mean = float("-0.000679023") + std = float("0.020536") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None 
+ + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.347786") + max_val = float("0.218964") + mean = float("-0.000199571") + std = float("0.0261033") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.317155") + max_val = float("0.280865") + mean = float("-0.00214957") + std = float("0.0702742") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt index 303d49362..0cf23b344 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/graph_hash.txt @@ -1 +1 @@ -0d838ae9c799b8f1ab11dae768474bd1a90cb99967969c129656d57d09380aa2 \ No newline at end of file +82564cb272bfe4052ec183285332a6999eb8f2e9097ea2f1faf3c1a1650939fa \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py index b3f3bee9e..765fadabf 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/input_meta.py @@ -1,42 +1,71 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 12096, 10] + shape = [2, 3549, 10] dtype = "float32" - min_val = float("5.85845e-11") - max_val = float("0.891047") - mean = float("0.00620757") - std = float("0.0222101") + min_val = float("1.49258e-10") + max_val = float("0.896854") + mean = float("0.00647097") + std = float("0.0257209") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 12096, 40] + shape = [2, 3549, 4] dtype = "float32" - min_val = float("-15.4735") - max_val = float("25.7451") - mean = float("0.798417") - std = float("2.11877") + min_val = float("-112.857") + max_val = float("515.47") + mean = float("207.765") + std = float("123.433") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [12096, 2] + shape = [3549, 2] dtype = "float32" min_val = float("4.0") - max_val = float("764.0") - mean = float("384.0") - std = float("221.675") + max_val = float("412.0") + mean = 
float("208.0") + std = float("120.038") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [12096, 1] + shape = [3549, 1] dtype = "float32" min_val = float("8.0") max_val = float("32.0") mean = float("10.6667") std = float("5.70157") data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [2, 49, 1] + dtype = "int32" + min_val = 0 + max_val = 8 + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [2, 49, 4] + dtype = "float32" + max_val = float("408.482") + mean = float("110.196") + std = float("133.414") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [2, 49, 1] + dtype = "float32" + max_val = float("1.0") + mean = float("0.571429") + std = float("0.494872") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py index ffb87dfd6..2cdd157ba 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/model.py @@ -5,158 +5,500 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, parameter_0, data_0, data_1, data_2, data_3): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_2, data_3) - del data_2 - - # pd_op.shape64: (3xi64) <- (2x-1x40xf32) - shape64_0 = paddle._C_ops.shape64(data_1) - + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + full_int_array_0 = [2] + + # pd_op.unsqueeze: (2x49x1x4xf32) <- (2x49x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 # pd_op.full_int_array: (1xi64) <- () full_int_array_1 = [1] - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 + # pd_op.unsqueeze: (2x1x3549x4xf32) <- (2x3549x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.slice: (2x49x1x2xf32) <- (2x49x1x4xf32, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] ) - del full_int_array_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] + full_int_array_3 = [2147483647] - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + # pd_op.slice: (2x49x1x2xf32) <- (2x49x1x4xf32, 1xi64, 1xi64) slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] ) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + # pd_op.slice: (2x1x3549x2xf32) <- (2x1x3549x4xf32, 1xi64, 1xi64) slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] ) - del full_int_array_2, full_int_array_3, shape64_0 + del full_int_array_2 - # pd_op.full: (xi64) <- () + # pd_op.slice: (2x1x3549x2xf32) <- (2x1x3549x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (2x49x3549x2xf32) <- (2x49x1x2xf32, 2x1x3549x2xf32) + 
maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (2x49x3549x2xf32) <- (2x49x1x2xf32, 2x1x3549x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (2x49x3549x2xf32) <- (2x49x3549x2xf32, 2x49x3549x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() + [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full: (xi64) <- () + # pd_op.full: (1xf32) <- () full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full: (xi64) <- () + # pd_op.clip: (2x49x3549x2xf32) <- (2x49x3549x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (2x49x3549xf32) <- (2x49x3549x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (2x49x1x2xf32) <- (2x49x1x2xf32, 2x49x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (2x49x1x2xf32) <- (2x49x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (2x49x1xf32) <- (2x49x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (2x1x3549x2xf32) <- (2x1x3549x2xf32, 2x1x3549x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (2x1x3549x2xf32) <- (2x1x3549x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_1, subtract_2 + + # pd_op.prod: (2x1x3549xf32) <- (2x1x3549x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (2x49x3549xf32) <- (2x49x1xf32, 2x1x3549xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 + + # pd_op.full: (1xf32) <- () full_2 = paddle._C_ops.full( - [], float("10"), paddle.int64, paddle.core.CPUPlace() + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 + # pd_op.scale: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del subtract_3 - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 + # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 - # pd_op.reshape: (-1x-1x4x10xf32) <- (2x-1x40xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_1, stack_0) - del data_1, stack_0 + # pd_op.transpose: (2x10x3549xf32) <- (2x3549x10xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 - # pd_op.softmax: (-1x-1x4x10xf32) <- (-1x-1x4x10xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.transpose: (-1x10x-1x4xf32) 
<- (-1x-1x4x10xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("2"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() ) - del parameter_0 - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 + # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") + del full_3, full_4, full_5 + + # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 49] + + # pd_op.tile: (2x49xi32) <- (2x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 + + # pd_op.squeeze: (2x49xi32) <- (2x49x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 + + # builtin.combine: ([2x49xi32, 2x49xi32]) <- (2x49xi32, 2x49xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (2x49x2xi32) <- ([2x49xi32, 2x49xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (2x49x3549xf32) <- (2x10x3549xf32, 2x49x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (2x49x3549xf32) <- (2x49x3549xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (2x49x3549xf32) <- (2x49x3549xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) + del multiply_0 + + # pd_op.scale: (3549x1xf32) <- (3549x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) + del data_3, full_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x3549x2xf32) <- (3549x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) + del data_2 # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() + full_6 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 + # pd_op.split_with_num: ([1x1x3549x1xf32, 1x1x3549x1xf32]) <- (1x1x3549x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) + del unsqueeze_3 - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) + # builtin.split: (1x1x3549x1xf32, 1x1x3549x1xf32) <- ([1x1x3549x1xf32, 1x1x3549x1xf32]) ( split_0, split_1, ) = split_with_num_0 del split_with_num_0 - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.split_with_num: ([2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32]) <- 
(2x49x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) + del full_6, unsqueeze_0 + + # builtin.split: (2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32) <- ([2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( + full_7 = paddle._C_ops.full( [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] + # builtin.combine: ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32]) <- (2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) + # pd_op.concat: (2x49x3549x4xf32) <- ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) del combine_1 - # pd_op.share_data_: (2x-1x10xf32) <- (2x-1x10xf32) - share_data__0 = data_0.detach() - del data_0 + # pd_op.min: (2x49x3549xf32) <- (2x49x3549x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_3, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, + # pd_op.full: (xf32) <- () + full_8 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), ) - return share_data__0, multiply_0 + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) + del min_0 + + # pd_op.unsqueeze: (1x1x3549x1xf32) <- (3549x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) + del full_int_array_6, scale_1 + + # pd_op.add: (2x49x1x1xf32) <- (2x49x1x1xf32, 2x49x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (2x49x1x1xf32) <- (2x49x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_9, float("0"), 
True) + del add_1 + + # pd_op.add: (2x49x1x1xf32) <- (2x49x1x1xf32, 2x49x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (2x49x1x1xf32) <- (2x49x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_9, float("0"), True) + del add_2, full_9 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x3549x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x3549x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x3549x1xf32, 1x1x3549x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x3549x1xf32, 1x1x3549x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32]) <- (2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32) + combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 + + # pd_op.concat: (2x49x3549x4xf32) <- ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2, full_7 + + # pd_op.min: (2x49x3549xf32) <- (2x49x3549x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_8) + del full_8, min_1 + + # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) + cast_1 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_0, data_6) + del cast_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_1, data_6) + del cast_1 + + # pd_op.sum: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_10 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (2x49x1xb) <- (2x49x1xf32, xf32) + equal_0 = paddle._C_ops.equal(sum_0, full_10) + del sum_0 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_0, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + 
full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (2x49x1xb) <- (2x49x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() + ) + del full_0 + + # pd_op.cast: (2x49x1xf32) <- (2x49x1xb) + cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (2x49x1xf32) <- (2x49x1xb) + cast_3 = paddle._C_ops.cast(equal_0, paddle.float32) + del equal_0 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_2) + del add_6, cast_2 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (2x49x3549xf32) <- (2x49x1xf32, 2x49x3549xf32) + add_10 = paddle._C_ops.add(cast_3, add_7) + del add_7, cast_3 + + # pd_op.cast: (2x49x3549xb) <- (2x49x3549xf32) + cast_4 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) + where_0 = paddle._C_ops.where(cast_4, add_8, add_9) + del add_8, add_9, cast_4 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.topk: (2x49x13xf32, 2x49x13xi64) <- (2x49x3549xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_11, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_11, where_0 + + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("3549"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (2x49x13x3549xf32) <- (2x49x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_12, topk_1.dtype), full_12 + ) + del full_12, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (2x49x3549xf32) <- (2x49x13x3549xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_6) + del data_6, sum_1 + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_10) + del multiply_3 + + # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_10) + del full_10, multiply_2 + + # pd_op.bitwise_or: (2x49x3549xb) <- (2x49x3549xb, 2x49x3549xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) + cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) + del cast_5, multiply_4 + + # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- 
(2x3549xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_13 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_13) + del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py index 88fef0bea..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_3/weight_meta.py @@ -1,7 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [1, 10, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None + diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt index 6fe7297b8..4813c5fb9 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/graph_hash.txt @@ -1 +1 @@ -8b11cddc56e8bf2fc7551237b756b7f8f8a4e9dd2f556be8d328af948ebf41da \ No newline at end of file +0ade118bf5113a2f63aa1b27c2409d65cdb05050be009516c3881c1dd15b2c66 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py index b94668a44..7a270e845 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/input_meta.py @@ -2,133 +2,232 @@ class Program_weight_tensor_data_0: name = "data_0" shape = [1] dtype = "float32" - data = [0.699884] + data = [3.32824] class Program_weight_tensor_data_1: name = "data_1" shape = [1] dtype = "float32" - data = [0.667963] + data = [0.0732286] class Program_weight_tensor_data_2: name = "data_2" shape = [1] dtype = "float32" - data = [0.675792] + data = [2.19723] class Program_weight_tensor_data_3: name = "data_3" shape = [1] dtype = "float32" - data = [0.676071] + data = [-1.08555] class Program_weight_tensor_data_4: name = "data_4" shape = [1] dtype = "float32" - data = [0.658719] + data = [1.71785] class Program_weight_tensor_data_5: name = "data_5" shape = [1] dtype = "float32" - data = [0.620637] + data = [1.13331] class Program_weight_tensor_data_6: name = "data_6" shape = [1] dtype = "float32" - data = [0.637685] + data = [0.590431] class Program_weight_tensor_data_7: name = "data_7" shape = [1] dtype = "float32" - data = [0.619238] + data = [0.708919] class Program_weight_tensor_data_8: name = "data_8" shape = [1] dtype = "float32" - data = [0.773168] + data = [0.743773] class Program_weight_tensor_data_9: name = "data_9" shape = [1] dtype = "float32" - data = [0.635316] + data = [0.858462] class Program_weight_tensor_data_10: name = "data_10" shape = [1] dtype = "float32" - data = [0.623672] + data = [0.636941] class Program_weight_tensor_data_11: name = "data_11" shape = [1] dtype = "float32" - data = [0.620323] + data = [0.828404] class Program_weight_tensor_data_12: name = "data_12" shape = [1] dtype = "float32" - data = [0.621219] + data = [0.370716] class Program_weight_tensor_data_13: name = "data_13" shape = [1] dtype = "float32" - data = 
[0.624329] + data = [0.993379] class Program_weight_tensor_data_14: name = "data_14" shape = [1] dtype = "float32" - data = [0.733117] + data = [1.17653] class Program_weight_tensor_data_15: name = "data_15" shape = [1] dtype = "float32" - data = [0.557224] + data = [0.50449] class Program_weight_tensor_data_16: name = "data_16" shape = [1] dtype = "float32" - data = [0.579909] + data = [0.633712] class Program_weight_tensor_data_17: name = "data_17" shape = [1] dtype = "float32" - data = [0.70327] + data = [0.683349] class Program_weight_tensor_data_18: name = "data_18" - shape = [2, 3, 768, 768] + shape = [1024, 3072] dtype = "float32" - max_val = float("0.933333") - mean = float("0.380665") - std = float("0.139647") + min_val = float("-0.168174") + max_val = float("0.148061") + mean = float("-3.61898e-05") + std = float("0.018088") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [3072] + dtype = "float32" + min_val = float("-0.0404125") + max_val = float("0.0382792") + mean = float("0.000304818") + std = float("0.00730327") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0819269") + max_val = float("0.0975206") + mean = float("-8.50294e-06") + std = float("0.0176017") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [3072] + dtype = "float32" + min_val = float("-0.0329042") + max_val = float("0.0303536") + mean = float("-7.16758e-05") + std = float("0.00591918") + data = None + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.535505") + max_val = float("0.535918") + mean = float("-4.04094e-06") + std = float("0.0286549") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [3072] + dtype = "float32" + min_val = float("-0.144567") + max_val = float("0.151832") + mean = float("8.68538e-05") + std = float("0.028637") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.410122") + max_val = float("0.357528") + mean = float("-1.73642e-05") + std = float("0.0248621") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [3072] + dtype = "float32" + min_val = float("-0.118434") + max_val = float("0.157878") + mean = float("0.000208634") + std = float("0.025849") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [1, 3, 640, 640] + dtype = "float32" + max_val = float("1.0") + mean = float("0.467665") + std = float("0.176432") + data = None + + +class Program_weight_tensor_data_27: + name = "data_27" + shape = [1, 400, 1024] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.444201") + std = float("0.550168") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py index 331f6a597..ceff0732f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/model.py @@ -380,6 +380,426 @@ def forward( parameter_370, parameter_371, parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + 
parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + 
parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + parameter_738, + parameter_739, + parameter_740, + parameter_741, + parameter_742, + parameter_743, + parameter_744, + parameter_745, + parameter_746, + parameter_747, + parameter_748, + parameter_749, + parameter_750, + parameter_751, + parameter_752, + parameter_753, + parameter_754, + parameter_755, + parameter_756, + parameter_757, + parameter_758, + parameter_759, + parameter_760, + parameter_761, + parameter_762, + parameter_763, + parameter_764, + parameter_765, + parameter_766, + parameter_767, + parameter_768, + parameter_769, + parameter_770, + parameter_771, + parameter_772, + parameter_773, + parameter_774, + parameter_775, + parameter_776, + parameter_777, + parameter_778, + parameter_779, + parameter_780, + parameter_781, + parameter_782, + parameter_783, + parameter_784, + parameter_785, + parameter_786, + parameter_787, + parameter_788, + parameter_789, + parameter_790, + parameter_791, + parameter_792, data_0, data_1, data_2, @@ -399,14 +819,23 @@ def forward( data_16, data_17, data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + data_27, ): - # pd_op.conv2d: 
(2x32x-1x-1xf32) <- (2x3x-1x-1xf32, 32x3x3x3xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x3x-1x-1xf32, 32x3x3x3xf32) conv2d_0 = paddle._C_ops.conv2d( - data_18, parameter_372, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + data_26, parameter_792, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del data_18, parameter_372 + del data_26, parameter_792 - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) ( batch_norm__0, batch_norm__1, @@ -417,33 +846,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_0, - parameter_371, - parameter_370, - parameter_369, - parameter_368, - False, + parameter_791, + parameter_790, + parameter_789, + parameter_788, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_368, parameter_369, parameter_370, parameter_371 + del conv2d_0, parameter_788, parameter_789, parameter_790, parameter_791 - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_0 = paddle._C_ops.swish(batch_norm__0) + del batch_norm__0 - # pd_op.conv2d: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32, 32x32x3x3xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_367, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_0, parameter_787, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_367 + del parameter_787, swish_0 - # pd_op.batch_norm_: (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) + # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) ( batch_norm__6, batch_norm__7, @@ -454,33 +884,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_1, - parameter_366, - parameter_365, - parameter_364, - parameter_363, - False, + parameter_786, + parameter_785, + parameter_784, + parameter_783, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_363, parameter_364, parameter_365, parameter_366 + del conv2d_1, parameter_783, parameter_784, parameter_785, parameter_786 - # pd_op.swish: (2x32x-1x-1xf32) <- (2x32x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) + # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) + swish_1 = paddle._C_ops.swish(batch_norm__6) + del batch_norm__6 - # pd_op.conv2d: (2x64x-1x-1xf32) <- (2x32x-1x-1xf32, 64x32x3x3xf32) + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_1, parameter_782, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_362 + del parameter_782, swish_1 - # pd_op.batch_norm_: (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) + # pd_op.batch_norm_: (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) ( batch_norm__12, batch_norm__13, @@ -491,33 
+922,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_2, - parameter_361, - parameter_360, - parameter_359, - parameter_358, - False, + parameter_781, + parameter_780, + parameter_779, + parameter_778, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_358, parameter_359, parameter_360, parameter_361 + del conv2d_2, parameter_778, parameter_779, parameter_780, parameter_781 - # pd_op.swish: (2x64x-1x-1xf32) <- (2x64x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) + # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + swish_2 = paddle._C_ops.swish(batch_norm__12) + del batch_norm__12 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x64x-1x-1xf32, 96x64x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_357, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_2, parameter_777, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_357 + del parameter_777, swish_2 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__18, batch_norm__19, @@ -528,33 +960,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_3, - parameter_356, - parameter_355, - parameter_354, - parameter_353, - False, + parameter_776, + parameter_775, + parameter_774, + parameter_773, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_353, parameter_354, parameter_355, parameter_356 + del conv2d_3, parameter_773, parameter_774, parameter_775, parameter_776 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_3 = paddle._C_ops.swish(batch_norm__18) + del batch_norm__18 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_352, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_3, parameter_772, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_352 + del parameter_772 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__24, batch_norm__25, @@ -565,33 +998,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_4, - parameter_351, - parameter_350, - parameter_349, - parameter_348, - False, + parameter_771, + parameter_770, + parameter_769, + parameter_768, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_348, parameter_349, parameter_350, parameter_351 + del conv2d_4, parameter_768, parameter_769, parameter_770, parameter_771 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) + # pd_op.swish: (-1x48x-1x-1xf32) 
<- (-1x48x-1x-1xf32) + swish_4 = paddle._C_ops.swish(batch_norm__24) + del batch_norm__24 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x96x-1x-1xf32, 48x96x1x1xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_3, parameter_767, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_347 + del parameter_767, swish_3 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__30, batch_norm__31, @@ -602,33 +1036,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_5, - parameter_346, - parameter_345, - parameter_344, - parameter_343, - False, + parameter_766, + parameter_765, + parameter_764, + parameter_763, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_343, parameter_344, parameter_345, parameter_346 + del conv2d_5, parameter_763, parameter_764, parameter_765, parameter_766 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + del batch_norm__30 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_342, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_5, parameter_762, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_342 + del parameter_762 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__36, batch_norm__37, @@ -639,33 +1074,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_6, - parameter_341, - parameter_340, - parameter_339, - parameter_338, - False, + parameter_761, + parameter_760, + parameter_759, + parameter_758, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_338, parameter_339, parameter_340, parameter_341 + del conv2d_6, parameter_758, parameter_759, parameter_760, parameter_761 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_6 = paddle._C_ops.swish(batch_norm__36) + del batch_norm__36 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_6, parameter_757, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_337 + del parameter_757 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- 
(-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__42, batch_norm__43, @@ -676,30 +1112,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_7, - parameter_336, - parameter_335, - parameter_334, - parameter_333, - False, + parameter_756, + parameter_755, + parameter_754, + parameter_753, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_333, parameter_334, parameter_335, parameter_336 + del conv2d_7, parameter_753, parameter_754, parameter_755, parameter_756 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_332, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_6, parameter_752, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_332 + del parameter_752, swish_6 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__48, batch_norm__49, @@ -710,43 +1146,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_8, - parameter_331, - parameter_330, - parameter_329, - parameter_328, - False, + parameter_751, + parameter_750, + parameter_749, + parameter_748, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_328, parameter_329, parameter_330, parameter_331 + del conv2d_8, parameter_748, parameter_749, parameter_750, parameter_751 - # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del data_0 + del batch_norm__48, data_0 - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + del batch_norm__42, multiply_0 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_8 = paddle._C_ops.swish(add_0) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_7 = paddle._C_ops.swish(add_0) + del add_0 - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_1 = paddle._C_ops.add(swish_5, swish_7) + del swish_5, swish_7 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_327, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_1, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_327 + del parameter_747 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__54, batch_norm__55, @@ -757,33 +1196,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_9, - parameter_326, - parameter_325, - 
parameter_324, - parameter_323, - False, + parameter_746, + parameter_745, + parameter_744, + parameter_743, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_323, parameter_324, parameter_325, parameter_326 + del conv2d_9, parameter_743, parameter_744, parameter_745, parameter_746 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + del batch_norm__54 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) conv2d_10 = paddle._C_ops.conv2d( - swish_9, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_8, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_322 + del parameter_742 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__60, batch_norm__61, @@ -794,30 +1234,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_10, - parameter_321, - parameter_320, - parameter_319, - parameter_318, - False, + parameter_741, + parameter_740, + parameter_739, + parameter_738, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_318, parameter_319, parameter_320, parameter_321 + del conv2d_10, parameter_738, parameter_739, parameter_740, parameter_741 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_8, parameter_737, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_317 + del parameter_737, swish_8 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__66, batch_norm__67, @@ -828,43 +1268,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_11, - parameter_316, - parameter_315, - parameter_314, - parameter_313, - False, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_313, parameter_314, parameter_315, parameter_316 + del conv2d_11, parameter_733, parameter_734, parameter_735, parameter_736 - # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del data_1 + del batch_norm__66, data_1 - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + del batch_norm__60, multiply_1 - # pd_op.swish: 
(2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(add_2) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_9 = paddle._C_ops.swish(add_2) + del add_2 - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_10) + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_3 = paddle._C_ops.add(add_1, swish_9) + del add_1, swish_9 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_312, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_3, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_312 + del parameter_732 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__72, batch_norm__73, @@ -875,33 +1318,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_12, - parameter_311, - parameter_310, - parameter_309, - parameter_308, - False, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_308, parameter_309, parameter_310, parameter_311 + del conv2d_12, parameter_728, parameter_729, parameter_730, parameter_731 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(batch_norm__72) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + del batch_norm__72 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x3x3xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) conv2d_13 = paddle._C_ops.conv2d( - swish_11, parameter_307, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_10, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_307 + del parameter_727 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__78, batch_norm__79, @@ -912,30 +1356,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_13, - parameter_306, - parameter_305, - parameter_304, - parameter_303, - False, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_303, parameter_304, parameter_305, parameter_306 + del conv2d_13, parameter_723, parameter_724, parameter_725, parameter_726 - # pd_op.conv2d: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 48x48x1x1xf32) + # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_302, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_10, parameter_722, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_302 + del parameter_722, swish_10 - # pd_op.batch_norm_: (2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- 
(2x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) + # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) ( batch_norm__84, batch_norm__85, @@ -946,104 +1390,92 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_14, - parameter_301, - parameter_300, - parameter_299, - parameter_298, - False, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_298, parameter_299, parameter_300, parameter_301 + del conv2d_14, parameter_718, parameter_719, parameter_720, parameter_721 - # pd_op.multiply: (2x48x-1x-1xf32) <- (1xf32, 2x48x-1x-1xf32) + # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) - del data_2 + del batch_norm__84, data_2 - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + del batch_norm__78, multiply_2 - # pd_op.swish: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32) - swish_12 = paddle._C_ops.swish(add_4) + # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) + swish_11 = paddle._C_ops.swish(add_4) + del add_4 - # pd_op.add: (2x48x-1x-1xf32) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - add_5 = paddle._C_ops.add(add_3, swish_12) + # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + add_5 = paddle._C_ops.add(add_3, swish_11) + del add_3, swish_11 # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 + # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) + combine_0 = [swish_4, add_5] + del add_5, swish_4 - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # builtin.combine: ([2x48x-1x-1xf32, 2x48x-1x-1xf32]) <- (2x48x-1x-1xf32, 2x48x-1x-1xf32) - combine_0 = [swish_5, add_5] - - # pd_op.concat: (2x96x-1x-1xf32) <- ([2x48x-1x-1xf32, 2x48x-1x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) + # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_0, full_0) del combine_0 # pd_op.full_int_array: (2xi64) <- () full_int_array_0 = [2, 3] - # pd_op.assign: (2xi64) <- (2xi64) - assign_3 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_4 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_5 = full_int_array_0 + # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - # pd_op.mean: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + mean_0, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_297 + del mean_0, parameter_717 # pd_op.full_int_array: (4xi64) <- () full_int_array_1 = [1, -1, 1, 1] # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = 
paddle._C_ops.reshape(parameter_296, full_int_array_1) - del parameter_296 + reshape_0 = paddle._C_ops.reshape(parameter_716, full_int_array_1) + del parameter_716 - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + del conv2d_15, reshape_0 - # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) hardsigmoid_0 = paddle._C_ops.hardsigmoid( add_6, float("0.166667"), float("0.5") ) del add_6 - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) + del concat_2, hardsigmoid_0 - # pd_op.conv2d: (2x128x-1x-1xf32) <- (2x96x-1x-1xf32, 128x96x1x1xf32) + # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) conv2d_16 = paddle._C_ops.conv2d( - multiply_3, parameter_295, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + multiply_3, parameter_715, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_295 + del multiply_3, parameter_715 - # pd_op.batch_norm_: (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) + # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) ( batch_norm__90, batch_norm__91, @@ -1054,33 +1486,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_16, - parameter_294, - parameter_293, - parameter_292, - parameter_291, - False, + parameter_714, + parameter_713, + parameter_712, + parameter_711, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_291, parameter_292, parameter_293, parameter_294 + del conv2d_16, parameter_711, parameter_712, parameter_713, parameter_714 - # pd_op.swish: (2x128x-1x-1xf32) <- (2x128x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) + # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) + swish_12 = paddle._C_ops.swish(batch_norm__90) + del batch_norm__90 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x128x-1x-1xf32, 192x128x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) conv2d_17 = paddle._C_ops.conv2d( - swish_13, parameter_290, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_12, parameter_710, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_290 + del parameter_710, swish_12 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__96, batch_norm__97, @@ -1091,33 +1524,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_17, - parameter_289, - parameter_288, - parameter_287, - parameter_286, - False, + parameter_709, + parameter_708, + parameter_707, + parameter_706, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_286, parameter_287, parameter_288, parameter_289 + del 
conv2d_17, parameter_706, parameter_707, parameter_708, parameter_709 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_13 = paddle._C_ops.swish(batch_norm__96) + del batch_norm__96 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_285, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_13, parameter_705, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_285 + del parameter_705 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__102, batch_norm__103, @@ -1128,33 +1562,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_18, - parameter_284, - parameter_283, - parameter_282, - parameter_281, - False, + parameter_704, + parameter_703, + parameter_702, + parameter_701, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_281, parameter_282, parameter_283, parameter_284 + del conv2d_18, parameter_701, parameter_702, parameter_703, parameter_704 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__102) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_14 = paddle._C_ops.swish(batch_norm__102) + del batch_norm__102 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x192x-1x-1xf32, 96x192x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_280, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_13, parameter_700, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_280 + del parameter_700, swish_13 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__108, batch_norm__109, @@ -1165,33 +1600,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_19, - parameter_279, - parameter_278, - parameter_277, - parameter_276, - False, + parameter_699, + parameter_698, + parameter_697, + parameter_696, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_276, parameter_277, parameter_278, parameter_279 + del conv2d_19, parameter_696, parameter_697, parameter_698, parameter_699 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__108) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_15 = paddle._C_ops.swish(batch_norm__108) + del batch_norm__108 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_275, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_15, parameter_695, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del 
parameter_275 + del parameter_695 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__114, batch_norm__115, @@ -1202,33 +1638,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_20, - parameter_274, - parameter_273, - parameter_272, - parameter_271, - False, + parameter_694, + parameter_693, + parameter_692, + parameter_691, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_271, parameter_272, parameter_273, parameter_274 + del conv2d_20, parameter_691, parameter_692, parameter_693, parameter_694 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + del batch_norm__114 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_21 = paddle._C_ops.conv2d( - swish_17, parameter_270, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_16, parameter_690, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_270 + del parameter_690 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__120, batch_norm__121, @@ -1239,30 +1676,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_21, - parameter_269, - parameter_268, - parameter_267, - parameter_266, - False, + parameter_689, + parameter_688, + parameter_687, + parameter_686, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_266, parameter_267, parameter_268, parameter_269 + del conv2d_21, parameter_686, parameter_687, parameter_688, parameter_689 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_265, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_16, parameter_685, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_265 + del parameter_685, swish_16 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__126, batch_norm__127, @@ -1273,43 +1710,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_22, - parameter_264, - parameter_263, - parameter_262, - parameter_261, - False, + parameter_684, + parameter_683, + parameter_682, + parameter_681, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_261, parameter_262, parameter_263, parameter_264 + del conv2d_22, parameter_681, parameter_682, parameter_683, 
parameter_684 - # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del data_3 + del batch_norm__126, data_3 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + del batch_norm__120, multiply_4 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(add_7) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_17 = paddle._C_ops.swish(add_7) + del add_7 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_8 = paddle._C_ops.add(swish_16, swish_18) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_8 = paddle._C_ops.add(swish_15, swish_17) + del swish_15, swish_17 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_260, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_8, parameter_680, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_260 + del parameter_680 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__132, batch_norm__133, @@ -1320,33 +1760,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_23, - parameter_259, - parameter_258, - parameter_257, - parameter_256, - False, + parameter_679, + parameter_678, + parameter_677, + parameter_676, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_256, parameter_257, parameter_258, parameter_259 + del conv2d_23, parameter_676, parameter_677, parameter_678, parameter_679 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + del batch_norm__132 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_24 = paddle._C_ops.conv2d( - swish_19, parameter_255, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_18, parameter_675, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_255 + del parameter_675 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__138, batch_norm__139, @@ -1357,30 +1798,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_24, - parameter_254, - parameter_253, - parameter_252, - parameter_251, - False, + parameter_674, + parameter_673, + parameter_672, + parameter_671, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_251, parameter_252, parameter_253, parameter_254 + del conv2d_24, 
parameter_671, parameter_672, parameter_673, parameter_674 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_250, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_18, parameter_670, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_250 + del parameter_670, swish_18 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__144, batch_norm__145, @@ -1391,43 +1832,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_25, - parameter_249, - parameter_248, - parameter_247, - parameter_246, - False, + parameter_669, + parameter_668, + parameter_667, + parameter_666, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_246, parameter_247, parameter_248, parameter_249 + del conv2d_25, parameter_666, parameter_667, parameter_668, parameter_669 - # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) - del data_4 + del batch_norm__144, data_4 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + del batch_norm__138, multiply_5 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(add_9) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_19 = paddle._C_ops.swish(add_9) + del add_9 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_20) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_10 = paddle._C_ops.add(add_8, swish_19) + del add_8, swish_19 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_245, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_10, parameter_665, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_245 + del parameter_665 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__150, batch_norm__151, @@ -1438,33 +1882,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_26, - parameter_244, - parameter_243, - parameter_242, - parameter_241, - False, + parameter_664, + parameter_663, + parameter_662, + parameter_661, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_241, parameter_242, parameter_243, parameter_244 + del conv2d_26, parameter_661, parameter_662, parameter_663, parameter_664 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(batch_norm__150) + # pd_op.swish: 
(-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + del batch_norm__150 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_240, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_20, parameter_660, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_240 + del parameter_660 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__156, batch_norm__157, @@ -1475,30 +1920,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_27, - parameter_239, - parameter_238, - parameter_237, - parameter_236, - False, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_236, parameter_237, parameter_238, parameter_239 + del conv2d_27, parameter_656, parameter_657, parameter_658, parameter_659 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) conv2d_28 = paddle._C_ops.conv2d( - swish_21, parameter_235, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_20, parameter_655, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_235 + del parameter_655, swish_20 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__162, batch_norm__163, @@ -1509,43 +1954,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_28, - parameter_234, - parameter_233, - parameter_232, - parameter_231, - False, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_231, parameter_232, parameter_233, parameter_234 + del conv2d_28, parameter_651, parameter_652, parameter_653, parameter_654 - # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del data_5 + del batch_norm__162, data_5 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + del batch_norm__156, multiply_6 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(add_11) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_21 = paddle._C_ops.swish(add_11) + del add_11 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_22) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_12 = paddle._C_ops.add(add_10, swish_21) + del add_10, swish_21 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # 
pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_230, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_12, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_230 + del parameter_650 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__168, batch_norm__169, @@ -1556,33 +2004,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_29, - parameter_229, - parameter_228, - parameter_227, - parameter_226, - False, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_226, parameter_227, parameter_228, parameter_229 + del conv2d_29, parameter_646, parameter_647, parameter_648, parameter_649 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_22 = paddle._C_ops.swish(batch_norm__168) + del batch_norm__168 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_225, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_22, parameter_645, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_225 + del parameter_645 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__174, batch_norm__175, @@ -1593,30 +2042,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_30, - parameter_224, - parameter_223, - parameter_222, - parameter_221, - False, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_221, parameter_222, parameter_223, parameter_224 + del conv2d_30, parameter_641, parameter_642, parameter_643, parameter_644 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) conv2d_31 = paddle._C_ops.conv2d( - swish_23, parameter_220, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_22, parameter_640, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_220 + del parameter_640, swish_22 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__180, batch_norm__181, @@ -1627,43 +2076,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_31, - parameter_219, - parameter_218, - parameter_217, - parameter_216, - False, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + True, float("0.9"), float("1e-05"), 
"NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_216, parameter_217, parameter_218, parameter_219 + del conv2d_31, parameter_636, parameter_637, parameter_638, parameter_639 - # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) - del data_6 + del batch_norm__180, data_6 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + del batch_norm__174, multiply_7 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(add_13) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_23 = paddle._C_ops.swish(add_13) + del add_13 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_14 = paddle._C_ops.add(add_12, swish_24) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_14 = paddle._C_ops.add(add_12, swish_23) + del add_12, swish_23 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_215, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_14, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_215 + del parameter_635 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__186, batch_norm__187, @@ -1674,33 +2126,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_32, - parameter_214, - parameter_213, - parameter_212, - parameter_211, - False, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_211, parameter_212, parameter_213, parameter_214 + del conv2d_32, parameter_631, parameter_632, parameter_633, parameter_634 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_24 = paddle._C_ops.swish(batch_norm__186) + del batch_norm__186 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_210, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_24, parameter_630, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_210 + del parameter_630 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__192, batch_norm__193, @@ -1711,30 +2164,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_33, - parameter_209, - parameter_208, - parameter_207, - parameter_206, - False, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + 
True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_206, parameter_207, parameter_208, parameter_209 + del conv2d_33, parameter_626, parameter_627, parameter_628, parameter_629 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_205, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_24, parameter_625, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_205 + del parameter_625, swish_24 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__198, batch_norm__199, @@ -1745,43 +2198,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_34, - parameter_204, - parameter_203, - parameter_202, - parameter_201, - False, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_201, parameter_202, parameter_203, parameter_204 + del conv2d_34, parameter_621, parameter_622, parameter_623, parameter_624 - # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) - del data_7 + del batch_norm__198, data_7 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + del batch_norm__192, multiply_8 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_26 = paddle._C_ops.swish(add_15) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_25 = paddle._C_ops.swish(add_15) + del add_15 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_16 = paddle._C_ops.add(add_14, swish_26) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_16 = paddle._C_ops.add(add_14, swish_25) + del add_14, swish_25 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_200, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_16, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_200 + del parameter_620 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__204, batch_norm__205, @@ -1792,33 +2248,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_35, - parameter_199, - parameter_198, - parameter_197, - parameter_196, - False, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_196, 
parameter_197, parameter_198, parameter_199 + del conv2d_35, parameter_616, parameter_617, parameter_618, parameter_619 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_27 = paddle._C_ops.swish(batch_norm__204) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_26 = paddle._C_ops.swish(batch_norm__204) + del batch_norm__204 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x3x3xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_195, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_26, parameter_615, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_195 + del parameter_615 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__210, batch_norm__211, @@ -1829,30 +2286,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_36, - parameter_194, - parameter_193, - parameter_192, - parameter_191, - False, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_191, parameter_192, parameter_193, parameter_194 + del conv2d_36, parameter_611, parameter_612, parameter_613, parameter_614 - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) conv2d_37 = paddle._C_ops.conv2d( - swish_27, parameter_190, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_26, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_190 + del parameter_610, swish_26 - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( batch_norm__216, batch_norm__217, @@ -1863,75 +2320,81 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_37, - parameter_189, - parameter_188, - parameter_187, - parameter_186, - False, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_186, parameter_187, parameter_188, parameter_189 + del conv2d_37, parameter_606, parameter_607, parameter_608, parameter_609 - # pd_op.multiply: (2x96x-1x-1xf32) <- (1xf32, 2x96x-1x-1xf32) + # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) - del data_8 + del batch_norm__216, data_8 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + del batch_norm__210, multiply_9 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_28 = paddle._C_ops.swish(add_17) + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_27 = paddle._C_ops.swish(add_17) + del add_17 - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_18 = 
paddle._C_ops.add(add_16, swish_28) + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_18 = paddle._C_ops.add(add_16, swish_27) + del add_16, swish_27 - # builtin.combine: ([2x96x-1x-1xf32, 2x96x-1x-1xf32]) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - combine_1 = [swish_15, add_18] + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_1 = [swish_14, add_18] + del add_18, swish_14 - # pd_op.concat: (2x192x-1x-1xf32) <- ([2x96x-1x-1xf32, 2x96x-1x-1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_1, full_0) del combine_1 - # pd_op.mean: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + mean_1, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_185 + del mean_1, parameter_605 # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_184, full_int_array_1) - del parameter_184 + reshape_1 = paddle._C_ops.reshape(parameter_604, full_int_array_1) + del parameter_604 - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + del conv2d_38, reshape_1 - # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) hardsigmoid_1 = paddle._C_ops.hardsigmoid( add_19, float("0.166667"), float("0.5") ) del add_19 - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) + del concat_3, hardsigmoid_1 - # pd_op.conv2d: (2x256x-1x-1xf32) <- (2x192x-1x-1xf32, 256x192x1x1xf32) + # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) conv2d_39 = paddle._C_ops.conv2d( - multiply_10, parameter_183, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + multiply_10, parameter_603, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_183 + del multiply_10, parameter_603 - # pd_op.batch_norm_: (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) + # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) ( batch_norm__222, batch_norm__223, @@ -1942,33 +2405,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_39, - parameter_182, - parameter_181, - parameter_180, - parameter_179, - False, + parameter_602, + parameter_601, + parameter_600, + parameter_599, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_179, parameter_180, parameter_181, parameter_182 + del conv2d_39, parameter_599, parameter_600, 
parameter_601, parameter_602 - # pd_op.swish: (2x256x-1x-1xf32) <- (2x256x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__222) + # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) + swish_28 = paddle._C_ops.swish(batch_norm__222) + del batch_norm__222 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x256x-1x-1xf32, 384x256x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) conv2d_40 = paddle._C_ops.conv2d( - swish_29, parameter_178, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_28, parameter_598, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_178 + del parameter_598 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__228, batch_norm__229, @@ -1979,33 +2443,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_40, - parameter_177, - parameter_176, - parameter_175, - parameter_174, - False, + parameter_597, + parameter_596, + parameter_595, + parameter_594, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_174, parameter_175, parameter_176, parameter_177 + del conv2d_40, parameter_594, parameter_595, parameter_596, parameter_597 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_29 = paddle._C_ops.swish(batch_norm__228) + del batch_norm__228 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_173, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_29, parameter_593, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_173 + del parameter_593 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__234, batch_norm__235, @@ -2016,33 +2481,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_41, - parameter_172, - parameter_171, - parameter_170, - parameter_169, - False, + parameter_592, + parameter_591, + parameter_590, + parameter_589, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_169, parameter_170, parameter_171, parameter_172 + del conv2d_41, parameter_589, parameter_590, parameter_591, parameter_592 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_30 = paddle._C_ops.swish(batch_norm__234) + del batch_norm__234 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x384x-1x-1xf32, 192x384x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_168, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_29, parameter_588, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" ) - del parameter_168 + del parameter_588, swish_29 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__240, batch_norm__241, @@ -2053,33 +2519,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_42, - parameter_167, - parameter_166, - parameter_165, - parameter_164, - False, + parameter_587, + parameter_586, + parameter_585, + parameter_584, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_164, parameter_165, parameter_166, parameter_167 + del conv2d_42, parameter_584, parameter_585, parameter_586, parameter_587 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_31 = paddle._C_ops.swish(batch_norm__240) + del batch_norm__240 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_163, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_31, parameter_583, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_163 + del parameter_583 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__246, batch_norm__247, @@ -2090,33 +2557,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_43, - parameter_162, - parameter_161, - parameter_160, - parameter_159, - False, + parameter_582, + parameter_581, + parameter_580, + parameter_579, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_159, parameter_160, parameter_161, parameter_162 + del conv2d_43, parameter_579, parameter_580, parameter_581, parameter_582 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_32 = paddle._C_ops.swish(batch_norm__246) + del batch_norm__246 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_158, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_32, parameter_578, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_158 + del parameter_578 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__252, batch_norm__253, @@ -2127,30 +2595,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_44, - parameter_157, - parameter_156, - parameter_155, - parameter_154, - False, + parameter_577, + parameter_576, 
+ parameter_575, + parameter_574, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_154, parameter_155, parameter_156, parameter_157 + del conv2d_44, parameter_574, parameter_575, parameter_576, parameter_577 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_153, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_32, parameter_573, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_153 + del parameter_573, swish_32 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__258, batch_norm__259, @@ -2161,43 +2629,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_45, - parameter_152, - parameter_151, - parameter_150, - parameter_149, - False, + parameter_572, + parameter_571, + parameter_570, + parameter_569, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_149, parameter_150, parameter_151, parameter_152 + del conv2d_45, parameter_569, parameter_570, parameter_571, parameter_572 - # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del data_9 + del batch_norm__258, data_9 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + del batch_norm__252, multiply_11 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(add_20) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_33 = paddle._C_ops.swish(add_20) + del add_20 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_21 = paddle._C_ops.add(swish_32, swish_34) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_21 = paddle._C_ops.add(swish_31, swish_33) + del swish_31, swish_33 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_148, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_21, parameter_568, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_148 + del parameter_568 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__264, batch_norm__265, @@ -2208,33 +2679,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_46, - parameter_147, - parameter_146, - parameter_145, - parameter_144, - False, + parameter_567, + parameter_566, + parameter_565, + parameter_564, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, 
), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_144, parameter_145, parameter_146, parameter_147 + del conv2d_46, parameter_564, parameter_565, parameter_566, parameter_567 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(batch_norm__264) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_34 = paddle._C_ops.swish(batch_norm__264) + del batch_norm__264 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_47 = paddle._C_ops.conv2d( - swish_35, parameter_143, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_34, parameter_563, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_143 + del parameter_563 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__270, batch_norm__271, @@ -2245,30 +2717,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_47, - parameter_142, - parameter_141, - parameter_140, - parameter_139, - False, + parameter_562, + parameter_561, + parameter_560, + parameter_559, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_139, parameter_140, parameter_141, parameter_142 + del conv2d_47, parameter_559, parameter_560, parameter_561, parameter_562 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) conv2d_48 = paddle._C_ops.conv2d( - swish_35, parameter_138, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_34, parameter_558, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_138 + del parameter_558, swish_34 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__276, batch_norm__277, @@ -2279,43 +2751,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_48, - parameter_137, - parameter_136, - parameter_135, - parameter_134, - False, + parameter_557, + parameter_556, + parameter_555, + parameter_554, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_134, parameter_135, parameter_136, parameter_137 + del conv2d_48, parameter_554, parameter_555, parameter_556, parameter_557 - # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) - del data_10 + del batch_norm__276, data_10 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + del batch_norm__270, multiply_12 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(add_22) + 
# pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_35 = paddle._C_ops.swish(add_22) + del add_22 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_23 = paddle._C_ops.add(add_21, swish_36) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_23 = paddle._C_ops.add(add_21, swish_35) + del add_21, swish_35 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_133, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_23, parameter_553, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_133 + del parameter_553 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__282, batch_norm__283, @@ -2326,33 +2801,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_49, - parameter_132, - parameter_131, - parameter_130, - parameter_129, - False, + parameter_552, + parameter_551, + parameter_550, + parameter_549, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_129, parameter_130, parameter_131, parameter_132 + del conv2d_49, parameter_549, parameter_550, parameter_551, parameter_552 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_36 = paddle._C_ops.swish(batch_norm__282) + del batch_norm__282 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_50 = paddle._C_ops.conv2d( - swish_37, parameter_128, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_36, parameter_548, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_128 + del parameter_548 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__288, batch_norm__289, @@ -2363,30 +2839,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_50, - parameter_127, - parameter_126, - parameter_125, - parameter_124, - False, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_124, parameter_125, parameter_126, parameter_127 + del conv2d_50, parameter_544, parameter_545, parameter_546, parameter_547 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_123, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_36, parameter_543, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_123 + del parameter_543, swish_36 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, 
-1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__294, batch_norm__295, @@ -2397,43 +2873,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_51, - parameter_122, - parameter_121, - parameter_120, - parameter_119, - False, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_119, parameter_120, parameter_121, parameter_122 + del conv2d_51, parameter_539, parameter_540, parameter_541, parameter_542 - # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) - del data_11 + del batch_norm__294, data_11 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + del batch_norm__288, multiply_13 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(add_24) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_37 = paddle._C_ops.swish(add_24) + del add_24 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_25 = paddle._C_ops.add(add_23, swish_38) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_25 = paddle._C_ops.add(add_23, swish_37) + del add_23, swish_37 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_118, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_25, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_118 + del parameter_538 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__300, batch_norm__301, @@ -2444,33 +2923,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_52, - parameter_117, - parameter_116, - parameter_115, - parameter_114, - False, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_114, parameter_115, parameter_116, parameter_117 + del conv2d_52, parameter_534, parameter_535, parameter_536, parameter_537 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_39 = paddle._C_ops.swish(batch_norm__300) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_38 = paddle._C_ops.swish(batch_norm__300) + del batch_norm__300 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_53 = paddle._C_ops.conv2d( - swish_39, parameter_113, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_38, parameter_533, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" 
) - del parameter_113 + del parameter_533 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__306, batch_norm__307, @@ -2481,30 +2961,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_53, - parameter_112, - parameter_111, - parameter_110, - parameter_109, - False, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_109, parameter_110, parameter_111, parameter_112 + del conv2d_53, parameter_529, parameter_530, parameter_531, parameter_532 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) conv2d_54 = paddle._C_ops.conv2d( - swish_39, parameter_108, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_38, parameter_528, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_108 + del parameter_528, swish_38 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__312, batch_norm__313, @@ -2515,43 +2995,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_54, - parameter_107, - parameter_106, - parameter_105, - parameter_104, - False, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_104, parameter_105, parameter_106, parameter_107 + del conv2d_54, parameter_524, parameter_525, parameter_526, parameter_527 - # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) - del data_12 + del batch_norm__312, data_12 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + del batch_norm__306, multiply_14 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_40 = paddle._C_ops.swish(add_26) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_39 = paddle._C_ops.swish(add_26) + del add_26 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_27 = paddle._C_ops.add(add_25, swish_40) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_27 = paddle._C_ops.add(add_25, swish_39) + del add_25, swish_39 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_103, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_27, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_103 + del parameter_523 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 
192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__318, batch_norm__319, @@ -2562,33 +3045,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_55, - parameter_102, - parameter_101, - parameter_100, - parameter_99, - False, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_100, parameter_101, parameter_102, parameter_99 + del conv2d_55, parameter_519, parameter_520, parameter_521, parameter_522 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_41 = paddle._C_ops.swish(batch_norm__318) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_40 = paddle._C_ops.swish(batch_norm__318) + del batch_norm__318 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_56 = paddle._C_ops.conv2d( - swish_41, parameter_98, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_40, parameter_518, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_98 + del parameter_518 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__324, batch_norm__325, @@ -2599,30 +3083,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_56, - parameter_97, - parameter_96, - parameter_95, - parameter_94, - False, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_94, parameter_95, parameter_96, parameter_97 + del conv2d_56, parameter_514, parameter_515, parameter_516, parameter_517 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) conv2d_57 = paddle._C_ops.conv2d( - swish_41, parameter_93, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_40, parameter_513, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_93 + del parameter_513, swish_40 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__330, batch_norm__331, @@ -2633,43 +3117,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_57, - parameter_92, - parameter_91, - parameter_90, - parameter_89, - False, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_89, parameter_90, parameter_91, parameter_92 + del conv2d_57, parameter_509, parameter_510, parameter_511, parameter_512 - # pd_op.multiply: 
(2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) - del data_13 + del batch_norm__330, data_13 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + del batch_norm__324, multiply_15 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(add_28) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_41 = paddle._C_ops.swish(add_28) + del add_28 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_29 = paddle._C_ops.add(add_27, swish_42) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_29 = paddle._C_ops.add(add_27, swish_41) + del add_27, swish_41 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_88, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_29, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_88 + del parameter_508 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__336, batch_norm__337, @@ -2680,33 +3167,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_58, - parameter_87, - parameter_86, - parameter_85, - parameter_84, - False, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_84, parameter_85, parameter_86, parameter_87 + del conv2d_58, parameter_504, parameter_505, parameter_506, parameter_507 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(batch_norm__336) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_42 = paddle._C_ops.swish(batch_norm__336) + del batch_norm__336 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x3x3xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) conv2d_59 = paddle._C_ops.conv2d( - swish_43, parameter_83, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_42, parameter_503, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_83 + del parameter_503 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__342, batch_norm__343, @@ -2717,30 +3205,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_59, - parameter_82, - parameter_81, - parameter_80, - parameter_79, - False, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_79, parameter_80, parameter_81, 
parameter_82 + del conv2d_59, parameter_499, parameter_500, parameter_501, parameter_502 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) conv2d_60 = paddle._C_ops.conv2d( - swish_43, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_42, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_78 + del parameter_498, swish_42 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__348, batch_norm__349, @@ -2751,75 +3239,81 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_60, - parameter_77, - parameter_76, - parameter_75, - parameter_74, - False, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_74, parameter_75, parameter_76, parameter_77 + del conv2d_60, parameter_494, parameter_495, parameter_496, parameter_497 - # pd_op.multiply: (2x192x-1x-1xf32) <- (1xf32, 2x192x-1x-1xf32) + # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) - del data_14 + del batch_norm__348, data_14 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + del batch_norm__342, multiply_16 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_44 = paddle._C_ops.swish(add_30) + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_43 = paddle._C_ops.swish(add_30) + del add_30 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_31 = paddle._C_ops.add(add_29, swish_44) + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_31 = paddle._C_ops.add(add_29, swish_43) + del add_29, swish_43 - # builtin.combine: ([2x192x-1x-1xf32, 2x192x-1x-1xf32]) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - combine_2 = [swish_31, add_31] + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_2 = [swish_30, add_31] + del add_31, swish_30 - # pd_op.concat: (2x384x-1x-1xf32) <- ([2x192x-1x-1xf32, 2x192x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_2, full_0) del combine_2 - # pd_op.mean: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + mean_2, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_73 + del mean_2, parameter_493 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = 
paddle._C_ops.reshape(parameter_72, full_int_array_1) - del parameter_72 + reshape_2 = paddle._C_ops.reshape(parameter_492, full_int_array_1) + del parameter_492 - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + del conv2d_61, reshape_2 - # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) hardsigmoid_2 = paddle._C_ops.hardsigmoid( add_32, float("0.166667"), float("0.5") ) del add_32 - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) + del concat_4, hardsigmoid_2 - # pd_op.conv2d: (2x512x-1x-1xf32) <- (2x384x-1x-1xf32, 512x384x1x1xf32) + # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) conv2d_62 = paddle._C_ops.conv2d( - multiply_17, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + multiply_17, parameter_491, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_71 + del multiply_17, parameter_491 - # pd_op.batch_norm_: (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) + # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) ( batch_norm__354, batch_norm__355, @@ -2830,33 +3324,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_62, - parameter_70, - parameter_69, - parameter_68, - parameter_67, - False, + parameter_490, + parameter_489, + parameter_488, + parameter_487, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_67, parameter_68, parameter_69, parameter_70 + del conv2d_62, parameter_487, parameter_488, parameter_489, parameter_490 - # pd_op.swish: (2x512x-1x-1xf32) <- (2x512x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__354) + # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) + swish_44 = paddle._C_ops.swish(batch_norm__354) + del batch_norm__354 - # pd_op.conv2d: (2x768x-1x-1xf32) <- (2x512x-1x-1xf32, 768x512x3x3xf32) + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x512x-1x-1xf32, 768x512x3x3xf32) conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_66, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_44, parameter_486, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_66 + del parameter_486 - # pd_op.batch_norm_: (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( batch_norm__360, batch_norm__361, @@ -2867,33 +3362,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_63, - parameter_65, - parameter_64, - parameter_63, - parameter_62, - False, + parameter_485, + parameter_484, + parameter_483, + parameter_482, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_62, parameter_63, parameter_64, parameter_65 + del 
conv2d_63, parameter_482, parameter_483, parameter_484, parameter_485 - # pd_op.swish: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__360) + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_45 = paddle._C_ops.swish(batch_norm__360) + del batch_norm__360 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) conv2d_64 = paddle._C_ops.conv2d( - swish_46, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_45, parameter_481, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_61 + del parameter_481 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__366, batch_norm__367, @@ -2904,33 +3400,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_64, - parameter_60, - parameter_59, - parameter_58, - parameter_57, - False, + parameter_480, + parameter_479, + parameter_478, + parameter_477, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_57, parameter_58, parameter_59, parameter_60 + del conv2d_64, parameter_477, parameter_478, parameter_479, parameter_480 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__366) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_46 = paddle._C_ops.swish(batch_norm__366) + del batch_norm__366 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x768x-1x-1xf32, 384x768x1x1xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) conv2d_65 = paddle._C_ops.conv2d( - swish_46, parameter_56, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_45, parameter_476, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_56 + del parameter_476, swish_45 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__372, batch_norm__373, @@ -2941,33 +3438,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_65, - parameter_55, - parameter_54, - parameter_53, - parameter_52, - False, + parameter_475, + parameter_474, + parameter_473, + parameter_472, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_52, parameter_53, parameter_54, parameter_55 + del conv2d_65, parameter_472, parameter_473, parameter_474, parameter_475 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__372) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_47 = paddle._C_ops.swish(batch_norm__372) + del batch_norm__372 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_47, parameter_471, [1, 1], [1, 
1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_51 + del parameter_471 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__378, batch_norm__379, @@ -2978,33 +3476,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_66, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, + parameter_470, + parameter_469, + parameter_468, + parameter_467, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_47, parameter_48, parameter_49, parameter_50 + del conv2d_66, parameter_467, parameter_468, parameter_469, parameter_470 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_49 = paddle._C_ops.swish(batch_norm__378) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_48 = paddle._C_ops.swish(batch_norm__378) + del batch_norm__378 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) conv2d_67 = paddle._C_ops.conv2d( - swish_49, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_48, parameter_466, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_46 + del parameter_466 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__384, batch_norm__385, @@ -3015,30 +3514,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_67, - parameter_45, - parameter_44, - parameter_43, - parameter_42, - False, + parameter_465, + parameter_464, + parameter_463, + parameter_462, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_42, parameter_43, parameter_44, parameter_45 + del conv2d_67, parameter_462, parameter_463, parameter_464, parameter_465 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) conv2d_68 = paddle._C_ops.conv2d( - swish_49, parameter_41, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_48, parameter_461, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_41 + del parameter_461, swish_48 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__390, batch_norm__391, @@ -3049,43 +3548,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_68, - parameter_40, - parameter_39, - parameter_38, - parameter_37, - False, + parameter_460, + parameter_459, + parameter_458, + parameter_457, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_37, 
parameter_38, parameter_39, parameter_40 + del conv2d_68, parameter_457, parameter_458, parameter_459, parameter_460 - # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) - del data_15 + del batch_norm__390, data_15 - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + del batch_norm__384, multiply_18 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_50 = paddle._C_ops.swish(add_33) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_49 = paddle._C_ops.swish(add_33) + del add_33 - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_34 = paddle._C_ops.add(swish_48, swish_50) + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_34 = paddle._C_ops.add(swish_47, swish_49) + del swish_47, swish_49 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_36, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_34, parameter_456, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_36 + del parameter_456 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__396, batch_norm__397, @@ -3096,33 +3598,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_69, - parameter_35, - parameter_34, - parameter_33, - parameter_32, - False, + parameter_455, + parameter_454, + parameter_453, + parameter_452, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_32, parameter_33, parameter_34, parameter_35 + del conv2d_69, parameter_452, parameter_453, parameter_454, parameter_455 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_51 = paddle._C_ops.swish(batch_norm__396) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_50 = paddle._C_ops.swish(batch_norm__396) + del batch_norm__396 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) conv2d_70 = paddle._C_ops.conv2d( - swish_51, parameter_31, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_50, parameter_451, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_31 + del parameter_451 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__402, batch_norm__403, @@ -3133,30 +3636,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_70, - parameter_30, - parameter_29, - parameter_28, - parameter_27, - False, + parameter_450, + parameter_449, + parameter_448, + parameter_447, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda 
out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_27, parameter_28, parameter_29, parameter_30 + del conv2d_70, parameter_447, parameter_448, parameter_449, parameter_450 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) conv2d_71 = paddle._C_ops.conv2d( - swish_51, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_50, parameter_446, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_26 + del parameter_446, swish_50 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__408, batch_norm__409, @@ -3167,43 +3670,46 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_71, - parameter_25, - parameter_24, - parameter_23, - parameter_22, - False, + parameter_445, + parameter_444, + parameter_443, + parameter_442, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_22, parameter_23, parameter_24, parameter_25 + del conv2d_71, parameter_442, parameter_443, parameter_444, parameter_445 - # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) - del data_16 + del batch_norm__408, data_16 - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + del batch_norm__402, multiply_19 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_52 = paddle._C_ops.swish(add_35) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_51 = paddle._C_ops.swish(add_35) + del add_35 - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_36 = paddle._C_ops.add(add_34, swish_52) + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_36 = paddle._C_ops.add(add_34, swish_51) + del add_34, swish_51 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + add_36, parameter_441, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_21 + del parameter_441 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__414, batch_norm__415, @@ -3214,33 +3720,34 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_72, - parameter_20, - parameter_19, - parameter_18, - parameter_17, - False, + parameter_440, + parameter_439, + parameter_438, + parameter_437, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_17, parameter_18, 
parameter_19, parameter_20 + del conv2d_72, parameter_437, parameter_438, parameter_439, parameter_440 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_53 = paddle._C_ops.swish(batch_norm__414) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_52 = paddle._C_ops.swish(batch_norm__414) + del batch_norm__414 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x3x3xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) conv2d_73 = paddle._C_ops.conv2d( - swish_53, parameter_16, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + swish_52, parameter_436, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_16 + del parameter_436 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__420, batch_norm__421, @@ -3251,30 +3758,30 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_73, - parameter_15, - parameter_14, - parameter_13, - parameter_12, - False, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_12, parameter_13, parameter_14, parameter_15 + del conv2d_73, parameter_432, parameter_433, parameter_434, parameter_435 - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) conv2d_74 = paddle._C_ops.conv2d( - swish_53, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + swish_52, parameter_431, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_11 + del parameter_431, swish_52 - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__426, batch_norm__427, @@ -3285,75 +3792,82 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_74, - parameter_10, - parameter_9, - parameter_8, - parameter_7, - False, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + True, float("0.9"), float("1e-05"), "NCHW", - False, + True, False, ), lambda out: out if isinstance(out, (list, tuple)) else (out, None, None, None, None, None), ) - del parameter_10, parameter_7, parameter_8, parameter_9 + del conv2d_74, parameter_427, parameter_428, parameter_429, parameter_430 - # pd_op.multiply: (2x384x-1x-1xf32) <- (1xf32, 2x384x-1x-1xf32) + # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) - del data_17 + del batch_norm__426, data_17 - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + del batch_norm__420, multiply_20 - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_54 = paddle._C_ops.swish(add_37) + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_53 = paddle._C_ops.swish(add_37) + del add_37 - # pd_op.add: (2x384x-1x-1xf32) <- 
(2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_38 = paddle._C_ops.add(add_36, swish_54) + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_38 = paddle._C_ops.add(add_36, swish_53) + del add_36, swish_53 - # builtin.combine: ([2x384x-1x-1xf32, 2x384x-1x-1xf32]) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - combine_3 = [swish_47, add_38] + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_3 = [swish_46, add_38] + del add_38, swish_46 - # pd_op.concat: (2x768x-1x-1xf32) <- ([2x384x-1x-1xf32, 2x384x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_3, full_0) del combine_3 - # pd_op.mean: (2x768x1x1xf32) <- (2x768x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) + del full_int_array_0 - # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + mean_3, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_6 + del mean_3, parameter_426 # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) - del full_int_array_1, parameter_5 + reshape_3 = paddle._C_ops.reshape(parameter_425, full_int_array_1) + del parameter_425 - # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + del conv2d_75, reshape_3 - # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) hardsigmoid_3 = paddle._C_ops.hardsigmoid( add_39, float("0.166667"), float("0.5") ) del add_39 - # pd_op.multiply: (2x768x-1x-1xf32) <- (2x768x-1x-1xf32, 2x768x1x1xf32) - multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) + del concat_5, hardsigmoid_3 - # pd_op.conv2d: (2x1024x-1x-1xf32) <- (2x768x-1x-1xf32, 1024x768x1x1xf32) + # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x768x-1x-1xf32, 1024x768x1x1xf32) conv2d_76 = paddle._C_ops.conv2d( - multiply_21, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + multiply_21, parameter_424, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del parameter_4 + del multiply_21, parameter_424 - # pd_op.batch_norm_: (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) ( batch_norm__432, batch_norm__433, @@ -3364,677 +3878,4358 @@ def forward( ) = (lambda x, f: f(x))( paddle._C_ops.batch_norm( conv2d_76, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # 
pd_op.swish: (2x1024x-1x-1xf32) <- (2x1024x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__432) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_13, - add_14, - add_15, - add_16, - add_17, - add_18, - add_2, - add_20, - add_21, - add_22, - add_23, - add_24, - add_25, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_31, - add_33, - add_34, - add_35, - add_36, - add_37, - add_38, - add_4, - add_5, - add_7, - add_8, - add_9, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - 
batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - 
batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - batch_norm__48, - batch_norm__49, - batch_norm__5, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - batch_norm__6, - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__7, - batch_norm__70, - batch_norm__71, - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - batch_norm__78, - batch_norm__79, - batch_norm__8, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_2, - concat_3, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_76, - conv2d_8, - conv2d_9, - full_0, - full_int_array_0, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_10, - multiply_11, - multiply_12, - multiply_13, - multiply_14, - multiply_15, - multiply_16, - multiply_17, - multiply_18, - multiply_19, - multiply_2, - multiply_20, - multiply_21, - multiply_3, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - multiply_8, - multiply_9, - reshape_0, - reshape_1, - reshape_2, - reshape_3, - swish_1, - swish_10, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - 
swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_6, - swish_7, - swish_8, - swish_9, - ) - - return swish_0 + parameter_423, + parameter_422, + parameter_421, + parameter_420, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_76, parameter_420, parameter_421, parameter_422, parameter_423 + + # pd_op.swish: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) + swish_54 = paddle._C_ops.swish(batch_norm__432) + del batch_norm__432 + + # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(swish_54) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [1] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_0 + + # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) + shape64_1 = paddle._C_ops.shape64(swish_54) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [3] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_1 + + # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) + shape64_2 = paddle._C_ops.shape64(swish_54) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [4] + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_2 + + # pd_op.flatten: (-1x1024x-1xf32) <- (-1x1024x-1x-1xf32) + flatten_0 = paddle._C_ops.flatten(swish_54, 2, 3) + del swish_54 + + # pd_op.transpose: (-1x-1x1024xf32) <- (-1x1024x-1xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.add: (-1x400x1024xf32) <- (-1x-1x1024xf32, 1x400x1024xf32) + add_40 = paddle._C_ops.add(transpose_0, data_27) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [1024] + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_18, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_19, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_0 = paddle._C_ops.matmul(add_40, slice_3, False, False) + del slice_3 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_41 = paddle._C_ops.add(matmul_0, slice_4) + del matmul_0, slice_4 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_8 = [0, 0, 4, 256] + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_8) + del add_41 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_9 = [2048] + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_18, [1], full_int_array_7, full_int_array_9, [1], [] + 
) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_19, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_1 = paddle._C_ops.matmul(add_40, slice_5, False, False) + del add_40, slice_5 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_1, slice_6) + del matmul_1, slice_6 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_8) + del add_42 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_10 = [2147483647] + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_18, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_18 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_19, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_19 + + # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(transpose_0, slice_7, False, False) + del slice_7 + + # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) + add_43 = paddle._C_ops.add(matmul_2, slice_8) + del matmul_2, slice_8 + + # pd_op.reshape: (-1x-1x4x256xf32) <- (-1x-1x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_8) + del add_43 + + # pd_op.transpose: (-1x4x-1x256xf32) <- (-1x-1x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_3 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + del transpose_1, transpose_2 + + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_0 = paddle._C_ops.scale(matmul_3, full_1, float("0"), True) + del matmul_3 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_0 = paddle._C_ops.softmax(scale_0, -1) + del scale_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_0 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x-1x256xf32) + matmul_4 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + del dropout_0, transpose_3 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) + del matmul_4 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_3 = paddle._C_ops.shape64(transpose_4) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_3 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_11 = [0, 0, 1024] + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_7 = 
paddle._C_ops.reshape(transpose_4, full_int_array_11) + del transpose_4 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_5 = paddle._C_ops.matmul(reshape_7, parameter_419, False, False) + del parameter_419, reshape_7 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_44 = paddle._C_ops.add(matmul_5, parameter_418) + del matmul_5, parameter_418 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_44, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_44 + + # pd_op.add: (-1x400x1024xf32) <- (-1x-1x1024xf32, -1x400x1024xf32) + add_45 = paddle._C_ops.add(transpose_0, dropout_2) + del dropout_2, transpose_0 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_417, parameter_416, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_45, parameter_416, parameter_417 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_415, False, False) + del parameter_415 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_46 = paddle._C_ops.add(matmul_6, parameter_414) + del matmul_6, parameter_414 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_46, False) + del add_46 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_7 = paddle._C_ops.matmul(dropout_4, parameter_413, False, False) + del dropout_4, parameter_413 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_47 = paddle._C_ops.add(matmul_7, parameter_412) + del matmul_7, parameter_412 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) + del dropout_6, layer_norm_0 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_411, parameter_410, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_48, parameter_410, parameter_411 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) + add_49 = paddle._C_ops.add(layer_norm_3, data_27) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_20, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = 
paddle._C_ops.slice( + data_21, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_8 = paddle._C_ops.matmul(add_49, slice_10, False, False) + del slice_10 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_50 = paddle._C_ops.add(matmul_8, slice_11) + del matmul_8, slice_11 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_8) + del add_50 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_20, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_21, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_9 = paddle._C_ops.matmul(add_49, slice_12, False, False) + del add_49, slice_12 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_51 = paddle._C_ops.add(matmul_9, slice_13) + del matmul_9, slice_13 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_8) + del add_51 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_20, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_20 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_21, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_21 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(layer_norm_3, slice_14, False, False) + del slice_14 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_52 = paddle._C_ops.add(matmul_10, slice_15) + del matmul_10, slice_15 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_8) + del add_52 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_11 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + del transpose_5, transpose_6 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_11, full_1, float("0"), True) + del matmul_11 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_1 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_1 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) + matmul_12 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + del dropout_8, transpose_7 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) 
+ transpose_8 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) + del matmul_12 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_4 = paddle._C_ops.shape64(transpose_8) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_4 + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_11) + del transpose_8 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_13 = paddle._C_ops.matmul(reshape_11, parameter_409, False, False) + del parameter_409, reshape_11 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_13, parameter_408) + del matmul_13, parameter_408 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_53, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_53 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) + del dropout_10, layer_norm_3 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_54, parameter_407, parameter_406, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_54, parameter_406, parameter_407 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_14 = paddle._C_ops.matmul(layer_norm_6, parameter_405, False, False) + del parameter_405 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_55 = paddle._C_ops.add(matmul_14, parameter_404) + del matmul_14, parameter_404 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_55, False) + del add_55 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_15 = paddle._C_ops.matmul(dropout_12, parameter_403, False, False) + del dropout_12, parameter_403 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_56 = paddle._C_ops.add(matmul_15, parameter_402) + del matmul_15, parameter_402 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_56, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_56 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) + del dropout_14, layer_norm_6 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_57, parameter_401, parameter_400, float("1e-05"), 2 + ), + lambda out: out if 
isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_57, parameter_400, parameter_401 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) + add_58 = paddle._C_ops.add(layer_norm_9, data_27) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_22, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_23, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_16 = paddle._C_ops.matmul(add_58, slice_17, False, False) + del slice_17 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_59 = paddle._C_ops.add(matmul_16, slice_18) + del matmul_16, slice_18 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_8) + del add_59 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_22, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_23, [0], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_17 = paddle._C_ops.matmul(add_58, slice_19, False, False) + del add_58, slice_19 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_60 = paddle._C_ops.add(matmul_17, slice_20) + del matmul_17, slice_20 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_8) + del add_60 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_22, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_22 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_23, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_23 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(layer_norm_9, slice_21, False, False) + del slice_21 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_18, slice_22) + del matmul_18, slice_22 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_8) + del add_61 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_19 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + del transpose_10, transpose_9 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_19, full_1, float("0"), True) + del matmul_19 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_2 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 
1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_2 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) + matmul_20 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + del dropout_16, transpose_11 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) + del matmul_20 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_5 = paddle._C_ops.shape64(transpose_12) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_5 + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_11) + del transpose_12 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_21 = paddle._C_ops.matmul(reshape_15, parameter_399, False, False) + del parameter_399, reshape_15 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_62 = paddle._C_ops.add(matmul_21, parameter_398) + del matmul_21, parameter_398 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_62, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_62 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) + del dropout_18, layer_norm_9 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_63, parameter_397, parameter_396, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_63, parameter_396, parameter_397 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_395, False, False) + del parameter_395 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_64 = paddle._C_ops.add(matmul_22, parameter_394) + del matmul_22, parameter_394 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_64, False) + del add_64 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_23 = paddle._C_ops.matmul(dropout_20, parameter_393, False, False) + del dropout_20, parameter_393 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_65 = paddle._C_ops.add(matmul_23, parameter_392) + del matmul_23, parameter_392 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_65, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, 
tuple)) else (out, None), + ) + del add_65 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) + del dropout_22, layer_norm_12 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_66, parameter_391, parameter_390, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_66, parameter_390, parameter_391 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) + add_67 = paddle._C_ops.add(layer_norm_15, data_27) + del data_27 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_24 = paddle._C_ops.slice( + data_24, [1], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_25 = paddle._C_ops.slice( + data_25, [0], full_int_array_2, full_int_array_7, [1], [] + ) + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_24 = paddle._C_ops.matmul(add_67, slice_24, False, False) + del slice_24 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_68 = paddle._C_ops.add(matmul_24, slice_25) + del matmul_24, slice_25 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_8) + del add_68 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_26 = paddle._C_ops.slice( + data_24, [1], full_int_array_7, full_int_array_9, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_27 = paddle._C_ops.slice( + data_25, [0], full_int_array_7, full_int_array_9, [1], [] + ) + del full_int_array_7 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_25 = paddle._C_ops.matmul(add_67, slice_26, False, False) + del add_67, slice_26 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_69 = paddle._C_ops.add(matmul_25, slice_27) + del matmul_25, slice_27 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_8) + del add_69 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_28 = paddle._C_ops.slice( + data_24, [1], full_int_array_9, full_int_array_10, [1], [] + ) + del data_24 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_29 = paddle._C_ops.slice( + data_25, [0], full_int_array_9, full_int_array_10, [1], [] + ) + del data_25, full_int_array_10, full_int_array_9 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(layer_norm_15, slice_28, False, False) + del slice_28 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_70 = paddle._C_ops.add(matmul_26, slice_29) + del matmul_26, slice_29 + + # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_8) + del add_70, full_int_array_8 + + # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) + transpose_15 = 
paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) + matmul_27 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + del transpose_13, transpose_14 + + # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_27, full_1, float("0"), True) + del full_1, matmul_27 + + # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) + softmax_3 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del softmax_3 + + # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) + matmul_28 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + del dropout_24, transpose_15 + + # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_28, [0, 2, 1, 3]) + del matmul_28 + + # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) + shape64_6 = paddle._C_ops.shape64(transpose_16) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_30 = paddle._C_ops.slice( + shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_6 + + # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_11) + del full_int_array_11, transpose_16 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) + matmul_29 = paddle._C_ops.matmul(reshape_19, parameter_389, False, False) + del parameter_389, reshape_19 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_71 = paddle._C_ops.add(matmul_29, parameter_388) + del matmul_29, parameter_388 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_71, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_71 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) + del dropout_26, layer_norm_15 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_387, parameter_386, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_72, parameter_386, parameter_387 + + # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) + matmul_30 = paddle._C_ops.matmul(layer_norm_18, parameter_385, False, False) + del parameter_385 + + # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) + add_73 = paddle._C_ops.add(matmul_30, parameter_384) + del matmul_30, parameter_384 + + # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_73, False) + del add_73 + + # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, 
tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) + matmul_31 = paddle._C_ops.matmul(dropout_28, parameter_383, False, False) + del dropout_28, parameter_383 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) + add_74 = paddle._C_ops.add(matmul_31, parameter_382) + del matmul_31, parameter_382 + + # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_74, None, full_2, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_74, full_2 + + # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) + add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) + del dropout_30, layer_norm_18 + + # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_75, parameter_381, parameter_380, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del add_75, parameter_380, parameter_381 + + # pd_op.transpose: (-1x1024x400xf32) <- (-1x400x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full: (xi64) <- () + full_3 = paddle._C_ops.full( + [], float("1024"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_4 = [slice_0, full_3, slice_1, slice_2] + del full_3, slice_0, slice_1, slice_2 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_4, 0) + del combine_4 + + # pd_op.reshape: (-1x1024x-1x-1xf32) <- (-1x1024x400xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, stack_0) + del stack_0, transpose_17 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + reshape_20, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_379 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_378, + parameter_377, + parameter_376, + parameter_375, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_55 = paddle._C_ops.swish(batch_norm__438) + del batch_norm__438 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + reshape_20, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_374, reshape_20 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_373, + 
parameter_372, + parameter_371, + parameter_370, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_56 = paddle._C_ops.swish(batch_norm__444) + del batch_norm__444 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369, swish_56 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_57 = paddle._C_ops.swish(batch_norm__450) + del batch_norm__450 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_364 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_363, + parameter_362, + parameter_361, + parameter_360, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_359, swish_57 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_358, + parameter_357, + parameter_356, + parameter_355, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + del batch_norm__456, batch_norm__462 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_58 = paddle._C_ops.swish(add_76) + del add_76 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + 
conv2d_82 = paddle._C_ops.conv2d( + swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_354, swish_58 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_353, + parameter_352, + parameter_351, + parameter_350, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_59 = paddle._C_ops.swish(batch_norm__468) + del batch_norm__468 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_349 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_348, + parameter_347, + parameter_346, + parameter_345, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_344, swish_59 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_343, + parameter_342, + parameter_341, + parameter_340, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + del batch_norm__474, batch_norm__480 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_60 = paddle._C_ops.swish(add_77) + del add_77 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [5, 5] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_60, + full_int_array_12, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_12 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_13 = [9, 9] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_60, + full_int_array_13, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + 
False, + "EXPLICIT", + ) + del full_int_array_13 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_14 = [13, 13] + + # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_60, + full_int_array_14, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + del full_int_array_14 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_5 = [swish_60, pool2d_0, pool2d_1, pool2d_2] + del pool2d_0, pool2d_1, pool2d_2, swish_60 + + # pd_op.concat: (-1x1536x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1536x-1x-1xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_6, parameter_339 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_338, + parameter_337, + parameter_336, + parameter_335, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_61 = paddle._C_ops.swish(batch_norm__486) + del batch_norm__486 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_334, swish_61 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_333, + parameter_332, + parameter_331, + parameter_330, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_62 = paddle._C_ops.swish(batch_norm__492) + del batch_norm__492 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_329 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_328, + parameter_327, + parameter_326, + parameter_325, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: 
out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324, swish_62 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + del batch_norm__498, batch_norm__504 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_63 = paddle._C_ops.swish(add_78) + del add_78 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_6 = [swish_55, swish_63] + del swish_55, swish_63 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_7, parameter_319 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_64 = paddle._C_ops.swish(batch_norm__510) + del batch_norm__510 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (-1x384x-1x-1xf32) 
<- (-1x384x-1x-1xf32) + swish_65 = paddle._C_ops.swish(batch_norm__516) + del batch_norm__516 + + # pd_op.nearest_interp: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_65, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_65 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x512x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x512x-1x-1xf32) + combine_7 = [nearest_interp_0, swish_44] + del nearest_interp_0, swish_44 + + # pd_op.concat: (-1x896x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x512x-1x-1xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_66 = paddle._C_ops.swish(batch_norm__522) + del batch_norm__522 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_8, parameter_304 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_67 = paddle._C_ops.swish(batch_norm__528) + del batch_norm__528 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299, swish_67 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_93, parameter_295, parameter_296, 
parameter_297, parameter_298 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_68 = paddle._C_ops.swish(batch_norm__534) + del batch_norm__534 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289, swish_68 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + del batch_norm__540, batch_norm__546 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_69 = paddle._C_ops.swish(add_79) + del add_79 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284, swish_69 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_70 = paddle._C_ops.swish(batch_norm__552) + del batch_norm__552 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( 
+ batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274, swish_70 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + del batch_norm__558, batch_norm__564 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_71 = paddle._C_ops.swish(add_80) + del add_80 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269, swish_71 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_72 = paddle._C_ops.swish(batch_norm__570) + del batch_norm__570 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 + + # 
pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259, swish_72 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + del batch_norm__576, batch_norm__582 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_73 = paddle._C_ops.swish(add_81) + del add_81 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_8 = [swish_66, swish_73] + del swish_66, swish_73 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_9, parameter_254 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_74 = paddle._C_ops.swish(batch_norm__588) + del batch_norm__588 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_75 = paddle._C_ops.swish(batch_norm__594) + del batch_norm__594 + + # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- 
(-1x192x-1x-1xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_75, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + del swish_75 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) + combine_9 = [nearest_interp_1, swish_28] + del nearest_interp_1, swish_28 + + # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_76 = paddle._C_ops.swish(batch_norm__600) + del batch_norm__600 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_10, parameter_239 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_77 = paddle._C_ops.swish(batch_norm__606) + del batch_norm__606 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234, swish_77 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_78 = paddle._C_ops.swish(batch_norm__612) + del batch_norm__612 + + # pd_op.conv2d: 
(-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224, swish_78 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + del batch_norm__618, batch_norm__624 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_79 = paddle._C_ops.swish(add_82) + del add_82 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219, swish_79 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_80 = paddle._C_ops.swish(batch_norm__630) + del batch_norm__630 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_213, + parameter_212, + parameter_211, 
+ parameter_210, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209, swish_80 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_111, parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + del batch_norm__636, batch_norm__642 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_81 = paddle._C_ops.swish(add_83) + del add_83 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204, swish_81 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_82 = paddle._C_ops.swish(batch_norm__648) + del batch_norm__648 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194, swish_82 + + # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 
96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + del batch_norm__654, batch_norm__660 + + # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) + swish_83 = paddle._C_ops.swish(add_84) + del add_84 + + # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) + combine_10 = [swish_76, swish_83] + del swish_76, swish_83 + + # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_11, parameter_189 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_84 = paddle._C_ops.swish(batch_norm__666) + del batch_norm__666 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_85 = paddle._C_ops.swish(batch_norm__672) + del batch_norm__672 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) + combine_11 = [swish_85, swish_74] + del swish_74, swish_85 + + # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- 
(-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_86 = paddle._C_ops.swish(batch_norm__678) + del batch_norm__678 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_12, parameter_174 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_87 = paddle._C_ops.swish(batch_norm__684) + del batch_norm__684 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169, swish_87 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_88 = paddle._C_ops.swish(batch_norm__690) + del batch_norm__690 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_163, + parameter_162, 
+ parameter_161, + parameter_160, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159, swish_88 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + del batch_norm__696, batch_norm__702 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_89 = paddle._C_ops.swish(add_85) + del add_85 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154, swish_89 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_90 = paddle._C_ops.swish(batch_norm__708) + del batch_norm__708 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144, swish_90 + + 
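# --- Illustrative sketch (editorial note, not part of the generated graph) ---
# The surrounding ops repeat one unit over and over in this neck: a 3x3
# conv->batch_norm branch and a 1x1 conv->batch_norm branch computed from the
# same input, summed, then passed through swish (e.g. conv2d_120/conv2d_121 ->
# add_85 -> swish_89 above, and conv2d_123/conv2d_124 -> add_86 just after).
# Below is a minimal paddle.nn reading of that unit, assuming plain
# Conv2D/BatchNorm2D with the strides/paddings used by the generated conv2d
# calls and no conv bias (the graph adds no bias before batch_norm). The class
# and argument names are illustrative only and do not come from PaddleX.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class RepConvUnit(nn.Layer):
    def __init__(self, channels):
        super().__init__()
        # 3x3 branch: stride 1, padding 1 (matches the [1, 1], [1, 1] conv2d calls)
        self.conv3x3 = nn.Conv2D(channels, channels, 3, stride=1, padding=1, bias_attr=False)
        self.bn3x3 = nn.BatchNorm2D(channels)
        # 1x1 branch: stride 1, padding 0 (matches the [1, 1], [0, 0] conv2d calls)
        self.conv1x1 = nn.Conv2D(channels, channels, 1, stride=1, padding=0, bias_attr=False)
        self.bn1x1 = nn.BatchNorm2D(channels)

    def forward(self, x):
        # branch sum followed by swish, mirroring add_85 -> swish_89
        return F.swish(self.bn3x3(self.conv3x3(x)) + self.bn1x1(self.conv1x1(x)))


# usage sketch: a 192-channel feature map like the tensors in this part of the graph
# feat = paddle.randn([1, 192, 20, 20])
# out = RepConvUnit(192)(feat)
# -----------------------------------------------------------------------------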
# pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + del batch_norm__714, batch_norm__720 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_91 = paddle._C_ops.swish(add_86) + del add_86 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139, swish_91 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_92 = paddle._C_ops.swish(batch_norm__726) + del batch_norm__726 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129, swish_92 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + del batch_norm__732, batch_norm__738 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_93 = paddle._C_ops.swish(add_87) + del add_87 + + # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + combine_12 = [swish_86, swish_93] + del swish_86, swish_93 + + # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_13, parameter_124 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_94 = paddle._C_ops.swish(batch_norm__744) + del batch_norm__744 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_95 = paddle._C_ops.swish(batch_norm__750) + del batch_norm__750 + + # builtin.combine: ([-1x384x-1x-1xf32, -1x768x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x768x-1x-1xf32) + combine_13 = [swish_95, swish_64] + del swish_64, swish_95 + + # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x768x-1x-1xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + 
batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_96 = paddle._C_ops.swish(batch_norm__756) + del batch_norm__756 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_14, parameter_109 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_97 = paddle._C_ops.swish(batch_norm__762) + del batch_norm__762 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104, swish_97 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_98 = paddle._C_ops.swish(batch_norm__768) + del batch_norm__768 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_98, 
parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94, swish_98 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + del batch_norm__774, batch_norm__780 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_99 = paddle._C_ops.swish(add_88) + del add_88 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89, swish_99 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_100 = paddle._C_ops.swish(batch_norm__786) + del batch_norm__786 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79, swish_100 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + del batch_norm__792, batch_norm__798 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_101 = paddle._C_ops.swish(add_89) + del add_89 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74, swish_101 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_102 = paddle._C_ops.swish(batch_norm__804) + del batch_norm__804 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64, swish_102 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + del batch_norm__810, batch_norm__816 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_103 = paddle._C_ops.swish(add_90) + del add_90 + + # builtin.combine: 
([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + combine_14 = [swish_96, swish_103] + del swish_103, swish_96 + + # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) + concat_15 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del concat_15, parameter_59 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_104 = paddle._C_ops.swish(batch_norm__822) + del batch_norm__822 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_7 = paddle._C_ops.shape64(swish_104) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_31 = paddle._C_ops.slice( + shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_7 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_8 = paddle._C_ops.shape64(swish_104) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_32 = paddle._C_ops.slice( + shape64_8, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_8 + + # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) + shape64_9 = paddle._C_ops.shape64(swish_104) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_33 = paddle._C_ops.slice( + shape64_9, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_9 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_22 = paddle._C_ops.multiply(slice_32, slice_33) + del slice_32, slice_33 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_15 = [1, 1] + + # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) + pool2d_3 = paddle._C_ops.pool2d( + swish_104, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_142 = paddle._C_ops.conv2d( + pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_21 = paddle._C_ops.reshape(parameter_53, full_int_array_1) + del parameter_53 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_91 = paddle._C_ops.add(conv2d_142, reshape_21) + del conv2d_142, reshape_21 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_0 = paddle._C_ops.sigmoid(add_91) + del add_91 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_23 = paddle._C_ops.multiply(swish_104, sigmoid_0) + del sigmoid_0 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_143 = paddle._C_ops.conv2d( + multiply_23, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_23, parameter_52 + + # pd_op.batch_norm_: 
(-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__828, + batch_norm__829, + batch_norm__830, + batch_norm__831, + batch_norm__832, + batch_norm__833, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_143, + parameter_51, + parameter_50, + parameter_49, + parameter_48, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_105 = paddle._C_ops.swish(batch_norm__828) + del batch_norm__828 + + # pd_op.add: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x-1x-1xf32) + add_92 = paddle._C_ops.add(swish_105, swish_104) + del swish_105 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x768x-1x-1xf32, 10x768x3x3xf32) + conv2d_144 = paddle._C_ops.conv2d( + add_92, parameter_47, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_92, parameter_47 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_22 = paddle._C_ops.reshape(parameter_46, full_int_array_1) + del parameter_46 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_93 = paddle._C_ops.add(conv2d_144, reshape_22) + del conv2d_144, reshape_22 + + # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) + conv2d_145 = paddle._C_ops.conv2d( + pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_45, pool2d_3 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_23 = paddle._C_ops.reshape(parameter_44, full_int_array_1) + del parameter_44 + + # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) + add_94 = paddle._C_ops.add(conv2d_145, reshape_23) + del conv2d_145, reshape_23 + + # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) + sigmoid_1 = paddle._C_ops.sigmoid(add_94) + del add_94 + + # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) + multiply_24 = paddle._C_ops.multiply(swish_104, sigmoid_1) + del sigmoid_1, swish_104 + + # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) + conv2d_146 = paddle._C_ops.conv2d( + multiply_24, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_24, parameter_43 + + # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__834, + batch_norm__835, + batch_norm__836, + batch_norm__837, + batch_norm__838, + batch_norm__839, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_146, + parameter_42, + parameter_41, + parameter_40, + parameter_39, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 + + # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) + swish_106 = paddle._C_ops.swish(batch_norm__834) + del batch_norm__834 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x768x-1x-1xf32, 40x768x3x3xf32) + conv2d_147 = paddle._C_ops.conv2d( + swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_38, swish_106 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_24 = paddle._C_ops.reshape(parameter_37, full_int_array_1) + del 
parameter_37 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_95 = paddle._C_ops.add(conv2d_147, reshape_24) + del conv2d_147, reshape_24 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() + ) + + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("10"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_15 = [full_4, full_5, full_6, multiply_22] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_15, 0) + del combine_15 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_25 = paddle._C_ops.reshape(add_95, stack_1) + del add_95, stack_1 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_18 = paddle._C_ops.transpose(reshape_25, [0, 2, 3, 1]) + del reshape_25 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_4 = paddle._C_ops.softmax(transpose_18, 1) + del transpose_18 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_148 = paddle._C_ops.conv2d( + softmax_4, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_4 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_3) + del conv2d_148 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_2 = paddle._C_ops.sigmoid(add_93) + del add_93 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_16 = [full_4, full_6, multiply_22] + del multiply_22 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_16, 0) + del combine_16 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_26 = paddle._C_ops.reshape(sigmoid_2, stack_2) + del sigmoid_2, stack_2 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_10 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_34 = paddle._C_ops.slice( + shape64_10, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del shape64_10 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_11 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_35 = paddle._C_ops.slice( + shape64_11, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del shape64_11 + + # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) + shape64_12 = paddle._C_ops.shape64(swish_94) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_36 = paddle._C_ops.slice( + shape64_12, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del shape64_12 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_25 = paddle._C_ops.multiply(slice_35, slice_36) + del slice_35, slice_36 + + # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) + pool2d_4 = paddle._C_ops.pool2d( + swish_94, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + "NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_149 = paddle._C_ops.conv2d( + pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_35 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_27 = paddle._C_ops.reshape(parameter_34, full_int_array_1) + del 
parameter_34 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_96 = paddle._C_ops.add(conv2d_149, reshape_27) + del conv2d_149, reshape_27 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_3 = paddle._C_ops.sigmoid(add_96) + del add_96 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_26 = paddle._C_ops.multiply(swish_94, sigmoid_3) + del sigmoid_3 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_150 = paddle._C_ops.conv2d( + multiply_26, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_26, parameter_33 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__840, + batch_norm__841, + batch_norm__842, + batch_norm__843, + batch_norm__844, + batch_norm__845, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_150, + parameter_32, + parameter_31, + parameter_30, + parameter_29, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_107 = paddle._C_ops.swish(batch_norm__840) + del batch_norm__840 + + # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) + add_97 = paddle._C_ops.add(swish_107, swish_94) + del swish_107 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x384x-1x-1xf32, 10x384x3x3xf32) + conv2d_151 = paddle._C_ops.conv2d( + add_97, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_97, parameter_28 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_28 = paddle._C_ops.reshape(parameter_27, full_int_array_1) + del parameter_27 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_98 = paddle._C_ops.add(conv2d_151, reshape_28) + del conv2d_151, reshape_28 + + # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) + conv2d_152 = paddle._C_ops.conv2d( + pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_26, pool2d_4 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_29 = paddle._C_ops.reshape(parameter_25, full_int_array_1) + del parameter_25 + + # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) + add_99 = paddle._C_ops.add(conv2d_152, reshape_29) + del conv2d_152, reshape_29 + + # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) + sigmoid_4 = paddle._C_ops.sigmoid(add_99) + del add_99 + + # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) + multiply_27 = paddle._C_ops.multiply(swish_94, sigmoid_4) + del sigmoid_4, swish_94 + + # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) + conv2d_153 = paddle._C_ops.conv2d( + multiply_27, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_27, parameter_24 + + # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__846, + batch_norm__847, + batch_norm__848, + batch_norm__849, + batch_norm__850, + batch_norm__851, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_153, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + True, + float("0.9"), + float("1e-05"), + 
"NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) + swish_108 = paddle._C_ops.swish(batch_norm__846) + del batch_norm__846 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x384x-1x-1xf32, 40x384x3x3xf32) + conv2d_154 = paddle._C_ops.conv2d( + swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19, swish_108 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_30 = paddle._C_ops.reshape(parameter_18, full_int_array_1) + del parameter_18 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_100 = paddle._C_ops.add(conv2d_154, reshape_30) + del conv2d_154, reshape_30 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_17 = [full_4, full_5, full_6, multiply_25] + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_3 = paddle._C_ops.stack(combine_17, 0) + del combine_17 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_31 = paddle._C_ops.reshape(add_100, stack_3) + del add_100, stack_3 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_19 = paddle._C_ops.transpose(reshape_31, [0, 2, 3, 1]) + del reshape_31 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_5 = paddle._C_ops.softmax(transpose_19, 1) + del transpose_19 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_155 = paddle._C_ops.conv2d( + softmax_5, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del softmax_5 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_3) + del conv2d_155 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_5 = paddle._C_ops.sigmoid(add_98) + del add_98 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_18 = [full_4, full_6, multiply_25] + del multiply_25 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_18, 0) + del combine_18 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_32 = paddle._C_ops.reshape(sigmoid_5, stack_4) + del sigmoid_5, stack_4 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_13 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_37 = paddle._C_ops.slice( + shape64_13, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, shape64_13 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_14 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_38 = paddle._C_ops.slice( + shape64_14, [0], full_int_array_4, full_int_array_5, [1], [0] + ) + del full_int_array_4, shape64_14 + + # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) + shape64_15 = paddle._C_ops.shape64(swish_84) + + # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) + slice_39 = paddle._C_ops.slice( + shape64_15, [0], full_int_array_5, full_int_array_6, [1], [0] + ) + del full_int_array_5, full_int_array_6, shape64_15 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_28 = paddle._C_ops.multiply(slice_38, slice_39) + del slice_38, slice_39 + + # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) + pool2d_5 = paddle._C_ops.pool2d( + swish_84, + full_int_array_15, + [1, 1], + [0, 0], + False, + True, + 
"NCHW", + "avg", + False, + True, + "EXPLICIT", + ) + del full_int_array_15 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_156 = paddle._C_ops.conv2d( + pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_17 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_33 = paddle._C_ops.reshape(parameter_16, full_int_array_1) + del parameter_16 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_101 = paddle._C_ops.add(conv2d_156, reshape_33) + del conv2d_156, reshape_33 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_6 = paddle._C_ops.sigmoid(add_101) + del add_101 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_29 = paddle._C_ops.multiply(swish_84, sigmoid_6) + del sigmoid_6 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_157 = paddle._C_ops.conv2d( + multiply_29, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_29, parameter_15 + + # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__852, + batch_norm__853, + batch_norm__854, + batch_norm__855, + batch_norm__856, + batch_norm__857, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_157, + parameter_14, + parameter_13, + parameter_12, + parameter_11, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_109 = paddle._C_ops.swish(batch_norm__852) + del batch_norm__852 + + # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) + add_102 = paddle._C_ops.add(swish_109, swish_84) + del swish_109 + + # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x192x-1x-1xf32, 10x192x3x3xf32) + conv2d_158 = paddle._C_ops.conv2d( + add_102, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del add_102, parameter_10 + + # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) + reshape_34 = paddle._C_ops.reshape(parameter_9, full_int_array_1) + del parameter_9 + + # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) + add_103 = paddle._C_ops.add(conv2d_158, reshape_34) + del conv2d_158, reshape_34 + + # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) + conv2d_159 = paddle._C_ops.conv2d( + pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_8, pool2d_5 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_35 = paddle._C_ops.reshape(parameter_7, full_int_array_1) + del parameter_7 + + # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) + add_104 = paddle._C_ops.add(conv2d_159, reshape_35) + del conv2d_159, reshape_35 + + # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) + sigmoid_7 = paddle._C_ops.sigmoid(add_104) + del add_104 + + # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) + multiply_30 = paddle._C_ops.multiply(swish_84, sigmoid_7) + del sigmoid_7, swish_84 + + # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) + conv2d_160 = paddle._C_ops.conv2d( + multiply_30, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del multiply_30, parameter_6 + + # pd_op.batch_norm_: 
(-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__858, + batch_norm__859, + batch_norm__860, + batch_norm__861, + batch_norm__862, + batch_norm__863, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_160, + parameter_5, + parameter_4, + parameter_3, + parameter_2, + True, + float("0.9"), + float("1e-05"), + "NCHW", + True, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 + + # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) + swish_110 = paddle._C_ops.swish(batch_norm__858) + del batch_norm__858 + + # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x192x-1x-1xf32, 40x192x3x3xf32) + conv2d_161 = paddle._C_ops.conv2d( + swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_1, swish_110 + + # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) + reshape_36 = paddle._C_ops.reshape(parameter_0, full_int_array_1) + del full_int_array_1, parameter_0 + + # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) + add_105 = paddle._C_ops.add(conv2d_161, reshape_36) + del conv2d_161, reshape_36 + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_19 = [full_4, full_5, full_6, multiply_28] + del full_5 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_5 = paddle._C_ops.stack(combine_19, 0) + del combine_19 + + # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) + reshape_37 = paddle._C_ops.reshape(add_105, stack_5) + del add_105, stack_5 + + # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) + transpose_20 = paddle._C_ops.transpose(reshape_37, [0, 2, 3, 1]) + del reshape_37 + + # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) + softmax_6 = paddle._C_ops.softmax(transpose_20, 1) + del transpose_20 + + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) + conv2d_162 = paddle._C_ops.conv2d( + softmax_6, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_36, softmax_6 + + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_3) + del conv2d_162, full_int_array_3 + + # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) + sigmoid_8 = paddle._C_ops.sigmoid(add_103) + del add_103 + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_20 = [full_4, full_6, multiply_28] + del full_4, full_6, multiply_28 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_6 = paddle._C_ops.stack(combine_20, 0) + del combine_20 + + # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) + reshape_38 = paddle._C_ops.reshape(sigmoid_8, stack_6) + del sigmoid_8, stack_6 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32]) <- (-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32) + combine_21 = [reshape_26, reshape_32, reshape_38] + del reshape_26, reshape_32, reshape_38 + + # pd_op.concat: (-1x10x-1xf32) <- ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_21, full_7) + del combine_21, full_7 + + # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) + combine_22 = [squeeze_0, squeeze_1, squeeze_2] + del squeeze_0, squeeze_1, squeeze_2 
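# --- Illustrative aside (editor's sketch; not part of the generated program or of this patch) ---
# For each of the three feature levels above, the 40-channel regression map is
# reshaped to (N, 4, 10, H*W), transposed so the 10 bins sit on one axis,
# soft-maxed over those bins, reduced with the shared 1x10x1x1 conv
# (parameter_36), and squeezed to an (N, H*W, 4) tensor. If, as in typical
# PP-YOLOE-style heads, that projection kernel holds the bin indices 0..9
# (an assumption -- only the weight's metadata is visible in this patch),
# the sequence computes the expected bin index of each 10-way distribution.
# A minimal pure-Python sketch of that reduction for a single box side:
import math

def expected_bin(logits, bin_values=tuple(range(10))):
    # Numerically stable softmax over the 10 bin logits, then the
    # probability-weighted sum of the bin values (the distribution's mean).
    m = max(logits)
    exps = [math.exp(v - m) for v in logits]
    total = sum(exps)
    return sum((e / total) * b for e, b in zip(exps, bin_values))
# --- end aside ---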
+ + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_22, full_0) + del combine_22, full_0 + + return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py index f5b3eb50d..c934d8553 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_4/weight_meta.py @@ -1,220 +1,214 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" - shape = [1024] + shape = [40] dtype = "float32" - min_val = float("-3.7594") - max_val = float("-0.734446") - mean = float("-2.18722") - std = float("0.428724") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_1: name = "parameter_1" - shape = [1024] + shape = [40, 192, 3, 3] dtype = "float32" - min_val = float("1.61913") - max_val = float("4.44136") - mean = float("3.08039") - std = float("0.25425") + min_val = float("-0.200868") + max_val = float("0.205409") + mean = float("1.50903e-08") + std = float("0.0117333") data = None class Program_weight_tensor_parameter_2: name = "parameter_2" - shape = [1024] + shape = [192] dtype = "float32" - min_val = float("0.00437889") - max_val = float("0.0223173") - mean = float("0.00872826") - std = float("0.00173091") + min_val = float("-0.0496614") + max_val = float("0.233447") + mean = float("0.0551649") + std = float("0.0442642") data = None class Program_weight_tensor_parameter_3: name = "parameter_3" - shape = [1024] + shape = [192] dtype = "float32" - min_val = float("-0.140825") - max_val = float("0.123559") - mean = float("-0.0558655") - std = float("0.0304319") + min_val = float("0.83662") + max_val = float("1.62777") + mean = float("1.22152") + std = float("0.145383") data = None class Program_weight_tensor_parameter_4: name = "parameter_4" - shape = [1024, 768, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.0427729") - max_val = float("0.0695573") - mean = float("-0.000391863") - std = float("0.00403905") + min_val = float("0.00517837") + max_val = float("5.37372") + mean = float("0.472226") + std = float("0.745068") data = None class Program_weight_tensor_parameter_5: name = "parameter_5" - shape = [768] + shape = [192] dtype = "float32" - min_val = float("-0.014467") - max_val = float("0.00131875") - mean = float("-0.000761015") - std = float("0.00204153") + min_val = float("-8.59248") + max_val = float("10.1229") + mean = float("0.104301") + std = float("2.84169") data = None class Program_weight_tensor_parameter_6: name = "parameter_6" - shape = [768, 768, 1, 1] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0787519") - max_val = float("0.135878") - mean = float("-0.000282851") - std = float("0.0016268") + min_val = float("-0.10201") + max_val = float("0.139453") + mean = float("-0.000771004") + std = float("0.0121333") data = None class Program_weight_tensor_parameter_7: name = "parameter_7" - shape = [384] + shape = [192] dtype = "float32" - min_val = float("-1.77402") - max_val = float("0.318654") - mean = float("-0.310798") - std = float("0.291236") + min_val = float("-0.00864008") + max_val = float("0.0157539") + mean = float("-0.000141521") + std = float("0.00408479") data = None class Program_weight_tensor_parameter_8: name = "parameter_8" - shape = [384] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("0.188523") - max_val = 
float("1.82125") - mean = float("0.609641") - std = float("0.262607") + min_val = float("-0.0103206") + max_val = float("0.0183671") + mean = float("-0.000260799") + std = float("0.00202219") data = None class Program_weight_tensor_parameter_9: name = "parameter_9" - shape = [384] + shape = [10] dtype = "float32" - min_val = float("5.23505e-05") - max_val = float("0.00107224") - mean = float("0.000231574") - std = float("0.000114041") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_10: name = "parameter_10" - shape = [384] + shape = [10, 192, 3, 3] dtype = "float32" - min_val = float("-0.0913202") - max_val = float("0.0738998") - mean = float("0.0208982") - std = float("0.0171143") + min_val = float("-1156.18") + max_val = float("134.34") + mean = float("-22.0743") + std = float("92.6504") data = None class Program_weight_tensor_parameter_11: name = "parameter_11" - shape = [384, 384, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.020214") - max_val = float("0.0255239") - mean = float("-0.000361046") - std = float("0.00271802") + min_val = float("-83.5158") + max_val = float("85.9358") + mean = float("2.58239") + std = float("26.6989") data = None class Program_weight_tensor_parameter_12: name = "parameter_12" - shape = [384] + shape = [192] dtype = "float32" - min_val = float("-1.77402") - max_val = float("0.318949") - mean = float("-0.310739") - std = float("0.291254") + min_val = float("-14.174") + max_val = float("24.9743") + mean = float("-0.553124") + std = float("5.93549") data = None class Program_weight_tensor_parameter_13: name = "parameter_13" - shape = [384] + shape = [192] dtype = "float32" - min_val = float("0.334653") - max_val = float("2.60511") - mean = float("1.02603") - std = float("0.290253") + min_val = float("2.28992") + max_val = float("16941700.0") + mean = float("535823.0") + std = float("1889860.0") data = None class Program_weight_tensor_parameter_14: name = "parameter_14" - shape = [384] + shape = [192] dtype = "float32" - min_val = float("0.000593004") - max_val = float("0.00596735") - mean = float("0.00204404") - std = float("0.00073718") + min_val = float("-12545.3") + max_val = float("7938.65") + mean = float("-494.603") + std = float("2450.0") data = None class Program_weight_tensor_parameter_15: name = "parameter_15" - shape = [384] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.227759") - max_val = float("0.112191") - mean = float("0.0216656") - std = float("0.0367499") + min_val = float("-139.267") + max_val = float("101.02") + mean = float("-0.0994834") + std = float("4.18182") data = None class Program_weight_tensor_parameter_16: name = "parameter_16" - shape = [384, 384, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0190584") - max_val = float("0.0259183") - mean = float("-4.76047e-05") - std = float("0.0017617") + min_val = float("-11.0196") + max_val = float("7.12431") + mean = float("-0.172978") + std = float("1.77779") data = None class Program_weight_tensor_parameter_17: name = "parameter_17" - shape = [384] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-2.58225") - max_val = float("0.0329867") - mean = float("-1.56843") - std = float("0.415962") + min_val = float("-21.8633") + max_val = float("14.1281") + mean = float("-0.061946") + std = float("0.961227") data = None class Program_weight_tensor_parameter_18: name = "parameter_18" - shape = [384] + shape = [40] dtype = "float32" - min_val = float("0.52002") - max_val = 
float("1.64429") - mean = float("1.13566") - std = float("0.149475") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_19: name = "parameter_19" - shape = [384] + shape = [40, 384, 3, 3] dtype = "float32" - min_val = float("0.0404637") - max_val = float("0.23") - mean = float("0.0856589") - std = float("0.0233439") + min_val = float("-0.125302") + max_val = float("0.130317") + mean = float("3.40515e-09") + std = float("0.00680734") data = None @@ -222,21 +216,21 @@ class Program_weight_tensor_parameter_20: name = "parameter_20" shape = [384] dtype = "float32" - min_val = float("-0.9033") - max_val = float("0.385661") - mean = float("-0.25773") - std = float("0.123402") + min_val = float("-0.00280162") + max_val = float("0.100804") + mean = float("0.0327571") + std = float("0.0175274") data = None class Program_weight_tensor_parameter_21: name = "parameter_21" - shape = [384, 384, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0213207") - max_val = float("0.0602371") - mean = float("-0.000201951") - std = float("0.00231308") + min_val = float("0.99914") + max_val = float("1.24063") + mean = float("1.1068") + std = float("0.0410271") data = None @@ -244,10 +238,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [384] dtype = "float32" - min_val = float("-1.93927") - max_val = float("0.644474") - mean = float("-0.574884") - std = float("0.358671") + min_val = float("0.00351367") + max_val = float("0.502965") + mean = float("0.0475644") + std = float("0.0560398") data = None @@ -255,21 +249,21 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [384] dtype = "float32" - min_val = float("0.163873") - max_val = float("2.06585") - mean = float("0.562027") - std = float("0.227242") + min_val = float("-0.20324") + max_val = float("0.163306") + mean = float("-0.0211934") + std = float("0.0480836") data = None class Program_weight_tensor_parameter_24: name = "parameter_24" - shape = [384] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("7.65946e-05") - max_val = float("0.00146603") - mean = float("0.000260801") - std = float("0.000127115") + min_val = float("-0.0595396") + max_val = float("0.0686159") + mean = float("-0.000519359") + std = float("0.00405674") data = None @@ -277,10 +271,10 @@ class Program_weight_tensor_parameter_25: name = "parameter_25" shape = [384] dtype = "float32" - min_val = float("-0.047045") - max_val = float("0.0685866") - mean = float("0.0209886") - std = float("0.0147269") + min_val = float("-0.00283386") + max_val = float("0.00804157") + mean = float("4.09459e-05") + std = float("0.00164843") data = None @@ -288,32 +282,30 @@ class Program_weight_tensor_parameter_26: name = "parameter_26" shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0246209") - max_val = float("0.0323191") - mean = float("-0.00038074") - std = float("0.00249603") + min_val = float("-0.00192633") + max_val = float("0.00573757") + mean = float("-3.72961e-05") + std = float("0.000619199") data = None class Program_weight_tensor_parameter_27: name = "parameter_27" - shape = [384] + shape = [10] dtype = "float32" - min_val = float("-1.93932") - max_val = float("0.645257") - mean = float("-0.574812") - std = float("0.358742") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_28: name = "parameter_28" - shape = [384] + shape = [10, 384, 3, 3] dtype = "float32" - min_val = float("0.58315") - max_val = float("2.15642") - 
mean = float("1.08405") - std = float("0.255745") + min_val = float("-4.37321") + max_val = float("0.452128") + mean = float("-0.17109") + std = float("0.298945") data = None @@ -321,10 +313,10 @@ class Program_weight_tensor_parameter_29: name = "parameter_29" shape = [384] dtype = "float32" - min_val = float("0.0013599") - max_val = float("0.00896475") - mean = float("0.00289759") - std = float("0.000860853") + min_val = float("-0.129645") + max_val = float("0.537367") + mean = float("0.252723") + std = float("0.116707") data = None @@ -332,21 +324,21 @@ class Program_weight_tensor_parameter_30: name = "parameter_30" shape = [384] dtype = "float32" - min_val = float("-0.0821017") - max_val = float("0.146645") - mean = float("0.0336192") - std = float("0.0396185") + min_val = float("0.994167") + max_val = float("1.41211") + mean = float("1.16997") + std = float("0.0589217") data = None class Program_weight_tensor_parameter_31: name = "parameter_31" - shape = [384, 384, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.017236") - max_val = float("0.0310435") - mean = float("-8.47071e-05") - std = float("0.00189556") + min_val = float("0.190338") + max_val = float("627.738") + mean = float("13.2438") + std = float("39.4458") data = None @@ -354,21 +346,21 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [384] dtype = "float32" - min_val = float("-2.39591") - max_val = float("0.845752") - mean = float("-1.40539") - std = float("0.360596") + min_val = float("-6.47831") + max_val = float("2.53274") + mean = float("-0.258943") + std = float("0.862987") data = None class Program_weight_tensor_parameter_33: name = "parameter_33" - shape = [384] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("0.453112") - max_val = float("1.91948") - mean = float("1.16636") - std = float("0.14802") + min_val = float("-0.37398") + max_val = float("0.93716") + mean = float("-0.0050287") + std = float("0.034184") data = None @@ -376,285 +368,279 @@ class Program_weight_tensor_parameter_34: name = "parameter_34" shape = [384] dtype = "float32" - min_val = float("0.0300933") - max_val = float("0.138775") - mean = float("0.0607843") - std = float("0.0156745") + min_val = float("-0.168767") + max_val = float("0.0326032") + mean = float("0.00033161") + std = float("0.0172771") data = None class Program_weight_tensor_parameter_35: name = "parameter_35" - shape = [384] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.749117") - max_val = float("0.836662") - mean = float("-0.184023") - std = float("0.110734") + min_val = float("-0.13765") + max_val = float("0.0271847") + mean = float("-0.000469481") + std = float("0.00707315") data = None class Program_weight_tensor_parameter_36: name = "parameter_36" - shape = [384, 384, 3, 3] + shape = [1, 10, 1, 1] dtype = "float32" - min_val = float("-0.0259567") - max_val = float("0.0450409") - mean = float("-0.000200361") - std = float("0.00234146") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_37: name = "parameter_37" - shape = [384] + shape = [40] dtype = "float32" - min_val = float("-1.8762") - max_val = float("0.453243") - mean = float("-0.485339") - std = float("0.376467") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_38: name = "parameter_38" - shape = [384] + shape = [40, 768, 3, 3] dtype = "float32" - min_val = float("0.0773354") - max_val = float("2.11925") - mean = float("0.441956") - std = 
float("0.217663") + min_val = float("-0.0666495") + max_val = float("0.0422192") + mean = float("4.22006e-10") + std = float("0.004326") data = None class Program_weight_tensor_parameter_39: name = "parameter_39" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("6.01092e-05") - max_val = float("0.00133181") - mean = float("0.000306226") - std = float("0.000149439") + min_val = float("-0.0211301") + max_val = float("0.0557583") + mean = float("0.00994542") + std = float("0.0117475") data = None class Program_weight_tensor_parameter_40: name = "parameter_40" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("-0.0475575") - max_val = float("0.071738") - mean = float("0.0252212") - std = float("0.0164931") + min_val = float("1.00683") + max_val = float("1.19995") + mean = float("1.06456") + std = float("0.0224702") data = None class Program_weight_tensor_parameter_41: name = "parameter_41" - shape = [384, 384, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.0207296") - max_val = float("0.0301957") - mean = float("-0.000479918") - std = float("0.0021441") + min_val = float("0.00536813") + max_val = float("4.48397") + mean = float("0.148381") + std = float("0.22041") data = None class Program_weight_tensor_parameter_42: name = "parameter_42" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("-1.87654") - max_val = float("0.453653") - mean = float("-0.485263") - std = float("0.376563") + min_val = float("-0.2391") + max_val = float("0.308194") + mean = float("-0.0115961") + std = float("0.071089") data = None class Program_weight_tensor_parameter_43: name = "parameter_43" - shape = [384] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("0.521871") - max_val = float("2.22439") - mean = float("1.05289") - std = float("0.260102") + min_val = float("-0.0316374") + max_val = float("0.030148") + mean = float("-0.000219685") + std = float("0.00274506") data = None class Program_weight_tensor_parameter_44: name = "parameter_44" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("0.00177683") - max_val = float("0.00907934") - mean = float("0.0039468") - std = float("0.00118085") + min_val = float("-0.00401297") + max_val = float("0.00319868") + mean = float("6.33231e-05") + std = float("0.000831526") data = None class Program_weight_tensor_parameter_45: name = "parameter_45" - shape = [384] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-0.209845") - max_val = float("0.180608") - mean = float("0.0397265") - std = float("0.04484") + min_val = float("-0.00264943") + max_val = float("0.00247676") + mean = float("4.17753e-06") + std = float("0.000249163") data = None class Program_weight_tensor_parameter_46: name = "parameter_46" - shape = [384, 384, 3, 3] + shape = [10] dtype = "float32" - min_val = float("-0.0177497") - max_val = float("0.036737") - mean = float("-9.16795e-05") - std = float("0.00200706") + min_val = float("0") + max_val = float("0.5") data = None class Program_weight_tensor_parameter_47: name = "parameter_47" - shape = [384] + shape = [10, 768, 3, 3] dtype = "float32" - min_val = float("-2.15635") - max_val = float("0.418177") - mean = float("-1.36712") - std = float("0.277468") + min_val = float("-3.67573") + max_val = float("0.47477") + mean = float("-0.108836") + std = float("0.235747") data = None class Program_weight_tensor_parameter_48: name = "parameter_48" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("0.706134") - max_val = float("1.6357") - 
mean = float("1.14301") - std = float("0.101583") + min_val = float("-0.0929864") + max_val = float("0.260814") + mean = float("0.112641") + std = float("0.0586929") data = None class Program_weight_tensor_parameter_49: name = "parameter_49" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("0.0216198") - max_val = float("0.138164") - mean = float("0.046727") - std = float("0.0129664") + min_val = float("0.978014") + max_val = float("1.2276") + mean = float("1.06585") + std = float("0.0280238") data = None class Program_weight_tensor_parameter_50: name = "parameter_50" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("-0.694464") - max_val = float("0.208372") - mean = float("-0.129315") - std = float("0.0938846") + min_val = float("0.0583214") + max_val = float("179.503") + mean = float("8.00532") + std = float("16.3237") data = None class Program_weight_tensor_parameter_51: name = "parameter_51" - shape = [384, 384, 3, 3] + shape = [768] dtype = "float32" - min_val = float("-0.0274071") - max_val = float("0.0448565") - mean = float("-0.000158418") - std = float("0.00223888") + min_val = float("-3.13569") + max_val = float("2.18092") + mean = float("-0.125865") + std = float("0.472168") data = None class Program_weight_tensor_parameter_52: name = "parameter_52" - shape = [384] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("-2.9232") - max_val = float("1.66463") - mean = float("-0.760372") - std = float("0.643546") + min_val = float("-0.353861") + max_val = float("0.643743") + mean = float("-0.00192258") + std = float("0.0278211") data = None class Program_weight_tensor_parameter_53: name = "parameter_53" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("0.953224") - max_val = float("2.91794") - mean = float("1.86322") - std = float("0.27618") + min_val = float("-0.0411452") + max_val = float("0.024878") + mean = float("-2.52466e-05") + std = float("0.00729347") data = None class Program_weight_tensor_parameter_54: name = "parameter_54" - shape = [384] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("0.00275058") - max_val = float("0.01231") - mean = float("0.00516812") - std = float("0.00130652") + min_val = float("-0.0172894") + max_val = float("0.0205533") + mean = float("-6.97847e-05") + std = float("0.00177578") data = None class Program_weight_tensor_parameter_55: name = "parameter_55" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("-0.249794") - max_val = float("0.145992") - mean = float("0.0635442") - std = float("0.0326689") + min_val = float("-0.148835") + max_val = float("0.366507") + mean = float("0.109619") + std = float("0.0784726") data = None class Program_weight_tensor_parameter_56: name = "parameter_56" - shape = [384, 768, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.0371909") - max_val = float("0.0509187") - mean = float("-0.000727671") - std = float("0.00522845") + min_val = float("0.857119") + max_val = float("1.27215") + mean = float("1.05537") + std = float("0.0395249") data = None class Program_weight_tensor_parameter_57: name = "parameter_57" - shape = [384] + shape = [768] dtype = "float32" - min_val = float("-2.2471") - max_val = float("0.681977") - mean = float("-0.777142") - std = float("0.472903") + min_val = float("2.1452") + max_val = float("15653.5") + mean = float("111.411") + std = float("608.25") data = None class Program_weight_tensor_parameter_58: name = "parameter_58" - shape = [384] + shape = [768] dtype = "float32" - min_val 
= float("0.965853") - max_val = float("2.89359") - mean = float("2.09705") - std = float("0.305433") + min_val = float("-4.93063") + max_val = float("14.695") + mean = float("-0.317615") + std = float("1.44253") data = None class Program_weight_tensor_parameter_59: name = "parameter_59" - shape = [384] + shape = [768, 768, 1, 1] dtype = "float32" - min_val = float("0.000799495") - max_val = float("0.00402168") - mean = float("0.00198534") - std = float("0.0004402") + min_val = float("-0.547982") + max_val = float("1.31648") + mean = float("-0.00228986") + std = float("0.031089") data = None @@ -662,131 +648,131 @@ class Program_weight_tensor_parameter_60: name = "parameter_60" shape = [384] dtype = "float32" - min_val = float("-0.0161372") - max_val = float("0.0799072") - mean = float("0.0349754") - std = float("0.0164707") + min_val = float("-0.142261") + max_val = float("0.0236224") + mean = float("-0.0251959") + std = float("0.0245419") data = None class Program_weight_tensor_parameter_61: name = "parameter_61" - shape = [384, 768, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.0815437") - max_val = float("0.0646253") - mean = float("-0.000388202") - std = float("0.00359255") + min_val = float("0.923585") + max_val = float("1.06744") + mean = float("0.990921") + std = float("0.0255245") data = None class Program_weight_tensor_parameter_62: name = "parameter_62" - shape = [768] + shape = [384] dtype = "float32" - min_val = float("-2.40199") - max_val = float("0.642394") - mean = float("-0.908374") - std = float("0.339302") + min_val = float("0.0622324") + max_val = float("493.49") + mean = float("19.4794") + std = float("46.1366") data = None class Program_weight_tensor_parameter_63: name = "parameter_63" - shape = [768] + shape = [384] dtype = "float32" - min_val = float("0.530297") - max_val = float("1.90727") - mean = float("0.919687") - std = float("0.149179") + min_val = float("-1.45105") + max_val = float("2.69314") + mean = float("0.0290829") + std = float("0.54902") data = None class Program_weight_tensor_parameter_64: name = "parameter_64" - shape = [768] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("0.00625688") - max_val = float("0.056665") - mean = float("0.0153943") - std = float("0.00459564") + min_val = float("-0.528789") + max_val = float("0.475083") + mean = float("0.000636297") + std = float("0.0382416") data = None class Program_weight_tensor_parameter_65: name = "parameter_65" - shape = [768] + shape = [384] dtype = "float32" - min_val = float("-0.235652") - max_val = float("0.254751") - mean = float("0.0393354") - std = float("0.0563281") + min_val = float("-0.142261") + max_val = float("0.0236224") + mean = float("-0.0251959") + std = float("0.0245419") data = None class Program_weight_tensor_parameter_66: name = "parameter_66" - shape = [768, 512, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0378314") - max_val = float("0.0543419") - mean = float("-9.75912e-05") - std = float("0.00233888") + min_val = float("0.956171") + max_val = float("1.12145") + mean = float("1.01722") + std = float("0.0276561") data = None class Program_weight_tensor_parameter_67: name = "parameter_67" - shape = [512] + shape = [384] dtype = "float32" - min_val = float("-3.38998") - max_val = float("1.66652") - mean = float("-1.16179") - std = float("0.513719") + min_val = float("0.329317") + max_val = float("3828.52") + mean = float("104.542") + std = float("264.784") data = None class Program_weight_tensor_parameter_68: name = "parameter_68" 
- shape = [512] + shape = [384] dtype = "float32" - min_val = float("0.523767") - max_val = float("1.67712") - mean = float("1.11122") - std = float("0.148184") + min_val = float("-8.30504") + max_val = float("7.29752") + mean = float("-0.046446") + std = float("1.50992") data = None class Program_weight_tensor_parameter_69: name = "parameter_69" - shape = [512] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("0.00233511") - max_val = float("0.0167819") - mean = float("0.00761769") - std = float("0.00204484") + min_val = float("-0.216923") + max_val = float("0.225482") + mean = float("-0.000140598") + std = float("0.0129397") data = None class Program_weight_tensor_parameter_70: name = "parameter_70" - shape = [512] + shape = [384] dtype = "float32" - min_val = float("-0.172067") - max_val = float("0.0981938") - mean = float("-0.0487285") - std = float("0.0396677") + min_val = float("-0.186619") + max_val = float("0.0253096") + mean = float("-0.0403406") + std = float("0.0301782") data = None class Program_weight_tensor_parameter_71: name = "parameter_71" - shape = [512, 384, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.202262") - max_val = float("0.184296") - mean = float("-0.000573477") - std = float("0.00792306") + min_val = float("0.930641") + max_val = float("1.15944") + mean = float("1.0145") + std = float("0.0373315") data = None @@ -794,582 +780,582 @@ class Program_weight_tensor_parameter_72: name = "parameter_72" shape = [384] dtype = "float32" - min_val = float("-0.0100703") - max_val = float("0.00138871") - mean = float("-0.00295173") - std = float("0.00227127") + min_val = float("2.04808") + max_val = float("21266.9") + mean = float("589.689") + std = float("2078.57") data = None class Program_weight_tensor_parameter_73: name = "parameter_73" - shape = [384, 384, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.202729") - max_val = float("0.140205") - mean = float("-0.002055") - std = float("0.00490701") + min_val = float("-24.5893") + max_val = float("30.4673") + mean = float("0.404088") + std = float("5.12896") data = None class Program_weight_tensor_parameter_74: name = "parameter_74" - shape = [192] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-1.97045") - max_val = float("0.409864") - mean = float("-0.348766") - std = float("0.333488") + min_val = float("-0.338033") + max_val = float("0.42615") + mean = float("0.000530178") + std = float("0.0222656") data = None class Program_weight_tensor_parameter_75: name = "parameter_75" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.0528864") - max_val = float("2.15987") - mean = float("0.581255") - std = float("0.419833") + min_val = float("-0.119217") + max_val = float("0.0124743") + mean = float("-0.0405258") + std = float("0.0215146") data = None class Program_weight_tensor_parameter_76: name = "parameter_76" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("8.99309e-05") - max_val = float("0.00136239") - mean = float("0.00045405") - std = float("0.000217854") + min_val = float("0.923648") + max_val = float("1.04368") + mean = float("0.989661") + std = float("0.0170925") data = None class Program_weight_tensor_parameter_77: name = "parameter_77" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.0345233") - max_val = float("0.0542267") - mean = float("0.00534646") - std = float("0.0149125") + min_val = float("0.0605594") + max_val = float("2562.44") + mean = float("30.3737") + std = float("153.669") data 
= None class Program_weight_tensor_parameter_78: name = "parameter_78" - shape = [192, 192, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.023487") - max_val = float("0.0581182") - mean = float("-0.000339748") - std = float("0.0040934") + min_val = float("-2.00406") + max_val = float("9.78589") + mean = float("0.0692858") + std = float("0.922895") data = None class Program_weight_tensor_parameter_79: name = "parameter_79" - shape = [192] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-1.97037") - max_val = float("0.410702") - mean = float("-0.34863") - std = float("0.333546") + min_val = float("-0.359065") + max_val = float("0.736107") + mean = float("0.00142685") + std = float("0.0416064") data = None class Program_weight_tensor_parameter_80: name = "parameter_80" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.372338") - max_val = float("2.70216") - mean = float("1.20181") - std = float("0.493699") + min_val = float("-0.119217") + max_val = float("0.0124743") + mean = float("-0.0405258") + std = float("0.0215146") data = None class Program_weight_tensor_parameter_81: name = "parameter_81" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.00122552") - max_val = float("0.0156617") - mean = float("0.00510239") - std = float("0.00188493") + min_val = float("0.940925") + max_val = float("1.10279") + mean = float("1.01574") + std = float("0.0223556") data = None class Program_weight_tensor_parameter_82: name = "parameter_82" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.097226") - max_val = float("0.146797") - mean = float("0.0203808") - std = float("0.0428675") + min_val = float("1.38621") + max_val = float("4812.1") + mean = float("159.271") + std = float("503.316") data = None class Program_weight_tensor_parameter_83: name = "parameter_83" - shape = [192, 192, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0289902") - max_val = float("0.0378296") - mean = float("-0.000154473") - std = float("0.00313532") + min_val = float("-5.88263") + max_val = float("15.8533") + mean = float("0.140376") + std = float("2.40236") data = None class Program_weight_tensor_parameter_84: name = "parameter_84" - shape = [192] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-2.89065") - max_val = float("-0.176734") - mean = float("-1.31453") - std = float("0.40113") + min_val = float("-0.226706") + max_val = float("0.208603") + mean = float("0.000307493") + std = float("0.0144029") data = None class Program_weight_tensor_parameter_85: name = "parameter_85" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.696524") - max_val = float("2.09454") - mean = float("1.17918") - std = float("0.169868") + min_val = float("-0.130515") + max_val = float("0.0142189") + mean = float("-0.0397866") + std = float("0.0225046") data = None class Program_weight_tensor_parameter_86: name = "parameter_86" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.0626606") - max_val = float("0.335775") - mean = float("0.130864") - std = float("0.0432639") + min_val = float("0.862983") + max_val = float("1.11419") + mean = float("1.01096") + std = float("0.0331669") data = None class Program_weight_tensor_parameter_87: name = "parameter_87" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-2.50771") - max_val = float("1.70173") - mean = float("-0.202725") - std = float("0.37842") + min_val = float("2.23371") + max_val = float("4709.64") + mean = 
float("260.249") + std = float("508.379") data = None class Program_weight_tensor_parameter_88: name = "parameter_88" - shape = [192, 192, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0331927") - max_val = float("0.0456383") - mean = float("-0.000188198") - std = float("0.00374306") + min_val = float("-13.5512") + max_val = float("16.4082") + mean = float("0.360129") + std = float("3.43025") data = None class Program_weight_tensor_parameter_89: name = "parameter_89" - shape = [192] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-1.9404") - max_val = float("0.513024") - mean = float("-0.279434") - std = float("0.321452") + min_val = float("-0.191356") + max_val = float("0.178253") + mean = float("0.000526337") + std = float("0.0137294") data = None class Program_weight_tensor_parameter_90: name = "parameter_90" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.0454025") - max_val = float("1.77027") - mean = float("0.444331") - std = float("0.305722") + min_val = float("-0.113425") + max_val = float("0.0127038") + mean = float("-0.0408835") + std = float("0.0214936") data = None class Program_weight_tensor_parameter_91: name = "parameter_91" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("7.44986e-05") - max_val = float("0.00137158") - mean = float("0.000401239") - std = float("0.000216438") + min_val = float("0.934591") + max_val = float("1.02911") + mean = float("0.987626") + std = float("0.0127672") data = None class Program_weight_tensor_parameter_92: name = "parameter_92" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.029272") - max_val = float("0.0469656") - mean = float("0.00801306") - std = float("0.0116412") + min_val = float("0.0466938") + max_val = float("166.308") + mean = float("8.70658") + std = float("15.7428") data = None class Program_weight_tensor_parameter_93: name = "parameter_93" - shape = [192, 192, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.0234926") - max_val = float("0.036738") - mean = float("-0.000377237") - std = float("0.00377417") + min_val = float("-1.61681") + max_val = float("2.24513") + mean = float("0.120268") + std = float("0.507015") data = None class Program_weight_tensor_parameter_94: name = "parameter_94" - shape = [192] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-1.94044") - max_val = float("0.51462") - mean = float("-0.279235") - std = float("0.321666") + min_val = float("-0.229179") + max_val = float("0.225991") + mean = float("0.00223037") + std = float("0.0276474") data = None class Program_weight_tensor_parameter_95: name = "parameter_95" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.483074") - max_val = float("2.27001") - mean = float("1.13833") - std = float("0.37563") + min_val = float("-0.113425") + max_val = float("0.0127038") + mean = float("-0.0408835") + std = float("0.0214936") data = None class Program_weight_tensor_parameter_96: name = "parameter_96" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.00270072") - max_val = float("0.014292") - mean = float("0.00597179") - std = float("0.00181526") + min_val = float("0.966253") + max_val = float("1.11851") + mean = float("1.01774") + std = float("0.0232224") data = None class Program_weight_tensor_parameter_97: name = "parameter_97" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.0923253") - max_val = float("0.111642") - mean = float("0.0327645") - std = float("0.0355125") + 
min_val = float("0.388381") + max_val = float("3807.62") + mean = float("53.0047") + std = float("216.966") data = None class Program_weight_tensor_parameter_98: name = "parameter_98" - shape = [192, 192, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0231072") - max_val = float("0.038718") - mean = float("-0.000192078") - std = float("0.00338604") + min_val = float("-9.05284") + max_val = float("5.84204") + mean = float("0.111327") + std = float("1.23517") data = None class Program_weight_tensor_parameter_99: name = "parameter_99" - shape = [192] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-2.50828") - max_val = float("-0.123237") - mean = float("-1.28886") - std = float("0.44374") + min_val = float("-0.237817") + max_val = float("0.124946") + mean = float("0.000100381") + std = float("0.011587") data = None class Program_weight_tensor_parameter_100: name = "parameter_100" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.65494") - max_val = float("1.66968") - mean = float("1.19938") - std = float("0.166128") + min_val = float("-0.103321") + max_val = float("0.0232727") + mean = float("-0.0418488") + std = float("0.0225603") data = None class Program_weight_tensor_parameter_101: name = "parameter_101" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.0463808") - max_val = float("0.199074") - mean = float("0.0939914") - std = float("0.0271592") + min_val = float("0.929001") + max_val = float("1.1158") + mean = float("1.01191") + std = float("0.0303721") data = None class Program_weight_tensor_parameter_102: name = "parameter_102" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-2.14238") - max_val = float("0.410379") - mean = float("-0.110821") - std = float("0.246177") + min_val = float("0.323487") + max_val = float("2311.03") + mean = float("79.6394") + std = float("153.489") data = None class Program_weight_tensor_parameter_103: name = "parameter_103" - shape = [192, 192, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0362254") - max_val = float("0.0508084") - mean = float("-0.000238085") - std = float("0.00389331") + min_val = float("-6.12644") + max_val = float("14.665") + mean = float("0.49423") + std = float("2.21458") data = None class Program_weight_tensor_parameter_104: name = "parameter_104" - shape = [192] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-1.7573") - max_val = float("0.468575") - mean = float("-0.262432") - std = float("0.335818") + min_val = float("-0.339134") + max_val = float("0.191022") + mean = float("0.000881583") + std = float("0.0173865") data = None class Program_weight_tensor_parameter_105: name = "parameter_105" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.00295124") - max_val = float("1.67875") - mean = float("0.351961") - std = float("0.251699") + min_val = float("-0.109396") + max_val = float("0.0442887") + mean = float("-0.0296739") + std = float("0.017334") data = None class Program_weight_tensor_parameter_106: name = "parameter_106" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("9.30792e-07") - max_val = float("0.00191072") - mean = float("0.000361058") - std = float("0.000248966") + min_val = float("0.930867") + max_val = float("1.07806") + mean = float("1.00932") + std = float("0.0225248") data = None class Program_weight_tensor_parameter_107: name = "parameter_107" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.0372993") - max_val = 
float("0.0527515") - mean = float("0.0101454") - std = float("0.0121623") + min_val = float("0.146648") + max_val = float("626.166") + mean = float("15.3179") + std = float("43.6414") data = None class Program_weight_tensor_parameter_108: name = "parameter_108" - shape = [192, 192, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.0303466") - max_val = float("0.0356195") - mean = float("-0.000425557") - std = float("0.0036432") + min_val = float("-6.69579") + max_val = float("5.18983") + mean = float("0.32161") + std = float("0.980842") data = None class Program_weight_tensor_parameter_109: name = "parameter_109" - shape = [192] + shape = [384, 1152, 1, 1] dtype = "float32" - min_val = float("-1.7573") - max_val = float("0.470016") - mean = float("-0.262262") - std = float("0.336041") + min_val = float("-0.560359") + max_val = float("0.446273") + mean = float("0.00139089") + std = float("0.0337706") data = None class Program_weight_tensor_parameter_110: name = "parameter_110" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.406102") - max_val = float("1.97794") - mean = float("1.06588") - std = float("0.334156") + min_val = float("-0.0578452") + max_val = float("0.016811") + mean = float("-0.0161156") + std = float("0.011502") data = None class Program_weight_tensor_parameter_111: name = "parameter_111" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.00267891") - max_val = float("0.013042") - mean = float("0.00612078") - std = float("0.00178677") + min_val = float("0.920085") + max_val = float("1.10058") + mean = float("1.00226") + std = float("0.0184711") data = None class Program_weight_tensor_parameter_112: name = "parameter_112" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.0635846") - max_val = float("0.115227") - mean = float("0.0353282") - std = float("0.0320147") + min_val = float("0.495073") + max_val = float("876.99") + mean = float("8.39435") + std = float("45.7821") data = None class Program_weight_tensor_parameter_113: name = "parameter_113" - shape = [192, 192, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0321474") - max_val = float("0.0388371") - mean = float("-0.000190596") - std = float("0.00354187") + min_val = float("-4.00856") + max_val = float("1.42008") + mean = float("-0.218361") + std = float("0.649639") data = None class Program_weight_tensor_parameter_114: name = "parameter_114" - shape = [192] + shape = [384, 1152, 1, 1] dtype = "float32" - min_val = float("-2.49735") - max_val = float("0.137985") - mean = float("-1.24334") - std = float("0.424316") + min_val = float("-0.290536") + max_val = float("0.175016") + mean = float("-0.00070814") + std = float("0.0138241") data = None class Program_weight_tensor_parameter_115: name = "parameter_115" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.652126") - max_val = float("1.80991") - mean = float("1.16717") - std = float("0.165409") + min_val = float("-0.0540258") + max_val = float("0.0115933") + mean = float("-0.0157488") + std = float("0.0104757") data = None class Program_weight_tensor_parameter_116: name = "parameter_116" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.0307206") - max_val = float("0.141566") - mean = float("0.0673632") - std = float("0.0172678") + min_val = float("0.987802") + max_val = float("1.11339") + mean = float("1.02541") + std = float("0.019391") data = None class Program_weight_tensor_parameter_117: name = "parameter_117" - shape = [192] + shape 
= [384] dtype = "float32" - min_val = float("-1.51549") - max_val = float("0.284447") - mean = float("-0.0982023") - std = float("0.179232") + min_val = float("0.320507") + max_val = float("596.285") + mean = float("38.1824") + std = float("79.9607") data = None class Program_weight_tensor_parameter_118: name = "parameter_118" - shape = [192, 192, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.05013") - max_val = float("0.0656662") - mean = float("-0.000261502") - std = float("0.00399974") + min_val = float("-6.7222") + max_val = float("6.5412") + mean = float("-0.0320944") + std = float("1.4521") data = None class Program_weight_tensor_parameter_119: name = "parameter_119" - shape = [192] + shape = [384, 384, 3, 3] dtype = "float32" - min_val = float("-2.07916") - max_val = float("0.533363") - mean = float("-0.272351") - std = float("0.375289") + min_val = float("-0.0639514") + max_val = float("0.0601372") + mean = float("-8.51741e-05") + std = float("0.00526146") data = None class Program_weight_tensor_parameter_120: name = "parameter_120" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("0.000510371") - max_val = float("0.732354") - mean = float("0.211968") - std = float("0.136272") + min_val = float("-0.216482") + max_val = float("0.607924") + mean = float("0.238936") + std = float("0.14305") data = None class Program_weight_tensor_parameter_121: name = "parameter_121" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("6.27846e-08") - max_val = float("0.0007887") - mean = float("0.000243037") - std = float("0.000135288") + min_val = float("0.677427") + max_val = float("1.50605") + mean = float("1.12914") + std = float("0.0838432") data = None class Program_weight_tensor_parameter_122: name = "parameter_122" - shape = [192] + shape = [384] dtype = "float32" - min_val = float("-0.0197245") - max_val = float("0.0315593") - mean = float("0.00618711") - std = float("0.00922289") + min_val = float("1.74736") + max_val = float("22695.2") + mean = float("204.124") + std = float("1630.7") data = None class Program_weight_tensor_parameter_123: name = "parameter_123" - shape = [192, 192, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.0202783") - max_val = float("0.036136") - mean = float("-0.000265605") - std = float("0.00319736") + min_val = float("-10.7392") + max_val = float("6.92707") + mean = float("0.0292054") + std = float("0.881457") data = None class Program_weight_tensor_parameter_124: name = "parameter_124" - shape = [192] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-2.07922") - max_val = float("0.535166") - mean = float("-0.272236") - std = float("0.375502") + min_val = float("-3.55448") + max_val = float("1.46716") + mean = float("0.00287851") + std = float("0.0619363") data = None @@ -1377,10 +1363,10 @@ class Program_weight_tensor_parameter_125: name = "parameter_125" shape = [192] dtype = "float32" - min_val = float("0.396505") - max_val = float("1.96272") - mean = float("0.958924") - std = float("0.303858") + min_val = float("-0.188818") + max_val = float("0.0459523") + mean = float("-0.0294237") + std = float("0.0400839") data = None @@ -1388,10 +1374,10 @@ class Program_weight_tensor_parameter_126: name = "parameter_126" shape = [192] dtype = "float32" - min_val = float("0.00316561") - max_val = float("0.0147681") - mean = float("0.00641687") - std = float("0.00196185") + min_val = float("0.839771") + max_val = float("1.06967") + mean = float("0.976243") + std = float("0.0259122") data = None 
@@ -1399,32 +1385,32 @@ class Program_weight_tensor_parameter_127: name = "parameter_127" shape = [192] dtype = "float32" - min_val = float("-0.0910185") - max_val = float("0.161293") - mean = float("0.0384701") - std = float("0.0343716") + min_val = float("0.0133959") + max_val = float("1454.17") + mean = float("13.1647") + std = float("105.548") data = None class Program_weight_tensor_parameter_128: name = "parameter_128" - shape = [192, 192, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0299549") - max_val = float("0.0371106") - mean = float("-0.000205046") - std = float("0.00364104") + min_val = float("-0.261237") + max_val = float("1.28075") + mean = float("0.0271209") + std = float("0.114701") data = None class Program_weight_tensor_parameter_129: name = "parameter_129" - shape = [192] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-2.74084") - max_val = float("-0.0810353") - mean = float("-1.23693") - std = float("0.434057") + min_val = float("-0.123591") + max_val = float("0.43619") + mean = float("0.00447012") + std = float("0.0296425") data = None @@ -1432,10 +1418,10 @@ class Program_weight_tensor_parameter_130: name = "parameter_130" shape = [192] dtype = "float32" - min_val = float("0.761623") - max_val = float("1.62105") - mean = float("1.15096") - std = float("0.142541") + min_val = float("-0.188818") + max_val = float("0.0459523") + mean = float("-0.0294237") + std = float("0.0400839") data = None @@ -1443,10 +1429,10 @@ class Program_weight_tensor_parameter_131: name = "parameter_131" shape = [192] dtype = "float32" - min_val = float("0.0268285") - max_val = float("0.102609") - mean = float("0.0483826") - std = float("0.0115209") + min_val = float("0.721948") + max_val = float("1.13657") + mean = float("1.02394") + std = float("0.0395189") data = None @@ -1454,32 +1440,32 @@ class Program_weight_tensor_parameter_132: name = "parameter_132" shape = [192] dtype = "float32" - min_val = float("-1.23693") - max_val = float("0.284811") - mean = float("-0.0747383") - std = float("0.163901") + min_val = float("0.142781") + max_val = float("538.324") + mean = float("19.9641") + std = float("54.0569") data = None class Program_weight_tensor_parameter_133: name = "parameter_133" - shape = [192, 192, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0531238") - max_val = float("0.0579085") - mean = float("-0.000268921") - std = float("0.00396934") + min_val = float("-0.376722") + max_val = float("0.838454") + mean = float("0.0359573") + std = float("0.16185") data = None class Program_weight_tensor_parameter_134: name = "parameter_134" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-1.21219") - max_val = float("0.446681") - mean = float("-0.232278") - std = float("0.339349") + min_val = float("-0.0490945") + max_val = float("0.0667615") + mean = float("0.00072352") + std = float("0.00630598") data = None @@ -1487,10 +1473,10 @@ class Program_weight_tensor_parameter_135: name = "parameter_135" shape = [192] dtype = "float32" - min_val = float("-9.82711e-05") - max_val = float("0.677789") - mean = float("0.192032") - std = float("0.120727") + min_val = float("-0.197783") + max_val = float("0.0410661") + mean = float("-0.0617541") + std = float("0.0482775") data = None @@ -1498,10 +1484,10 @@ class Program_weight_tensor_parameter_136: name = "parameter_136" shape = [192] dtype = "float32" - min_val = float("2.25073e-10") - max_val = float("0.000874414") - mean = float("0.000240801") - std = float("0.000144458") 
+ min_val = float("0.894643") + max_val = float("1.18384") + mean = float("1.01562") + std = float("0.0486636") data = None @@ -1509,32 +1495,32 @@ class Program_weight_tensor_parameter_137: name = "parameter_137" shape = [192] dtype = "float32" - min_val = float("-0.0493103") - max_val = float("0.0373767") - mean = float("0.00675281") - std = float("0.0116645") + min_val = float("1.08082") + max_val = float("908.404") + mean = float("45.0674") + std = float("96.6794") data = None class Program_weight_tensor_parameter_138: name = "parameter_138" - shape = [192, 192, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.0342199") - max_val = float("0.0396943") - mean = float("-0.000272099") - std = float("0.00329482") + min_val = float("-0.933607") + max_val = float("1.82089") + mean = float("0.102296") + std = float("0.412254") data = None class Program_weight_tensor_parameter_139: name = "parameter_139" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-1.21223") - max_val = float("0.447751") - mean = float("-0.232181") - std = float("0.33961") + min_val = float("-0.0519582") + max_val = float("0.0691943") + mean = float("0.000736163") + std = float("0.00618428") data = None @@ -1542,10 +1528,10 @@ class Program_weight_tensor_parameter_140: name = "parameter_140" shape = [192] dtype = "float32" - min_val = float("0.382831") - max_val = float("1.56386") - mean = float("0.852099") - std = float("0.259991") + min_val = float("-0.192937") + max_val = float("0.00891605") + mean = float("-0.0669654") + std = float("0.0334937") data = None @@ -1553,10 +1539,10 @@ class Program_weight_tensor_parameter_141: name = "parameter_141" shape = [192] dtype = "float32" - min_val = float("0.00221295") - max_val = float("0.013395") - mean = float("0.00625423") - std = float("0.00179181") + min_val = float("0.921792") + max_val = float("1.05091") + mean = float("0.974424") + std = float("0.0180091") data = None @@ -1564,32 +1550,32 @@ class Program_weight_tensor_parameter_142: name = "parameter_142" shape = [192] dtype = "float32" - min_val = float("-0.0844719") - max_val = float("0.142009") - mean = float("0.0387308") - std = float("0.0378044") + min_val = float("0.00757897") + max_val = float("5.59766") + mean = float("0.426099") + std = float("0.667531") data = None class Program_weight_tensor_parameter_143: name = "parameter_143" - shape = [192, 192, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0323048") - max_val = float("0.0364338") - mean = float("-0.000186547") - std = float("0.00363857") + min_val = float("-0.113501") + max_val = float("0.219786") + mean = float("0.0198383") + std = float("0.0428864") data = None class Program_weight_tensor_parameter_144: name = "parameter_144" - shape = [192] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-2.48701") - max_val = float("-0.131293") - mean = float("-1.25014") - std = float("0.418255") + min_val = float("-0.280343") + max_val = float("0.242364") + mean = float("0.00234259") + std = float("0.0137845") data = None @@ -1597,10 +1583,10 @@ class Program_weight_tensor_parameter_145: name = "parameter_145" shape = [192] dtype = "float32" - min_val = float("0.689678") - max_val = float("1.5199") - mean = float("1.12491") - std = float("0.13482") + min_val = float("-0.192937") + max_val = float("0.00891605") + mean = float("-0.0669654") + std = float("0.0334937") data = None @@ -1608,10 +1594,10 @@ class Program_weight_tensor_parameter_146: name = "parameter_146" shape = [192] dtype = 
"float32" - min_val = float("0.0183928") - max_val = float("0.0607598") - mean = float("0.0349167") - std = float("0.00875797") + min_val = float("0.965058") + max_val = float("1.14639") + mean = float("1.02358") + std = float("0.0295349") data = None @@ -1619,32 +1605,32 @@ class Program_weight_tensor_parameter_147: name = "parameter_147" shape = [192] dtype = "float32" - min_val = float("-0.716469") - max_val = float("0.320455") - mean = float("-0.0746187") - std = float("0.131028") + min_val = float("0.160794") + max_val = float("55.6905") + mean = float("2.44066") + std = float("4.39657") data = None class Program_weight_tensor_parameter_148: name = "parameter_148" - shape = [192, 192, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0610342") - max_val = float("0.0592016") - mean = float("-0.000277763") - std = float("0.00397261") + min_val = float("-0.264263") + max_val = float("0.791243") + mean = float("0.04945") + std = float("0.117164") data = None class Program_weight_tensor_parameter_149: name = "parameter_149" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-1.21753") - max_val = float("0.499396") - mean = float("-0.167678") - std = float("0.2936") + min_val = float("-0.0585351") + max_val = float("0.0927808") + mean = float("0.000685876") + std = float("0.00518043") data = None @@ -1652,10 +1638,10 @@ class Program_weight_tensor_parameter_150: name = "parameter_150" shape = [192] dtype = "float32" - min_val = float("0.00836385") - max_val = float("1.53625") - mean = float("0.238111") - std = float("0.211728") + min_val = float("-0.196647") + max_val = float("0.0641564") + mean = float("-0.0774401") + std = float("0.0414081") data = None @@ -1663,10 +1649,10 @@ class Program_weight_tensor_parameter_151: name = "parameter_151" shape = [192] dtype = "float32" - min_val = float("1.96593e-05") - max_val = float("0.00679754") - mean = float("0.000504925") - std = float("0.000658808") + min_val = float("0.877502") + max_val = float("1.23877") + mean = float("1.01492") + std = float("0.0523329") data = None @@ -1674,32 +1660,32 @@ class Program_weight_tensor_parameter_152: name = "parameter_152" shape = [192] dtype = "float32" - min_val = float("-0.0656068") - max_val = float("0.0861781") - mean = float("0.00950526") - std = float("0.0164034") + min_val = float("0.566361") + max_val = float("908.179") + mean = float("23.4539") + std = float("69.3761") data = None class Program_weight_tensor_parameter_153: name = "parameter_153" - shape = [192, 192, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.0600528") - max_val = float("0.0312537") - mean = float("-0.000425532") - std = float("0.00397123") + min_val = float("-4.28743") + max_val = float("1.23655") + mean = float("0.0695418") + std = float("0.513951") data = None class Program_weight_tensor_parameter_154: name = "parameter_154" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-1.21747") - max_val = float("0.500448") - mean = float("-0.167516") - std = float("0.293818") + min_val = float("-0.135687") + max_val = float("0.125448") + mean = float("0.000810476") + std = float("0.00986455") data = None @@ -1707,10 +1693,10 @@ class Program_weight_tensor_parameter_155: name = "parameter_155" shape = [192] dtype = "float32" - min_val = float("0.354999") - max_val = float("1.44989") - mean = float("0.756941") - std = float("0.21662") + min_val = float("-0.23144") + max_val = float("-0.0126862") + mean = float("-0.0851822") + std = float("0.0422856") 
data = None @@ -1718,10 +1704,10 @@ class Program_weight_tensor_parameter_156: name = "parameter_156" shape = [192] dtype = "float32" - min_val = float("0.00436972") - max_val = float("0.0168976") - mean = float("0.00908178") - std = float("0.00268591") + min_val = float("0.899519") + max_val = float("1.03514") + mean = float("0.976157") + std = float("0.0242367") data = None @@ -1729,32 +1715,32 @@ class Program_weight_tensor_parameter_157: name = "parameter_157" shape = [192] dtype = "float32" - min_val = float("-0.15942") - max_val = float("0.153659") - mean = float("0.0492924") - std = float("0.0450903") + min_val = float("0.00813932") + max_val = float("152.929") + mean = float("3.6385") + std = float("11.7244") data = None class Program_weight_tensor_parameter_158: name = "parameter_158" - shape = [192, 192, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.062497") - max_val = float("0.0530577") - mean = float("-0.000241352") - std = float("0.00357809") + min_val = float("-0.318432") + max_val = float("0.187761") + mean = float("-0.0014536") + std = float("0.0590097") data = None class Program_weight_tensor_parameter_159: name = "parameter_159" - shape = [192] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-1.87905") - max_val = float("-0.211382") - mean = float("-1.14643") - std = float("0.325653") + min_val = float("-0.566113") + max_val = float("0.374272") + mean = float("-0.00203655") + std = float("0.033229") data = None @@ -1762,10 +1748,10 @@ class Program_weight_tensor_parameter_160: name = "parameter_160" shape = [192] dtype = "float32" - min_val = float("0.788784") - max_val = float("1.59753") - mean = float("1.12152") - std = float("0.12987") + min_val = float("-0.23144") + max_val = float("-0.0126862") + mean = float("-0.0851822") + std = float("0.0422856") data = None @@ -1773,10 +1759,10 @@ class Program_weight_tensor_parameter_161: name = "parameter_161" shape = [192] dtype = "float32" - min_val = float("0.0156512") - max_val = float("0.0757502") - mean = float("0.0313837") - std = float("0.00924696") + min_val = float("0.94299") + max_val = float("1.11066") + mean = float("1.02006") + std = float("0.0312105") data = None @@ -1784,32 +1770,32 @@ class Program_weight_tensor_parameter_162: name = "parameter_162" shape = [192] dtype = "float32" - min_val = float("-0.689275") - max_val = float("0.284905") - mean = float("-0.0666799") - std = float("0.130681") + min_val = float("0.1289") + max_val = float("1699.95") + mean = float("28.0305") + std = float("128.999") data = None class Program_weight_tensor_parameter_163: name = "parameter_163" - shape = [192, 192, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.062874") - max_val = float("0.076648") - mean = float("-0.000213471") - std = float("0.00383126") + min_val = float("-1.81173") + max_val = float("0.674127") + mean = float("0.0159434") + std = float("0.239598") data = None class Program_weight_tensor_parameter_164: name = "parameter_164" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-2.86217") - max_val = float("1.58057") - mean = float("-0.0275412") - std = float("0.747651") + min_val = float("-0.137869") + max_val = float("0.224919") + mean = float("-0.000155728") + std = float("0.0111564") data = None @@ -1817,10 +1803,10 @@ class Program_weight_tensor_parameter_165: name = "parameter_165" shape = [192] dtype = "float32" - min_val = float("0.487672") - max_val = float("2.0776") - mean = float("0.90163") - std = float("0.232007") + 
min_val = float("-0.233221") + max_val = float("0.0637355") + mean = float("-0.0967178") + std = float("0.0459598") data = None @@ -1828,10 +1814,10 @@ class Program_weight_tensor_parameter_166: name = "parameter_166" shape = [192] dtype = "float32" - min_val = float("0.00975444") - max_val = float("0.0591661") - mean = float("0.0230902") - std = float("0.00900034") + min_val = float("0.895324") + max_val = float("1.24271") + mean = float("1.01717") + std = float("0.0593265") data = None @@ -1839,32 +1825,32 @@ class Program_weight_tensor_parameter_167: name = "parameter_167" shape = [192] dtype = "float32" - min_val = float("-0.230409") - max_val = float("0.297734") - mean = float("-0.0377667") - std = float("0.0596951") + min_val = float("0.156623") + max_val = float("607.838") + mean = float("54.6281") + std = float("95.2855") data = None class Program_weight_tensor_parameter_168: name = "parameter_168" - shape = [192, 384, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.108831") - max_val = float("0.0931739") - mean = float("-0.000512323") - std = float("0.00842399") + min_val = float("-1.62265") + max_val = float("1.34409") + mean = float("0.005687") + std = float("0.403979") data = None class Program_weight_tensor_parameter_169: name = "parameter_169" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-2.96764") - max_val = float("1.66844") - mean = float("0.0968476") - std = float("0.663233") + min_val = float("-0.235146") + max_val = float("0.2559") + mean = float("-0.000272163") + std = float("0.0222664") data = None @@ -1872,10 +1858,10 @@ class Program_weight_tensor_parameter_170: name = "parameter_170" shape = [192] dtype = "float32" - min_val = float("0.830791") - max_val = float("5.55835") - mean = float("1.91342") - std = float("0.933379") + min_val = float("-0.199795") + max_val = float("0.0207518") + mean = float("-0.0665751") + std = float("0.0322603") data = None @@ -1883,10 +1869,10 @@ class Program_weight_tensor_parameter_171: name = "parameter_171" shape = [192] dtype = "float32" - min_val = float("0.00614721") - max_val = float("0.0464744") - mean = float("0.0174745") - std = float("0.00563515") + min_val = float("0.838902") + max_val = float("1.28467") + mean = float("1.01429") + std = float("0.0597393") data = None @@ -1894,208 +1880,208 @@ class Program_weight_tensor_parameter_172: name = "parameter_172" shape = [192] dtype = "float32" - min_val = float("-0.133354") - max_val = float("0.157826") - mean = float("-0.0239396") - std = float("0.0565686") + min_val = float("0.0720484") + max_val = float("1139.6") + mean = float("65.0503") + std = float("132.66") data = None class Program_weight_tensor_parameter_173: name = "parameter_173" - shape = [192, 384, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.0985625") - max_val = float("0.0941202") - mean = float("-0.000511784") - std = float("0.00783691") + min_val = float("-7.56675") + max_val = float("2.4842") + mean = float("-0.383258") + std = float("1.53872") data = None class Program_weight_tensor_parameter_174: name = "parameter_174" - shape = [384] + shape = [192, 576, 1, 1] dtype = "float32" - min_val = float("-2.92359") - max_val = float("1.32666") - mean = float("-0.301116") - std = float("0.563662") + min_val = float("-0.570014") + max_val = float("1.07892") + mean = float("-0.000455192") + std = float("0.0620441") data = None class Program_weight_tensor_parameter_175: name = "parameter_175" - shape = [384] + shape = [192] dtype = "float32" - min_val = 
float("0.631853") - max_val = float("2.47541") - mean = float("1.15998") - std = float("0.257348") + min_val = float("-0.104993") + max_val = float("0.0307948") + mean = float("-0.0245968") + std = float("0.024377") data = None class Program_weight_tensor_parameter_176: name = "parameter_176" - shape = [384] + shape = [192] dtype = "float32" - min_val = float("0.0104507") - max_val = float("0.111288") - mean = float("0.0262303") - std = float("0.0126363") + min_val = float("0.822217") + max_val = float("1.21452") + mean = float("1.00159") + std = float("0.0456234") data = None class Program_weight_tensor_parameter_177: name = "parameter_177" - shape = [384] + shape = [192] dtype = "float32" - min_val = float("-0.269997") - max_val = float("0.244719") - mean = float("0.0226896") - std = float("0.0692565") + min_val = float("0.108174") + max_val = float("1899.67") + mean = float("46.8811") + std = float("186.384") data = None class Program_weight_tensor_parameter_178: name = "parameter_178" - shape = [384, 256, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0753194") - max_val = float("0.0720032") - mean = float("-0.000103466") - std = float("0.00421781") + min_val = float("-6.03851") + max_val = float("2.66203") + mean = float("0.0745216") + std = float("0.996719") data = None class Program_weight_tensor_parameter_179: name = "parameter_179" - shape = [256] + shape = [192, 576, 1, 1] dtype = "float32" - min_val = float("-2.04502") - max_val = float("1.28816") - mean = float("-0.924614") - std = float("0.543015") + min_val = float("-0.874328") + max_val = float("0.287848") + mean = float("-0.00271476") + std = float("0.0440209") data = None class Program_weight_tensor_parameter_180: name = "parameter_180" - shape = [256] + shape = [192] dtype = "float32" - min_val = float("0.517239") - max_val = float("1.68961") - mean = float("1.05432") - std = float("0.176149") + min_val = float("-0.168512") + max_val = float("0.00330333") + mean = float("-0.0443386") + std = float("0.0224484") data = None class Program_weight_tensor_parameter_181: name = "parameter_181" - shape = [256] + shape = [192] dtype = "float32" - min_val = float("0.00195657") - max_val = float("0.0265577") - mean = float("0.00628818") - std = float("0.00298858") + min_val = float("0.849694") + max_val = float("1.19077") + mean = float("0.991767") + std = float("0.0408626") data = None class Program_weight_tensor_parameter_182: name = "parameter_182" - shape = [256] + shape = [192] dtype = "float32" - min_val = float("-0.230372") - max_val = float("0.154861") - mean = float("-0.0517653") - std = float("0.0687788") + min_val = float("6.16524") + max_val = float("124201.0") + mean = float("2816.92") + std = float("9762.79") data = None class Program_weight_tensor_parameter_183: name = "parameter_183" - shape = [256, 192, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.206154") - max_val = float("0.170783") - mean = float("-0.000884197") - std = float("0.0145162") + min_val = float("-516.507") + max_val = float("458.567") + mean = float("-22.2018") + std = float("117.069") data = None class Program_weight_tensor_parameter_184: name = "parameter_184" - shape = [192] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0139357") - max_val = float("0.00388361") - mean = float("-0.00495662") - std = float("0.00371291") + min_val = float("-0.399414") + max_val = float("0.235883") + mean = float("-0.0021386") + std = float("0.0211608") data = None class Program_weight_tensor_parameter_185: name = 
"parameter_185" - shape = [192, 192, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.347135") - max_val = float("0.228777") - mean = float("-0.00389388") - std = float("0.0106293") + min_val = float("-85.128") + max_val = float("88.3635") + mean = float("3.58989") + std = float("27.7568") data = None class Program_weight_tensor_parameter_186: name = "parameter_186" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-1.91355") - max_val = float("0.53303") - mean = float("-0.208939") - std = float("0.434311") + min_val = float("-31.9346") + max_val = float("19.7089") + mean = float("0.428102") + std = float("7.9625") data = None class Program_weight_tensor_parameter_187: name = "parameter_187" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.142427") - max_val = float("3.22988") - mean = float("0.635833") - std = float("0.668487") + min_val = float("4.93657") + max_val = float("817684.0") + mean = float("36364.9") + std = float("89921.0") data = None class Program_weight_tensor_parameter_188: name = "parameter_188" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("7.85249e-05") - max_val = float("0.00245675") - mean = float("0.000588646") - std = float("0.000433443") + min_val = float("-162.528") + max_val = float("314.565") + mean = float("4.32602") + std = float("60.1911") data = None class Program_weight_tensor_parameter_189: name = "parameter_189" - shape = [96] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.0546919") - max_val = float("0.0598857") - mean = float("0.00511828") - std = float("0.0215456") + min_val = float("-27.6825") + max_val = float("13.0687") + mean = float("-0.022105") + std = float("1.26716") data = None class Program_weight_tensor_parameter_190: name = "parameter_190" - shape = [96, 96, 1, 1] + shape = [96] dtype = "float32" - min_val = float("-0.0500852") - max_val = float("0.0932317") - mean = float("-0.000561284") - std = float("0.00794853") + min_val = float("-7.24863") + max_val = float("5.05502") + mean = float("-0.962893") + std = float("2.20937") data = None @@ -2103,10 +2089,10 @@ class Program_weight_tensor_parameter_191: name = "parameter_191" shape = [96] dtype = "float32" - min_val = float("-1.91314") - max_val = float("0.534306") - mean = float("-0.208596") - std = float("0.434435") + min_val = float("-3.53429") + max_val = float("3.92679") + mean = float("0.710033") + std = float("1.35103") data = None @@ -2114,10 +2100,10 @@ class Program_weight_tensor_parameter_192: name = "parameter_192" shape = [96] dtype = "float32" - min_val = float("0.343774") - max_val = float("5.47118") - mean = float("1.08565") - std = float("0.88383") + min_val = float("0.0751746") + max_val = float("5214.13") + mean = float("468.433") + std = float("1111.49") data = None @@ -2125,32 +2111,32 @@ class Program_weight_tensor_parameter_193: name = "parameter_193" shape = [96] dtype = "float32" - min_val = float("0.000942165") - max_val = float("0.0158414") - mean = float("0.00535059") - std = float("0.00274901") + min_val = float("-45.3576") + max_val = float("94.342") + mean = float("6.78995") + std = float("24.5091") data = None class Program_weight_tensor_parameter_194: name = "parameter_194" - shape = [96] + shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.137594") - max_val = float("0.212423") - mean = float("0.0122222") - std = float("0.0611614") + min_val = float("-19.379") + max_val = float("5.94629") + mean = float("0.0933808") + std = float("0.838543") data = None 
class Program_weight_tensor_parameter_195: name = "parameter_195" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.0398886") - max_val = float("0.0746673") - mean = float("-0.000229692") - std = float("0.00588155") + min_val = float("-7.24863") + max_val = float("5.05502") + mean = float("-0.962893") + std = float("2.20937") data = None @@ -2158,10 +2144,10 @@ class Program_weight_tensor_parameter_196: name = "parameter_196" shape = [96] dtype = "float32" - min_val = float("-2.46605") - max_val = float("-0.0202143") - mean = float("-1.22676") - std = float("0.443304") + min_val = float("-5.32819") + max_val = float("4.14921") + mean = float("0.636519") + std = float("1.50817") data = None @@ -2169,10 +2155,10 @@ class Program_weight_tensor_parameter_197: name = "parameter_197" shape = [96] dtype = "float32" - min_val = float("0.542082") - max_val = float("1.6433") - mean = float("0.945634") - std = float("0.172529") + min_val = float("1.04384") + max_val = float("59220.1") + mean = float("2569.29") + std = float("8056.44") data = None @@ -2180,32 +2166,32 @@ class Program_weight_tensor_parameter_198: name = "parameter_198" shape = [96] dtype = "float32" - min_val = float("0.041446") - max_val = float("0.237068") - mean = float("0.0867119") - std = float("0.0369728") + min_val = float("-202.998") + max_val = float("339.698") + mean = float("7.6026") + std = float("67.6599") data = None class Program_weight_tensor_parameter_199: name = "parameter_199" - shape = [96] + shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-2.80547") - max_val = float("1.61972") - mean = float("-0.194801") - std = float("0.469392") + min_val = float("-7.61018") + max_val = float("2.06596") + mean = float("0.00408673") + std = float("0.270734") data = None class Program_weight_tensor_parameter_200: name = "parameter_200" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.150203") - max_val = float("0.114223") - mean = float("-0.000376735") - std = float("0.00724688") + min_val = float("-12.7487") + max_val = float("6.04207") + mean = float("-0.970305") + std = float("3.03245") data = None @@ -2213,10 +2199,10 @@ class Program_weight_tensor_parameter_201: name = "parameter_201" shape = [96] dtype = "float32" - min_val = float("-1.38826") - max_val = float("0.562406") - mean = float("-0.132909") - std = float("0.347394") + min_val = float("-6.54554") + max_val = float("4.74788") + mean = float("0.995298") + std = float("1.40525") data = None @@ -2224,10 +2210,10 @@ class Program_weight_tensor_parameter_202: name = "parameter_202" shape = [96] dtype = "float32" - min_val = float("0.0453402") - max_val = float("1.86504") - mean = float("0.460875") - std = float("0.366369") + min_val = float("9.39318") + max_val = float("2190150.0") + mean = float("66154.5") + std = float("240596.0") data = None @@ -2235,32 +2221,32 @@ class Program_weight_tensor_parameter_203: name = "parameter_203" shape = [96] dtype = "float32" - min_val = float("7.55835e-05") - max_val = float("0.00277897") - mean = float("0.000754646") - std = float("0.000610909") + min_val = float("-80.81") + max_val = float("224.697") + mean = float("9.21888") + std = float("37.6238") data = None class Program_weight_tensor_parameter_204: name = "parameter_204" - shape = [96] + shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.048531") - max_val = float("0.0464578") - mean = float("0.00676756") - std = float("0.0176703") + min_val = float("-3.09795") + max_val = float("6.23125") + 
mean = float("0.039322") + std = float("0.339853") data = None class Program_weight_tensor_parameter_205: name = "parameter_205" - shape = [96, 96, 1, 1] + shape = [96] dtype = "float32" - min_val = float("-0.0483138") - max_val = float("0.0415922") - mean = float("-0.000498568") - std = float("0.00710731") + min_val = float("-6.57041") + max_val = float("1.23513") + mean = float("-1.08467") + std = float("1.31742") data = None @@ -2268,10 +2254,10 @@ class Program_weight_tensor_parameter_206: name = "parameter_206" shape = [96] dtype = "float32" - min_val = float("-1.38834") - max_val = float("0.5648") - mean = float("-0.13256") - std = float("0.347894") + min_val = float("-4.57105") + max_val = float("7.43697") + mean = float("0.761073") + std = float("1.58355") data = None @@ -2279,10 +2265,10 @@ class Program_weight_tensor_parameter_207: name = "parameter_207" shape = [96] dtype = "float32" - min_val = float("0.370504") - max_val = float("2.32822") - mean = float("0.901933") - std = float("0.426522") + min_val = float("0.122882") + max_val = float("5384610.0") + mean = float("110211.0") + std = float("572085.0") data = None @@ -2290,32 +2276,32 @@ class Program_weight_tensor_parameter_208: name = "parameter_208" shape = [96] dtype = "float32" - min_val = float("0.00323837") - max_val = float("0.0234632") - mean = float("0.00912274") - std = float("0.00470534") + min_val = float("-48.2301") + max_val = float("166.551") + mean = float("6.24412") + std = float("24.2542") data = None class Program_weight_tensor_parameter_209: name = "parameter_209" - shape = [96] + shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0965735") - max_val = float("0.12145") - mean = float("0.0354439") - std = float("0.0432668") + min_val = float("-6.4048") + max_val = float("25.2239") + mean = float("0.251097") + std = float("1.3616") data = None class Program_weight_tensor_parameter_210: name = "parameter_210" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.058655") - max_val = float("0.0591114") - mean = float("-0.000356621") - std = float("0.0059174") + min_val = float("-6.57041") + max_val = float("1.23513") + mean = float("-1.08467") + std = float("1.31742") data = None @@ -2323,10 +2309,10 @@ class Program_weight_tensor_parameter_211: name = "parameter_211" shape = [96] dtype = "float32" - min_val = float("-3.31955") - max_val = float("0.36603") - mean = float("-1.17895") - std = float("0.556023") + min_val = float("-4.84455") + max_val = float("4.21014") + mean = float("0.997804") + std = float("1.25033") data = None @@ -2334,10 +2320,10 @@ class Program_weight_tensor_parameter_212: name = "parameter_212" shape = [96] dtype = "float32" - min_val = float("0.473098") - max_val = float("1.98183") - mean = float("1.03911") - std = float("0.238708") + min_val = float("8.7857") + max_val = float("11788800.0") + mean = float("727757.0") + std = float("1936730.0") data = None @@ -2345,32 +2331,32 @@ class Program_weight_tensor_parameter_213: name = "parameter_213" shape = [96] dtype = "float32" - min_val = float("0.0282091") - max_val = float("0.14579") - mean = float("0.0545779") - std = float("0.0168222") + min_val = float("-305.931") + max_val = float("314.393") + mean = float("20.0074") + std = float("74.2719") data = None class Program_weight_tensor_parameter_214: name = "parameter_214" - shape = [96] + shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-1.24896") - max_val = float("0.504954") - mean = float("-0.0599842") - std = float("0.268023") + 
min_val = float("-4.16137") + max_val = float("5.97165") + mean = float("0.112215") + std = float("0.461145") data = None class Program_weight_tensor_parameter_215: name = "parameter_215" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.147666") - max_val = float("0.152112") - mean = float("-0.000410438") - std = float("0.00711818") + min_val = float("-6.38172") + max_val = float("0.373498") + mean = float("-1.33479") + std = float("1.05978") data = None @@ -2378,10 +2364,10 @@ class Program_weight_tensor_parameter_216: name = "parameter_216" shape = [96] dtype = "float32" - min_val = float("-1.24956") - max_val = float("0.58267") - mean = float("-0.109749") - std = float("0.291966") + min_val = float("-5.79719") + max_val = float("12.1783") + mean = float("0.960027") + std = float("3.2153") data = None @@ -2389,10 +2375,10 @@ class Program_weight_tensor_parameter_217: name = "parameter_217" shape = [96] dtype = "float32" - min_val = float("0.0243293") - max_val = float("1.27785") - mean = float("0.324816") - std = float("0.192866") + min_val = float("45.1963") + max_val = float("16654400.0") + mean = float("673305.0") + std = float("2003700.0") data = None @@ -2400,32 +2386,32 @@ class Program_weight_tensor_parameter_218: name = "parameter_218" shape = [96] dtype = "float32" - min_val = float("6.35226e-05") - max_val = float("0.00358684") - mean = float("0.000713081") - std = float("0.000576864") + min_val = float("-355.958") + max_val = float("408.085") + mean = float("8.33521") + std = float("84.948") data = None class Program_weight_tensor_parameter_219: name = "parameter_219" - shape = [96] + shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.0385319") - max_val = float("0.0503831") - mean = float("0.00407058") - std = float("0.0162571") + min_val = float("-5.59114") + max_val = float("6.22526") + mean = float("0.0518284") + std = float("0.488386") data = None class Program_weight_tensor_parameter_220: name = "parameter_220" - shape = [96, 96, 1, 1] + shape = [96] dtype = "float32" - min_val = float("-0.0448708") - max_val = float("0.0573038") - mean = float("-0.000336044") - std = float("0.00726838") + min_val = float("-6.40528") + max_val = float("0.439558") + mean = float("-1.04892") + std = float("1.44668") data = None @@ -2433,10 +2419,10 @@ class Program_weight_tensor_parameter_221: name = "parameter_221" shape = [96] dtype = "float32" - min_val = float("-1.24942") - max_val = float("0.584539") - mean = float("-0.109552") - std = float("0.292478") + min_val = float("-7.47233") + max_val = float("12.2951") + mean = float("0.896125") + std = float("2.48316") data = None @@ -2444,10 +2430,10 @@ class Program_weight_tensor_parameter_222: name = "parameter_222" shape = [96] dtype = "float32" - min_val = float("0.315495") - max_val = float("1.67063") - mean = float("0.747087") - std = float("0.257847") + min_val = float("0.227469") + max_val = float("674599.0") + mean = float("19715.9") + std = float("80090.6") data = None @@ -2455,32 +2441,32 @@ class Program_weight_tensor_parameter_223: name = "parameter_223" shape = [96] dtype = "float32" - min_val = float("0.00338498") - max_val = float("0.0252591") - mean = float("0.0101982") - std = float("0.00406466") + min_val = float("-91.0419") + max_val = float("72.2002") + mean = float("2.37886") + std = float("17.3356") data = None class Program_weight_tensor_parameter_224: name = "parameter_224" - shape = [96] + shape = [96, 96, 1, 1] dtype = "float32" - min_val = float("-0.0546488") - max_val = 
float("0.145267") - mean = float("0.0275953") - std = float("0.038381") + min_val = float("-11.5461") + max_val = float("22.862") + mean = float("0.0613126") + std = float("1.12064") data = None class Program_weight_tensor_parameter_225: name = "parameter_225" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.065253") - max_val = float("0.0583777") - mean = float("-0.000331097") - std = float("0.00602268") + min_val = float("-6.40528") + max_val = float("0.439558") + mean = float("-1.04892") + std = float("1.44668") data = None @@ -2488,10 +2474,10 @@ class Program_weight_tensor_parameter_226: name = "parameter_226" shape = [96] dtype = "float32" - min_val = float("-3.58296") - max_val = float("0.290726") - mean = float("-1.12856") - std = float("0.572409") + min_val = float("-5.55409") + max_val = float("8.57283") + mean = float("0.767581") + std = float("1.91915") data = None @@ -2499,10 +2485,10 @@ class Program_weight_tensor_parameter_227: name = "parameter_227" shape = [96] dtype = "float32" - min_val = float("0.511106") - max_val = float("2.19165") - mean = float("1.05198") - std = float("0.238255") + min_val = float("36.8885") + max_val = float("1641000.0") + mean = float("154074.0") + std = float("333545.0") data = None @@ -2510,32 +2496,32 @@ class Program_weight_tensor_parameter_228: name = "parameter_228" shape = [96] dtype = "float32" - min_val = float("0.0201149") - max_val = float("0.0748228") - mean = float("0.0399945") - std = float("0.0094601") + min_val = float("-148.097") + max_val = float("151.343") + mean = float("8.60596") + std = float("46.4808") data = None class Program_weight_tensor_parameter_229: name = "parameter_229" - shape = [96] + shape = [96, 96, 3, 3] dtype = "float32" - min_val = float("-0.822261") - max_val = float("0.396367") - mean = float("-0.0472576") - std = float("0.195175") + min_val = float("-6.37265") + max_val = float("6.48435") + mean = float("0.0246406") + std = float("0.404267") data = None class Program_weight_tensor_parameter_230: name = "parameter_230" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.0973524") - max_val = float("0.130681") - mean = float("-0.000422376") - std = float("0.00719502") + min_val = float("-12.1058") + max_val = float("2.65046") + mean = float("-1.35949") + std = float("2.31774") data = None @@ -2543,10 +2529,10 @@ class Program_weight_tensor_parameter_231: name = "parameter_231" shape = [96] dtype = "float32" - min_val = float("-0.892064") - max_val = float("0.529384") - mean = float("-0.160709") - std = float("0.281574") + min_val = float("-5.35437") + max_val = float("15.6996") + mean = float("0.620018") + std = float("3.80279") data = None @@ -2554,10 +2540,10 @@ class Program_weight_tensor_parameter_232: name = "parameter_232" shape = [96] dtype = "float32" - min_val = float("0.0191223") - max_val = float("1.40524") - mean = float("0.32501") - std = float("0.213327") + min_val = float("119.771") + max_val = float("2429810.0") + mean = float("193542.0") + std = float("392172.0") data = None @@ -2565,32 +2551,32 @@ class Program_weight_tensor_parameter_233: name = "parameter_233" shape = [96] dtype = "float32" - min_val = float("4.77273e-05") - max_val = float("0.00366742") - mean = float("0.000733131") - std = float("0.000561341") + min_val = float("-183.549") + max_val = float("100.622") + mean = float("1.67747") + std = float("40.3903") data = None class Program_weight_tensor_parameter_234: name = "parameter_234" - shape = [96] + shape = [96, 96, 
3, 3] dtype = "float32" - min_val = float("-0.0328433") - max_val = float("0.0466064") - mean = float("0.00724984") - std = float("0.0146293") + min_val = float("-5.05043") + max_val = float("3.64647") + mean = float("-0.00681032") + std = float("0.486976") data = None class Program_weight_tensor_parameter_235: name = "parameter_235" - shape = [96, 96, 1, 1] + shape = [96] dtype = "float32" - min_val = float("-0.0499906") - max_val = float("0.0448114") - mean = float("-0.000606145") - std = float("0.00724394") + min_val = float("-6.51727") + max_val = float("4.07312") + mean = float("-0.742646") + std = float("1.54691") data = None @@ -2598,10 +2584,10 @@ class Program_weight_tensor_parameter_236: name = "parameter_236" shape = [96] dtype = "float32" - min_val = float("-0.891955") - max_val = float("0.530721") - mean = float("-0.160571") - std = float("0.281998") + min_val = float("-17.2429") + max_val = float("16.1753") + mean = float("0.496246") + std = float("4.30544") data = None @@ -2609,10 +2595,10 @@ class Program_weight_tensor_parameter_237: name = "parameter_237" shape = [96] dtype = "float32" - min_val = float("0.17446") - max_val = float("1.78047") - mean = float("0.708571") - std = float("0.284378") + min_val = float("33.613") + max_val = float("328553.0") + mean = float("25107.8") + std = float("56405.7") data = None @@ -2620,32 +2606,32 @@ class Program_weight_tensor_parameter_238: name = "parameter_238" shape = [96] dtype = "float32" - min_val = float("0.00236768") - max_val = float("0.0256587") - mean = float("0.0102153") - std = float("0.00392496") + min_val = float("-61.9912") + max_val = float("47.6969") + mean = float("-1.08568") + std = float("14.1662") data = None class Program_weight_tensor_parameter_239: name = "parameter_239" - shape = [96] + shape = [96, 448, 1, 1] dtype = "float32" - min_val = float("-0.0582404") - max_val = float("0.137905") - mean = float("0.0410397") - std = float("0.0378938") + min_val = float("-22.5977") + max_val = float("9.97906") + mean = float("0.00384249") + std = float("1.00918") data = None class Program_weight_tensor_parameter_240: name = "parameter_240" - shape = [96, 96, 3, 3] + shape = [96] dtype = "float32" - min_val = float("-0.057305") - max_val = float("0.0650381") - mean = float("-0.000417143") - std = float("0.00601776") + min_val = float("-2.39003") + max_val = float("1.00251") + mean = float("-0.133253") + std = float("0.503822") data = None @@ -2653,10 +2639,10 @@ class Program_weight_tensor_parameter_241: name = "parameter_241" shape = [96] dtype = "float32" - min_val = float("-2.65777") - max_val = float("0.065358") - mean = float("-1.06432") - std = float("0.48826") + min_val = float("-4.56305") + max_val = float("6.14021") + mean = float("1.38317") + std = float("1.84076") data = None @@ -2664,10 +2650,10 @@ class Program_weight_tensor_parameter_242: name = "parameter_242" shape = [96] dtype = "float32" - min_val = float("0.512951") - max_val = float("1.73806") - mean = float("1.01547") - std = float("0.193357") + min_val = float("5.07934") + max_val = float("29574.8") + mean = float("1194.75") + std = float("3229.19") data = None @@ -2675,472 +2661,472 @@ class Program_weight_tensor_parameter_243: name = "parameter_243" shape = [96] dtype = "float32" - min_val = float("0.0176967") - max_val = float("0.0574371") - mean = float("0.0307481") - std = float("0.00699669") + min_val = float("-4.92322") + max_val = float("20.0659") + mean = float("1.71861") + std = float("4.04769") data = None class 
Program_weight_tensor_parameter_244: name = "parameter_244" - shape = [96] + shape = [96, 448, 1, 1] dtype = "float32" - min_val = float("-0.761559") - max_val = float("0.60821") - mean = float("-0.0646145") - std = float("0.194302") + min_val = float("-3.49471") + max_val = float("1.20362") + mean = float("0.00515059") + std = float("0.189079") data = None class Program_weight_tensor_parameter_245: name = "parameter_245" - shape = [96, 96, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0738037") - max_val = float("0.125248") - mean = float("-0.000426247") - std = float("0.0069708") + min_val = float("-6.61152") + max_val = float("1.7835") + mean = float("-0.419521") + std = float("1.07353") data = None class Program_weight_tensor_parameter_246: name = "parameter_246" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-0.978262") - max_val = float("0.489992") - mean = float("-0.136691") - std = float("0.278636") + min_val = float("-14.4931") + max_val = float("16.6779") + mean = float("0.0422574") + std = float("2.46703") data = None class Program_weight_tensor_parameter_247: name = "parameter_247" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.0498074") - max_val = float("1.1462") - mean = float("0.296075") - std = float("0.172323") + min_val = float("0.28209") + max_val = float("76246.9") + mean = float("6772.73") + std = float("14681.3") data = None class Program_weight_tensor_parameter_248: name = "parameter_248" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.000180546") - max_val = float("0.00509335") - mean = float("0.00108394") - std = float("0.00072352") + min_val = float("-31.4966") + max_val = float("41.7212") + mean = float("1.71254") + std = float("7.38567") data = None class Program_weight_tensor_parameter_249: name = "parameter_249" - shape = [96] + shape = [192, 384, 1, 1] dtype = "float32" - min_val = float("-0.041806") - max_val = float("0.0564684") - mean = float("0.00557215") - std = float("0.0180702") + min_val = float("-3.49758") + max_val = float("2.55718") + mean = float("0.0265243") + std = float("0.348243") data = None class Program_weight_tensor_parameter_250: name = "parameter_250" - shape = [96, 96, 1, 1] + shape = [384] dtype = "float32" - min_val = float("-0.0731207") - max_val = float("0.0763792") - mean = float("-0.000594618") - std = float("0.00825765") + min_val = float("-1.87838") + max_val = float("1.09629") + mean = float("-0.183799") + std = float("0.345718") data = None class Program_weight_tensor_parameter_251: name = "parameter_251" - shape = [96] + shape = [384] dtype = "float32" - min_val = float("-0.978083") - max_val = float("0.492448") - mean = float("-0.136655") - std = float("0.279122") + min_val = float("-3.0247") + max_val = float("2.74203") + mean = float("0.964417") + std = float("0.845217") data = None class Program_weight_tensor_parameter_252: name = "parameter_252" - shape = [96] + shape = [384] dtype = "float32" - min_val = float("0.236133") - max_val = float("1.69671") - mean = float("0.603953") - std = float("0.228164") + min_val = float("0.258076") + max_val = float("117117.0") + mean = float("2098.36") + std = float("9309.06") data = None class Program_weight_tensor_parameter_253: name = "parameter_253" - shape = [96] + shape = [384] dtype = "float32" - min_val = float("0.00612804") - max_val = float("0.0281159") - mean = float("0.0137457") - std = float("0.00478723") + min_val = float("-37.8342") + max_val = float("38.526") + mean = float("2.07375") + 
std = float("5.93701") data = None class Program_weight_tensor_parameter_254: name = "parameter_254" - shape = [96] + shape = [384, 384, 1, 1] dtype = "float32" - min_val = float("-0.0713362") - max_val = float("0.135694") - mean = float("0.0273635") - std = float("0.0460879") + min_val = float("-3.75963") + max_val = float("3.45352") + mean = float("-0.00284906") + std = float("0.205711") data = None class Program_weight_tensor_parameter_255: name = "parameter_255" - shape = [96, 96, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0654835") - max_val = float("0.0522648") - mean = float("-0.00036204") - std = float("0.0060426") + min_val = float("-0.377474") + max_val = float("0.270252") + mean = float("-0.09324") + std = float("0.113903") data = None class Program_weight_tensor_parameter_256: name = "parameter_256" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-3.46434") - max_val = float("0.199609") - mean = float("-1.00527") - std = float("0.548081") + min_val = float("0.589898") + max_val = float("2.19572") + mean = float("1.10833") + std = float("0.298081") data = None class Program_weight_tensor_parameter_257: name = "parameter_257" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.686506") - max_val = float("2.51291") - mean = float("1.07427") - std = float("0.212412") + min_val = float("0.0224855") + max_val = float("6349.83") + mean = float("93.5002") + std = float("543.789") data = None class Program_weight_tensor_parameter_258: name = "parameter_258" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.013513") - max_val = float("0.0542585") - mean = float("0.0262785") - std = float("0.00851322") + min_val = float("-0.711802") + max_val = float("1.86246") + mean = float("0.0422734") + std = float("0.271396") data = None class Program_weight_tensor_parameter_259: name = "parameter_259" - shape = [96] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.482054") - max_val = float("0.527892") - mean = float("-0.0515599") - std = float("0.192746") + min_val = float("-0.513946") + max_val = float("1.90858") + mean = float("0.0211941") + std = float("0.126887") data = None class Program_weight_tensor_parameter_260: name = "parameter_260" - shape = [96, 96, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0824841") - max_val = float("0.0934753") - mean = float("-0.000357672") - std = float("0.00712731") + min_val = float("-0.377474") + max_val = float("0.270252") + mean = float("-0.09324") + std = float("0.113903") data = None class Program_weight_tensor_parameter_261: name = "parameter_261" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-0.625302") - max_val = float("0.449836") - mean = float("-0.0825559") - std = float("0.256738") + min_val = float("0.664841") + max_val = float("2.1235") + mean = float("1.14039") + std = float("0.294828") data = None class Program_weight_tensor_parameter_262: name = "parameter_262" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.0910018") - max_val = float("1.30085") - mean = float("0.309049") - std = float("0.196412") + min_val = float("0.447646") + max_val = float("9622.59") + mean = float("555.521") + std = float("1466.93") data = None class Program_weight_tensor_parameter_263: name = "parameter_263" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.000380114") - max_val = float("0.0177806") - mean = float("0.00357894") - std = float("0.00283759") + min_val = float("-1.42131") + max_val 
= float("4.63902") + mean = float("0.15401") + std = float("0.797684") data = None class Program_weight_tensor_parameter_264: name = "parameter_264" - shape = [96] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.0360021") - max_val = float("0.0301813") - mean = float("-5.04728e-05") - std = float("0.0106632") + min_val = float("-0.200786") + max_val = float("0.349522") + mean = float("0.00864049") + std = float("0.0419031") data = None class Program_weight_tensor_parameter_265: name = "parameter_265" - shape = [96, 96, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.0925016") - max_val = float("0.0753255") - mean = float("-0.00105853") - std = float("0.00936655") + min_val = float("-0.376564") + max_val = float("0.0843041") + mean = float("-0.165316") + std = float("0.0820009") data = None class Program_weight_tensor_parameter_266: name = "parameter_266" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-0.625183") - max_val = float("0.450937") - mean = float("-0.082575") - std = float("0.257081") + min_val = float("0.618528") + max_val = float("1.87359") + mean = float("1.00495") + std = float("0.220123") data = None class Program_weight_tensor_parameter_267: name = "parameter_267" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.210658") - max_val = float("1.42703") - mean = float("0.527208") - std = float("0.258269") + min_val = float("4.40675") + max_val = float("11495.1") + mean = float("1075.25") + std = float("1952.54") data = None class Program_weight_tensor_parameter_268: name = "parameter_268" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.0102276") - max_val = float("0.0952331") - mean = float("0.0336542") - std = float("0.0171249") + min_val = float("-7.62537") + max_val = float("8.84763") + mean = float("0.605399") + std = float("2.44971") data = None class Program_weight_tensor_parameter_269: name = "parameter_269" - shape = [96] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.108112") - max_val = float("0.0898917") - mean = float("-0.00831331") - std = float("0.0381217") + min_val = float("-0.610252") + max_val = float("0.46974") + mean = float("0.00434873") + std = float("0.0378261") data = None class Program_weight_tensor_parameter_270: name = "parameter_270" - shape = [96, 96, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0885375") - max_val = float("0.0525934") - mean = float("-0.000466484") - std = float("0.00584459") + min_val = float("-0.396329") + max_val = float("0.0895731") + mean = float("-0.135227") + std = float("0.0909936") data = None class Program_weight_tensor_parameter_271: name = "parameter_271" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-2.40893") - max_val = float("0.508421") - mean = float("-0.828862") - std = float("0.467337") + min_val = float("0.724836") + max_val = float("1.31254") + mean = float("0.989403") + std = float("0.0899495") data = None class Program_weight_tensor_parameter_272: name = "parameter_272" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.853968") - max_val = float("2.18309") - mean = float("1.27545") - std = float("0.208741") + min_val = float("0.0366499") + max_val = float("2579.48") + mean = float("94.136") + std = float("297.849") data = None class Program_weight_tensor_parameter_273: name = "parameter_273" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.011454") - max_val = float("0.0464459") - mean = float("0.0216361") - std = 
float("0.00778855") + min_val = float("-1.21638") + max_val = float("3.00263") + mean = float("0.19362") + std = float("0.597692") data = None class Program_weight_tensor_parameter_274: name = "parameter_274" - shape = [96] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.570491") - max_val = float("0.473016") - mean = float("-0.0532507") - std = float("0.173783") + min_val = float("-1.50522") + max_val = float("2.56014") + mean = float("0.0167357") + std = float("0.128452") data = None class Program_weight_tensor_parameter_275: name = "parameter_275" - shape = [96, 96, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.15411") - max_val = float("0.150524") - mean = float("-0.000241604") - std = float("0.00722176") + min_val = float("-0.396329") + max_val = float("0.0895731") + mean = float("-0.135227") + std = float("0.0909936") data = None class Program_weight_tensor_parameter_276: name = "parameter_276" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-3.16609") - max_val = float("1.88989") - mean = float("0.501666") - std = float("0.861493") + min_val = float("0.702365") + max_val = float("1.48726") + mean = float("0.978343") + std = float("0.0932404") data = None class Program_weight_tensor_parameter_277: name = "parameter_277" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.214988") - max_val = float("2.6299") - mean = float("0.562885") - std = float("0.31708") + min_val = float("0.341776") + max_val = float("10383.4") + mean = float("546.718") + std = float("1146.4") data = None class Program_weight_tensor_parameter_278: name = "parameter_278" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.00763171") - max_val = float("0.154967") - mean = float("0.0323459") - std = float("0.0238839") + min_val = float("-4.04849") + max_val = float("7.68697") + mean = float("0.626776") + std = float("1.72632") data = None class Program_weight_tensor_parameter_279: name = "parameter_279" - shape = [96] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.271678") - max_val = float("0.329389") - mean = float("-0.0147432") - std = float("0.0938868") + min_val = float("-0.496227") + max_val = float("0.685374") + mean = float("0.00417206") + std = float("0.0441635") data = None class Program_weight_tensor_parameter_280: name = "parameter_280" - shape = [96, 192, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.186901") - max_val = float("0.225419") - mean = float("-0.000291508") - std = float("0.0156297") + min_val = float("-1.64459") + max_val = float("0.267005") + mean = float("-0.224858") + std = float("0.296988") data = None class Program_weight_tensor_parameter_281: name = "parameter_281" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-4.92284") - max_val = float("1.57998") - mean = float("0.384603") - std = float("1.04888") + min_val = float("-1.27679") + max_val = float("2.832") + mean = float("1.02513") + std = float("0.465622") data = None class Program_weight_tensor_parameter_282: name = "parameter_282" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.414126") - max_val = float("6.78093") - mean = float("1.69449") - std = float("1.30795") + min_val = float("5.86811") + max_val = float("186650.0") + mean = float("7734.42") + std = float("21154.4") data = None class Program_weight_tensor_parameter_283: name = "parameter_283" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("0.00531954") - max_val = float("0.272953") - 
mean = float("0.0381955") - std = float("0.0353968") + min_val = float("-13.8475") + max_val = float("24.3198") + mean = float("1.04016") + std = float("5.19036") data = None class Program_weight_tensor_parameter_284: name = "parameter_284" - shape = [96] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.17223") - max_val = float("0.443347") - mean = float("0.0466399") - std = float("0.0966002") + min_val = float("-2.35083") + max_val = float("1.83667") + mean = float("-0.000570115") + std = float("0.103887") data = None class Program_weight_tensor_parameter_285: name = "parameter_285" - shape = [96, 192, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.116975") - max_val = float("0.156029") - mean = float("0.000440768") - std = float("0.0149691") + min_val = float("-1.03838") + max_val = float("0.354784") + mean = float("-0.15507") + std = float("0.21939") data = None @@ -3148,10 +3134,10 @@ class Program_weight_tensor_parameter_286: name = "parameter_286" shape = [192] dtype = "float32" - min_val = float("-2.27475") - max_val = float("1.75104") - mean = float("-0.126037") - std = float("0.740702") + min_val = float("-1.45219") + max_val = float("2.97832") + mean = float("0.989616") + std = float("0.41137") data = None @@ -3159,10 +3145,10 @@ class Program_weight_tensor_parameter_287: name = "parameter_287" shape = [192] dtype = "float32" - min_val = float("0.632268") - max_val = float("2.97322") - mean = float("1.08733") - std = float("0.283408") + min_val = float("0.0529003") + max_val = float("11535.5") + mean = float("313.393") + std = float("1268.61") data = None @@ -3170,311 +3156,4734 @@ class Program_weight_tensor_parameter_288: name = "parameter_288" shape = [192] dtype = "float32" - min_val = float("0.0109431") - max_val = float("0.233442") - mean = float("0.0433778") - std = float("0.0318779") + min_val = float("-5.17365") + max_val = float("2.80353") + mean = float("-0.0123197") + std = float("0.835049") data = None class Program_weight_tensor_parameter_289: name = "parameter_289" - shape = [192] + shape = [192, 192, 1, 1] dtype = "float32" - min_val = float("-0.576887") - max_val = float("0.269966") - mean = float("-0.0934605") - std = float("0.118655") + min_val = float("-4.9408") + max_val = float("2.27411") + mean = float("0.000442447") + std = float("0.213835") data = None class Program_weight_tensor_parameter_290: name = "parameter_290" - shape = [192, 128, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0856428") - max_val = float("0.123627") - mean = float("-0.000225745") - std = float("0.00765725") + min_val = float("-1.03838") + max_val = float("0.354784") + mean = float("-0.15507") + std = float("0.21939") data = None class Program_weight_tensor_parameter_291: name = "parameter_291" - shape = [128] + shape = [192] dtype = "float32" - min_val = float("-2.81597") - max_val = float("1.9636") - mean = float("-0.71259") - std = float("0.647835") + min_val = float("-1.73076") + max_val = float("4.22948") + mean = float("0.992476") + std = float("0.513532") data = None class Program_weight_tensor_parameter_292: name = "parameter_292" - shape = [128] + shape = [192] dtype = "float32" - min_val = float("0.311227") - max_val = float("2.8783") - mean = float("1.01845") - std = float("0.278722") + min_val = float("1.28858") + max_val = float("72884.6") + mean = float("1632.57") + std = float("6195.79") data = None class Program_weight_tensor_parameter_293: name = "parameter_293" - shape = [128] + shape = [192] dtype = "float32" - 
min_val = float("0.000843216") - max_val = float("0.0154502") - mean = float("0.00453611") - std = float("0.00230447") + min_val = float("-13.4733") + max_val = float("7.33824") + mean = float("0.00635399") + std = float("2.2987") data = None class Program_weight_tensor_parameter_294: name = "parameter_294" - shape = [128] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("-0.237755") - max_val = float("0.26225") - mean = float("0.00315393") - std = float("0.0867451") + min_val = float("-2.75576") + max_val = float("1.65718") + mean = float("0.00141057") + std = float("0.0847456") data = None class Program_weight_tensor_parameter_295: name = "parameter_295" - shape = [128, 96, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.171773") - max_val = float("0.211127") - mean = float("-0.00142636") - std = float("0.0224525") + min_val = float("-1.54041") + max_val = float("1.11627") + mean = float("-0.2651") + std = float("0.234523") data = None class Program_weight_tensor_parameter_296: name = "parameter_296" - shape = [96] + shape = [192] dtype = "float32" - min_val = float("-0.0180386") - max_val = float("3.78007e-05") - mean = float("-0.00735479") - std = float("0.00450801") + min_val = float("-1.63593") + max_val = float("4.58953") + mean = float("1.00964") + std = float("0.703509") data = None class Program_weight_tensor_parameter_297: name = "parameter_297" - shape = [96, 96, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.30281") - max_val = float("0.123007") - mean = float("-0.00790532") - std = float("0.0180213") + min_val = float("2.05421") + max_val = float("104887.0") + mean = float("4244.7") + std = float("11719.7") data = None class Program_weight_tensor_parameter_298: name = "parameter_298" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-25.8375") + max_val = float("52.7078") + mean = float("1.02666") + std = float("7.5325") data = None class Program_weight_tensor_parameter_299: name = "parameter_299" - shape = [48] + shape = [192, 192, 3, 3] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-2.79383") + max_val = float("1.88844") + mean = float("-0.00365623") + std = float("0.104147") data = None class Program_weight_tensor_parameter_300: name = "parameter_300" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-0.871095") + max_val = float("1.01126") + mean = float("-0.217258") + std = float("0.291713") data = None class Program_weight_tensor_parameter_301: name = "parameter_301" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-1.96925") + max_val = float("3.64939") + mean = float("1.00405") + std = float("0.722218") data = None class Program_weight_tensor_parameter_302: name = "parameter_302" - shape = [48, 48, 1, 1] + shape = [192] dtype = "float32" - min_val = float("-0.0501789") - max_val = float("0.0563261") - mean = float("-0.00170388") - std = float("0.0129798") + min_val = float("0.620813") + max_val = float("27316.4") + mean = float("1238.85") + std = float("3200.83") data = None class Program_weight_tensor_parameter_303: name = "parameter_303" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-12.1711") + max_val = float("17.836") + mean = float("0.562202") + std = float("3.62838") data = None class 
Program_weight_tensor_parameter_304: name = "parameter_304" - shape = [48] + shape = [192, 896, 1, 1] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-3.17099") + max_val = float("1.29685") + mean = float("-0.0023653") + std = float("0.108022") data = None class Program_weight_tensor_parameter_305: name = "parameter_305" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-1.10782") + max_val = float("0.300641") + mean = float("-0.173971") + std = float("0.264032") data = None class Program_weight_tensor_parameter_306: name = "parameter_306" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-1.04342") + max_val = float("2.59244") + mean = float("0.671735") + std = float("0.577557") data = None class Program_weight_tensor_parameter_307: name = "parameter_307" - shape = [48, 48, 3, 3] + shape = [192] dtype = "float32" - min_val = float("-0.0578676") - max_val = float("0.0799749") - mean = float("-0.000509865") - std = float("0.0110281") + min_val = float("0.398919") + max_val = float("62548.8") + mean = float("1538.4") + std = float("5143.75") data = None class Program_weight_tensor_parameter_308: name = "parameter_308" - shape = [48] + shape = [192] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-16.9307") + max_val = float("5.62477") + mean = float("-1.1326") + std = float("2.74663") data = None class Program_weight_tensor_parameter_309: name = "parameter_309" - shape = [48] + shape = [192, 896, 1, 1] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-1.82279") + max_val = float("2.47484") + mean = float("0.00901672") + std = float("0.107436") data = None class Program_weight_tensor_parameter_310: name = "parameter_310" - shape = [48] + shape = [384] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-1.13258") + max_val = float("0.552573") + mean = float("-0.197087") + std = float("0.242407") data = None class Program_weight_tensor_parameter_311: name = "parameter_311" - shape = [48] + shape = [384] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-2.53691") + max_val = float("1.43108") + mean = float("0.682152") + std = float("0.594817") data = None class Program_weight_tensor_parameter_312: name = "parameter_312" - shape = [48, 48, 3, 3] + shape = [384] dtype = "float32" - min_val = float("-0.0925274") - max_val = float("0.0949158") - mean = float("-0.00064859") - std = float("0.0123667") + min_val = float("0.191916") + max_val = float("2843.48") + mean = float("114.134") + std = float("305.508") data = None class Program_weight_tensor_parameter_313: name = "parameter_313" - shape = [48] + shape = [384] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-24.6995") + max_val = float("10.5962") + mean = float("-0.476045") + std = float("2.8163") data = None class Program_weight_tensor_parameter_314: name = "parameter_314" - shape = [48] + shape = [384, 768, 1, 1] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-1.24276") + max_val = float("0.692904") + mean = float("-0.00886373") + std = float("0.0955856") data = None class Program_weight_tensor_parameter_315: name = "parameter_315" - shape = [48] + shape = [768] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-0.552757") + 
max_val = float("0.267353") + mean = float("-0.0446819") + std = float("0.0843754") data = None class Program_weight_tensor_parameter_316: name = "parameter_316" - shape = [48] + shape = [768] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-0.652821") + max_val = float("1.76439") + mean = float("1.00747") + std = float("0.251713") data = None class Program_weight_tensor_parameter_317: name = "parameter_317" - shape = [48, 48, 1, 1] + shape = [768] dtype = "float32" - min_val = float("-0.0727088") - max_val = float("0.0782992") - mean = float("-0.00102365") - std = float("0.0139349") + min_val = float("0.776778") + max_val = float("7746.52") + mean = float("431.196") + std = float("833.862") data = None class Program_weight_tensor_parameter_318: name = "parameter_318" - shape = [48] + shape = [768] dtype = "float32" - min_val = float("0") - max_val = float("0.5") + min_val = float("-41.0995") + max_val = float("25.7342") + mean = float("3.18905") + std = float("8.42379") data = None class Program_weight_tensor_parameter_319: name = "parameter_319" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-1.4092") + max_val = float("1.23702") + mean = float("-0.000496878") + std = float("0.0816775") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.644666") + max_val = float("0.835279") + mean = float("0.00588657") + std = float("0.183308") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.330389") + max_val = float("1.98606") + mean = float("1.01943") + std = float("0.246731") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.17996") + max_val = float("722.077") + mean = float("49.876") + std = float("90.3738") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-8.71472") + max_val = float("7.07804") + mean = float("-0.0401495") + std = float("2.17987") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.945908") + max_val = float("0.673776") + mean = float("-0.0019768") + std = float("0.0900507") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [384] + dtype = "float32" + min_val = float("-0.644666") + max_val = float("0.835279") + mean = float("0.00588657") + std = float("0.183308") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [384] + dtype = "float32" + min_val = float("0.369886") + max_val = float("1.99554") + mean = float("1.01704") + std = float("0.242599") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [384] + dtype = "float32" + min_val = float("1.10774") + max_val = float("5474.29") + mean = float("285.755") + std = float("535.771") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [384] + dtype = "float32" + min_val = float("-18.3584") + max_val = float("28.6474") + mean = float("0.580143") + std = float("5.75435") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = 
float("-0.300868") + max_val = float("0.28201") + mean = float("0.0005649") + std = float("0.0293953") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [384] + dtype = "float32" + min_val = float("-0.428626") + max_val = float("0.483915") + mean = float("0.00707735") + std = float("0.142579") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [384] + dtype = "float32" + min_val = float("0.0548827") + max_val = float("2.17216") + mean = float("1.02118") + std = float("0.25626") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [384] + dtype = "float32" + min_val = float("5.82237") + max_val = float("71105.7") + mean = float("966.456") + std = float("3856.37") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [384] + dtype = "float32" + min_val = float("-74.904") + max_val = float("45.7392") + mean = float("-4.55969") + std = float("15.2973") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-1.28932") + max_val = float("0.64534") + mean = float("-0.00766119") + std = float("0.0588517") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [384] + dtype = "float32" + min_val = float("-0.417859") + max_val = float("0.567516") + mean = float("0.0358606") + std = float("0.145139") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [384] + dtype = "float32" + min_val = float("-0.692244") + max_val = float("2.91955") + mean = float("0.989476") + std = float("0.366097") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [384] + dtype = "float32" + min_val = float("35.6891") + max_val = float("527628.0") + mean = float("15940.6") + std = float("54157.5") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [384] + dtype = "float32" + min_val = float("-1304.56") + max_val = float("1018.05") + mean = float("11.2757") + std = float("221.509") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-1.70677") + max_val = float("0.933125") + mean = float("-0.00166836") + std = float("0.0885713") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [384] + dtype = "float32" + min_val = float("-0.0873874") + max_val = float("0.120578") + mean = float("-0.000954519") + std = float("0.0228511") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [384] + dtype = "float32" + min_val = float("0.237824") + max_val = float("1.58927") + mean = float("0.995856") + std = float("0.165535") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [384] + dtype = "float32" + min_val = float("0.722547") + max_val = float("6460.57") + mean = float("77.691") + std = float("369.085") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [384] + dtype = "float32" + min_val = float("-20.8843") + max_val = float("8.9376") + mean = float("-0.0632021") + std = float("2.59641") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [384, 384, 1, 1] + dtype = 
"float32" + min_val = float("-1.57816") + max_val = float("0.862954") + mean = float("-0.00389417") + std = float("0.0817908") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [384] + dtype = "float32" + min_val = float("-0.0873874") + max_val = float("0.120578") + mean = float("-0.000954519") + std = float("0.0228511") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [384] + dtype = "float32" + min_val = float("-0.0396481") + max_val = float("1.62807") + mean = float("1.00169") + std = float("0.153603") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [384] + dtype = "float32" + min_val = float("6.51221") + max_val = float("5904.92") + mean = float("267.165") + std = float("563.207") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [384] + dtype = "float32" + min_val = float("-26.0803") + max_val = float("22.4756") + mean = float("-0.244403") + std = float("5.5182") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.331772") + max_val = float("0.275082") + mean = float("-0.00114864") + std = float("0.0288421") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [384] + dtype = "float32" + min_val = float("-0.169758") + max_val = float("0.387136") + mean = float("-0.0135191") + std = float("0.0872435") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [384] + dtype = "float32" + min_val = float("0.530164") + max_val = float("1.95399") + mean = float("1.02728") + std = float("0.170907") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [384] + dtype = "float32" + min_val = float("34.4183") + max_val = float("28418.4") + mean = float("1029.76") + std = float("2701.61") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [384] + dtype = "float32" + min_val = float("-84.2497") + max_val = float("28.1431") + mean = float("1.92494") + std = float("14.5184") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.509431") + max_val = float("0.23397") + mean = float("0.000289444") + std = float("0.0306037") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [384] + dtype = "float32" + min_val = float("-0.17013") + max_val = float("0.363103") + mean = float("0.0145125") + std = float("0.0634369") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [384] + dtype = "float32" + min_val = float("0.693201") + max_val = float("1.62355") + mean = float("1.01359") + std = float("0.102217") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [384] + dtype = "float32" + min_val = float("0.931458") + max_val = float("373.701") + mean = float("14.1979") + std = float("26.7127") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [384] + dtype = "float32" + min_val = float("-4.77194") + max_val = float("7.91629") + mean = float("0.274654") + std = float("1.32549") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = 
[384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.446355") + max_val = float("0.493553") + mean = float("0.00261486") + std = float("0.0485342") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [384] + dtype = "float32" + min_val = float("-0.17013") + max_val = float("0.363103") + mean = float("0.0145125") + std = float("0.0634369") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [384] + dtype = "float32" + min_val = float("0.114663") + max_val = float("1.61016") + mean = float("0.991551") + std = float("0.149223") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [384] + dtype = "float32" + min_val = float("7.85773") + max_val = float("1001.22") + mean = float("89.9772") + std = float("108.332") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [384] + dtype = "float32" + min_val = float("-13.9198") + max_val = float("19.3761") + mean = float("1.00742") + std = float("4.17813") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.295433") + max_val = float("0.246348") + mean = float("0.00110397") + std = float("0.0234049") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [384] + dtype = "float32" + min_val = float("-0.234135") + max_val = float("0.22812") + mean = float("0.00559602") + std = float("0.0627099") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [384] + dtype = "float32" + min_val = float("0.597373") + max_val = float("1.56294") + mean = float("1.00758") + std = float("0.141447") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [384] + dtype = "float32" + min_val = float("16.4499") + max_val = float("639.647") + mean = float("101.474") + std = float("86.4824") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [384] + dtype = "float32" + min_val = float("-11.5778") + max_val = float("17.0663") + mean = float("2.01731") + std = float("3.57531") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.234291") + max_val = float("0.284904") + mean = float("0.00264446") + std = float("0.0302637") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [384] + dtype = "float32" + min_val = float("-0.405084") + max_val = float("0.228539") + mean = float("-0.0546357") + std = float("0.106613") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [384] + dtype = "float32" + min_val = float("0.456281") + max_val = float("1.81298") + mean = float("1.01603") + std = float("0.21616") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("9.59515") + max_val = float("11834.9") + mean = float("612.148") + std = float("918.789") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("-247.962") + max_val = float("324.535") + mean = float("-18.2895") + std = float("68.074") + data = None + + +class Program_weight_tensor_parameter_374: + name = 
"parameter_374" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-6.13846") + max_val = float("3.56809") + mean = float("-0.000144253") + std = float("0.0970159") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.108175") + max_val = float("0.143437") + mean = float("-0.0212447") + std = float("0.0285578") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384] + dtype = "float32" + min_val = float("0.556928") + max_val = float("1.33029") + mean = float("0.984681") + std = float("0.116005") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("0.748213") + max_val = float("62636.1") + mean = float("1444.21") + std = float("4777.5") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("-786.339") + max_val = float("426.68") + mean = float("32.1313") + std = float("104.81") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-7.12945") + max_val = float("13.8328") + mean = float("-0.000268657") + std = float("0.0830857") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [1024] + dtype = "float32" + min_val = float("-2.13639e-06") + max_val = float("2.09543e-06") + mean = float("8.64227e-09") + std = float("3.57256e-07") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [1024] + dtype = "float32" + min_val = float("-5.47744") + max_val = float("11.4054") + mean = float("0.768884") + std = float("0.662971") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [1024] + dtype = "float32" + min_val = float("-0.427674") + max_val = float("0.266324") + mean = float("-0.000194783") + std = float("0.0566951") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-8.0062") + max_val = float("2.78176") + mean = float("-5.77928e-05") + std = float("0.0557509") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [2048] + dtype = "float32" + min_val = float("-0.251987") + max_val = float("0.302276") + mean = float("0.000512312") + std = float("0.0388834") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-3.07176") + max_val = float("3.57066") + mean = float("-7.0143e-08") + std = float("0.0401237") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [1024] + dtype = "float32" + min_val = float("-0.373534") + max_val = float("0.402167") + mean = float("0.000830289") + std = float("0.0765843") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [1024] + dtype = "float32" + min_val = float("-2.75952") + max_val = float("2.92771") + mean = float("0.758426") + std = float("0.282416") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [1024] + dtype = "float32" + min_val = float("-0.193343") + max_val = float("0.180378") + mean = float("-0.000779537") + std = float("0.0370855") 
+ data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-1.38014") + max_val = float("1.27733") + mean = float("-9.81423e-07") + std = float("0.0600835") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [1024] + dtype = "float32" + min_val = float("-0.757163") + max_val = float("1.00779") + mean = float("0.000330772") + std = float("0.236465") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [1024] + dtype = "float32" + min_val = float("-0.817671") + max_val = float("2.55534") + mean = float("0.753427") + std = float("0.227651") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [1024] + dtype = "float32" + min_val = float("-0.222994") + max_val = float("0.252568") + mean = float("-0.00120532") + std = float("0.048857") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.870748") + max_val = float("1.17152") + mean = float("-8.51244e-05") + std = float("0.0362422") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [2048] + dtype = "float32" + min_val = float("-0.100256") + max_val = float("0.37127") + mean = float("-0.000329183") + std = float("0.0429112") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-2.07235") + max_val = float("2.28312") + mean = float("-7.78487e-08") + std = float("0.0418402") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [1024] + dtype = "float32" + min_val = float("-1.1905") + max_val = float("1.43001") + mean = float("0.00104808") + std = float("0.299857") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [1024] + dtype = "float32" + min_val = float("-1.34618") + max_val = float("2.57738") + mean = float("0.767945") + std = float("0.300486") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [1024] + dtype = "float32" + min_val = float("-0.119799") + max_val = float("0.13848") + mean = float("-0.000153038") + std = float("0.0233011") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.646145") + max_val = float("0.660627") + mean = float("9.12025e-06") + std = float("0.0406609") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [1024] + dtype = "float32" + min_val = float("-0.376786") + max_val = float("0.283756") + mean = float("0.000858306") + std = float("0.0796666") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [1024] + dtype = "float32" + min_val = float("0.432676") + max_val = float("1.70722") + mean = float("0.772387") + std = float("0.0772969") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [1024] + dtype = "float32" + min_val = float("-0.179557") + max_val = float("0.115916") + mean = float("-0.000489374") + std = float("0.0340337") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.809732") + 
max_val = float("0.561587") + mean = float("-3.63529e-05") + std = float("0.0201157") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [2048] + dtype = "float32" + min_val = float("-0.0614404") + max_val = float("0.170786") + mean = float("-0.000372604") + std = float("0.0228483") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.408439") + max_val = float("0.609131") + mean = float("-1.65619e-06") + std = float("0.01953") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [1024] + dtype = "float32" + min_val = float("-0.132853") + max_val = float("0.116198") + mean = float("0.000532131") + std = float("0.0355694") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [1024] + dtype = "float32" + min_val = float("0.561748") + max_val = float("0.98164") + mean = float("0.774081") + std = float("0.0415141") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [1024] + dtype = "float32" + min_val = float("-0.0332844") + max_val = float("0.0397268") + mean = float("0.000161912") + std = float("0.0099558") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.123777") + max_val = float("0.124455") + mean = float("1.09109e-05") + std = float("0.0259569") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [1024] + dtype = "float32" + min_val = float("-0.044097") + max_val = float("0.0392825") + mean = float("0.000663671") + std = float("0.011796") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [1024] + dtype = "float32" + min_val = float("0.677342") + max_val = float("0.824057") + mean = float("0.772183") + std = float("0.0108191") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [1024] + dtype = "float32" + min_val = float("-0.0510995") + max_val = float("0.0464933") + mean = float("-3.34255e-05") + std = float("0.0157544") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0809581") + max_val = float("0.0763313") + mean = float("-7.16222e-06") + std = float("0.0105854") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [2048] + dtype = "float32" + min_val = float("-0.0301308") + max_val = float("0.0358787") + mean = float("0.000247623") + std = float("0.0140049") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0659331") + max_val = float("0.064373") + mean = float("-1.40326e-06") + std = float("0.0142046") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [1024] + dtype = "float32" + min_val = float("-0.0295178") + max_val = float("0.0281065") + mean = float("1.88381e-05") + std = float("0.00950261") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [1024] + dtype = "float32" + min_val = float("0.644602") + max_val = float("0.848359") + mean = float("0.772298") + std = float("0.0127321") + data = None + + +class 
Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [1024] + dtype = "float32" + min_val = float("-0.0790784") + max_val = float("0.0651222") + mean = float("-0.000211896") + std = float("0.0151313") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.244804") + max_val = float("0.193716") + mean = float("7.12405e-06") + std = float("0.0253563") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [1024] + dtype = "float32" + min_val = float("-3.75718") + max_val = float("-0.734662") + mean = float("-2.18749") + std = float("0.42871") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [1024] + dtype = "float32" + min_val = float("1.61923") + max_val = float("4.43994") + mean = float("3.0808") + std = float("0.254311") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [1024] + dtype = "float32" + min_val = float("0.00308285") + max_val = float("17013.4") + mean = float("25.4014") + std = float("536.038") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [1024] + dtype = "float32" + min_val = float("-4.71495") + max_val = float("23.0879") + mean = float("0.0230467") + std = float("0.959745") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-1.76134") + max_val = float("1.01155") + mean = float("-0.000333937") + std = float("0.0199517") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [768] + dtype = "float32" + min_val = float("-0.019835") + max_val = float("0.0113714") + mean = float("-0.000741206") + std = float("0.00266306") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0753175") + max_val = float("0.130102") + mean = float("-0.000275394") + std = float("0.00174935") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [384] + dtype = "float32" + min_val = float("-1.77417") + max_val = float("0.319995") + mean = float("-0.31086") + std = float("0.291331") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [384] + dtype = "float32" + min_val = float("0.188386") + max_val = float("1.81753") + mean = float("0.609696") + std = float("0.262622") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [384] + dtype = "float32" + min_val = float("6.15935e-05") + max_val = float("4.62814") + mean = float("0.0557435") + std = float("0.276651") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [384] + dtype = "float32" + min_val = float("-0.324677") + max_val = float("0.744616") + mean = float("0.0422052") + std = float("0.0941939") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0909018") + max_val = float("0.0595466") + mean = float("-0.000571428") + std = float("0.0044968") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [384] + dtype = "float32" + min_val = float("-1.77414") + max_val = float("0.320788") + 
mean = float("-0.31081") + std = float("0.291371") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [384] + dtype = "float32" + min_val = float("0.333443") + max_val = float("2.59782") + mean = float("1.02587") + std = float("0.290167") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [384] + dtype = "float32" + min_val = float("0.00526696") + max_val = float("10.6996") + mean = float("0.380233") + std = float("0.946604") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [384] + dtype = "float32" + min_val = float("-1.59457") + max_val = float("1.73203") + mean = float("0.027563") + std = float("0.392866") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0409816") + max_val = float("0.0679231") + mean = float("-6.68397e-05") + std = float("0.00268875") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-2.58257") + max_val = float("0.0340648") + mean = float("-1.56873") + std = float("0.416046") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384] + dtype = "float32" + min_val = float("0.521872") + max_val = float("1.64448") + mean = float("1.13567") + std = float("0.149452") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [384] + dtype = "float32" + min_val = float("0.0434595") + max_val = float("137.616") + mean = float("3.86704") + std = float("10.8376") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [384] + dtype = "float32" + min_val = float("-2.41808") + max_val = float("3.09603") + mean = float("0.231089") + std = float("0.62894") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.054677") + max_val = float("0.0563856") + mean = float("-0.000288685") + std = float("0.00307104") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [384] + dtype = "float32" + min_val = float("-1.93947") + max_val = float("0.64501") + mean = float("-0.575028") + std = float("0.358749") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [384] + dtype = "float32" + min_val = float("0.16358") + max_val = float("2.07281") + mean = float("0.562153") + std = float("0.227419") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [384] + dtype = "float32" + min_val = float("0.000134774") + max_val = float("9.62556") + mean = float("0.118304") + std = float("0.563335") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [384] + dtype = "float32" + min_val = float("-0.543874") + max_val = float("0.774253") + mean = float("0.0423873") + std = float("0.0963768") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0892927") + max_val = float("0.0554288") + mean = float("-0.000566883") + std = float("0.00429646") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [384] + dtype = "float32" + min_val = 
float("-1.93949") + max_val = float("0.646073") + mean = float("-0.575") + std = float("0.35884") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [384] + dtype = "float32" + min_val = float("0.583556") + max_val = float("2.15683") + mean = float("1.08421") + std = float("0.255749") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [384] + dtype = "float32" + min_val = float("0.00783921") + max_val = float("124.799") + mean = float("1.60597") + std = float("7.86036") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [384] + dtype = "float32" + min_val = float("-3.22343") + max_val = float("3.94482") + mean = float("0.0868392") + std = float("0.461958") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0549026") + max_val = float("0.0517603") + mean = float("-0.000166704") + std = float("0.00299084") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [384] + dtype = "float32" + min_val = float("-2.39604") + max_val = float("0.84662") + mean = float("-1.40556") + std = float("0.360638") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [384] + dtype = "float32" + min_val = float("0.454807") + max_val = float("1.91969") + mean = float("1.1665") + std = float("0.148108") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [384] + dtype = "float32" + min_val = float("0.045164") + max_val = float("100.409") + mean = float("2.60368") + std = float("7.74496") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [384] + dtype = "float32" + min_val = float("-1.74076") + max_val = float("2.04402") + mean = float("0.0999496") + std = float("0.364673") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0546141") + max_val = float("0.0505175") + mean = float("-0.000226174") + std = float("0.00309866") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [384] + dtype = "float32" + min_val = float("-1.87645") + max_val = float("0.453192") + mean = float("-0.485461") + std = float("0.3765") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [384] + dtype = "float32" + min_val = float("0.0773511") + max_val = float("2.11967") + mean = float("0.441788") + std = float("0.217778") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [384] + dtype = "float32" + min_val = float("0.000187171") + max_val = float("57.8954") + mean = float("0.270576") + std = float("2.97593") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [384] + dtype = "float32" + min_val = float("-0.586198") + max_val = float("2.71157") + mean = float("0.0563236") + std = float("0.173875") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.192933") + max_val = float("0.0894331") + mean = float("-0.000737868") + std = float("0.00524842") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [384] 
+ dtype = "float32" + min_val = float("-1.8768") + max_val = float("0.453535") + mean = float("-0.485423") + std = float("0.376585") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [384] + dtype = "float32" + min_val = float("0.522396") + max_val = float("2.22479") + mean = float("1.0531") + std = float("0.260023") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [384] + dtype = "float32" + min_val = float("0.0164889") + max_val = float("54.2063") + mean = float("1.7199") + std = float("5.49213") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [384] + dtype = "float32" + min_val = float("-2.79875") + max_val = float("3.1827") + mean = float("0.133888") + std = float("0.520105") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0551643") + max_val = float("0.0464871") + mean = float("-0.000209229") + std = float("0.00312542") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [384] + dtype = "float32" + min_val = float("-2.15679") + max_val = float("0.417994") + mean = float("-1.36728") + std = float("0.27749") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [384] + dtype = "float32" + min_val = float("0.703467") + max_val = float("1.63812") + mean = float("1.14314") + std = float("0.101723") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [384] + dtype = "float32" + min_val = float("0.00919977") + max_val = float("81.76") + mean = float("1.34673") + std = float("4.5747") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [384] + dtype = "float32" + min_val = float("-0.575647") + max_val = float("1.06647") + mean = float("0.035897") + std = float("0.208355") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0647421") + max_val = float("0.0467325") + mean = float("-0.000209626") + std = float("0.00304124") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [384] + dtype = "float32" + min_val = float("-2.92359") + max_val = float("1.66367") + mean = float("-0.760505") + std = float("0.643564") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [384] + dtype = "float32" + min_val = float("0.952469") + max_val = float("2.9182") + mean = float("1.86349") + std = float("0.276369") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [384] + dtype = "float32" + min_val = float("0.00173967") + max_val = float("105.064") + mean = float("1.59074") + std = float("7.33152") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [384] + dtype = "float32" + min_val = float("-4.49694") + max_val = float("2.52903") + mean = float("0.12643") + std = float("0.583266") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.154099") + max_val = float("0.233525") + mean = float("-0.000812832") + std = float("0.00998468") + data = None + + +class Program_weight_tensor_parameter_477: + name = 
"parameter_477" + shape = [384] + dtype = "float32" + min_val = float("-2.24744") + max_val = float("0.681285") + mean = float("-0.777265") + std = float("0.47295") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [384] + dtype = "float32" + min_val = float("0.965764") + max_val = float("2.89406") + mean = float("2.09742") + std = float("0.305508") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [384] + dtype = "float32" + min_val = float("0.000269628") + max_val = float("135.338") + mean = float("1.36164") + std = float("8.44144") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [384] + dtype = "float32" + min_val = float("-5.20344") + max_val = float("3.39669") + mean = float("0.0630202") + std = float("0.514197") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.16414") + max_val = float("0.337926") + mean = float("-0.000419817") + std = float("0.00888272") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [768] + dtype = "float32" + min_val = float("-2.40228") + max_val = float("0.644848") + mean = float("-0.908493") + std = float("0.339383") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [768] + dtype = "float32" + min_val = float("0.530034") + max_val = float("1.90745") + mean = float("0.919715") + std = float("0.149307") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [768] + dtype = "float32" + min_val = float("0.0150888") + max_val = float("1732.27") + mean = float("29.3911") + std = float("114.584") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [768] + dtype = "float32" + min_val = float("-5.94074") + max_val = float("6.69318") + mean = float("0.211305") + std = float("0.84809") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.218355") + max_val = float("0.28161") + mean = float("-0.000294452") + std = float("0.0064194") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [512] + dtype = "float32" + min_val = float("-3.38939") + max_val = float("1.70918") + mean = float("-1.14983") + std = float("0.514302") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [512] + dtype = "float32" + min_val = float("0.521801") + max_val = float("2.22375") + mean = float("1.23032") + std = float("0.302913") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [512] + dtype = "float32" + min_val = float("0.0145403") + max_val = float("54186.8") + mean = float("348.808") + std = float("2906.74") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [512] + dtype = "float32" + min_val = float("-26.3804") + max_val = float("7.97325") + mean = float("-0.484726") + std = float("2.21214") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-2.03421") + max_val = float("3.42772") + mean = float("0.00117115") + std = float("0.0623002") + data = None + + +class 
Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [384] + dtype = "float32" + min_val = float("-0.0823458") + max_val = float("0.12967") + mean = float("-0.00608152") + std = float("0.0201329") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.224516") + max_val = float("0.342766") + mean = float("-0.00262916") + std = float("0.0143209") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-1.98142") + max_val = float("0.451263") + mean = float("-0.346403") + std = float("0.335794") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.0373373") + max_val = float("2.155") + mean = float("0.583284") + std = float("0.423173") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.000185619") + max_val = float("192.815") + mean = float("5.7247") + std = float("24.3824") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-7.38842") + max_val = float("1.98841") + mean = float("-0.198995") + std = float("0.943909") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.580025") + max_val = float("2.15283") + mean = float("0.00513099") + std = float("0.0569125") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-1.97967") + max_val = float("0.447507") + mean = float("-0.345995") + std = float("0.335506") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("0.371023") + max_val = float("2.70259") + mean = float("1.21044") + std = float("0.504894") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("0.00637081") + max_val = float("1674.61") + mean = float("44.1997") + std = float("166.821") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-18.0063") + max_val = float("7.12971") + mean = float("-0.405877") + std = float("3.08127") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.397216") + max_val = float("0.812883") + mean = float("0.0012172") + std = float("0.0227744") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-2.89075") + max_val = float("-0.189656") + mean = float("-1.31556") + std = float("0.398965") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.646243") + max_val = float("2.09349") + mean = float("1.18163") + std = float("0.178004") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.471418") + max_val = float("72426.6") + mean = float("1773.09") + std = float("6247.22") 
+ data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-37.115") + max_val = float("23.7835") + mean = float("0.211371") + std = float("5.61628") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.592805") + max_val = float("0.537074") + mean = float("-0.000652126") + std = float("0.0250761") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-1.94029") + max_val = float("0.531231") + mean = float("-0.278479") + std = float("0.323345") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.0356558") + max_val = float("1.75139") + mean = float("0.446893") + std = float("0.305307") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.000301537") + max_val = float("145.303") + mean = float("1.92494") + std = float("11.1055") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-2.88926") + max_val = float("1.57866") + mean = float("0.0242455") + std = float("0.365955") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.410132") + max_val = float("0.734914") + mean = float("-0.000525589") + std = float("0.0264104") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.94023") + max_val = float("0.548467") + mean = float("-0.277621") + std = float("0.325203") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("0.481263") + max_val = float("2.27039") + mean = float("1.13778") + std = float("0.378291") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("0.0203109") + max_val = float("22478.4") + mean = float("225.796") + std = float("1659.66") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-48.6907") + max_val = float("8.4395") + mean = float("-0.502169") + std = float("4.76403") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.313622") + max_val = float("1.27335") + mean = float("0.00170232") + std = float("0.031685") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-2.50879") + max_val = float("-0.125677") + mean = float("-1.29056") + std = float("0.442856") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.664414") + max_val = float("1.67017") + mean = float("1.19888") + std = float("0.169685") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.287109") + max_val = float("29547.7") + mean = float("678.615") + 
std = float("2860.07") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-7.98132") + max_val = float("18.8422") + mean = float("0.247665") + std = float("2.39087") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.523885") + max_val = float("0.286298") + mean = float("-0.000893893") + std = float("0.0195481") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-1.7571") + max_val = float("0.488578") + mean = float("-0.261083") + std = float("0.338234") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.00549547") + max_val = float("1.67906") + mean = float("0.350066") + std = float("0.250449") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.000263742") + max_val = float("67.4674") + mean = float("1.69806") + std = float("7.49189") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-1.87036") + max_val = float("1.36173") + mean = float("0.00772141") + std = float("0.331707") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.244631") + max_val = float("0.629934") + mean = float("-0.000125584") + std = float("0.0216092") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-1.75687") + max_val = float("0.505733") + mean = float("-0.259961") + std = float("0.340366") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.404833") + max_val = float("1.97797") + mean = float("1.0658") + std = float("0.336907") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("0.0455324") + max_val = float("2298.58") + mean = float("100.245") + std = float("322.102") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-13.5983") + max_val = float("11.9641") + mean = float("0.202523") + std = float("2.83912") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.285037") + max_val = float("0.46813") + mean = float("-0.000664059") + std = float("0.0213735") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-2.49739") + max_val = float("0.140187") + mean = float("-1.24342") + std = float("0.424497") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.651114") + max_val = float("1.77127") + mean = float("1.16792") + std = float("0.169098") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.194451") + max_val = float("4128.54") + 
mean = float("163.103") + std = float("472.833") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-4.64446") + max_val = float("5.59883") + mean = float("-0.14272") + std = float("1.30511") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.284668") + max_val = float("0.265208") + mean = float("0.000179334") + std = float("0.0149656") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [192] + dtype = "float32" + min_val = float("-2.07951") + max_val = float("0.55605") + mean = float("-0.270635") + std = float("0.37776") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [192] + dtype = "float32" + min_val = float("-0.00686515") + max_val = float("0.752308") + mean = float("0.215992") + std = float("0.136444") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [192] + dtype = "float32" + min_val = float("1.77584e-06") + max_val = float("63.1959") + mean = float("0.938992") + std = float("4.94515") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [192] + dtype = "float32" + min_val = float("-2.2753") + max_val = float("1.12971") + mean = float("0.0266441") + std = float("0.358626") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.345764") + max_val = float("0.421705") + mean = float("-0.000348753") + std = float("0.0214336") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [192] + dtype = "float32" + min_val = float("-2.07965") + max_val = float("0.574226") + mean = float("-0.269426") + std = float("0.379863") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [192] + dtype = "float32" + min_val = float("0.3928") + max_val = float("1.96281") + mean = float("0.95567") + std = float("0.310273") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [192] + dtype = "float32" + min_val = float("0.0232563") + max_val = float("879.129") + mean = float("36.8329") + std = float("108.746") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [192] + dtype = "float32" + min_val = float("-17.4775") + max_val = float("7.90044") + mean = float("0.231825") + std = float("2.99101") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.242859") + max_val = float("0.437694") + mean = float("-0.000616003") + std = float("0.0203443") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-2.74147") + max_val = float("-0.0826416") + mean = float("-1.23743") + std = float("0.434625") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192] + dtype = "float32" + min_val = float("0.716526") + max_val = float("1.6211") + mean = float("1.15068") + std = float("0.146716") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [192] + dtype = "float32" + min_val = float("0.199091") + 
max_val = float("12243.2") + mean = float("232.788") + std = float("982.724") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [192] + dtype = "float32" + min_val = float("-2.18119") + max_val = float("8.70009") + mean = float("0.136404") + std = float("1.21707") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.378008") + max_val = float("0.395816") + mean = float("-0.00267221") + std = float("0.0195782") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [192] + dtype = "float32" + min_val = float("-1.21764") + max_val = float("0.465113") + mean = float("-0.2312") + std = float("0.342153") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [192] + dtype = "float32" + min_val = float("-0.0716155") + max_val = float("0.68623") + mean = float("0.188329") + std = float("0.126481") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [192] + dtype = "float32" + min_val = float("7.23985e-05") + max_val = float("104.829") + mean = float("2.37577") + std = float("11.3397") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [192] + dtype = "float32" + min_val = float("-3.9469") + max_val = float("5.6855") + mean = float("-0.0147774") + std = float("0.606084") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.721449") + max_val = float("0.592602") + mean = float("0.000724118") + std = float("0.0311054") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [192] + dtype = "float32" + min_val = float("-1.22134") + max_val = float("0.47785") + mean = float("-0.230452") + std = float("0.344237") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [192] + dtype = "float32" + min_val = float("0.379249") + max_val = float("1.5641") + mean = float("0.845207") + std = float("0.264352") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [192] + dtype = "float32" + min_val = float("0.0326402") + max_val = float("1699.91") + mean = float("79.7938") + std = float("203.032") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [192] + dtype = "float32" + min_val = float("-22.9383") + max_val = float("10.8789") + mean = float("0.262372") + std = float("3.76359") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.342571") + max_val = float("0.553615") + mean = float("-0.000835491") + std = float("0.0244072") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [192] + dtype = "float32" + min_val = float("-2.49862") + max_val = float("-0.1334") + mean = float("-1.25035") + std = float("0.418402") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [192] + dtype = "float32" + min_val = float("0.687417") + max_val = float("1.51921") + mean = float("1.12542") + std = float("0.137076") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [192] + dtype = "float32" + min_val = 
float("0.0234704") + max_val = float("2282.94") + mean = float("135.36") + std = float("361.333") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [192] + dtype = "float32" + min_val = float("-4.30705") + max_val = float("4.49319") + mean = float("-0.0200033") + std = float("0.945224") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.442987") + max_val = float("0.472427") + mean = float("0.00134724") + std = float("0.0235963") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [192] + dtype = "float32" + min_val = float("-1.21821") + max_val = float("0.512429") + mean = float("-0.167023") + std = float("0.295713") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [192] + dtype = "float32" + min_val = float("-0.0291043") + max_val = float("1.56896") + mean = float("0.237779") + std = float("0.215149") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [192] + dtype = "float32" + min_val = float("7.90699e-05") + max_val = float("49.1783") + mean = float("1.93217") + std = float("6.20102") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [192] + dtype = "float32" + min_val = float("-1.39605") + max_val = float("4.77961") + mean = float("0.0637951") + std = float("0.550877") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.645951") + max_val = float("0.375303") + mean = float("-0.00125196") + std = float("0.0318071") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [192] + dtype = "float32" + min_val = float("-1.2185") + max_val = float("0.527355") + mean = float("-0.166844") + std = float("0.298285") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [192] + dtype = "float32" + min_val = float("0.309676") + max_val = float("1.56341") + mean = float("0.760182") + std = float("0.23226") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [192] + dtype = "float32" + min_val = float("0.0361632") + max_val = float("858.774") + mean = float("64.0277") + std = float("129.822") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [192] + dtype = "float32" + min_val = float("-18.5822") + max_val = float("19.3904") + mean = float("-0.138431") + std = float("4.06149") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.495237") + max_val = float("0.583184") + mean = float("0.000581124") + std = float("0.0274539") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [192] + dtype = "float32" + min_val = float("-1.88035") + max_val = float("-0.215406") + mean = float("-1.14935") + std = float("0.32495") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [192] + dtype = "float32" + min_val = float("0.788564") + max_val = float("1.75022") + mean = float("1.1191") + std = float("0.142522") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [192] + dtype 
= "float32" + min_val = float("0.0172999") + max_val = float("311.063") + mean = float("18.5297") + std = float("40.3369") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [192] + dtype = "float32" + min_val = float("-5.09272") + max_val = float("4.68315") + mean = float("0.0566841") + std = float("1.36936") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.220454") + max_val = float("0.337137") + mean = float("-0.000195961") + std = float("0.0169417") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [192] + dtype = "float32" + min_val = float("-2.91697") + max_val = float("1.61235") + mean = float("-0.027192") + std = float("0.752121") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [192] + dtype = "float32" + min_val = float("0.396043") + max_val = float("1.75405") + mean = float("0.900478") + std = float("0.234394") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [192] + dtype = "float32" + min_val = float("0.00833347") + max_val = float("4804.89") + mean = float("67.294") + std = float("384.311") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [192] + dtype = "float32" + min_val = float("-2.08976") + max_val = float("2.12545") + mean = float("-0.027234") + std = float("0.343916") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-1.42325") + max_val = float("3.22093") + mean = float("-0.000215879") + std = float("0.0689701") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [192] + dtype = "float32" + min_val = float("-2.96702") + max_val = float("1.70072") + mean = float("0.100509") + std = float("0.666605") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [192] + dtype = "float32" + min_val = float("0.812343") + max_val = float("5.55805") + mean = float("1.90737") + std = float("0.933933") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [192] + dtype = "float32" + min_val = float("0.00143576") + max_val = float("202.609") + mean = float("3.23072") + std = float("18.3131") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [192] + dtype = "float32" + min_val = float("-0.381013") + max_val = float("0.905162") + mean = float("0.0657245") + std = float("0.161239") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.259077") + max_val = float("0.331409") + mean = float("-0.00132013") + std = float("0.0192623") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [384] + dtype = "float32" + min_val = float("-2.92371") + max_val = float("1.31882") + mean = float("-0.301972") + std = float("0.563982") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [384] + dtype = "float32" + min_val = float("0.646526") + max_val = float("2.49291") + mean = float("1.15848") + std = float("0.262959") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" 
+ shape = [384] + dtype = "float32" + min_val = float("0.13752") + max_val = float("33554.1") + mean = float("441.904") + std = float("2215.12") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [384] + dtype = "float32" + min_val = float("-10.6841") + max_val = float("7.24379") + mean = float("0.245326") + std = float("1.24953") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.420455") + max_val = float("0.352272") + mean = float("-0.00112555") + std = float("0.0151274") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [256] + dtype = "float32" + min_val = float("-2.04434") + max_val = float("1.33309") + mean = float("-0.916794") + std = float("0.542582") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [256] + dtype = "float32" + min_val = float("0.550387") + max_val = float("2.97247") + mean = float("1.34652") + std = float("0.492025") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [256] + dtype = "float32" + min_val = float("0.00440378") + max_val = float("105978.0") + mean = float("456.901") + std = float("6614.54") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [256] + dtype = "float32" + min_val = float("-4.34738") + max_val = float("143.205") + mean = float("1.74977") + std = float("9.28044") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.869804") + max_val = float("15.9901") + mean = float("0.00715179") + std = float("0.19203") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [192] + dtype = "float32" + min_val = float("-0.0987519") + max_val = float("0.0663724") + mean = float("-0.0118358") + std = float("0.0235705") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.405807") + max_val = float("0.264014") + mean = float("-0.0102004") + std = float("0.0319322") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-1.91285") + max_val = float("0.546302") + mean = float("-0.205148") + std = float("0.439731") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.0895864") + max_val = float("3.22778") + mean = float("0.637041") + std = float("0.670036") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("6.87535e-05") + max_val = float("66.5661") + mean = float("1.37836") + std = float("7.15906") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-1.39614") + max_val = float("2.38344") + mean = float("0.0320484") + std = float("0.377448") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.24365") + max_val = float("1.18406") + mean = float("-0.000738032") + std = float("0.0621546") + data = None + + +class 
Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-1.91234") + max_val = float("0.55126") + mean = float("-0.203811") + std = float("0.441223") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.335907") + max_val = float("5.47089") + mean = float("1.09302") + std = float("0.887054") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("0.0187208") + max_val = float("717.249") + mean = float("24.146") + std = float("98.0324") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-8.11887") + max_val = float("10.6872") + mean = float("0.447085") + std = float("1.98274") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.64786") + max_val = float("0.610955") + mean = float("-0.00228054") + std = float("0.0317145") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-2.48733") + max_val = float("-0.0177181") + mean = float("-1.24064") + std = float("0.462674") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.376749") + max_val = float("1.61027") + mean = float("0.92679") + std = float("0.174674") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.458787") + max_val = float("14051.9") + mean = float("581.565") + std = float("2213.73") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-39.2") + max_val = float("92.3483") + mean = float("2.75935") + std = float("16.8796") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.893357") + max_val = float("1.83963") + mean = float("0.00316801") + std = float("0.0516147") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-1.4122") + max_val = float("0.638769") + mean = float("-0.124599") + std = float("0.359237") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.0151502") + max_val = float("1.86528") + mean = float("0.460244") + std = float("0.383531") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("7.42465e-05") + max_val = float("105.345") + mean = float("2.00551") + std = float("11.8642") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-1.33815") + max_val = float("3.52671") + mean = float("0.134428") + std = float("0.582479") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-7.68975") + max_val = float("6.35168") + mean = float("-0.005773") + std = float("0.180518") + data = None + + +class 
Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-1.41481") + max_val = float("0.679644") + mean = float("-0.122232") + std = float("0.362344") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.363177") + max_val = float("2.32912") + mean = float("0.921647") + std = float("0.446698") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.00667803") + max_val = float("904.461") + mean = float("25.9921") + std = float("124.724") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-1.07626") + max_val = float("17.9598") + mean = float("1.18124") + std = float("2.65473") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.68483") + max_val = float("0.545277") + mean = float("-0.00668389") + std = float("0.0418904") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-3.31429") + max_val = float("0.406389") + mean = float("-1.18503") + std = float("0.560632") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.397342") + max_val = float("1.98469") + mean = float("1.02414") + std = float("0.257601") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.171811") + max_val = float("5118.63") + mean = float("129.006") + std = float("573.108") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-16.5294") + max_val = float("56.8048") + mean = float("0.660576") + std = float("9.1639") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.45496") + max_val = float("1.25223") + mean = float("-0.000778588") + std = float("0.0350886") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-1.25001") + max_val = float("0.639741") + mean = float("-0.0989652") + std = float("0.302867") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("-0.429595") + max_val = float("1.27713") + mean = float("0.306498") + std = float("0.23536") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.000129874") + max_val = float("656.233") + mean = float("8.72779") + std = float("66.7746") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.700466") + max_val = float("6.05986") + mean = float("0.144363") + std = float("0.725059") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-4.5365") + max_val = float("0.985421") + mean = float("-0.00920448") + std = float("0.122988") + data = None + + +class 
Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-1.25019") + max_val = float("0.698569") + mean = float("-0.0957422") + std = float("0.306901") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("-0.268583") + max_val = float("1.67072") + mean = float("0.716015") + std = float("0.312801") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.0242012") + max_val = float("692.616") + mean = float("37.3314") + std = float("101.342") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-2.63584") + max_val = float("9.84662") + mean = float("0.502801") + std = float("1.7425") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.71585") + max_val = float("1.8677") + mean = float("-0.00331932") + std = float("0.0418535") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-3.58339") + max_val = float("0.283111") + mean = float("-1.13341") + std = float("0.568821") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.448275") + max_val = float("2.19169") + mean = float("1.04969") + std = float("0.242886") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.187804") + max_val = float("13267.2") + mean = float("371.726") + std = float("1684.34") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-67.2311") + max_val = float("84.7225") + mean = float("0.0843976") + std = float("14.2964") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.840055") + max_val = float("0.994105") + mean = float("-0.00223153") + std = float("0.0530689") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [96] + dtype = "float32" + min_val = float("-0.896599") + max_val = float("0.53856") + mean = float("-0.157416") + std = float("0.295036") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [96] + dtype = "float32" + min_val = float("0.00576366") + max_val = float("1.40649") + mean = float("0.338961") + std = float("0.222205") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [96] + dtype = "float32" + min_val = float("0.000125609") + max_val = float("4.926") + mean = float("0.192034") + std = float("0.563473") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [96] + dtype = "float32" + min_val = float("-1.15434") + max_val = float("1.71827") + mean = float("0.0970238") + std = float("0.329713") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.11751") + max_val = float("1.22849") + mean = float("-0.00515414") + std = float("0.0668462") + data = None + + 
+class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [96] + dtype = "float32" + min_val = float("-0.89747") + max_val = float("0.547756") + mean = float("-0.155531") + std = float("0.300238") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [96] + dtype = "float32" + min_val = float("-0.680114") + max_val = float("1.77938") + mean = float("0.715642") + std = float("0.343738") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [96] + dtype = "float32" + min_val = float("0.00962529") + max_val = float("117.592") + mean = float("6.28867") + std = float("17.7652") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [96] + dtype = "float32" + min_val = float("-8.19355") + max_val = float("8.76688") + mean = float("0.523784") + std = float("1.96973") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.963159") + max_val = float("0.937514") + mean = float("-0.00312149") + std = float("0.0409439") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-2.65947") + max_val = float("0.0663366") + mean = float("-1.07481") + std = float("0.49964") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96] + dtype = "float32" + min_val = float("0.494054") + max_val = float("1.73711") + mean = float("1.00358") + std = float("0.201714") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [96] + dtype = "float32" + min_val = float("0.0670117") + max_val = float("108.685") + mean = float("11.7244") + std = float("22.7056") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [96] + dtype = "float32" + min_val = float("-19.6122") + max_val = float("22.3347") + mean = float("-1.43905") + std = float("6.53676") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.883886") + max_val = float("0.636549") + mean = float("-0.00434231") + std = float("0.0362072") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [96] + dtype = "float32" + min_val = float("-0.971987") + max_val = float("0.722959") + mean = float("-0.144632") + std = float("0.309261") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [96] + dtype = "float32" + min_val = float("-0.272081") + max_val = float("1.61121") + mean = float("0.305009") + std = float("0.272026") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [96] + dtype = "float32" + min_val = float("0.000543445") + max_val = float("31.5598") + mean = float("1.4504") + std = float("3.93834") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [96] + dtype = "float32" + min_val = float("-2.05874") + max_val = float("2.35712") + mean = float("0.153436") + std = float("0.577965") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.77571") + max_val = float("0.727339") + mean = float("-0.012376") + std = float("0.0994884") + data = None 
+ + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [96] + dtype = "float32" + min_val = float("-0.973155") + max_val = float("0.722476") + mean = float("-0.141307") + std = float("0.307416") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [96] + dtype = "float32" + min_val = float("-0.0263993") + max_val = float("2.25472") + mean = float("0.596976") + std = float("0.320417") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [96] + dtype = "float32" + min_val = float("0.0138918") + max_val = float("3176.65") + mean = float("50.1448") + std = float("326.27") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [96] + dtype = "float32" + min_val = float("-11.8577") + max_val = float("18.2503") + mean = float("0.529937") + std = float("2.87528") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.3398") + max_val = float("0.471906") + mean = float("-0.00519289") + std = float("0.054962") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [96] + dtype = "float32" + min_val = float("-3.4571") + max_val = float("0.163505") + mean = float("-1.04781") + std = float("0.565484") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [96] + dtype = "float32" + min_val = float("0.181655") + max_val = float("2.51355") + mean = float("1.04007") + std = float("0.337096") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [96] + dtype = "float32" + min_val = float("0.0450568") + max_val = float("591.311") + mean = float("35.261") + std = float("84.194") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [96] + dtype = "float32" + min_val = float("-53.1661") + max_val = float("29.0565") + mean = float("-0.946483") + std = float("12.2566") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.44326") + max_val = float("0.605019") + mean = float("-0.00302561") + std = float("0.0604205") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [96] + dtype = "float32" + min_val = float("-0.979853") + max_val = float("0.506673") + mean = float("-0.105704") + std = float("0.301591") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [96] + dtype = "float32" + min_val = float("-0.690202") + max_val = float("1.30922") + mean = float("0.212369") + std = float("0.326455") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [96] + dtype = "float32" + min_val = float("0.00319137") + max_val = float("122.847") + mean = float("4.41125") + std = float("14.7791") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [96] + dtype = "float32" + min_val = float("-1.63063") + max_val = float("0.873704") + mean = float("-0.0808103") + std = float("0.375139") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.21185") + max_val = float("1.0658") + mean = float("0.00855805") + std = float("0.102244") + data = None 
+ + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [96] + dtype = "float32" + min_val = float("-0.929882") + max_val = float("0.508894") + mean = float("-0.0994689") + std = float("0.294231") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [96] + dtype = "float32" + min_val = float("-0.652054") + max_val = float("1.42808") + mean = float("0.46451") + std = float("0.343141") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [96] + dtype = "float32" + min_val = float("0.0905742") + max_val = float("4913.13") + mean = float("111.121") + std = float("522.181") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [96] + dtype = "float32" + min_val = float("-6.23794") + max_val = float("20.563") + mean = float("0.310017") + std = float("2.71574") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-1.49733") + max_val = float("0.706858") + mean = float("-0.0040344") + std = float("0.0702472") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [96] + dtype = "float32" + min_val = float("-2.44345") + max_val = float("0.44985") + mean = float("-0.86721") + std = float("0.476335") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [96] + dtype = "float32" + min_val = float("0.709681") + max_val = float("2.12223") + mean = float("1.25465") + std = float("0.222513") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [96] + dtype = "float32" + min_val = float("0.0168317") + max_val = float("328.239") + mean = float("23.0774") + std = float("55.6159") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [96] + dtype = "float32" + min_val = float("-50.6514") + max_val = float("40.9589") + mean = float("0.723976") + std = float("11.3687") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.849899") + max_val = float("0.852562") + mean = float("-0.000185799") + std = float("0.0645014") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [96] + dtype = "float32" + min_val = float("-3.17223") + max_val = float("1.90963") + mean = float("0.534742") + std = float("0.924546") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [96] + dtype = "float32" + min_val = float("-1.03377") + max_val = float("2.62632") + mean = float("0.492338") + std = float("0.520207") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [96] + dtype = "float32" + min_val = float("0.00977803") + max_val = float("734.582") + mean = float("37.8") + std = float("108.323") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [96] + dtype = "float32" + min_val = float("-7.31017") + max_val = float("14.4237") + mean = float("-0.228157") + std = float("2.72443") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-1.95633") + max_val = float("1.65436") + mean = float("-0.017819") + std = float("0.179031") + data = None + + 
+class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [96] + dtype = "float32" + min_val = float("-4.92471") + max_val = float("1.58003") + mean = float("0.384968") + std = float("1.04986") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [96] + dtype = "float32" + min_val = float("0.381893") + max_val = float("6.77746") + mean = float("1.68003") + std = float("1.3141") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [96] + dtype = "float32" + min_val = float("0.0102556") + max_val = float("63.1576") + mean = float("1.94865") + std = float("8.72854") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [96] + dtype = "float32" + min_val = float("-1.81576") + max_val = float("1.31792") + mean = float("0.00314065") + std = float("0.477093") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.476384") + max_val = float("0.704714") + mean = float("-0.00467472") + std = float("0.0415664") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [192] + dtype = "float32" + min_val = float("-2.2747") + max_val = float("1.80222") + mean = float("-0.130603") + std = float("0.756459") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [192] + dtype = "float32" + min_val = float("0.466035") + max_val = float("2.96638") + mean = float("1.07147") + std = float("0.320539") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [192] + dtype = "float32" + min_val = float("0.0464299") + max_val = float("1042.66") + mean = float("38.2298") + std = float("121.792") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [192] + dtype = "float32" + min_val = float("-9.56391") + max_val = float("27.4888") + mean = float("0.648476") + std = float("3.1584") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-2.4927") + max_val = float("0.935874") + mean = float("-0.00472509") + std = float("0.0589256") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [128] + dtype = "float32" + min_val = float("-2.8142") + max_val = float("1.94884") + mean = float("-0.734722") + std = float("0.677613") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [128] + dtype = "float32" + min_val = float("-0.229859") + max_val = float("2.99053") + mean = float("0.992786") + std = float("0.396784") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [128] + dtype = "float32" + min_val = float("2.39451") + max_val = float("125679.0") + mean = float("4054.76") + std = float("14301.4") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [128] + dtype = "float32" + min_val = float("-271.424") + max_val = float("158.592") + mean = float("-11.2378") + std = float("49.0082") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-5.10604") + max_val = float("3.19008") + mean = float("-0.0220007") + std = float("0.265626") + data = None + + 
+class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [96] + dtype = "float32" + min_val = float("-0.161611") + max_val = float("0.37311") + mean = float("0.00734357") + std = float("0.0624152") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-1.50057") + max_val = float("1.5759") + mean = float("0.00741555") + std = float("0.12383") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" shape = [48] dtype = "float32" min_val = float("0") @@ -3482,8 +7891,8 @@ class Program_weight_tensor_parameter_319: data = None -class Program_weight_tensor_parameter_320: - name = "parameter_320" +class Program_weight_tensor_parameter_719: + name = "parameter_719" shape = [48] dtype = "float32" min_val = float("0") @@ -3491,8 +7900,8 @@ class Program_weight_tensor_parameter_320: data = None -class Program_weight_tensor_parameter_321: - name = "parameter_321" +class Program_weight_tensor_parameter_720: + name = "parameter_720" shape = [48] dtype = "float32" min_val = float("0") @@ -3500,19 +7909,75 @@ class Program_weight_tensor_parameter_321: data = None -class Program_weight_tensor_parameter_322: - name = "parameter_322" +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-2.83139") + max_val = float("3.12402") + mean = float("0.00934544") + std = float("0.243272") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0621898") - max_val = float("0.0692526") - mean = float("-0.000822014") - std = float("0.0111057") + min_val = float("-1.103") + max_val = float("1.0228") + mean = float("0.00321872") + std = float("0.113578") data = None -class Program_weight_tensor_parameter_323: - name = "parameter_323" +class Program_weight_tensor_parameter_728: + name = "parameter_728" shape = [48] dtype = "float32" min_val = float("0") @@ -3520,8 +7985,8 @@ class Program_weight_tensor_parameter_323: data = None -class Program_weight_tensor_parameter_324: - name = "parameter_324" +class Program_weight_tensor_parameter_729: + name = "parameter_729" shape = [48] dtype = "float32" min_val = float("0") @@ -3529,8 +7994,8 @@ class Program_weight_tensor_parameter_324: data = None -class Program_weight_tensor_parameter_325: - name = "parameter_325" +class Program_weight_tensor_parameter_730: + name = "parameter_730" shape = [48] dtype = "float32" min_val = float("0") @@ -3538,8 +8003,8 @@ class Program_weight_tensor_parameter_325: data = None -class 
Program_weight_tensor_parameter_326: - name = "parameter_326" +class Program_weight_tensor_parameter_731: + name = "parameter_731" shape = [48] dtype = "float32" min_val = float("0") @@ -3547,19 +8012,19 @@ class Program_weight_tensor_parameter_326: data = None -class Program_weight_tensor_parameter_327: - name = "parameter_327" +class Program_weight_tensor_parameter_732: + name = "parameter_732" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.11162") - max_val = float("0.0943574") - mean = float("-0.000368661") - std = float("0.0125785") + min_val = float("-2.92076") + max_val = float("1.62643") + mean = float("-0.0145573") + std = float("0.236441") data = None -class Program_weight_tensor_parameter_328: - name = "parameter_328" +class Program_weight_tensor_parameter_733: + name = "parameter_733" shape = [48] dtype = "float32" min_val = float("0") @@ -3567,8 +8032,8 @@ class Program_weight_tensor_parameter_328: data = None -class Program_weight_tensor_parameter_329: - name = "parameter_329" +class Program_weight_tensor_parameter_734: + name = "parameter_734" shape = [48] dtype = "float32" min_val = float("0") @@ -3576,8 +8041,8 @@ class Program_weight_tensor_parameter_329: data = None -class Program_weight_tensor_parameter_330: - name = "parameter_330" +class Program_weight_tensor_parameter_735: + name = "parameter_735" shape = [48] dtype = "float32" min_val = float("0") @@ -3585,8 +8050,8 @@ class Program_weight_tensor_parameter_330: data = None -class Program_weight_tensor_parameter_331: - name = "parameter_331" +class Program_weight_tensor_parameter_736: + name = "parameter_736" shape = [48] dtype = "float32" min_val = float("0") @@ -3594,19 +8059,19 @@ class Program_weight_tensor_parameter_331: data = None -class Program_weight_tensor_parameter_332: - name = "parameter_332" +class Program_weight_tensor_parameter_737: + name = "parameter_737" shape = [48, 48, 1, 1] dtype = "float32" - min_val = float("-0.0944494") - max_val = float("0.0702451") - mean = float("-0.00185301") - std = float("0.0172184") + min_val = float("-1.61531") + max_val = float("0.722945") + mean = float("-0.0152863") + std = float("0.122929") data = None -class Program_weight_tensor_parameter_333: - name = "parameter_333" +class Program_weight_tensor_parameter_738: + name = "parameter_738" shape = [48] dtype = "float32" min_val = float("0") @@ -3614,8 +8079,8 @@ class Program_weight_tensor_parameter_333: data = None -class Program_weight_tensor_parameter_334: - name = "parameter_334" +class Program_weight_tensor_parameter_739: + name = "parameter_739" shape = [48] dtype = "float32" min_val = float("0") @@ -3623,8 +8088,8 @@ class Program_weight_tensor_parameter_334: data = None -class Program_weight_tensor_parameter_335: - name = "parameter_335" +class Program_weight_tensor_parameter_740: + name = "parameter_740" shape = [48] dtype = "float32" min_val = float("0") @@ -3632,8 +8097,8 @@ class Program_weight_tensor_parameter_335: data = None -class Program_weight_tensor_parameter_336: - name = "parameter_336" +class Program_weight_tensor_parameter_741: + name = "parameter_741" shape = [48] dtype = "float32" min_val = float("0") @@ -3641,19 +8106,19 @@ class Program_weight_tensor_parameter_336: data = None -class Program_weight_tensor_parameter_337: - name = "parameter_337" +class Program_weight_tensor_parameter_742: + name = "parameter_742" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.0691644") - max_val = float("0.0974384") - mean = float("-0.000506655") - std = float("0.011691") + 
min_val = float("-9.07419") + max_val = float("3.91485") + mean = float("-0.0791542") + std = float("0.545463") data = None -class Program_weight_tensor_parameter_338: - name = "parameter_338" +class Program_weight_tensor_parameter_743: + name = "parameter_743" shape = [48] dtype = "float32" min_val = float("0") @@ -3661,8 +8126,8 @@ class Program_weight_tensor_parameter_338: data = None -class Program_weight_tensor_parameter_339: - name = "parameter_339" +class Program_weight_tensor_parameter_744: + name = "parameter_744" shape = [48] dtype = "float32" min_val = float("0") @@ -3670,8 +8135,8 @@ class Program_weight_tensor_parameter_339: data = None -class Program_weight_tensor_parameter_340: - name = "parameter_340" +class Program_weight_tensor_parameter_745: + name = "parameter_745" shape = [48] dtype = "float32" min_val = float("0") @@ -3679,8 +8144,8 @@ class Program_weight_tensor_parameter_340: data = None -class Program_weight_tensor_parameter_341: - name = "parameter_341" +class Program_weight_tensor_parameter_746: + name = "parameter_746" shape = [48] dtype = "float32" min_val = float("0") @@ -3688,19 +8153,19 @@ class Program_weight_tensor_parameter_341: data = None -class Program_weight_tensor_parameter_342: - name = "parameter_342" +class Program_weight_tensor_parameter_747: + name = "parameter_747" shape = [48, 48, 3, 3] dtype = "float32" - min_val = float("-0.133213") - max_val = float("0.0905212") - mean = float("-0.000334254") - std = float("0.0134452") + min_val = float("-6.23405") + max_val = float("6.69712") + mean = float("-0.00447183") + std = float("0.74018") data = None -class Program_weight_tensor_parameter_343: - name = "parameter_343" +class Program_weight_tensor_parameter_748: + name = "parameter_748" shape = [48] dtype = "float32" min_val = float("0") @@ -3708,8 +8173,8 @@ class Program_weight_tensor_parameter_343: data = None -class Program_weight_tensor_parameter_344: - name = "parameter_344" +class Program_weight_tensor_parameter_749: + name = "parameter_749" shape = [48] dtype = "float32" min_val = float("0") @@ -3717,8 +8182,8 @@ class Program_weight_tensor_parameter_344: data = None -class Program_weight_tensor_parameter_345: - name = "parameter_345" +class Program_weight_tensor_parameter_750: + name = "parameter_750" shape = [48] dtype = "float32" min_val = float("0") @@ -3726,8 +8191,8 @@ class Program_weight_tensor_parameter_345: data = None -class Program_weight_tensor_parameter_346: - name = "parameter_346" +class Program_weight_tensor_parameter_751: + name = "parameter_751" shape = [48] dtype = "float32" min_val = float("0") @@ -3735,19 +8200,160 @@ class Program_weight_tensor_parameter_346: data = None -class Program_weight_tensor_parameter_347: - name = "parameter_347" +class Program_weight_tensor_parameter_752: + name = "parameter_752" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-7.99359") + max_val = float("7.76186") + mean = float("0.0820972") + std = float("1.09773") + data = None + + +class Program_weight_tensor_parameter_753: + name = "parameter_753" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_754: + name = "parameter_754" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_755: + name = "parameter_755" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class 
Program_weight_tensor_parameter_756: + name = "parameter_756" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_757: + name = "parameter_757" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-10.0014") + max_val = float("5.47988") + mean = float("0.0621421") + std = float("0.658574") + data = None + + +class Program_weight_tensor_parameter_758: + name = "parameter_758" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_759: + name = "parameter_759" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_760: + name = "parameter_760" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_761: + name = "parameter_761" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_762: + name = "parameter_762" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-12.9982") + max_val = float("11.0176") + mean = float("-0.0784427") + std = float("1.11962") + data = None + + +class Program_weight_tensor_parameter_763: + name = "parameter_763" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_764: + name = "parameter_764" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_765: + name = "parameter_765" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_766: + name = "parameter_766" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_767: + name = "parameter_767" shape = [48, 96, 1, 1] dtype = "float32" - min_val = float("-0.17806") - max_val = float("0.14305") - mean = float("-0.00229242") - std = float("0.0246641") + min_val = float("-59.2065") + max_val = float("44.9563") + mean = float("-0.705466") + std = float("5.28086") data = None -class Program_weight_tensor_parameter_348: - name = "parameter_348" +class Program_weight_tensor_parameter_768: + name = "parameter_768" shape = [48] dtype = "float32" min_val = float("0") @@ -3755,8 +8361,8 @@ class Program_weight_tensor_parameter_348: data = None -class Program_weight_tensor_parameter_349: - name = "parameter_349" +class Program_weight_tensor_parameter_769: + name = "parameter_769" shape = [48] dtype = "float32" min_val = float("0") @@ -3764,8 +8370,8 @@ class Program_weight_tensor_parameter_349: data = None -class Program_weight_tensor_parameter_350: - name = "parameter_350" +class Program_weight_tensor_parameter_770: + name = "parameter_770" shape = [48] dtype = "float32" min_val = float("0") @@ -3773,8 +8379,8 @@ class Program_weight_tensor_parameter_350: data = None -class Program_weight_tensor_parameter_351: - name = "parameter_351" +class Program_weight_tensor_parameter_771: + name = "parameter_771" shape = [48] dtype = "float32" min_val = float("0") @@ -3782,74 +8388,74 @@ class Program_weight_tensor_parameter_351: data = None -class Program_weight_tensor_parameter_352: - name = "parameter_352" +class Program_weight_tensor_parameter_772: + name = 
"parameter_772" shape = [48, 96, 1, 1] dtype = "float32" - min_val = float("-0.135012") - max_val = float("0.178483") - mean = float("-0.0004429") - std = float("0.0226955") + min_val = float("-2.11481") + max_val = float("1.29398") + mean = float("-0.0184801") + std = float("0.17633") data = None -class Program_weight_tensor_parameter_353: - name = "parameter_353" +class Program_weight_tensor_parameter_773: + name = "parameter_773" shape = [96] dtype = "float32" - min_val = float("-3.40701") - max_val = float("3.27538") - mean = float("0.329531") - std = float("1.14502") + min_val = float("-8.09242") + max_val = float("7.25992") + mean = float("0.75209") + std = float("2.67553") data = None -class Program_weight_tensor_parameter_354: - name = "parameter_354" +class Program_weight_tensor_parameter_774: + name = "parameter_774" shape = [96] dtype = "float32" - min_val = float("0.865919") - max_val = float("4.91404") - mean = float("1.91603") - std = float("0.752783") + min_val = float("-8.02644") + max_val = float("10.6649") + mean = float("2.05862") + std = float("3.39567") data = None -class Program_weight_tensor_parameter_355: - name = "parameter_355" +class Program_weight_tensor_parameter_775: + name = "parameter_775" shape = [96] dtype = "float32" - min_val = float("0.705639") - max_val = float("32.368") - mean = float("2.73559") - std = float("3.53943") + min_val = float("544.401") + max_val = float("9795570.0") + mean = float("868764.0") + std = float("1759410.0") data = None -class Program_weight_tensor_parameter_356: - name = "parameter_356" +class Program_weight_tensor_parameter_776: + name = "parameter_776" shape = [96] dtype = "float32" - min_val = float("-1.47426") - max_val = float("2.58312") - mean = float("-0.286183") - std = float("0.722428") + min_val = float("-3150.84") + max_val = float("1801.7") + mean = float("-292.814") + std = float("845.923") data = None -class Program_weight_tensor_parameter_357: - name = "parameter_357" +class Program_weight_tensor_parameter_777: + name = "parameter_777" shape = [96, 64, 3, 3] dtype = "float32" - min_val = float("-0.110689") - max_val = float("0.13859") - mean = float("-0.000360127") - std = float("0.0133189") + min_val = float("-16.205") + max_val = float("9.48416") + mean = float("-0.195033") + std = float("1.32428") data = None -class Program_weight_tensor_parameter_358: - name = "parameter_358" +class Program_weight_tensor_parameter_778: + name = "parameter_778" shape = [64] dtype = "float32" min_val = float("0") @@ -3857,8 +8463,8 @@ class Program_weight_tensor_parameter_358: data = None -class Program_weight_tensor_parameter_359: - name = "parameter_359" +class Program_weight_tensor_parameter_779: + name = "parameter_779" shape = [64] dtype = "float32" min_val = float("0") @@ -3866,8 +8472,8 @@ class Program_weight_tensor_parameter_359: data = None -class Program_weight_tensor_parameter_360: - name = "parameter_360" +class Program_weight_tensor_parameter_780: + name = "parameter_780" shape = [64] dtype = "float32" min_val = float("0") @@ -3875,8 +8481,8 @@ class Program_weight_tensor_parameter_360: data = None -class Program_weight_tensor_parameter_361: - name = "parameter_361" +class Program_weight_tensor_parameter_781: + name = "parameter_781" shape = [64] dtype = "float32" min_val = float("0") @@ -3884,19 +8490,19 @@ class Program_weight_tensor_parameter_361: data = None -class Program_weight_tensor_parameter_362: - name = "parameter_362" +class Program_weight_tensor_parameter_782: + name = "parameter_782" shape = [64, 
32, 3, 3] dtype = "float32" - min_val = float("-0.179264") - max_val = float("0.162144") - mean = float("-0.000679023") - std = float("0.020536") + min_val = float("-39.064") + max_val = float("47.84") + mean = float("0.203101") + std = float("4.43152") data = None -class Program_weight_tensor_parameter_363: - name = "parameter_363" +class Program_weight_tensor_parameter_783: + name = "parameter_783" shape = [32] dtype = "float32" min_val = float("0") @@ -3904,8 +8510,8 @@ class Program_weight_tensor_parameter_363: data = None -class Program_weight_tensor_parameter_364: - name = "parameter_364" +class Program_weight_tensor_parameter_784: + name = "parameter_784" shape = [32] dtype = "float32" min_val = float("0") @@ -3913,8 +8519,8 @@ class Program_weight_tensor_parameter_364: data = None -class Program_weight_tensor_parameter_365: - name = "parameter_365" +class Program_weight_tensor_parameter_785: + name = "parameter_785" shape = [32] dtype = "float32" min_val = float("0") @@ -3922,8 +8528,8 @@ class Program_weight_tensor_parameter_365: data = None -class Program_weight_tensor_parameter_366: - name = "parameter_366" +class Program_weight_tensor_parameter_786: + name = "parameter_786" shape = [32] dtype = "float32" min_val = float("0") @@ -3931,19 +8537,19 @@ class Program_weight_tensor_parameter_366: data = None -class Program_weight_tensor_parameter_367: - name = "parameter_367" +class Program_weight_tensor_parameter_787: + name = "parameter_787" shape = [32, 32, 3, 3] dtype = "float32" - min_val = float("-0.347786") - max_val = float("0.218964") - mean = float("-0.000199571") - std = float("0.0261033") + min_val = float("-18.7778") + max_val = float("17.8983") + mean = float("-0.451057") + std = float("2.25346") data = None -class Program_weight_tensor_parameter_368: - name = "parameter_368" +class Program_weight_tensor_parameter_788: + name = "parameter_788" shape = [32] dtype = "float32" min_val = float("0") @@ -3951,8 +8557,8 @@ class Program_weight_tensor_parameter_368: data = None -class Program_weight_tensor_parameter_369: - name = "parameter_369" +class Program_weight_tensor_parameter_789: + name = "parameter_789" shape = [32] dtype = "float32" min_val = float("0") @@ -3960,8 +8566,8 @@ class Program_weight_tensor_parameter_369: data = None -class Program_weight_tensor_parameter_370: - name = "parameter_370" +class Program_weight_tensor_parameter_790: + name = "parameter_790" shape = [32] dtype = "float32" min_val = float("0") @@ -3969,8 +8575,8 @@ class Program_weight_tensor_parameter_370: data = None -class Program_weight_tensor_parameter_371: - name = "parameter_371" +class Program_weight_tensor_parameter_791: + name = "parameter_791" shape = [32] dtype = "float32" min_val = float("0") @@ -3978,12 +8584,12 @@ class Program_weight_tensor_parameter_371: data = None -class Program_weight_tensor_parameter_372: - name = "parameter_372" +class Program_weight_tensor_parameter_792: + name = "parameter_792" shape = [32, 3, 3, 3] dtype = "float32" - min_val = float("-0.317155") - max_val = float("0.280865") - mean = float("-0.00214957") - std = float("0.0702742") + min_val = float("-8.84763") + max_val = float("9.81836") + mean = float("-0.13036") + std = float("2.17947") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt index 0cf23b344..d7d509a2f 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -82564cb272bfe4052ec183285332a6999eb8f2e9097ea2f1faf3c1a1650939fa \ No newline at end of file +2474d5c0140e3ca8b342671eaefb285e6ee8c6c96b775962f701e40c7ee40211 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py index 765fadabf..f4ab533fd 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/input_meta.py @@ -1,45 +1,38 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3549, 10] + shape = [2, 3549] dtype = "float32" - min_val = float("1.49258e-10") - max_val = float("0.896854") - mean = float("0.00647097") - std = float("0.0257209") + max_val = float("26.0") + mean = float("0.0874894") + std = float("0.880638") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 3549, 4] + shape = [2, 49, 3549] dtype = "float32" - min_val = float("-112.857") - max_val = float("515.47") - mean = float("207.765") - std = float("123.433") + max_val = float("0.980323") + mean = float("0.000678688") + std = float("0.0200074") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [3549, 2] + shape = [2, 49, 3549] dtype = "float32" - min_val = float("4.0") - max_val = float("412.0") - mean = float("208.0") - std = float("120.038") + max_val = float("1.0") + mean = float("0.0017855") + std = float("0.0422174") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [3549, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None + shape = [2, 1] + dtype = "int32" + data = [0, 1] class Program_weight_tensor_data_4: @@ -63,9 +56,9 @@ class Program_weight_tensor_data_5: class Program_weight_tensor_data_6: name = "data_6" - shape = [2, 49, 1] + shape = [2, 49, 3549] dtype = "float32" - max_val = float("1.0") - mean = float("0.571429") - std = float("0.494872") + max_val = float("0.795764") + mean = float("4.47944e-05") + std = float("0.00452556") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py index 2cdd157ba..2cc272861 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_5/model.py @@ -7,498 +7,238 @@ def __init__(self): def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [2] + full_int_array_0 = [1] - # pd_op.unsqueeze: (2x49x1x4xf32) <- (2x49x4xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) - del data_5 + # pd_op.unsqueeze: (2x1x3549xf32) <- (2x3549xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] + # pd_op.full: (xf32) <- () + full_0 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) - # pd_op.unsqueeze: (2x1x3549x4xf32) <- (2x3549x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) - del data_1, full_int_array_1 + # pd_op.greater_than: (2x1x3549xb) <- (2x1x3549xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 - # 
pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 49, 1] - # pd_op.slice: (2x49x1x2xf32) <- (2x49x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] - ) + # pd_op.tile: (2x49x3549xb) <- (2x1x3549xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2147483647] + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) - # pd_op.slice: (2x49x1x2xf32) <- (2x49x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.slice: (2x1x3549x2xf32) <- (2x1x3549x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 + # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 - # pd_op.slice: (2x1x3549x2xf32) <- (2x1x3549x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("49"), paddle.int32, paddle.core.CPUPlace() ) - del full_int_array_0, full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (2x49x3549x2xf32) <- (2x49x1x2xf32, 2x1x3549x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (2x49x3549x2xf32) <- (2x49x1x2xf32, 2x1x3549x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - # pd_op.subtract: (2x49x3549x2xf32) <- (2x49x3549x2xf32, 2x49x3549x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() + # pd_op.one_hot: (2x3549x49xf32) <- (2x3549xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 ) + del argmax_0, full_2 - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.transpose: (2x49x3549xf32) <- (2x3549x49xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 - # pd_op.clip: (2x49x3549x2xf32) <- (2x49x3549x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) - del subtract_0 + # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: (2x49x3549xf32) <- (2x49x3549x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (2x49x1x2xf32) <- (2x49x1x2xf32, 2x49x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (2x49x1x2xf32) <- (2x49x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) - del subtract_1 - - # pd_op.prod: (2x49x1xf32) <- (2x49x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del 
clip_1 + full_int_array_2 = [-2] - # pd_op.subtract: (2x1x3549x2xf32) <- (2x1x3549x2xf32, 2x1x3549x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (2x1x3549x2xf32) <- (2x1x3549x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) - del full_1, subtract_2 - - # pd_op.prod: (2x1x3549xf32) <- (2x1x3549x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 - - # pd_op.add: (2x49x3549xf32) <- (2x49x1xf32, 2x1x3549xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 + # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - # pd_op.subtract: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 + # pd_op.argmax: (2x3549xi64) <- (2x49x3549xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) - del subtract_3 - - # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (2x10x3549xf32) <- (2x3549x10xf32) - transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) - del data_0 - - # pd_op.full: (1xf64) <- () full_3 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("2"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_5 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() + [1], float("49"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.arange: (2xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_3, full_4, full_5, dtype="int32") - del full_3, full_4, full_5 + # pd_op.scale: (2x1xi32) <- (2x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 - # pd_op.unsqueeze: (2x1xi32) <- (2xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [1, 49] - - # pd_op.tile: (2x49xi32) <- (2x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) - del full_int_array_5 + # pd_op.add: (2x3549xi64) <- (2x3549xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 - # pd_op.squeeze: (2x49xi32) <- (2x49x1xi32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + # pd_op.flatten: (98xi32) <- (2x49x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) del data_4 - # builtin.combine: ([2x49xi32, 2x49xi32]) <- (2x49xi32, 2x49xi32) - combine_0 = [tile_0, squeeze_0] - del squeeze_0, tile_0 - - # pd_op.stack: (2x49x2xi32) <- ([2x49xi32, 2x49xi32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 - - # pd_op.gather_nd: (2x49x3549xf32) <- (2x10x3549xf32, 2x49x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) - del stack_0, transpose_0 - - # pd_op.pow: (2x49x3549xf32) <- (2x49x3549xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, 
float("1")) - del gather_nd_0 - - # pd_op.pow: (2x49x3549xf32) <- (2x49x3549xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) - del multiply_0 - - # pd_op.scale: (3549x1xf32) <- (3549x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) - del data_3, full_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_6 = [0, 1] - - # pd_op.unsqueeze: (1x1x3549x2xf32) <- (3549x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) - del data_2 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1x3549x1xf32, 1x1x3549x1xf32]) <- (1x1x3549x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_6) - del unsqueeze_3 - - # builtin.split: (1x1x3549x1xf32, 1x1x3549x1xf32) <- ([1x1x3549x1xf32, 1x1x3549x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32]) <- (2x49x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_6) - del full_6, unsqueeze_0 - - # builtin.split: (2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32) <- ([2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32, 2x49x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - - # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - - # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - - # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) + # pd_op.flatten: (7098xi64) <- (2x3549xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32]) <- (2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32) - combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 + # pd_op.gather: (7098xi32) <- (98xi32, 7098xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 - # pd_op.concat: (2x49x3549x4xf32) <- ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_7) - del combine_1 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [2, 3549] - # pd_op.min: (2x49x3549xf32) <- (2x49x3549x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0 + # pd_op.reshape: (2x3549xi32) <- (7098xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 # pd_op.full: (xf32) <- () - full_8 = paddle._C_ops.full( - [], - 
float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_8) - del min_0 - - # pd_op.unsqueeze: (1x1x3549x1xf32) <- (3549x1xf32, 2xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) - del full_int_array_6, scale_1 - - # pd_op.add: (2x49x1x1xf32) <- (2x49x1x1xf32, 2x49x1x1xf32) - add_1 = paddle._C_ops.add(split_2, split_4) - del split_2, split_4 + # pd_op.greater_than: (2x3549xb) <- (2x3549xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x49x1x1xf32) <- (2x49x1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(add_1, full_9, float("0"), True) - del add_1 - - # pd_op.add: (2x49x1x1xf32) <- (2x49x1x1xf32, 2x49x1x1xf32) - add_2 = paddle._C_ops.add(split_3, split_5) - del split_3, split_5 - - # pd_op.scale: (2x49x1x1xf32) <- (2x49x1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(add_2, full_9, float("0"), True) - del add_2, full_9 - - # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) - subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) - - # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x3549x1xf32) - subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) - del subtract_8 - - # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) - - # pd_op.subtract: (2x49x3549x1xf32) <- (1x1x3549x1xf32, 2x49x3549x1xf32) - subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) - del subtract_10 - - # pd_op.add: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) - add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) - del scale_2 - - # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x3549x1xf32, 1x1x3549x1xf32) - subtract_12 = paddle._C_ops.subtract(add_3, split_0) - del add_3, split_0 - - # pd_op.add: (2x49x3549x1xf32) <- (2x49x1x1xf32, 1x1x3549x1xf32) - add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) - del scale_3, unsqueeze_4 - - # pd_op.subtract: (2x49x3549x1xf32) <- (2x49x3549x1xf32, 1x1x3549x1xf32) - subtract_13 = paddle._C_ops.subtract(add_4, split_1) - del add_4, split_1 - - # builtin.combine: ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32]) <- (2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32) - combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] - del subtract_11, subtract_12, subtract_13, subtract_9 - - # pd_op.concat: (2x49x3549x4xf32) <- ([2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32, 2x49x3549x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_2, full_7) - del combine_2, full_7 - - # pd_op.min: (2x49x3549xf32) <- (2x49x3549x4xf32, 1xi64) - min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) - del concat_1 - - # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) - greater_than_2 = paddle._C_ops.greater_than(min_1, full_8) - del full_8, min_1 - - # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) - cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) - cast_1 = paddle._C_ops.cast(greater_than_2, paddle.float32) - del greater_than_2 - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 
2x49x1xf32) - multiply_2 = paddle._C_ops.multiply(cast_0, data_6) - del cast_0 - - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) - multiply_3 = paddle._C_ops.multiply(cast_1, data_6) - del cast_1 - - # pd_op.sum: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) - del full_int_array_4 - - # pd_op.full: (xf32) <- () - full_10 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.equal: (2x49x1xb) <- (2x49x1xf32, xf32) - equal_0 = paddle._C_ops.equal(sum_0, full_10) - del sum_0 - - # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - add_5 = paddle._C_ops.add(multiply_1, multiply_3) - - # pd_op.full_like: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) + # pd_op.full_like: (2x3549xi32) <- (2x3549xi32, 1xf32) full_like_0 = paddle._C_ops.full_like( - add_5, full_0, paddle.float32, paddle.framework._current_expected_place() + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() ) + del full_6 - # pd_op.full_like: (2x49x3549xf32) <- (2x49x3549xf32, 1xf32) - full_like_1 = paddle._C_ops.full_like( - multiply_1, - full_0, - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full_like: (2x49x1xb) <- (2x49x1xb, 1xf32) - full_like_2 = paddle._C_ops.full_like( - equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() - ) - del full_0 - - # pd_op.cast: (2x49x1xf32) <- (2x49x1xb) - cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) - del full_like_2 - - # pd_op.cast: (2x49x1xf32) <- (2x49x1xb) - cast_3 = paddle._C_ops.cast(equal_0, paddle.float32) - del equal_0 + # pd_op.where: (2x3549xi32) <- (2x3549xb, 2x3549xi32, 2x3549xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - add_6 = paddle._C_ops.add(full_like_0, full_like_1) - del full_like_0, full_like_1 - - # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) - add_7 = paddle._C_ops.add(add_6, cast_2) - del add_6, cast_2 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] - # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - add_8 = paddle._C_ops.add(add_5, add_7) - del add_5 + # pd_op.reshape: (98x4xf32) <- (2x49x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 - # pd_op.add: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - add_9 = paddle._C_ops.add(multiply_1, add_7) + # pd_op.gather: (7098x4xf32) <- (98x4xf32, 7098xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 - # pd_op.add: (2x49x3549xf32) <- (2x49x1xf32, 2x49x3549xf32) - add_10 = paddle._C_ops.add(cast_3, add_7) - del add_7, cast_3 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [2, 3549, 4] - # pd_op.cast: (2x49x3549xb) <- (2x49x3549xf32) - cast_4 = paddle._C_ops.cast(add_10, paddle.bool) - del add_10 - - # pd_op.where: (2x49x3549xf32) <- (2x49x3549xb, 2x49x3549xf32, 2x49x3549xf32) - where_0 = paddle._C_ops.where(cast_4, add_8, add_9) - del add_8, add_9, cast_4 + # pd_op.reshape: (2x3549x4xf32) <- (7098x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 # pd_op.full: (1xi32) <- () - full_11 = paddle._C_ops.full( - [1], float("13"), 
paddle.int32, paddle.core.CPUPlace() + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.topk: (2x49x13xf32, 2x49x13xi64) <- (2x49x3549xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(where_0, full_11, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + # pd_op.one_hot: (2x3549x11xf32) <- (2x3549xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 ) - del full_11, where_0 + del full_7 - # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( - [1], float("3549"), paddle.int32, paddle.core.CPUPlace() + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), ) + del full_8 - # pd_op.one_hot: (2x49x13x3549xf32) <- (2x49x13xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(full_12, topk_1.dtype), full_12 - ) - del full_12, topk_1 + # pd_op.index_select: (2x3549x10xf32) <- (2x3549x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) + del data_6 # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] + full_int_array_6 = [-1] - # pd_op.sum: (2x49x3549xf32) <- (2x49x13x3549xf32, 1xi64) - sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 + # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) - multiply_4 = paddle._C_ops.multiply(sum_1, data_6) - del data_6, sum_1 + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 - # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) - greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_10) + # pd_op.max: (2x49x1xf32) <- (2x49x3549xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) del multiply_3 - # pd_op.greater_than: (2x49x3549xb) <- (2x49x3549xf32, xf32) - greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_10) - del full_10, multiply_2 - - # pd_op.bitwise_or: (2x49x3549xb) <- (2x49x3549xb, 2x49x3549xb) - bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) - del greater_than_3, greater_than_4 - - # pd_op.cast: (2x49x3549xf32) <- (2x49x3549xb) - cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) - del bitwise_or_0 + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x3549xf32) - multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) - del cast_5, multiply_4 + # pd_op.scale: (2x49x1xf32) <- (2x49x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 - # pd_op.sum: (2x3549xf32) <- (2x49x3549xf32, 1xi64) - sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, 
None, False) - del full_int_array_7 + # pd_op.divide: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] + # pd_op.multiply: (2x49x3549xf32) <- (2x49x3549xf32, 2x49x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.max: (xf32) <- (2x3549xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) - del full_int_array_8 + # pd_op.max: (2x3549xf32) <- (2x49x3549xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 - # pd_op.full: (xf32) <- () - full_13 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) + # pd_op.unsqueeze: (2x3549x1xf32) <- (2x3549xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_13) - del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + # pd_op.multiply: (2x3549x10xf32) <- (2x3549x10xf32, 2x3549x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - return greater_than_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt index a11a69efc..ef13ddd7e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/graph_hash.txt @@ -1 +1 @@ -e51621c1344d3da65f6a657749e919a558ba3fbedad58e6e8c7e6f48e6194981 \ No newline at end of file +5a0efce50442936c174003f6338fd05f5366aa6cf630787a35d4c3d9fd30bc22 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py index 6041bc113..38a68a034 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/input_meta.py @@ -1,67 +1,222 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 3549] - dtype = "bool" - min_val = 0 - max_val = 2 - data = None + shape = [1] + dtype = "float32" + data = [0.699884] class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 3549, 4] + shape = [1] dtype = "float32" - min_val = float("-3.52679") - max_val = float("54.3718") - mean = float("22.566") - std = float("15.0427") - data = None + data = [0.667963] class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 3549, 4] + shape = [1] dtype = "float32" - max_val = float("51.0602") - mean = float("19.8807") - std = float("16.6824") - data = None + data = [0.675792] class Program_weight_tensor_data_3: name = "data_3" - shape = [2, 3549, 10] + shape = [1] dtype = "float32" - max_val = float("0.980323") - mean = float("0.000730272") - std = float("0.020635") - data = None + data = [0.676071] class Program_weight_tensor_data_4: name = "data_4" - shape = [] + shape = [1] dtype = "float32" - data = [51.8347] + data = [0.658719] class Program_weight_tensor_data_5: name = "data_5" - shape = [2, 3549, 40] + shape = [1] dtype = "float32" - min_val = float("-11.2006") - max_val = float("19.6674") - mean = float("0.798417") - std = float("2.05193") - data = None + data = [0.620637] class 
Program_weight_tensor_data_6: name = "data_6" - shape = [3549, 2] + shape = [1] + dtype = "float32" + data = [0.637685] + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [1] + dtype = "float32" + data = [0.619238] + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1] + dtype = "float32" + data = [0.773168] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.635316] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.623672] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.620323] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.621219] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.624329] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.733117] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.557224] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.579909] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] + dtype = "float32" + data = [0.70327] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0319247") + max_val = float("0.0317725") + mean = float("-5.10487e-06") + std = float("0.0176379") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [3072] + dtype = "float32" + min_val = float("-0.000610453") + max_val = float("0.000772214") + mean = float("-4.76504e-06") + std = float("0.000159152") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0310115") + max_val = float("0.0310403") + mean = float("-4.13224e-06") + std = float("0.0176369") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [3072] + dtype = "float32" + min_val = float("-0.000442001") + max_val = float("0.0003857") + mean = float("-8.27006e-07") + std = float("0.000101626") + data = None + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0310211") + max_val = float("0.0309302") + mean = float("-4.07149e-06") + std = float("0.0176364") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [3072] + dtype = "float32" + min_val = float("-0.00027446") + max_val = float("0.000292125") + mean = float("-1.05896e-07") + std = float("6.90506e-05") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.030934") + max_val = float("0.0309738") + mean = float("-4.04736e-06") + std = float("0.017636") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [3072] + dtype = "float32" + min_val = float("-0.000277695") + max_val = float("0.000255924") + mean = float("3.36681e-07") + std = float("5.71105e-05") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [2, 3, 416, 416] dtype = "float32" - min_val = float("0.5") - max_val = float("51.5") - mean = float("22.5952") - std = float("14.8898") + max_val = float("1.0") + mean = float("0.333385") + std = 
float("0.180644") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py index e520d1038..0e4271fc6 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/model.py @@ -5,510 +5,8870 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (2x3549xi32) <- (2x3549xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + 
parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + 
parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + 
parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, + ): + # pd_op.conv2d: (2x32x208x208xf32) <- (2x3x416x416xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_26, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_26, parameter_737 + + # pd_op.batch_norm_: (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_733, parameter_734, parameter_735, parameter_736 + + # 
pd_op.swish: (2x32x208x208xf32) <- (2x32x208x208xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (2x32x208x208xf32) <- (2x32x208x208xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (2x32x208x208xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (2x32x208x208xf32) <- (2x32x208x208xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (2x64x208x208xf32) <- (2x32x208x208xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727 + + # pd_op.batch_norm_: (2x64x208x208xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (2x64x208x208xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (2x64x208x208xf32) <- (2x64x208x208xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (2x96x104x104xf32) <- (2x64x208x208xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_722, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (2x96x104x104xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x104x104xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (2x96x104x104xf32) <- (2x96x104x104xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x96x104x104xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + False, + float("0.9"), + 
float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x96x104x104xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_8, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) + multiply_1 = 
paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_10 = paddle._C_ops.swish(add_2) + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_672, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.conv2d: (2x48x104x104xf32) <- (2x48x104x104xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_667, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (2x48x104x104xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.multiply: (2x48x104x104xf32) <- (1xf32, 2x48x104x104xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) + del data_2 + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (2x48x104x104xf32) <- (2x48x104x104xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (2x48x104x104xf32) <- (2x48x104x104xf32, 2x48x104x104xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: 
(1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([2x48x104x104xf32, 2x48x104x104xf32]) <- (2x48x104x104xf32, 2x48x104x104xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (2x96x104x104xf32) <- ([2x48x104x104xf32, 2x48x104x104xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) + del combine_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 + + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 + + # pd_op.mean: (2x96x1x1xf32) <- (2x96x104x104xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) + + # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_662, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_662 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] + + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_661, full_int_array_1) + del parameter_661 + + # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") + ) + del add_6 + + # pd_op.multiply: (2x96x104x104xf32) <- (2x96x104x104xf32, 2x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (2x128x104x104xf32) <- (2x96x104x104xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660 + + # pd_op.batch_norm_: (2x128x104x104xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (2x128x104x104xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_656, parameter_657, parameter_658, parameter_659 + + # pd_op.swish: (2x128x104x104xf32) <- (2x128x104x104xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (2x192x52x52xf32) <- (2x128x104x104xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_655, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" 
+ ) + del parameter_655 + + # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_651, parameter_652, parameter_653, parameter_654 + + # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x192x52x52xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_650, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_646, parameter_647, parameter_648, parameter_649 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x192x52x52xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + 
swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + 
batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: 
(2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: 
f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + del data_6 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_580 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_8 = 
paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_560, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_555, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.multiply: (2x96x52x52xf32) <- (1xf32, 2x96x52x52xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) + + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([2x96x52x52xf32, 
2x96x52x52xf32]) <- (2x96x52x52xf32, 2x96x52x52xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (2x192x52x52xf32) <- ([2x96x52x52xf32, 2x96x52x52xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) + del combine_1 + + # pd_op.mean: (2x192x1x1xf32) <- (2x192x52x52xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + + # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_550, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_550 + + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_549, full_int_array_1) + del parameter_549 + + # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (2x192x52x52xf32) <- (2x192x52x52xf32, 2x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (2x256x52x52xf32) <- (2x192x52x52xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_548 + + # pd_op.batch_norm_: (2x256x52x52xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (2x256x52x52xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_544, parameter_545, parameter_546, parameter_547 + + # pd_op.swish: (2x256x52x52xf32) <- (2x256x52x52xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) + + # pd_op.conv2d: (2x384x26x26xf32) <- (2x256x52x52xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_543, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_539, parameter_540, parameter_541, parameter_542 + + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_538, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_538 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_41, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_534, parameter_535, parameter_536, parameter_537 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_528 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_524, parameter_525, parameter_526, parameter_527 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_523 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_519, parameter_520, parameter_521, parameter_522 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 
192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_514, parameter_515, parameter_516, parameter_517 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_34 = paddle._C_ops.swish(add_20) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_499, parameter_500, parameter_501, parameter_502 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_36 = paddle._C_ops.swish(add_22) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_24 = paddle._C_ops.add(batch_norm__288, 
multiply_13) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, parameter_470, parameter_471, parameter_472 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 
1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_42 = paddle._C_ops.swish(add_28) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_452, + 
parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_448, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.multiply: (2x192x26x26xf32) <- (1xf32, 2x192x26x26xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (2x384x1x1xf32) <- (2x384x26x26xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_438, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_437, full_int_array_1) + del parameter_437 + + # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) + hardsigmoid_2 = 
paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (2x384x26x26xf32) <- (2x384x26x26xf32, 2x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (2x512x26x26xf32) <- (2x384x26x26xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (2x512x26x26xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (2x512x26x26xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (2x512x26x26xf32) <- (2x512x26x26xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x512x26x26xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_431, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, 
+ batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_68, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # 
pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_386, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 
384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_376, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.multiply: (2x384x13x13xf32) <- (1xf32, 2x384x13x13xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (2x768x1x1xf32) <- (2x768x13x13xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (2x768x1x1xf32) <- (2x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + 
reshape_3 = paddle._C_ops.reshape(parameter_370, full_int_array_1) + del full_int_array_1, parameter_370 + + # pd_op.add: (2x768x1x1xf32) <- (2x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (2x768x1x1xf32) <- (2x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (2x768x13x13xf32) <- (2x768x13x13xf32, 2x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (2x1024x13x13xf32) <- (2x768x13x13xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_369, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369 + + # pd_op.batch_norm_: (2x1024x13x13xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (2x1024x13x13xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (2x1024x13x13xf32) <- (2x1024x13x13xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.flatten: (2x1024x169xf32) <- (2x1024x13x13xf32) + flatten_0 = paddle._C_ops.flatten(swish_55, 2, 3) + + # pd_op.transpose: (2x169x1024xf32) <- (2x1024x169xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("13"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (13xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32") + del full_2 + + # builtin.combine: ([13xf32, 13xf32]) <- (13xf32, 13xf32) + combine_4 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([13x13xf32, 13x13xf32]) <- ([13xf32, 13xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (13x13xf32, 13x13xf32) <- ([13x13xf32, 13x13xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32") + del full_1, full_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True) + del arange_1, full_5 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0) + del full_6, scale_0 + + # pd_op.full: (256xf32) <- () + full_7 = 
paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0) + del elementwise_pow_0, full_7 + + # pd_op.flatten: (169xf32) <- (13x13xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (169x1xf32) <- (169xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3) + del divide_0 + + # pd_op.matmul: (169x256xf32) <- (169x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (169xf32) <- (13x13xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (169x1xf32) <- (169xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2) + del flatten_2, full_int_array_2 + + # pd_op.matmul: (169x256xf32) <- (169x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (169x256xf32) <- (169x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (169x256xf32) <- (169x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (169x256xf32) <- (169x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (169x256xf32) <- (169x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # builtin.combine: ([169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32]) <- (169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32) + combine_5 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (169x1024xf32) <- ([169x256xf32, 169x256xf32, 169x256xf32, 169x256xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.unsqueeze: (1x169x1024xf32) <- (169x1024xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_4, full_int_array_3) + del concat_4 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_40 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + 
assign_31 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_18, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_19, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_40, slice_0, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_41 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [0, 0, 4, 256] + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_6 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_18, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_19, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_40, slice_2, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_7 + + # 
pd_op.assign: (1xi64) <- (1xi64) + assign_55 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_56 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_57 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_58 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_59 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_60 = full_int_array_7 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_18, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_18 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_19, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_19 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_43 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_8 + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_73 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_74 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_75 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_76 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_77 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_78 = full_9 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- 
(2x4x169x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, 0, 1024] + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_44 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_44, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_44 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_45 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_46 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_46, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_47 = paddle._C_ops.add(matmul_9, parameter_357) + del parameter_357 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_49 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_20, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # 
pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_21, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_49, slice_6, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_50 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_20, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_21, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_49, slice_8, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_51 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_20, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_20 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_21, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_21 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_52 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) + + # 
pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_354, False, False) + del parameter_354 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_53, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_53 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_54, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_55 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_55, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_56 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_56, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_56 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_57, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_58 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_22, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_23, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_58, slice_12, False, False) + + # 
pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_59 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_22, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_23, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_58, slice_14, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_60 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_22, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_22 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_23, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_23 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_62 = paddle._C_ops.add(matmul_23, 
parameter_343) + del parameter_343 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_62, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_62 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_63, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_64 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_64, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_65 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_65, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_65 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_66, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1x169x1024xf32) + add_67 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_24, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_25, [0], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_3 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_67, slice_18, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_68 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_5) + + # 
pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_24, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_25, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_67, slice_20, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_69 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_5) + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_24, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_24 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_25, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_25 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_70 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (2x169x4x256xf32) <- (2x169x1024xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (2x4x169x256xf32) <- (2x169x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (2x4x169x169xf32) <- (2x4x169x256xf32, 2x4x169x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (2x4x169x169xf32) <- (2x4x169x169xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (2x4x169x169xf32) <- (2x4x169x169xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (2x4x169x169xf32, 2x4x169x169xui8) <- (2x4x169x169xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (2x4x169x256xf32) <- (2x4x169x169xf32, 2x4x169x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (2x169x4x256xf32) <- (2x4x169x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (2x169x1024xf32) <- (2x169x4x256xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) + del full_int_array_8 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_71 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + 
add_71, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_71 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (2x169x2048xf32) <- (2x169x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (2x169x2048xf32) <- (2x169x2048xf32, 2048xf32) + add_73 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (2x169x2048xf32) <- (2x169x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_73, False) + + # pd_op.dropout: (2x169x2048xf32, 2x169x2048xui8) <- (2x169x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (2x169x1024xf32) <- (2x169x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) + del parameter_328 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 1024xf32) + add_74 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (2x169x1024xf32, 2x169x1024xui8) <- (2x169x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_74, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_74 + + # pd_op.add: (2x169x1024xf32) <- (2x169x1024xf32, 2x169x1024xf32) + add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (2x169x1024xf32, 2x169xf32, 2x169xf32) <- (2x169x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_75, parameter_326, parameter_325, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (2x1024x169xf32) <- (2x169x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [2, 1024, 13, 13] + + # pd_op.reshape: (2x1024x13x13xf32) <- (2x1024x169xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) + del full_int_array_9 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1024x13x13xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + reshape_20, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1024x13x13xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + reshape_20, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, 
f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_59 = paddle._C_ops.swish(add_76) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_61 = paddle._C_ops.swish(add_77) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = 
[5, 5] + + # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_10, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [9, 9] + + # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_11, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [13, 13] + + # pd_op.pool2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_12, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32) + combine_6 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (2x1536x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1536x13x13xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_5, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + 
batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_64 = paddle._C_ops.swish(add_78) + + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) + combine_7 = [swish_56, swish_64] + + # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_6, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x768x13x13xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, 
None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (2x384x26x26xf32) <- (2x384x13x13xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x384x26x26xf32, 2x512x26x26xf32]) <- (2x384x26x26xf32, 2x512x26x26xf32) + combine_8 = [nearest_interp_0, swish_45] + + # pd_op.concat: (2x896x26x26xf32) <- ([2x384x26x26xf32, 2x512x26x26xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x896x26x26xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_7, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x896x26x26xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_7, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (2x192x26x26xf32) <- 
(2x192x26x26xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_70 = paddle._C_ops.swish(add_79) - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 - # pd_op.unsqueeze: (2x3549x1xi32) <- (2x3549xi32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, 
parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 - # pd_op.tile: (2x3549x4xi32) <- (2x3549x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 - # pd_op.cast: (2x3549x4xb) <- (2x3549x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 - # pd_op.masked_select: (-1xf32) <- (2x3549x4xf32, 2x3549x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_72 = paddle._C_ops.swish(add_80) - # pd_op.masked_select: (-1xf32) <- (2x3549x4xf32, 2x3549x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 - # pd_op.sum: (2x3549xf32) <- 
(2x3549x10xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 - # pd_op.masked_select: (-1xf32) <- (2x3549xf32, 2x3549xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_74 = paddle._C_ops.swish(add_81) - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) + # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) + combine_9 = [swish_67, swish_74] - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) + # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_8, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + 
batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_195, parameter_196, parameter_197, parameter_198 - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (2x192x26x26xf32) <- (2x384x26x26xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + # pd_op.nearest_interp: (2x192x52x52xf32) <- (2x192x26x26xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([2x192x52x52xf32, 2x256x52x52xf32]) <- (2x192x52x52xf32, 2x256x52x52xf32) + combine_10 = [nearest_interp_1, swish_29] + + # pd_op.concat: (2x448x52x52xf32) <- ([2x192x52x52xf32, 2x256x52x52xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x448x52x52xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_9, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, 
parameter_187, parameter_188 - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, split_4) + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) + # pd_op.conv2d: (2x96x52x52xf32) <- (2x448x52x52xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_9, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_175, parameter_176, parameter_177, parameter_178 - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_174 - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + 
"NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_80 = paddle._C_ops.swish(add_82) - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + 
) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_82 = paddle._C_ops.swish(add_83) - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_149 - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 
96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (2x96x52x52xf32) <- (2x96x52x52xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x52x52xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (2x96x52x52xf32) <- (2x96x52x52xf32, 2x96x52x52xf32) + add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + # pd_op.swish: (2x96x52x52xf32) <- (2x96x52x52xf32) + swish_84 = paddle._C_ops.swish(add_84) - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) + # builtin.combine: ([2x96x52x52xf32, 2x96x52x52xf32]) <- (2x96x52x52xf32, 2x96x52x52xf32) + combine_11 = [swish_77, swish_84] - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) + # pd_op.concat: (2x192x52x52xf32) <- ([2x96x52x52xf32, 2x96x52x52xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (2x192x52x52xf32) <- (2x192x52x52xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_10, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x52x52xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) + # pd_op.swish: (2x192x52x52xf32) <- (2x192x52x52xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x52x52xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del 
parameter_129 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + # builtin.combine: ([2x192x26x26xf32, 2x384x26x26xf32]) <- (2x192x26x26xf32, 2x384x26x26xf32) + combine_12 = [swish_86, swish_75] - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 + # pd_op.concat: (2x576x26x26xf32) <- ([2x192x26x26xf32, 2x384x26x26xf32], 1xi32) + concat_11 = paddle._C_ops.concat(combine_12, full_0) + del combine_12 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + # pd_op.conv2d: (2x192x26x26xf32) <- (2x576x26x26xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_11, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x192x26x26xf32) <- (2x576x26x26xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_11, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_119 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = 
(lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 - # pd_op.unsqueeze: (2x3549x1xb) <- (2x3549xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 - # pd_op.cast: (2x3549x1xi32) <- (2x3549x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + 
batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 40] + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - # pd_op.tile: (2x3549x40xi32) <- (2x3549x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_90 = paddle._C_ops.swish(add_85) - # pd_op.cast: (2x3549x40xb) <- (2x3549x40xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 - # pd_op.masked_select: (-1xf32) <- (2x3549x40xf32, 2x3549x40xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 10] + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) - # pd_op.reshape: (-1x4x10xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_90, parameter_91, parameter_92, parameter_93 - # pd_op.split_with_num: ([2x3549x2xf32, 2x3549x2xf32]) <- (2x3549x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], 
[0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 - # builtin.split: (2x3549x2xf32, 2x3549x2xf32) <- ([2x3549x2xf32, 2x3549x2xf32]) + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 - # pd_op.subtract: (2x3549x2xf32) <- (3549x2xf32, 2x3549x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - # pd_op.subtract: (2x3549x2xf32) <- (2x3549x2xf32, 3549x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_92 = paddle._C_ops.swish(add_86) - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_80, parameter_81, parameter_82, parameter_83 - # builtin.combine: ([2x3549x2xf32, 2x3549x2xf32]) <- (2x3549x2xf32, 2x3549x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) - # pd_op.concat: (2x3549x4xf32) <- ([2x3549x2xf32, 2x3549x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("-2"), paddle.float32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_75, parameter_76, parameter_77, parameter_78 - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("6.99"), paddle.float32, paddle.core.CPUPlace() + # pd_op.conv2d: (2x192x26x26xf32) <- (2x192x26x26xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x26x26xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (2x192x26x26xf32) <- (2x192x26x26xf32, 2x192x26x26xf32) + add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (2x192x26x26xf32) <- (2x192x26x26xf32) + swish_94 = paddle._C_ops.swish(add_87) - # pd_op.clip: (2x3549x4xf32) <- (2x3549x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_7, full_8) - del concat_0, full_7, full_8 + # builtin.combine: ([2x192x26x26xf32, 2x192x26x26xf32]) <- (2x192x26x26xf32, 2x192x26x26xf32) + combine_13 = [swish_87, swish_94] - # pd_op.masked_select: (-1xf32) <- (2x3549x4xf32, 2x3549x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 + # pd_op.concat: (2x384x26x26xf32) <- ([2x192x26x26xf32, 2x192x26x26xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 + # pd_op.conv2d: (2x384x26x26xf32) <- (2x384x26x26xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_12, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) + # pd_op.batch_norm_: (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x26x26xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 + # pd_op.swish: (2x384x26x26xf32) <- (2x384x26x26xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x26x26xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) 
- cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) + # builtin.combine: ([2x384x13x13xf32, 2x768x13x13xf32]) <- (2x384x13x13xf32, 2x768x13x13xf32) + combine_14 = [swish_96, swish_65] - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("2"), True) - del cast_4 + # pd_op.concat: (2x1152x13x13xf32) <- ([2x384x13x13xf32, 2x768x13x13xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1152x13x13xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_13, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 - # pd_op.cross_entropy_with_softmax: (-1x4x10xf32, -1x4x1xf32) <- (-1x4x10xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 + # pd_op.conv2d: (2x384x13x13xf32) <- (2x1152x13x13xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_13, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_54 - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + 
batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("2"), True) - del scale_4 + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 - # pd_op.cross_entropy_with_softmax: (-1x4x10xf32, -1x4x1xf32) <- (-1x4x10xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + 
parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_100 = paddle._C_ops.swish(add_88) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_102 = paddle._C_ops.swish(add_89) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
del parameter_19 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del reshape_2 + del parameter_14 - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (2x384x13x13xf32) <- (2x384x13x13xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x13x13xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_5, parameter_6, parameter_7, parameter_8 - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + # pd_op.add: (2x384x13x13xf32) <- (2x384x13x13xf32, 2x384x13x13xf32) + add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) + # pd_op.swish: (2x384x13x13xf32) <- (2x384x13x13xf32) + swish_104 = paddle._C_ops.swish(add_90) - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 + # builtin.combine: ([2x384x13x13xf32, 2x384x13x13xf32]) <- (2x384x13x13xf32, 2x384x13x13xf32) + combine_15 = [swish_97, swish_104] - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + # pd_op.concat: (2x768x13x13xf32) <- ([2x384x13x13xf32, 2x384x13x13xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_15, full_0) + del 
combine_15 - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + # pd_op.conv2d: (2x768x13x13xf32) <- (2x768x13x13xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (2x768x13x13xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) + # pd_op.swish: (2x768x13x13xf32) <- (2x768x13x13xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) del ( - abs_0, add_0, add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_45, + add_46, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_54, + add_55, + add_57, + add_58, + add_59, + add_60, + add_61, + add_63, + add_64, + add_66, + add_67, + add_68, + add_69, + add_7, + add_70, + add_72, + add_73, + add_75, + add_76, + add_77, + add_78, + add_79, + add_8, + add_80, + add_81, + add_82, + add_83, + add_84, + add_85, + add_86, + add_87, + add_88, + add_89, + add_9, + add_90, assign_0, assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, + assign_70, + assign_71, + assign_72, + assign_73, + assign_74, + assign_75, + assign_76, + assign_77, + assign_78, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + 
batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + 
batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + 
batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + 
batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + 
batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_14, + concat_2, + concat_3, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - 
maximum_1, - maximum_2, - maximum_3, + full_8, + full_9, + full_int_array_0, + full_int_array_10, + full_int_array_11, + full_int_array_12, + full_int_array_4, + full_int_array_6, + full_int_array_7, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, + mean_1, + mean_2, + mean_3, multiply_0, multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, multiply_2, + multiply_20, + multiply_21, + multiply_3, multiply_4, multiply_5, multiply_6, multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, reshape_0, reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, + reshape_11, + reshape_15, + reshape_19, + reshape_2, + reshape_20, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + 
transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, unsqueeze_3, - unsqueeze_4, ) - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py index 8b1378917..5dcac1a4a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_6/weight_meta.py @@ -1 +1,8004 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.175929") + max_val = float("0.21086") + mean = float("0.0834788") + std = float("0.0566121") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.939955") + max_val = float("1.29826") + mean = float("1.064") + std = float("0.031232") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00130768") + max_val = float("0.0463603") + mean = float("0.00625725") + std = float("0.00393263") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.127586") + max_val = float("0.0517413") + mean = float("-0.02654") + std = float("0.0270736") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0515626") + max_val = float("0.0379063") + mean = float("-0.00014164") + std = float("0.00240107") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.141651") + max_val = float("0.0305715") + mean = float("-0.0187927") + std = float("0.0234486") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.945806") + max_val = float("1.04446") + mean = float("0.986675") + std = float("0.0105808") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000827797") + max_val = float("0.0161073") + mean = float("0.00424179") + std = float("0.00259626") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0553707") + max_val = float("0.0596015") + mean = float("0.00271009") + std = float("0.0216885") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0308331") + max_val = float("0.0199146") + mean = float("2.09539e-05") + std = float("0.00184746") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.141651") + max_val = float("0.0305715") + mean = float("-0.0187927") + std = float("0.0234486") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.968047") + max_val = float("1.13059") + mean = float("1.01542") + std = float("0.0171839") + data = None + + +class 
Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00219862") + max_val = float("0.0391499") + mean = float("0.00725225") + std = float("0.00432626") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.173733") + max_val = float("0.12517") + mean = float("-0.0400312") + std = float("0.0316991") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0283367") + max_val = float("0.0330588") + mean = float("-7.3825e-05") + std = float("0.00125417") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170186") + max_val = float("0.0209452") + mean = float("-0.0348788") + std = float("0.0279259") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975256") + max_val = float("1.12591") + mean = float("1.01501") + std = float("0.0240755") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00658639") + max_val = float("0.20726") + mean = float("0.0257203") + std = float("0.0173571") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.245253") + max_val = float("0.416056") + mean = float("-0.0421834") + std = float("0.0522217") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0311751") + max_val = float("0.0503276") + mean = float("-6.05092e-05") + std = float("0.00141653") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.105187") + max_val = float("0.0129827") + mean = float("-0.0357886") + std = float("0.0193112") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945568") + max_val = float("1.0451") + mean = float("0.98866") + std = float("0.00984887") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000652651") + max_val = float("0.0220653") + mean = float("0.00339318") + std = float("0.00205907") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.077936") + max_val = float("0.0409903") + mean = float("-0.0020853") + std = float("0.01599") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0260426") + max_val = float("0.0248082") + mean = float("-5.14195e-05") + std = float("0.00195167") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105187") + max_val = float("0.0129827") + mean = float("-0.0357886") + std = float("0.0193112") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959552") + max_val = float("1.10507") + mean = 
float("1.01607") + std = float("0.0177491") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.0024964") + max_val = float("0.0250836") + mean = float("0.00780127") + std = float("0.00352904") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.160103") + max_val = float("0.245929") + mean = float("-0.0452978") + std = float("0.0365583") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.034241") + max_val = float("0.0490696") + mean = float("-8.25271e-05") + std = float("0.00126683") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896542") + max_val = float("0.0192769") + mean = float("-0.036069") + std = float("0.0194634") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933175") + max_val = float("1.1146") + mean = float("1.01167") + std = float("0.0265813") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00448167") + max_val = float("0.0632357") + mean = float("0.0163368") + std = float("0.00906915") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.168694") + max_val = float("0.0937087") + mean = float("-0.01934") + std = float("0.0451176") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0389109") + max_val = float("0.0474721") + mean = float("-5.22495e-05") + std = float("0.00144713") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116304") + max_val = float("0.016211") + mean = float("-0.0373544") + std = float("0.0201432") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929317") + max_val = float("1.02782") + mean = float("0.987068") + std = float("0.0110352") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00119145") + max_val = float("0.0108677") + mean = float("0.00443524") + std = float("0.00164967") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0535005") + max_val = float("0.0365936") + mean = float("-0.0073878") + std = float("0.0125324") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.037048") + max_val = float("0.0266802") + mean = float("-0.000137547") + std = float("0.00195837") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116304") + max_val = float("0.016211") + mean = float("-0.0373544") + std = float("0.0201432") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = 
float("0.98123") + max_val = float("1.10689") + mean = float("1.01832") + std = float("0.0222072") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00438825") + max_val = float("0.0317931") + mean = float("0.0111305") + std = float("0.00497164") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.158148") + max_val = float("0.0963003") + mean = float("-0.0215295") + std = float("0.032226") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0342558") + max_val = float("0.0596347") + mean = float("-4.36682e-05") + std = float("0.00131956") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.10708") + max_val = float("0.0239013") + mean = float("-0.0375156") + std = float("0.0214475") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944782") + max_val = float("1.11463") + mean = float("1.01186") + std = float("0.0277861") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00510026") + max_val = float("0.073565") + mean = float("0.0136809") + std = float("0.00702878") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.145833") + max_val = float("0.12243") + mean = float("-0.0419585") + std = float("0.0460457") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0265761") + max_val = float("0.0412318") + mean = float("-7.48799e-05") + std = float("0.00147167") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.106796") + max_val = float("0.0466792") + mean = float("-0.0263049") + std = float("0.0154085") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.973685") + max_val = float("1.08651") + mean = float("1.00904") + std = float("0.0171201") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00211342") + max_val = float("0.0171623") + mean = float("0.00491392") + std = float("0.00189255") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.0938724") + max_val = float("0.0726207") + mean = float("-0.0169301") + std = float("0.0255402") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0600923") + max_val = float("0.0698518") + mean = float("-7.83959e-05") + std = float("0.00221115") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0425267") + max_val = float("0.0160945") + mean = float("-0.00899786") + std = float("0.00841522") + data = None + + +class Program_weight_tensor_parameter_56: + name = 
"parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.959381") + max_val = float("1.05138") + mean = float("1.0079") + std = float("0.0115872") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00177107") + max_val = float("0.0283498") + mean = float("0.00429961") + std = float("0.00192508") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.101028") + max_val = float("0.097759") + mean = float("-0.0218202") + std = float("0.0240243") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0252393") + max_val = float("0.0409905") + mean = float("-0.000104687") + std = float("0.00201567") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0530202") + max_val = float("0.00596341") + mean = float("-0.0166175") + std = float("0.00987673") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988638") + max_val = float("1.10406") + mean = float("1.0196") + std = float("0.0169012") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00403058") + max_val = float("0.0513783") + mean = float("0.0126843") + std = float("0.00717747") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.365522") + max_val = float("0.198026") + mean = float("-0.0414412") + std = float("0.0639995") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0201867") + max_val = float("0.0314514") + mean = float("-3.01993e-05") + std = float("0.00114789") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.22273") + max_val = float("0.49215") + mean = float("0.217192") + std = float("0.124233") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919294") + max_val = float("1.48063") + mean = float("1.14128") + std = float("0.0737757") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00389442") + max_val = float("0.0590123") + mean = float("0.011774") + std = float("0.00590078") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.153266") + max_val = float("0.0818695") + mean = float("-0.0278176") + std = float("0.0324133") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0888812") + max_val = float("0.0966329") + mean = float("-0.000338059") + std = float("0.00511266") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.166124") + max_val = float("0.0467039") + mean = float("-0.0250411") + std = float("0.0394646") + data = None + 
+ +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.84107") + max_val = float("1.05105") + mean = float("0.97282") + std = float("0.0237598") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00162489") + max_val = float("0.0270337") + mean = float("0.00519677") + std = float("0.00308321") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0629501") + max_val = float("0.0796146") + mean = float("-0.00468793") + std = float("0.0191205") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0448536") + max_val = float("0.0336517") + mean = float("-0.000157415") + std = float("0.00377073") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.166124") + max_val = float("0.0467039") + mean = float("-0.0250411") + std = float("0.0394646") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.729345") + max_val = float("1.12261") + mean = float("1.02194") + std = float("0.0372571") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00562341") + max_val = float("0.0781491") + mean = float("0.0170834") + std = float("0.0090384") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.191415") + max_val = float("0.0874893") + mean = float("-0.0410796") + std = float("0.042934") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0346474") + max_val = float("0.0452792") + mean = float("-0.00013752") + std = float("0.00244921") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.191424") + max_val = float("0.0441491") + mean = float("-0.0580252") + std = float("0.0490538") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897189") + max_val = float("1.18714") + mean = float("1.01553") + std = float("0.048456") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.0130131") + max_val = float("0.155912") + mean = float("0.0355598") + std = float("0.0183742") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.322411") + max_val = float("0.461446") + mean = float("-0.0402579") + std = float("0.0600098") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0436326") + max_val = float("0.0759463") + mean = float("-0.00010151") + std = float("0.00273793") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191731") + max_val = float("0.00856722") + mean = 
float("-0.0642182") + std = float("0.033376") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922072") + max_val = float("1.04657") + mean = float("0.97362") + std = float("0.017993") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00120452") + max_val = float("0.0109813") + mean = float("0.00399435") + std = float("0.00161666") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0561207") + max_val = float("0.0353681") + mean = float("-0.00713616") + std = float("0.0144372") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0389677") + max_val = float("0.0317083") + mean = float("-0.000327359") + std = float("0.00370864") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191731") + max_val = float("0.00856722") + mean = float("-0.0642182") + std = float("0.033376") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.967917") + max_val = float("1.14773") + mean = float("1.02404") + std = float("0.0293683") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00374803") + max_val = float("0.0495216") + mean = float("0.010846") + std = float("0.00639043") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.141797") + max_val = float("0.12988") + mean = float("-0.0331827") + std = float("0.0341888") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0433361") + max_val = float("0.0521915") + mean = float("-0.000128374") + std = float("0.00248482") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.188937") + max_val = float("0.0617064") + mean = float("-0.0755865") + std = float("0.0405704") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.88236") + max_val = float("1.21791") + mean = float("1.01474") + std = float("0.050711") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00731951") + max_val = float("0.0706393") + mean = float("0.0229053") + std = float("0.0124686") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.105024") + max_val = float("0.045449") + mean = float("-0.0227613") + std = float("0.0300699") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0453344") + max_val = float("0.0804613") + mean = float("-0.000102284") + std = float("0.00285929") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = 
float("-0.229338") + max_val = float("-0.0102477") + mean = float("-0.0831807") + std = float("0.0422279") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900655") + max_val = float("1.0279") + mean = float("0.975271") + std = float("0.0229719") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00158349") + max_val = float("0.0138258") + mean = float("0.00506183") + std = float("0.00178933") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0350209") + max_val = float("0.0440672") + mean = float("-0.00863895") + std = float("0.0159988") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0414024") + max_val = float("0.072312") + mean = float("-0.000424235") + std = float("0.00418287") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.229338") + max_val = float("-0.0102477") + mean = float("-0.0831807") + std = float("0.0422279") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947228") + max_val = float("1.11076") + mean = float("1.02102") + std = float("0.0305612") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00636548") + max_val = float("0.0561151") + mean = float("0.0150209") + std = float("0.00712578") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.12433") + max_val = float("0.09672") + mean = float("-0.0168594") + std = float("0.0346152") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444124") + max_val = float("0.0493834") + mean = float("-7.49687e-05") + std = float("0.00263663") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234043") + max_val = float("0.0809248") + mean = float("-0.0946909") + std = float("0.0462546") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.886425") + max_val = float("1.20415") + mean = float("1.01671") + std = float("0.0539421") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00720534") + max_val = float("0.0816429") + mean = float("0.0188148") + std = float("0.0102621") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.162095") + max_val = float("0.0789193") + mean = float("-0.0380274") + std = float("0.039478") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0388947") + max_val = float("0.087211") + mean = float("-0.000134346") + std = float("0.00310298") + data = None + + +class 
Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.200195") + max_val = float("0.0157584") + mean = float("-0.0662765") + std = float("0.0312062") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925347") + max_val = float("1.15235") + mean = float("1.0133") + std = float("0.0383988") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00390106") + max_val = float("0.0290639") + mean = float("0.00837401") + std = float("0.00327435") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0919967") + max_val = float("0.135726") + mean = float("-0.022066") + std = float("0.0311243") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0594804") + max_val = float("0.0592757") + mean = float("-0.000196378") + std = float("0.00449942") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.0998406") + max_val = float("0.0381397") + mean = float("-0.0139719") + std = float("0.0205426") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.92253") + max_val = float("1.19791") + mean = float("1.00313") + std = float("0.0257744") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00364414") + max_val = float("0.0462398") + mean = float("0.0110928") + std = float("0.00672902") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0792517") + max_val = float("0.0537729") + mean = float("-0.0185521") + std = float("0.0235437") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0668912") + max_val = float("0.102316") + mean = float("-0.000166523") + std = float("0.00443571") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.159157") + max_val = float("-0.000957455") + mean = float("-0.0390269") + std = float("0.0217825") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921604") + max_val = float("1.24953") + mean = float("1.00821") + std = float("0.0303984") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00642947") + max_val = float("0.0670086") + mean = float("0.0210833") + std = float("0.00953439") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.330766") + max_val = float("0.234299") + mean = float("-0.0509451") + std = float("0.0885345") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0500862") + max_val = 
float("0.0679449") + mean = float("-5.08306e-05") + std = float("0.0028463") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.554317") + max_val = float("1.14248") + mean = float("0.353082") + std = float("0.345155") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.546484") + max_val = float("1.57412") + mean = float("1.15038") + std = float("0.183458") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.0100657") + max_val = float("0.221031") + mean = float("0.0407891") + std = float("0.0263878") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.377989") + max_val = float("0.257639") + mean = float("-0.0649794") + std = float("0.0721023") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.153545") + max_val = float("0.134074") + mean = float("-0.00121304") + std = float("0.0133081") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457832") + max_val = float("0.237746") + mean = float("-0.00924267") + std = float("0.145032") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.761673") + max_val = float("1.23302") + mean = float("0.949003") + std = float("0.0714195") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00289003") + max_val = float("0.0548184") + mean = float("0.0147543") + std = float("0.0100438") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0823141") + max_val = float("0.101545") + mean = float("-0.015142") + std = float("0.0287704") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0870691") + max_val = float("0.0850803") + mean = float("-0.00142159") + std = float("0.01113") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457832") + max_val = float("0.237746") + mean = float("-0.00924267") + std = float("0.145032") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.507912") + max_val = float("1.26895") + mean = float("1.02934") + std = float("0.096504") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.0126569") + max_val = float("0.170178") + mean = float("0.046809") + std = float("0.0277218") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.311825") + max_val = float("0.120203") + mean = float("-0.0489369") + std = float("0.077116") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = 
"float32" + min_val = float("-0.0902007") + max_val = float("0.0894371") + mean = float("-0.000317607") + std = float("0.0067737") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.702428") + max_val = float("0.490659") + mean = float("-0.113325") + std = float("0.198985") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.718272") + max_val = float("1.71659") + mean = float("0.996064") + std = float("0.134561") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.0158852") + max_val = float("0.220156") + mean = float("0.0611441") + std = float("0.0432503") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.218411") + max_val = float("0.176567") + mean = float("-0.04823") + std = float("0.0699582") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.128433") + max_val = float("0.105681") + mean = float("-0.00053974") + std = float("0.00768105") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.365465") + max_val = float("0.189556") + mean = float("-0.138691") + std = float("0.0965007") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.628614") + max_val = float("1.02635") + mean = float("0.906997") + std = float("0.055833") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00386648") + max_val = float("0.0250898") + mean = float("0.0113332") + std = float("0.00452204") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0829535") + max_val = float("0.0455066") + mean = float("-0.0107845") + std = float("0.0210116") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0826433") + max_val = float("0.0836331") + mean = float("-0.00123565") + std = float("0.0111633") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.365465") + max_val = float("0.189556") + mean = float("-0.138691") + std = float("0.0965007") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.808147") + max_val = float("1.15745") + mean = float("1.02165") + std = float("0.061107") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0105243") + max_val = float("0.146319") + mean = float("0.0374655") + std = float("0.0298762") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.202738") + max_val = float("0.0648004") + mean = float("-0.042437") + std = float("0.0413335") + data = None + + +class Program_weight_tensor_parameter_159: + name = 
"parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0790843") + max_val = float("0.0756688") + mean = float("-0.000522856") + std = float("0.00687379") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.488696") + max_val = float("0.1689") + mean = float("-0.167703") + std = float("0.131642") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.775323") + max_val = float("1.29288") + mean = float("0.963604") + std = float("0.0984425") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0130451") + max_val = float("0.138622") + mean = float("0.0336631") + std = float("0.0190887") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.186613") + max_val = float("0.0841258") + mean = float("-5.39021e-05") + std = float("0.0459065") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.128553") + max_val = float("0.113025") + mean = float("-0.000437448") + std = float("0.00839485") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.492552") + max_val = float("0.0643671") + mean = float("-0.168979") + std = float("0.115008") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.725079") + max_val = float("1.00348") + mean = float("0.919861") + std = float("0.0526643") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00612442") + max_val = float("0.0365536") + mean = float("0.016419") + std = float("0.00586326") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0660382") + max_val = float("0.043413") + mean = float("-0.0231643") + std = float("0.0225905") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.107707") + max_val = float("0.0826476") + mean = float("-0.00240553") + std = float("0.0126403") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.492552") + max_val = float("0.0643671") + mean = float("-0.168979") + std = float("0.115008") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.759592") + max_val = float("1.15371") + mean = float("0.981072") + std = float("0.058465") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0172157") + max_val = float("0.224833") + mean = float("0.0507609") + std = float("0.0362346") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.248811") + max_val = float("0.0998924") + mean = float("-0.0166411") + std = float("0.0490542") + data = None + 
+ +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.116242") + max_val = float("0.0898586") + mean = float("-0.000231126") + std = float("0.00783901") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.567319") + max_val = float("0.348494") + mean = float("-0.179712") + std = float("0.173626") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.772527") + max_val = float("1.33704") + mean = float("0.955269") + std = float("0.110943") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0160524") + max_val = float("0.114434") + mean = float("0.036106") + std = float("0.0195034") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.189984") + max_val = float("0.255512") + mean = float("-0.0275046") + std = float("0.0974112") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.161713") + max_val = float("0.142438") + mean = float("-0.000316146") + std = float("0.00950798") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.627686") + max_val = float("0.598483") + mean = float("-0.082655") + std = float("0.256323") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.653052") + max_val = float("1.22671") + mean = float("0.866558") + std = float("0.114981") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0123443") + max_val = float("0.0857623") + mean = float("0.0307528") + std = float("0.01414") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.117179") + max_val = float("0.0889702") + mean = float("-0.0137918") + std = float("0.041907") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.162211") + max_val = float("0.186586") + mean = float("-0.00057998") + std = float("0.0123504") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0984774") + max_val = float("0.230057") + mean = float("0.0612113") + std = float("0.0550468") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.692561") + max_val = float("1.12833") + mean = float("0.931782") + std = float("0.0640907") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00681771") + max_val = float("0.0772474") + mean = float("0.0165796") + std = float("0.00961681") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.133097") + max_val = float("0.161018") + mean = 
float("-0.0184376") + std = float("0.0386089") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.104838") + max_val = float("0.136041") + mean = float("-0.000352218") + std = float("0.00870856") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.296963") + max_val = float("0.196688") + mean = float("-0.0669209") + std = float("0.0696946") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.672164") + max_val = float("1.45538") + mean = float("0.884399") + std = float("0.0784254") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.0110872") + max_val = float("0.127506") + mean = float("0.0262649") + std = float("0.0134352") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.151007") + max_val = float("0.0461615") + mean = float("-0.0397354") + std = float("0.0392124") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0904828") + max_val = float("0.110909") + mean = float("-0.000644138") + std = float("0.00794554") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.202322") + max_val = float("0.238987") + mean = float("-0.0675229") + std = float("0.0415855") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.872032") + max_val = float("1.54191") + mean = float("1.019") + std = float("0.063367") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00721296") + max_val = float("0.101353") + mean = float("0.0168635") + std = float("0.00915068") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.297588") + max_val = float("0.152976") + mean = float("-0.051365") + std = float("0.0447816") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.102299") + max_val = float("0.0969442") + mean = float("-0.000664132") + std = float("0.00717478") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.177096") + max_val = float("0.00551918") + mean = float("-0.0655662") + std = float("0.0325016") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884728") + max_val = float("0.992155") + mean = float("0.94926") + std = float("0.0164178") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00441221") + max_val = float("0.0261083") + mean = float("0.01005") + std = float("0.00351877") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = 
"float32" + min_val = float("-0.0812598") + max_val = float("0.0748845") + mean = float("-0.0233222") + std = float("0.0310809") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0495063") + max_val = float("0.0379562") + mean = float("-0.000707927") + std = float("0.00526785") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.177096") + max_val = float("0.00551918") + mean = float("-0.0655662") + std = float("0.0325016") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.944815") + max_val = float("1.03167") + mean = float("0.987873") + std = float("0.016613") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.016531") + max_val = float("0.109259") + mean = float("0.0394289") + std = float("0.0158791") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.21924") + max_val = float("0.183769") + mean = float("-0.0255868") + std = float("0.0652744") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0428771") + max_val = float("0.0579551") + mean = float("-7.64353e-05") + std = float("0.00287948") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.216413") + max_val = float("-0.00156605") + mean = float("-0.0741052") + std = float("0.0353871") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939878") + max_val = float("1.15492") + mean = float("1.02948") + std = float("0.0431484") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0381921") + max_val = float("0.244841") + mean = float("0.0704194") + std = float("0.0260547") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.194913") + max_val = float("0.284776") + mean = float("-0.0487736") + std = float("0.0787153") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0537036") + max_val = float("0.0569212") + mean = float("-0.000101544") + std = float("0.00352016") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.196865") + max_val = float("-0.00996621") + mean = float("-0.0711692") + std = float("0.0319161") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.944171") + max_val = float("1.04842") + mean = float("0.987927") + std = float("0.0137867") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00232538") + max_val = float("0.0114743") + mean = float("0.00426258") + std = float("0.0011511") + data = None + + +class 
Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0893703") + max_val = float("0.0463746") + mean = float("-0.0214534") + std = float("0.0217776") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0308295") + max_val = float("0.0472366") + mean = float("-0.000698195") + std = float("0.00548431") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.196865") + max_val = float("-0.00996621") + mean = float("-0.0711692") + std = float("0.0319161") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953905") + max_val = float("1.11243") + mean = float("1.00461") + std = float("0.0264007") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.00885181") + max_val = float("0.0602316") + mean = float("0.0174647") + std = float("0.00611072") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.214638") + max_val = float("0.0916921") + mean = float("-0.042109") + std = float("0.0445113") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0395263") + max_val = float("0.0647994") + mean = float("-0.000147334") + std = float("0.00290748") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.23254") + max_val = float("-0.0186192") + mean = float("-0.094269") + std = float("0.0399954") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.94661") + max_val = float("1.1911") + mean = float("1.02415") + std = float("0.0459878") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0325254") + max_val = float("0.154956") + mean = float("0.066483") + std = float("0.0222298") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.348988") + max_val = float("0.20982") + mean = float("-0.0943644") + std = float("0.0968837") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0523599") + max_val = float("0.0680231") + mean = float("-0.000170588") + std = float("0.00366692") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154829") + max_val = float("-0.00101215") + mean = float("-0.0685481") + std = float("0.0233247") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932711") + max_val = float("1.07089") + mean = float("0.998751") + std = float("0.0218702") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00202058") + max_val = 
float("0.0085046") + mean = float("0.00390999") + std = float("0.00110762") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0777916") + max_val = float("0.098492") + mean = float("-0.0116677") + std = float("0.020466") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0340527") + max_val = float("0.0501646") + mean = float("-0.00038914") + std = float("0.00614264") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.15483") + max_val = float("-0.00101216") + mean = float("-0.0685481") + std = float("0.0233247") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.935817") + max_val = float("1.11381") + mean = float("0.992462") + std = float("0.0258361") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00891321") + max_val = float("0.0478413") + mean = float("0.0181253") + std = float("0.00570544") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.258263") + max_val = float("0.130443") + mean = float("-0.0435931") + std = float("0.0468497") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0292394") + max_val = float("0.0522577") + mean = float("-0.00017048") + std = float("0.00289304") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.288771") + max_val = float("0.0148396") + mean = float("-0.109714") + std = float("0.0400291") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.944044") + max_val = float("1.25876") + mean = float("1.02656") + std = float("0.0419352") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0137713") + max_val = float("0.0682612") + mean = float("0.0280618") + std = float("0.00984345") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.354673") + max_val = float("0.124192") + mean = float("-0.0487223") + std = float("0.0583851") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0564756") + max_val = float("0.0674707") + mean = float("-0.000194109") + std = float("0.00414155") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257095") + max_val = float("-0.01369") + mean = float("-0.121797") + std = float("0.0441852") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916459") + max_val = float("1.13702") + mean = float("1.02436") + std = float("0.0422629") + data = None + + +class Program_weight_tensor_parameter_247: + name = 
"parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00516469") + max_val = float("0.0229026") + mean = float("0.0106646") + std = float("0.00317044") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.127284") + max_val = float("0.0963655") + mean = float("0.0144008") + std = float("0.029143") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0721174") + max_val = float("0.0971018") + mean = float("-0.000190287") + std = float("0.00582491") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.177705") + max_val = float("0.21267") + mean = float("-0.00755062") + std = float("0.0506748") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.954707") + max_val = float("1.21638") + mean = float("1.05592") + std = float("0.0497891") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00824461") + max_val = float("0.0577322") + mean = float("0.0175512") + std = float("0.00716064") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0769287") + max_val = float("0.0893626") + mean = float("-0.00207579") + std = float("0.0304575") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0604261") + max_val = float("0.102952") + mean = float("-0.000212686") + std = float("0.00623353") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249989") + max_val = float("-0.0574309") + mean = float("-0.125167") + std = float("0.0336736") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.816049") + max_val = float("1.01536") + mean = float("0.909295") + std = float("0.0258085") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.0103681") + max_val = float("0.0948348") + mean = float("0.0266136") + std = float("0.0121593") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.16079") + max_val = float("0.0920586") + mean = float("-0.0398222") + std = float("0.0395335") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.03192") + max_val = float("0.0346747") + mean = float("-0.000309907") + std = float("0.00449398") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104731") + max_val = float("0.0725498") + mean = float("-0.0568804") + std = float("0.0152729") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.952515") + max_val = float("1.14217") + mean = float("1.02086") + std = 
float("0.0209603") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00400082") + max_val = float("0.0306736") + mean = float("0.00912551") + std = float("0.00348196") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.108131") + max_val = float("0.0840492") + mean = float("-0.0308524") + std = float("0.0256791") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0561444") + max_val = float("0.104026") + mean = float("-0.00028012") + std = float("0.00382445") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158352") + max_val = float("0.074486") + mean = float("-0.0400406") + std = float("0.0206674") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.888537") + max_val = float("1.07535") + mean = float("0.982149") + std = float("0.0131757") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00629694") + max_val = float("0.0917591") + mean = float("0.0233191") + std = float("0.00930269") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0725783") + max_val = float("0.0597257") + mean = float("-0.0049494") + std = float("0.0256969") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0355484") + max_val = float("0.0687831") + mean = float("-5.36416e-05") + std = float("0.00327465") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158353") + max_val = float("0.074486") + mean = float("-0.0400406") + std = float("0.0206674") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.880933") + max_val = float("1.0776") + mean = float("0.993865") + std = float("0.0122579") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0302136") + max_val = float("0.658467") + mean = float("0.153161") + std = float("0.0615601") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.280458") + max_val = float("0.127177") + mean = float("-0.0754248") + std = float("0.0821892") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0402524") + max_val = float("0.0446942") + mean = float("-0.000120035") + std = float("0.00122819") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.080174") + max_val = float("0.116977") + mean = float("-0.0189992") + std = float("0.0160148") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val 
= float("0.920426") + max_val = float("1.16701") + mean = float("1.01503") + std = float("0.0247134") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0258173") + max_val = float("0.191023") + mean = float("0.0694403") + std = float("0.0298835") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.231545") + max_val = float("0.20914") + mean = float("-0.0203256") + std = float("0.0747077") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.023736") + max_val = float("0.03185") + mean = float("-3.10455e-05") + std = float("0.00160831") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0734011") + max_val = float("0.0209518") + mean = float("-0.0234929") + std = float("0.0134643") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946001") + max_val = float("1.1693") + mean = float("1.01467") + std = float("0.0274094") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0574212") + max_val = float("0.36113") + mean = float("0.167754") + std = float("0.0624421") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.55759") + max_val = float("1.75104") + mean = float("0.0310697") + std = float("0.530417") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0464008") + max_val = float("0.0539612") + mean = float("8.40176e-05") + std = float("0.00279856") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183405") + max_val = float("0.0258023") + mean = float("-0.00146113") + std = float("0.00679536") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969528") + max_val = float("1.06063") + mean = float("0.993845") + std = float("0.0122858") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00270127") + max_val = float("0.0149795") + mean = float("0.00630368") + std = float("0.00229884") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.100294") + max_val = float("0.0506795") + mean = float("-0.0387404") + std = float("0.0226508") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0312313") + max_val = float("0.0414999") + mean = float("-0.000484357") + std = float("0.00306542") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183405") + max_val = float("0.0258023") + mean = float("-0.00146113") + std = float("0.00679536") + data = None + + +class 
Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.971893") + max_val = float("1.08657") + mean = float("1.00365") + std = float("0.0181767") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0120607") + max_val = float("0.104138") + mean = float("0.0369785") + std = float("0.0161136") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.26934") + max_val = float("0.110834") + mean = float("-0.115324") + std = float("0.0508614") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0290225") + max_val = float("0.0663413") + mean = float("-0.000179167") + std = float("0.00128493") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0494678") + max_val = float("0.00858064") + mean = float("-0.00839597") + std = float("0.00776335") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.95427") + max_val = float("1.13764") + mean = float("1.01254") + std = float("0.0201656") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0686687") + max_val = float("0.339855") + mean = float("0.163513") + std = float("0.0492742") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.18586") + max_val = float("0.819546") + mean = float("-0.222925") + std = float("0.255668") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0230308") + max_val = float("0.0510876") + mean = float("-0.000132883") + std = float("0.00152671") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0358263") + max_val = float("0.0138961") + mean = float("-0.00764663") + std = float("0.00787851") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984161") + max_val = float("1.03457") + mean = float("0.999922") + std = float("0.00712994") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00175415") + max_val = float("0.0108139") + mean = float("0.00364709") + std = float("0.0011719") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0809601") + max_val = float("0.126515") + mean = float("-0.0205001") + std = float("0.0225741") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0193951") + max_val = float("0.033031") + mean = float("-0.00027113") + std = float("0.00265482") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0358263") + max_val = 
float("0.0138961") + mean = float("-0.00764663") + std = float("0.00787851") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.981952") + max_val = float("1.06739") + mean = float("1.00455") + std = float("0.0126595") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00890186") + max_val = float("0.0527898") + mean = float("0.0229235") + std = float("0.00801757") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.216942") + max_val = float("0.320178") + mean = float("-0.0713609") + std = float("0.0619825") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.011273") + max_val = float("0.0330076") + mean = float("-0.000116379") + std = float("0.00107823") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0530152") + max_val = float("0.00371186") + mean = float("-0.02064") + std = float("0.00869095") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.975684") + max_val = float("1.08474") + mean = float("1.01197") + std = float("0.0159573") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0111248") + max_val = float("0.075422") + mean = float("0.0304294") + std = float("0.0099954") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.17639") + max_val = float("0.208749") + mean = float("-0.0371536") + std = float("0.0489761") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0145313") + max_val = float("0.0244098") + mean = float("-6.42304e-05") + std = float("0.00148685") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699692") + max_val = float("0.0213726") + mean = float("-0.0333959") + std = float("0.0126416") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981916") + max_val = float("1.05598") + mean = float("1.01336") + std = float("0.0107863") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.0069754") + max_val = float("0.0280088") + mean = float("0.0131181") + std = float("0.00318771") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.145777") + max_val = float("0.0727488") + mean = float("-0.0135891") + std = float("0.034538") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.017823") + max_val = float("0.0467678") + mean = float("-0.000196972") + std = float("0.00306745") + data = None + + +class Program_weight_tensor_parameter_320: + name = 
"parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0243502") + max_val = float("0.0209146") + mean = float("-0.000403346") + std = float("0.00795216") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994149") + max_val = float("1.08382") + mean = float("1.04111") + std = float("0.0136566") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0116675") + max_val = float("0.0524889") + mean = float("0.0209671") + std = float("0.00553338") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.150832") + max_val = float("0.146748") + mean = float("-0.00513308") + std = float("0.045991") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0386333") + max_val = float("0.0273306") + mean = float("-0.000230864") + std = float("0.00376096") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-2.92289e-10") + max_val = float("3.60219e-10") + mean = float("3.09639e-12") + std = float("8.43535e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.797367") + max_val = float("0.801926") + mean = float("0.79841") + std = float("0.000347528") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0176922") + max_val = float("0.0176774") + mean = float("0.000103427") + std = float("0.0103725") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0180007") + max_val = float("0.0179557") + mean = float("-3.21152e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249658") + max_val = float("0.0249348") + mean = float("-0.000264432") + std = float("0.0140502") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0252063") + max_val = float("0.025197") + mean = float("-1.07218e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000555217") + max_val = float("0.000243272") + mean = float("1.9929e-07") + std = float("9.19554e-05") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.7967") + max_val = float("0.802162") + mean = float("0.798413") + std = float("0.000401989") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.000419711") + max_val = float("0.000305357") + mean = float("3.42389e-06") + std = float("9.72847e-05") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0435604") + max_val = float("0.0434697") 
+ mean = float("8.92987e-06") + std = float("0.0249341") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.0003762") + max_val = float("0.000251332") + mean = float("1.8105e-05") + std = float("9.15252e-05") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.796851") + max_val = float("0.802025") + mean = float("0.798428") + std = float("0.00039013") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype = "float32" + min_val = float("-0.0176874") + max_val = float("0.0176099") + mean = float("0.000102328") + std = float("0.0103695") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0179828") + max_val = float("0.0178747") + mean = float("-4.45837e-07") + std = float("0.01019") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.024962") + max_val = float("0.0249286") + mean = float("-0.000262687") + std = float("0.0140493") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251902") + max_val = float("0.0251499") + mean = float("-1.07229e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000350131") + max_val = float("0.000241698") + mean = float("-4.47589e-07") + std = float("8.78869e-05") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.797093") + max_val = float("0.80173") + mean = float("0.798412") + std = float("0.000358076") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000363068") + max_val = float("0.000309618") + mean = float("1.5817e-06") + std = float("9.93162e-05") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0434436") + max_val = float("0.0434242") + mean = float("8.89642e-06") + std = float("0.0249342") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000420154") + max_val = float("0.00045328") + mean = float("2.54749e-05") + std = float("0.000129154") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.797252") + max_val = float("0.801713") + mean = float("0.798442") + std = float("0.000356685") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0177165") + max_val = float("0.0176232") + mean = float("0.000101918") + std = float("0.01036") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0179359") + max_val = float("0.0179271") + mean = float("-5.19214e-07") + std = float("0.01019") + data = None + + +class 
Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + dtype = "float32" + min_val = float("-0.0249926") + max_val = float("0.024906") + mean = float("-0.000261807") + std = float("0.0140485") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251321") + max_val = float("0.0251645") + mean = float("-1.07239e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000524045") + max_val = float("0.000440655") + mean = float("-3.94412e-07") + std = float("0.000134893") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.797309") + max_val = float("0.801401") + mean = float("0.798411") + std = float("0.000338184") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000493111") + max_val = float("0.000473844") + mean = float("-1.84034e-06") + std = float("0.000140404") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.043449") + max_val = float("0.0434786") + mean = float("8.89802e-06") + std = float("0.0249343") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000657995") + max_val = float("0.000731658") + mean = float("3.06102e-05") + std = float("0.000218356") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" + min_val = float("0.797424") + max_val = float("0.801436") + mean = float("0.798458") + std = float("0.000356085") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.0177131") + max_val = float("0.0177688") + mean = float("0.000101971") + std = float("0.0103559") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0180121") + max_val = float("0.0179253") + mean = float("-4.95311e-07") + std = float("0.0101901") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.024929") + max_val = float("0.0249049") + mean = float("-0.000259673") + std = float("0.0140481") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0251524") + max_val = float("0.0252285") + mean = float("-1.0724e-06") + std = float("0.0144035") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.000733503") + max_val = float("0.00075958") + mean = float("-7.75532e-07") + std = float("0.000228197") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.797348") + max_val = float("0.801141") + mean = float("0.79841") + std = float("0.000355182") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + 
min_val = float("-0.000783512") + max_val = float("0.000888767") + mean = float("-2.37833e-07") + std = float("0.000237219") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0437281") + max_val = float("0.0438114") + mean = float("8.92961e-06") + std = float("0.0249348") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [1024] + dtype = "float32" + min_val = float("-3.7594") + max_val = float("-0.734446") + mean = float("-2.18722") + std = float("0.428724") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [1024] + dtype = "float32" + min_val = float("1.61913") + max_val = float("4.44136") + mean = float("3.08039") + std = float("0.25425") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [1024] + dtype = "float32" + min_val = float("0.00445214") + max_val = float("0.0229447") + mean = float("0.00885079") + std = float("0.00174582") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [1024] + dtype = "float32" + min_val = float("-0.140092") + max_val = float("0.122679") + mean = float("-0.0555511") + std = float("0.0303054") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0427729") + max_val = float("0.0695573") + mean = float("-0.000391863") + std = float("0.00403905") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [768] + dtype = "float32" + min_val = float("-0.014467") + max_val = float("0.00131875") + mean = float("-0.000761015") + std = float("0.00204153") + data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0787519") + max_val = float("0.135878") + mean = float("-0.000282851") + std = float("0.0016268") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318654") + mean = float("-0.310798") + std = float("0.291236") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.188523") + max_val = float("1.82125") + mean = float("0.609641") + std = float("0.262607") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("5.24774e-05") + max_val = float("0.00104929") + mean = float("0.000233242") + std = float("0.000113289") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0915885") + max_val = float("0.074148") + mean = float("0.0209669") + std = float("0.0171605") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020214") + max_val = float("0.0255239") + mean = float("-0.000361046") + std = float("0.00271802") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-1.77402") + max_val = float("0.318949") + mean = float("-0.310739") + std = float("0.291254") + data = None + + 
+class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("0.334653") + max_val = float("2.60511") + mean = float("1.02603") + std = float("0.290253") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.000614651") + max_val = float("0.00608489") + mean = float("0.00209955") + std = float("0.000752612") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.228713") + max_val = float("0.112503") + mean = float("0.0217264") + std = float("0.036913") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0190584") + max_val = float("0.0259183") + mean = float("-4.76047e-05") + std = float("0.0017617") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.58225") + max_val = float("0.0329867") + mean = float("-1.56843") + std = float("0.415962") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.52002") + max_val = float("1.64429") + mean = float("1.13566") + std = float("0.149475") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.0409059") + max_val = float("0.240151") + mean = float("0.08781") + std = float("0.0241685") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-0.904392") + max_val = float("0.384792") + mean = float("-0.257009") + std = float("0.123376") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0213207") + max_val = float("0.0602371") + mean = float("-0.000201951") + std = float("0.00231308") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.93927") + max_val = float("0.644474") + mean = float("-0.574884") + std = float("0.358671") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.163873") + max_val = float("2.06585") + mean = float("0.562027") + std = float("0.227242") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("7.74518e-05") + max_val = float("0.00147627") + mean = float("0.000262123") + std = float("0.000127782") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0472137") + max_val = float("0.0687191") + mean = float("0.0210509") + std = float("0.0147693") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0246209") + max_val = float("0.0323191") + mean = float("-0.00038074") + std = float("0.00249603") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.645257") 
+ mean = float("-0.574812") + std = float("0.358742") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.58315") + max_val = float("2.15642") + mean = float("1.08405") + std = float("0.255745") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.00138889") + max_val = float("0.00913889") + mean = float("0.00296031") + std = float("0.000878211") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.082843") + max_val = float("0.147398") + mean = float("0.0337157") + std = float("0.0397786") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.017236") + max_val = float("0.0310435") + mean = float("-8.47071e-05") + std = float("0.00189556") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.39591") + max_val = float("0.845752") + mean = float("-1.40539") + std = float("0.360596") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.453112") + max_val = float("1.91948") + mean = float("1.16636") + std = float("0.14802") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0310517") + max_val = float("0.140233") + mean = float("0.0619149") + std = float("0.0160952") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = [384] + dtype = "float32" + min_val = float("-0.74615") + max_val = float("0.831533") + mean = float("-0.183365") + std = float("0.11049") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0259567") + max_val = float("0.0450409") + mean = float("-0.000200361") + std = float("0.00234146") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.8762") + max_val = float("0.453243") + mean = float("-0.485339") + std = float("0.376467") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.0773354") + max_val = float("2.11925") + mean = float("0.441956") + std = float("0.217663") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("6.06445e-05") + max_val = float("0.00132946") + mean = float("0.000308842") + std = float("0.000151133") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0476186") + max_val = float("0.0717926") + mean = float("0.0252544") + std = float("0.0165171") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0207296") + max_val = float("0.0301957") + mean = float("-0.000479918") + std = float("0.0021441") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = 
"float32" + min_val = float("-1.87654") + max_val = float("0.453653") + mean = float("-0.485263") + std = float("0.376563") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.521871") + max_val = float("2.22439") + mean = float("1.05289") + std = float("0.260102") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00183356") + max_val = float("0.00905176") + mean = float("0.00403889") + std = float("0.0012136") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.210488") + max_val = float("0.180984") + mean = float("0.039756") + std = float("0.0449489") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0177497") + max_val = float("0.036737") + mean = float("-9.16795e-05") + std = float("0.00200706") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.15635") + max_val = float("0.418177") + mean = float("-1.36712") + std = float("0.277468") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.706134") + max_val = float("1.6357") + mean = float("1.14301") + std = float("0.101583") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0221089") + max_val = float("0.144688") + mean = float("0.0472828") + std = float("0.013291") + data = None + + +class Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.690683") + max_val = float("0.206204") + mean = float("-0.128898") + std = float("0.0935638") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274071") + max_val = float("0.0448565") + mean = float("-0.000158418") + std = float("0.00223888") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-2.9232") + max_val = float("1.66463") + mean = float("-0.760372") + std = float("0.643546") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.953224") + max_val = float("2.91794") + mean = float("1.86322") + std = float("0.27618") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.00282756") + max_val = float("0.0125667") + mean = float("0.00523085") + std = float("0.0013343") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.250212") + max_val = float("0.146125") + mean = float("0.0636405") + std = float("0.0327087") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0371909") + max_val = float("0.0509187") + mean = float("-0.000727671") + std = float("0.00522845") + data = None + + +class 
Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-2.2471") + max_val = float("0.681977") + mean = float("-0.777142") + std = float("0.472903") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.965853") + max_val = float("2.89359") + mean = float("2.09705") + std = float("0.305433") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.000815531") + max_val = float("0.00405601") + mean = float("0.00200318") + std = float("0.000443934") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.0161045") + max_val = float("0.0799797") + mean = float("0.0350115") + std = float("0.0164865") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0815437") + max_val = float("0.0646253") + mean = float("-0.000388202") + std = float("0.00359255") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [768] + dtype = "float32" + min_val = float("-2.40199") + max_val = float("0.642394") + mean = float("-0.908374") + std = float("0.339302") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [768] + dtype = "float32" + min_val = float("0.530297") + max_val = float("1.90727") + mean = float("0.919687") + std = float("0.149179") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [768] + dtype = "float32" + min_val = float("0.00640934") + max_val = float("0.0572679") + mean = float("0.0157251") + std = float("0.0047052") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [768] + dtype = "float32" + min_val = float("-0.235794") + max_val = float("0.254524") + mean = float("0.0393271") + std = float("0.0563154") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0378314") + max_val = float("0.0543419") + mean = float("-9.75912e-05") + std = float("0.00233888") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [512] + dtype = "float32" + min_val = float("-3.38998") + max_val = float("1.66652") + mean = float("-1.16179") + std = float("0.513719") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [512] + dtype = "float32" + min_val = float("0.523767") + max_val = float("1.67712") + mean = float("1.11122") + std = float("0.148184") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [512] + dtype = "float32" + min_val = float("0.00248322") + max_val = float("0.0169425") + mean = float("0.00762328") + std = float("0.00205743") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [512] + dtype = "float32" + min_val = float("-0.172258") + max_val = float("0.0979883") + mean = float("-0.0487286") + std = float("0.0396462") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202262") + max_val = float("0.184296") + mean = 
float("-0.000573477") + std = float("0.00792306") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-0.0100703") + max_val = float("0.00138871") + mean = float("-0.00295173") + std = float("0.00227127") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.202729") + max_val = float("0.140205") + mean = float("-0.002055") + std = float("0.00490701") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-1.97045") + max_val = float("0.409864") + mean = float("-0.348766") + std = float("0.333488") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.0528864") + max_val = float("2.15987") + mean = float("0.581255") + std = float("0.419833") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("9.0619e-05") + max_val = float("0.0013381") + mean = float("0.000452295") + std = float("0.000216487") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-0.0346181") + max_val = float("0.054258") + mean = float("0.00535595") + std = float("0.0149315") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.023487") + max_val = float("0.0581182") + mean = float("-0.000339748") + std = float("0.0040934") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + shape = [192] + dtype = "float32" + min_val = float("-1.97037") + max_val = float("0.410702") + mean = float("-0.34863") + std = float("0.333546") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("0.372338") + max_val = float("2.70216") + mean = float("1.20181") + std = float("0.493699") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("0.00127295") + max_val = float("0.0154499") + mean = float("0.00513167") + std = float("0.00187691") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.0977349") + max_val = float("0.146963") + mean = float("0.0204027") + std = float("0.0429259") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0289902") + max_val = float("0.0378296") + mean = float("-0.000154473") + std = float("0.00313532") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.89065") + max_val = float("-0.176734") + mean = float("-1.31453") + std = float("0.40113") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.696524") + max_val = float("2.09454") + mean = float("1.17918") + std = float("0.169868") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = 
"float32" + min_val = float("0.0632461") + max_val = float("0.338318") + mean = float("0.131968") + std = float("0.0437735") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-2.50976") + max_val = float("1.70367") + mean = float("-0.20284") + std = float("0.378719") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0331927") + max_val = float("0.0456383") + mean = float("-0.000188198") + std = float("0.00374306") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.9404") + max_val = float("0.513024") + mean = float("-0.279434") + std = float("0.321452") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0454025") + max_val = float("1.77027") + mean = float("0.444331") + std = float("0.305722") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("7.46909e-05") + max_val = float("0.00134485") + mean = float("0.000400773") + std = float("0.000214016") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0293086") + max_val = float("0.0470179") + mean = float("0.00801703") + std = float("0.0116545") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0234926") + max_val = float("0.036738") + mean = float("-0.000377237") + std = float("0.00377417") + data = None + + +class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.94044") + max_val = float("0.51462") + mean = float("-0.279235") + std = float("0.321666") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.483074") + max_val = float("2.27001") + mean = float("1.13833") + std = float("0.37563") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.00274472") + max_val = float("0.0142561") + mean = float("0.00601192") + std = float("0.0018096") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0926083") + max_val = float("0.111934") + mean = float("0.0327612") + std = float("0.0355469") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0231072") + max_val = float("0.038718") + mean = float("-0.000192078") + std = float("0.00338604") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.50828") + max_val = float("-0.123237") + mean = float("-1.28886") + std = float("0.44374") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.65494") + max_val = float("1.66968") + mean = float("1.19938") + std = float("0.166128") + data = None + + +class 
Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0467958") + max_val = float("0.20027") + mean = float("0.0945377") + std = float("0.0272574") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-2.14487") + max_val = float("0.410589") + mean = float("-0.110743") + std = float("0.24642") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0362254") + max_val = float("0.0508084") + mean = float("-0.000238085") + std = float("0.00389331") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.468575") + mean = float("-0.262432") + std = float("0.335818") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.00295124") + max_val = float("1.67875") + mean = float("0.351961") + std = float("0.251699") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("9.28523e-07") + max_val = float("0.00191867") + mean = float("0.000359659") + std = float("0.00024946") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.0373738") + max_val = float("0.0528657") + mean = float("0.0101716") + std = float("0.0121908") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0303466") + max_val = float("0.0356195") + mean = float("-0.000425557") + std = float("0.0036432") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.7573") + max_val = float("0.470016") + mean = float("-0.262262") + std = float("0.336041") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.406102") + max_val = float("1.97794") + mean = float("1.06588") + std = float("0.334156") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.0026697") + max_val = float("0.0132838") + mean = float("0.00612262") + std = float("0.00179786") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0636079") + max_val = float("0.115567") + mean = float("0.035464") + std = float("0.0321331") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0321474") + max_val = float("0.0388371") + mean = float("-0.000190596") + std = float("0.00354187") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.49735") + max_val = float("0.137985") + mean = float("-1.24334") + std = float("0.424316") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.652126") + max_val = float("1.80991") + mean = 
float("1.16717") + std = float("0.165409") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0309664") + max_val = float("0.139012") + mean = float("0.0677931") + std = float("0.0174696") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.51706") + max_val = float("0.284541") + mean = float("-0.0982665") + std = float("0.179401") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.05013") + max_val = float("0.0656662") + mean = float("-0.000261502") + std = float("0.00399974") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-2.07916") + max_val = float("0.533363") + mean = float("-0.272351") + std = float("0.375289") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.000510371") + max_val = float("0.732354") + mean = float("0.211968") + std = float("0.136272") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("6.2328e-08") + max_val = float("0.00079658") + mean = float("0.00024174") + std = float("0.00013494") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0197338") + max_val = float("0.031677") + mean = float("0.00620505") + std = float("0.0092414") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0202783") + max_val = float("0.036136") + mean = float("-0.000265605") + std = float("0.00319736") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.07922") + max_val = float("0.535166") + mean = float("-0.272236") + std = float("0.375502") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.396505") + max_val = float("1.96272") + mean = float("0.958924") + std = float("0.303858") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00305567") + max_val = float("0.014764") + mean = float("0.00641083") + std = float("0.00196591") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.0910836") + max_val = float("0.162129") + mean = float("0.0386063") + std = float("0.0345053") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0299549") + max_val = float("0.0371106") + mean = float("-0.000205046") + std = float("0.00364104") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0810353") + mean = float("-1.23693") + std = float("0.434057") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = 
"float32" + min_val = float("0.761623") + max_val = float("1.62105") + mean = float("1.15096") + std = float("0.142541") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0272966") + max_val = float("0.103735") + mean = float("0.0487407") + std = float("0.0115761") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.23827") + max_val = float("0.28535") + mean = float("-0.0748347") + std = float("0.164085") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0531238") + max_val = float("0.0579085") + mean = float("-0.000268921") + std = float("0.00396934") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-1.21219") + max_val = float("0.446681") + mean = float("-0.232278") + std = float("0.339349") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("-9.82711e-05") + max_val = float("0.677789") + mean = float("0.192032") + std = float("0.120727") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("2.25949e-10") + max_val = float("0.000865962") + mean = float("0.000239023") + std = float("0.000143426") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0494718") + max_val = float("0.0374457") + mean = float("0.00677273") + std = float("0.0117019") + data = None + + +class Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0342199") + max_val = float("0.0396943") + mean = float("-0.000272099") + std = float("0.00329482") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-1.21223") + max_val = float("0.447751") + mean = float("-0.232181") + std = float("0.33961") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.382831") + max_val = float("1.56386") + mean = float("0.852099") + std = float("0.259991") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00222243") + max_val = float("0.013094") + mean = float("0.00622857") + std = float("0.00178192") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0846332") + max_val = float("0.142415") + mean = float("0.0388704") + std = float("0.03792") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0323048") + max_val = float("0.0364338") + mean = float("-0.000186547") + std = float("0.00363857") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.48701") + max_val = float("-0.131293") + mean = float("-1.25014") + std = float("0.418255") + data = None + + +class 
Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.689678") + max_val = float("1.5199") + mean = float("1.12491") + std = float("0.13482") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0185507") + max_val = float("0.061415") + mean = float("0.0351899") + std = float("0.00879212") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.717377") + max_val = float("0.320847") + mean = float("-0.0746543") + std = float("0.131126") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0610342") + max_val = float("0.0592016") + mean = float("-0.000277763") + std = float("0.00397261") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21753") + max_val = float("0.499396") + mean = float("-0.167678") + std = float("0.2936") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("0.00836385") + max_val = float("1.53625") + mean = float("0.238111") + std = float("0.211728") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("1.9816e-05") + max_val = float("0.00693944") + mean = float("0.000506424") + std = float("0.00066743") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0656722") + max_val = float("0.0862214") + mean = float("0.00951742") + std = float("0.0164341") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0600528") + max_val = float("0.0312537") + mean = float("-0.000425532") + std = float("0.00397123") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21747") + max_val = float("0.500448") + mean = float("-0.167516") + std = float("0.293818") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.354999") + max_val = float("1.44989") + mean = float("0.756941") + std = float("0.21662") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00437457") + max_val = float("0.0169983") + mean = float("0.00911743") + std = float("0.0026827") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.159743") + max_val = float("0.154142") + mean = float("0.0493949") + std = float("0.0451797") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062497") + max_val = float("0.0530577") + mean = float("-0.000241352") + std = float("0.00357809") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-1.87905") + max_val = float("-0.211382") + mean 
= float("-1.14643") + std = float("0.325653") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.788784") + max_val = float("1.59753") + mean = float("1.12152") + std = float("0.12987") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0159247") + max_val = float("0.0763614") + mean = float("0.0315734") + std = float("0.00929052") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.690131") + max_val = float("0.284936") + mean = float("-0.0667142") + std = float("0.130814") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.062874") + max_val = float("0.076648") + mean = float("-0.000213471") + std = float("0.00383126") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-2.86217") + max_val = float("1.58057") + mean = float("-0.0275412") + std = float("0.747651") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.487672") + max_val = float("2.0776") + mean = float("0.90163") + std = float("0.232007") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("0.00962562") + max_val = float("0.0593409") + mean = float("0.0232174") + std = float("0.00900384") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = "float32" + min_val = float("-0.230196") + max_val = float("0.297365") + mean = float("-0.0377198") + std = float("0.0596344") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.108831") + max_val = float("0.0931739") + mean = float("-0.000512323") + std = float("0.00842399") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-2.96764") + max_val = float("1.66844") + mean = float("0.0968476") + std = float("0.663233") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.830791") + max_val = float("5.55835") + mean = float("1.91342") + std = float("0.933379") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00601536") + max_val = float("0.0460481") + mean = float("0.0175059") + std = float("0.00564168") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.133093") + max_val = float("0.157686") + mean = float("-0.0238439") + std = float("0.0565348") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0985625") + max_val = float("0.0941202") + mean = float("-0.000511784") + std = float("0.00783691") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [384] + dtype = "float32" + 
min_val = float("-2.92359") + max_val = float("1.32666") + mean = float("-0.301116") + std = float("0.563662") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [384] + dtype = "float32" + min_val = float("0.631853") + max_val = float("2.47541") + mean = float("1.15998") + std = float("0.257348") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [384] + dtype = "float32" + min_val = float("0.0103628") + max_val = float("0.113663") + mean = float("0.0263639") + std = float("0.0126689") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [384] + dtype = "float32" + min_val = float("-0.269684") + max_val = float("0.245058") + mean = float("0.022821") + std = float("0.0693499") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0753194") + max_val = float("0.0720032") + mean = float("-0.000103466") + std = float("0.00421781") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [256] + dtype = "float32" + min_val = float("-2.04502") + max_val = float("1.28816") + mean = float("-0.924614") + std = float("0.543015") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [256] + dtype = "float32" + min_val = float("0.517239") + max_val = float("1.68961") + mean = float("1.05432") + std = float("0.176149") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [256] + dtype = "float32" + min_val = float("0.00196874") + max_val = float("0.02692") + mean = float("0.00629128") + std = float("0.00300317") + data = None + + +class Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [256] + dtype = "float32" + min_val = float("-0.230499") + max_val = float("0.154945") + mean = float("-0.0516552") + std = float("0.0688298") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.206154") + max_val = float("0.170783") + mean = float("-0.000884197") + std = float("0.0145162") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-0.0139357") + max_val = float("0.00388361") + mean = float("-0.00495662") + std = float("0.00371291") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.347135") + max_val = float("0.228777") + mean = float("-0.00389388") + std = float("0.0106293") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-1.91355") + max_val = float("0.53303") + mean = float("-0.208939") + std = float("0.434311") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.142427") + max_val = float("3.22988") + mean = float("0.635833") + std = float("0.668487") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("7.75639e-05") + max_val = float("0.00243254") + mean = float("0.000585507") + std = float("0.000428968") + data = None + + +class 
Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-0.0546921") + max_val = float("0.0598506") + mean = float("0.0051419") + std = float("0.0215625") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0500852") + max_val = float("0.0932317") + mean = float("-0.000561284") + std = float("0.00794853") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-1.91314") + max_val = float("0.534306") + mean = float("-0.208596") + std = float("0.434435") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.343774") + max_val = float("5.47118") + mean = float("1.08565") + std = float("0.88383") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("0.000976934") + max_val = float("0.0156948") + mean = float("0.0053425") + std = float("0.00272306") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.137727") + max_val = float("0.212796") + mean = float("0.0123751") + std = float("0.0612578") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0398886") + max_val = float("0.0746673") + mean = float("-0.000229692") + std = float("0.00588155") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-2.46605") + max_val = float("-0.0202143") + mean = float("-1.22676") + std = float("0.443304") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.542082") + max_val = float("1.6433") + mean = float("0.945634") + std = float("0.172529") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.0406212") + max_val = float("0.236841") + mean = float("0.0868745") + std = float("0.0368113") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-2.80804") + max_val = float("1.61985") + mean = float("-0.194669") + std = float("0.469655") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.150203") + max_val = float("0.114223") + mean = float("-0.000376735") + std = float("0.00724688") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.38826") + max_val = float("0.562406") + mean = float("-0.132909") + std = float("0.347394") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.0453402") + max_val = float("1.86504") + mean = float("0.460875") + std = float("0.366369") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.68974e-05") + max_val = float("0.00276882") + mean = float("0.000760156") 
+ std = float("0.000616821") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0484682") + max_val = float("0.0463877") + mean = float("0.00677392") + std = float("0.017635") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0483138") + max_val = float("0.0415922") + mean = float("-0.000498568") + std = float("0.00710731") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.38834") + max_val = float("0.5648") + mean = float("-0.13256") + std = float("0.347894") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.370504") + max_val = float("2.32822") + mean = float("0.901933") + std = float("0.426522") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.00320483") + max_val = float("0.0242439") + mean = float("0.00920914") + std = float("0.00476152") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.0963095") + max_val = float("0.121293") + mean = float("0.0354751") + std = float("0.0431439") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.058655") + max_val = float("0.0591114") + mean = float("-0.000356621") + std = float("0.0059174") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + min_val = float("-3.31955") + max_val = float("0.36603") + mean = float("-1.17895") + std = float("0.556023") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.473098") + max_val = float("1.98183") + mean = float("1.03911") + std = float("0.238708") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0285476") + max_val = float("0.145477") + mean = float("0.0548077") + std = float("0.0170506") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-1.25068") + max_val = float("0.505193") + mean = float("-0.0605176") + std = float("0.268142") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.147666") + max_val = float("0.152112") + mean = float("-0.000410438") + std = float("0.00711818") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.24956") + max_val = float("0.58267") + mean = float("-0.109749") + std = float("0.291966") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.0243293") + max_val = float("1.27785") + mean = float("0.324816") + std = float("0.192866") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("6.31792e-05") + max_val = 
float("0.00359895") + mean = float("0.000713188") + std = float("0.000575582") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0383061") + max_val = float("0.050179") + mean = float("0.00405305") + std = float("0.016189") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0448708") + max_val = float("0.0573038") + mean = float("-0.000336044") + std = float("0.00726838") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.24942") + max_val = float("0.584539") + mean = float("-0.109552") + std = float("0.292478") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.315495") + max_val = float("1.67063") + mean = float("0.747087") + std = float("0.257847") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00339766") + max_val = float("0.0255152") + mean = float("0.0102502") + std = float("0.00411405") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.0545808") + max_val = float("0.144753") + mean = float("0.0274454") + std = float("0.0383073") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.065253") + max_val = float("0.0583777") + mean = float("-0.000331097") + std = float("0.00602268") + data = None + + +class Program_weight_tensor_parameter_591: + name = "parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.58296") + max_val = float("0.290726") + mean = float("-1.12856") + std = float("0.572409") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.511106") + max_val = float("2.19165") + mean = float("1.05198") + std = float("0.238255") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.0202502") + max_val = float("0.0763383") + mean = float("0.0399884") + std = float("0.00963234") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.823777") + max_val = float("0.397341") + mean = float("-0.0477408") + std = float("0.195386") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0973524") + max_val = float("0.130681") + mean = float("-0.000422376") + std = float("0.00719502") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-0.892064") + max_val = float("0.529384") + mean = float("-0.160709") + std = float("0.281574") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0191223") + max_val = float("1.40524") + mean = float("0.32501") + std = float("0.213327") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype 
= "float32" + min_val = float("4.82579e-05") + max_val = float("0.00368813") + mean = float("0.000731321") + std = float("0.0005637") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0327526") + max_val = float("0.0463647") + mean = float("0.00722598") + std = float("0.0145649") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0499906") + max_val = float("0.0448114") + mean = float("-0.000606145") + std = float("0.00724394") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-0.891955") + max_val = float("0.530721") + mean = float("-0.160571") + std = float("0.281998") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.17446") + max_val = float("1.78047") + mean = float("0.708571") + std = float("0.284378") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00236192") + max_val = float("0.0258909") + mean = float("0.0102105") + std = float("0.00395084") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.0582992") + max_val = float("0.137218") + mean = float("0.0409603") + std = float("0.0377027") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.057305") + max_val = float("0.0650381") + mean = float("-0.000417143") + std = float("0.00601776") + data = None + + +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-2.65777") + max_val = float("0.065358") + mean = float("-1.06432") + std = float("0.48826") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.512951") + max_val = float("1.73806") + mean = float("1.01547") + std = float("0.193357") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0176905") + max_val = float("0.0567785") + mean = float("0.0307593") + std = float("0.00710222") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.762613") + max_val = float("0.609475") + mean = float("-0.0648606") + std = float("0.194567") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0738037") + max_val = float("0.125248") + mean = float("-0.000426247") + std = float("0.0069708") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.978262") + max_val = float("0.489992") + mean = float("-0.136691") + std = float("0.278636") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0498074") + max_val = float("1.1462") + mean = float("0.296075") + std = float("0.172323") + data = None + + +class 
Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("0.000185263") + max_val = float("0.00518845") + mean = float("0.00108541") + std = float("0.000730374") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.041706") + max_val = float("0.0562903") + mean = float("0.00553756") + std = float("0.017998") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0731207") + max_val = float("0.0763792") + mean = float("-0.000594618") + std = float("0.00825765") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.978083") + max_val = float("0.492448") + mean = float("-0.136655") + std = float("0.279122") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.236133") + max_val = float("1.69671") + mean = float("0.603953") + std = float("0.228164") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00623834") + max_val = float("0.0304043") + mean = float("0.0139144") + std = float("0.00496797") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.0709982") + max_val = float("0.13525") + mean = float("0.0270257") + std = float("0.0460507") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0654835") + max_val = float("0.0522648") + mean = float("-0.00036204") + std = float("0.0060426") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-3.46434") + max_val = float("0.199609") + mean = float("-1.00527") + std = float("0.548081") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.686506") + max_val = float("2.51291") + mean = float("1.07427") + std = float("0.212412") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.0132607") + max_val = float("0.0547669") + mean = float("0.0263345") + std = float("0.00858541") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.483153") + max_val = float("0.528087") + mean = float("-0.0517666") + std = float("0.193156") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0824841") + max_val = float("0.0934753") + mean = float("-0.000357672") + std = float("0.00712731") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.625302") + max_val = float("0.449836") + mean = float("-0.0825559") + std = float("0.256738") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0910018") + max_val = float("1.30085") + mean = 
float("0.309049") + std = float("0.196412") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000380277") + max_val = float("0.0176497") + mean = float("0.00357657") + std = float("0.00282864") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.035932") + max_val = float("0.0300925") + mean = float("-5.114e-05") + std = float("0.0106361") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0925016") + max_val = float("0.0753255") + mean = float("-0.00105853") + std = float("0.00936655") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.625183") + max_val = float("0.450937") + mean = float("-0.082575") + std = float("0.257081") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.210658") + max_val = float("1.42703") + mean = float("0.527208") + std = float("0.258269") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.0103464") + max_val = float("0.0958287") + mean = float("0.0339795") + std = float("0.0172322") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.108648") + max_val = float("0.0906186") + mean = float("-0.00832054") + std = float("0.0379588") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0885375") + max_val = float("0.0525934") + mean = float("-0.000466484") + std = float("0.00584459") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-2.40893") + max_val = float("0.508421") + mean = float("-0.828862") + std = float("0.467337") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.853968") + max_val = float("2.18309") + mean = float("1.27545") + std = float("0.208741") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0115478") + max_val = float("0.0463068") + mean = float("0.0216413") + std = float("0.00767502") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.571223") + max_val = float("0.473029") + mean = float("-0.053671") + std = float("0.173924") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.15411") + max_val = float("0.150524") + mean = float("-0.000241604") + std = float("0.00722176") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-3.16609") + max_val = float("1.88989") + mean = float("0.501666") + std = float("0.861493") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = 
float("0.214988") + max_val = float("2.6299") + mean = float("0.562885") + std = float("0.31708") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.00741665") + max_val = float("0.158098") + mean = float("0.0323603") + std = float("0.0239189") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.27197") + max_val = float("0.329568") + mean = float("-0.0148612") + std = float("0.0939931") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.186901") + max_val = float("0.225419") + mean = float("-0.000291508") + std = float("0.0156297") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-4.92284") + max_val = float("1.57998") + mean = float("0.384603") + std = float("1.04888") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.414126") + max_val = float("6.78093") + mean = float("1.69449") + std = float("1.30795") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00527536") + max_val = float("0.274604") + mean = float("0.0382764") + std = float("0.0355398") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.171845") + max_val = float("0.443762") + mean = float("0.0466766") + std = float("0.0965758") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.116975") + max_val = float("0.156029") + mean = float("0.000440768") + std = float("0.0149691") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [192] + dtype = "float32" + min_val = float("-2.27475") + max_val = float("1.75104") + mean = float("-0.126037") + std = float("0.740702") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [192] + dtype = "float32" + min_val = float("0.632268") + max_val = float("2.97322") + mean = float("1.08733") + std = float("0.283408") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [192] + dtype = "float32" + min_val = float("0.0110312") + max_val = float("0.234931") + mean = float("0.0439587") + std = float("0.0319644") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [192] + dtype = "float32" + min_val = float("-0.578422") + max_val = float("0.269069") + mean = float("-0.0941015") + std = float("0.118583") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0856428") + max_val = float("0.123627") + mean = float("-0.000225745") + std = float("0.00765725") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [128] + dtype = "float32" + min_val = float("-2.81597") + max_val = float("1.9636") + mean = float("-0.71259") + std = float("0.647835") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = 
[128] + dtype = "float32" + min_val = float("0.311227") + max_val = float("2.8783") + mean = float("1.01845") + std = float("0.278722") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [128] + dtype = "float32" + min_val = float("0.000859604") + max_val = float("0.015639") + mean = float("0.00453842") + std = float("0.00230768") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [128] + dtype = "float32" + min_val = float("-0.237838") + max_val = float("0.261934") + mean = float("0.00314431") + std = float("0.0867318") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.171773") + max_val = float("0.211127") + mean = float("-0.00142636") + std = float("0.0224525") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-0.0180386") + max_val = float("3.78007e-05") + mean = float("-0.00735479") + std = float("0.00450801") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.30281") + max_val = float("0.123007") + mean = float("-0.00790532") + std = float("0.0180213") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0501789") + max_val = float("0.0563261") + mean = float("-0.00170388") + std = float("0.0129798") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0578676") + max_val = float("0.0799749") + mean = float("-0.000509865") + std = float("0.0110281") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val 
= float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0925274") + max_val = float("0.0949158") + mean = float("-0.00064859") + std = float("0.0123667") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0727088") + max_val = float("0.0782992") + mean = float("-0.00102365") + std = float("0.0139349") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0621898") + max_val = float("0.0692526") + mean = float("-0.000822014") + std = float("0.0111057") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.11162") + max_val = float("0.0943574") + mean = float("-0.000368661") + std = float("0.0125785") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0944494") + max_val = float("0.0702451") + mean = float("-0.00185301") + std = float("0.0172184") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0691644") + max_val = float("0.0974384") + mean = float("-0.000506655") + std = float("0.011691") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.133213") + max_val = float("0.0905212") + mean = float("-0.000334254") + std = float("0.0134452") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.17806") + max_val = float("0.14305") + mean = float("-0.00229242") + 
std = float("0.0246641") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.135012") + max_val = float("0.178483") + mean = float("-0.0004429") + std = float("0.0226955") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [96] + dtype = "float32" + min_val = float("-3.40701") + max_val = float("3.27538") + mean = float("0.329531") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [96] + dtype = "float32" + min_val = float("0.865919") + max_val = float("4.91404") + mean = float("1.91603") + std = float("0.752783") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [96] + dtype = "float32" + min_val = float("0.704881") + max_val = float("31.7293") + mean = float("2.73326") + std = float("3.48853") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [96] + dtype = "float32" + min_val = float("-1.47461") + max_val = float("2.59735") + mean = float("-0.288555") + std = float("0.730674") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.110689") + max_val = float("0.13859") + mean = float("-0.000360127") + std = float("0.0133189") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.179264") + max_val = float("0.162144") + mean = float("-0.000679023") + std = float("0.020536") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [32] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.347786") + max_val = float("0.218964") + mean = float("-0.000199571") + std = float("0.0261033") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.317155") + max_val = float("0.280865") + mean = float("-0.00214957") + std = float("0.0702742") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/graph_hash.txt similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/graph_hash.txt rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/graph_hash.txt diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/graph_net.json similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_10/graph_net.json rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/graph_net.json diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/model.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/model.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/model.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_14/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_7/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt deleted file mode 100644 index 4813c5fb9..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -0ade118bf5113a2f63aa1b27c2409d65cdb05050be009516c3881c1dd15b2c66 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json deleted file mode 100644 index 8b4fccfd1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py deleted file mode 100644 index 7a270e845..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/input_meta.py +++ /dev/null @@ -1,233 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [1] - dtype = "float32" - data = [3.32824] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [1] - dtype = "float32" - data = [0.0732286] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [1] - dtype = "float32" - data = [2.19723] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [1] - dtype = "float32" - data = [-1.08555] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [1] - dtype = "float32" - data = [1.71785] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [1] - dtype = "float32" - data = [1.13331] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1] - dtype = "float32" - data = [0.590431] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1] - dtype = "float32" - data = [0.708919] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1] - dtype = "float32" - data = [0.743773] - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [1] - dtype = "float32" - data = [0.858462] - - -class Program_weight_tensor_data_10: - name = "data_10" - shape = [1] - dtype = "float32" - data = [0.636941] - - -class Program_weight_tensor_data_11: - name = "data_11" - shape = [1] - dtype = "float32" - data = [0.828404] - - -class Program_weight_tensor_data_12: - name = "data_12" - shape = [1] - dtype = "float32" - data = [0.370716] - - -class Program_weight_tensor_data_13: - name = "data_13" - shape = [1] - dtype = "float32" - data = [0.993379] - - -class Program_weight_tensor_data_14: - name = "data_14" - shape = [1] - dtype = "float32" - data = [1.17653] - - -class Program_weight_tensor_data_15: - name = "data_15" - shape = [1] - dtype = "float32" - data = [0.50449] - - -class Program_weight_tensor_data_16: - name = "data_16" - shape = [1] - dtype = "float32" - data = [0.633712] - - -class Program_weight_tensor_data_17: - name = "data_17" - shape = [1] - dtype = "float32" - data = [0.683349] - - -class Program_weight_tensor_data_18: - name = "data_18" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.168174") - max_val = float("0.148061") - mean = float("-3.61898e-05") - std = float("0.018088") - data = None - - -class Program_weight_tensor_data_19: - name = "data_19" - shape = [3072] - dtype = "float32" - min_val = float("-0.0404125") - max_val = float("0.0382792") - mean = float("0.000304818") - std = float("0.00730327") - data = None - - -class Program_weight_tensor_data_20: - name = "data_20" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0819269") - max_val = float("0.0975206") - mean = float("-8.50294e-06") - std = float("0.0176017") - data = None - - -class Program_weight_tensor_data_21: - name = "data_21" - shape = [3072] - dtype = "float32" - min_val = 
float("-0.0329042") - max_val = float("0.0303536") - mean = float("-7.16758e-05") - std = float("0.00591918") - data = None - - -class Program_weight_tensor_data_22: - name = "data_22" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.535505") - max_val = float("0.535918") - mean = float("-4.04094e-06") - std = float("0.0286549") - data = None - - -class Program_weight_tensor_data_23: - name = "data_23" - shape = [3072] - dtype = "float32" - min_val = float("-0.144567") - max_val = float("0.151832") - mean = float("8.68538e-05") - std = float("0.028637") - data = None - - -class Program_weight_tensor_data_24: - name = "data_24" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.410122") - max_val = float("0.357528") - mean = float("-1.73642e-05") - std = float("0.0248621") - data = None - - -class Program_weight_tensor_data_25: - name = "data_25" - shape = [3072] - dtype = "float32" - min_val = float("-0.118434") - max_val = float("0.157878") - mean = float("0.000208634") - std = float("0.025849") - data = None - - -class Program_weight_tensor_data_26: - name = "data_26" - shape = [1, 3, 640, 640] - dtype = "float32" - max_val = float("1.0") - mean = float("0.467665") - std = float("0.176432") - data = None - - -class Program_weight_tensor_data_27: - name = "data_27" - shape = [1, 400, 1024] - dtype = "float32" - min_val = float("-1.0") - max_val = float("1.0") - mean = float("0.444201") - std = float("0.550168") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py deleted file mode 100644 index ceff0732f..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/model.py +++ /dev/null @@ -1,8235 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - 
parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - 
parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - 
parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - parameter_698, - parameter_699, - parameter_700, - parameter_701, - parameter_702, - parameter_703, - parameter_704, - parameter_705, - parameter_706, - parameter_707, - parameter_708, - parameter_709, - parameter_710, - parameter_711, - parameter_712, - parameter_713, - parameter_714, - parameter_715, - parameter_716, - parameter_717, - parameter_718, - parameter_719, - parameter_720, - parameter_721, - parameter_722, - parameter_723, - parameter_724, - parameter_725, - parameter_726, - parameter_727, - parameter_728, - parameter_729, - 
parameter_730, - parameter_731, - parameter_732, - parameter_733, - parameter_734, - parameter_735, - parameter_736, - parameter_737, - parameter_738, - parameter_739, - parameter_740, - parameter_741, - parameter_742, - parameter_743, - parameter_744, - parameter_745, - parameter_746, - parameter_747, - parameter_748, - parameter_749, - parameter_750, - parameter_751, - parameter_752, - parameter_753, - parameter_754, - parameter_755, - parameter_756, - parameter_757, - parameter_758, - parameter_759, - parameter_760, - parameter_761, - parameter_762, - parameter_763, - parameter_764, - parameter_765, - parameter_766, - parameter_767, - parameter_768, - parameter_769, - parameter_770, - parameter_771, - parameter_772, - parameter_773, - parameter_774, - parameter_775, - parameter_776, - parameter_777, - parameter_778, - parameter_779, - parameter_780, - parameter_781, - parameter_782, - parameter_783, - parameter_784, - parameter_785, - parameter_786, - parameter_787, - parameter_788, - parameter_789, - parameter_790, - parameter_791, - parameter_792, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - data_11, - data_12, - data_13, - data_14, - data_15, - data_16, - data_17, - data_18, - data_19, - data_20, - data_21, - data_22, - data_23, - data_24, - data_25, - data_26, - data_27, - ): - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x3x-1x-1xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_26, parameter_792, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_26, parameter_792 - - # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_791, - parameter_790, - parameter_789, - parameter_788, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_0, parameter_788, parameter_789, parameter_790, parameter_791 - - # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - del batch_norm__0 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_787, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_787, swish_0 - - # pd_op.batch_norm_: (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (-1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_786, - parameter_785, - parameter_784, - parameter_783, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_1, parameter_783, parameter_784, parameter_785, parameter_786 - - # pd_op.swish: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - del batch_norm__6 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_1, parameter_782, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_782, swish_1 - - # pd_op.batch_norm_: 
(-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (-1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_781, - parameter_780, - parameter_779, - parameter_778, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_2, parameter_778, parameter_779, parameter_780, parameter_781 - - # pd_op.swish: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) - del batch_norm__12 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_2, parameter_777, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_777, swish_2 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_776, - parameter_775, - parameter_774, - parameter_773, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_3, parameter_773, parameter_774, parameter_775, parameter_776 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - del batch_norm__18 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_3, parameter_772, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_772 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_771, - parameter_770, - parameter_769, - parameter_768, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_4, parameter_768, parameter_769, parameter_770, parameter_771 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - del batch_norm__24 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_3, parameter_767, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_767, swish_3 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_766, - parameter_765, - parameter_764, - parameter_763, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_5, parameter_763, parameter_764, parameter_765, parameter_766 - - # pd_op.swish: 
(-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - del batch_norm__30 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_5, parameter_762, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_762 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_761, - parameter_760, - parameter_759, - parameter_758, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_6, parameter_758, parameter_759, parameter_760, parameter_761 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__36) - del batch_norm__36 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_6, parameter_757, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_757 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_756, - parameter_755, - parameter_754, - parameter_753, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_7, parameter_753, parameter_754, parameter_755, parameter_756 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_6, parameter_752, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_752, swish_6 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_751, - parameter_750, - parameter_749, - parameter_748, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_8, parameter_748, parameter_749, parameter_750, parameter_751 - - # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del batch_norm__48, data_0 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) - del batch_norm__42, multiply_0 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_7 = paddle._C_ops.swish(add_0) - del add_0 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_5, swish_7) - del swish_5, swish_7 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_747, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_747 - - # 
pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_746, - parameter_745, - parameter_744, - parameter_743, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_9, parameter_743, parameter_744, parameter_745, parameter_746 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_8 = paddle._C_ops.swish(batch_norm__54) - del batch_norm__54 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_8, parameter_742, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_742 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_741, - parameter_740, - parameter_739, - parameter_738, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_10, parameter_738, parameter_739, parameter_740, parameter_741 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_8, parameter_737, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_737, swish_8 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_736, - parameter_735, - parameter_734, - parameter_733, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_11, parameter_733, parameter_734, parameter_735, parameter_736 - - # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del batch_norm__66, data_1 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) - del batch_norm__60, multiply_1 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_9 = paddle._C_ops.swish(add_2) - del add_2 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_9) - del add_1, swish_9 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_732 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_731, - 
parameter_730, - parameter_729, - parameter_728, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_12, parameter_728, parameter_729, parameter_730, parameter_731 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(batch_norm__72) - del batch_norm__72 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_10, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_727 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_726, - parameter_725, - parameter_724, - parameter_723, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_13, parameter_723, parameter_724, parameter_725, parameter_726 - - # pd_op.conv2d: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_10, parameter_722, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_722, swish_10 - - # pd_op.batch_norm_: (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_721, - parameter_720, - parameter_719, - parameter_718, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_14, parameter_718, parameter_719, parameter_720, parameter_721 - - # pd_op.multiply: (-1x48x-1x-1xf32) <- (1xf32, -1x48x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) - del batch_norm__84, data_2 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) - del batch_norm__78, multiply_2 - - # pd_op.swish: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(add_4) - del add_4 - - # pd_op.add: (-1x48x-1x-1xf32) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - add_5 = paddle._C_ops.add(add_3, swish_11) - del add_3, swish_11 - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x48x-1x-1xf32, -1x48x-1x-1xf32]) <- (-1x48x-1x-1xf32, -1x48x-1x-1xf32) - combine_0 = [swish_4, add_5] - del add_5, swish_4 - - # pd_op.concat: (-1x96x-1x-1xf32) <- ([-1x48x-1x-1xf32, -1x48x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.mean: (-1x96x1x1xf32) <- (-1x96x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_0, parameter_717 - - # 
pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_716, full_int_array_1) - del parameter_716 - - # pd_op.add: (-1x96x1x1xf32) <- (-1x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - del conv2d_15, reshape_0 - - # pd_op.hardsigmoid: (-1x96x1x1xf32) <- (-1x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_2, hardsigmoid_0) - del concat_2, hardsigmoid_0 - - # pd_op.conv2d: (-1x128x-1x-1xf32) <- (-1x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_3, parameter_715, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_3, parameter_715 - - # pd_op.batch_norm_: (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (-1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_714, - parameter_713, - parameter_712, - parameter_711, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_16, parameter_711, parameter_712, parameter_713, parameter_714 - - # pd_op.swish: (-1x128x-1x-1xf32) <- (-1x128x-1x-1xf32) - swish_12 = paddle._C_ops.swish(batch_norm__90) - del batch_norm__90 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_12, parameter_710, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_710, swish_12 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_709, - parameter_708, - parameter_707, - parameter_706, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_17, parameter_706, parameter_707, parameter_708, parameter_709 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__96) - del batch_norm__96 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_13, parameter_705, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_705 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_704, - parameter_703, - parameter_702, - parameter_701, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_18, parameter_701, parameter_702, parameter_703, parameter_704 - - # 
pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__102) - del batch_norm__102 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_13, parameter_700, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_700, swish_13 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_699, - parameter_698, - parameter_697, - parameter_696, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_19, parameter_696, parameter_697, parameter_698, parameter_699 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__108) - del batch_norm__108 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_15, parameter_695, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_695 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_694, - parameter_693, - parameter_692, - parameter_691, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_20, parameter_691, parameter_692, parameter_693, parameter_694 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__114) - del batch_norm__114 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_16, parameter_690, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_690 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_689, - parameter_688, - parameter_687, - parameter_686, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_21, parameter_686, parameter_687, parameter_688, parameter_689 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_16, parameter_685, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_685, swish_16 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_684, - parameter_683, - parameter_682, - parameter_681, - 
True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_22, parameter_681, parameter_682, parameter_683, parameter_684 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del batch_norm__126, data_3 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) - del batch_norm__120, multiply_4 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_17 = paddle._C_ops.swish(add_7) - del add_7 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_8 = paddle._C_ops.add(swish_15, swish_17) - del swish_15, swish_17 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_680, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_680 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_679, - parameter_678, - parameter_677, - parameter_676, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_23, parameter_676, parameter_677, parameter_678, parameter_679 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(batch_norm__132) - del batch_norm__132 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_675, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_675 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_674, - parameter_673, - parameter_672, - parameter_671, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_24, parameter_671, parameter_672, parameter_673, parameter_674 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_18, parameter_670, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_670, swish_18 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_669, - parameter_668, - parameter_667, - parameter_666, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_25, parameter_666, parameter_667, parameter_668, parameter_669 - - # pd_op.multiply: (-1x96x-1x-1xf32) 
<- (1xf32, -1x96x-1x-1xf32) - multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) - del batch_norm__144, data_4 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) - del batch_norm__138, multiply_5 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(add_9) - del add_9 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_19) - del add_8, swish_19 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_665, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_665 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_664, - parameter_663, - parameter_662, - parameter_661, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_26, parameter_661, parameter_662, parameter_663, parameter_664 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(batch_norm__150) - del batch_norm__150 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_20, parameter_660, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_660 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_659, - parameter_658, - parameter_657, - parameter_656, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_27, parameter_656, parameter_657, parameter_658, parameter_659 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_20, parameter_655, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_655, swish_20 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_654, - parameter_653, - parameter_652, - parameter_651, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_28, parameter_651, parameter_652, parameter_653, parameter_654 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del batch_norm__162, data_5 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) - del batch_norm__156, multiply_6 - - # 
pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(add_11) - del add_11 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_21) - del add_10, swish_21 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_650, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_650 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_649, - parameter_648, - parameter_647, - parameter_646, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_29, parameter_646, parameter_647, parameter_648, parameter_649 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(batch_norm__168) - del batch_norm__168 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_22, parameter_645, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_645 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_644, - parameter_643, - parameter_642, - parameter_641, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_30, parameter_641, parameter_642, parameter_643, parameter_644 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_22, parameter_640, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_640, swish_22 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_639, - parameter_638, - parameter_637, - parameter_636, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_31, parameter_636, parameter_637, parameter_638, parameter_639 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) - del batch_norm__180, data_6 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) - del batch_norm__174, multiply_7 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(add_13) - del add_13 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_14 = paddle._C_ops.add(add_12, swish_23) - del add_12, swish_23 - - # pd_op.conv2d: (-1x96x-1x-1xf32) 
<- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_635 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_634, - parameter_633, - parameter_632, - parameter_631, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_32, parameter_631, parameter_632, parameter_633, parameter_634 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(batch_norm__186) - del batch_norm__186 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_24, parameter_630, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_630 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_629, - parameter_628, - parameter_627, - parameter_626, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_33, parameter_626, parameter_627, parameter_628, parameter_629 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_24, parameter_625, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_625, swish_24 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_624, - parameter_623, - parameter_622, - parameter_621, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_34, parameter_621, parameter_622, parameter_623, parameter_624 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) - del batch_norm__198, data_7 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) - del batch_norm__192, multiply_8 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_25 = paddle._C_ops.swish(add_15) - del add_15 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_16 = paddle._C_ops.add(add_14, swish_25) - del add_14, swish_25 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 
96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_35, parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_26 = paddle._C_ops.swish(batch_norm__204) - del batch_norm__204 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_26, parameter_615, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_36, parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_26, parameter_610, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610, swish_26 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_37, parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.multiply: (-1x96x-1x-1xf32) <- (1xf32, -1x96x-1x-1xf32) - multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) - del batch_norm__216, data_8 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) - del batch_norm__210, multiply_9 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_27 = paddle._C_ops.swish(add_17) - del add_17 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_18 = paddle._C_ops.add(add_16, swish_27) - del add_16, swish_27 - - # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - combine_1 = [swish_14, add_18] - del add_18, swish_14 - - # pd_op.concat: (-1x192x-1x-1xf32) <- ([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_605, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, 
"NCHW" - ) - del mean_1, parameter_605 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_604, full_int_array_1) - del parameter_604 - - # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - del conv2d_38, reshape_1 - - # pd_op.hardsigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) - multiply_10 = paddle._C_ops.multiply(concat_3, hardsigmoid_1) - del concat_3, hardsigmoid_1 - - # pd_op.conv2d: (-1x256x-1x-1xf32) <- (-1x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_10, parameter_603, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_10, parameter_603 - - # pd_op.batch_norm_: (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_602, - parameter_601, - parameter_600, - parameter_599, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_39, parameter_599, parameter_600, parameter_601, parameter_602 - - # pd_op.swish: (-1x256x-1x-1xf32) <- (-1x256x-1x-1xf32) - swish_28 = paddle._C_ops.swish(batch_norm__222) - del batch_norm__222 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_28, parameter_598, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_598 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_597, - parameter_596, - parameter_595, - parameter_594, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_40, parameter_594, parameter_595, parameter_596, parameter_597 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__228) - del batch_norm__228 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_29, parameter_593, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_593 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_592, - parameter_591, - parameter_590, - parameter_589, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_41, parameter_589, parameter_590, parameter_591, parameter_592 - - # 
pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__234) - del batch_norm__234 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_29, parameter_588, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_588, swish_29 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_587, - parameter_586, - parameter_585, - parameter_584, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_42, parameter_584, parameter_585, parameter_586, parameter_587 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__240) - del batch_norm__240 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_31, parameter_583, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_583 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_582, - parameter_581, - parameter_580, - parameter_579, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_43, parameter_579, parameter_580, parameter_581, parameter_582 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__246) - del batch_norm__246 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_32, parameter_578, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_578 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_577, - parameter_576, - parameter_575, - parameter_574, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_44, parameter_574, parameter_575, parameter_576, parameter_577 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_32, parameter_573, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_573, swish_32 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - 
parameter_572, - parameter_571, - parameter_570, - parameter_569, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_45, parameter_569, parameter_570, parameter_571, parameter_572 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del batch_norm__258, data_9 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) - del batch_norm__252, multiply_11 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(add_20) - del add_20 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_21 = paddle._C_ops.add(swish_31, swish_33) - del swish_31, swish_33 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_568, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_568 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_567, - parameter_566, - parameter_565, - parameter_564, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_46, parameter_564, parameter_565, parameter_566, parameter_567 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(batch_norm__264) - del batch_norm__264 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_34, parameter_563, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_563 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_562, - parameter_561, - parameter_560, - parameter_559, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_47, parameter_559, parameter_560, parameter_561, parameter_562 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_34, parameter_558, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_558, swish_34 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_557, - parameter_556, - parameter_555, - parameter_554, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, 
None, None), - ) - del conv2d_48, parameter_554, parameter_555, parameter_556, parameter_557 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) - del batch_norm__276, data_10 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) - del batch_norm__270, multiply_12 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(add_22) - del add_22 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_23 = paddle._C_ops.add(add_21, swish_35) - del add_21, swish_35 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_553, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_553 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_552, - parameter_551, - parameter_550, - parameter_549, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_49, parameter_549, parameter_550, parameter_551, parameter_552 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(batch_norm__282) - del batch_norm__282 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_36, parameter_548, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_548 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_547, - parameter_546, - parameter_545, - parameter_544, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_50, parameter_544, parameter_545, parameter_546, parameter_547 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_36, parameter_543, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_543, swish_36 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_542, - parameter_541, - parameter_540, - parameter_539, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_51, parameter_539, parameter_540, parameter_541, parameter_542 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) - 
del batch_norm__294, data_11 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) - del batch_norm__288, multiply_13 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(add_24) - del add_24 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_25 = paddle._C_ops.add(add_23, swish_37) - del add_23, swish_37 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_538, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_538 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_537, - parameter_536, - parameter_535, - parameter_534, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_52, parameter_534, parameter_535, parameter_536, parameter_537 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(batch_norm__300) - del batch_norm__300 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_38, parameter_533, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_533 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_532, - parameter_531, - parameter_530, - parameter_529, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_53, parameter_529, parameter_530, parameter_531, parameter_532 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_38, parameter_528, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_528, swish_38 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_527, - parameter_526, - parameter_525, - parameter_524, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_54, parameter_524, parameter_525, parameter_526, parameter_527 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) - del batch_norm__312, data_12 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) - del batch_norm__306, multiply_14 - - # pd_op.swish: 
(-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_39 = paddle._C_ops.swish(add_26) - del add_26 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_27 = paddle._C_ops.add(add_25, swish_39) - del add_25, swish_39 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_523 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_522, - parameter_521, - parameter_520, - parameter_519, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_55, parameter_519, parameter_520, parameter_521, parameter_522 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_40 = paddle._C_ops.swish(batch_norm__318) - del batch_norm__318 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_40, parameter_518, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_518 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_517, - parameter_516, - parameter_515, - parameter_514, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_56, parameter_514, parameter_515, parameter_516, parameter_517 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_40, parameter_513, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_513, swish_40 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_512, - parameter_511, - parameter_510, - parameter_509, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_57, parameter_509, parameter_510, parameter_511, parameter_512 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) - del batch_norm__330, data_13 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) - del batch_norm__324, multiply_15 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_41 = paddle._C_ops.swish(add_28) - del add_28 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_29 = paddle._C_ops.add(add_27, swish_41) - del 
add_27, swish_41 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_58, parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(batch_norm__336) - del batch_norm__336 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_42, parameter_503, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_59, parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_42, parameter_498, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498, swish_42 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_60, parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (1xf32, -1x192x-1x-1xf32) - multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) - del batch_norm__348, data_14 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) - del batch_norm__342, multiply_16 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(add_30) - del add_30 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_31 = paddle._C_ops.add(add_29, swish_43) - del add_29, swish_43 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - combine_2 = [swish_30, add_31] - del add_31, swish_30 - - # pd_op.concat: (-1x384x-1x-1xf32) <- 
([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_4, full_int_array_0, True) - - # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_493, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_2, parameter_493 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_492, full_int_array_1) - del parameter_492 - - # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - del conv2d_61, reshape_2 - - # pd_op.hardsigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) - multiply_17 = paddle._C_ops.multiply(concat_4, hardsigmoid_2) - del concat_4, hardsigmoid_2 - - # pd_op.conv2d: (-1x512x-1x-1xf32) <- (-1x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_17, parameter_491, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_17, parameter_491 - - # pd_op.batch_norm_: (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (-1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_490, - parameter_489, - parameter_488, - parameter_487, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_62, parameter_487, parameter_488, parameter_489, parameter_490 - - # pd_op.swish: (-1x512x-1x-1xf32) <- (-1x512x-1x-1xf32) - swish_44 = paddle._C_ops.swish(batch_norm__354) - del batch_norm__354 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x512x-1x-1xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_44, parameter_486, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_486 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_485, - parameter_484, - parameter_483, - parameter_482, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_63, parameter_482, parameter_483, parameter_484, parameter_485 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__360) - del batch_norm__360 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_45, parameter_481, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_481 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - 
batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_480, - parameter_479, - parameter_478, - parameter_477, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_64, parameter_477, parameter_478, parameter_479, parameter_480 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__366) - del batch_norm__366 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_45, parameter_476, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_476, swish_45 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_475, - parameter_474, - parameter_473, - parameter_472, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_65, parameter_472, parameter_473, parameter_474, parameter_475 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__372) - del batch_norm__372 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_47, parameter_471, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_471 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_470, - parameter_469, - parameter_468, - parameter_467, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_66, parameter_467, parameter_468, parameter_469, parameter_470 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__378) - del batch_norm__378 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_48, parameter_466, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_466 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_465, - parameter_464, - parameter_463, - parameter_462, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_67, parameter_462, parameter_463, parameter_464, parameter_465 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_48, parameter_461, [1, 1], [0, 
0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_461, swish_48 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_460, - parameter_459, - parameter_458, - parameter_457, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_68, parameter_457, parameter_458, parameter_459, parameter_460 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) - multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) - del batch_norm__390, data_15 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) - del batch_norm__384, multiply_18 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_49 = paddle._C_ops.swish(add_33) - del add_33 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_34 = paddle._C_ops.add(swish_47, swish_49) - del swish_47, swish_49 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_456, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_456 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_455, - parameter_454, - parameter_453, - parameter_452, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_69, parameter_452, parameter_453, parameter_454, parameter_455 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_50 = paddle._C_ops.swish(batch_norm__396) - del batch_norm__396 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_50, parameter_451, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_451 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_450, - parameter_449, - parameter_448, - parameter_447, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_70, parameter_447, parameter_448, parameter_449, parameter_450 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_50, parameter_446, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_446, swish_50 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - 
batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_445, - parameter_444, - parameter_443, - parameter_442, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_71, parameter_442, parameter_443, parameter_444, parameter_445 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) - multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) - del batch_norm__408, data_16 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) - del batch_norm__402, multiply_19 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_51 = paddle._C_ops.swish(add_35) - del add_35 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_36 = paddle._C_ops.add(add_34, swish_51) - del add_34, swish_51 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_441, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_441 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_440, - parameter_439, - parameter_438, - parameter_437, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_72, parameter_437, parameter_438, parameter_439, parameter_440 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_52 = paddle._C_ops.swish(batch_norm__414) - del batch_norm__414 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_52, parameter_436, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_436 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_435, - parameter_434, - parameter_433, - parameter_432, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_73, parameter_432, parameter_433, parameter_434, parameter_435 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_52, parameter_431, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_431, swish_52 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_430, - parameter_429, - 
parameter_428, - parameter_427, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_74, parameter_427, parameter_428, parameter_429, parameter_430 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (1xf32, -1x384x-1x-1xf32) - multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) - del batch_norm__426, data_17 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) - del batch_norm__420, multiply_20 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_53 = paddle._C_ops.swish(add_37) - del add_37 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_38 = paddle._C_ops.add(add_36, swish_53) - del add_36, swish_53 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_3 = [swish_46, add_38] - del add_38, swish_46 - - # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_5, full_int_array_0, True) - del full_int_array_0 - - # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del mean_3, parameter_426 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_425, full_int_array_1) - del parameter_425 - - # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - del conv2d_75, reshape_3 - - # pd_op.hardsigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) - multiply_21 = paddle._C_ops.multiply(concat_5, hardsigmoid_3) - del concat_5, hardsigmoid_3 - - # pd_op.conv2d: (-1x1024x-1x-1xf32) <- (-1x768x-1x-1xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_21, parameter_424, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_21, parameter_424 - - # pd_op.batch_norm_: (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (-1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_423, - parameter_422, - parameter_421, - parameter_420, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_76, parameter_420, parameter_421, parameter_422, parameter_423 - - # pd_op.swish: (-1x1024x-1x-1xf32) <- (-1x1024x-1x-1xf32) - swish_54 = paddle._C_ops.swish(batch_norm__432) - del batch_norm__432 - - # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(swish_54) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], 
full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_0 - - # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) - shape64_1 = paddle._C_ops.shape64(swish_54) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [3] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_4, full_int_array_5, [1], [0] - ) - del shape64_1 - - # pd_op.shape64: (4xi64) <- (-1x1024x-1x-1xf32) - shape64_2 = paddle._C_ops.shape64(swish_54) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [4] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_2 - - # pd_op.flatten: (-1x1024x-1xf32) <- (-1x1024x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(swish_54, 2, 3) - del swish_54 - - # pd_op.transpose: (-1x-1x1024xf32) <- (-1x1024x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.add: (-1x400x1024xf32) <- (-1x-1x1024xf32, 1x400x1024xf32) - add_40 = paddle._C_ops.add(transpose_0, data_27) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [1024] - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - data_18, [1], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - data_19, [0], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_0 = paddle._C_ops.matmul(add_40, slice_3, False, False) - del slice_3 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_41 = paddle._C_ops.add(matmul_0, slice_4) - del matmul_0, slice_4 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_8 = [0, 0, 4, 256] - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_8) - del add_41 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) - del reshape_4 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_9 = [2048] - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - data_18, [1], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - data_19, [0], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_1 = paddle._C_ops.matmul(add_40, slice_5, False, False) - del add_40, slice_5 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_42 = paddle._C_ops.add(matmul_1, slice_6) - del matmul_1, slice_6 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_8) - del add_42 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) - del reshape_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_10 = [2147483647] - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - data_18, [1], full_int_array_9, full_int_array_10, [1], [] - ) - del data_18 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_8 = 
paddle._C_ops.slice( - data_19, [0], full_int_array_9, full_int_array_10, [1], [] - ) - del data_19 - - # pd_op.matmul: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024x1024xf32) - matmul_2 = paddle._C_ops.matmul(transpose_0, slice_7, False, False) - del slice_7 - - # pd_op.add: (-1x-1x1024xf32) <- (-1x-1x1024xf32, 1024xf32) - add_43 = paddle._C_ops.add(matmul_2, slice_8) - del matmul_2, slice_8 - - # pd_op.reshape: (-1x-1x4x256xf32) <- (-1x-1x1024xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_8) - del add_43 - - # pd_op.transpose: (-1x4x-1x256xf32) <- (-1x-1x4x256xf32) - transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) - del reshape_6 - - # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) - matmul_3 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) - del transpose_1, transpose_2 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) - scale_0 = paddle._C_ops.scale(matmul_3, full_1, float("0"), True) - del matmul_3 - - # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) - softmax_0 = paddle._C_ops.softmax(scale_0, -1) - del scale_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_0, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del softmax_0 - - # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x-1x256xf32) - matmul_4 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) - del dropout_0, transpose_3 - - # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) - transpose_4 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) - del matmul_4 - - # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) - shape64_3 = paddle._C_ops.shape64(transpose_4) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_3 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_11 = [0, 0, 1024] - - # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) - reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_11) - del transpose_4 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_5 = paddle._C_ops.matmul(reshape_7, parameter_419, False, False) - del parameter_419, reshape_7 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_44 = paddle._C_ops.add(matmul_5, parameter_418) - del matmul_5, parameter_418 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_44, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_44 - - # pd_op.add: (-1x400x1024xf32) <- (-1x-1x1024xf32, -1x400x1024xf32) - add_45 = paddle._C_ops.add(transpose_0, dropout_2) - del dropout_2, transpose_0 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_45, parameter_417, 
parameter_416, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_45, parameter_416, parameter_417 - - # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) - matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_415, False, False) - del parameter_415 - - # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) - add_46 = paddle._C_ops.add(matmul_6, parameter_414) - del matmul_6, parameter_414 - - # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) - gelu_0 = paddle._C_ops.gelu(add_46, False) - del add_46 - - # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_0, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_0 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) - matmul_7 = paddle._C_ops.matmul(dropout_4, parameter_413, False, False) - del dropout_4, parameter_413 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_47 = paddle._C_ops.add(matmul_7, parameter_412) - del matmul_7, parameter_412 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_47, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_47 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) - del dropout_6, layer_norm_0 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_48, parameter_411, parameter_410, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_48, parameter_410, parameter_411 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) - add_49 = paddle._C_ops.add(layer_norm_3, data_27) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - data_20, [1], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - data_21, [0], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_8 = paddle._C_ops.matmul(add_49, slice_10, False, False) - del slice_10 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_50 = paddle._C_ops.add(matmul_8, slice_11) - del matmul_8, slice_11 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_8) - del add_50 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) - del reshape_8 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_12 = paddle._C_ops.slice( - data_20, [1], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_13 = paddle._C_ops.slice( - data_21, [0], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_9 = 
paddle._C_ops.matmul(add_49, slice_12, False, False) - del add_49, slice_12 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_51 = paddle._C_ops.add(matmul_9, slice_13) - del matmul_9, slice_13 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_8) - del add_51 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) - del reshape_9 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_14 = paddle._C_ops.slice( - data_20, [1], full_int_array_9, full_int_array_10, [1], [] - ) - del data_20 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_15 = paddle._C_ops.slice( - data_21, [0], full_int_array_9, full_int_array_10, [1], [] - ) - del data_21 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_10 = paddle._C_ops.matmul(layer_norm_3, slice_14, False, False) - del slice_14 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_52 = paddle._C_ops.add(matmul_10, slice_15) - del matmul_10, slice_15 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_8) - del add_52 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) - del reshape_10 - - # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) - matmul_11 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) - del transpose_5, transpose_6 - - # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) - scale_1 = paddle._C_ops.scale(matmul_11, full_1, float("0"), True) - del matmul_11 - - # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) - softmax_1 = paddle._C_ops.softmax(scale_1, -1) - del scale_1 - - # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del softmax_1 - - # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) - matmul_12 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) - del dropout_8, transpose_7 - - # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) - transpose_8 = paddle._C_ops.transpose(matmul_12, [0, 2, 1, 3]) - del matmul_12 - - # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) - shape64_4 = paddle._C_ops.shape64(transpose_8) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_16 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_4 - - # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) - reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_11) - del transpose_8 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_13 = paddle._C_ops.matmul(reshape_11, parameter_409, False, False) - del parameter_409, reshape_11 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_53 = paddle._C_ops.add(matmul_13, parameter_408) - del matmul_13, parameter_408 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_53, None, full_2, True, "upscale_in_train", 0, False - ), - lambda 
out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_53 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) - del dropout_10, layer_norm_3 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_54, parameter_407, parameter_406, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_54, parameter_406, parameter_407 - - # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) - matmul_14 = paddle._C_ops.matmul(layer_norm_6, parameter_405, False, False) - del parameter_405 - - # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) - add_55 = paddle._C_ops.add(matmul_14, parameter_404) - del matmul_14, parameter_404 - - # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) - gelu_1 = paddle._C_ops.gelu(add_55, False) - del add_55 - - # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_1, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_1 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) - matmul_15 = paddle._C_ops.matmul(dropout_12, parameter_403, False, False) - del dropout_12, parameter_403 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_56 = paddle._C_ops.add(matmul_15, parameter_402) - del matmul_15, parameter_402 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_56, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_56 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) - del dropout_14, layer_norm_6 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_57, parameter_401, parameter_400, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_57, parameter_400, parameter_401 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) - add_58 = paddle._C_ops.add(layer_norm_9, data_27) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_17 = paddle._C_ops.slice( - data_22, [1], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_18 = paddle._C_ops.slice( - data_23, [0], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_16 = paddle._C_ops.matmul(add_58, slice_17, False, False) - del slice_17 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_59 = paddle._C_ops.add(matmul_16, slice_18) - del matmul_16, slice_18 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_8) - del add_59 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_9 = 
paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) - del reshape_12 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_19 = paddle._C_ops.slice( - data_22, [1], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_20 = paddle._C_ops.slice( - data_23, [0], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_17 = paddle._C_ops.matmul(add_58, slice_19, False, False) - del add_58, slice_19 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_60 = paddle._C_ops.add(matmul_17, slice_20) - del matmul_17, slice_20 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_8) - del add_60 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) - del reshape_13 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_21 = paddle._C_ops.slice( - data_22, [1], full_int_array_9, full_int_array_10, [1], [] - ) - del data_22 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_22 = paddle._C_ops.slice( - data_23, [0], full_int_array_9, full_int_array_10, [1], [] - ) - del data_23 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_18 = paddle._C_ops.matmul(layer_norm_9, slice_21, False, False) - del slice_21 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_61 = paddle._C_ops.add(matmul_18, slice_22) - del matmul_18, slice_22 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_8) - del add_61 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) - del reshape_14 - - # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) - matmul_19 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) - del transpose_10, transpose_9 - - # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) - scale_2 = paddle._C_ops.scale(matmul_19, full_1, float("0"), True) - del matmul_19 - - # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) - softmax_2 = paddle._C_ops.softmax(scale_2, -1) - del scale_2 - - # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) - dropout_16, dropout_17 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_2, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del softmax_2 - - # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) - matmul_20 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) - del dropout_16, transpose_11 - - # pd_op.transpose: (-1x400x4x256xf32) <- (-1x4x400x256xf32) - transpose_12 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) - del matmul_20 - - # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) - shape64_5 = paddle._C_ops.shape64(transpose_12) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_23 = paddle._C_ops.slice( - shape64_5, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_5 - - # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_11) - del transpose_12 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - 
matmul_21 = paddle._C_ops.matmul(reshape_15, parameter_399, False, False) - del parameter_399, reshape_15 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_62 = paddle._C_ops.add(matmul_21, parameter_398) - del matmul_21, parameter_398 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_18, dropout_19 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_62, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_62 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) - del dropout_18, layer_norm_9 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_63, parameter_397, parameter_396, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_63, parameter_396, parameter_397 - - # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) - matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_395, False, False) - del parameter_395 - - # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) - add_64 = paddle._C_ops.add(matmul_22, parameter_394) - del matmul_22, parameter_394 - - # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) - gelu_2 = paddle._C_ops.gelu(add_64, False) - del add_64 - - # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) - dropout_20, dropout_21 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_2, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_2 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) - matmul_23 = paddle._C_ops.matmul(dropout_20, parameter_393, False, False) - del dropout_20, parameter_393 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_65 = paddle._C_ops.add(matmul_23, parameter_392) - del matmul_23, parameter_392 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_22, dropout_23 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_65, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_65 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) - del dropout_22, layer_norm_12 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_66, parameter_391, parameter_390, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_66, parameter_390, parameter_391 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1x400x1024xf32) - add_67 = paddle._C_ops.add(layer_norm_15, data_27) - del data_27 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_24 = paddle._C_ops.slice( - data_24, [1], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_25 = paddle._C_ops.slice( - data_25, [0], full_int_array_2, full_int_array_7, [1], [] - ) - - # pd_op.matmul: 
(-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_24 = paddle._C_ops.matmul(add_67, slice_24, False, False) - del slice_24 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_68 = paddle._C_ops.add(matmul_24, slice_25) - del matmul_24, slice_25 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_8) - del add_68 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) - del reshape_16 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_26 = paddle._C_ops.slice( - data_24, [1], full_int_array_7, full_int_array_9, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_27 = paddle._C_ops.slice( - data_25, [0], full_int_array_7, full_int_array_9, [1], [] - ) - del full_int_array_7 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_25 = paddle._C_ops.matmul(add_67, slice_26, False, False) - del add_67, slice_26 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_69 = paddle._C_ops.add(matmul_25, slice_27) - del matmul_25, slice_27 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_8) - del add_69 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) - del reshape_17 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_28 = paddle._C_ops.slice( - data_24, [1], full_int_array_9, full_int_array_10, [1], [] - ) - del data_24 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_29 = paddle._C_ops.slice( - data_25, [0], full_int_array_9, full_int_array_10, [1], [] - ) - del data_25, full_int_array_10, full_int_array_9 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_26 = paddle._C_ops.matmul(layer_norm_15, slice_28, False, False) - del slice_28 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_70 = paddle._C_ops.add(matmul_26, slice_29) - del matmul_26, slice_29 - - # pd_op.reshape: (-1x400x4x256xf32) <- (-1x400x1024xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_8) - del add_70, full_int_array_8 - - # pd_op.transpose: (-1x4x400x256xf32) <- (-1x400x4x256xf32) - transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) - del reshape_18 - - # pd_op.matmul: (-1x4x400x400xf32) <- (-1x4x400x256xf32, -1x4x400x256xf32) - matmul_27 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) - del transpose_13, transpose_14 - - # pd_op.scale: (-1x4x400x400xf32) <- (-1x4x400x400xf32, 1xf32) - scale_3 = paddle._C_ops.scale(matmul_27, full_1, float("0"), True) - del full_1, matmul_27 - - # pd_op.softmax: (-1x4x400x400xf32) <- (-1x4x400x400xf32) - softmax_3 = paddle._C_ops.softmax(scale_3, -1) - del scale_3 - - # pd_op.dropout: (-1x4x400x400xf32, -1x4x400x400xui8) <- (-1x4x400x400xf32, None, 1xf32) - dropout_24, dropout_25 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_3, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del softmax_3 - - # pd_op.matmul: (-1x4x400x256xf32) <- (-1x4x400x400xf32, -1x4x400x256xf32) - matmul_28 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) - del dropout_24, transpose_15 - - # pd_op.transpose: (-1x400x4x256xf32) <- 
(-1x4x400x256xf32) - transpose_16 = paddle._C_ops.transpose(matmul_28, [0, 2, 1, 3]) - del matmul_28 - - # pd_op.shape64: (4xi64) <- (-1x400x4x256xf32) - shape64_6 = paddle._C_ops.shape64(transpose_16) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_30 = paddle._C_ops.slice( - shape64_6, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_6 - - # pd_op.reshape: (-1x400x1024xf32) <- (-1x400x4x256xf32, 3xi64) - reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_11) - del full_int_array_11, transpose_16 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024x1024xf32) - matmul_29 = paddle._C_ops.matmul(reshape_19, parameter_389, False, False) - del parameter_389, reshape_19 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_71 = paddle._C_ops.add(matmul_29, parameter_388) - del matmul_29, parameter_388 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_26, dropout_27 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_71, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_71 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) - del dropout_26, layer_norm_15 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_72, parameter_387, parameter_386, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_72, parameter_386, parameter_387 - - # pd_op.matmul: (-1x400x2048xf32) <- (-1x400x1024xf32, 1024x2048xf32) - matmul_30 = paddle._C_ops.matmul(layer_norm_18, parameter_385, False, False) - del parameter_385 - - # pd_op.add: (-1x400x2048xf32) <- (-1x400x2048xf32, 2048xf32) - add_73 = paddle._C_ops.add(matmul_30, parameter_384) - del matmul_30, parameter_384 - - # pd_op.gelu: (-1x400x2048xf32) <- (-1x400x2048xf32) - gelu_3 = paddle._C_ops.gelu(add_73, False) - del add_73 - - # pd_op.dropout: (-1x400x2048xf32, -1x400x2048xui8) <- (-1x400x2048xf32, None, 1xf32) - dropout_28, dropout_29 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_3, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_3 - - # pd_op.matmul: (-1x400x1024xf32) <- (-1x400x2048xf32, 2048x1024xf32) - matmul_31 = paddle._C_ops.matmul(dropout_28, parameter_383, False, False) - del dropout_28, parameter_383 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, 1024xf32) - add_74 = paddle._C_ops.add(matmul_31, parameter_382) - del matmul_31, parameter_382 - - # pd_op.dropout: (-1x400x1024xf32, -1x400x1024xui8) <- (-1x400x1024xf32, None, 1xf32) - dropout_30, dropout_31 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_74, None, full_2, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_74, full_2 - - # pd_op.add: (-1x400x1024xf32) <- (-1x400x1024xf32, -1x400x1024xf32) - add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) - del dropout_30, layer_norm_18 - - # pd_op.layer_norm: (-1x400x1024xf32, -1x400xf32, -1x400xf32) <- (-1x400x1024xf32, 1024xf32, 1024xf32) - layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_75, parameter_381, 
parameter_380, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del add_75, parameter_380, parameter_381 - - # pd_op.transpose: (-1x1024x400xf32) <- (-1x400x1024xf32) - transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) - del layer_norm_21 - - # pd_op.full: (xi64) <- () - full_3 = paddle._C_ops.full( - [], float("1024"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_4 = [slice_0, full_3, slice_1, slice_2] - del full_3, slice_0, slice_1, slice_2 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_4, 0) - del combine_4 - - # pd_op.reshape: (-1x1024x-1x-1xf32) <- (-1x1024x400xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(transpose_17, stack_0) - del stack_0, transpose_17 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - reshape_20, parameter_379, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_379 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_378, - parameter_377, - parameter_376, - parameter_375, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_77, parameter_375, parameter_376, parameter_377, parameter_378 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_55 = paddle._C_ops.swish(batch_norm__438) - del batch_norm__438 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1024x-1x-1xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - reshape_20, parameter_374, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_374, reshape_20 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_373, - parameter_372, - parameter_371, - parameter_370, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_78, parameter_370, parameter_371, parameter_372, parameter_373 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_56 = paddle._C_ops.swish(batch_norm__444) - del batch_norm__444 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_56, parameter_369, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_369, swish_56 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_368, - parameter_367, - parameter_366, - parameter_365, - True, - float("0.9"), - float("1e-05"), - 
"NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_79, parameter_365, parameter_366, parameter_367, parameter_368 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_57 = paddle._C_ops.swish(batch_norm__450) - del batch_norm__450 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_57, parameter_364, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_364 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_363, - parameter_362, - parameter_361, - parameter_360, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_80, parameter_360, parameter_361, parameter_362, parameter_363 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_57, parameter_359, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_359, swish_57 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_358, - parameter_357, - parameter_356, - parameter_355, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_81, parameter_355, parameter_356, parameter_357, parameter_358 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - del batch_norm__456, batch_norm__462 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_58 = paddle._C_ops.swish(add_76) - del add_76 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_58, parameter_354, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_354, swish_58 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_353, - parameter_352, - parameter_351, - parameter_350, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_82, parameter_350, parameter_351, parameter_352, parameter_353 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_59 = paddle._C_ops.swish(batch_norm__468) - del batch_norm__468 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_59, parameter_349, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" - ) - del parameter_349 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_348, - parameter_347, - parameter_346, - parameter_345, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_83, parameter_345, parameter_346, parameter_347, parameter_348 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_59, parameter_344, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_344, swish_59 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_343, - parameter_342, - parameter_341, - parameter_340, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_84, parameter_340, parameter_341, parameter_342, parameter_343 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - del batch_norm__474, batch_norm__480 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_60 = paddle._C_ops.swish(add_77) - del add_77 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_12 = [5, 5] - - # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_60, - full_int_array_12, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_12 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_13 = [9, 9] - - # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_60, - full_int_array_13, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_13 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_14 = [13, 13] - - # pd_op.pool2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_60, - full_int_array_14, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - del full_int_array_14 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_5 = [swish_60, pool2d_0, pool2d_1, pool2d_2] - del pool2d_0, pool2d_1, pool2d_2, swish_60 - - # pd_op.concat: (-1x1536x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1536x-1x-1xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_6, parameter_339, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_6, parameter_339 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 
384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_338, - parameter_337, - parameter_336, - parameter_335, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_85, parameter_335, parameter_336, parameter_337, parameter_338 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_61 = paddle._C_ops.swish(batch_norm__486) - del batch_norm__486 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_61, parameter_334, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_334, swish_61 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_333, - parameter_332, - parameter_331, - parameter_330, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_86, parameter_330, parameter_331, parameter_332, parameter_333 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_62 = paddle._C_ops.swish(batch_norm__492) - del batch_norm__492 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_62, parameter_329, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_329 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_328, - parameter_327, - parameter_326, - parameter_325, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_87, parameter_325, parameter_326, parameter_327, parameter_328 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - swish_62, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324, swish_62 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_88, parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 
-1x384x-1x-1xf32) - add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - del batch_norm__498, batch_norm__504 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_63 = paddle._C_ops.swish(add_78) - del add_78 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_6 = [swish_55, swish_63] - del swish_55, swish_63 - - # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_7, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_7, parameter_319 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_89, parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_64 = paddle._C_ops.swish(batch_norm__510) - del batch_norm__510 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - swish_64, parameter_314, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_90, parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_65 = paddle._C_ops.swish(batch_norm__516) - del batch_norm__516 - - # pd_op.nearest_interp: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_65, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_65 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x512x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x512x-1x-1xf32) - combine_7 = [nearest_interp_0, swish_44] - del nearest_interp_0, swish_44 - - # pd_op.concat: (-1x896x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x512x-1x-1xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_8, parameter_309, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - 
batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_91, parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_66 = paddle._C_ops.swish(batch_norm__522) - del batch_norm__522 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x896x-1x-1xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_8, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_8, parameter_304 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_92, parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_67 = paddle._C_ops.swish(batch_norm__528) - del batch_norm__528 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_67, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299, swish_67 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_93, parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_68 = paddle._C_ops.swish(batch_norm__534) - del batch_norm__534 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_68, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_94, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_94, parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 
192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_68, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289, swish_68 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_95, parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - del batch_norm__540, batch_norm__546 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_69 = paddle._C_ops.swish(add_79) - del add_79 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_69, parameter_284, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284, swish_69 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_96, parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_70 = paddle._C_ops.swish(batch_norm__552) - del batch_norm__552 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_70, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_97, parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_70, parameter_274, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274, swish_70 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_273, - 
parameter_272, - parameter_271, - parameter_270, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_98, parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - del batch_norm__558, batch_norm__564 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_71 = paddle._C_ops.swish(add_80) - del add_80 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_71, parameter_269, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269, swish_71 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_99, parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_72 = paddle._C_ops.swish(batch_norm__570) - del batch_norm__570 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_72, parameter_264, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_100, parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_72, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259, swish_72 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_101, parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) - del batch_norm__576, batch_norm__582 - - # pd_op.swish: 
(-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_73 = paddle._C_ops.swish(add_81) - del add_81 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - combine_8 = [swish_66, swish_73] - del swish_66, swish_73 - - # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_9, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_9, parameter_254 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_102, parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_74 = paddle._C_ops.swish(batch_norm__588) - del batch_norm__588 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_74, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_103, parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_75 = paddle._C_ops.swish(batch_norm__594) - del batch_norm__594 - - # pd_op.nearest_interp: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_75, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - del swish_75 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x256x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x256x-1x-1xf32) - combine_9 = [nearest_interp_1, swish_28] - del nearest_interp_1, swish_28 - - # pd_op.concat: (-1x448x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x256x-1x-1xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - concat_10, parameter_244, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_104, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_104, parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_76 = paddle._C_ops.swish(batch_norm__600) - del batch_norm__600 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x448x-1x-1xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_10, parameter_239, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_10, parameter_239 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_105, parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_77 = paddle._C_ops.swish(batch_norm__606) - del batch_norm__606 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_77, parameter_234, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234, swish_77 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_106, parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_78 = paddle._C_ops.swish(batch_norm__612) - del batch_norm__612 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_78, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_107, parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_78, parameter_224, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224, swish_78 - - # pd_op.batch_norm_: 
(-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_108, parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - del batch_norm__618, batch_norm__624 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_79 = paddle._C_ops.swish(add_82) - del add_82 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_79, parameter_219, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219, swish_79 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_109, parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_80 = paddle._C_ops.swish(batch_norm__630) - del batch_norm__630 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_80, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_110, parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_80, parameter_209, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209, swish_80 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_111, 
parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - del batch_norm__636, batch_norm__642 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_81 = paddle._C_ops.swish(add_83) - del add_83 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_81, parameter_204, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204, swish_81 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_112, parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_82 = paddle._C_ops.swish(batch_norm__648) - del batch_norm__648 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_82, parameter_199, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_113, parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.conv2d: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_82, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194, swish_82 - - # pd_op.batch_norm_: (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (-1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_114, parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.add: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - del batch_norm__654, batch_norm__660 - - # pd_op.swish: (-1x96x-1x-1xf32) <- (-1x96x-1x-1xf32) - swish_83 = paddle._C_ops.swish(add_84) - del add_84 - - # builtin.combine: ([-1x96x-1x-1xf32, -1x96x-1x-1xf32]) <- (-1x96x-1x-1xf32, -1x96x-1x-1xf32) - combine_10 = [swish_76, swish_83] - del swish_76, swish_83 - - # pd_op.concat: (-1x192x-1x-1xf32) <- 
([-1x96x-1x-1xf32, -1x96x-1x-1xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_11, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_11, parameter_189 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_115, parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_84 = paddle._C_ops.swish(batch_norm__666) - del batch_norm__666 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_84, parameter_184, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_116, parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_85 = paddle._C_ops.swish(batch_norm__672) - del batch_norm__672 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x384x-1x-1xf32) - combine_11 = [swish_85, swish_74] - del swish_74, swish_85 - - # pd_op.concat: (-1x576x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_12, parameter_179, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_117, parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_86 = paddle._C_ops.swish(batch_norm__678) - del batch_norm__678 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x576x-1x-1xf32, 192x576x1x1xf32) - conv2d_118 = 
paddle._C_ops.conv2d( - concat_12, parameter_174, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_12, parameter_174 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_118, parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_87 = paddle._C_ops.swish(batch_norm__684) - del batch_norm__684 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_87, parameter_169, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169, swish_87 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_119, parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_88 = paddle._C_ops.swish(batch_norm__690) - del batch_norm__690 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_88, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_120, parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_88, parameter_159, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159, swish_88 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_121, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, 
None, None, None, None, None), - ) - del conv2d_121, parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - del batch_norm__696, batch_norm__702 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_89 = paddle._C_ops.swish(add_85) - del add_85 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_89, parameter_154, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154, swish_89 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_122, parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_90 = paddle._C_ops.swish(batch_norm__708) - del batch_norm__708 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_90, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_123, parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_90, parameter_144, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144, swish_90 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_124, parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - del batch_norm__714, batch_norm__720 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_91 = paddle._C_ops.swish(add_86) - del add_86 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_125 = 
paddle._C_ops.conv2d( - swish_91, parameter_139, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139, swish_91 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__730, - batch_norm__731, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_125, parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_92 = paddle._C_ops.swish(batch_norm__726) - del batch_norm__726 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_92, parameter_134, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_126, parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_92, parameter_129, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129, swish_92 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_127, parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - del batch_norm__732, batch_norm__738 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_93 = paddle._C_ops.swish(add_87) - del add_87 - - # builtin.combine: ([-1x192x-1x-1xf32, -1x192x-1x-1xf32]) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - combine_12 = [swish_86, swish_93] - del swish_86, swish_93 - - # pd_op.concat: (-1x384x-1x-1xf32) <- ([-1x192x-1x-1xf32, -1x192x-1x-1xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_13, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_13, parameter_124 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 
384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_128, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_128, parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_94 = paddle._C_ops.swish(batch_norm__744) - del batch_norm__744 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_94, parameter_119, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_129, parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_95 = paddle._C_ops.swish(batch_norm__750) - del batch_norm__750 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x768x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x768x-1x-1xf32) - combine_13 = [swish_95, swish_64] - del swish_64, swish_95 - - # pd_op.concat: (-1x1152x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x768x-1x-1xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_14, parameter_114, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_130, parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_96 = paddle._C_ops.swish(batch_norm__756) - del batch_norm__756 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x1152x-1x-1xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_14, parameter_109, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_14, parameter_109 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = 
(lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_131, parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_97 = paddle._C_ops.swish(batch_norm__762) - del batch_norm__762 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_97, parameter_104, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104, swish_97 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_132, parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_98 = paddle._C_ops.swish(batch_norm__768) - del batch_norm__768 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_98, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_133, parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_98, parameter_94, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94, swish_98 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_134, parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - del batch_norm__774, batch_norm__780 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_99 = paddle._C_ops.swish(add_88) - del add_88 - - # 
pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_99, parameter_89, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89, swish_99 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_135, parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_100 = paddle._C_ops.swish(batch_norm__786) - del batch_norm__786 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_100, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_136, parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_100, parameter_79, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79, swish_100 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_137, parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - del batch_norm__792, batch_norm__798 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_101 = paddle._C_ops.swish(add_89) - del add_89 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_101, parameter_74, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74, swish_101 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_138, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_138, parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_102 = paddle._C_ops.swish(batch_norm__804) - del batch_norm__804 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_102, parameter_69, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_139, parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_102, parameter_64, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64, swish_102 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_140, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_140, parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - del batch_norm__810, batch_norm__816 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_103 = paddle._C_ops.swish(add_90) - del add_90 - - # builtin.combine: ([-1x384x-1x-1xf32, -1x384x-1x-1xf32]) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - combine_14 = [swish_96, swish_103] - del swish_103, swish_96 - - # pd_op.concat: (-1x768x-1x-1xf32) <- ([-1x384x-1x-1xf32, -1x384x-1x-1xf32], 1xi32) - concat_15 = paddle._C_ops.concat(combine_14, full_0) - del combine_14 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_15, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_15, parameter_59 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else 
(out, None, None, None, None, None), - ) - del conv2d_141, parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_104 = paddle._C_ops.swish(batch_norm__822) - del batch_norm__822 - - # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) - shape64_7 = paddle._C_ops.shape64(swish_104) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_31 = paddle._C_ops.slice( - shape64_7, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_7 - - # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) - shape64_8 = paddle._C_ops.shape64(swish_104) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_32 = paddle._C_ops.slice( - shape64_8, [0], full_int_array_4, full_int_array_5, [1], [0] - ) - del shape64_8 - - # pd_op.shape64: (4xi64) <- (-1x768x-1x-1xf32) - shape64_9 = paddle._C_ops.shape64(swish_104) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_33 = paddle._C_ops.slice( - shape64_9, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_9 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_22 = paddle._C_ops.multiply(slice_32, slice_33) - del slice_32, slice_33 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_15 = [1, 1] - - # pd_op.pool2d: (-1x768x1x1xf32) <- (-1x768x-1x-1xf32, 2xi64) - pool2d_3 = paddle._C_ops.pool2d( - swish_104, - full_int_array_15, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) - conv2d_142 = paddle._C_ops.conv2d( - pool2d_3, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_21 = paddle._C_ops.reshape(parameter_53, full_int_array_1) - del parameter_53 - - # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) - add_91 = paddle._C_ops.add(conv2d_142, reshape_21) - del conv2d_142, reshape_21 - - # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_91) - del add_91 - - # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) - multiply_23 = paddle._C_ops.multiply(swish_104, sigmoid_0) - del sigmoid_0 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_143 = paddle._C_ops.conv2d( - multiply_23, parameter_52, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_23, parameter_52 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__828, - batch_norm__829, - batch_norm__830, - batch_norm__831, - batch_norm__832, - batch_norm__833, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_143, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_143, parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_105 = paddle._C_ops.swish(batch_norm__828) - del batch_norm__828 - - # pd_op.add: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x-1x-1xf32) - add_92 = paddle._C_ops.add(swish_105, swish_104) - del swish_105 - - # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x768x-1x-1xf32, 10x768x3x3xf32) - conv2d_144 = paddle._C_ops.conv2d( - add_92, parameter_47, [1, 1], [1, 1], 
"EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_92, parameter_47 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_22 = paddle._C_ops.reshape(parameter_46, full_int_array_1) - del parameter_46 - - # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) - add_93 = paddle._C_ops.add(conv2d_144, reshape_22) - del conv2d_144, reshape_22 - - # pd_op.conv2d: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 768x768x1x1xf32) - conv2d_145 = paddle._C_ops.conv2d( - pool2d_3, parameter_45, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_45, pool2d_3 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_23 = paddle._C_ops.reshape(parameter_44, full_int_array_1) - del parameter_44 - - # pd_op.add: (-1x768x1x1xf32) <- (-1x768x1x1xf32, 1x768x1x1xf32) - add_94 = paddle._C_ops.add(conv2d_145, reshape_23) - del conv2d_145, reshape_23 - - # pd_op.sigmoid: (-1x768x1x1xf32) <- (-1x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_94) - del add_94 - - # pd_op.multiply: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, -1x768x1x1xf32) - multiply_24 = paddle._C_ops.multiply(swish_104, sigmoid_1) - del sigmoid_1, swish_104 - - # pd_op.conv2d: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32, 768x768x1x1xf32) - conv2d_146 = paddle._C_ops.conv2d( - multiply_24, parameter_43, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_24, parameter_43 - - # pd_op.batch_norm_: (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (-1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__834, - batch_norm__835, - batch_norm__836, - batch_norm__837, - batch_norm__838, - batch_norm__839, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_146, - parameter_42, - parameter_41, - parameter_40, - parameter_39, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_146, parameter_39, parameter_40, parameter_41, parameter_42 - - # pd_op.swish: (-1x768x-1x-1xf32) <- (-1x768x-1x-1xf32) - swish_106 = paddle._C_ops.swish(batch_norm__834) - del batch_norm__834 - - # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x768x-1x-1xf32, 40x768x3x3xf32) - conv2d_147 = paddle._C_ops.conv2d( - swish_106, parameter_38, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_38, swish_106 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_24 = paddle._C_ops.reshape(parameter_37, full_int_array_1) - del parameter_37 - - # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) - add_95 = paddle._C_ops.add(conv2d_147, reshape_24) - del conv2d_147, reshape_24 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("10"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_15 = [full_4, full_5, full_6, multiply_22] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_15, 0) - del combine_15 - - # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) - reshape_25 = paddle._C_ops.reshape(add_95, stack_1) - del add_95, stack_1 - - # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) - transpose_18 = paddle._C_ops.transpose(reshape_25, [0, 2, 3, 
1]) - del reshape_25 - - # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) - softmax_4 = paddle._C_ops.softmax(transpose_18, 1) - del transpose_18 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) - conv2d_148 = paddle._C_ops.conv2d( - softmax_4, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_4 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_148, full_int_array_3) - del conv2d_148 - - # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_93) - del add_93 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_16 = [full_4, full_6, multiply_22] - del multiply_22 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_16, 0) - del combine_16 - - # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) - reshape_26 = paddle._C_ops.reshape(sigmoid_2, stack_2) - del sigmoid_2, stack_2 - - # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) - shape64_10 = paddle._C_ops.shape64(swish_94) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_34 = paddle._C_ops.slice( - shape64_10, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_10 - - # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) - shape64_11 = paddle._C_ops.shape64(swish_94) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_35 = paddle._C_ops.slice( - shape64_11, [0], full_int_array_4, full_int_array_5, [1], [0] - ) - del shape64_11 - - # pd_op.shape64: (4xi64) <- (-1x384x-1x-1xf32) - shape64_12 = paddle._C_ops.shape64(swish_94) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_36 = paddle._C_ops.slice( - shape64_12, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del shape64_12 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_25 = paddle._C_ops.multiply(slice_35, slice_36) - del slice_35, slice_36 - - # pd_op.pool2d: (-1x384x1x1xf32) <- (-1x384x-1x-1xf32, 2xi64) - pool2d_4 = paddle._C_ops.pool2d( - swish_94, - full_int_array_15, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) - conv2d_149 = paddle._C_ops.conv2d( - pool2d_4, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_27 = paddle._C_ops.reshape(parameter_34, full_int_array_1) - del parameter_34 - - # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) - add_96 = paddle._C_ops.add(conv2d_149, reshape_27) - del conv2d_149, reshape_27 - - # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_96) - del add_96 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) - multiply_26 = paddle._C_ops.multiply(swish_94, sigmoid_3) - del sigmoid_3 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_150 = paddle._C_ops.conv2d( - multiply_26, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_26, parameter_33 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__840, - batch_norm__841, - batch_norm__842, - batch_norm__843, - batch_norm__844, - batch_norm__845, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_150, - parameter_32, - parameter_31, - parameter_30, - parameter_29, 
- True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_150, parameter_29, parameter_30, parameter_31, parameter_32 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_107 = paddle._C_ops.swish(batch_norm__840) - del batch_norm__840 - - # pd_op.add: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x-1x-1xf32) - add_97 = paddle._C_ops.add(swish_107, swish_94) - del swish_107 - - # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x384x-1x-1xf32, 10x384x3x3xf32) - conv2d_151 = paddle._C_ops.conv2d( - add_97, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_97, parameter_28 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_28 = paddle._C_ops.reshape(parameter_27, full_int_array_1) - del parameter_27 - - # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) - add_98 = paddle._C_ops.add(conv2d_151, reshape_28) - del conv2d_151, reshape_28 - - # pd_op.conv2d: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 384x384x1x1xf32) - conv2d_152 = paddle._C_ops.conv2d( - pool2d_4, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26, pool2d_4 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_29 = paddle._C_ops.reshape(parameter_25, full_int_array_1) - del parameter_25 - - # pd_op.add: (-1x384x1x1xf32) <- (-1x384x1x1xf32, 1x384x1x1xf32) - add_99 = paddle._C_ops.add(conv2d_152, reshape_29) - del conv2d_152, reshape_29 - - # pd_op.sigmoid: (-1x384x1x1xf32) <- (-1x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_99) - del add_99 - - # pd_op.multiply: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, -1x384x1x1xf32) - multiply_27 = paddle._C_ops.multiply(swish_94, sigmoid_4) - del sigmoid_4, swish_94 - - # pd_op.conv2d: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_153 = paddle._C_ops.conv2d( - multiply_27, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_27, parameter_24 - - # pd_op.batch_norm_: (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__846, - batch_norm__847, - batch_norm__848, - batch_norm__849, - batch_norm__850, - batch_norm__851, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_153, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_153, parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (-1x384x-1x-1xf32) <- (-1x384x-1x-1xf32) - swish_108 = paddle._C_ops.swish(batch_norm__846) - del batch_norm__846 - - # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x384x-1x-1xf32, 40x384x3x3xf32) - conv2d_154 = paddle._C_ops.conv2d( - swish_108, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19, swish_108 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_30 = paddle._C_ops.reshape(parameter_18, full_int_array_1) - del parameter_18 - - # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) - add_100 = paddle._C_ops.add(conv2d_154, reshape_30) - del conv2d_154, reshape_30 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_17 = [full_4, full_5, full_6, multiply_25] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_3 = 
paddle._C_ops.stack(combine_17, 0) - del combine_17 - - # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) - reshape_31 = paddle._C_ops.reshape(add_100, stack_3) - del add_100, stack_3 - - # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) - transpose_19 = paddle._C_ops.transpose(reshape_31, [0, 2, 3, 1]) - del reshape_31 - - # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) - softmax_5 = paddle._C_ops.softmax(transpose_19, 1) - del transpose_19 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) - conv2d_155 = paddle._C_ops.conv2d( - softmax_5, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del softmax_5 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze(conv2d_155, full_int_array_3) - del conv2d_155 - - # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_98) - del add_98 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_18 = [full_4, full_6, multiply_25] - del multiply_25 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_18, 0) - del combine_18 - - # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) - reshape_32 = paddle._C_ops.reshape(sigmoid_5, stack_4) - del sigmoid_5, stack_4 - - # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) - shape64_13 = paddle._C_ops.shape64(swish_84) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_37 = paddle._C_ops.slice( - shape64_13, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, shape64_13 - - # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) - shape64_14 = paddle._C_ops.shape64(swish_84) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_38 = paddle._C_ops.slice( - shape64_14, [0], full_int_array_4, full_int_array_5, [1], [0] - ) - del full_int_array_4, shape64_14 - - # pd_op.shape64: (4xi64) <- (-1x192x-1x-1xf32) - shape64_15 = paddle._C_ops.shape64(swish_84) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_39 = paddle._C_ops.slice( - shape64_15, [0], full_int_array_5, full_int_array_6, [1], [0] - ) - del full_int_array_5, full_int_array_6, shape64_15 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_28 = paddle._C_ops.multiply(slice_38, slice_39) - del slice_38, slice_39 - - # pd_op.pool2d: (-1x192x1x1xf32) <- (-1x192x-1x-1xf32, 2xi64) - pool2d_5 = paddle._C_ops.pool2d( - swish_84, - full_int_array_15, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - del full_int_array_15 - - # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) - conv2d_156 = paddle._C_ops.conv2d( - pool2d_5, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_33 = paddle._C_ops.reshape(parameter_16, full_int_array_1) - del parameter_16 - - # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) - add_101 = paddle._C_ops.add(conv2d_156, reshape_33) - del conv2d_156, reshape_33 - - # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_101) - del add_101 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) - multiply_29 = paddle._C_ops.multiply(swish_84, sigmoid_6) - del sigmoid_6 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_157 = paddle._C_ops.conv2d( - multiply_29, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
multiply_29, parameter_15 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__852, - batch_norm__853, - batch_norm__854, - batch_norm__855, - batch_norm__856, - batch_norm__857, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_157, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_157, parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_109 = paddle._C_ops.swish(batch_norm__852) - del batch_norm__852 - - # pd_op.add: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x-1x-1xf32) - add_102 = paddle._C_ops.add(swish_109, swish_84) - del swish_109 - - # pd_op.conv2d: (-1x10x-1x-1xf32) <- (-1x192x-1x-1xf32, 10x192x3x3xf32) - conv2d_158 = paddle._C_ops.conv2d( - add_102, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del add_102, parameter_10 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_34 = paddle._C_ops.reshape(parameter_9, full_int_array_1) - del parameter_9 - - # pd_op.add: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32, 1x10x1x1xf32) - add_103 = paddle._C_ops.add(conv2d_158, reshape_34) - del conv2d_158, reshape_34 - - # pd_op.conv2d: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 192x192x1x1xf32) - conv2d_159 = paddle._C_ops.conv2d( - pool2d_5, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8, pool2d_5 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_35 = paddle._C_ops.reshape(parameter_7, full_int_array_1) - del parameter_7 - - # pd_op.add: (-1x192x1x1xf32) <- (-1x192x1x1xf32, 1x192x1x1xf32) - add_104 = paddle._C_ops.add(conv2d_159, reshape_35) - del conv2d_159, reshape_35 - - # pd_op.sigmoid: (-1x192x1x1xf32) <- (-1x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_104) - del add_104 - - # pd_op.multiply: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, -1x192x1x1xf32) - multiply_30 = paddle._C_ops.multiply(swish_84, sigmoid_7) - del sigmoid_7, swish_84 - - # pd_op.conv2d: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_160 = paddle._C_ops.conv2d( - multiply_30, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del multiply_30, parameter_6 - - # pd_op.batch_norm_: (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (-1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__858, - batch_norm__859, - batch_norm__860, - batch_norm__861, - batch_norm__862, - batch_norm__863, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_160, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_160, parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (-1x192x-1x-1xf32) <- (-1x192x-1x-1xf32) - swish_110 = paddle._C_ops.swish(batch_norm__858) - del batch_norm__858 - - # pd_op.conv2d: (-1x40x-1x-1xf32) <- (-1x192x-1x-1xf32, 40x192x3x3xf32) - conv2d_161 = paddle._C_ops.conv2d( - swish_110, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1, swish_110 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_36 = 
paddle._C_ops.reshape(parameter_0, full_int_array_1) - del full_int_array_1, parameter_0 - - # pd_op.add: (-1x40x-1x-1xf32) <- (-1x40x-1x-1xf32, 1x40x1x1xf32) - add_105 = paddle._C_ops.add(conv2d_161, reshape_36) - del conv2d_161, reshape_36 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_19 = [full_4, full_5, full_6, multiply_28] - del full_5 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_19, 0) - del combine_19 - - # pd_op.reshape: (-1x4x10x-1xf32) <- (-1x40x-1x-1xf32, 4xi64) - reshape_37 = paddle._C_ops.reshape(add_105, stack_5) - del add_105, stack_5 - - # pd_op.transpose: (-1x10x-1x4xf32) <- (-1x4x10x-1xf32) - transpose_20 = paddle._C_ops.transpose(reshape_37, [0, 2, 3, 1]) - del reshape_37 - - # pd_op.softmax: (-1x10x-1x4xf32) <- (-1x10x-1x4xf32) - softmax_6 = paddle._C_ops.softmax(transpose_20, 1) - del transpose_20 - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x10x-1x4xf32, 1x10x1x1xf32) - conv2d_162 = paddle._C_ops.conv2d( - softmax_6, parameter_36, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_36, softmax_6 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_2 = paddle._C_ops.squeeze(conv2d_162, full_int_array_3) - del conv2d_162, full_int_array_3 - - # pd_op.sigmoid: (-1x10x-1x-1xf32) <- (-1x10x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_103) - del add_103 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_20 = [full_4, full_6, multiply_28] - del full_4, full_6, multiply_28 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_6 = paddle._C_ops.stack(combine_20, 0) - del combine_20 - - # pd_op.reshape: (-1x10x-1xf32) <- (-1x10x-1x-1xf32, 3xi64) - reshape_38 = paddle._C_ops.reshape(sigmoid_8, stack_6) - del sigmoid_8, stack_6 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32]) <- (-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32) - combine_21 = [reshape_26, reshape_32, reshape_38] - del reshape_26, reshape_32, reshape_38 - - # pd_op.concat: (-1x10x-1xf32) <- ([-1x10x-1xf32, -1x10x-1xf32, -1x10x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_21, full_7) - del combine_21, full_7 - - # builtin.combine: ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32]) <- (-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32) - combine_22 = [squeeze_0, squeeze_1, squeeze_2] - del squeeze_0, squeeze_1, squeeze_2 - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x4xf32, -1x-1x4xf32, -1x-1x4xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_22, full_0) - del combine_22, full_0 - - return concat_0, concat_1 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py deleted file mode 100644 index c934d8553..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_8/weight_meta.py +++ /dev/null @@ -1,8595 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [40, 192, 3, 3] - dtype = "float32" - min_val = float("-0.200868") - max_val = float("0.205409") - mean = float("1.50903e-08") - std = float("0.0117333") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [192] - dtype = "float32" - min_val = 
float("-0.0496614") - max_val = float("0.233447") - mean = float("0.0551649") - std = float("0.0442642") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [192] - dtype = "float32" - min_val = float("0.83662") - max_val = float("1.62777") - mean = float("1.22152") - std = float("0.145383") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [192] - dtype = "float32" - min_val = float("0.00517837") - max_val = float("5.37372") - mean = float("0.472226") - std = float("0.745068") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [192] - dtype = "float32" - min_val = float("-8.59248") - max_val = float("10.1229") - mean = float("0.104301") - std = float("2.84169") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.10201") - max_val = float("0.139453") - mean = float("-0.000771004") - std = float("0.0121333") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [192] - dtype = "float32" - min_val = float("-0.00864008") - max_val = float("0.0157539") - mean = float("-0.000141521") - std = float("0.00408479") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0103206") - max_val = float("0.0183671") - mean = float("-0.000260799") - std = float("0.00202219") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [10, 192, 3, 3] - dtype = "float32" - min_val = float("-1156.18") - max_val = float("134.34") - mean = float("-22.0743") - std = float("92.6504") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [192] - dtype = "float32" - min_val = float("-83.5158") - max_val = float("85.9358") - mean = float("2.58239") - std = float("26.6989") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [192] - dtype = "float32" - min_val = float("-14.174") - max_val = float("24.9743") - mean = float("-0.553124") - std = float("5.93549") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [192] - dtype = "float32" - min_val = float("2.28992") - max_val = float("16941700.0") - mean = float("535823.0") - std = float("1889860.0") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [192] - dtype = "float32" - min_val = float("-12545.3") - max_val = float("7938.65") - mean = float("-494.603") - std = float("2450.0") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-139.267") - max_val = float("101.02") - mean = float("-0.0994834") - std = float("4.18182") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [192] - dtype = "float32" - min_val = float("-11.0196") - max_val = float("7.12431") - mean = float("-0.172978") - std = float("1.77779") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-21.8633") - max_val = float("14.1281") - mean = 
float("-0.061946") - std = float("0.961227") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [40, 384, 3, 3] - dtype = "float32" - min_val = float("-0.125302") - max_val = float("0.130317") - mean = float("3.40515e-09") - std = float("0.00680734") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.00280162") - max_val = float("0.100804") - mean = float("0.0327571") - std = float("0.0175274") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.99914") - max_val = float("1.24063") - mean = float("1.1068") - std = float("0.0410271") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.00351367") - max_val = float("0.502965") - mean = float("0.0475644") - std = float("0.0560398") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.20324") - max_val = float("0.163306") - mean = float("-0.0211934") - std = float("0.0480836") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0595396") - max_val = float("0.0686159") - mean = float("-0.000519359") - std = float("0.00405674") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.00283386") - max_val = float("0.00804157") - mean = float("4.09459e-05") - std = float("0.00164843") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.00192633") - max_val = float("0.00573757") - mean = float("-3.72961e-05") - std = float("0.000619199") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [10, 384, 3, 3] - dtype = "float32" - min_val = float("-4.37321") - max_val = float("0.452128") - mean = float("-0.17109") - std = float("0.298945") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.129645") - max_val = float("0.537367") - mean = float("0.252723") - std = float("0.116707") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("0.994167") - max_val = float("1.41211") - mean = float("1.16997") - std = float("0.0589217") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.190338") - max_val = float("627.738") - mean = float("13.2438") - std = float("39.4458") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("-6.47831") - max_val = float("2.53274") - mean = float("-0.258943") - std = float("0.862987") - data = None - - -class 
Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.37398") - max_val = float("0.93716") - mean = float("-0.0050287") - std = float("0.034184") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384] - dtype = "float32" - min_val = float("-0.168767") - max_val = float("0.0326032") - mean = float("0.00033161") - std = float("0.0172771") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.13765") - max_val = float("0.0271847") - mean = float("-0.000469481") - std = float("0.00707315") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [1, 10, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [40, 768, 3, 3] - dtype = "float32" - min_val = float("-0.0666495") - max_val = float("0.0422192") - mean = float("4.22006e-10") - std = float("0.004326") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("-0.0211301") - max_val = float("0.0557583") - mean = float("0.00994542") - std = float("0.0117475") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [768] - dtype = "float32" - min_val = float("1.00683") - max_val = float("1.19995") - mean = float("1.06456") - std = float("0.0224702") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [768] - dtype = "float32" - min_val = float("0.00536813") - max_val = float("4.48397") - mean = float("0.148381") - std = float("0.22041") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [768] - dtype = "float32" - min_val = float("-0.2391") - max_val = float("0.308194") - mean = float("-0.0115961") - std = float("0.071089") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0316374") - max_val = float("0.030148") - mean = float("-0.000219685") - std = float("0.00274506") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [768] - dtype = "float32" - min_val = float("-0.00401297") - max_val = float("0.00319868") - mean = float("6.33231e-05") - std = float("0.000831526") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.00264943") - max_val = float("0.00247676") - mean = float("4.17753e-06") - std = float("0.000249163") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [10, 768, 3, 3] - dtype = "float32" - min_val = float("-3.67573") - max_val = float("0.47477") - mean = float("-0.108836") - std = float("0.235747") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [768] - dtype = "float32" - min_val = 
float("-0.0929864") - max_val = float("0.260814") - mean = float("0.112641") - std = float("0.0586929") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [768] - dtype = "float32" - min_val = float("0.978014") - max_val = float("1.2276") - mean = float("1.06585") - std = float("0.0280238") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [768] - dtype = "float32" - min_val = float("0.0583214") - max_val = float("179.503") - mean = float("8.00532") - std = float("16.3237") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [768] - dtype = "float32" - min_val = float("-3.13569") - max_val = float("2.18092") - mean = float("-0.125865") - std = float("0.472168") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.353861") - max_val = float("0.643743") - mean = float("-0.00192258") - std = float("0.0278211") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [768] - dtype = "float32" - min_val = float("-0.0411452") - max_val = float("0.024878") - mean = float("-2.52466e-05") - std = float("0.00729347") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0172894") - max_val = float("0.0205533") - mean = float("-6.97847e-05") - std = float("0.00177578") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [768] - dtype = "float32" - min_val = float("-0.148835") - max_val = float("0.366507") - mean = float("0.109619") - std = float("0.0784726") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [768] - dtype = "float32" - min_val = float("0.857119") - max_val = float("1.27215") - mean = float("1.05537") - std = float("0.0395249") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [768] - dtype = "float32" - min_val = float("2.1452") - max_val = float("15653.5") - mean = float("111.411") - std = float("608.25") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [768] - dtype = "float32" - min_val = float("-4.93063") - max_val = float("14.695") - mean = float("-0.317615") - std = float("1.44253") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.547982") - max_val = float("1.31648") - mean = float("-0.00228986") - std = float("0.031089") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.142261") - max_val = float("0.0236224") - mean = float("-0.0251959") - std = float("0.0245419") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.923585") - max_val = float("1.06744") - mean = float("0.990921") - std = float("0.0255245") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [384] - dtype = "float32" - min_val = float("0.0622324") - max_val = float("493.49") - mean = float("19.4794") - std = float("46.1366") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - 
min_val = float("-1.45105") - max_val = float("2.69314") - mean = float("0.0290829") - std = float("0.54902") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.528789") - max_val = float("0.475083") - mean = float("0.000636297") - std = float("0.0382416") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.142261") - max_val = float("0.0236224") - mean = float("-0.0251959") - std = float("0.0245419") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - min_val = float("0.956171") - max_val = float("1.12145") - mean = float("1.01722") - std = float("0.0276561") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val = float("0.329317") - max_val = float("3828.52") - mean = float("104.542") - std = float("264.784") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-8.30504") - max_val = float("7.29752") - mean = float("-0.046446") - std = float("1.50992") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.216923") - max_val = float("0.225482") - mean = float("-0.000140598") - std = float("0.0129397") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [384] - dtype = "float32" - min_val = float("-0.186619") - max_val = float("0.0253096") - mean = float("-0.0403406") - std = float("0.0301782") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [384] - dtype = "float32" - min_val = float("0.930641") - max_val = float("1.15944") - mean = float("1.0145") - std = float("0.0373315") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [384] - dtype = "float32" - min_val = float("2.04808") - max_val = float("21266.9") - mean = float("589.689") - std = float("2078.57") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [384] - dtype = "float32" - min_val = float("-24.5893") - max_val = float("30.4673") - mean = float("0.404088") - std = float("5.12896") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.338033") - max_val = float("0.42615") - mean = float("0.000530178") - std = float("0.0222656") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [384] - dtype = "float32" - min_val = float("-0.119217") - max_val = float("0.0124743") - mean = float("-0.0405258") - std = float("0.0215146") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [384] - dtype = "float32" - min_val = float("0.923648") - max_val = float("1.04368") - mean = float("0.989661") - std = float("0.0170925") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [384] - dtype = "float32" - min_val = float("0.0605594") - max_val = float("2562.44") - mean = float("30.3737") - std = float("153.669") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [384] - dtype = "float32" 
- min_val = float("-2.00406") - max_val = float("9.78589") - mean = float("0.0692858") - std = float("0.922895") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.359065") - max_val = float("0.736107") - mean = float("0.00142685") - std = float("0.0416064") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [384] - dtype = "float32" - min_val = float("-0.119217") - max_val = float("0.0124743") - mean = float("-0.0405258") - std = float("0.0215146") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [384] - dtype = "float32" - min_val = float("0.940925") - max_val = float("1.10279") - mean = float("1.01574") - std = float("0.0223556") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [384] - dtype = "float32" - min_val = float("1.38621") - max_val = float("4812.1") - mean = float("159.271") - std = float("503.316") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [384] - dtype = "float32" - min_val = float("-5.88263") - max_val = float("15.8533") - mean = float("0.140376") - std = float("2.40236") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.226706") - max_val = float("0.208603") - mean = float("0.000307493") - std = float("0.0144029") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [384] - dtype = "float32" - min_val = float("-0.130515") - max_val = float("0.0142189") - mean = float("-0.0397866") - std = float("0.0225046") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [384] - dtype = "float32" - min_val = float("0.862983") - max_val = float("1.11419") - mean = float("1.01096") - std = float("0.0331669") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [384] - dtype = "float32" - min_val = float("2.23371") - max_val = float("4709.64") - mean = float("260.249") - std = float("508.379") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [384] - dtype = "float32" - min_val = float("-13.5512") - max_val = float("16.4082") - mean = float("0.360129") - std = float("3.43025") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.191356") - max_val = float("0.178253") - mean = float("0.000526337") - std = float("0.0137294") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [384] - dtype = "float32" - min_val = float("-0.113425") - max_val = float("0.0127038") - mean = float("-0.0408835") - std = float("0.0214936") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [384] - dtype = "float32" - min_val = float("0.934591") - max_val = float("1.02911") - mean = float("0.987626") - std = float("0.0127672") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [384] - dtype = "float32" - min_val = float("0.0466938") - max_val = float("166.308") - mean = float("8.70658") - std = float("15.7428") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [384] - dtype = "float32" 
- min_val = float("-1.61681") - max_val = float("2.24513") - mean = float("0.120268") - std = float("0.507015") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.229179") - max_val = float("0.225991") - mean = float("0.00223037") - std = float("0.0276474") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [384] - dtype = "float32" - min_val = float("-0.113425") - max_val = float("0.0127038") - mean = float("-0.0408835") - std = float("0.0214936") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [384] - dtype = "float32" - min_val = float("0.966253") - max_val = float("1.11851") - mean = float("1.01774") - std = float("0.0232224") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [384] - dtype = "float32" - min_val = float("0.388381") - max_val = float("3807.62") - mean = float("53.0047") - std = float("216.966") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [384] - dtype = "float32" - min_val = float("-9.05284") - max_val = float("5.84204") - mean = float("0.111327") - std = float("1.23517") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.237817") - max_val = float("0.124946") - mean = float("0.000100381") - std = float("0.011587") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [384] - dtype = "float32" - min_val = float("-0.103321") - max_val = float("0.0232727") - mean = float("-0.0418488") - std = float("0.0225603") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [384] - dtype = "float32" - min_val = float("0.929001") - max_val = float("1.1158") - mean = float("1.01191") - std = float("0.0303721") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [384] - dtype = "float32" - min_val = float("0.323487") - max_val = float("2311.03") - mean = float("79.6394") - std = float("153.489") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [384] - dtype = "float32" - min_val = float("-6.12644") - max_val = float("14.665") - mean = float("0.49423") - std = float("2.21458") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.339134") - max_val = float("0.191022") - mean = float("0.000881583") - std = float("0.0173865") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [384] - dtype = "float32" - min_val = float("-0.109396") - max_val = float("0.0442887") - mean = float("-0.0296739") - std = float("0.017334") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [384] - dtype = "float32" - min_val = float("0.930867") - max_val = float("1.07806") - mean = float("1.00932") - std = float("0.0225248") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [384] - dtype = "float32" - min_val = float("0.146648") - max_val = float("626.166") - mean = float("15.3179") - std = float("43.6414") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [384] - 
dtype = "float32" - min_val = float("-6.69579") - max_val = float("5.18983") - mean = float("0.32161") - std = float("0.980842") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.560359") - max_val = float("0.446273") - mean = float("0.00139089") - std = float("0.0337706") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [384] - dtype = "float32" - min_val = float("-0.0578452") - max_val = float("0.016811") - mean = float("-0.0161156") - std = float("0.011502") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [384] - dtype = "float32" - min_val = float("0.920085") - max_val = float("1.10058") - mean = float("1.00226") - std = float("0.0184711") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [384] - dtype = "float32" - min_val = float("0.495073") - max_val = float("876.99") - mean = float("8.39435") - std = float("45.7821") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [384] - dtype = "float32" - min_val = float("-4.00856") - max_val = float("1.42008") - mean = float("-0.218361") - std = float("0.649639") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.290536") - max_val = float("0.175016") - mean = float("-0.00070814") - std = float("0.0138241") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [384] - dtype = "float32" - min_val = float("-0.0540258") - max_val = float("0.0115933") - mean = float("-0.0157488") - std = float("0.0104757") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [384] - dtype = "float32" - min_val = float("0.987802") - max_val = float("1.11339") - mean = float("1.02541") - std = float("0.019391") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [384] - dtype = "float32" - min_val = float("0.320507") - max_val = float("596.285") - mean = float("38.1824") - std = float("79.9607") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [384] - dtype = "float32" - min_val = float("-6.7222") - max_val = float("6.5412") - mean = float("-0.0320944") - std = float("1.4521") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0639514") - max_val = float("0.0601372") - mean = float("-8.51741e-05") - std = float("0.00526146") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [384] - dtype = "float32" - min_val = float("-0.216482") - max_val = float("0.607924") - mean = float("0.238936") - std = float("0.14305") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [384] - dtype = "float32" - min_val = float("0.677427") - max_val = float("1.50605") - mean = float("1.12914") - std = float("0.0838432") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [384] - dtype = "float32" - min_val = float("1.74736") - max_val = float("22695.2") - mean = float("204.124") - std = float("1630.7") - data = None - - -class Program_weight_tensor_parameter_123: - name = 
"parameter_123" - shape = [384] - dtype = "float32" - min_val = float("-10.7392") - max_val = float("6.92707") - mean = float("0.0292054") - std = float("0.881457") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-3.55448") - max_val = float("1.46716") - mean = float("0.00287851") - std = float("0.0619363") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.188818") - max_val = float("0.0459523") - mean = float("-0.0294237") - std = float("0.0400839") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.839771") - max_val = float("1.06967") - mean = float("0.976243") - std = float("0.0259122") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.0133959") - max_val = float("1454.17") - mean = float("13.1647") - std = float("105.548") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.261237") - max_val = float("1.28075") - mean = float("0.0271209") - std = float("0.114701") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.123591") - max_val = float("0.43619") - mean = float("0.00447012") - std = float("0.0296425") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.188818") - max_val = float("0.0459523") - mean = float("-0.0294237") - std = float("0.0400839") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.721948") - max_val = float("1.13657") - mean = float("1.02394") - std = float("0.0395189") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.142781") - max_val = float("538.324") - mean = float("19.9641") - std = float("54.0569") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.376722") - max_val = float("0.838454") - mean = float("0.0359573") - std = float("0.16185") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0490945") - max_val = float("0.0667615") - mean = float("0.00072352") - std = float("0.00630598") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [192] - dtype = "float32" - min_val = float("-0.197783") - max_val = float("0.0410661") - mean = float("-0.0617541") - std = float("0.0482775") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [192] - dtype = "float32" - min_val = float("0.894643") - max_val = float("1.18384") - mean = float("1.01562") - std = float("0.0486636") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [192] - dtype = "float32" - min_val = float("1.08082") - max_val = float("908.404") - mean = float("45.0674") - std = float("96.6794") - data = None - - -class 
Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [192] - dtype = "float32" - min_val = float("-0.933607") - max_val = float("1.82089") - mean = float("0.102296") - std = float("0.412254") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0519582") - max_val = float("0.0691943") - mean = float("0.000736163") - std = float("0.00618428") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [192] - dtype = "float32" - min_val = float("-0.192937") - max_val = float("0.00891605") - mean = float("-0.0669654") - std = float("0.0334937") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [192] - dtype = "float32" - min_val = float("0.921792") - max_val = float("1.05091") - mean = float("0.974424") - std = float("0.0180091") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [192] - dtype = "float32" - min_val = float("0.00757897") - max_val = float("5.59766") - mean = float("0.426099") - std = float("0.667531") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [192] - dtype = "float32" - min_val = float("-0.113501") - max_val = float("0.219786") - mean = float("0.0198383") - std = float("0.0428864") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.280343") - max_val = float("0.242364") - mean = float("0.00234259") - std = float("0.0137845") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [192] - dtype = "float32" - min_val = float("-0.192937") - max_val = float("0.00891605") - mean = float("-0.0669654") - std = float("0.0334937") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [192] - dtype = "float32" - min_val = float("0.965058") - max_val = float("1.14639") - mean = float("1.02358") - std = float("0.0295349") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [192] - dtype = "float32" - min_val = float("0.160794") - max_val = float("55.6905") - mean = float("2.44066") - std = float("4.39657") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [192] - dtype = "float32" - min_val = float("-0.264263") - max_val = float("0.791243") - mean = float("0.04945") - std = float("0.117164") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0585351") - max_val = float("0.0927808") - mean = float("0.000685876") - std = float("0.00518043") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [192] - dtype = "float32" - min_val = float("-0.196647") - max_val = float("0.0641564") - mean = float("-0.0774401") - std = float("0.0414081") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [192] - dtype = "float32" - min_val = float("0.877502") - max_val = float("1.23877") - mean = float("1.01492") - std = float("0.0523329") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [192] - dtype = "float32" - min_val = float("0.566361") - max_val = float("908.179") - mean = 
float("23.4539") - std = float("69.3761") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [192] - dtype = "float32" - min_val = float("-4.28743") - max_val = float("1.23655") - mean = float("0.0695418") - std = float("0.513951") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.135687") - max_val = float("0.125448") - mean = float("0.000810476") - std = float("0.00986455") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [192] - dtype = "float32" - min_val = float("-0.23144") - max_val = float("-0.0126862") - mean = float("-0.0851822") - std = float("0.0422856") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [192] - dtype = "float32" - min_val = float("0.899519") - max_val = float("1.03514") - mean = float("0.976157") - std = float("0.0242367") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [192] - dtype = "float32" - min_val = float("0.00813932") - max_val = float("152.929") - mean = float("3.6385") - std = float("11.7244") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [192] - dtype = "float32" - min_val = float("-0.318432") - max_val = float("0.187761") - mean = float("-0.0014536") - std = float("0.0590097") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.566113") - max_val = float("0.374272") - mean = float("-0.00203655") - std = float("0.033229") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [192] - dtype = "float32" - min_val = float("-0.23144") - max_val = float("-0.0126862") - mean = float("-0.0851822") - std = float("0.0422856") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [192] - dtype = "float32" - min_val = float("0.94299") - max_val = float("1.11066") - mean = float("1.02006") - std = float("0.0312105") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [192] - dtype = "float32" - min_val = float("0.1289") - max_val = float("1699.95") - mean = float("28.0305") - std = float("128.999") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [192] - dtype = "float32" - min_val = float("-1.81173") - max_val = float("0.674127") - mean = float("0.0159434") - std = float("0.239598") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.137869") - max_val = float("0.224919") - mean = float("-0.000155728") - std = float("0.0111564") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [192] - dtype = "float32" - min_val = float("-0.233221") - max_val = float("0.0637355") - mean = float("-0.0967178") - std = float("0.0459598") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [192] - dtype = "float32" - min_val = float("0.895324") - max_val = float("1.24271") - mean = float("1.01717") - std = float("0.0593265") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [192] - dtype = "float32" - min_val = 
float("0.156623") - max_val = float("607.838") - mean = float("54.6281") - std = float("95.2855") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [192] - dtype = "float32" - min_val = float("-1.62265") - max_val = float("1.34409") - mean = float("0.005687") - std = float("0.403979") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.235146") - max_val = float("0.2559") - mean = float("-0.000272163") - std = float("0.0222664") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [192] - dtype = "float32" - min_val = float("-0.199795") - max_val = float("0.0207518") - mean = float("-0.0665751") - std = float("0.0322603") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [192] - dtype = "float32" - min_val = float("0.838902") - max_val = float("1.28467") - mean = float("1.01429") - std = float("0.0597393") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [192] - dtype = "float32" - min_val = float("0.0720484") - max_val = float("1139.6") - mean = float("65.0503") - std = float("132.66") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [192] - dtype = "float32" - min_val = float("-7.56675") - max_val = float("2.4842") - mean = float("-0.383258") - std = float("1.53872") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.570014") - max_val = float("1.07892") - mean = float("-0.000455192") - std = float("0.0620441") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [192] - dtype = "float32" - min_val = float("-0.104993") - max_val = float("0.0307948") - mean = float("-0.0245968") - std = float("0.024377") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [192] - dtype = "float32" - min_val = float("0.822217") - max_val = float("1.21452") - mean = float("1.00159") - std = float("0.0456234") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [192] - dtype = "float32" - min_val = float("0.108174") - max_val = float("1899.67") - mean = float("46.8811") - std = float("186.384") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [192] - dtype = "float32" - min_val = float("-6.03851") - max_val = float("2.66203") - mean = float("0.0745216") - std = float("0.996719") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.874328") - max_val = float("0.287848") - mean = float("-0.00271476") - std = float("0.0440209") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [192] - dtype = "float32" - min_val = float("-0.168512") - max_val = float("0.00330333") - mean = float("-0.0443386") - std = float("0.0224484") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [192] - dtype = "float32" - min_val = float("0.849694") - max_val = float("1.19077") - mean = float("0.991767") - std = float("0.0408626") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [192] 
- dtype = "float32" - min_val = float("6.16524") - max_val = float("124201.0") - mean = float("2816.92") - std = float("9762.79") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [192] - dtype = "float32" - min_val = float("-516.507") - max_val = float("458.567") - mean = float("-22.2018") - std = float("117.069") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.399414") - max_val = float("0.235883") - mean = float("-0.0021386") - std = float("0.0211608") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [192] - dtype = "float32" - min_val = float("-85.128") - max_val = float("88.3635") - mean = float("3.58989") - std = float("27.7568") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [192] - dtype = "float32" - min_val = float("-31.9346") - max_val = float("19.7089") - mean = float("0.428102") - std = float("7.9625") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [192] - dtype = "float32" - min_val = float("4.93657") - max_val = float("817684.0") - mean = float("36364.9") - std = float("89921.0") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [192] - dtype = "float32" - min_val = float("-162.528") - max_val = float("314.565") - mean = float("4.32602") - std = float("60.1911") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-27.6825") - max_val = float("13.0687") - mean = float("-0.022105") - std = float("1.26716") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [96] - dtype = "float32" - min_val = float("-7.24863") - max_val = float("5.05502") - mean = float("-0.962893") - std = float("2.20937") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [96] - dtype = "float32" - min_val = float("-3.53429") - max_val = float("3.92679") - mean = float("0.710033") - std = float("1.35103") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [96] - dtype = "float32" - min_val = float("0.0751746") - max_val = float("5214.13") - mean = float("468.433") - std = float("1111.49") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [96] - dtype = "float32" - min_val = float("-45.3576") - max_val = float("94.342") - mean = float("6.78995") - std = float("24.5091") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-19.379") - max_val = float("5.94629") - mean = float("0.0933808") - std = float("0.838543") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [96] - dtype = "float32" - min_val = float("-7.24863") - max_val = float("5.05502") - mean = float("-0.962893") - std = float("2.20937") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [96] - dtype = "float32" - min_val = float("-5.32819") - max_val = float("4.14921") - mean = float("0.636519") - std = float("1.50817") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [96] - dtype = 
"float32" - min_val = float("1.04384") - max_val = float("59220.1") - mean = float("2569.29") - std = float("8056.44") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [96] - dtype = "float32" - min_val = float("-202.998") - max_val = float("339.698") - mean = float("7.6026") - std = float("67.6599") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-7.61018") - max_val = float("2.06596") - mean = float("0.00408673") - std = float("0.270734") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [96] - dtype = "float32" - min_val = float("-12.7487") - max_val = float("6.04207") - mean = float("-0.970305") - std = float("3.03245") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [96] - dtype = "float32" - min_val = float("-6.54554") - max_val = float("4.74788") - mean = float("0.995298") - std = float("1.40525") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [96] - dtype = "float32" - min_val = float("9.39318") - max_val = float("2190150.0") - mean = float("66154.5") - std = float("240596.0") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [96] - dtype = "float32" - min_val = float("-80.81") - max_val = float("224.697") - mean = float("9.21888") - std = float("37.6238") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-3.09795") - max_val = float("6.23125") - mean = float("0.039322") - std = float("0.339853") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [96] - dtype = "float32" - min_val = float("-6.57041") - max_val = float("1.23513") - mean = float("-1.08467") - std = float("1.31742") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [96] - dtype = "float32" - min_val = float("-4.57105") - max_val = float("7.43697") - mean = float("0.761073") - std = float("1.58355") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [96] - dtype = "float32" - min_val = float("0.122882") - max_val = float("5384610.0") - mean = float("110211.0") - std = float("572085.0") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [96] - dtype = "float32" - min_val = float("-48.2301") - max_val = float("166.551") - mean = float("6.24412") - std = float("24.2542") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-6.4048") - max_val = float("25.2239") - mean = float("0.251097") - std = float("1.3616") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [96] - dtype = "float32" - min_val = float("-6.57041") - max_val = float("1.23513") - mean = float("-1.08467") - std = float("1.31742") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [96] - dtype = "float32" - min_val = float("-4.84455") - max_val = float("4.21014") - mean = float("0.997804") - std = float("1.25033") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [96] - dtype = "float32" - min_val = 
float("8.7857") - max_val = float("11788800.0") - mean = float("727757.0") - std = float("1936730.0") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [96] - dtype = "float32" - min_val = float("-305.931") - max_val = float("314.393") - mean = float("20.0074") - std = float("74.2719") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-4.16137") - max_val = float("5.97165") - mean = float("0.112215") - std = float("0.461145") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [96] - dtype = "float32" - min_val = float("-6.38172") - max_val = float("0.373498") - mean = float("-1.33479") - std = float("1.05978") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [96] - dtype = "float32" - min_val = float("-5.79719") - max_val = float("12.1783") - mean = float("0.960027") - std = float("3.2153") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [96] - dtype = "float32" - min_val = float("45.1963") - max_val = float("16654400.0") - mean = float("673305.0") - std = float("2003700.0") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [96] - dtype = "float32" - min_val = float("-355.958") - max_val = float("408.085") - mean = float("8.33521") - std = float("84.948") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-5.59114") - max_val = float("6.22526") - mean = float("0.0518284") - std = float("0.488386") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [96] - dtype = "float32" - min_val = float("-6.40528") - max_val = float("0.439558") - mean = float("-1.04892") - std = float("1.44668") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [96] - dtype = "float32" - min_val = float("-7.47233") - max_val = float("12.2951") - mean = float("0.896125") - std = float("2.48316") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [96] - dtype = "float32" - min_val = float("0.227469") - max_val = float("674599.0") - mean = float("19715.9") - std = float("80090.6") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [96] - dtype = "float32" - min_val = float("-91.0419") - max_val = float("72.2002") - mean = float("2.37886") - std = float("17.3356") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-11.5461") - max_val = float("22.862") - mean = float("0.0613126") - std = float("1.12064") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [96] - dtype = "float32" - min_val = float("-6.40528") - max_val = float("0.439558") - mean = float("-1.04892") - std = float("1.44668") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [96] - dtype = "float32" - min_val = float("-5.55409") - max_val = float("8.57283") - mean = float("0.767581") - std = float("1.91915") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [96] - dtype = "float32" - min_val = float("36.8885") - 
max_val = float("1641000.0") - mean = float("154074.0") - std = float("333545.0") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [96] - dtype = "float32" - min_val = float("-148.097") - max_val = float("151.343") - mean = float("8.60596") - std = float("46.4808") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-6.37265") - max_val = float("6.48435") - mean = float("0.0246406") - std = float("0.404267") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [96] - dtype = "float32" - min_val = float("-12.1058") - max_val = float("2.65046") - mean = float("-1.35949") - std = float("2.31774") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [96] - dtype = "float32" - min_val = float("-5.35437") - max_val = float("15.6996") - mean = float("0.620018") - std = float("3.80279") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [96] - dtype = "float32" - min_val = float("119.771") - max_val = float("2429810.0") - mean = float("193542.0") - std = float("392172.0") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [96] - dtype = "float32" - min_val = float("-183.549") - max_val = float("100.622") - mean = float("1.67747") - std = float("40.3903") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-5.05043") - max_val = float("3.64647") - mean = float("-0.00681032") - std = float("0.486976") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [96] - dtype = "float32" - min_val = float("-6.51727") - max_val = float("4.07312") - mean = float("-0.742646") - std = float("1.54691") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [96] - dtype = "float32" - min_val = float("-17.2429") - max_val = float("16.1753") - mean = float("0.496246") - std = float("4.30544") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [96] - dtype = "float32" - min_val = float("33.613") - max_val = float("328553.0") - mean = float("25107.8") - std = float("56405.7") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [96] - dtype = "float32" - min_val = float("-61.9912") - max_val = float("47.6969") - mean = float("-1.08568") - std = float("14.1662") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-22.5977") - max_val = float("9.97906") - mean = float("0.00384249") - std = float("1.00918") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [96] - dtype = "float32" - min_val = float("-2.39003") - max_val = float("1.00251") - mean = float("-0.133253") - std = float("0.503822") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [96] - dtype = "float32" - min_val = float("-4.56305") - max_val = float("6.14021") - mean = float("1.38317") - std = float("1.84076") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [96] - dtype = "float32" - min_val = float("5.07934") - max_val = 
float("29574.8") - mean = float("1194.75") - std = float("3229.19") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [96] - dtype = "float32" - min_val = float("-4.92322") - max_val = float("20.0659") - mean = float("1.71861") - std = float("4.04769") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-3.49471") - max_val = float("1.20362") - mean = float("0.00515059") - std = float("0.189079") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-6.61152") - max_val = float("1.7835") - mean = float("-0.419521") - std = float("1.07353") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("-14.4931") - max_val = float("16.6779") - mean = float("0.0422574") - std = float("2.46703") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.28209") - max_val = float("76246.9") - mean = float("6772.73") - std = float("14681.3") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-31.4966") - max_val = float("41.7212") - mean = float("1.71254") - std = float("7.38567") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-3.49758") - max_val = float("2.55718") - mean = float("0.0265243") - std = float("0.348243") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [384] - dtype = "float32" - min_val = float("-1.87838") - max_val = float("1.09629") - mean = float("-0.183799") - std = float("0.345718") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [384] - dtype = "float32" - min_val = float("-3.0247") - max_val = float("2.74203") - mean = float("0.964417") - std = float("0.845217") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [384] - dtype = "float32" - min_val = float("0.258076") - max_val = float("117117.0") - mean = float("2098.36") - std = float("9309.06") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [384] - dtype = "float32" - min_val = float("-37.8342") - max_val = float("38.526") - mean = float("2.07375") - std = float("5.93701") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-3.75963") - max_val = float("3.45352") - mean = float("-0.00284906") - std = float("0.205711") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [192] - dtype = "float32" - min_val = float("-0.377474") - max_val = float("0.270252") - mean = float("-0.09324") - std = float("0.113903") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [192] - dtype = "float32" - min_val = float("0.589898") - max_val = float("2.19572") - mean = float("1.10833") - std = float("0.298081") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [192] - dtype = "float32" - min_val = float("0.0224855") - max_val = 
float("6349.83") - mean = float("93.5002") - std = float("543.789") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [192] - dtype = "float32" - min_val = float("-0.711802") - max_val = float("1.86246") - mean = float("0.0422734") - std = float("0.271396") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.513946") - max_val = float("1.90858") - mean = float("0.0211941") - std = float("0.126887") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [192] - dtype = "float32" - min_val = float("-0.377474") - max_val = float("0.270252") - mean = float("-0.09324") - std = float("0.113903") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [192] - dtype = "float32" - min_val = float("0.664841") - max_val = float("2.1235") - mean = float("1.14039") - std = float("0.294828") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [192] - dtype = "float32" - min_val = float("0.447646") - max_val = float("9622.59") - mean = float("555.521") - std = float("1466.93") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [192] - dtype = "float32" - min_val = float("-1.42131") - max_val = float("4.63902") - mean = float("0.15401") - std = float("0.797684") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.200786") - max_val = float("0.349522") - mean = float("0.00864049") - std = float("0.0419031") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [192] - dtype = "float32" - min_val = float("-0.376564") - max_val = float("0.0843041") - mean = float("-0.165316") - std = float("0.0820009") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [192] - dtype = "float32" - min_val = float("0.618528") - max_val = float("1.87359") - mean = float("1.00495") - std = float("0.220123") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [192] - dtype = "float32" - min_val = float("4.40675") - max_val = float("11495.1") - mean = float("1075.25") - std = float("1952.54") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [192] - dtype = "float32" - min_val = float("-7.62537") - max_val = float("8.84763") - mean = float("0.605399") - std = float("2.44971") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.610252") - max_val = float("0.46974") - mean = float("0.00434873") - std = float("0.0378261") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [192] - dtype = "float32" - min_val = float("-0.396329") - max_val = float("0.0895731") - mean = float("-0.135227") - std = float("0.0909936") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [192] - dtype = "float32" - min_val = float("0.724836") - max_val = float("1.31254") - mean = float("0.989403") - std = float("0.0899495") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [192] - dtype = "float32" - min_val = 
float("0.0366499") - max_val = float("2579.48") - mean = float("94.136") - std = float("297.849") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [192] - dtype = "float32" - min_val = float("-1.21638") - max_val = float("3.00263") - mean = float("0.19362") - std = float("0.597692") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-1.50522") - max_val = float("2.56014") - mean = float("0.0167357") - std = float("0.128452") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [192] - dtype = "float32" - min_val = float("-0.396329") - max_val = float("0.0895731") - mean = float("-0.135227") - std = float("0.0909936") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [192] - dtype = "float32" - min_val = float("0.702365") - max_val = float("1.48726") - mean = float("0.978343") - std = float("0.0932404") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [192] - dtype = "float32" - min_val = float("0.341776") - max_val = float("10383.4") - mean = float("546.718") - std = float("1146.4") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [192] - dtype = "float32" - min_val = float("-4.04849") - max_val = float("7.68697") - mean = float("0.626776") - std = float("1.72632") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.496227") - max_val = float("0.685374") - mean = float("0.00417206") - std = float("0.0441635") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [192] - dtype = "float32" - min_val = float("-1.64459") - max_val = float("0.267005") - mean = float("-0.224858") - std = float("0.296988") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [192] - dtype = "float32" - min_val = float("-1.27679") - max_val = float("2.832") - mean = float("1.02513") - std = float("0.465622") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [192] - dtype = "float32" - min_val = float("5.86811") - max_val = float("186650.0") - mean = float("7734.42") - std = float("21154.4") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [192] - dtype = "float32" - min_val = float("-13.8475") - max_val = float("24.3198") - mean = float("1.04016") - std = float("5.19036") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-2.35083") - max_val = float("1.83667") - mean = float("-0.000570115") - std = float("0.103887") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [192] - dtype = "float32" - min_val = float("-1.03838") - max_val = float("0.354784") - mean = float("-0.15507") - std = float("0.21939") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [192] - dtype = "float32" - min_val = float("-1.45219") - max_val = float("2.97832") - mean = float("0.989616") - std = float("0.41137") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [192] - dtype = "float32" - 
min_val = float("0.0529003") - max_val = float("11535.5") - mean = float("313.393") - std = float("1268.61") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [192] - dtype = "float32" - min_val = float("-5.17365") - max_val = float("2.80353") - mean = float("-0.0123197") - std = float("0.835049") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-4.9408") - max_val = float("2.27411") - mean = float("0.000442447") - std = float("0.213835") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [192] - dtype = "float32" - min_val = float("-1.03838") - max_val = float("0.354784") - mean = float("-0.15507") - std = float("0.21939") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [192] - dtype = "float32" - min_val = float("-1.73076") - max_val = float("4.22948") - mean = float("0.992476") - std = float("0.513532") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [192] - dtype = "float32" - min_val = float("1.28858") - max_val = float("72884.6") - mean = float("1632.57") - std = float("6195.79") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [192] - dtype = "float32" - min_val = float("-13.4733") - max_val = float("7.33824") - mean = float("0.00635399") - std = float("2.2987") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-2.75576") - max_val = float("1.65718") - mean = float("0.00141057") - std = float("0.0847456") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [192] - dtype = "float32" - min_val = float("-1.54041") - max_val = float("1.11627") - mean = float("-0.2651") - std = float("0.234523") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [192] - dtype = "float32" - min_val = float("-1.63593") - max_val = float("4.58953") - mean = float("1.00964") - std = float("0.703509") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [192] - dtype = "float32" - min_val = float("2.05421") - max_val = float("104887.0") - mean = float("4244.7") - std = float("11719.7") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [192] - dtype = "float32" - min_val = float("-25.8375") - max_val = float("52.7078") - mean = float("1.02666") - std = float("7.5325") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-2.79383") - max_val = float("1.88844") - mean = float("-0.00365623") - std = float("0.104147") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [192] - dtype = "float32" - min_val = float("-0.871095") - max_val = float("1.01126") - mean = float("-0.217258") - std = float("0.291713") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [192] - dtype = "float32" - min_val = float("-1.96925") - max_val = float("3.64939") - mean = float("1.00405") - std = float("0.722218") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [192] - dtype = "float32" 
- min_val = float("0.620813") - max_val = float("27316.4") - mean = float("1238.85") - std = float("3200.83") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [192] - dtype = "float32" - min_val = float("-12.1711") - max_val = float("17.836") - mean = float("0.562202") - std = float("3.62838") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-3.17099") - max_val = float("1.29685") - mean = float("-0.0023653") - std = float("0.108022") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [192] - dtype = "float32" - min_val = float("-1.10782") - max_val = float("0.300641") - mean = float("-0.173971") - std = float("0.264032") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [192] - dtype = "float32" - min_val = float("-1.04342") - max_val = float("2.59244") - mean = float("0.671735") - std = float("0.577557") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [192] - dtype = "float32" - min_val = float("0.398919") - max_val = float("62548.8") - mean = float("1538.4") - std = float("5143.75") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [192] - dtype = "float32" - min_val = float("-16.9307") - max_val = float("5.62477") - mean = float("-1.1326") - std = float("2.74663") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-1.82279") - max_val = float("2.47484") - mean = float("0.00901672") - std = float("0.107436") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-1.13258") - max_val = float("0.552573") - mean = float("-0.197087") - std = float("0.242407") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - min_val = float("-2.53691") - max_val = float("1.43108") - mean = float("0.682152") - std = float("0.594817") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.191916") - max_val = float("2843.48") - mean = float("114.134") - std = float("305.508") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-24.6995") - max_val = float("10.5962") - mean = float("-0.476045") - std = float("2.8163") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-1.24276") - max_val = float("0.692904") - mean = float("-0.00886373") - std = float("0.0955856") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [768] - dtype = "float32" - min_val = float("-0.552757") - max_val = float("0.267353") - mean = float("-0.0446819") - std = float("0.0843754") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [768] - dtype = "float32" - min_val = float("-0.652821") - max_val = float("1.76439") - mean = float("1.00747") - std = float("0.251713") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [768] - dtype = 
"float32" - min_val = float("0.776778") - max_val = float("7746.52") - mean = float("431.196") - std = float("833.862") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [768] - dtype = "float32" - min_val = float("-41.0995") - max_val = float("25.7342") - mean = float("3.18905") - std = float("8.42379") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-1.4092") - max_val = float("1.23702") - mean = float("-0.000496878") - std = float("0.0816775") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.644666") - max_val = float("0.835279") - mean = float("0.00588657") - std = float("0.183308") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("0.330389") - max_val = float("1.98606") - mean = float("1.01943") - std = float("0.246731") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.17996") - max_val = float("722.077") - mean = float("49.876") - std = float("90.3738") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-8.71472") - max_val = float("7.07804") - mean = float("-0.0401495") - std = float("2.17987") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.945908") - max_val = float("0.673776") - mean = float("-0.0019768") - std = float("0.0900507") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [384] - dtype = "float32" - min_val = float("-0.644666") - max_val = float("0.835279") - mean = float("0.00588657") - std = float("0.183308") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [384] - dtype = "float32" - min_val = float("0.369886") - max_val = float("1.99554") - mean = float("1.01704") - std = float("0.242599") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [384] - dtype = "float32" - min_val = float("1.10774") - max_val = float("5474.29") - mean = float("285.755") - std = float("535.771") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [384] - dtype = "float32" - min_val = float("-18.3584") - max_val = float("28.6474") - mean = float("0.580143") - std = float("5.75435") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.300868") - max_val = float("0.28201") - mean = float("0.0005649") - std = float("0.0293953") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [384] - dtype = "float32" - min_val = float("-0.428626") - max_val = float("0.483915") - mean = float("0.00707735") - std = float("0.142579") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [384] - dtype = "float32" - min_val = float("0.0548827") - max_val = float("2.17216") - mean = float("1.02118") - std = float("0.25626") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = 
[384] - dtype = "float32" - min_val = float("5.82237") - max_val = float("71105.7") - mean = float("966.456") - std = float("3856.37") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [384] - dtype = "float32" - min_val = float("-74.904") - max_val = float("45.7392") - mean = float("-4.55969") - std = float("15.2973") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-1.28932") - max_val = float("0.64534") - mean = float("-0.00766119") - std = float("0.0588517") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [384] - dtype = "float32" - min_val = float("-0.417859") - max_val = float("0.567516") - mean = float("0.0358606") - std = float("0.145139") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [384] - dtype = "float32" - min_val = float("-0.692244") - max_val = float("2.91955") - mean = float("0.989476") - std = float("0.366097") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [384] - dtype = "float32" - min_val = float("35.6891") - max_val = float("527628.0") - mean = float("15940.6") - std = float("54157.5") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [384] - dtype = "float32" - min_val = float("-1304.56") - max_val = float("1018.05") - mean = float("11.2757") - std = float("221.509") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-1.70677") - max_val = float("0.933125") - mean = float("-0.00166836") - std = float("0.0885713") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [384] - dtype = "float32" - min_val = float("-0.0873874") - max_val = float("0.120578") - mean = float("-0.000954519") - std = float("0.0228511") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [384] - dtype = "float32" - min_val = float("0.237824") - max_val = float("1.58927") - mean = float("0.995856") - std = float("0.165535") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [384] - dtype = "float32" - min_val = float("0.722547") - max_val = float("6460.57") - mean = float("77.691") - std = float("369.085") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [384] - dtype = "float32" - min_val = float("-20.8843") - max_val = float("8.9376") - mean = float("-0.0632021") - std = float("2.59641") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-1.57816") - max_val = float("0.862954") - mean = float("-0.00389417") - std = float("0.0817908") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [384] - dtype = "float32" - min_val = float("-0.0873874") - max_val = float("0.120578") - mean = float("-0.000954519") - std = float("0.0228511") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [384] - dtype = "float32" - min_val = float("-0.0396481") - max_val = float("1.62807") - mean = float("1.00169") - std = float("0.153603") - data = None - - -class Program_weight_tensor_parameter_347: - name 
= "parameter_347" - shape = [384] - dtype = "float32" - min_val = float("6.51221") - max_val = float("5904.92") - mean = float("267.165") - std = float("563.207") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [384] - dtype = "float32" - min_val = float("-26.0803") - max_val = float("22.4756") - mean = float("-0.244403") - std = float("5.5182") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.331772") - max_val = float("0.275082") - mean = float("-0.00114864") - std = float("0.0288421") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [384] - dtype = "float32" - min_val = float("-0.169758") - max_val = float("0.387136") - mean = float("-0.0135191") - std = float("0.0872435") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [384] - dtype = "float32" - min_val = float("0.530164") - max_val = float("1.95399") - mean = float("1.02728") - std = float("0.170907") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [384] - dtype = "float32" - min_val = float("34.4183") - max_val = float("28418.4") - mean = float("1029.76") - std = float("2701.61") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [384] - dtype = "float32" - min_val = float("-84.2497") - max_val = float("28.1431") - mean = float("1.92494") - std = float("14.5184") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.509431") - max_val = float("0.23397") - mean = float("0.000289444") - std = float("0.0306037") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [384] - dtype = "float32" - min_val = float("-0.17013") - max_val = float("0.363103") - mean = float("0.0145125") - std = float("0.0634369") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [384] - dtype = "float32" - min_val = float("0.693201") - max_val = float("1.62355") - mean = float("1.01359") - std = float("0.102217") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [384] - dtype = "float32" - min_val = float("0.931458") - max_val = float("373.701") - mean = float("14.1979") - std = float("26.7127") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [384] - dtype = "float32" - min_val = float("-4.77194") - max_val = float("7.91629") - mean = float("0.274654") - std = float("1.32549") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.446355") - max_val = float("0.493553") - mean = float("0.00261486") - std = float("0.0485342") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [384] - dtype = "float32" - min_val = float("-0.17013") - max_val = float("0.363103") - mean = float("0.0145125") - std = float("0.0634369") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [384] - dtype = "float32" - min_val = float("0.114663") - max_val = float("1.61016") - mean = float("0.991551") - std = float("0.149223") - data = None - - -class 
Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [384] - dtype = "float32" - min_val = float("7.85773") - max_val = float("1001.22") - mean = float("89.9772") - std = float("108.332") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [384] - dtype = "float32" - min_val = float("-13.9198") - max_val = float("19.3761") - mean = float("1.00742") - std = float("4.17813") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.295433") - max_val = float("0.246348") - mean = float("0.00110397") - std = float("0.0234049") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [384] - dtype = "float32" - min_val = float("-0.234135") - max_val = float("0.22812") - mean = float("0.00559602") - std = float("0.0627099") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [384] - dtype = "float32" - min_val = float("0.597373") - max_val = float("1.56294") - mean = float("1.00758") - std = float("0.141447") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [384] - dtype = "float32" - min_val = float("16.4499") - max_val = float("639.647") - mean = float("101.474") - std = float("86.4824") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [384] - dtype = "float32" - min_val = float("-11.5778") - max_val = float("17.0663") - mean = float("2.01731") - std = float("3.57531") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.234291") - max_val = float("0.284904") - mean = float("0.00264446") - std = float("0.0302637") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [384] - dtype = "float32" - min_val = float("-0.405084") - max_val = float("0.228539") - mean = float("-0.0546357") - std = float("0.106613") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [384] - dtype = "float32" - min_val = float("0.456281") - max_val = float("1.81298") - mean = float("1.01603") - std = float("0.21616") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [384] - dtype = "float32" - min_val = float("9.59515") - max_val = float("11834.9") - mean = float("612.148") - std = float("918.789") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [384] - dtype = "float32" - min_val = float("-247.962") - max_val = float("324.535") - mean = float("-18.2895") - std = float("68.074") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-6.13846") - max_val = float("3.56809") - mean = float("-0.000144253") - std = float("0.0970159") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [384] - dtype = "float32" - min_val = float("-0.108175") - max_val = float("0.143437") - mean = float("-0.0212447") - std = float("0.0285578") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [384] - dtype = "float32" - min_val = float("0.556928") - max_val = float("1.33029") - mean = float("0.984681") - std = float("0.116005") - data = 
None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [384] - dtype = "float32" - min_val = float("0.748213") - max_val = float("62636.1") - mean = float("1444.21") - std = float("4777.5") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [384] - dtype = "float32" - min_val = float("-786.339") - max_val = float("426.68") - mean = float("32.1313") - std = float("104.81") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-7.12945") - max_val = float("13.8328") - mean = float("-0.000268657") - std = float("0.0830857") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [1024] - dtype = "float32" - min_val = float("-2.13639e-06") - max_val = float("2.09543e-06") - mean = float("8.64227e-09") - std = float("3.57256e-07") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [1024] - dtype = "float32" - min_val = float("-5.47744") - max_val = float("11.4054") - mean = float("0.768884") - std = float("0.662971") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [1024] - dtype = "float32" - min_val = float("-0.427674") - max_val = float("0.266324") - mean = float("-0.000194783") - std = float("0.0566951") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-8.0062") - max_val = float("2.78176") - mean = float("-5.77928e-05") - std = float("0.0557509") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [2048] - dtype = "float32" - min_val = float("-0.251987") - max_val = float("0.302276") - mean = float("0.000512312") - std = float("0.0388834") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-3.07176") - max_val = float("3.57066") - mean = float("-7.0143e-08") - std = float("0.0401237") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [1024] - dtype = "float32" - min_val = float("-0.373534") - max_val = float("0.402167") - mean = float("0.000830289") - std = float("0.0765843") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [1024] - dtype = "float32" - min_val = float("-2.75952") - max_val = float("2.92771") - mean = float("0.758426") - std = float("0.282416") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [1024] - dtype = "float32" - min_val = float("-0.193343") - max_val = float("0.180378") - mean = float("-0.000779537") - std = float("0.0370855") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-1.38014") - max_val = float("1.27733") - mean = float("-9.81423e-07") - std = float("0.0600835") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [1024] - dtype = "float32" - min_val = float("-0.757163") - max_val = float("1.00779") - mean = float("0.000330772") - std = float("0.236465") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [1024] - dtype = "float32" - min_val = float("-0.817671") - max_val = 
float("2.55534") - mean = float("0.753427") - std = float("0.227651") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [1024] - dtype = "float32" - min_val = float("-0.222994") - max_val = float("0.252568") - mean = float("-0.00120532") - std = float("0.048857") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.870748") - max_val = float("1.17152") - mean = float("-8.51244e-05") - std = float("0.0362422") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [2048] - dtype = "float32" - min_val = float("-0.100256") - max_val = float("0.37127") - mean = float("-0.000329183") - std = float("0.0429112") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-2.07235") - max_val = float("2.28312") - mean = float("-7.78487e-08") - std = float("0.0418402") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [1024] - dtype = "float32" - min_val = float("-1.1905") - max_val = float("1.43001") - mean = float("0.00104808") - std = float("0.299857") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [1024] - dtype = "float32" - min_val = float("-1.34618") - max_val = float("2.57738") - mean = float("0.767945") - std = float("0.300486") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [1024] - dtype = "float32" - min_val = float("-0.119799") - max_val = float("0.13848") - mean = float("-0.000153038") - std = float("0.0233011") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.646145") - max_val = float("0.660627") - mean = float("9.12025e-06") - std = float("0.0406609") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [1024] - dtype = "float32" - min_val = float("-0.376786") - max_val = float("0.283756") - mean = float("0.000858306") - std = float("0.0796666") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [1024] - dtype = "float32" - min_val = float("0.432676") - max_val = float("1.70722") - mean = float("0.772387") - std = float("0.0772969") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [1024] - dtype = "float32" - min_val = float("-0.179557") - max_val = float("0.115916") - mean = float("-0.000489374") - std = float("0.0340337") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.809732") - max_val = float("0.561587") - mean = float("-3.63529e-05") - std = float("0.0201157") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [2048] - dtype = "float32" - min_val = float("-0.0614404") - max_val = float("0.170786") - mean = float("-0.000372604") - std = float("0.0228483") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.408439") - max_val = float("0.609131") - mean = float("-1.65619e-06") - std = float("0.01953") - data = None - - -class Program_weight_tensor_parameter_406: - name = 
"parameter_406" - shape = [1024] - dtype = "float32" - min_val = float("-0.132853") - max_val = float("0.116198") - mean = float("0.000532131") - std = float("0.0355694") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [1024] - dtype = "float32" - min_val = float("0.561748") - max_val = float("0.98164") - mean = float("0.774081") - std = float("0.0415141") - data = None - - -class Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [1024] - dtype = "float32" - min_val = float("-0.0332844") - max_val = float("0.0397268") - mean = float("0.000161912") - std = float("0.0099558") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.123777") - max_val = float("0.124455") - mean = float("1.09109e-05") - std = float("0.0259569") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [1024] - dtype = "float32" - min_val = float("-0.044097") - max_val = float("0.0392825") - mean = float("0.000663671") - std = float("0.011796") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [1024] - dtype = "float32" - min_val = float("0.677342") - max_val = float("0.824057") - mean = float("0.772183") - std = float("0.0108191") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [1024] - dtype = "float32" - min_val = float("-0.0510995") - max_val = float("0.0464933") - mean = float("-3.34255e-05") - std = float("0.0157544") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0809581") - max_val = float("0.0763313") - mean = float("-7.16222e-06") - std = float("0.0105854") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [2048] - dtype = "float32" - min_val = float("-0.0301308") - max_val = float("0.0358787") - mean = float("0.000247623") - std = float("0.0140049") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0659331") - max_val = float("0.064373") - mean = float("-1.40326e-06") - std = float("0.0142046") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [1024] - dtype = "float32" - min_val = float("-0.0295178") - max_val = float("0.0281065") - mean = float("1.88381e-05") - std = float("0.00950261") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [1024] - dtype = "float32" - min_val = float("0.644602") - max_val = float("0.848359") - mean = float("0.772298") - std = float("0.0127321") - data = None - - -class Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [1024] - dtype = "float32" - min_val = float("-0.0790784") - max_val = float("0.0651222") - mean = float("-0.000211896") - std = float("0.0151313") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.244804") - max_val = float("0.193716") - mean = float("7.12405e-06") - std = float("0.0253563") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [1024] - dtype = "float32" - min_val = float("-3.75718") - max_val = float("-0.734662") - mean = 
float("-2.18749") - std = float("0.42871") - data = None - - -class Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [1024] - dtype = "float32" - min_val = float("1.61923") - max_val = float("4.43994") - mean = float("3.0808") - std = float("0.254311") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [1024] - dtype = "float32" - min_val = float("0.00308285") - max_val = float("17013.4") - mean = float("25.4014") - std = float("536.038") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [1024] - dtype = "float32" - min_val = float("-4.71495") - max_val = float("23.0879") - mean = float("0.0230467") - std = float("0.959745") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-1.76134") - max_val = float("1.01155") - mean = float("-0.000333937") - std = float("0.0199517") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [768] - dtype = "float32" - min_val = float("-0.019835") - max_val = float("0.0113714") - mean = float("-0.000741206") - std = float("0.00266306") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0753175") - max_val = float("0.130102") - mean = float("-0.000275394") - std = float("0.00174935") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [384] - dtype = "float32" - min_val = float("-1.77417") - max_val = float("0.319995") - mean = float("-0.31086") - std = float("0.291331") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [384] - dtype = "float32" - min_val = float("0.188386") - max_val = float("1.81753") - mean = float("0.609696") - std = float("0.262622") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [384] - dtype = "float32" - min_val = float("6.15935e-05") - max_val = float("4.62814") - mean = float("0.0557435") - std = float("0.276651") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [384] - dtype = "float32" - min_val = float("-0.324677") - max_val = float("0.744616") - mean = float("0.0422052") - std = float("0.0941939") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0909018") - max_val = float("0.0595466") - mean = float("-0.000571428") - std = float("0.0044968") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [384] - dtype = "float32" - min_val = float("-1.77414") - max_val = float("0.320788") - mean = float("-0.31081") - std = float("0.291371") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [384] - dtype = "float32" - min_val = float("0.333443") - max_val = float("2.59782") - mean = float("1.02587") - std = float("0.290167") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [384] - dtype = "float32" - min_val = float("0.00526696") - max_val = float("10.6996") - mean = float("0.380233") - std = float("0.946604") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [384] - dtype = "float32" - min_val = 
float("-1.59457") - max_val = float("1.73203") - mean = float("0.027563") - std = float("0.392866") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0409816") - max_val = float("0.0679231") - mean = float("-6.68397e-05") - std = float("0.00268875") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [384] - dtype = "float32" - min_val = float("-2.58257") - max_val = float("0.0340648") - mean = float("-1.56873") - std = float("0.416046") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [384] - dtype = "float32" - min_val = float("0.521872") - max_val = float("1.64448") - mean = float("1.13567") - std = float("0.149452") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [384] - dtype = "float32" - min_val = float("0.0434595") - max_val = float("137.616") - mean = float("3.86704") - std = float("10.8376") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [384] - dtype = "float32" - min_val = float("-2.41808") - max_val = float("3.09603") - mean = float("0.231089") - std = float("0.62894") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.054677") - max_val = float("0.0563856") - mean = float("-0.000288685") - std = float("0.00307104") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [384] - dtype = "float32" - min_val = float("-1.93947") - max_val = float("0.64501") - mean = float("-0.575028") - std = float("0.358749") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - shape = [384] - dtype = "float32" - min_val = float("0.16358") - max_val = float("2.07281") - mean = float("0.562153") - std = float("0.227419") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [384] - dtype = "float32" - min_val = float("0.000134774") - max_val = float("9.62556") - mean = float("0.118304") - std = float("0.563335") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [384] - dtype = "float32" - min_val = float("-0.543874") - max_val = float("0.774253") - mean = float("0.0423873") - std = float("0.0963768") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0892927") - max_val = float("0.0554288") - mean = float("-0.000566883") - std = float("0.00429646") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [384] - dtype = "float32" - min_val = float("-1.93949") - max_val = float("0.646073") - mean = float("-0.575") - std = float("0.35884") - data = None - - -class Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [384] - dtype = "float32" - min_val = float("0.583556") - max_val = float("2.15683") - mean = float("1.08421") - std = float("0.255749") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [384] - dtype = "float32" - min_val = float("0.00783921") - max_val = float("124.799") - mean = float("1.60597") - std = float("7.86036") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = 
[384] - dtype = "float32" - min_val = float("-3.22343") - max_val = float("3.94482") - mean = float("0.0868392") - std = float("0.461958") - data = None - - -class Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0549026") - max_val = float("0.0517603") - mean = float("-0.000166704") - std = float("0.00299084") - data = None - - -class Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [384] - dtype = "float32" - min_val = float("-2.39604") - max_val = float("0.84662") - mean = float("-1.40556") - std = float("0.360638") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [384] - dtype = "float32" - min_val = float("0.454807") - max_val = float("1.91969") - mean = float("1.1665") - std = float("0.148108") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [384] - dtype = "float32" - min_val = float("0.045164") - max_val = float("100.409") - mean = float("2.60368") - std = float("7.74496") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [384] - dtype = "float32" - min_val = float("-1.74076") - max_val = float("2.04402") - mean = float("0.0999496") - std = float("0.364673") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0546141") - max_val = float("0.0505175") - mean = float("-0.000226174") - std = float("0.00309866") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [384] - dtype = "float32" - min_val = float("-1.87645") - max_val = float("0.453192") - mean = float("-0.485461") - std = float("0.3765") - data = None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [384] - dtype = "float32" - min_val = float("0.0773511") - max_val = float("2.11967") - mean = float("0.441788") - std = float("0.217778") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [384] - dtype = "float32" - min_val = float("0.000187171") - max_val = float("57.8954") - mean = float("0.270576") - std = float("2.97593") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [384] - dtype = "float32" - min_val = float("-0.586198") - max_val = float("2.71157") - mean = float("0.0563236") - std = float("0.173875") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.192933") - max_val = float("0.0894331") - mean = float("-0.000737868") - std = float("0.00524842") - data = None - - -class Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [384] - dtype = "float32" - min_val = float("-1.8768") - max_val = float("0.453535") - mean = float("-0.485423") - std = float("0.376585") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [384] - dtype = "float32" - min_val = float("0.522396") - max_val = float("2.22479") - mean = float("1.0531") - std = float("0.260023") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [384] - dtype = "float32" - min_val = float("0.0164889") - max_val = float("54.2063") - mean = float("1.7199") - std = float("5.49213") - data = None - - -class Program_weight_tensor_parameter_465: - 
name = "parameter_465" - shape = [384] - dtype = "float32" - min_val = float("-2.79875") - max_val = float("3.1827") - mean = float("0.133888") - std = float("0.520105") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0551643") - max_val = float("0.0464871") - mean = float("-0.000209229") - std = float("0.00312542") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [384] - dtype = "float32" - min_val = float("-2.15679") - max_val = float("0.417994") - mean = float("-1.36728") - std = float("0.27749") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [384] - dtype = "float32" - min_val = float("0.703467") - max_val = float("1.63812") - mean = float("1.14314") - std = float("0.101723") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [384] - dtype = "float32" - min_val = float("0.00919977") - max_val = float("81.76") - mean = float("1.34673") - std = float("4.5747") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [384] - dtype = "float32" - min_val = float("-0.575647") - max_val = float("1.06647") - mean = float("0.035897") - std = float("0.208355") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0647421") - max_val = float("0.0467325") - mean = float("-0.000209626") - std = float("0.00304124") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [384] - dtype = "float32" - min_val = float("-2.92359") - max_val = float("1.66367") - mean = float("-0.760505") - std = float("0.643564") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [384] - dtype = "float32" - min_val = float("0.952469") - max_val = float("2.9182") - mean = float("1.86349") - std = float("0.276369") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [384] - dtype = "float32" - min_val = float("0.00173967") - max_val = float("105.064") - mean = float("1.59074") - std = float("7.33152") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [384] - dtype = "float32" - min_val = float("-4.49694") - max_val = float("2.52903") - mean = float("0.12643") - std = float("0.583266") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.154099") - max_val = float("0.233525") - mean = float("-0.000812832") - std = float("0.00998468") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [384] - dtype = "float32" - min_val = float("-2.24744") - max_val = float("0.681285") - mean = float("-0.777265") - std = float("0.47295") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [384] - dtype = "float32" - min_val = float("0.965764") - max_val = float("2.89406") - mean = float("2.09742") - std = float("0.305508") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [384] - dtype = "float32" - min_val = float("0.000269628") - max_val = float("135.338") - mean = float("1.36164") - std = float("8.44144") - data = None - - -class 
Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [384] - dtype = "float32" - min_val = float("-5.20344") - max_val = float("3.39669") - mean = float("0.0630202") - std = float("0.514197") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.16414") - max_val = float("0.337926") - mean = float("-0.000419817") - std = float("0.00888272") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [768] - dtype = "float32" - min_val = float("-2.40228") - max_val = float("0.644848") - mean = float("-0.908493") - std = float("0.339383") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [768] - dtype = "float32" - min_val = float("0.530034") - max_val = float("1.90745") - mean = float("0.919715") - std = float("0.149307") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [768] - dtype = "float32" - min_val = float("0.0150888") - max_val = float("1732.27") - mean = float("29.3911") - std = float("114.584") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [768] - dtype = "float32" - min_val = float("-5.94074") - max_val = float("6.69318") - mean = float("0.211305") - std = float("0.84809") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [768, 512, 3, 3] - dtype = "float32" - min_val = float("-0.218355") - max_val = float("0.28161") - mean = float("-0.000294452") - std = float("0.0064194") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape = [512] - dtype = "float32" - min_val = float("-3.38939") - max_val = float("1.70918") - mean = float("-1.14983") - std = float("0.514302") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [512] - dtype = "float32" - min_val = float("0.521801") - max_val = float("2.22375") - mean = float("1.23032") - std = float("0.302913") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [512] - dtype = "float32" - min_val = float("0.0145403") - max_val = float("54186.8") - mean = float("348.808") - std = float("2906.74") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [512] - dtype = "float32" - min_val = float("-26.3804") - max_val = float("7.97325") - mean = float("-0.484726") - std = float("2.21214") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-2.03421") - max_val = float("3.42772") - mean = float("0.00117115") - std = float("0.0623002") - data = None - - -class Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [384] - dtype = "float32" - min_val = float("-0.0823458") - max_val = float("0.12967") - mean = float("-0.00608152") - std = float("0.0201329") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.224516") - max_val = float("0.342766") - mean = float("-0.00262916") - std = float("0.0143209") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [192] - dtype = "float32" - min_val = float("-1.98142") - max_val = float("0.451263") - mean = float("-0.346403") - std = 
float("0.335794") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [192] - dtype = "float32" - min_val = float("0.0373373") - max_val = float("2.155") - mean = float("0.583284") - std = float("0.423173") - data = None - - -class Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [192] - dtype = "float32" - min_val = float("0.000185619") - max_val = float("192.815") - mean = float("5.7247") - std = float("24.3824") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [192] - dtype = "float32" - min_val = float("-7.38842") - max_val = float("1.98841") - mean = float("-0.198995") - std = float("0.943909") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.580025") - max_val = float("2.15283") - mean = float("0.00513099") - std = float("0.0569125") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [192] - dtype = "float32" - min_val = float("-1.97967") - max_val = float("0.447507") - mean = float("-0.345995") - std = float("0.335506") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [192] - dtype = "float32" - min_val = float("0.371023") - max_val = float("2.70259") - mean = float("1.21044") - std = float("0.504894") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [192] - dtype = "float32" - min_val = float("0.00637081") - max_val = float("1674.61") - mean = float("44.1997") - std = float("166.821") - data = None - - -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [192] - dtype = "float32" - min_val = float("-18.0063") - max_val = float("7.12971") - mean = float("-0.405877") - std = float("3.08127") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.397216") - max_val = float("0.812883") - mean = float("0.0012172") - std = float("0.0227744") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [192] - dtype = "float32" - min_val = float("-2.89075") - max_val = float("-0.189656") - mean = float("-1.31556") - std = float("0.398965") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [192] - dtype = "float32" - min_val = float("0.646243") - max_val = float("2.09349") - mean = float("1.18163") - std = float("0.178004") - data = None - - -class Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [192] - dtype = "float32" - min_val = float("0.471418") - max_val = float("72426.6") - mean = float("1773.09") - std = float("6247.22") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [192] - dtype = "float32" - min_val = float("-37.115") - max_val = float("23.7835") - mean = float("0.211371") - std = float("5.61628") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.592805") - max_val = float("0.537074") - mean = float("-0.000652126") - std = float("0.0250761") - data = None - - -class Program_weight_tensor_parameter_509: - name = "parameter_509" - shape = [192] - dtype = "float32" - min_val = float("-1.94029") - max_val = float("0.531231") - mean = 
float("-0.278479") - std = float("0.323345") - data = None - - -class Program_weight_tensor_parameter_510: - name = "parameter_510" - shape = [192] - dtype = "float32" - min_val = float("0.0356558") - max_val = float("1.75139") - mean = float("0.446893") - std = float("0.305307") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [192] - dtype = "float32" - min_val = float("0.000301537") - max_val = float("145.303") - mean = float("1.92494") - std = float("11.1055") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [192] - dtype = "float32" - min_val = float("-2.88926") - max_val = float("1.57866") - mean = float("0.0242455") - std = float("0.365955") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.410132") - max_val = float("0.734914") - mean = float("-0.000525589") - std = float("0.0264104") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [192] - dtype = "float32" - min_val = float("-1.94023") - max_val = float("0.548467") - mean = float("-0.277621") - std = float("0.325203") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [192] - dtype = "float32" - min_val = float("0.481263") - max_val = float("2.27039") - mean = float("1.13778") - std = float("0.378291") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [192] - dtype = "float32" - min_val = float("0.0203109") - max_val = float("22478.4") - mean = float("225.796") - std = float("1659.66") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [192] - dtype = "float32" - min_val = float("-48.6907") - max_val = float("8.4395") - mean = float("-0.502169") - std = float("4.76403") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.313622") - max_val = float("1.27335") - mean = float("0.00170232") - std = float("0.031685") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [192] - dtype = "float32" - min_val = float("-2.50879") - max_val = float("-0.125677") - mean = float("-1.29056") - std = float("0.442856") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [192] - dtype = "float32" - min_val = float("0.664414") - max_val = float("1.67017") - mean = float("1.19888") - std = float("0.169685") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [192] - dtype = "float32" - min_val = float("0.287109") - max_val = float("29547.7") - mean = float("678.615") - std = float("2860.07") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [192] - dtype = "float32" - min_val = float("-7.98132") - max_val = float("18.8422") - mean = float("0.247665") - std = float("2.39087") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.523885") - max_val = float("0.286298") - mean = float("-0.000893893") - std = float("0.0195481") - data = None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [192] - dtype = "float32" - min_val = float("-1.7571") - max_val = 
float("0.488578") - mean = float("-0.261083") - std = float("0.338234") - data = None - - -class Program_weight_tensor_parameter_525: - name = "parameter_525" - shape = [192] - dtype = "float32" - min_val = float("0.00549547") - max_val = float("1.67906") - mean = float("0.350066") - std = float("0.250449") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [192] - dtype = "float32" - min_val = float("0.000263742") - max_val = float("67.4674") - mean = float("1.69806") - std = float("7.49189") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [192] - dtype = "float32" - min_val = float("-1.87036") - max_val = float("1.36173") - mean = float("0.00772141") - std = float("0.331707") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.244631") - max_val = float("0.629934") - mean = float("-0.000125584") - std = float("0.0216092") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [192] - dtype = "float32" - min_val = float("-1.75687") - max_val = float("0.505733") - mean = float("-0.259961") - std = float("0.340366") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [192] - dtype = "float32" - min_val = float("0.404833") - max_val = float("1.97797") - mean = float("1.0658") - std = float("0.336907") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - shape = [192] - dtype = "float32" - min_val = float("0.0455324") - max_val = float("2298.58") - mean = float("100.245") - std = float("322.102") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [192] - dtype = "float32" - min_val = float("-13.5983") - max_val = float("11.9641") - mean = float("0.202523") - std = float("2.83912") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.285037") - max_val = float("0.46813") - mean = float("-0.000664059") - std = float("0.0213735") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [192] - dtype = "float32" - min_val = float("-2.49739") - max_val = float("0.140187") - mean = float("-1.24342") - std = float("0.424497") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [192] - dtype = "float32" - min_val = float("0.651114") - max_val = float("1.77127") - mean = float("1.16792") - std = float("0.169098") - data = None - - -class Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [192] - dtype = "float32" - min_val = float("0.194451") - max_val = float("4128.54") - mean = float("163.103") - std = float("472.833") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [192] - dtype = "float32" - min_val = float("-4.64446") - max_val = float("5.59883") - mean = float("-0.14272") - std = float("1.30511") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.284668") - max_val = float("0.265208") - mean = float("0.000179334") - std = float("0.0149656") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [192] - dtype = "float32" - min_val = 
float("-2.07951") - max_val = float("0.55605") - mean = float("-0.270635") - std = float("0.37776") - data = None - - -class Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [192] - dtype = "float32" - min_val = float("-0.00686515") - max_val = float("0.752308") - mean = float("0.215992") - std = float("0.136444") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [192] - dtype = "float32" - min_val = float("1.77584e-06") - max_val = float("63.1959") - mean = float("0.938992") - std = float("4.94515") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [192] - dtype = "float32" - min_val = float("-2.2753") - max_val = float("1.12971") - mean = float("0.0266441") - std = float("0.358626") - data = None - - -class Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.345764") - max_val = float("0.421705") - mean = float("-0.000348753") - std = float("0.0214336") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [192] - dtype = "float32" - min_val = float("-2.07965") - max_val = float("0.574226") - mean = float("-0.269426") - std = float("0.379863") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [192] - dtype = "float32" - min_val = float("0.3928") - max_val = float("1.96281") - mean = float("0.95567") - std = float("0.310273") - data = None - - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [192] - dtype = "float32" - min_val = float("0.0232563") - max_val = float("879.129") - mean = float("36.8329") - std = float("108.746") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [192] - dtype = "float32" - min_val = float("-17.4775") - max_val = float("7.90044") - mean = float("0.231825") - std = float("2.99101") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.242859") - max_val = float("0.437694") - mean = float("-0.000616003") - std = float("0.0203443") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [192] - dtype = "float32" - min_val = float("-2.74147") - max_val = float("-0.0826416") - mean = float("-1.23743") - std = float("0.434625") - data = None - - -class Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [192] - dtype = "float32" - min_val = float("0.716526") - max_val = float("1.6211") - mean = float("1.15068") - std = float("0.146716") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [192] - dtype = "float32" - min_val = float("0.199091") - max_val = float("12243.2") - mean = float("232.788") - std = float("982.724") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [192] - dtype = "float32" - min_val = float("-2.18119") - max_val = float("8.70009") - mean = float("0.136404") - std = float("1.21707") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.378008") - max_val = float("0.395816") - mean = float("-0.00267221") - std = float("0.0195782") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [192] - dtype 
= "float32" - min_val = float("-1.21764") - max_val = float("0.465113") - mean = float("-0.2312") - std = float("0.342153") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [192] - dtype = "float32" - min_val = float("-0.0716155") - max_val = float("0.68623") - mean = float("0.188329") - std = float("0.126481") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [192] - dtype = "float32" - min_val = float("7.23985e-05") - max_val = float("104.829") - mean = float("2.37577") - std = float("11.3397") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [192] - dtype = "float32" - min_val = float("-3.9469") - max_val = float("5.6855") - mean = float("-0.0147774") - std = float("0.606084") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.721449") - max_val = float("0.592602") - mean = float("0.000724118") - std = float("0.0311054") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [192] - dtype = "float32" - min_val = float("-1.22134") - max_val = float("0.47785") - mean = float("-0.230452") - std = float("0.344237") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [192] - dtype = "float32" - min_val = float("0.379249") - max_val = float("1.5641") - mean = float("0.845207") - std = float("0.264352") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [192] - dtype = "float32" - min_val = float("0.0326402") - max_val = float("1699.91") - mean = float("79.7938") - std = float("203.032") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [192] - dtype = "float32" - min_val = float("-22.9383") - max_val = float("10.8789") - mean = float("0.262372") - std = float("3.76359") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.342571") - max_val = float("0.553615") - mean = float("-0.000835491") - std = float("0.0244072") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [192] - dtype = "float32" - min_val = float("-2.49862") - max_val = float("-0.1334") - mean = float("-1.25035") - std = float("0.418402") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [192] - dtype = "float32" - min_val = float("0.687417") - max_val = float("1.51921") - mean = float("1.12542") - std = float("0.137076") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [192] - dtype = "float32" - min_val = float("0.0234704") - max_val = float("2282.94") - mean = float("135.36") - std = float("361.333") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [192] - dtype = "float32" - min_val = float("-4.30705") - max_val = float("4.49319") - mean = float("-0.0200033") - std = float("0.945224") - data = None - - -class Program_weight_tensor_parameter_568: - name = "parameter_568" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.442987") - max_val = float("0.472427") - mean = float("0.00134724") - std = float("0.0235963") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - 
shape = [192] - dtype = "float32" - min_val = float("-1.21821") - max_val = float("0.512429") - mean = float("-0.167023") - std = float("0.295713") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [192] - dtype = "float32" - min_val = float("-0.0291043") - max_val = float("1.56896") - mean = float("0.237779") - std = float("0.215149") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [192] - dtype = "float32" - min_val = float("7.90699e-05") - max_val = float("49.1783") - mean = float("1.93217") - std = float("6.20102") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [192] - dtype = "float32" - min_val = float("-1.39605") - max_val = float("4.77961") - mean = float("0.0637951") - std = float("0.550877") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.645951") - max_val = float("0.375303") - mean = float("-0.00125196") - std = float("0.0318071") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [192] - dtype = "float32" - min_val = float("-1.2185") - max_val = float("0.527355") - mean = float("-0.166844") - std = float("0.298285") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [192] - dtype = "float32" - min_val = float("0.309676") - max_val = float("1.56341") - mean = float("0.760182") - std = float("0.23226") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [192] - dtype = "float32" - min_val = float("0.0361632") - max_val = float("858.774") - mean = float("64.0277") - std = float("129.822") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [192] - dtype = "float32" - min_val = float("-18.5822") - max_val = float("19.3904") - mean = float("-0.138431") - std = float("4.06149") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.495237") - max_val = float("0.583184") - mean = float("0.000581124") - std = float("0.0274539") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [192] - dtype = "float32" - min_val = float("-1.88035") - max_val = float("-0.215406") - mean = float("-1.14935") - std = float("0.32495") - data = None - - -class Program_weight_tensor_parameter_580: - name = "parameter_580" - shape = [192] - dtype = "float32" - min_val = float("0.788564") - max_val = float("1.75022") - mean = float("1.1191") - std = float("0.142522") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [192] - dtype = "float32" - min_val = float("0.0172999") - max_val = float("311.063") - mean = float("18.5297") - std = float("40.3369") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [192] - dtype = "float32" - min_val = float("-5.09272") - max_val = float("4.68315") - mean = float("0.0566841") - std = float("1.36936") - data = None - - -class Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.220454") - max_val = float("0.337137") - mean = float("-0.000195961") - std = float("0.0169417") - data = None - - -class Program_weight_tensor_parameter_584: - 
name = "parameter_584" - shape = [192] - dtype = "float32" - min_val = float("-2.91697") - max_val = float("1.61235") - mean = float("-0.027192") - std = float("0.752121") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [192] - dtype = "float32" - min_val = float("0.396043") - max_val = float("1.75405") - mean = float("0.900478") - std = float("0.234394") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [192] - dtype = "float32" - min_val = float("0.00833347") - max_val = float("4804.89") - mean = float("67.294") - std = float("384.311") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [192] - dtype = "float32" - min_val = float("-2.08976") - max_val = float("2.12545") - mean = float("-0.027234") - std = float("0.343916") - data = None - - -class Program_weight_tensor_parameter_588: - name = "parameter_588" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-1.42325") - max_val = float("3.22093") - mean = float("-0.000215879") - std = float("0.0689701") - data = None - - -class Program_weight_tensor_parameter_589: - name = "parameter_589" - shape = [192] - dtype = "float32" - min_val = float("-2.96702") - max_val = float("1.70072") - mean = float("0.100509") - std = float("0.666605") - data = None - - -class Program_weight_tensor_parameter_590: - name = "parameter_590" - shape = [192] - dtype = "float32" - min_val = float("0.812343") - max_val = float("5.55805") - mean = float("1.90737") - std = float("0.933933") - data = None - - -class Program_weight_tensor_parameter_591: - name = "parameter_591" - shape = [192] - dtype = "float32" - min_val = float("0.00143576") - max_val = float("202.609") - mean = float("3.23072") - std = float("18.3131") - data = None - - -class Program_weight_tensor_parameter_592: - name = "parameter_592" - shape = [192] - dtype = "float32" - min_val = float("-0.381013") - max_val = float("0.905162") - mean = float("0.0657245") - std = float("0.161239") - data = None - - -class Program_weight_tensor_parameter_593: - name = "parameter_593" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.259077") - max_val = float("0.331409") - mean = float("-0.00132013") - std = float("0.0192623") - data = None - - -class Program_weight_tensor_parameter_594: - name = "parameter_594" - shape = [384] - dtype = "float32" - min_val = float("-2.92371") - max_val = float("1.31882") - mean = float("-0.301972") - std = float("0.563982") - data = None - - -class Program_weight_tensor_parameter_595: - name = "parameter_595" - shape = [384] - dtype = "float32" - min_val = float("0.646526") - max_val = float("2.49291") - mean = float("1.15848") - std = float("0.262959") - data = None - - -class Program_weight_tensor_parameter_596: - name = "parameter_596" - shape = [384] - dtype = "float32" - min_val = float("0.13752") - max_val = float("33554.1") - mean = float("441.904") - std = float("2215.12") - data = None - - -class Program_weight_tensor_parameter_597: - name = "parameter_597" - shape = [384] - dtype = "float32" - min_val = float("-10.6841") - max_val = float("7.24379") - mean = float("0.245326") - std = float("1.24953") - data = None - - -class Program_weight_tensor_parameter_598: - name = "parameter_598" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.420455") - max_val = float("0.352272") - mean = float("-0.00112555") - std = float("0.0151274") - data = None - - -class 
Program_weight_tensor_parameter_599: - name = "parameter_599" - shape = [256] - dtype = "float32" - min_val = float("-2.04434") - max_val = float("1.33309") - mean = float("-0.916794") - std = float("0.542582") - data = None - - -class Program_weight_tensor_parameter_600: - name = "parameter_600" - shape = [256] - dtype = "float32" - min_val = float("0.550387") - max_val = float("2.97247") - mean = float("1.34652") - std = float("0.492025") - data = None - - -class Program_weight_tensor_parameter_601: - name = "parameter_601" - shape = [256] - dtype = "float32" - min_val = float("0.00440378") - max_val = float("105978.0") - mean = float("456.901") - std = float("6614.54") - data = None - - -class Program_weight_tensor_parameter_602: - name = "parameter_602" - shape = [256] - dtype = "float32" - min_val = float("-4.34738") - max_val = float("143.205") - mean = float("1.74977") - std = float("9.28044") - data = None - - -class Program_weight_tensor_parameter_603: - name = "parameter_603" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.869804") - max_val = float("15.9901") - mean = float("0.00715179") - std = float("0.19203") - data = None - - -class Program_weight_tensor_parameter_604: - name = "parameter_604" - shape = [192] - dtype = "float32" - min_val = float("-0.0987519") - max_val = float("0.0663724") - mean = float("-0.0118358") - std = float("0.0235705") - data = None - - -class Program_weight_tensor_parameter_605: - name = "parameter_605" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.405807") - max_val = float("0.264014") - mean = float("-0.0102004") - std = float("0.0319322") - data = None - - -class Program_weight_tensor_parameter_606: - name = "parameter_606" - shape = [96] - dtype = "float32" - min_val = float("-1.91285") - max_val = float("0.546302") - mean = float("-0.205148") - std = float("0.439731") - data = None - - -class Program_weight_tensor_parameter_607: - name = "parameter_607" - shape = [96] - dtype = "float32" - min_val = float("0.0895864") - max_val = float("3.22778") - mean = float("0.637041") - std = float("0.670036") - data = None - - -class Program_weight_tensor_parameter_608: - name = "parameter_608" - shape = [96] - dtype = "float32" - min_val = float("6.87535e-05") - max_val = float("66.5661") - mean = float("1.37836") - std = float("7.15906") - data = None - - -class Program_weight_tensor_parameter_609: - name = "parameter_609" - shape = [96] - dtype = "float32" - min_val = float("-1.39614") - max_val = float("2.38344") - mean = float("0.0320484") - std = float("0.377448") - data = None - - -class Program_weight_tensor_parameter_610: - name = "parameter_610" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-1.24365") - max_val = float("1.18406") - mean = float("-0.000738032") - std = float("0.0621546") - data = None - - -class Program_weight_tensor_parameter_611: - name = "parameter_611" - shape = [96] - dtype = "float32" - min_val = float("-1.91234") - max_val = float("0.55126") - mean = float("-0.203811") - std = float("0.441223") - data = None - - -class Program_weight_tensor_parameter_612: - name = "parameter_612" - shape = [96] - dtype = "float32" - min_val = float("0.335907") - max_val = float("5.47089") - mean = float("1.09302") - std = float("0.887054") - data = None - - -class Program_weight_tensor_parameter_613: - name = "parameter_613" - shape = [96] - dtype = "float32" - min_val = float("0.0187208") - max_val = float("717.249") - mean = float("24.146") - std = float("98.0324") - data = 
None - - -class Program_weight_tensor_parameter_614: - name = "parameter_614" - shape = [96] - dtype = "float32" - min_val = float("-8.11887") - max_val = float("10.6872") - mean = float("0.447085") - std = float("1.98274") - data = None - - -class Program_weight_tensor_parameter_615: - name = "parameter_615" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.64786") - max_val = float("0.610955") - mean = float("-0.00228054") - std = float("0.0317145") - data = None - - -class Program_weight_tensor_parameter_616: - name = "parameter_616" - shape = [96] - dtype = "float32" - min_val = float("-2.48733") - max_val = float("-0.0177181") - mean = float("-1.24064") - std = float("0.462674") - data = None - - -class Program_weight_tensor_parameter_617: - name = "parameter_617" - shape = [96] - dtype = "float32" - min_val = float("0.376749") - max_val = float("1.61027") - mean = float("0.92679") - std = float("0.174674") - data = None - - -class Program_weight_tensor_parameter_618: - name = "parameter_618" - shape = [96] - dtype = "float32" - min_val = float("0.458787") - max_val = float("14051.9") - mean = float("581.565") - std = float("2213.73") - data = None - - -class Program_weight_tensor_parameter_619: - name = "parameter_619" - shape = [96] - dtype = "float32" - min_val = float("-39.2") - max_val = float("92.3483") - mean = float("2.75935") - std = float("16.8796") - data = None - - -class Program_weight_tensor_parameter_620: - name = "parameter_620" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.893357") - max_val = float("1.83963") - mean = float("0.00316801") - std = float("0.0516147") - data = None - - -class Program_weight_tensor_parameter_621: - name = "parameter_621" - shape = [96] - dtype = "float32" - min_val = float("-1.4122") - max_val = float("0.638769") - mean = float("-0.124599") - std = float("0.359237") - data = None - - -class Program_weight_tensor_parameter_622: - name = "parameter_622" - shape = [96] - dtype = "float32" - min_val = float("0.0151502") - max_val = float("1.86528") - mean = float("0.460244") - std = float("0.383531") - data = None - - -class Program_weight_tensor_parameter_623: - name = "parameter_623" - shape = [96] - dtype = "float32" - min_val = float("7.42465e-05") - max_val = float("105.345") - mean = float("2.00551") - std = float("11.8642") - data = None - - -class Program_weight_tensor_parameter_624: - name = "parameter_624" - shape = [96] - dtype = "float32" - min_val = float("-1.33815") - max_val = float("3.52671") - mean = float("0.134428") - std = float("0.582479") - data = None - - -class Program_weight_tensor_parameter_625: - name = "parameter_625" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-7.68975") - max_val = float("6.35168") - mean = float("-0.005773") - std = float("0.180518") - data = None - - -class Program_weight_tensor_parameter_626: - name = "parameter_626" - shape = [96] - dtype = "float32" - min_val = float("-1.41481") - max_val = float("0.679644") - mean = float("-0.122232") - std = float("0.362344") - data = None - - -class Program_weight_tensor_parameter_627: - name = "parameter_627" - shape = [96] - dtype = "float32" - min_val = float("0.363177") - max_val = float("2.32912") - mean = float("0.921647") - std = float("0.446698") - data = None - - -class Program_weight_tensor_parameter_628: - name = "parameter_628" - shape = [96] - dtype = "float32" - min_val = float("0.00667803") - max_val = float("904.461") - mean = float("25.9921") - std = float("124.724") - data = None 
- - -class Program_weight_tensor_parameter_629: - name = "parameter_629" - shape = [96] - dtype = "float32" - min_val = float("-1.07626") - max_val = float("17.9598") - mean = float("1.18124") - std = float("2.65473") - data = None - - -class Program_weight_tensor_parameter_630: - name = "parameter_630" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-1.68483") - max_val = float("0.545277") - mean = float("-0.00668389") - std = float("0.0418904") - data = None - - -class Program_weight_tensor_parameter_631: - name = "parameter_631" - shape = [96] - dtype = "float32" - min_val = float("-3.31429") - max_val = float("0.406389") - mean = float("-1.18503") - std = float("0.560632") - data = None - - -class Program_weight_tensor_parameter_632: - name = "parameter_632" - shape = [96] - dtype = "float32" - min_val = float("0.397342") - max_val = float("1.98469") - mean = float("1.02414") - std = float("0.257601") - data = None - - -class Program_weight_tensor_parameter_633: - name = "parameter_633" - shape = [96] - dtype = "float32" - min_val = float("0.171811") - max_val = float("5118.63") - mean = float("129.006") - std = float("573.108") - data = None - - -class Program_weight_tensor_parameter_634: - name = "parameter_634" - shape = [96] - dtype = "float32" - min_val = float("-16.5294") - max_val = float("56.8048") - mean = float("0.660576") - std = float("9.1639") - data = None - - -class Program_weight_tensor_parameter_635: - name = "parameter_635" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.45496") - max_val = float("1.25223") - mean = float("-0.000778588") - std = float("0.0350886") - data = None - - -class Program_weight_tensor_parameter_636: - name = "parameter_636" - shape = [96] - dtype = "float32" - min_val = float("-1.25001") - max_val = float("0.639741") - mean = float("-0.0989652") - std = float("0.302867") - data = None - - -class Program_weight_tensor_parameter_637: - name = "parameter_637" - shape = [96] - dtype = "float32" - min_val = float("-0.429595") - max_val = float("1.27713") - mean = float("0.306498") - std = float("0.23536") - data = None - - -class Program_weight_tensor_parameter_638: - name = "parameter_638" - shape = [96] - dtype = "float32" - min_val = float("0.000129874") - max_val = float("656.233") - mean = float("8.72779") - std = float("66.7746") - data = None - - -class Program_weight_tensor_parameter_639: - name = "parameter_639" - shape = [96] - dtype = "float32" - min_val = float("-0.700466") - max_val = float("6.05986") - mean = float("0.144363") - std = float("0.725059") - data = None - - -class Program_weight_tensor_parameter_640: - name = "parameter_640" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-4.5365") - max_val = float("0.985421") - mean = float("-0.00920448") - std = float("0.122988") - data = None - - -class Program_weight_tensor_parameter_641: - name = "parameter_641" - shape = [96] - dtype = "float32" - min_val = float("-1.25019") - max_val = float("0.698569") - mean = float("-0.0957422") - std = float("0.306901") - data = None - - -class Program_weight_tensor_parameter_642: - name = "parameter_642" - shape = [96] - dtype = "float32" - min_val = float("-0.268583") - max_val = float("1.67072") - mean = float("0.716015") - std = float("0.312801") - data = None - - -class Program_weight_tensor_parameter_643: - name = "parameter_643" - shape = [96] - dtype = "float32" - min_val = float("0.0242012") - max_val = float("692.616") - mean = float("37.3314") - std = float("101.342") - data = None 
- - -class Program_weight_tensor_parameter_644: - name = "parameter_644" - shape = [96] - dtype = "float32" - min_val = float("-2.63584") - max_val = float("9.84662") - mean = float("0.502801") - std = float("1.7425") - data = None - - -class Program_weight_tensor_parameter_645: - name = "parameter_645" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.71585") - max_val = float("1.8677") - mean = float("-0.00331932") - std = float("0.0418535") - data = None - - -class Program_weight_tensor_parameter_646: - name = "parameter_646" - shape = [96] - dtype = "float32" - min_val = float("-3.58339") - max_val = float("0.283111") - mean = float("-1.13341") - std = float("0.568821") - data = None - - -class Program_weight_tensor_parameter_647: - name = "parameter_647" - shape = [96] - dtype = "float32" - min_val = float("0.448275") - max_val = float("2.19169") - mean = float("1.04969") - std = float("0.242886") - data = None - - -class Program_weight_tensor_parameter_648: - name = "parameter_648" - shape = [96] - dtype = "float32" - min_val = float("0.187804") - max_val = float("13267.2") - mean = float("371.726") - std = float("1684.34") - data = None - - -class Program_weight_tensor_parameter_649: - name = "parameter_649" - shape = [96] - dtype = "float32" - min_val = float("-67.2311") - max_val = float("84.7225") - mean = float("0.0843976") - std = float("14.2964") - data = None - - -class Program_weight_tensor_parameter_650: - name = "parameter_650" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.840055") - max_val = float("0.994105") - mean = float("-0.00223153") - std = float("0.0530689") - data = None - - -class Program_weight_tensor_parameter_651: - name = "parameter_651" - shape = [96] - dtype = "float32" - min_val = float("-0.896599") - max_val = float("0.53856") - mean = float("-0.157416") - std = float("0.295036") - data = None - - -class Program_weight_tensor_parameter_652: - name = "parameter_652" - shape = [96] - dtype = "float32" - min_val = float("0.00576366") - max_val = float("1.40649") - mean = float("0.338961") - std = float("0.222205") - data = None - - -class Program_weight_tensor_parameter_653: - name = "parameter_653" - shape = [96] - dtype = "float32" - min_val = float("0.000125609") - max_val = float("4.926") - mean = float("0.192034") - std = float("0.563473") - data = None - - -class Program_weight_tensor_parameter_654: - name = "parameter_654" - shape = [96] - dtype = "float32" - min_val = float("-1.15434") - max_val = float("1.71827") - mean = float("0.0970238") - std = float("0.329713") - data = None - - -class Program_weight_tensor_parameter_655: - name = "parameter_655" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-1.11751") - max_val = float("1.22849") - mean = float("-0.00515414") - std = float("0.0668462") - data = None - - -class Program_weight_tensor_parameter_656: - name = "parameter_656" - shape = [96] - dtype = "float32" - min_val = float("-0.89747") - max_val = float("0.547756") - mean = float("-0.155531") - std = float("0.300238") - data = None - - -class Program_weight_tensor_parameter_657: - name = "parameter_657" - shape = [96] - dtype = "float32" - min_val = float("-0.680114") - max_val = float("1.77938") - mean = float("0.715642") - std = float("0.343738") - data = None - - -class Program_weight_tensor_parameter_658: - name = "parameter_658" - shape = [96] - dtype = "float32" - min_val = float("0.00962529") - max_val = float("117.592") - mean = float("6.28867") - std = float("17.7652") - data = 
None - - -class Program_weight_tensor_parameter_659: - name = "parameter_659" - shape = [96] - dtype = "float32" - min_val = float("-8.19355") - max_val = float("8.76688") - mean = float("0.523784") - std = float("1.96973") - data = None - - -class Program_weight_tensor_parameter_660: - name = "parameter_660" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.963159") - max_val = float("0.937514") - mean = float("-0.00312149") - std = float("0.0409439") - data = None - - -class Program_weight_tensor_parameter_661: - name = "parameter_661" - shape = [96] - dtype = "float32" - min_val = float("-2.65947") - max_val = float("0.0663366") - mean = float("-1.07481") - std = float("0.49964") - data = None - - -class Program_weight_tensor_parameter_662: - name = "parameter_662" - shape = [96] - dtype = "float32" - min_val = float("0.494054") - max_val = float("1.73711") - mean = float("1.00358") - std = float("0.201714") - data = None - - -class Program_weight_tensor_parameter_663: - name = "parameter_663" - shape = [96] - dtype = "float32" - min_val = float("0.0670117") - max_val = float("108.685") - mean = float("11.7244") - std = float("22.7056") - data = None - - -class Program_weight_tensor_parameter_664: - name = "parameter_664" - shape = [96] - dtype = "float32" - min_val = float("-19.6122") - max_val = float("22.3347") - mean = float("-1.43905") - std = float("6.53676") - data = None - - -class Program_weight_tensor_parameter_665: - name = "parameter_665" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.883886") - max_val = float("0.636549") - mean = float("-0.00434231") - std = float("0.0362072") - data = None - - -class Program_weight_tensor_parameter_666: - name = "parameter_666" - shape = [96] - dtype = "float32" - min_val = float("-0.971987") - max_val = float("0.722959") - mean = float("-0.144632") - std = float("0.309261") - data = None - - -class Program_weight_tensor_parameter_667: - name = "parameter_667" - shape = [96] - dtype = "float32" - min_val = float("-0.272081") - max_val = float("1.61121") - mean = float("0.305009") - std = float("0.272026") - data = None - - -class Program_weight_tensor_parameter_668: - name = "parameter_668" - shape = [96] - dtype = "float32" - min_val = float("0.000543445") - max_val = float("31.5598") - mean = float("1.4504") - std = float("3.93834") - data = None - - -class Program_weight_tensor_parameter_669: - name = "parameter_669" - shape = [96] - dtype = "float32" - min_val = float("-2.05874") - max_val = float("2.35712") - mean = float("0.153436") - std = float("0.577965") - data = None - - -class Program_weight_tensor_parameter_670: - name = "parameter_670" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-1.77571") - max_val = float("0.727339") - mean = float("-0.012376") - std = float("0.0994884") - data = None - - -class Program_weight_tensor_parameter_671: - name = "parameter_671" - shape = [96] - dtype = "float32" - min_val = float("-0.973155") - max_val = float("0.722476") - mean = float("-0.141307") - std = float("0.307416") - data = None - - -class Program_weight_tensor_parameter_672: - name = "parameter_672" - shape = [96] - dtype = "float32" - min_val = float("-0.0263993") - max_val = float("2.25472") - mean = float("0.596976") - std = float("0.320417") - data = None - - -class Program_weight_tensor_parameter_673: - name = "parameter_673" - shape = [96] - dtype = "float32" - min_val = float("0.0138918") - max_val = float("3176.65") - mean = float("50.1448") - std = float("326.27") - 
data = None - - -class Program_weight_tensor_parameter_674: - name = "parameter_674" - shape = [96] - dtype = "float32" - min_val = float("-11.8577") - max_val = float("18.2503") - mean = float("0.529937") - std = float("2.87528") - data = None - - -class Program_weight_tensor_parameter_675: - name = "parameter_675" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-1.3398") - max_val = float("0.471906") - mean = float("-0.00519289") - std = float("0.054962") - data = None - - -class Program_weight_tensor_parameter_676: - name = "parameter_676" - shape = [96] - dtype = "float32" - min_val = float("-3.4571") - max_val = float("0.163505") - mean = float("-1.04781") - std = float("0.565484") - data = None - - -class Program_weight_tensor_parameter_677: - name = "parameter_677" - shape = [96] - dtype = "float32" - min_val = float("0.181655") - max_val = float("2.51355") - mean = float("1.04007") - std = float("0.337096") - data = None - - -class Program_weight_tensor_parameter_678: - name = "parameter_678" - shape = [96] - dtype = "float32" - min_val = float("0.0450568") - max_val = float("591.311") - mean = float("35.261") - std = float("84.194") - data = None - - -class Program_weight_tensor_parameter_679: - name = "parameter_679" - shape = [96] - dtype = "float32" - min_val = float("-53.1661") - max_val = float("29.0565") - mean = float("-0.946483") - std = float("12.2566") - data = None - - -class Program_weight_tensor_parameter_680: - name = "parameter_680" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-1.44326") - max_val = float("0.605019") - mean = float("-0.00302561") - std = float("0.0604205") - data = None - - -class Program_weight_tensor_parameter_681: - name = "parameter_681" - shape = [96] - dtype = "float32" - min_val = float("-0.979853") - max_val = float("0.506673") - mean = float("-0.105704") - std = float("0.301591") - data = None - - -class Program_weight_tensor_parameter_682: - name = "parameter_682" - shape = [96] - dtype = "float32" - min_val = float("-0.690202") - max_val = float("1.30922") - mean = float("0.212369") - std = float("0.326455") - data = None - - -class Program_weight_tensor_parameter_683: - name = "parameter_683" - shape = [96] - dtype = "float32" - min_val = float("0.00319137") - max_val = float("122.847") - mean = float("4.41125") - std = float("14.7791") - data = None - - -class Program_weight_tensor_parameter_684: - name = "parameter_684" - shape = [96] - dtype = "float32" - min_val = float("-1.63063") - max_val = float("0.873704") - mean = float("-0.0808103") - std = float("0.375139") - data = None - - -class Program_weight_tensor_parameter_685: - name = "parameter_685" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-1.21185") - max_val = float("1.0658") - mean = float("0.00855805") - std = float("0.102244") - data = None - - -class Program_weight_tensor_parameter_686: - name = "parameter_686" - shape = [96] - dtype = "float32" - min_val = float("-0.929882") - max_val = float("0.508894") - mean = float("-0.0994689") - std = float("0.294231") - data = None - - -class Program_weight_tensor_parameter_687: - name = "parameter_687" - shape = [96] - dtype = "float32" - min_val = float("-0.652054") - max_val = float("1.42808") - mean = float("0.46451") - std = float("0.343141") - data = None - - -class Program_weight_tensor_parameter_688: - name = "parameter_688" - shape = [96] - dtype = "float32" - min_val = float("0.0905742") - max_val = float("4913.13") - mean = float("111.121") - std = float("522.181") - 
data = None - - -class Program_weight_tensor_parameter_689: - name = "parameter_689" - shape = [96] - dtype = "float32" - min_val = float("-6.23794") - max_val = float("20.563") - mean = float("0.310017") - std = float("2.71574") - data = None - - -class Program_weight_tensor_parameter_690: - name = "parameter_690" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-1.49733") - max_val = float("0.706858") - mean = float("-0.0040344") - std = float("0.0702472") - data = None - - -class Program_weight_tensor_parameter_691: - name = "parameter_691" - shape = [96] - dtype = "float32" - min_val = float("-2.44345") - max_val = float("0.44985") - mean = float("-0.86721") - std = float("0.476335") - data = None - - -class Program_weight_tensor_parameter_692: - name = "parameter_692" - shape = [96] - dtype = "float32" - min_val = float("0.709681") - max_val = float("2.12223") - mean = float("1.25465") - std = float("0.222513") - data = None - - -class Program_weight_tensor_parameter_693: - name = "parameter_693" - shape = [96] - dtype = "float32" - min_val = float("0.0168317") - max_val = float("328.239") - mean = float("23.0774") - std = float("55.6159") - data = None - - -class Program_weight_tensor_parameter_694: - name = "parameter_694" - shape = [96] - dtype = "float32" - min_val = float("-50.6514") - max_val = float("40.9589") - mean = float("0.723976") - std = float("11.3687") - data = None - - -class Program_weight_tensor_parameter_695: - name = "parameter_695" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.849899") - max_val = float("0.852562") - mean = float("-0.000185799") - std = float("0.0645014") - data = None - - -class Program_weight_tensor_parameter_696: - name = "parameter_696" - shape = [96] - dtype = "float32" - min_val = float("-3.17223") - max_val = float("1.90963") - mean = float("0.534742") - std = float("0.924546") - data = None - - -class Program_weight_tensor_parameter_697: - name = "parameter_697" - shape = [96] - dtype = "float32" - min_val = float("-1.03377") - max_val = float("2.62632") - mean = float("0.492338") - std = float("0.520207") - data = None - - -class Program_weight_tensor_parameter_698: - name = "parameter_698" - shape = [96] - dtype = "float32" - min_val = float("0.00977803") - max_val = float("734.582") - mean = float("37.8") - std = float("108.323") - data = None - - -class Program_weight_tensor_parameter_699: - name = "parameter_699" - shape = [96] - dtype = "float32" - min_val = float("-7.31017") - max_val = float("14.4237") - mean = float("-0.228157") - std = float("2.72443") - data = None - - -class Program_weight_tensor_parameter_700: - name = "parameter_700" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-1.95633") - max_val = float("1.65436") - mean = float("-0.017819") - std = float("0.179031") - data = None - - -class Program_weight_tensor_parameter_701: - name = "parameter_701" - shape = [96] - dtype = "float32" - min_val = float("-4.92471") - max_val = float("1.58003") - mean = float("0.384968") - std = float("1.04986") - data = None - - -class Program_weight_tensor_parameter_702: - name = "parameter_702" - shape = [96] - dtype = "float32" - min_val = float("0.381893") - max_val = float("6.77746") - mean = float("1.68003") - std = float("1.3141") - data = None - - -class Program_weight_tensor_parameter_703: - name = "parameter_703" - shape = [96] - dtype = "float32" - min_val = float("0.0102556") - max_val = float("63.1576") - mean = float("1.94865") - std = float("8.72854") - data = None - 
- -class Program_weight_tensor_parameter_704: - name = "parameter_704" - shape = [96] - dtype = "float32" - min_val = float("-1.81576") - max_val = float("1.31792") - mean = float("0.00314065") - std = float("0.477093") - data = None - - -class Program_weight_tensor_parameter_705: - name = "parameter_705" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.476384") - max_val = float("0.704714") - mean = float("-0.00467472") - std = float("0.0415664") - data = None - - -class Program_weight_tensor_parameter_706: - name = "parameter_706" - shape = [192] - dtype = "float32" - min_val = float("-2.2747") - max_val = float("1.80222") - mean = float("-0.130603") - std = float("0.756459") - data = None - - -class Program_weight_tensor_parameter_707: - name = "parameter_707" - shape = [192] - dtype = "float32" - min_val = float("0.466035") - max_val = float("2.96638") - mean = float("1.07147") - std = float("0.320539") - data = None - - -class Program_weight_tensor_parameter_708: - name = "parameter_708" - shape = [192] - dtype = "float32" - min_val = float("0.0464299") - max_val = float("1042.66") - mean = float("38.2298") - std = float("121.792") - data = None - - -class Program_weight_tensor_parameter_709: - name = "parameter_709" - shape = [192] - dtype = "float32" - min_val = float("-9.56391") - max_val = float("27.4888") - mean = float("0.648476") - std = float("3.1584") - data = None - - -class Program_weight_tensor_parameter_710: - name = "parameter_710" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-2.4927") - max_val = float("0.935874") - mean = float("-0.00472509") - std = float("0.0589256") - data = None - - -class Program_weight_tensor_parameter_711: - name = "parameter_711" - shape = [128] - dtype = "float32" - min_val = float("-2.8142") - max_val = float("1.94884") - mean = float("-0.734722") - std = float("0.677613") - data = None - - -class Program_weight_tensor_parameter_712: - name = "parameter_712" - shape = [128] - dtype = "float32" - min_val = float("-0.229859") - max_val = float("2.99053") - mean = float("0.992786") - std = float("0.396784") - data = None - - -class Program_weight_tensor_parameter_713: - name = "parameter_713" - shape = [128] - dtype = "float32" - min_val = float("2.39451") - max_val = float("125679.0") - mean = float("4054.76") - std = float("14301.4") - data = None - - -class Program_weight_tensor_parameter_714: - name = "parameter_714" - shape = [128] - dtype = "float32" - min_val = float("-271.424") - max_val = float("158.592") - mean = float("-11.2378") - std = float("49.0082") - data = None - - -class Program_weight_tensor_parameter_715: - name = "parameter_715" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-5.10604") - max_val = float("3.19008") - mean = float("-0.0220007") - std = float("0.265626") - data = None - - -class Program_weight_tensor_parameter_716: - name = "parameter_716" - shape = [96] - dtype = "float32" - min_val = float("-0.161611") - max_val = float("0.37311") - mean = float("0.00734357") - std = float("0.0624152") - data = None - - -class Program_weight_tensor_parameter_717: - name = "parameter_717" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-1.50057") - max_val = float("1.5759") - mean = float("0.00741555") - std = float("0.12383") - data = None - - -class Program_weight_tensor_parameter_718: - name = "parameter_718" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_719: - name = "parameter_719" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_720: - name = "parameter_720" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_721: - name = "parameter_721" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_722: - name = "parameter_722" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-2.83139") - max_val = float("3.12402") - mean = float("0.00934544") - std = float("0.243272") - data = None - - -class Program_weight_tensor_parameter_723: - name = "parameter_723" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_724: - name = "parameter_724" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_725: - name = "parameter_725" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_726: - name = "parameter_726" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_727: - name = "parameter_727" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-1.103") - max_val = float("1.0228") - mean = float("0.00321872") - std = float("0.113578") - data = None - - -class Program_weight_tensor_parameter_728: - name = "parameter_728" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_729: - name = "parameter_729" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_730: - name = "parameter_730" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_731: - name = "parameter_731" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_732: - name = "parameter_732" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-2.92076") - max_val = float("1.62643") - mean = float("-0.0145573") - std = float("0.236441") - data = None - - -class Program_weight_tensor_parameter_733: - name = "parameter_733" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_734: - name = "parameter_734" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_735: - name = "parameter_735" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_736: - name = "parameter_736" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_737: - name = "parameter_737" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-1.61531") - max_val = float("0.722945") - mean = float("-0.0152863") - std = float("0.122929") - data = None - - -class 
Program_weight_tensor_parameter_738: - name = "parameter_738" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_739: - name = "parameter_739" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_740: - name = "parameter_740" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_741: - name = "parameter_741" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_742: - name = "parameter_742" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-9.07419") - max_val = float("3.91485") - mean = float("-0.0791542") - std = float("0.545463") - data = None - - -class Program_weight_tensor_parameter_743: - name = "parameter_743" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_744: - name = "parameter_744" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_745: - name = "parameter_745" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_746: - name = "parameter_746" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_747: - name = "parameter_747" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-6.23405") - max_val = float("6.69712") - mean = float("-0.00447183") - std = float("0.74018") - data = None - - -class Program_weight_tensor_parameter_748: - name = "parameter_748" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_749: - name = "parameter_749" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_750: - name = "parameter_750" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_751: - name = "parameter_751" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_752: - name = "parameter_752" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-7.99359") - max_val = float("7.76186") - mean = float("0.0820972") - std = float("1.09773") - data = None - - -class Program_weight_tensor_parameter_753: - name = "parameter_753" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_754: - name = "parameter_754" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_755: - name = "parameter_755" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_756: - name = "parameter_756" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_757: - name = "parameter_757" - shape = [48, 48, 3, 3] - dtype = 
"float32" - min_val = float("-10.0014") - max_val = float("5.47988") - mean = float("0.0621421") - std = float("0.658574") - data = None - - -class Program_weight_tensor_parameter_758: - name = "parameter_758" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_759: - name = "parameter_759" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_760: - name = "parameter_760" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_761: - name = "parameter_761" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_762: - name = "parameter_762" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-12.9982") - max_val = float("11.0176") - mean = float("-0.0784427") - std = float("1.11962") - data = None - - -class Program_weight_tensor_parameter_763: - name = "parameter_763" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_764: - name = "parameter_764" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_765: - name = "parameter_765" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_766: - name = "parameter_766" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_767: - name = "parameter_767" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-59.2065") - max_val = float("44.9563") - mean = float("-0.705466") - std = float("5.28086") - data = None - - -class Program_weight_tensor_parameter_768: - name = "parameter_768" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_769: - name = "parameter_769" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_770: - name = "parameter_770" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_771: - name = "parameter_771" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_772: - name = "parameter_772" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-2.11481") - max_val = float("1.29398") - mean = float("-0.0184801") - std = float("0.17633") - data = None - - -class Program_weight_tensor_parameter_773: - name = "parameter_773" - shape = [96] - dtype = "float32" - min_val = float("-8.09242") - max_val = float("7.25992") - mean = float("0.75209") - std = float("2.67553") - data = None - - -class Program_weight_tensor_parameter_774: - name = "parameter_774" - shape = [96] - dtype = "float32" - min_val = float("-8.02644") - max_val = float("10.6649") - mean = float("2.05862") - std = float("3.39567") - data = None - - -class Program_weight_tensor_parameter_775: - name = "parameter_775" - shape = [96] - dtype = "float32" - min_val = float("544.401") - max_val = float("9795570.0") - mean = 
float("868764.0") - std = float("1759410.0") - data = None - - -class Program_weight_tensor_parameter_776: - name = "parameter_776" - shape = [96] - dtype = "float32" - min_val = float("-3150.84") - max_val = float("1801.7") - mean = float("-292.814") - std = float("845.923") - data = None - - -class Program_weight_tensor_parameter_777: - name = "parameter_777" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-16.205") - max_val = float("9.48416") - mean = float("-0.195033") - std = float("1.32428") - data = None - - -class Program_weight_tensor_parameter_778: - name = "parameter_778" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_779: - name = "parameter_779" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_780: - name = "parameter_780" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_781: - name = "parameter_781" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_782: - name = "parameter_782" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-39.064") - max_val = float("47.84") - mean = float("0.203101") - std = float("4.43152") - data = None - - -class Program_weight_tensor_parameter_783: - name = "parameter_783" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_784: - name = "parameter_784" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_785: - name = "parameter_785" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_786: - name = "parameter_786" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_787: - name = "parameter_787" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-18.7778") - max_val = float("17.8983") - mean = float("-0.451057") - std = float("2.25346") - data = None - - -class Program_weight_tensor_parameter_788: - name = "parameter_788" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_789: - name = "parameter_789" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_790: - name = "parameter_790" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_791: - name = "parameter_791" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_792: - name = "parameter_792" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-8.84763") - max_val = float("9.81836") - mean = float("-0.13036") - std = float("2.17947") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py similarity index 100% rename from 
paddle_samples/PaddleX/PP-YOLOE_plus_SOD-L/subgraph_13/input_meta.py
rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py
similarity index 100%
rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_16/weight_meta.py
rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py
new file mode 100644
index 000000000..d17aba14e
--- /dev/null
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py
@@ -0,0 +1,28 @@
+class Program_weight_tensor_data_0:
+    name = "data_0"
+    shape = [1, 48384, 10]
+    dtype = "float32"
+    min_val = float("1.08574e-08")
+    max_val = float("0.85674")
+    mean = float("0.00225546")
+    std = float("0.010105")
+    data = None
+
+
+class Program_weight_tensor_data_1:
+    name = "data_1"
+    shape = [1, 48384]
+    dtype = "int32"
+    min_val = 0
+    max_val = 10
+    data = None
+
+
+class Program_weight_tensor_data_2:
+    name = "data_2"
+    shape = [1, 48384, 10]
+    dtype = "float32"
+    max_val = float("0.949472")
+    mean = float("0.000146427")
+    std = float("0.0089493")
+    data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py
similarity index 100%
rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_6/weight_meta.py
rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_11/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py
diff --git a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py
similarity index 64%
rename from paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py
rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py
index d1d51e8d6..6041bc113 100644
--- a/paddle_samples/PaddleX/PP-YOLOE-L_vehicle/subgraph_14/input_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py
@@ -11,10 +11,10 @@ class Program_weight_tensor_data_1:
     name = "data_1"
     shape = [2, 3549, 4]
     dtype = "float32"
-    min_val = float("-8.35609")
-    max_val = float("59.0717")
-    mean = float("22.5856")
-    std = float("15.7449")
+    min_val = float("-3.52679")
+    max_val = float("54.3718")
+    mean = float("22.566")
+    std = float("15.0427")
     data = None
 
 
@@ -22,19 +22,19 @@ class Program_weight_tensor_data_2:
     name = "data_2"
     shape = [2, 3549, 4]
     dtype = "float32"
-    max_val = float("42.0071")
-    mean = float("21.2277")
-    std = float("13.4889")
+    max_val = float("51.0602")
+    mean = float("19.8807")
+    std = float("16.6824")
     data = None
 
 
 class Program_weight_tensor_data_3:
     name = "data_3"
-    shape = [2, 3549, 4]
+    shape = [2, 3549, 10]
     dtype = "float32"
-    max_val = float("0.923099")
-    mean = float("0.00145319")
-    std = float("0.0330365")
+    max_val = float("0.980323")
+    mean = float("0.000730272")
+    std = float("0.020635")
float("0.020635") data = None @@ -42,17 +42,17 @@ class Program_weight_tensor_data_4: name = "data_4" shape = [] dtype = "float32" - data = [41.2588] + data = [51.8347] class Program_weight_tensor_data_5: name = "data_5" - shape = [2, 3549, 68] + shape = [2, 3549, 40] dtype = "float32" - min_val = float("-3.91208") - max_val = float("10.707") - mean = float("3.11295e-05") - std = float("1.44354") + min_val = float("-11.2006") + max_val = float("19.6674") + mean = float("0.798417") + std = float("2.05193") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_8/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_12/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt deleted file mode 100644 index 896fa94fd..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -66b8a266cc256b5299b432a3f1cf5582a5f9a5321ba1505da3c19b3bc24f5624 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json deleted file mode 100644 index 6ce3cf9a5..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-S", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py deleted file mode 100644 index 5d4c5e86c..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/model.py +++ /dev/null @@ -1,94 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(data_0, 2, full_0) - del data_0, full_0 - - # builtin.split: (1x8400x2xf32, 1x8400x2xf32) <- ([1x8400x2xf32, 1x8400x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (1x8400x2xf32) <- (1x8400x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_1, float("0"), True) - del full_1, split_0 - - # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32) - add_0 = paddle._C_ops.add(scale_0, data_1) - del scale_0 - - # pd_op.add: (1x8400x2xf32) <- (1x8400x2xf32, 8400x2xf32) - add_1 = paddle._C_ops.add(split_1, data_1) - del data_1, split_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([1x8400x2xf32, 1x8400x2xf32]) <- (1x8400x2xf32, 1x8400x2xf32) - combine_0 = [add_0, add_1] - del add_0, add_1 - - # pd_op.concat: (1x8400x4xf32) <- ([1x8400x2xf32, 1x8400x2xf32], 
1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.multiply: (1x8400x4xf32) <- (1x8400x4xf32, 8400x1xf32) - multiply_0 = paddle._C_ops.multiply(concat_0, data_2) - del concat_0, data_2 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x1xf32, 1x1xf32]) <- (1x2xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(data_3, 2, full_3) - del data_3, full_3 - - # builtin.split: (1x1xf32, 1x1xf32) <- ([1x1xf32, 1x1xf32]) - ( - split_2, - split_3, - ) = split_with_num_1 - del split_with_num_1 - - # builtin.combine: ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32]) <- (1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32) - combine_1 = [split_3, split_2, split_3, split_2] - del split_2, split_3 - - # pd_op.concat: (1x4xf32) <- ([1x1xf32, 1x1xf32, 1x1xf32, 1x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_2) - del combine_1, full_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_0 = [-1, 1, 4] - - # pd_op.reshape: (1x1x4xf32) <- (1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(concat_1, full_int_array_0) - del concat_1, full_int_array_0 - - # pd_op.divide: (1x8400x4xf32) <- (1x8400x4xf32, 1x1x4xf32) - divide_0 = paddle._C_ops.divide(multiply_0, reshape_0) - del multiply_0, reshape_0 - - return divide_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_17/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt deleted file mode 100644 index bfa76b594..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -fa6eb9b0757bd8e932b3ff14c3ba9c809889599d2cfbb8e5c4dc486ee670a158 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json deleted file mode 100644 index 6ce3cf9a5..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-S", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py deleted file mode 100644 index b1ee32a68..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/input_meta.py +++ /dev/null @@ -1,138 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [8] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [4725] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [2, 8, 4725] - dtype = "float32" - max_val = float("1.0") - mean = float("0.00149471") - std = float("0.0386326") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 1] - dtype = "int32" - data = [0, 1] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 8, 1] - dtype = "int32" - data = [3, 4, 8, 0, 0, 0, 0, 0, 3, 3, 3, 8, 8, 0, 5, 5] - - -class 
Program_weight_tensor_data_5: - name = "data_5" - shape = [2, 4725] - dtype = "float32" - max_val = float("1.0") - mean = float("0.0119577") - std = float("0.108695") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [2, 8, 4] - dtype = "float32" - data = [ - 10.4673, - 88.2581, - 29.9065, - 109.935, - 86.729, - 34.0645, - 116.636, - 55.7419, - 231.776, - 374.71, - 480.0, - 453.677, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 24.5839, - 49.1873, - 36.2612, - 100.071, - 50.3969, - 84.8057, - 63.3035, - 147.562, - 25.8131, - 408.763, - 46.0947, - 480.0, - 65.1472, - 242.544, - 89.1165, - 480.0, - 296.85, - 351.095, - 326.351, - 478.304, - 280.256, - 120.424, - 283.944, - 150.954, - 251.985, - 61.0601, - 275.954, - 169.611, - 236.62, - 0.0, - 263.047, - 79.7173, - ] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [2, 8, 4725] - dtype = "float32" - max_val = float("0.049982") - mean = float("1.7069e-06") - std = float("0.00021642") - data = None - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [2, 8, 4725] - dtype = "float32" - max_val = float("0.886292") - mean = float("0.0026412") - std = float("0.0230532") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py deleted file mode 100644 index 4307988eb..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/model.py +++ /dev/null @@ -1,212 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 - ): - # pd_op.full: (1xi64) <- () - full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) - del full_0 - - # pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 - - # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) - multiply_1 = paddle._C_ops.multiply(data_3, cast_0) - del cast_0, data_3 - - # pd_op.cast: (2x1xi64) <- (2x1xi32) - cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) - del multiply_1 - - # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_1) - del argmax_0, cast_1 - - # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (-1xi64) <- (2x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) - del flatten_0 - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("2"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_2, data_1] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) - del gather_0, stack_0 - - # pd_op.full: (xf32) <- () - full_3 = paddle._C_ops.full( - [], float("0"), paddle.float32, 
paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) - del data_5, full_3 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("10"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() - ) - del full_4 - - # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) - del data_6, full_int_array_0 - - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) - del flatten_1, full_1, reshape_2 - - # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_1 = [full_2, data_1, full_5] - del data_1, full_2, full_5 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) - del gather_1, stack_1 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_6, where_0.dtype), full_6 - ) - del full_6 - - # pd_op.full: (10xi64) <- () - full_7 = paddle._C_ops.full( - [10], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (10xi64) <- (10xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_7, - [10], - paddle.int64, - [ - float("0"), - float("1"), - float("2"), - float("3"), - float("4"), - float("5"), - float("6"), - float("7"), - float("8"), - float("9"), - ], - paddle.framework._current_expected_place(), - ) - del full_7 - - # pd_op.index_select: (2x-1x10xf32) <- (2x-1x11xf32, 10xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, data_2) - del data_7 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) - - # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_8, data_2) - del data_2, data_8 - - # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) - del multiply_3 - - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) - del full_8, max_0 - - # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_0) - del multiply_2, scale_0 - - # pd_op.multiply: 
(2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) - del full_int_array_2, multiply_4 - - # pd_op.unsqueeze: (2x-1x1xf32) <- (2x-1xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) - del full_int_array_1, max_2 - - # pd_op.multiply: (2x-1x10xf32) <- (2x-1x10xf32, 2x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_18/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py similarity index 54% rename from paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py index aba4e15ed..b3f3bee9e 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L/input_meta.py @@ -1,39 +1,39 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1, 48384, 10] + shape = [2, 12096, 10] dtype = "float32" - min_val = float("1.08574e-08") - max_val = float("0.85674") - mean = float("0.00225546") - std = float("0.010105") + min_val = float("5.85845e-11") + max_val = float("0.891047") + mean = float("0.00620757") + std = float("0.0222101") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [1, 48384, 88] + shape = [2, 12096, 40] dtype = "float32" - min_val = float("-3.34129") - max_val = float("13.1745") - mean = float("0.828078") - std = float("1.50377") + min_val = float("-15.4735") + max_val = float("25.7451") + mean = float("0.798417") + std = float("2.11877") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [48384, 2] + shape = [12096, 2] dtype = "float32" min_val = float("4.0") - max_val = float("1532.0") - mean = float("768.0") - std = float("443.391") + max_val = float("764.0") + mean = float("384.0") + std = float("221.675") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [48384, 1] + shape = [12096, 1] dtype = "float32" min_val = float("8.0") max_val = float("32.0") diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py similarity index 85% rename from paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py index 28198680e..88fef0bea 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus-M/subgraph_7/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_2/shape_patches_PP-YOLOE_plus_SOD-L/weight_meta.py @@ -1,6 +1,6 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" - shape = 
[1, 17, 1, 1] + shape = [1, 10, 1, 1] dtype = "float32" min_val = float("0") max_val = float("0.5") diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt index cf9cecf24..bfa76b594 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/graph_hash.txt @@ -1 +1 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file +fa6eb9b0757bd8e932b3ff14c3ba9c809889599d2cfbb8e5c4dc486ee670a158 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py index 84df7ef74..b1ee32a68 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/input_meta.py @@ -2,72 +2,137 @@ class Program_weight_tensor_data_0: name = "data_0" shape = [] dtype = "int64" - data = [15] + data = [8] class Program_weight_tensor_data_1: name = "data_1" shape = [] dtype = "int64" - data = [15] + data = [4725] class Program_weight_tensor_data_2: name = "data_2" - shape = [] - dtype = "int64" - data = [30] + shape = [2, 8, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00149471") + std = float("0.0386326") + data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [] - dtype = "int64" - data = [30] + shape = [2, 1] + dtype = "int32" + data = [0, 1] class Program_weight_tensor_data_4: name = "data_4" - shape = [] - dtype = "int64" - data = [60] + shape = [2, 8, 1] + dtype = "int32" + data = [3, 4, 8, 0, 0, 0, 0, 0, 3, 3, 3, 8, 8, 0, 5, 5] class Program_weight_tensor_data_5: name = "data_5" - shape = [] - dtype = "int64" - data = [60] + shape = [2, 4725] + dtype = "float32" + max_val = float("1.0") + mean = float("0.0119577") + std = float("0.108695") + data = None class Program_weight_tensor_data_6: name = "data_6" - shape = [2, 384, 15, 15] + shape = [2, 8, 4] dtype = "float32" - min_val = float("-0.278465") - max_val = float("6.73963") - mean = float("0.347025") - std = float("0.708791") - data = None + data = [ + 10.4673, + 88.2581, + 29.9065, + 109.935, + 86.729, + 34.0645, + 116.636, + 55.7419, + 231.776, + 374.71, + 480.0, + 453.677, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 24.5839, + 49.1873, + 36.2612, + 100.071, + 50.3969, + 84.8057, + 63.3035, + 147.562, + 25.8131, + 408.763, + 46.0947, + 480.0, + 65.1472, + 242.544, + 89.1165, + 480.0, + 296.85, + 351.095, + 326.351, + 478.304, + 280.256, + 120.424, + 283.944, + 150.954, + 251.985, + 61.0601, + 275.954, + 169.611, + 236.62, + 0.0, + 263.047, + 79.7173, + ] class Program_weight_tensor_data_7: name = "data_7" - shape = [2, 192, 30, 30] + shape = [2, 8, 4725] dtype = "float32" - min_val = float("-0.278465") - max_val = float("10.0545") - mean = float("0.469318") - std = float("0.798413") + max_val = float("0.049982") + mean = float("1.7069e-06") + std = float("0.00021642") data = None class Program_weight_tensor_data_8: name = "data_8" - shape = [2, 96, 60, 60] + shape = [2, 8, 4725] dtype = "float32" - min_val = float("-0.278465") - max_val = float("14.5401") - mean = float("0.60308") - std = float("0.85045") + max_val = float("0.886292") + mean = float("0.0026412") + std = float("0.0230532") data = None diff --git 
a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py index 7cfbed716..4307988eb 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/model.py @@ -6,1139 +6,207 @@ def __init__(self): super().__init__() def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, + self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 ): # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) - del arange_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) - del cast_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("32"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.argmax: (2x-1xi64) <- (2x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_2, full_0, False, False, paddle.int64) + del full_0 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) del data_0 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del 
meshgrid_0 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 + # pd_op.multiply: (2x1xi32) <- (2x1xi32, xi32) + multiply_1 = paddle._C_ops.multiply(data_3, cast_0) + del cast_0, data_3 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 + # pd_op.cast: (2x1xi64) <- (2x1xi32) + cast_1 = paddle._C_ops.cast(multiply_1, paddle.int64) + del multiply_1 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 + # pd_op.add: (2x-1xi64) <- (2x-1xi64, 2x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_1) + del argmax_0, cast_1 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] + # pd_op.flatten: (-1xi32) <- (2x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) + del data_4 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] + # pd_op.flatten: (-1xi64) <- (2x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.full: (1xi32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - del shape64_0 + + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_1) + del flatten_0 # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() + full_2 = paddle._C_ops.full( + [], float("2"), paddle.int64, paddle.core.CPUPlace() ) # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] + combine_0 = [full_2, data_1] # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 - ) - del full_3, stack_2 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # 
pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_7 = [slice_1, full_4] + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 + # pd_op.reshape: (2x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_0) + del gather_0, stack_0 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, 
paddle.framework._current_expected_place() ) - del full_5, stack_5 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 + # pd_op.greater_than: (2x-1xb) <- (2x-1xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_5, full_3) + del data_5, full_3 # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 - - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) - - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 - - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 - - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] - del split_4, split_5 - - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.full_like: (2x-1xi32) <- (2x-1xi32, 1xf32) + full_like_0 = 
paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() ) - del full_int_array_2, full_int_array_3, shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] del full_4 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 - ) - del full_6, stack_8 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 - - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, full_7) - del combine_12 - - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 - - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 - - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 + # pd_op.where: (2x-1xi32) <- (2x-1xb, 2x-1xi32, 2x-1xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + del full_like_0, greater_than_0, reshape_1 # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 - - # pd_op.pool2d: (2x384x1x1xf32) <- (2x384x-1x-1xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) - del parameter_52 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: 
f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) - - # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x384x-1x-1xf32, 10x384x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) - del parameter_45 - - # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (2x384x1x1xf32) <- (2x384x1x1xf32, 384x384x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) - del parameter_43 - - # pd_op.add: (2x384x1x1xf32) <- (2x384x1x1xf32, 1x384x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (2x384x1x1xf32) <- (2x384x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 2x384x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 - - # pd_op.conv2d: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (2x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (2x384x-1x-1xf32) <- (2x384x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x384x-1x-1xf32, 40x384x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) - del parameter_36 - - # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.flatten: (2x40x-1xf32) <- 
(2x40x-1x-1xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (2x192x1x1xf32) <- (2x192x-1x-1xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) - del parameter_34 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + full_int_array_0 = [-1, 4] - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 + # pd_op.reshape: (-1x4xf32) <- (2x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_6, full_int_array_0) + del data_6, full_int_array_0 - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_1) + del flatten_1, full_1, reshape_2 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (xi64) <- () + full_5 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() ) - del parameter_33 - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_29, parameter_30, parameter_31, parameter_32 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_1 = [full_2, data_1, full_5] + del data_1, full_2, full_5 - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 - # pd_op.add: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) + # pd_op.reshape: (2x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_1) + del gather_1, stack_1 - # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x192x-1x-1xf32, 10x192x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_28 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) - del parameter_27 - # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, 
reshape_11) - - # pd_op.conv2d: (2x192x1x1xf32) <- (2x192x1x1xf32, 192x192x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.one_hot: (2x-1x11xf32) <- (2x-1xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_6, where_0.dtype), full_6 ) - del parameter_26 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) - del parameter_25 - - # pd_op.add: (2x192x1x1xf32) <- (2x192x1x1xf32, 1x192x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) - - # pd_op.sigmoid: (2x192x1x1xf32) <- (2x192x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 + del full_6 - # pd_op.multiply: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 2x192x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) + # pd_op.full: (10xi64) <- () + full_7 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_7, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_7 + + # pd_op.index_select: (2x-1x10xf32) <- (2x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_2 = paddle._C_ops.multiply(data_7, data_2) del data_7 - # pd_op.conv2d: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (2x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.swish: (2x192x-1x-1xf32) <- (2x192x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x192x-1x-1xf32, 40x192x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) - del parameter_18 - - # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: 
(2x-1x40xf32) <- (2x40x-1xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (2x96x1x1xf32) <- (2x96x-1x-1xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) - del parameter_16 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) - - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.add: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) - - # pd_op.conv2d: (2x10x-1x-1xf32) <- (2x96x-1x-1xf32, 10x96x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_10 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) - del parameter_9 - - # pd_op.add: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32, 1x10x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (2x96x1x1xf32) <- (2x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_8 - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) - del parameter_7 - - # pd_op.add: (2x96x1x1xf32) <- (2x96x1x1xf32, 1x96x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) - - # pd_op.sigmoid: (2x96x1x1xf32) <- (2x96x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 - - # pd_op.multiply: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 2x96x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [-1] - # pd_op.conv2d: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 
1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_1, True) - # pd_op.batch_norm_: (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (2x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, data_2) + del data_2, data_8 - # pd_op.swish: (2x96x-1x-1xf32) <- (2x96x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) + # pd_op.max: (2x-1x1xf32) <- (2x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_1, True) + del multiply_3 - # pd_op.conv2d: (2x40x-1x-1xf32) <- (2x96x-1x-1xf32, 40x96x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_1 - - # pd_op.reshape: (1x40x1x1xf32) <- (40xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 - - # pd_op.add: (2x40x-1x-1xf32) <- (2x40x-1x-1xf32, 1x40x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - - # pd_op.sigmoid: (2x10x-1x-1xf32) <- (2x10x-1x-1xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 - - # pd_op.flatten: (2x10x-1xf32) <- (2x10x-1x-1xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - # pd_op.transpose: (2x-1x10xf32) <- (2x10x-1xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 + # pd_op.scale: (2x-1x1xf32) <- (2x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_8, float("1e-09"), True) + del full_8, max_0 - # pd_op.flatten: (2x40x-1xf32) <- (2x40x-1x-1xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + # pd_op.divide: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_0) + del multiply_2, scale_0 - # pd_op.transpose: (2x-1x40xf32) <- (2x40x-1xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 + # pd_op.multiply: (2x-1x-1xf32) <- (2x-1x-1xf32, 2x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 - - # builtin.combine: ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32]) <- (2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] - # pd_op.concat: (2x-1x10xf32) <- ([2x-1x10xf32, 2x-1x10xf32, 2x-1x10xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 + # pd_op.max: (2x-1xf32) <- (2x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 - # builtin.combine: ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32]) <- (2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] + # pd_op.unsqueeze: 
(2x-1x1xf32) <- (2x-1xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_1) + del full_int_array_1, max_2 - # pd_op.concat: (2x-1x40xf32) <- ([2x-1x40xf32, 2x-1x40xf32, 2x-1x40xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, - assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_16, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_8, - full_int_array_4, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - slice_0, - slice_1, - slice_2, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) + # pd_op.multiply: (2x-1x10xf32) <- (2x-1x10xf32, 2x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 - return concat_0, concat_1, concat_2, concat_3, concat_4 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py index cd9bb1db0..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_5/weight_meta.py @@ -1,580 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [40, 96, 3, 3] - dtype = "float32" - min_val = float("-0.220386") - max_val = float("0.222309") - mean = float("2.62808e-08") - std = float("0.0161374") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [96] - dtype = "float32" - min_val = float("-0.127121") - max_val = float("0.345832") - mean = float("0.108268") - std = float("0.11199") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [96] - dtype = "float32" - min_val = float("0.947191") - max_val = float("2.26117") - mean = float("1.5267") - std = float("0.270308") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [96] - dtype = "float32" - min_val = float("0.000653841") - max_val = float("0.0512872") - mean = float("0.00630045") - std = 
float("0.00803373") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [96] - dtype = "float32" - min_val = float("-0.1806") - max_val = float("0.105559") - mean = float("-0.0118148") - std = float("0.0471293") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.16617") - max_val = float("0.150574") - mean = float("-0.0015204") - std = float("0.018482") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [96] - dtype = "float32" - min_val = float("-0.01695") - max_val = float("0.0144876") - mean = float("-0.000590983") - std = float("0.0062921") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0289852") - max_val = float("0.0383979") - mean = float("-0.00056367") - std = float("0.00473631") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [10, 96, 3, 3] - dtype = "float32" - min_val = float("-0.158508") - max_val = float("0.111546") - mean = float("-0.00123997") - std = float("0.0149231") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [96] - dtype = "float32" - min_val = float("-1.00246") - max_val = float("1.70494") - mean = float("0.553674") - std = float("0.526983") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [96] - dtype = "float32" - min_val = float("0.751697") - max_val = float("2.08213") - mean = float("1.46982") - std = float("0.2376") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [96] - dtype = "float32" - min_val = float("0.000680279") - max_val = float("0.0389719") - mean = float("0.00525898") - std = float("0.00550929") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [96] - dtype = "float32" - min_val = float("-0.302926") - max_val = float("0.242336") - mean = float("0.0338812") - std = float("0.0731476") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0920552") - max_val = float("0.101403") - mean = float("-0.000391196") - std = float("0.016093") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [96] - dtype = "float32" - min_val = float("-0.00790501") - max_val = float("0.0118839") - mean = float("-0.000616206") - std = float("0.00334672") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0354745") - max_val = float("0.0393783") - mean = float("-0.000415455") - std = float("0.00398812") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [40, 192, 3, 3] - dtype = "float32" - min_val = float("-0.15995") - max_val = float("0.172865") - mean = float("7.42875e-09") - std = float("0.00878877") - data = None - - -class 
Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [192] - dtype = "float32" - min_val = float("-0.0211182") - max_val = float("0.16802") - mean = float("0.0783479") - std = float("0.0391695") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [192] - dtype = "float32" - min_val = float("1.07811") - max_val = float("1.51568") - mean = float("1.30274") - std = float("0.0876299") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [192] - dtype = "float32" - min_val = float("0.000349327") - max_val = float("0.0216234") - mean = float("0.00330979") - std = float("0.00396173") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [192] - dtype = "float32" - min_val = float("-0.124726") - max_val = float("0.0534715") - mean = float("-0.0102394") - std = float("0.0264137") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0792181") - max_val = float("0.107778") - mean = float("-0.000368823") - std = float("0.00729713") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [192] - dtype = "float32" - min_val = float("-0.00746441") - max_val = float("0.00722328") - mean = float("-8.13383e-05") - std = float("0.00296425") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.00795566") - max_val = float("0.0110585") - mean = float("-0.000113663") - std = float("0.00156623") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [10, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0899103") - max_val = float("0.0751371") - mean = float("-0.000593556") - std = float("0.00682701") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [192] - dtype = "float32" - min_val = float("-0.291965") - max_val = float("1.00507") - mean = float("0.404706") - std = float("0.237409") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [192] - dtype = "float32" - min_val = float("1.0436") - max_val = float("1.84744") - mean = float("1.34394") - std = float("0.127482") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [192] - dtype = "float32" - min_val = float("0.000363284") - max_val = float("0.0104766") - mean = float("0.00183808") - std = float("0.00170966") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [192] - dtype = "float32" - min_val = float("-0.160158") - max_val = float("0.11803") - mean = float("-0.00116506") - std = float("0.0399602") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0517684") - max_val = float("0.0545506") - mean = float("-0.000237345") - std = float("0.00623955") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [192] - dtype = "float32" - min_val = float("-0.00367397") - max_val = float("0.00885849") - mean = float("-0.000153054") - std = float("0.00155494") - data = 
None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0153384") - max_val = float("0.0338853") - mean = float("-0.000100079") - std = float("0.00136892") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [40] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [40, 384, 3, 3] - dtype = "float32" - min_val = float("-0.02606") - max_val = float("0.0317561") - mean = float("9.52241e-10") - std = float("0.0020846") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384] - dtype = "float32" - min_val = float("-0.0246739") - max_val = float("0.152016") - mean = float("0.0408553") - std = float("0.032015") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384] - dtype = "float32" - min_val = float("1.05705") - max_val = float("1.41825") - mean = float("1.2191") - std = float("0.0542166") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("0.000103167") - max_val = float("0.00446044") - mean = float("0.000448355") - std = float("0.000443999") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384] - dtype = "float32" - min_val = float("-0.0274058") - max_val = float("0.0126066") - mean = float("-0.00688192") - std = float("0.00609138") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0464693") - max_val = float("0.0548612") - mean = float("-0.000100111") - std = float("0.00295451") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384] - dtype = "float32" - min_val = float("-0.00639402") - max_val = float("0.00421888") - mean = float("3.54671e-05") - std = float("0.00171694") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.00654664") - max_val = float("0.00763939") - mean = float("-4.30022e-06") - std = float("0.000719087") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [10, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0276653") - max_val = float("0.0223204") - mean = float("-0.000443044") - std = float("0.00213442") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [384] - dtype = "float32" - min_val = float("-0.409858") - max_val = float("0.610632") - mean = float("0.212569") - std = float("0.109879") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("1.05961") - max_val = float("1.46695") - mean = float("1.20196") - std = float("0.0657315") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384] - dtype = "float32" - min_val = float("7.46256e-05") - max_val = float("0.0054246") - mean = float("0.000793045") - std = float("0.00065768") - data = None - - -class 
Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("-0.0807872") - max_val = float("0.0680079") - mean = float("-0.0131062") - std = float("0.0222074") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0661697") - max_val = float("0.0355423") - mean = float("-0.00020161") - std = float("0.00342025") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [384] - dtype = "float32" - min_val = float("-0.00360413") - max_val = float("0.00538468") - mean = float("-8.84011e-05") - std = float("0.000952451") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0169924") - max_val = float("0.0292524") - mean = float("-2.04535e-05") - std = float("0.000783345") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py new file mode 100644 index 000000000..86f4e8443 --- /dev/null +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L/input_meta.py @@ -0,0 +1,7 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [1, 24276] + dtype = "int32" + min_val = 0 + max_val = 10 + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE-S_vehicle/subgraph_9/weight_meta.py rename to paddle_samples/PaddleX/PP-YOLOE_plus_SOD-S/subgraph_9/shape_patches_PP-YOLOE_plus_SOD-largesize-L/weight_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt index 82d83ca0b..3060b8e66 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/graph_hash.txt @@ -1 +1 @@ -2e1652dec5c10dbfbb766b39e6dd1a2a376f1e03061d76cd66a79bf79d0616f3 \ No newline at end of file +cfd03838c2e98747bf694d5196a2c44d4f61ead673e0d40c68b441b98f84ef34 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py index bf83235e8..6b8c591d4 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/input_meta.py @@ -1,19 +1,119 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] + shape = [1024, 3072] dtype = "float32" - data = [0.136913] + min_val = float("-0.0337707") + max_val = float("0.03429") + mean = float("-1.71998e-05") + std = float("0.0182992") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [] + shape = [3072] dtype = "float32" - data = [0.574719] + min_val = float("-0.000858163") + max_val = float("0.000895478") + mean = float("1.43758e-06") + std = float("0.000180847") + data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [] + shape = [1024, 3072] 
dtype = "float32" - data = [0.566784] + min_val = float("-0.0324395") + max_val = float("0.0323104") + mean = float("-1.57215e-05") + std = float("0.0182981") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [3072] + dtype = "float32" + min_val = float("-0.000630245") + max_val = float("0.000514232") + mean = float("2.75956e-06") + std = float("0.000126901") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321875") + max_val = float("0.0321786") + mean = float("-1.59553e-05") + std = float("0.0182974") + data = None + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [3072] + dtype = "float32" + min_val = float("-0.000429817") + max_val = float("0.000427089") + mean = float("1.59167e-06") + std = float("8.84515e-05") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321313") + max_val = float("0.0321203") + mean = float("-1.62062e-05") + std = float("0.018297") + data = None + + +class Program_weight_tensor_data_7: + name = "data_7" + shape = [3072] + dtype = "float32" + min_val = float("-0.000397465") + max_val = float("0.000488945") + mean = float("1.04525e-06") + std = float("8.30003e-05") + data = None + + +class Program_weight_tensor_data_8: + name = "data_8" + shape = [1, 256, 240, 240] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("12.296") + mean = float("-0.0693066") + std = float("0.362327") + data = None + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1, 512, 120, 120] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("10.8892") + mean = float("-0.0919699") + std = float("0.352598") + data = None + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1, 1024, 60, 60] + dtype = "float32" + min_val = float("-0.278465") + max_val = float("28.4265") + mean = float("0.296413") + std = float("1.17347") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py index 4cccb2b8e..2377a8523 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/model.py @@ -5,39 +5,4853 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + 
parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + 
parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + ): + # pd_op.flatten: (1x1024x3600xf32) <- (1x1024x60x60xf32) + flatten_0 = paddle._C_ops.flatten(data_10, 2, 3) + del data_10 + + # pd_op.transpose: (1x3600x1024xf32) <- (1x1024x3600xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_0 = paddle._C_ops.scale(data_2, full_0, float("0"), True) - del data_2 + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("60"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (60xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="float32") + del full_1 + + # builtin.combine: ([60xf32, 60xf32]) <- (60xf32, 60xf32) + combine_0 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([60x60xf32, 60x60xf32]) <- ([60xf32, 60xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_0) + del combine_0 + + # builtin.split: (60x60xf32, 60x60xf32) <- ([60x60xf32, 60x60xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_3, full_2, dtype="float32") + del full_0, full_2, full_3 # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("2.5"), paddle.float32, paddle.core.CPUPlace() + full_4 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) 
+ + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_4, float("0"), True) + del arange_1, full_4 + + # pd_op.full: (256xf32) <- () + full_5 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_5, scale_0) + del full_5, scale_0 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_6, elementwise_pow_0) + del elementwise_pow_0, full_6 + + # pd_op.flatten: (3600xf32) <- (60x60xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] + + # pd_op.unsqueeze: (3600x1xf32) <- (3600xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_0) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_3 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_4 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_5 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_6 = full_int_array_1 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_7 = full_int_array_1 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_1) + del divide_0 + + # pd_op.matmul: (3600x256xf32) <- (3600x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (3600xf32) <- (60x60xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (3600x1xf32) <- (3600xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_0) + del flatten_2, full_int_array_0 + + # pd_op.matmul: (3600x256xf32) <- (3600x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (3600x256xf32) <- (3600x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (3600x256xf32) <- (3600x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (3600x256xf32) <- (3600x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (3600x256xf32) <- (3600x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_13 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_14 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_15 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_16 = full_7 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_17 = full_7 + + # builtin.combine: 
([3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32]) <- (3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32) + combine_1 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (3600x1024xf32) <- ([3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_7) + del combine_1 + + # pd_op.unsqueeze: (1x3600x1024xf32) <- (3600x1024xf32, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(concat_0, full_int_array_1) + del concat_0 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_0 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_2 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_2 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_0, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_0, slice_0, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_1 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_3 = [0, 0, 4, 256] + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_0, [0, 2, 1, 3]) + del reshape_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_4 + + # pd_op.assign: (1xi64) 
<- (1xi64) + assign_44 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_0, [1], full_int_array_2, full_int_array_4, [1], [] ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_0, full_1, float("0"), True) + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_1, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_0, slice_2, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_2 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(add_2, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_1, [0, 2, 1, 3]) + del reshape_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_5 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_5 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_0, [1], full_int_array_4, full_int_array_5, [1], [] + ) del data_0 - # pd_op.add: (xf32) <- (xf32, xf32) - add_1 = paddle._C_ops.add(scale_0, scale_1) + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_1, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_3 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(add_3, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) + del reshape_2 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (xf32) <- (xf32, 1xf32) - scale_2 = paddle._C_ops.scale(data_1, full_2, float("0"), True) - del data_1 + # pd_op.assign: (1xf32) <- (1xf32) + assign_55 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_56 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_57 = full_8 + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # 
pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_58 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_59 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_60 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_6 = [0, 0, 1024] + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_3 = paddle._C_ops.reshape(transpose_4, full_int_array_6) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_3, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_4 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_4, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_4 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_5 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_5, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_6 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_6, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- 
(1x3600x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_7 = paddle._C_ops.add(matmul_9, parameter_357) + del parameter_357 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_7, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_7 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_8 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_8, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_9 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_2, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_3, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_9, slice_6, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_10 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_10, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_2, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_3, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_9, slice_8, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_11 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_11, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_2, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_2 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_3, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_3 + + # 
pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_12 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_12, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_8, full_int_array_6) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_7, parameter_354, False, False) + del parameter_354 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_13 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_13, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_13 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_14 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_14, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_15 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_15, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), 
+ ) + del gelu_1 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_16 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_16, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_16 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_17 = paddle._C_ops.add(layer_norm_6, dropout_14) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_17, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_18 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_4, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_5, [0], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_18, slice_12, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_19 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_19, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_4, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_5, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_18, slice_14, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_20 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_20, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_4, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_4 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_5, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_5 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_21 
= paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_21, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + del scale_3 - # pd_op.add: (xf32) <- (xf32, xf32) - add_0 = paddle._C_ops.add(add_1, scale_2) - del add_1, full_0, full_1, full_2, scale_0, scale_1, scale_2 + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_12, full_int_array_6) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_11, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_22 = paddle._C_ops.add(matmul_23, parameter_343) + del parameter_343 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_22, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_22 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_23 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_23, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_24 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_24, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (1x3600x1024xf32) 
<- (1x3600x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_25 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_25, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_25 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_26 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_26, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_27 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_6, [1], full_int_array_1, full_int_array_2, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_7, [0], full_int_array_1, full_int_array_2, [1], [] + ) + del full_int_array_1 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_27, slice_18, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_28 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_28, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_6, [1], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_7, [0], full_int_array_2, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_27, slice_20, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_29 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_29, full_int_array_3) + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_6, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_6 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_7, [0], full_int_array_4, full_int_array_5, [1], [] + ) + del data_7 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_30 = 
paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_30, full_int_array_3) + del full_int_array_3 + + # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) + dropout_24, dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_16, full_int_array_6) + del full_int_array_6 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_15, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_31 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_31, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_31 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_32 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_32, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) + add_33 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_33, False) + + # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, 
False, False) + del parameter_328 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) + add_34 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_34, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_34 + + # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) + add_35 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_35, parameter_326, parameter_325, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (1x1024x3600xf32) <- (1x3600x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_7 = [1, 1024, 60, 60] + + # pd_op.reshape: (1x1024x60x60xf32) <- (1x1024x3600xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(transpose_17, full_int_array_7) + del full_int_array_7 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1024x60x60xf32, 384x1024x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + reshape_16, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1024x60x60xf32, 384x1024x1x1xf32) + conv2d_1 = paddle._C_ops.conv2d( + reshape_16, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 
384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_3, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_36 = paddle._C_ops.add(batch_norm__18, batch_norm__24) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_4 = paddle._C_ops.swish(add_36) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_5 = paddle._C_ops.swish(batch_norm__30) + + # 
pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_5, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, parameter_292, parameter_293 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_5, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_37 = paddle._C_ops.add(batch_norm__36, batch_norm__42) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_6 = paddle._C_ops.swish(add_37) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_8 = [5, 5] + + # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_6, + full_int_array_8, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_9 = [9, 9] + + # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_6, + full_int_array_9, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [13, 13] + + # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_6, + full_int_array_10, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32) + combine_2 = [swish_6, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (1x1536x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_7) + del combine_2 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1536x60x60xf32, 384x1536x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + concat_1, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 
384xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_7 = paddle._C_ops.swish(batch_norm__48) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + swish_7, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_8 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_8, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_8, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_38 = paddle._C_ops.add(batch_norm__60, batch_norm__66) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_9 = paddle._C_ops.swish(add_38) + + # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 
1x384x60x60xf32) + combine_3 = [swish_1, swish_9] + + # pd_op.concat: (1x768x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_3, full_7) + del combine_3 + + # pd_op.conv2d: (1x768x60x60xf32) <- (1x768x60x60xf32, 768x768x1x1xf32) + conv2d_12 = paddle._C_ops.conv2d( + concat_2, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (1x768x60x60xf32) <- (1x768x60x60xf32) + swish_10 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x768x60x60xf32, 384x768x1x1xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_10, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_11 = paddle._C_ops.swish(batch_norm__78) + + # pd_op.nearest_interp: (1x384x120x120xf32) <- (1x384x60x60xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_11, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x384x120x120xf32, 1x512x-1x-1xf32]) <- (1x384x120x120xf32, 1x512x-1x-1xf32) + combine_4 = [nearest_interp_0, data_9] + del data_9 + + # pd_op.concat: (1x896x120x120xf32) <- ([1x384x120x120xf32, 1x512x-1x-1xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_4, full_7) + del combine_4 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x896x120x120xf32, 192x896x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + concat_3, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: 
(1x192x120x120xf32) <- (1x192x120x120xf32) + swish_12 = paddle._C_ops.swish(batch_norm__84) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x896x120x120xf32, 192x896x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + concat_3, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_15, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_16 = paddle._C_ops.conv2d( + swish_13, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_14, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, 
+ False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_39 = paddle._C_ops.add(batch_norm__102, batch_norm__108) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_15 = paddle._C_ops.swish(add_39) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_15, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_16 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_16, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_40 = paddle._C_ops.add(batch_norm__120, batch_norm__126) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_17 = paddle._C_ops.swish(add_40) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + 
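+ # Descriptive note: this region repeats one pattern, conv2d -> batch_norm -> swish,
+ # with a 3x3 branch and a 1x1 shortcut branch summed (pd_op.add) before the next
+ # swish, and every parameter del-ed immediately after its last use. The
+ # (lambda x, f: f(x))(paddle._C_ops.batch_norm(...), lambda out: ...) wrapper only
+ # pads a bare-tensor return into a 6-tuple (out, None, None, None, None, None);
+ # it does not change the computation itself.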
del parameter_214 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_18 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + swish_18, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_18, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_41 = paddle._C_ops.add(batch_norm__138, batch_norm__144) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_19 = paddle._C_ops.swish(add_41) + + # builtin.combine: ([1x192x120x120xf32, 1x192x120x120xf32]) <- (1x192x120x120xf32, 1x192x120x120xf32) + combine_5 = [swish_12, swish_19] + + # pd_op.concat: (1x384x120x120xf32) <- ([1x192x120x120xf32, 1x192x120x120xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_7) + del combine_5 + + # pd_op.conv2d: (1x384x120x120xf32) <- (1x384x120x120xf32, 384x384x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + concat_4, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_25, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (1x384x120x120xf32) <- (1x384x120x120xf32) + swish_20 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x384x120x120xf32, 192x384x1x1xf32) + conv2d_26 = paddle._C_ops.conv2d( + swish_20, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_21 = paddle._C_ops.swish(batch_norm__156) + + # pd_op.nearest_interp: (1x192x240x240xf32) <- (1x192x120x120xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_21, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x192x240x240xf32, 1x256x-1x-1xf32]) <- (1x192x240x240xf32, 1x256x-1x-1xf32) + combine_6 = [nearest_interp_1, data_8] + del data_8 + + # pd_op.concat: (1x448x240x240xf32) <- ([1x192x240x240xf32, 1x256x-1x-1xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_7) + del combine_6 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x448x240x240xf32, 96x448x1x1xf32) + conv2d_27 = paddle._C_ops.conv2d( + concat_5, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_22 = paddle._C_ops.swish(batch_norm__162) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x448x240x240xf32, 96x448x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + concat_5, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_183, + parameter_182, + parameter_181, + 
parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + swish_23, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_24 = paddle._C_ops.swish(batch_norm__174) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_24, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_24, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) + add_42 = paddle._C_ops.add(batch_norm__180, batch_norm__186) + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_25 = paddle._C_ops.swish(add_42) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + swish_25, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- 
(1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_26 = paddle._C_ops.swish(batch_norm__192) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_26, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_26, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) + add_43 = paddle._C_ops.add(batch_norm__198, batch_norm__204) + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_27 = paddle._C_ops.swish(add_43) + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + swish_27, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_28 = paddle._C_ops.swish(batch_norm__210) + + # 
pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_28, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_28, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) + add_44 = paddle._C_ops.add(batch_norm__216, batch_norm__222) + + # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) + swish_29 = paddle._C_ops.swish(add_44) + + # builtin.combine: ([1x96x240x240xf32, 1x96x240x240xf32]) <- (1x96x240x240xf32, 1x96x240x240xf32) + combine_7 = [swish_22, swish_29] + + # pd_op.concat: (1x192x240x240xf32) <- ([1x96x240x240xf32, 1x96x240x240xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_7) + del combine_7 + + # pd_op.conv2d: (1x192x240x240xf32) <- (1x192x240x240xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (1x192x240x240xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x240x240xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_38, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (1x192x240x240xf32) <- (1x192x240x240xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x240x240xf32, 192x192x3x3xf32) + conv2d_39 = paddle._C_ops.conv2d( + swish_30, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + 
batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) + + # builtin.combine: ([1x192x120x120xf32, 1x384x120x120xf32]) <- (1x192x120x120xf32, 1x384x120x120xf32) + combine_8 = [swish_31, swish_20] + + # pd_op.concat: (1x576x120x120xf32) <- ([1x192x120x120xf32, 1x384x120x120xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_7) + del combine_8 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x576x120x120xf32, 192x576x1x1xf32) + conv2d_40 = paddle._C_ops.conv2d( + concat_7, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x576x120x120xf32, 192x576x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + concat_7, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_33, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + 
if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_34 = paddle._C_ops.swish(batch_norm__252) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_34, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_34, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_45 = paddle._C_ops.add(batch_norm__258, batch_norm__264) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_35 = paddle._C_ops.swish(add_45) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_35, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_36 = paddle._C_ops.swish(batch_norm__270) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + swish_36, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + 
batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_36, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_46 = paddle._C_ops.add(batch_norm__276, batch_norm__282) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_37 = paddle._C_ops.swish(add_46) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_37, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_38 = paddle._C_ops.swish(batch_norm__288) + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + swish_38, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_38, parameter_74, [1, 1], [0, 0], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) + add_47 = paddle._C_ops.add(batch_norm__294, batch_norm__300) + + # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) + swish_39 = paddle._C_ops.swish(add_47) + + # builtin.combine: ([1x192x120x120xf32, 1x192x120x120xf32]) <- (1x192x120x120xf32, 1x192x120x120xf32) + combine_9 = [swish_32, swish_39] + + # pd_op.concat: (1x384x120x120xf32) <- ([1x192x120x120xf32, 1x192x120x120xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_7) + del combine_9 + + # pd_op.conv2d: (1x384x120x120xf32) <- (1x384x120x120xf32, 384x384x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + concat_8, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (1x384x120x120xf32) <- (1x384x120x120xf32) + swish_40 = paddle._C_ops.swish(batch_norm__306) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x120x120xf32, 384x384x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + swish_40, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_41 = paddle._C_ops.swish(batch_norm__312) + + # builtin.combine: ([1x384x60x60xf32, 1x768x60x60xf32]) <- (1x384x60x60xf32, 1x768x60x60xf32) + combine_10 = [swish_41, swish_10] + + # pd_op.concat: (1x1152x60x60xf32) <- ([1x384x60x60xf32, 1x768x60x60xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_7) + del combine_10 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1152x60x60xf32, 384x1152x1x1xf32) + conv2d_53 = paddle._C_ops.conv2d( + concat_9, 
parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_42 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x1152x60x60xf32, 384x1152x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + concat_9, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_43 = paddle._C_ops.swish(batch_norm__324) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + swish_43, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_44 = paddle._C_ops.swish(batch_norm__330) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_44, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, 
parameter_43 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_44, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_48 = paddle._C_ops.add(batch_norm__336, batch_norm__342) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_45 = paddle._C_ops.swish(add_48) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + swish_45, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_46 = paddle._C_ops.swish(batch_norm__348) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_46, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_46, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + 
lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_49 = paddle._C_ops.add(batch_norm__354, batch_norm__360) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_47 = paddle._C_ops.swish(add_49) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_61 = paddle._C_ops.conv2d( + swish_47, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_61, + parameter_18, + parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_48 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) + conv2d_62 = paddle._C_ops.conv2d( + swish_48, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_48, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) + add_50 = paddle._C_ops.add(batch_norm__372, batch_norm__378) + + # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) + swish_49 = paddle._C_ops.swish(add_50) + + # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32) + combine_11 = [swish_42, swish_49] + + # pd_op.concat: (1x768x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_7) + del combine_11, full_7 + + # 
pd_op.conv2d: (1x768x60x60xf32) <- (1x768x60x60xf32, 768x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + concat_10, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (1x768x60x60xf32) <- (1x768x60x60xf32) + swish_0 = paddle._C_ops.swish(batch_norm__384) + del ( + add_0, + add_1, + add_10, + add_11, + add_12, + add_14, + add_15, + add_17, + add_18, + add_19, + add_2, + add_20, + add_21, + add_23, + add_24, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_32, + add_33, + add_35, + add_36, + add_37, + add_38, + add_39, + add_40, + add_41, + add_42, + add_43, + add_44, + add_45, + add_46, + add_47, + add_48, + add_49, + add_5, + add_50, + add_6, + add_8, + add_9, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + 
batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + 
batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__4, + batch_norm__40, + batch_norm__41, + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + batch_norm__48, + batch_norm__49, + batch_norm__5, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + batch_norm__6, + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__7, + batch_norm__70, + batch_norm__71, + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + batch_norm__78, + batch_norm__79, + batch_norm__8, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_1, + concat_10, + concat_2, + concat_3, + concat_4, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_12, + conv2d_13, + conv2d_14, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_7, + conv2d_8, + conv2d_9, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, 
+ dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_8, + full_9, + full_int_array_10, + full_int_array_2, + full_int_array_4, + full_int_array_5, + full_int_array_8, + full_int_array_9, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_11, + reshape_15, + reshape_16, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_6, + swish_7, + swish_8, + swish_9, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_3, + ) - return add_0 + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py index 8b1378917..bd82badb0 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_0/weight_meta.py @@ -1 +1,4013 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.175875") + max_val = float("0.210823") + mean = float("0.0834695") + std = float("0.0566098") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.939895") + max_val = float("1.29826") + mean = float("1.06397") + std = float("0.0312259") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00114665") + max_val = 
float("0.0503602") + mean = float("0.00766089") + std = float("0.00465424") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.134835") + max_val = float("0.0565135") + mean = float("-0.0288952") + std = float("0.0290584") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0548133") + max_val = float("0.0388088") + mean = float("-0.000154659") + std = float("0.00249634") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.14169") + max_val = float("0.0305817") + mean = float("-0.0188052") + std = float("0.0234504") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.945748") + max_val = float("1.04442") + mean = float("0.98666") + std = float("0.0105852") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000803951") + max_val = float("0.0187689") + mean = float("0.00492345") + std = float("0.00343028") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0551849") + max_val = float("0.062912") + mean = float("0.00285491") + std = float("0.0223164") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0299323") + max_val = float("0.020664") + mean = float("2.29275e-05") + std = float("0.00192338") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.14169") + max_val = float("0.0305817") + mean = float("-0.0188052") + std = float("0.0234504") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.968039") + max_val = float("1.13059") + mean = float("1.01544") + std = float("0.0171846") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00197981") + max_val = float("0.0503469") + mean = float("0.00764845") + std = float("0.00453187") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.202706") + max_val = float("0.152191") + mean = float("-0.0431393") + std = float("0.0362812") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.029908") + max_val = float("0.035511") + mean = float("-7.29869e-05") + std = float("0.00131195") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170219") + max_val = float("0.0209993") + mean = float("-0.0348873") + std = float("0.0279313") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975222") + max_val = float("1.12587") + mean = float("1.015") + std = float("0.0240805") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + 
min_val = float("0.00530043") + max_val = float("0.186183") + mean = float("0.0215775") + std = float("0.0159645") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.265254") + max_val = float("0.415528") + mean = float("-0.0379328") + std = float("0.0510841") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0331338") + max_val = float("0.0530854") + mean = float("-6.31513e-05") + std = float("0.00148047") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945357") + max_val = float("1.04501") + mean = float("0.988631") + std = float("0.00984229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000690331") + max_val = float("0.0185684") + mean = float("0.00513745") + std = float("0.00318726") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0849168") + max_val = float("0.0438217") + mean = float("-0.00259504") + std = float("0.017162") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0267959") + max_val = float("0.025491") + mean = float("-5.37283e-05") + std = float("0.00203271") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959852") + max_val = float("1.10509") + mean = float("1.01609") + std = float("0.0177564") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.00248164") + max_val = float("0.0491499") + mean = float("0.00954645") + std = float("0.00479825") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.215332") + max_val = float("0.320794") + mean = float("-0.0502206") + std = float("0.044921") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0363929") + max_val = float("0.0514823") + mean = float("-8.4193e-05") + std = float("0.00132563") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896627") + max_val = float("0.0192839") + mean = float("-0.0360783") + std = float("0.0194692") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933291") + max_val = float("1.11466") + mean = float("1.01167") + std = float("0.026589") + data = None + + +class Program_weight_tensor_parameter_32: + 
name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00571017") + max_val = float("0.0668329") + mean = float("0.0190313") + std = float("0.00954406") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.241704") + max_val = float("0.126745") + mean = float("-0.0297335") + std = float("0.0569594") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0397047") + max_val = float("0.0499731") + mean = float("-5.43156e-05") + std = float("0.00151173") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929383") + max_val = float("1.02791") + mean = float("0.98704") + std = float("0.0110296") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00125121") + max_val = float("0.0114154") + mean = float("0.00429573") + std = float("0.00176768") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0558903") + max_val = float("0.0353347") + mean = float("-0.00854145") + std = float("0.0134779") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0386337") + max_val = float("0.028212") + mean = float("-0.000152706") + std = float("0.00204597") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.981354") + max_val = float("1.10683") + mean = float("1.01834") + std = float("0.0222205") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00487818") + max_val = float("0.0360324") + mean = float("0.0114429") + std = float("0.00469479") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.191838") + max_val = float("0.0902951") + mean = float("-0.0270964") + std = float("0.0352644") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0360859") + max_val = float("0.0633791") + mean = float("-4.66789e-05") + std = float("0.00138059") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.107113") + max_val = float("0.0239382") + mean = float("-0.0375215") + std = float("0.0214567") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944795") + max_val = float("1.11465") + mean = float("1.01186") + std = float("0.0277861") + 
data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.00539047") + max_val = float("0.0596139") + mean = float("0.0152076") + std = float("0.0073805") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.154913") + max_val = float("0.125914") + mean = float("-0.0486592") + std = float("0.0509119") + data = None + + +class Program_weight_tensor_parameter_49: + name = "parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0279281") + max_val = float("0.0439271") + mean = float("-7.87755e-05") + std = float("0.00153817") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.10674") + max_val = float("0.046738") + mean = float("-0.026306") + std = float("0.0154157") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.973756") + max_val = float("1.08653") + mean = float("1.00903") + std = float("0.0171142") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00231428") + max_val = float("0.0166757") + mean = float("0.00532256") + std = float("0.00190646") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.10048") + max_val = float("0.0869698") + mean = float("-0.0197301") + std = float("0.0269629") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0619005") + max_val = float("0.0744809") + mean = float("-8.91799e-05") + std = float("0.00230778") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0424904") + max_val = float("0.0160654") + mean = float("-0.00899509") + std = float("0.00840798") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.959519") + max_val = float("1.05137") + mean = float("1.00788") + std = float("0.0115961") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00124549") + max_val = float("0.0304895") + mean = float("0.00442696") + std = float("0.00213864") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.110999") + max_val = float("0.0924363") + mean = float("-0.0236762") + std = float("0.0234971") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0245473") + max_val = float("0.0425909") + mean = float("-0.000112646") + std = float("0.00208633") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0529748") + max_val = float("0.0059538") + mean = float("-0.0166275") + std = float("0.00987957") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988678") + max_val = 
float("1.10388") + mean = float("1.01957") + std = float("0.0168754") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00468338") + max_val = float("0.0641493") + mean = float("0.0144074") + std = float("0.0082444") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.44327") + max_val = float("0.19537") + mean = float("-0.0473944") + std = float("0.0713197") + data = None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0212973") + max_val = float("0.0335283") + mean = float("-3.20311e-05") + std = float("0.00117985") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.222314") + max_val = float("0.492622") + mean = float("0.217344") + std = float("0.124262") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919259") + max_val = float("1.4834") + mean = float("1.14101") + std = float("0.0738465") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00374913") + max_val = float("0.0753866") + mean = float("0.0117258") + std = float("0.00578106") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.129657") + max_val = float("0.0597492") + mean = float("-0.037386") + std = float("0.0303036") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0788092") + max_val = float("0.0718385") + mean = float("-0.00042023") + std = float("0.00505347") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.841187") + max_val = float("1.05089") + mean = float("0.972721") + std = float("0.0237726") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00140171") + max_val = float("0.0211185") + mean = float("0.00615762") + std = float("0.00390072") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0638207") + max_val = float("0.0926642") + mean = float("-0.00576702") + std = float("0.0201979") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0496362") + max_val = float("0.0295849") + mean = float("-0.000179203") + std = float("0.00381061") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype 
= "float32" + min_val = float("0.729841") + max_val = float("1.12263") + mean = float("1.02218") + std = float("0.0372419") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00540078") + max_val = float("0.0575204") + mean = float("0.0136539") + std = float("0.00636345") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.219646") + max_val = float("0.101609") + mean = float("-0.037761") + std = float("0.0435659") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0430374") + max_val = float("0.0495163") + mean = float("-0.000124453") + std = float("0.00256786") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.191344") + max_val = float("0.0444996") + mean = float("-0.057942") + std = float("0.0491063") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897737") + max_val = float("1.18792") + mean = float("1.01539") + std = float("0.0484046") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.010406") + max_val = float("0.203638") + mean = float("0.0350038") + std = float("0.0227289") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.296379") + max_val = float("0.516951") + mean = float("-0.0407173") + std = float("0.0634974") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0473781") + max_val = float("0.0557186") + mean = float("-0.00011045") + std = float("0.00285571") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854023") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922153") + max_val = float("1.04653") + mean = float("0.973445") + std = float("0.017956") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00109672") + max_val = float("0.0152916") + mean = float("0.00524439") + std = float("0.00262094") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0707542") + max_val = float("0.0365569") + mean = float("-0.00798934") + std = float("0.0151914") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0386532") + max_val = float("0.0308154") + mean = float("-0.000343292") + std = float("0.00384278") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854023") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class 
Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.968104") + max_val = float("1.14778") + mean = float("1.02415") + std = float("0.0294364") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00424155") + max_val = float("0.04597") + mean = float("0.0118971") + std = float("0.00620602") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = float("-0.186844") + max_val = float("0.141321") + mean = float("-0.0381112") + std = float("0.0386452") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0471115") + max_val = float("0.0550156") + mean = float("-0.000140673") + std = float("0.00262922") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.188926") + max_val = float("0.062054") + mean = float("-0.0755775") + std = float("0.0405971") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.880419") + max_val = float("1.21878") + mean = float("1.01465") + std = float("0.050849") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00813457") + max_val = float("0.0698677") + mean = float("0.0225257") + std = float("0.0112607") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.123249") + max_val = float("0.0530247") + mean = float("-0.0265596") + std = float("0.0349301") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0427988") + max_val = float("0.0615636") + mean = float("-0.000111453") + std = float("0.00299174") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962432") + mean = float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900428") + max_val = float("1.02666") + mean = float("0.975123") + std = float("0.0229582") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00166927") + max_val = float("0.0149214") + mean = float("0.00548105") + std = float("0.00187349") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0393056") + max_val = float("0.0471334") + mean = float("-0.0107647") + std = float("0.0172946") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0436514") + max_val = float("0.0635205") + mean = float("-0.000488458") + std = float("0.00437095") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962433") + mean 
= float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947654") + max_val = float("1.11111") + mean = float("1.02112") + std = float("0.0306157") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00719786") + max_val = float("0.0556434") + mean = float("0.0161388") + std = float("0.00787651") + data = None + + +class Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.131118") + max_val = float("0.0600064") + mean = float("-0.0237574") + std = float("0.0337413") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0485053") + max_val = float("0.0562451") + mean = float("-9.74267e-05") + std = float("0.00278606") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234305") + max_val = float("0.0813681") + mean = float("-0.0947175") + std = float("0.0463051") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.886145") + max_val = float("1.20472") + mean = float("1.01666") + std = float("0.0540248") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00868237") + max_val = float("0.0982557") + mean = float("0.0208977") + std = float("0.0125654") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.181299") + max_val = float("0.0965931") + mean = float("-0.0401902") + std = float("0.0436891") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0410138") + max_val = float("0.0751959") + mean = float("-0.000134497") + std = float("0.0032483") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.199948") + max_val = float("0.0153483") + mean = float("-0.0662884") + std = float("0.031178") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925493") + max_val = float("1.15259") + mean = float("1.01328") + std = float("0.0383643") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.00445121") + max_val = float("0.0245699") + mean = float("0.00852168") + std = float("0.00311132") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0890928") + max_val = float("0.122453") + mean = float("-0.0225451") + std = float("0.0292516") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0628757") + max_val = float("0.0645973") + mean = float("-0.000195493") + std = float("0.00467829") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] 
+ dtype = "float32" + min_val = float("-0.099963") + max_val = float("0.0374111") + mean = float("-0.0139724") + std = float("0.0203964") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.923856") + max_val = float("1.19918") + mean = float("1.00277") + std = float("0.025885") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00335112") + max_val = float("0.0376873") + mean = float("0.00846023") + std = float("0.00424043") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0728879") + max_val = float("0.0457376") + mean = float("-0.0169366") + std = float("0.0214102") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0557051") + max_val = float("0.0726594") + mean = float("-0.000148829") + std = float("0.00416084") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.15908") + max_val = float("-0.000555524") + mean = float("-0.038944") + std = float("0.0217257") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921159") + max_val = float("1.24866") + mean = float("1.00725") + std = float("0.0301467") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00450918") + max_val = float("0.0608082") + mean = float("0.0161327") + std = float("0.00840885") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.400402") + max_val = float("0.338842") + mean = float("-0.0361684") + std = float("0.0966519") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350247") + max_val = float("0.0471653") + mean = float("-3.44506e-05") + std = float("0.00253963") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.552249") + max_val = float("1.14732") + mean = float("0.355898") + std = float("0.346059") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.541478") + max_val = float("1.57746") + mean = float("1.15098") + std = float("0.184373") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00575627") + max_val = float("0.117619") + mean = float("0.0298341") + std = float("0.017588") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.182786") + max_val = float("0.205671") + mean = float("-0.0504974") + std = float("0.0491969") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.14008") + max_val = float("0.117816") + mean = float("-0.00105786") + std = float("0.0117759") + data = None + + +class 
Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.0094414") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.762871") + max_val = float("1.23462") + mean = float("0.948542") + std = float("0.0712293") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + shape = [96] + dtype = "float32" + min_val = float("0.00291973") + max_val = float("0.0418611") + mean = float("0.0123709") + std = float("0.00820581") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0589327") + max_val = float("0.0912759") + mean = float("-0.0136043") + std = float("0.0242705") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0753795") + max_val = float("0.0571307") + mean = float("-0.00127258") + std = float("0.00926422") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.0094414") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.505008") + max_val = float("1.2709") + mean = float("1.02954") + std = float("0.096255") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00864928") + max_val = float("0.0821514") + mean = float("0.0294604") + std = float("0.0153982") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.235962") + max_val = float("0.133407") + mean = float("-0.023131") + std = float("0.0603207") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0934278") + max_val = float("0.0953093") + mean = float("-0.000117742") + std = float("0.00631181") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.703686") + max_val = float("0.495423") + mean = float("-0.112778") + std = float("0.198105") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.723215") + max_val = float("1.7117") + mean = float("0.995187") + std = float("0.133891") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.01304") + max_val = float("0.194962") + mean = float("0.0419874") + std = float("0.0313694") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.214376") + max_val = float("0.134518") + mean = float("-0.0301872") + std = float("0.0628285") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0919381") + max_val = float("0.0707172") + mean = 
float("-0.000470717") + std = float("0.00699141") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.36415") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960162") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.626999") + max_val = float("1.01953") + mean = float("0.906483") + std = float("0.05556") + data = None + + +class Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.0031041") + max_val = float("0.0221224") + mean = float("0.0106635") + std = float("0.00413137") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0660527") + max_val = float("0.0390983") + mean = float("-0.0088487") + std = float("0.0169076") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0710399") + max_val = float("0.0593543") + mean = float("-0.00106871") + std = float("0.00947732") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.36415") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960162") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.811164") + max_val = float("1.15777") + mean = float("1.02225") + std = float("0.0605937") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0111709") + max_val = float("0.105695") + mean = float("0.0280629") + std = float("0.0196981") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.164873") + max_val = float("0.0372368") + mean = float("-0.0373236") + std = float("0.0335712") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0811913") + max_val = float("0.0769005") + mean = float("-0.000466698") + std = float("0.00651756") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.486488") + max_val = float("0.169402") + mean = float("-0.166991") + std = float("0.131221") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.777448") + max_val = float("1.29252") + mean = float("0.963023") + std = float("0.0981105") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.00992224") + max_val = float("0.105847") + mean = float("0.0243501") + std = float("0.0137999") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.154927") + max_val = float("0.0659024") + mean = float("0.00872401") + std = float("0.0386172") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.0993817") + max_val = float("0.0757508") + mean = float("-0.000423259") + std = float("0.00766977") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.0651658") + mean = float("-0.168146") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.722939") + max_val = float("1.0022") + mean = float("0.918838") + std = float("0.0531756") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00775018") + max_val = float("0.036921") + mean = float("0.0160984") + std = float("0.0056277") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0570203") + max_val = float("0.0404215") + mean = float("-0.019887") + std = float("0.0190603") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.103947") + max_val = float("0.0646105") + mean = float("-0.00221249") + std = float("0.0110162") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.0651658") + mean = float("-0.168146") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.766535") + max_val = float("1.15353") + mean = float("0.982409") + std = float("0.0579775") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0171361") + max_val = float("0.213573") + mean = float("0.0436358") + std = float("0.0309431") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.199015") + max_val = float("0.0871906") + mean = float("-0.0157018") + std = float("0.0414583") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.099267") + max_val = float("0.0973901") + mean = float("-0.000248761") + std = float("0.00741391") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.564609") + max_val = float("0.347562") + mean = float("-0.179116") + std = float("0.173215") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764463") + max_val = float("1.33669") + mean = float("0.954532") + std = float("0.110883") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0148204") + max_val = float("0.110617") + mean = float("0.0314773") + std = float("0.0184817") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.172672") + max_val = float("0.269786") + mean = float("-0.0212967") + std = float("0.0939031") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + 
shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.142387") + max_val = float("0.117261") + mean = float("-0.000229489") + std = float("0.00873001") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.625413") + max_val = float("0.597772") + mean = float("-0.0821868") + std = float("0.254375") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = float("0.647481") + max_val = float("1.22746") + mean = float("0.866594") + std = float("0.1146") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0117598") + max_val = float("0.0756791") + mean = float("0.0256735") + std = float("0.011465") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.116141") + max_val = float("0.0942395") + mean = float("-0.0106851") + std = float("0.0412708") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.149031") + max_val = float("0.14906") + mean = float("-0.000519018") + std = float("0.0115778") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0986348") + max_val = float("0.227765") + mean = float("0.0619239") + std = float("0.054569") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.703927") + max_val = float("1.12526") + mean = float("0.932492") + std = float("0.0634651") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00537723") + max_val = float("0.0583092") + mean = float("0.0120102") + std = float("0.00693174") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0902684") + max_val = float("0.166922") + mean = float("-0.0175149") + std = float("0.0398964") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0952113") + max_val = float("0.110912") + mean = float("-0.000272286") + std = float("0.00775169") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.295368") + max_val = float("0.199876") + mean = float("-0.065903") + std = float("0.0695814") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.670697") + max_val = float("1.45276") + mean = float("0.885134") + std = float("0.0783824") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00790397") + max_val = float("0.127787") + mean = float("0.0225856") + std = float("0.0123405") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.148651") + max_val = float("0.0483928") + mean = float("-0.0369365") + std = float("0.0357253") + data = None + + +class 
Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0959148") + max_val = float("0.117327") + mean = float("-0.000597284") + std = float("0.00788358") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.201782") + max_val = float("0.241811") + mean = float("-0.0670364") + std = float("0.0416536") + data = None + + +class Program_weight_tensor_parameter_196: + name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.87318") + max_val = float("1.54065") + mean = float("1.01926") + std = float("0.0632841") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00737718") + max_val = float("0.076491") + mean = float("0.0155636") + std = float("0.00762575") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.339572") + max_val = float("0.121455") + mean = float("-0.0580769") + std = float("0.0477813") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.104641") + max_val = float("0.10408") + mean = float("-0.000725338") + std = float("0.00722264") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590601") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884903") + max_val = float("0.991186") + mean = float("0.949253") + std = float("0.016433") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00353105") + max_val = float("0.0231426") + mean = float("0.0095938") + std = float("0.0033673") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0792437") + max_val = float("0.0708871") + mean = float("-0.0240779") + std = float("0.0316357") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0569077") + max_val = float("0.0369732") + mean = float("-0.000733357") + std = float("0.00540254") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590601") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.945936") + max_val = float("1.03267") + mean = float("0.988143") + std = float("0.0166204") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0150858") + max_val = float("0.0802088") + mean = float("0.0328899") + std = float("0.0117288") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.177971") + max_val = 
float("0.153985") + mean = float("-0.0232557") + std = float("0.0607795") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444125") + max_val = float("0.0760331") + mean = float("-7.02503e-05") + std = float("0.00300584") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.217095") + max_val = float("-0.00148132") + mean = float("-0.0741376") + std = float("0.0354109") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939032") + max_val = float("1.15417") + mean = float("1.02943") + std = float("0.0431659") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0360784") + max_val = float("0.225914") + mean = float("0.0635349") + std = float("0.0203909") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.264891") + max_val = float("0.307619") + mean = float("-0.0428129") + std = float("0.0726088") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0622024") + max_val = float("0.0626876") + mean = float("-0.000102155") + std = float("0.00367047") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.196618") + max_val = float("-0.00995733") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.94411") + max_val = float("1.04693") + mean = float("0.987726") + std = float("0.0137706") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00227428") + max_val = float("0.00943763") + mean = float("0.00475154") + std = float("0.00122457") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0961104") + max_val = float("0.0392249") + mean = float("-0.0252723") + std = float("0.0210983") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0313925") + max_val = float("0.0416139") + mean = float("-0.000809328") + std = float("0.00570058") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.196618") + max_val = float("-0.00995733") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953711") + max_val = float("1.11463") + mean = float("1.00472") + std = float("0.0265116") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.010102") + max_val = float("0.0479077") + mean = float("0.0179937") + std = float("0.00541134") + data = None + + +class Program_weight_tensor_parameter_223: + name = 
"parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.189335") + max_val = float("0.144594") + mean = float("-0.0477661") + std = float("0.0469458") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0484375") + max_val = float("0.0812037") + mean = float("-0.000164179") + std = float("0.00306328") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + min_val = float("-0.232846") + max_val = float("-0.0185216") + mean = float("-0.0943343") + std = float("0.040046") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.946521") + max_val = float("1.19181") + mean = float("1.02411") + std = float("0.0460177") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0359278") + max_val = float("0.139671") + mean = float("0.0648104") + std = float("0.0197637") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.353487") + max_val = float("0.265509") + mean = float("-0.0870985") + std = float("0.100017") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0611518") + max_val = float("0.0870387") + mean = float("-0.000165144") + std = float("0.00384626") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.00333786") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932342") + max_val = float("1.07188") + mean = float("0.998569") + std = float("0.0218607") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.00201715") + max_val = float("0.00936504") + mean = float("0.00403713") + std = float("0.00113824") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0829276") + max_val = float("0.0996937") + mean = float("-0.0125365") + std = float("0.0205666") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348639") + max_val = float("0.0478114") + mean = float("-0.000426039") + std = float("0.00642907") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.00333784") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.936173") + max_val = float("1.11491") + mean = float("0.992553") + std = float("0.0259462") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.00897904") + max_val = float("0.0431413") + mean = float("0.0183886") + std = 
float("0.00560404") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.280815") + max_val = float("0.147007") + mean = float("-0.0420801") + std = float("0.0463274") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372381") + max_val = float("0.0656086") + mean = float("-0.000164115") + std = float("0.00303882") + data = None + + +class Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.289029") + max_val = float("0.0181024") + mean = float("-0.109759") + std = float("0.0400942") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.943873") + max_val = float("1.25886") + mean = float("1.02651") + std = float("0.0418277") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0146575") + max_val = float("0.0648707") + mean = float("0.0286275") + std = float("0.00896829") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.381573") + max_val = float("0.107524") + mean = float("-0.0547241") + std = float("0.0618592") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566325") + max_val = float("0.0721215") + mean = float("-0.000213243") + std = float("0.00432258") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257034") + max_val = float("-0.0134244") + mean = float("-0.121787") + std = float("0.0441916") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916942") + max_val = float("1.13523") + mean = float("1.02431") + std = float("0.042227") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00558811") + max_val = float("0.0227368") + mean = float("0.0107834") + std = float("0.00302476") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.120986") + max_val = float("0.105939") + mean = float("0.0157535") + std = float("0.0292084") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0812543") + max_val = float("0.103822") + mean = float("-0.000190185") + std = float("0.00606084") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.176609") + max_val = float("0.214363") + mean = float("-0.00723539") + std = float("0.0506647") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.951166") + max_val = float("1.2179") + mean = float("1.05549") + std = float("0.0498193") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = 
float("0.0068407") + max_val = float("0.0571011") + mean = float("0.0142387") + std = float("0.00513597") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.076614") + max_val = float("0.0818422") + mean = float("-0.000474385") + std = float("0.0276303") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0552042") + max_val = float("0.102734") + mean = float("-0.000223038") + std = float("0.00619518") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249775") + max_val = float("-0.0568627") + mean = float("-0.125062") + std = float("0.0336773") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.814907") + max_val = float("1.01643") + mean = float("0.909518") + std = float("0.0258168") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.00972713") + max_val = float("0.0669189") + mean = float("0.022077") + std = float("0.00889464") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.146609") + max_val = float("0.11002") + mean = float("-0.0348074") + std = float("0.0384279") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0364879") + max_val = float("0.033967") + mean = float("-0.000277781") + std = float("0.00472355") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104277") + max_val = float("0.0723922") + mean = float("-0.0568764") + std = float("0.0153315") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.9523") + max_val = float("1.1435") + mean = float("1.02091") + std = float("0.0210274") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00431744") + max_val = float("0.0370758") + mean = float("0.00970086") + std = float("0.00356025") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.103952") + max_val = float("0.111506") + mean = float("-0.0349987") + std = float("0.0271815") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0581811") + max_val = float("0.113051") + mean = float("-0.000304542") + std = float("0.00402831") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158167") + max_val = float("0.0744682") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.888577") + max_val = float("1.07465") + mean = float("0.982117") + std = float("0.0132258") + data = None + + +class 
Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.005659") + max_val = float("0.0930103") + mean = float("0.0194514") + std = float("0.0092216") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0685013") + max_val = float("0.0608002") + mean = float("-0.00592014") + std = float("0.0271757") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0396793") + max_val = float("0.0737422") + mean = float("-7.2276e-05") + std = float("0.00350095") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158167") + max_val = float("0.0744682") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.879914") + max_val = float("1.07681") + mean = float("0.993922") + std = float("0.0123427") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0237413") + max_val = float("0.735274") + mean = float("0.139783") + std = float("0.0640231") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.277276") + max_val = float("0.156692") + mean = float("-0.0840022") + std = float("0.0855229") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0424879") + max_val = float("0.0475735") + mean = float("-0.0001269") + std = float("0.00130674") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0801146") + max_val = float("0.116771") + mean = float("-0.0189931") + std = float("0.0160256") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.920205") + max_val = float("1.16667") + mean = float("1.01504") + std = float("0.0246966") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0222636") + max_val = float("0.202839") + mean = float("0.0725926") + std = float("0.0321105") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.233438") + max_val = float("0.219682") + mean = float("-0.023274") + std = float("0.079013") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274571") + max_val = float("0.0359229") + mean = float("-3.21889e-05") + std = float("0.00171791") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0739507") + max_val = float("0.020999") + mean = float("-0.0234999") + std = float("0.0134887") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946312") + max_val = float("1.16798") + 
mean = float("1.01467") + std = float("0.0273906") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0631221") + max_val = float("0.463548") + mean = float("0.173967") + std = float("0.0694937") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.66842") + max_val = float("1.69585") + mean = float("0.0445105") + std = float("0.596448") + data = None + + +class Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0467314") + max_val = float("0.0575585") + mean = float("8.5535e-05") + std = float("0.0030071") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183803") + max_val = float("0.0258619") + mean = float("-0.00144525") + std = float("0.00680649") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969538") + max_val = float("1.06054") + mean = float("0.993834") + std = float("0.0122522") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00292151") + max_val = float("0.0163042") + mean = float("0.00723264") + std = float("0.0025015") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.0974501") + max_val = float("0.0623071") + mean = float("-0.0422498") + std = float("0.0245183") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0333324") + max_val = float("0.0411082") + mean = float("-0.000526783") + std = float("0.00328183") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183803") + max_val = float("0.0258619") + mean = float("-0.00144524") + std = float("0.00680649") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.972046") + max_val = float("1.08568") + mean = float("1.00364") + std = float("0.0181342") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0170031") + max_val = float("0.141069") + mean = float("0.0433458") + std = float("0.0174643") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.324472") + max_val = float("0.0923265") + mean = float("-0.132281") + std = float("0.0638993") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0285762") + max_val = float("0.0755103") + mean = float("-0.00019148") + std = float("0.0013728") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0498104") + max_val = float("0.00884068") + mean = float("-0.00838186") + std = float("0.00779168") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = 
[384] + dtype = "float32" + min_val = float("0.953878") + max_val = float("1.13497") + mean = float("1.01253") + std = float("0.0201047") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.0734313") + max_val = float("0.420989") + mean = float("0.16627") + std = float("0.0472493") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.24929") + max_val = float("0.921608") + mean = float("-0.241972") + std = float("0.271836") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0242525") + max_val = float("0.0585337") + mean = float("-0.00014178") + std = float("0.00163037") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984179") + max_val = float("1.03462") + mean = float("0.999922") + std = float("0.00715392") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00218605") + max_val = float("0.0108072") + mean = float("0.00399253") + std = float("0.00118789") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0778225") + max_val = float("0.150166") + mean = float("-0.020106") + std = float("0.0256109") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020915") + max_val = float("0.0327181") + mean = float("-0.0002642") + std = float("0.00284316") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.982136") + max_val = float("1.06749") + mean = float("1.00454") + std = float("0.0126701") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.009878") + max_val = float("0.0736381") + mean = float("0.025173") + std = float("0.0083744") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.234125") + max_val = float("0.373004") + mean = float("-0.0733105") + std = float("0.0699428") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0111228") + max_val = float("0.0376461") + mean = float("-0.000113877") + std = float("0.00115243") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0529908") + max_val = float("0.00370578") + mean = float("-0.0207007") + std = float("0.00870238") + data = None 
+ + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.976061") + max_val = float("1.08549") + mean = float("1.01199") + std = float("0.0159983") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0131757") + max_val = float("0.0743652") + mean = float("0.0315857") + std = float("0.00914424") + data = None + + +class Program_weight_tensor_parameter_313: + name = "parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.181599") + max_val = float("0.227392") + mean = float("-0.0380446") + std = float("0.0543988") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0155422") + max_val = float("0.0250036") + mean = float("-6.09728e-05") + std = float("0.00159019") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699577") + max_val = float("0.021347") + mean = float("-0.0334829") + std = float("0.0126426") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981937") + max_val = float("1.05593") + mean = float("1.0134") + std = float("0.0107706") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00841399") + max_val = float("0.0336046") + mean = float("0.0138806") + std = float("0.00321811") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.130353") + max_val = float("0.129976") + mean = float("-0.0125698") + std = float("0.0429897") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0187221") + max_val = float("0.0462025") + mean = float("-0.000204289") + std = float("0.00328169") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.0240994") + max_val = float("0.0209722") + mean = float("-0.000328404") + std = float("0.00796388") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994047") + max_val = float("1.08372") + mean = float("1.04108") + std = float("0.0136739") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0107661") + max_val = float("0.0506047") + mean = float("0.0173147") + std = float("0.00413353") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.201399") + max_val = float("0.140707") + mean = float("-0.0130836") + std = float("0.0556937") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.038196") + max_val = float("0.0298097") + mean = float("-0.00023944") + std = float("0.00387698") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.19613e-10") + 
max_val = float("2.57341e-10") + mean = float("-6.94186e-12") + std = float("8.1518e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.826158") + max_val = float("0.830526") + mean = float("0.828072") + std = float("0.000388443") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184723") + max_val = float("0.0186349") + mean = float("3.29345e-06") + std = float("0.0105958") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186692") + max_val = float("0.0186323") + mean = float("-3.0949e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258373") + max_val = float("0.0258489") + mean = float("-0.000490034") + std = float("0.0147842") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261231") + max_val = float("0.0262344") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000644044") + max_val = float("0.000416141") + mean = float("1.03571e-06") + std = float("0.00016092") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.825074") + max_val = float("0.831152") + mean = float("0.828074") + std = float("0.000498943") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.00057158") + max_val = float("0.000431758") + mean = float("-6.03783e-07") + std = float("0.000151099") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0452304") + max_val = float("0.0451715") + mean = float("2.40342e-05") + std = float("0.0258606") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.000495877") + max_val = float("0.000502274") + mean = float("2.39519e-05") + std = float("0.000158429") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.825239") + max_val = float("0.831385") + mean = float("0.828099") + std = float("0.000479399") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype = "float32" + min_val = float("-0.0182543") + max_val = float("0.0183952") + mean = float("1.83687e-06") + std = float("0.0105888") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185874") + max_val = float("0.0186053") + mean = float("-3.09786e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258718") + max_val = float("0.025874") + mean = float("-0.00048855") + std = float("0.0147851") + data = None + + 
+class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0260955") + max_val = float("0.0261498") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000468906") + max_val = float("0.000412054") + mean = float("2.06222e-06") + std = float("0.000140159") + data = None + + +class Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.825682") + max_val = float("0.831193") + mean = float("0.828073") + std = float("0.000448518") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000528327") + max_val = float("0.000383813") + mean = float("2.84058e-06") + std = float("0.000141633") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0450293") + max_val = float("0.0450631") + mean = float("2.40173e-05") + std = float("0.0258607") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000544222") + max_val = float("0.000596302") + mean = float("2.4209e-05") + std = float("0.00018149") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.825946") + max_val = float("0.831225") + mean = float("0.828119") + std = float("0.000435136") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184486") + max_val = float("0.01838") + mean = float("4.31468e-06") + std = float("0.0105859") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185587") + max_val = float("0.0185999") + mean = float("-2.9779e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + dtype = "float32" + min_val = float("-0.0259392") + max_val = float("0.025878") + mean = float("-0.000488745") + std = float("0.014786") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261446") + max_val = float("0.0261368") + mean = float("-1.26001e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000525085") + max_val = float("0.000569726") + mean = float("1.8424e-06") + std = float("0.000180016") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.826325") + max_val = float("0.831088") + mean = float("0.828071") + std = float("0.000422331") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000560374") + max_val = float("0.00059686") + mean = float("2.25972e-06") + std = float("0.000184975") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = 
"float32" + min_val = float("-0.0451116") + max_val = float("0.0451354") + mean = float("2.40528e-05") + std = float("0.0258608") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000823618") + max_val = float("0.000904054") + mean = float("2.92117e-05") + std = float("0.000277524") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" + min_val = float("0.826282") + max_val = float("0.830821") + mean = float("0.828142") + std = float("0.000430458") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.0185659") + max_val = float("0.0186153") + mean = float("4.15863e-06") + std = float("0.0105906") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186583") + max_val = float("0.0186457") + mean = float("-3.02356e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.0260157") + max_val = float("0.0259108") + mean = float("-0.000488166") + std = float("0.0147856") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261391") + max_val = float("0.026125") + mean = float("-1.26002e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.000912874") + max_val = float("0.000860046") + mean = float("1.52602e-06") + std = float("0.000286919") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.826227") + max_val = float("0.830736") + mean = float("0.828069") + std = float("0.000440276") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + min_val = float("-0.000894026") + max_val = float("0.000983702") + mean = float("2.69971e-06") + std = float("0.000279123") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0456631") + max_val = float("0.0456485") + mean = float("2.40398e-05") + std = float("0.0258625") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt index cf9cecf24..352c4c248 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/graph_hash.txt @@ -1 +1 @@ -b570e084ecf7776a98a81e353b5f581d19f9ab243969ebfb5988dfda43a8a50f \ No newline at end of file +16190cdee49c5a19612aa893cd02068a0d7cda8a3ce3ab04b10db216f5b07563 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py index 0f83461ca..380d3daa6 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/input_meta.py @@ -1,73 +1,31 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [48] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [] - dtype = "int64" - data = [48] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "int64" - data = [96] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [] - dtype = "int64" - data = [96] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [] - dtype = "int64" - data = [192] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [] - dtype = "int64" - data = [192] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1, 768, 48, 48] + shape = [1, 768, 34, 34] dtype = "float32" min_val = float("-0.278465") - max_val = float("6.21279") - mean = float("0.263701") - std = float("0.615333") + max_val = float("7.26132") + mean = float("0.265664") + std = float("0.60913") data = None -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1, 384, 96, 96] +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 384, 68, 68] dtype = "float32" min_val = float("-0.278465") - max_val = float("9.39853") - mean = float("0.366505") - std = float("0.697682") + max_val = float("9.84456") + mean = float("0.366383") + std = float("0.709092") data = None -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1, 192, 192, 192] +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 192, 136, 136] dtype = "float32" min_val = float("-0.278465") - max_val = float("13.9944") - mean = float("0.442546") - std = float("0.692429") + max_val = float("15.7599") + mean = float("0.450238") + std = float("0.719633") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py index 254a76346..0ee326a64 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/model.py @@ -64,419 +64,325 @@ def forward( data_0, data_1, data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, ): - # pd_op.full: (1xi64) <- () + # pd_op.full: (1xf64) <- () full_0 = paddle._C_ops.full( - [1], float("0"), paddle.int64, paddle.core.CPUPlace() + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.full: (1xi64) <- () + # pd_op.full: (1xf64) <- () full_1 = paddle._C_ops.full( - [1], float("1"), paddle.int64, paddle.core.CPUPlace() + [1], float("34"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_0 = paddle.arange(full_0, data_1, full_1, dtype="int64") - del data_1 + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.cast: (-1xf32) <- (-1xi64) + # pd_op.arange: (34xi64) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64") + del full_1 + + # pd_op.cast: (34xf32) <- (34xi64) cast_0 = paddle._C_ops.cast(arange_0, paddle.float32) del arange_0 # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( + full_3 = paddle._C_ops.full( [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(cast_0, full_2, float("0.5"), True) + # pd_op.scale: (34xf32) <- (34xf32, 1xf32) + 
scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True) del cast_0 # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( + full_4 = paddle._C_ops.full( [1], float("32"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(scale_0, full_3, float("0"), True) - del scale_0 + # pd_op.scale: (34xf32) <- (34xf32, 1xf32) + scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True) + del full_4, scale_0 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_1 = paddle.arange(full_0, data_0, full_1, dtype="int64") - del data_0 + # builtin.combine: ([34xf32, 34xf32]) <- (34xf32, 34xf32) + combine_0 = [scale_1, scale_1] + del scale_1 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(cast_1, full_2, float("0.5"), True) - del cast_1 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_0 = [scale_3, scale_1] - del scale_1, scale_3 - - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) + # pd_op.meshgrid: ([34x34xf32, 34x34xf32]) <- ([34xf32, 34xf32]) meshgrid_0 = paddle._C_ops.meshgrid(combine_0) del combine_0 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # builtin.split: (34x34xf32, 34x34xf32) <- ([34x34xf32, 34x34xf32]) ( split_0, split_1, ) = meshgrid_0 del meshgrid_0 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_2, float("-80"), True) + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_2, float("-80"), True) + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_6 = paddle._C_ops.scale(split_1, full_2, float("80"), True) + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_7 = paddle._C_ops.scale(split_0, full_2, float("80"), True) + # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) + scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_1 = [scale_4, scale_5, scale_6, scale_7] - del scale_4, scale_5, scale_6, scale_7 + # builtin.combine: ([34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32]) <- (34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32) + combine_1 = [scale_2, scale_3, scale_4, scale_5] + del scale_2, scale_3, scale_4, scale_5 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) + # pd_op.stack: (34x34x4xf32) <- ([34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32]) stack_0 = paddle._C_ops.stack(combine_1, -1) del combine_1 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) + # builtin.combine: ([34x34xf32, 34x34xf32]) <- (34x34xf32, 34x34xf32) combine_2 = [split_1, split_0] del split_0, split_1 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) + # pd_op.stack: (34x34x2xf32) <- ([34x34xf32, 34x34xf32]) stack_1 = paddle._C_ops.stack(combine_2, -1) del 
combine_2 # pd_op.full_int_array: (2xi64) <- () full_int_array_0 = [-1, 4] - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) + # pd_op.reshape: (1156x4xf32) <- (34x34x4xf32, 2xi64) reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) del stack_0 # pd_op.full_int_array: (2xi64) <- () full_int_array_1 = [-1, 2] - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) + # pd_op.reshape: (1156x2xf32) <- (34x34x2xf32, 2xi64) reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) del stack_1 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_0 = paddle._C_ops.shape64(reshape_0) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del shape64_0 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() + # pd_op.full: (1156x1xf32) <- () + full_5 = paddle._C_ops.full( + [1156, 1], + float("32"), + paddle.float32, + paddle.framework._current_expected_place(), ) - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_0, full_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 + # pd_op.full: (1xf64) <- () + full_6 = paddle._C_ops.full( + [1], float("68"), paddle.float64, paddle.core.CPUPlace() ) - del full_3, stack_2 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_2 = paddle.arange(full_0, data_3, full_1, dtype="int64") - del data_3 + # pd_op.arange: (68xi64) <- (1xf64, 1xf64, 1xf64) + arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") + del full_6 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 + # pd_op.cast: (68xf32) <- (68xi64) + cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) + del arange_1 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_8 = paddle._C_ops.scale(cast_2, full_2, float("0.5"), True) - del cast_2 + # pd_op.scale: (68xf32) <- (68xf32, 1xf32) + scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) + del cast_1 # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( + full_7 = paddle._C_ops.full( [1], float("16"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_9 = paddle._C_ops.scale(scale_8, full_5, float("0"), True) - del scale_8 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_3 = paddle.arange(full_0, data_2, full_1, dtype="int64") - del data_2 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_3 = paddle._C_ops.cast(arange_3, paddle.float32) - del arange_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_10 = paddle._C_ops.scale(cast_3, full_2, float("0.5"), True) - del cast_3 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_11 = paddle._C_ops.scale(scale_10, full_5, float("0"), True) - del scale_10 + # pd_op.scale: (68xf32) <- (68xf32, 1xf32) + scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) + del full_7, scale_6 - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_4 = [scale_11, scale_9] - del scale_11, scale_9 + # builtin.combine: ([68xf32, 68xf32]) <- (68xf32, 68xf32) + combine_3 = [scale_7, scale_7] + del scale_7 - # pd_op.meshgrid: ([-1x-1xf32, 
-1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_4) - del combine_4 + # pd_op.meshgrid: ([68x68xf32, 68x68xf32]) <- ([68xf32, 68xf32]) + meshgrid_1 = paddle._C_ops.meshgrid(combine_3) + del combine_3 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # builtin.split: (68x68xf32, 68x68xf32) <- ([68x68xf32, 68x68xf32]) ( split_2, split_3, ) = meshgrid_1 del meshgrid_1 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_12 = paddle._C_ops.scale(split_3, full_2, float("-40"), True) + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_13 = paddle._C_ops.scale(split_2, full_2, float("-40"), True) + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_9 = paddle._C_ops.scale(split_2, full_3, float("-40"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_14 = paddle._C_ops.scale(split_3, full_2, float("40"), True) + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_2, full_2, float("40"), True) + # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) + scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_5 = [scale_12, scale_13, scale_14, scale_15] - del scale_12, scale_13, scale_14, scale_15 + # builtin.combine: ([68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32]) <- (68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32) + combine_4 = [scale_8, scale_9, scale_10, scale_11] + del scale_10, scale_11, scale_8, scale_9 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 + # pd_op.stack: (68x68x4xf32) <- ([68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32]) + stack_2 = paddle._C_ops.stack(combine_4, -1) + del combine_4 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_6 = [split_3, split_2] + # builtin.combine: ([68x68xf32, 68x68xf32]) <- (68x68xf32, 68x68xf32) + combine_5 = [split_3, split_2] del split_2, split_3 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_4 = paddle._C_ops.stack(combine_6, -1) - del combine_6 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_3, full_int_array_0) - del stack_3 + # pd_op.stack: (68x68x2xf32) <- ([68x68xf32, 68x68xf32]) + stack_3 = paddle._C_ops.stack(combine_5, -1) + del combine_5 - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_4, full_int_array_1) - del stack_4 + # pd_op.reshape: (4624x4xf32) <- (68x68x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) + del stack_2 - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_1 = paddle._C_ops.shape64(reshape_2) + # pd_op.reshape: (4624x2xf32) <- (68x68x2xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) + del stack_3 - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_2, full_int_array_3, [1], [0] + # pd_op.full: (4624x1xf32) <- () + full_8 = paddle._C_ops.full( + [4624, 1], + float("16"), + paddle.float32, + paddle.framework._current_expected_place(), ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- 
(xi64, xi64) - combine_7 = [slice_1, full_4] - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_5, stack_5, paddle.float32 + # pd_op.full: (1xf64) <- () + full_9 = paddle._C_ops.full( + [1], float("136"), paddle.float64, paddle.core.CPUPlace() ) - del full_5, stack_5 - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_4 = paddle.arange(full_0, data_5, full_1, dtype="int64") - del data_5 + # pd_op.arange: (136xi64) <- (1xf64, 1xf64, 1xf64) + arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") + del full_0, full_2, full_9 - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_4 = paddle._C_ops.cast(arange_4, paddle.float32) - del arange_4 + # pd_op.cast: (136xf32) <- (136xi64) + cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) + del arange_2 - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_16 = paddle._C_ops.scale(cast_4, full_2, float("0.5"), True) - del cast_4 + # pd_op.scale: (136xf32) <- (136xf32, 1xf32) + scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) + del cast_2 # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( + full_10 = paddle._C_ops.full( [1], float("8"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_17 = paddle._C_ops.scale(scale_16, full_6, float("0"), True) - del scale_16 - - # pd_op.arange: (-1xi64) <- (1xi64, xi64, 1xi64) - arange_5 = paddle.arange(full_0, data_4, full_1, dtype="int64") - del data_4, full_0, full_1 - - # pd_op.cast: (-1xf32) <- (-1xi64) - cast_5 = paddle._C_ops.cast(arange_5, paddle.float32) - del arange_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_18 = paddle._C_ops.scale(cast_5, full_2, float("0.5"), True) - del cast_5 - - # pd_op.scale: (-1xf32) <- (-1xf32, 1xf32) - scale_19 = paddle._C_ops.scale(scale_18, full_6, float("0"), True) - del scale_18 + # pd_op.scale: (136xf32) <- (136xf32, 1xf32) + scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) + del full_10, scale_12 - # builtin.combine: ([-1xf32, -1xf32]) <- (-1xf32, -1xf32) - combine_8 = [scale_19, scale_17] - del scale_17, scale_19 + # builtin.combine: ([136xf32, 136xf32]) <- (136xf32, 136xf32) + combine_6 = [scale_13, scale_13] + del scale_13 - # pd_op.meshgrid: ([-1x-1xf32, -1x-1xf32]) <- ([-1xf32, -1xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_8) - del combine_8 + # pd_op.meshgrid: ([136x136xf32, 136x136xf32]) <- ([136xf32, 136xf32]) + meshgrid_2 = paddle._C_ops.meshgrid(combine_6) + del combine_6 - # builtin.split: (-1x-1xf32, -1x-1xf32) <- ([-1x-1xf32, -1x-1xf32]) + # builtin.split: (136x136xf32, 136x136xf32) <- ([136x136xf32, 136x136xf32]) ( split_4, split_5, ) = meshgrid_2 del meshgrid_2 - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_20 = paddle._C_ops.scale(split_5, full_2, float("-20"), True) + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_14 = paddle._C_ops.scale(split_5, full_3, float("-20"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_21 = paddle._C_ops.scale(split_4, full_2, float("-20"), True) + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_22 = paddle._C_ops.scale(split_5, full_2, float("20"), True) + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_16 = paddle._C_ops.scale(split_5, full_3, 
float("20"), True) - # pd_op.scale: (-1x-1xf32) <- (-1x-1xf32, 1xf32) - scale_23 = paddle._C_ops.scale(split_4, full_2, float("20"), True) - del full_2 + # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) + scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) + del full_3 - # builtin.combine: ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32) - combine_9 = [scale_20, scale_21, scale_22, scale_23] - del scale_20, scale_21, scale_22, scale_23 + # builtin.combine: ([136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32]) <- (136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32) + combine_7 = [scale_14, scale_15, scale_16, scale_17] + del scale_14, scale_15, scale_16, scale_17 - # pd_op.stack: (-1x-1x4xf32) <- ([-1x-1xf32, -1x-1xf32, -1x-1xf32, -1x-1xf32]) - stack_6 = paddle._C_ops.stack(combine_9, -1) - del combine_9 + # pd_op.stack: (136x136x4xf32) <- ([136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32]) + stack_4 = paddle._C_ops.stack(combine_7, -1) + del combine_7 - # builtin.combine: ([-1x-1xf32, -1x-1xf32]) <- (-1x-1xf32, -1x-1xf32) - combine_10 = [split_5, split_4] + # builtin.combine: ([136x136xf32, 136x136xf32]) <- (136x136xf32, 136x136xf32) + combine_8 = [split_5, split_4] del split_4, split_5 - # pd_op.stack: (-1x-1x2xf32) <- ([-1x-1xf32, -1x-1xf32]) - stack_7 = paddle._C_ops.stack(combine_10, -1) - del combine_10 - - # pd_op.reshape: (-1x4xf32) <- (-1x-1x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_6, full_int_array_0) - del full_int_array_0, stack_6 - - # pd_op.reshape: (-1x2xf32) <- (-1x-1x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_7, full_int_array_1) - del full_int_array_1, stack_7 - - # pd_op.shape64: (2xi64) <- (-1x4xf32) - shape64_2 = paddle._C_ops.shape64(reshape_4) - - # pd_op.slice: (xi64) <- (2xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_2 + # pd_op.stack: (136x136x2xf32) <- ([136x136xf32, 136x136xf32]) + stack_5 = paddle._C_ops.stack(combine_8, -1) + del combine_8 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_11 = [slice_2, full_4] - del full_4 + # pd_op.reshape: (18496x4xf32) <- (136x136x4xf32, 2xi64) + reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) + del full_int_array_0, stack_4 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_11, 0) - del combine_11 + # pd_op.reshape: (18496x2xf32) <- (136x136x2xf32, 2xi64) + reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) + del full_int_array_1, stack_5 - # pd_op.full_with_tensor: (-1x1xf32) <- (1xf32, 2xi64) - full_with_tensor_2 = paddle._C_ops.full_with_tensor( - full_6, stack_8, paddle.float32 + # pd_op.full: (18496x1xf32) <- () + full_11 = paddle._C_ops.full( + [18496, 1], + float("8"), + paddle.float32, + paddle.framework._current_expected_place(), ) - del full_6, stack_8 # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( + full_12 = paddle._C_ops.full( [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([-1x4xf32, -1x4xf32, -1x4xf32]) <- (-1x4xf32, -1x4xf32, -1x4xf32) - combine_12 = [reshape_0, reshape_2, reshape_4] - del reshape_0, reshape_2, reshape_4 + # builtin.combine: ([1156x4xf32, 4624x4xf32, 18496x4xf32]) <- (1156x4xf32, 4624x4xf32, 18496x4xf32) + combine_9 = [reshape_0, reshape_2, reshape_4] - # pd_op.concat: (-1x4xf32) <- ([-1x4xf32, -1x4xf32, -1x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_12, 
full_7) - del combine_12 + # pd_op.concat: (24276x4xf32) <- ([1156x4xf32, 4624x4xf32, 18496x4xf32], 1xi32) + concat_2 = paddle._C_ops.concat(combine_9, full_12) + del combine_9 - # builtin.combine: ([-1x2xf32, -1x2xf32, -1x2xf32]) <- (-1x2xf32, -1x2xf32, -1x2xf32) - combine_13 = [reshape_1, reshape_3, reshape_5] + # builtin.combine: ([1156x2xf32, 4624x2xf32, 18496x2xf32]) <- (1156x2xf32, 4624x2xf32, 18496x2xf32) + combine_10 = [reshape_1, reshape_3, reshape_5] del reshape_1, reshape_3, reshape_5 - # pd_op.concat: (-1x2xf32) <- ([-1x2xf32, -1x2xf32, -1x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_13, full_7) - del combine_13 + # pd_op.concat: (24276x2xf32) <- ([1156x2xf32, 4624x2xf32, 18496x2xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_10, full_12) + del combine_10 - # builtin.combine: ([-1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x1xf32, -1x1xf32, -1x1xf32) - combine_14 = [full_with_tensor_0, full_with_tensor_1, full_with_tensor_2] - del full_with_tensor_0, full_with_tensor_1, full_with_tensor_2 + # builtin.combine: ([1156x1xf32, 4624x1xf32, 18496x1xf32]) <- (1156x1xf32, 4624x1xf32, 18496x1xf32) + combine_11 = [full_5, full_8, full_11] + del full_11, full_5, full_8 - # pd_op.concat: (-1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_14, full_7) - del combine_14, full_7 + # pd_op.concat: (24276x1xf32) <- ([1156x1xf32, 4624x1xf32, 18496x1xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_11, full_12) + del combine_11, full_12 # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [1, 1] + full_int_array_2 = [1, 1] # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_4 + assign_0 = full_int_array_2 # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_4 + assign_1 = full_int_array_2 - # pd_op.pool2d: (1x768x1x1xf32) <- (1x768x-1x-1xf32, 2xi64) + # pd_op.pool2d: (1x768x1x1xf32) <- (1x768x34x34xf32, 2xi64) pool2d_0 = paddle._C_ops.pool2d( - data_6, - full_int_array_4, + data_0, + full_int_array_2, [1, 1], [0, 0], False, @@ -495,10 +401,10 @@ def forward( del parameter_53 # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [1, -1, 1, 1] + full_int_array_3 = [1, -1, 1, 1] # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_5) + reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) del parameter_52 # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) @@ -508,16 +414,16 @@ def forward( sigmoid_0 = paddle._C_ops.sigmoid(add_0) del add_0 - # pd_op.multiply: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_6, sigmoid_0) + # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) + multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - # pd_op.conv2d: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 768x768x1x1xf32) + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) conv2d_1 = paddle._C_ops.conv2d( multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_51 - # pd_op.batch_norm_: (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( batch_norm__0, batch_norm__1, @@ -545,23 +451,23 @@ def forward( ) del parameter_47, parameter_48, parameter_49, parameter_50 - # pd_op.swish: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32) 
+ # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) swish_0 = paddle._C_ops.swish(batch_norm__0) - # pd_op.add: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_0, data_6) + # pd_op.add: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x34x34xf32) + add_1 = paddle._C_ops.add(swish_0, data_0) - # pd_op.conv2d: (1x10x-1x-1xf32) <- (1x768x-1x-1xf32, 10x768x3x3xf32) + # pd_op.conv2d: (1x10x34x34xf32) <- (1x768x34x34xf32, 10x768x3x3xf32) conv2d_2 = paddle._C_ops.conv2d( add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_46 # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_5) + reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) del parameter_45 - # pd_op.add: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32, 1x10x1x1xf32) + # pd_op.add: (1x10x34x34xf32) <- (1x10x34x34xf32, 1x10x1x1xf32) add_2 = paddle._C_ops.add(conv2d_2, reshape_7) # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) @@ -571,7 +477,7 @@ def forward( del parameter_44 # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_5) + reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) del parameter_43 # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) @@ -581,17 +487,17 @@ def forward( sigmoid_1 = paddle._C_ops.sigmoid(add_3) del add_3 - # pd_op.multiply: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 1x768x1x1xf32) - multiply_1 = paddle._C_ops.multiply(data_6, sigmoid_1) - del data_6 + # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) + multiply_1 = paddle._C_ops.multiply(data_0, sigmoid_1) + del data_0 - # pd_op.conv2d: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 768x768x1x1xf32) + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) conv2d_4 = paddle._C_ops.conv2d( multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_42 - # pd_op.batch_norm_: (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) ( batch_norm__6, batch_norm__7, @@ -619,44 +525,44 @@ def forward( ) del parameter_38, parameter_39, parameter_40, parameter_41 - # pd_op.swish: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32) + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) swish_1 = paddle._C_ops.swish(batch_norm__6) - # pd_op.conv2d: (1x88x-1x-1xf32) <- (1x768x-1x-1xf32, 88x768x3x3xf32) + # pd_op.conv2d: (1x88x34x34xf32) <- (1x768x34x34xf32, 88x768x3x3xf32) conv2d_5 = paddle._C_ops.conv2d( swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_37 # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_5) + reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) del parameter_36 - # pd_op.add: (1x88x-1x-1xf32) <- (1x88x-1x-1xf32, 1x88x1x1xf32) + # pd_op.add: (1x88x34x34xf32) <- (1x88x34x34xf32, 1x88x1x1xf32) add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - # pd_op.sigmoid: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32) + # pd_op.sigmoid: (1x10x34x34xf32) <- (1x10x34x34xf32) sigmoid_2 = paddle._C_ops.sigmoid(add_2) del add_2 - # pd_op.flatten: (1x10x-1xf32) <- (1x10x-1x-1xf32) + # pd_op.flatten: (1x10x1156xf32) <- (1x10x34x34xf32) flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - # 
pd_op.transpose: (1x-1x10xf32) <- (1x10x-1xf32) + # pd_op.transpose: (1x1156x10xf32) <- (1x10x1156xf32) transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) del flatten_0 - # pd_op.flatten: (1x88x-1xf32) <- (1x88x-1x-1xf32) + # pd_op.flatten: (1x88x1156xf32) <- (1x88x34x34xf32) flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - # pd_op.transpose: (1x-1x88xf32) <- (1x88x-1xf32) + # pd_op.transpose: (1x1156x88xf32) <- (1x88x1156xf32) transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) del flatten_1 - # pd_op.pool2d: (1x384x1x1xf32) <- (1x384x-1x-1xf32, 2xi64) + # pd_op.pool2d: (1x384x1x1xf32) <- (1x384x68x68xf32, 2xi64) pool2d_1 = paddle._C_ops.pool2d( - data_7, - full_int_array_4, + data_1, + full_int_array_2, [1, 1], [0, 0], False, @@ -675,7 +581,7 @@ def forward( del parameter_35 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_5) + reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) del parameter_34 # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) @@ -685,16 +591,16 @@ def forward( sigmoid_3 = paddle._C_ops.sigmoid(add_5) del add_5 - # pd_op.multiply: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_7, sigmoid_3) + # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) + multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) conv2d_7 = paddle._C_ops.conv2d( multiply_2, parameter_33, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_33 - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__12, batch_norm__13, @@ -722,23 +628,23 @@ def forward( ) del parameter_29, parameter_30, parameter_31, parameter_32 - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) swish_2 = paddle._C_ops.swish(batch_norm__12) - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_6 = paddle._C_ops.add(swish_2, data_7) + # pd_op.add: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x68x68xf32) + add_6 = paddle._C_ops.add(swish_2, data_1) - # pd_op.conv2d: (1x10x-1x-1xf32) <- (1x384x-1x-1xf32, 10x384x3x3xf32) + # pd_op.conv2d: (1x10x68x68xf32) <- (1x384x68x68xf32, 10x384x3x3xf32) conv2d_8 = paddle._C_ops.conv2d( add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_28 # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_5) + reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) del parameter_27 - # pd_op.add: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32, 1x10x1x1xf32) + # pd_op.add: (1x10x68x68xf32) <- (1x10x68x68xf32, 1x10x1x1xf32) add_7 = paddle._C_ops.add(conv2d_8, reshape_11) # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) @@ -748,7 +654,7 @@ def forward( del parameter_26 # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_5) + reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) del parameter_25 # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) @@ -758,17 
+664,17 @@ def forward( sigmoid_4 = paddle._C_ops.sigmoid(add_8) del add_8 - # pd_op.multiply: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_7, sigmoid_4) - del data_7 + # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) + multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) + del data_1 - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) conv2d_10 = paddle._C_ops.conv2d( multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_24 - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) ( batch_norm__18, batch_norm__19, @@ -796,44 +702,44 @@ def forward( ) del parameter_20, parameter_21, parameter_22, parameter_23 - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) swish_3 = paddle._C_ops.swish(batch_norm__18) - # pd_op.conv2d: (1x88x-1x-1xf32) <- (1x384x-1x-1xf32, 88x384x3x3xf32) + # pd_op.conv2d: (1x88x68x68xf32) <- (1x384x68x68xf32, 88x384x3x3xf32) conv2d_11 = paddle._C_ops.conv2d( swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_19 # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_5) + reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) del parameter_18 - # pd_op.add: (1x88x-1x-1xf32) <- (1x88x-1x-1xf32, 1x88x1x1xf32) + # pd_op.add: (1x88x68x68xf32) <- (1x88x68x68xf32, 1x88x1x1xf32) add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - # pd_op.sigmoid: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32) + # pd_op.sigmoid: (1x10x68x68xf32) <- (1x10x68x68xf32) sigmoid_5 = paddle._C_ops.sigmoid(add_7) del add_7 - # pd_op.flatten: (1x10x-1xf32) <- (1x10x-1x-1xf32) + # pd_op.flatten: (1x10x4624xf32) <- (1x10x68x68xf32) flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - # pd_op.transpose: (1x-1x10xf32) <- (1x10x-1xf32) + # pd_op.transpose: (1x4624x10xf32) <- (1x10x4624xf32) transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) del flatten_2 - # pd_op.flatten: (1x88x-1xf32) <- (1x88x-1x-1xf32) + # pd_op.flatten: (1x88x4624xf32) <- (1x88x68x68xf32) flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - # pd_op.transpose: (1x-1x88xf32) <- (1x88x-1xf32) + # pd_op.transpose: (1x4624x88xf32) <- (1x88x4624xf32) transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) del flatten_3 - # pd_op.pool2d: (1x192x1x1xf32) <- (1x192x-1x-1xf32, 2xi64) + # pd_op.pool2d: (1x192x1x1xf32) <- (1x192x136x136xf32, 2xi64) pool2d_2 = paddle._C_ops.pool2d( - data_8, - full_int_array_4, + data_2, + full_int_array_2, [1, 1], [0, 0], False, @@ -852,7 +758,7 @@ def forward( del parameter_17 # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_5) + reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) del parameter_16 # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) @@ -862,16 +768,16 @@ def forward( sigmoid_6 = paddle._C_ops.sigmoid(add_10) del add_10 - # pd_op.multiply: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_8, sigmoid_6) + # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 
1x192x1x1xf32) + multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) conv2d_13 = paddle._C_ops.conv2d( multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_15 - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__24, batch_norm__25, @@ -899,23 +805,23 @@ def forward( ) del parameter_11, parameter_12, parameter_13, parameter_14 - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) swish_4 = paddle._C_ops.swish(batch_norm__24) - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_11 = paddle._C_ops.add(swish_4, data_8) + # pd_op.add: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x136x136xf32) + add_11 = paddle._C_ops.add(swish_4, data_2) - # pd_op.conv2d: (1x10x-1x-1xf32) <- (1x192x-1x-1xf32, 10x192x3x3xf32) + # pd_op.conv2d: (1x10x136x136xf32) <- (1x192x136x136xf32, 10x192x3x3xf32) conv2d_14 = paddle._C_ops.conv2d( add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_10 # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_5) + reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) del parameter_9 - # pd_op.add: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32, 1x10x1x1xf32) + # pd_op.add: (1x10x136x136xf32) <- (1x10x136x136xf32, 1x10x1x1xf32) add_12 = paddle._C_ops.add(conv2d_14, reshape_15) # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) @@ -925,7 +831,7 @@ def forward( del parameter_8 # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_5) + reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) del parameter_7 # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) @@ -935,17 +841,17 @@ def forward( sigmoid_7 = paddle._C_ops.sigmoid(add_13) del add_13 - # pd_op.multiply: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_8, sigmoid_7) - del data_8 + # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) + multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) + del data_2 - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) + # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) conv2d_16 = paddle._C_ops.conv2d( multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_6 - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) ( batch_norm__30, batch_norm__31, @@ -973,60 +879,60 @@ def forward( ) del parameter_2, parameter_3, parameter_4, parameter_5 - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) swish_5 = paddle._C_ops.swish(batch_norm__30) - # pd_op.conv2d: (1x88x-1x-1xf32) <- (1x192x-1x-1xf32, 88x192x3x3xf32) + # pd_op.conv2d: (1x88x136x136xf32) <- 
(1x192x136x136xf32, 88x192x3x3xf32) conv2d_17 = paddle._C_ops.conv2d( swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_1 # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_5) - del full_int_array_5, parameter_0 + reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) + del full_int_array_3, parameter_0 - # pd_op.add: (1x88x-1x-1xf32) <- (1x88x-1x-1xf32, 1x88x1x1xf32) + # pd_op.add: (1x88x136x136xf32) <- (1x88x136x136xf32, 1x88x1x1xf32) add_14 = paddle._C_ops.add(conv2d_17, reshape_17) - # pd_op.sigmoid: (1x10x-1x-1xf32) <- (1x10x-1x-1xf32) + # pd_op.sigmoid: (1x10x136x136xf32) <- (1x10x136x136xf32) sigmoid_8 = paddle._C_ops.sigmoid(add_12) del add_12 - # pd_op.flatten: (1x10x-1xf32) <- (1x10x-1x-1xf32) + # pd_op.flatten: (1x10x18496xf32) <- (1x10x136x136xf32) flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) - # pd_op.transpose: (1x-1x10xf32) <- (1x10x-1xf32) + # pd_op.transpose: (1x18496x10xf32) <- (1x10x18496xf32) transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) del flatten_4 - # pd_op.flatten: (1x88x-1xf32) <- (1x88x-1x-1xf32) + # pd_op.flatten: (1x88x18496xf32) <- (1x88x136x136xf32) flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) - # pd_op.transpose: (1x-1x88xf32) <- (1x88x-1xf32) + # pd_op.transpose: (1x18496x88xf32) <- (1x88x18496xf32) transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) del flatten_5 # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( + full_13 = paddle._C_ops.full( [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_8 + assign_2 = full_13 - # builtin.combine: ([1x-1x10xf32, 1x-1x10xf32, 1x-1x10xf32]) <- (1x-1x10xf32, 1x-1x10xf32, 1x-1x10xf32) - combine_15 = [transpose_0, transpose_2, transpose_4] + # builtin.combine: ([1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32]) <- (1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32) + combine_12 = [transpose_0, transpose_2, transpose_4] - # pd_op.concat: (1x-1x10xf32) <- ([1x-1x10xf32, 1x-1x10xf32, 1x-1x10xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_15, full_8) - del combine_15 + # pd_op.concat: (1x24276x10xf32) <- ([1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_12, full_13) + del combine_12 - # builtin.combine: ([1x-1x88xf32, 1x-1x88xf32, 1x-1x88xf32]) <- (1x-1x88xf32, 1x-1x88xf32, 1x-1x88xf32) - combine_16 = [transpose_1, transpose_3, transpose_5] + # builtin.combine: ([1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32]) <- (1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32) + combine_13 = [transpose_1, transpose_3, transpose_5] - # pd_op.concat: (1x-1x88xf32) <- ([1x-1x88xf32, 1x-1x88xf32, 1x-1x88xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_16, full_8) + # pd_op.concat: (1x24276x88xf32) <- ([1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_13, full_13) del ( add_1, add_11, @@ -1073,7 +979,7 @@ def forward( batch_norm__7, batch_norm__8, batch_norm__9, - combine_16, + combine_13, conv2d_0, conv2d_1, conv2d_10, @@ -1092,8 +998,8 @@ def forward( conv2d_7, conv2d_8, conv2d_9, - full_8, - full_int_array_4, + full_13, + full_int_array_2, multiply_0, multiply_1, multiply_2, @@ -1103,6 +1009,7 @@ def forward( pool2d_0, pool2d_1, pool2d_2, + reshape_0, reshape_10, reshape_11, reshape_12, @@ -1111,6 +1018,8 @@ def forward( reshape_15, reshape_16, reshape_17, + reshape_2, + reshape_4, reshape_6, reshape_7, reshape_8, @@ -1124,9 +1033,6 @@ def 
forward( sigmoid_6, sigmoid_7, sigmoid_8, - slice_0, - slice_1, - slice_2, swish_0, swish_1, swish_2, diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py index 8e33e8e3a..433ec9a1b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_1/weight_meta.py @@ -46,10 +46,10 @@ class Program_weight_tensor_parameter_4: name = "parameter_4" shape = [192] dtype = "float32" - min_val = float("0.000193476") - max_val = float("0.00844475") - mean = float("0.00135281") - std = float("0.00118991") + min_val = float("0.000189735") + max_val = float("0.00828383") + mean = float("0.00133599") + std = float("0.00117081") data = None @@ -57,10 +57,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [192] dtype = "float32" - min_val = float("-0.0754706") - max_val = float("0.0319285") - mean = float("-0.0124719") - std = float("0.0176434") + min_val = float("-0.0753393") + max_val = float("0.0318059") + mean = float("-0.0124341") + std = float("0.0175849") data = None @@ -143,10 +143,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [192] dtype = "float32" - min_val = float("0.000400596") - max_val = float("0.0113603") - mean = float("0.00180946") - std = float("0.00164804") + min_val = float("0.000388466") + max_val = float("0.011235") + mean = float("0.00179138") + std = float("0.00163858") data = None @@ -154,10 +154,10 @@ class Program_weight_tensor_parameter_14: name = "parameter_14" shape = [192] dtype = "float32" - min_val = float("-0.164541") - max_val = float("0.127897") - mean = float("-0.00423752") - std = float("0.0391887") + min_val = float("-0.16391") + max_val = float("0.127577") + mean = float("-0.00419185") + std = float("0.0390567") data = None @@ -242,10 +242,10 @@ class Program_weight_tensor_parameter_22: name = "parameter_22" shape = [384] dtype = "float32" - min_val = float("8.35707e-05") - max_val = float("0.00597629") - mean = float("0.000723624") - std = float("0.000763811") + min_val = float("8.30341e-05") + max_val = float("0.00596526") + mean = float("0.000721317") + std = float("0.000758143") data = None @@ -253,10 +253,10 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [384] dtype = "float32" - min_val = float("-0.0442923") - max_val = float("0.00827867") - mean = float("-0.00966063") - std = float("0.00931597") + min_val = float("-0.044286") + max_val = float("0.00828286") + mean = float("-0.00965973") + std = float("0.0093075") data = None @@ -339,10 +339,10 @@ class Program_weight_tensor_parameter_31: name = "parameter_31" shape = [384] dtype = "float32" - min_val = float("0.000148108") - max_val = float("0.00569047") - mean = float("0.000833938") - std = float("0.000746994") + min_val = float("0.000150697") + max_val = float("0.00568262") + mean = float("0.000831286") + std = float("0.000745356") data = None @@ -350,10 +350,10 @@ class Program_weight_tensor_parameter_32: name = "parameter_32" shape = [384] dtype = "float32" - min_val = float("-0.0987288") - max_val = float("0.0644616") - mean = float("-0.0146565") - std = float("0.0228018") + min_val = float("-0.0985161") + max_val = float("0.0644014") + mean = float("-0.0146302") + std = float("0.022773") data = None @@ -438,10 +438,10 @@ class Program_weight_tensor_parameter_40: name = "parameter_40" shape = [768] dtype = 
"float32" - min_val = float("3.23648e-05") - max_val = float("0.00119716") - mean = float("0.00013138") - std = float("9.50346e-05") + min_val = float("3.16922e-05") + max_val = float("0.00119994") + mean = float("0.000132026") + std = float("9.51389e-05") data = None @@ -449,10 +449,10 @@ class Program_weight_tensor_parameter_41: name = "parameter_41" shape = [768] dtype = "float32" - min_val = float("-0.0210089") - max_val = float("0.00349173") - mean = float("-0.00472837") - std = float("0.00312975") + min_val = float("-0.0209402") + max_val = float("0.00348513") + mean = float("-0.00472064") + std = float("0.00312486") data = None @@ -535,10 +535,10 @@ class Program_weight_tensor_parameter_49: name = "parameter_49" shape = [768] dtype = "float32" - min_val = float("7.25456e-05") - max_val = float("0.00245344") - mean = float("0.000534885") - std = float("0.000318044") + min_val = float("7.42637e-05") + max_val = float("0.00246904") + mean = float("0.000537048") + std = float("0.000318384") data = None @@ -546,10 +546,10 @@ class Program_weight_tensor_parameter_50: name = "parameter_50" shape = [768] dtype = "float32" - min_val = float("-0.0725999") - max_val = float("0.0495439") - mean = float("-0.0155315") - std = float("0.0150006") + min_val = float("-0.0725538") + max_val = float("0.0495499") + mean = float("-0.0155163") + std = float("0.0149963") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt deleted file mode 100644 index 7376f9083..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -ecf2fbd10676ba1da33488b082d96ec1f1edbf59f2c45107f85006bf675778d2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json deleted file mode 100644 index 381598f86..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-largesize-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py deleted file mode 100644 index 80a95179a..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/model.py +++ /dev/null @@ -1,162 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, parameter_0, data_0, data_1, data_2, data_3): - # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) - divide_0 = paddle._C_ops.divide(data_2, data_3) - del data_2 - - # pd_op.shape64: (3xi64) <- (1x-1x88xf32) - shape64_0 = paddle._C_ops.shape64(data_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [2] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = 
paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - del full_int_array_2, full_int_array_3, shape64_0 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_2 = paddle._C_ops.full( - [], float("22"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_0 = [full_0, slice_1, full_1, full_2] - del full_0, full_1, full_2, slice_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.reshape: (-1x-1x4x22xf32) <- (1x-1x88xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(data_1, stack_0) - del data_1, stack_0 - - # pd_op.softmax: (-1x-1x4x22xf32) <- (-1x-1x4x22xf32) - softmax_0 = paddle._C_ops.softmax(reshape_0, -1) - del reshape_0 - - # pd_op.transpose: (-1x22x-1x4xf32) <- (-1x-1x4x22xf32) - transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - - # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x22x-1x4xf32, 1x22x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_0 - - # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) - del full_int_array_1 - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) - del squeeze_0 - - # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) - del split_0 - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_0 = paddle._C_ops.add(scale_0, divide_0) - - # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) - add_1 = paddle._C_ops.add(split_1, divide_0) - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) - combine_1 = [add_0, add_1] - - # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_5) - del combine_1 - - # pd_op.share_data_: (1x-1x10xf32) <- (1x-1x10xf32) - share_data__0 = data_0.detach() - del data_0 - - # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) - share_data__1 = concat_0.detach() - - # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) - del ( - add_0, - add_1, - assign_0, - concat_0, - conv2d_0, - data_3, - divide_0, - full_3, - full_4, - full_5, - scale_0, - share_data__1, - softmax_0, - split_1, - transpose_0, - ) - - return 
share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py deleted file mode 100644 index a3837d8b1..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_10/weight_meta.py +++ /dev/null @@ -1,7 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [1, 22, 1, 1] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt deleted file mode 100644 index 30aa43ee8..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -88501beeaec43b439a11d2de2e1804c791b519dccd7cb4a81a7324f69c27681f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json deleted file mode 100644 index 381598f86..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-largesize-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py deleted file mode 100644 index 66d018686..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/input_meta.py +++ /dev/null @@ -1,222 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [1] - dtype = "float32" - data = [0.724553] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [1] - dtype = "float32" - data = [0.710696] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [1] - dtype = "float32" - data = [0.69274] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [1] - dtype = "float32" - data = [0.697763] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [1] - dtype = "float32" - data = [0.67767] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [1] - dtype = "float32" - data = [0.628229] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1] - dtype = "float32" - data = [0.643942] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1] - dtype = "float32" - data = [0.633569] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1] - dtype = "float32" - data = [0.801205] - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [1] - dtype = "float32" - data = [0.652613] - - -class Program_weight_tensor_data_10: - name = "data_10" - shape = [1] - dtype = "float32" - data = [0.636874] - - -class Program_weight_tensor_data_11: - name = "data_11" - shape = [1] - dtype = "float32" - data = [0.631148] - - -class Program_weight_tensor_data_12: - name = "data_12" - shape = [1] - dtype = "float32" - data = [0.635341] - - -class Program_weight_tensor_data_13: - name = "data_13" - shape = [1] - dtype = "float32" - data = [0.640054] - - -class Program_weight_tensor_data_14: - name = "data_14" - shape = [1] - dtype = "float32" - data = [0.755822] - - -class 
Program_weight_tensor_data_15: - name = "data_15" - shape = [1] - dtype = "float32" - data = [0.575326] - - -class Program_weight_tensor_data_16: - name = "data_16" - shape = [1] - dtype = "float32" - data = [0.59257] - - -class Program_weight_tensor_data_17: - name = "data_17" - shape = [1] - dtype = "float32" - data = [0.72331] - - -class Program_weight_tensor_data_18: - name = "data_18" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.033771") - max_val = float("0.0342897") - mean = float("-1.71997e-05") - std = float("0.0182992") - data = None - - -class Program_weight_tensor_data_19: - name = "data_19" - shape = [3072] - dtype = "float32" - min_val = float("-0.000858009") - max_val = float("0.000895398") - mean = float("1.43686e-06") - std = float("0.000180851") - data = None - - -class Program_weight_tensor_data_20: - name = "data_20" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0324395") - max_val = float("0.0323104") - mean = float("-1.57215e-05") - std = float("0.0182981") - data = None - - -class Program_weight_tensor_data_21: - name = "data_21" - shape = [3072] - dtype = "float32" - min_val = float("-0.000630237") - max_val = float("0.000514313") - mean = float("2.76087e-06") - std = float("0.000126903") - data = None - - -class Program_weight_tensor_data_22: - name = "data_22" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0321875") - max_val = float("0.0321786") - mean = float("-1.59553e-05") - std = float("0.0182975") - data = None - - -class Program_weight_tensor_data_23: - name = "data_23" - shape = [3072] - dtype = "float32" - min_val = float("-0.000429784") - max_val = float("0.00042718") - mean = float("1.59216e-06") - std = float("8.8453e-05") - data = None - - -class Program_weight_tensor_data_24: - name = "data_24" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0321313") - max_val = float("0.0321203") - mean = float("-1.62062e-05") - std = float("0.018297") - data = None - - -class Program_weight_tensor_data_25: - name = "data_25" - shape = [3072] - dtype = "float32" - min_val = float("-0.000397408") - max_val = float("0.000488986") - mean = float("1.04643e-06") - std = float("8.30004e-05") - data = None - - -class Program_weight_tensor_data_26: - name = "data_26" - shape = [1, 3, 1088, 1088] - dtype = "float32" - max_val = float("1.0") - mean = float("0.443477") - std = float("0.162527") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py deleted file mode 100644 index 55b437f95..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/model.py +++ /dev/null @@ -1,8874 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - 
parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - 
parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - parameter_373, - parameter_374, - parameter_375, - parameter_376, - parameter_377, - parameter_378, - parameter_379, - parameter_380, - parameter_381, - parameter_382, - parameter_383, - parameter_384, - parameter_385, - parameter_386, - parameter_387, - parameter_388, - parameter_389, - parameter_390, - parameter_391, - parameter_392, - parameter_393, - parameter_394, - parameter_395, - parameter_396, - parameter_397, - parameter_398, - parameter_399, - parameter_400, - parameter_401, - parameter_402, - parameter_403, - parameter_404, - parameter_405, - parameter_406, - parameter_407, - parameter_408, - parameter_409, - parameter_410, - parameter_411, - parameter_412, - parameter_413, - parameter_414, - parameter_415, - parameter_416, - parameter_417, - parameter_418, - parameter_419, - parameter_420, - parameter_421, - parameter_422, - parameter_423, - parameter_424, - parameter_425, - parameter_426, - parameter_427, - parameter_428, - parameter_429, - parameter_430, - parameter_431, - parameter_432, - parameter_433, - parameter_434, - parameter_435, - parameter_436, - parameter_437, - parameter_438, - parameter_439, - parameter_440, - parameter_441, - parameter_442, - parameter_443, - parameter_444, - parameter_445, - parameter_446, - parameter_447, - parameter_448, - parameter_449, - parameter_450, - parameter_451, - parameter_452, - parameter_453, - parameter_454, - parameter_455, - parameter_456, - parameter_457, - parameter_458, - parameter_459, - parameter_460, - parameter_461, - parameter_462, - 
parameter_463, - parameter_464, - parameter_465, - parameter_466, - parameter_467, - parameter_468, - parameter_469, - parameter_470, - parameter_471, - parameter_472, - parameter_473, - parameter_474, - parameter_475, - parameter_476, - parameter_477, - parameter_478, - parameter_479, - parameter_480, - parameter_481, - parameter_482, - parameter_483, - parameter_484, - parameter_485, - parameter_486, - parameter_487, - parameter_488, - parameter_489, - parameter_490, - parameter_491, - parameter_492, - parameter_493, - parameter_494, - parameter_495, - parameter_496, - parameter_497, - parameter_498, - parameter_499, - parameter_500, - parameter_501, - parameter_502, - parameter_503, - parameter_504, - parameter_505, - parameter_506, - parameter_507, - parameter_508, - parameter_509, - parameter_510, - parameter_511, - parameter_512, - parameter_513, - parameter_514, - parameter_515, - parameter_516, - parameter_517, - parameter_518, - parameter_519, - parameter_520, - parameter_521, - parameter_522, - parameter_523, - parameter_524, - parameter_525, - parameter_526, - parameter_527, - parameter_528, - parameter_529, - parameter_530, - parameter_531, - parameter_532, - parameter_533, - parameter_534, - parameter_535, - parameter_536, - parameter_537, - parameter_538, - parameter_539, - parameter_540, - parameter_541, - parameter_542, - parameter_543, - parameter_544, - parameter_545, - parameter_546, - parameter_547, - parameter_548, - parameter_549, - parameter_550, - parameter_551, - parameter_552, - parameter_553, - parameter_554, - parameter_555, - parameter_556, - parameter_557, - parameter_558, - parameter_559, - parameter_560, - parameter_561, - parameter_562, - parameter_563, - parameter_564, - parameter_565, - parameter_566, - parameter_567, - parameter_568, - parameter_569, - parameter_570, - parameter_571, - parameter_572, - parameter_573, - parameter_574, - parameter_575, - parameter_576, - parameter_577, - parameter_578, - parameter_579, - parameter_580, - parameter_581, - parameter_582, - parameter_583, - parameter_584, - parameter_585, - parameter_586, - parameter_587, - parameter_588, - parameter_589, - parameter_590, - parameter_591, - parameter_592, - parameter_593, - parameter_594, - parameter_595, - parameter_596, - parameter_597, - parameter_598, - parameter_599, - parameter_600, - parameter_601, - parameter_602, - parameter_603, - parameter_604, - parameter_605, - parameter_606, - parameter_607, - parameter_608, - parameter_609, - parameter_610, - parameter_611, - parameter_612, - parameter_613, - parameter_614, - parameter_615, - parameter_616, - parameter_617, - parameter_618, - parameter_619, - parameter_620, - parameter_621, - parameter_622, - parameter_623, - parameter_624, - parameter_625, - parameter_626, - parameter_627, - parameter_628, - parameter_629, - parameter_630, - parameter_631, - parameter_632, - parameter_633, - parameter_634, - parameter_635, - parameter_636, - parameter_637, - parameter_638, - parameter_639, - parameter_640, - parameter_641, - parameter_642, - parameter_643, - parameter_644, - parameter_645, - parameter_646, - parameter_647, - parameter_648, - parameter_649, - parameter_650, - parameter_651, - parameter_652, - parameter_653, - parameter_654, - parameter_655, - parameter_656, - parameter_657, - parameter_658, - parameter_659, - parameter_660, - parameter_661, - parameter_662, - parameter_663, - parameter_664, - parameter_665, - parameter_666, - parameter_667, - parameter_668, - parameter_669, - parameter_670, - parameter_671, - 
parameter_672, - parameter_673, - parameter_674, - parameter_675, - parameter_676, - parameter_677, - parameter_678, - parameter_679, - parameter_680, - parameter_681, - parameter_682, - parameter_683, - parameter_684, - parameter_685, - parameter_686, - parameter_687, - parameter_688, - parameter_689, - parameter_690, - parameter_691, - parameter_692, - parameter_693, - parameter_694, - parameter_695, - parameter_696, - parameter_697, - parameter_698, - parameter_699, - parameter_700, - parameter_701, - parameter_702, - parameter_703, - parameter_704, - parameter_705, - parameter_706, - parameter_707, - parameter_708, - parameter_709, - parameter_710, - parameter_711, - parameter_712, - parameter_713, - parameter_714, - parameter_715, - parameter_716, - parameter_717, - parameter_718, - parameter_719, - parameter_720, - parameter_721, - parameter_722, - parameter_723, - parameter_724, - parameter_725, - parameter_726, - parameter_727, - parameter_728, - parameter_729, - parameter_730, - parameter_731, - parameter_732, - parameter_733, - parameter_734, - parameter_735, - parameter_736, - parameter_737, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - data_11, - data_12, - data_13, - data_14, - data_15, - data_16, - data_17, - data_18, - data_19, - data_20, - data_21, - data_22, - data_23, - data_24, - data_25, - data_26, - ): - # pd_op.conv2d: (1x32x544x544xf32) <- (1x3x1088x1088xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_26, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_26, parameter_737 - - # pd_op.batch_norm_: (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_736, - parameter_735, - parameter_734, - parameter_733, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_733, parameter_734, parameter_735, parameter_736 - - # pd_op.swish: (1x32x544x544xf32) <- (1x32x544x544xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (1x32x544x544xf32) <- (1x32x544x544xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_732 - - # pd_op.batch_norm_: (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_731, - parameter_730, - parameter_729, - parameter_728, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_728, parameter_729, parameter_730, parameter_731 - - # pd_op.swish: (1x32x544x544xf32) <- (1x32x544x544xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (1x64x544x544xf32) <- (1x32x544x544xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_727 - - # pd_op.batch_norm_: (1x64x544x544xf32, 64xf32, 
64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x544x544xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_726, - parameter_725, - parameter_724, - parameter_723, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_723, parameter_724, parameter_725, parameter_726 - - # pd_op.swish: (1x64x544x544xf32) <- (1x64x544x544xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.conv2d: (1x96x272x272xf32) <- (1x64x544x544xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_722, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_722 - - # pd_op.batch_norm_: (1x96x272x272xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x272x272xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_721, - parameter_720, - parameter_719, - parameter_718, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_718, parameter_719, parameter_720, parameter_721 - - # pd_op.swish: (1x96x272x272xf32) <- (1x96x272x272xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x96x272x272xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_717 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_716, - parameter_715, - parameter_714, - parameter_713, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_713, parameter_714, parameter_715, parameter_716 - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x96x272x272xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_712 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_711, - parameter_710, - parameter_709, - parameter_708, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_708, parameter_709, parameter_710, parameter_711 - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (1x48x272x272xf32) <- 
(1x48x272x272xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_707 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_706, - parameter_705, - parameter_704, - parameter_703, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_703, parameter_704, parameter_705, parameter_706 - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_702 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_701, - parameter_700, - parameter_699, - parameter_698, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_698, parameter_699, parameter_700, parameter_701 - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_697 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_696, - parameter_695, - parameter_694, - parameter_693, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_693, parameter_694, parameter_695, parameter_696 - - # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) - multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del data_0 - - # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) - add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_8 = paddle._C_ops.swish(add_0) - - # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_692 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - 
paddle._C_ops.batch_norm( - conv2d_9, - parameter_691, - parameter_690, - parameter_689, - parameter_688, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_688, parameter_689, parameter_690, parameter_691 - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_9, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_687 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_686, - parameter_685, - parameter_684, - parameter_683, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_683, parameter_684, parameter_685, parameter_686 - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_682 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_681, - parameter_680, - parameter_679, - parameter_678, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_678, parameter_679, parameter_680, parameter_681 - - # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) - multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del data_1 - - # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) - add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_10 = paddle._C_ops.swish(add_2) - - # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) - add_3 = paddle._C_ops.add(add_1, swish_10) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_677 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_676, - parameter_675, - parameter_674, - parameter_673, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_673, parameter_674, parameter_675, parameter_676 - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_11 = 
paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_11, parameter_672, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_672 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_671, - parameter_670, - parameter_669, - parameter_668, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_668, parameter_669, parameter_670, parameter_671 - - # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_667, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_667 - - # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_666, - parameter_665, - parameter_664, - parameter_663, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_663, parameter_664, parameter_665, parameter_666 - - # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) - multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) - del data_2 - - # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) - add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) - - # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) - swish_12 = paddle._C_ops.swish(add_4) - - # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) - add_5 = paddle._C_ops.add(add_3, swish_12) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_3 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_4 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_5 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_6 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_7 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_0 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_0 - - # builtin.combine: ([1x48x272x272xf32, 1x48x272x272xf32]) <- (1x48x272x272xf32, 1x48x272x272xf32) - combine_0 = [swish_5, add_5] - - # pd_op.concat: (1x96x272x272xf32) <- ([1x48x272x272xf32, 1x48x272x272xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_13 = full_int_array_0 - - # pd_op.assign: 
(2xi64) <- (2xi64) - assign_14 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_15 = full_int_array_0 - - # pd_op.mean: (1x96x1x1xf32) <- (1x96x272x272xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (1x96x1x1xf32) <- (1x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_662, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_662 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_661, full_int_array_1) - del parameter_661 - - # pd_op.add: (1x96x1x1xf32) <- (1x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - - # pd_op.hardsigmoid: (1x96x1x1xf32) <- (1x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (1x96x272x272xf32) <- (1x96x272x272xf32, 1x96x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (1x128x272x272xf32) <- (1x96x272x272xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_3, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_660 - - # pd_op.batch_norm_: (1x128x272x272xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x272x272xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_659, - parameter_658, - parameter_657, - parameter_656, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_656, parameter_657, parameter_658, parameter_659 - - # pd_op.swish: (1x128x272x272xf32) <- (1x128x272x272xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - - # pd_op.conv2d: (1x192x136x136xf32) <- (1x128x272x272xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_13, parameter_655, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_655 - - # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_654, - parameter_653, - parameter_652, - parameter_651, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_651, parameter_652, parameter_653, parameter_654 - - # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x192x136x136xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_650, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_650 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - 
parameter_649, - parameter_648, - parameter_647, - parameter_646, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_646, parameter_647, parameter_648, parameter_649 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_15 = paddle._C_ops.swish(batch_norm__102) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x192x136x136xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_645 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_644, - parameter_643, - parameter_642, - parameter_641, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_641, parameter_642, parameter_643, parameter_644 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_16 = paddle._C_ops.swish(batch_norm__108) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_640 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_639, - parameter_638, - parameter_637, - parameter_636, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_636, parameter_637, parameter_638, parameter_639 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_17, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_635 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_634, - parameter_633, - parameter_632, - parameter_631, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_631, parameter_632, parameter_633, parameter_634 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_630 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - 
batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_629, - parameter_628, - parameter_627, - parameter_626, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_626, parameter_627, parameter_628, parameter_629 - - # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) - multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del data_3 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_18 = paddle._C_ops.swish(add_7) - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_8 = paddle._C_ops.add(swish_16, swish_18) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_625 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_624, - parameter_623, - parameter_622, - parameter_621, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_621, parameter_622, parameter_623, parameter_624 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_19, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_620 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_619, - parameter_618, - parameter_617, - parameter_616, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_616, parameter_617, parameter_618, parameter_619 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_615 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_614, - parameter_613, - parameter_612, - parameter_611, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
parameter_611, parameter_612, parameter_613, parameter_614 - - # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) - multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) - del data_4 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_20 = paddle._C_ops.swish(add_9) - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_10 = paddle._C_ops.add(add_8, swish_20) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_610 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_609, - parameter_608, - parameter_607, - parameter_606, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_606, parameter_607, parameter_608, parameter_609 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_21 = paddle._C_ops.swish(batch_norm__150) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_605 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_604, - parameter_603, - parameter_602, - parameter_601, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_601, parameter_602, parameter_603, parameter_604 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_21, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_600 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_599, - parameter_598, - parameter_597, - parameter_596, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_596, parameter_597, parameter_598, parameter_599 - - # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) - multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del data_5 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_22 = 
paddle._C_ops.swish(add_11) - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_12 = paddle._C_ops.add(add_10, swish_22) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_595 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_594, - parameter_593, - parameter_592, - parameter_591, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_591, parameter_592, parameter_593, parameter_594 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_590 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_589, - parameter_588, - parameter_587, - parameter_586, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_586, parameter_587, parameter_588, parameter_589 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_23, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_585 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_584, - parameter_583, - parameter_582, - parameter_581, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_581, parameter_582, parameter_583, parameter_584 - - # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) - multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) - del data_6 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_24 = paddle._C_ops.swish(add_13) - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_14 = paddle._C_ops.add(add_12, swish_24) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_580 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 
96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_579, - parameter_578, - parameter_577, - parameter_576, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_576, parameter_577, parameter_578, parameter_579 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_575 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_574, - parameter_573, - parameter_572, - parameter_571, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_571, parameter_572, parameter_573, parameter_574 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_570 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_569, - parameter_568, - parameter_567, - parameter_566, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_566, parameter_567, parameter_568, parameter_569 - - # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) - multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) - del data_7 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_26 = paddle._C_ops.swish(add_15) - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_16 = paddle._C_ops.add(add_14, swish_26) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_565 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_564, - parameter_563, - parameter_562, - parameter_561, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, 
- ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_561, parameter_562, parameter_563, parameter_564 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_27 = paddle._C_ops.swish(batch_norm__204) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_560, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_560 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_559, - parameter_558, - parameter_557, - parameter_556, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_556, parameter_557, parameter_558, parameter_559 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_27, parameter_555, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_555 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_554, - parameter_553, - parameter_552, - parameter_551, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_551, parameter_552, parameter_553, parameter_554 - - # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) - multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) - del data_8 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_28 = paddle._C_ops.swish(add_17) - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_18 = paddle._C_ops.add(add_16, swish_28) - - # builtin.combine: ([1x96x136x136xf32, 1x96x136x136xf32]) <- (1x96x136x136xf32, 1x96x136x136xf32) - combine_1 = [swish_15, add_18] - - # pd_op.concat: (1x192x136x136xf32) <- ([1x96x136x136xf32, 1x96x136x136xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 - - # pd_op.mean: (1x192x1x1xf32) <- (1x192x136x136xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - - # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_550, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_550 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_549, full_int_array_1) - del parameter_549 - - # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) - - # pd_op.hardsigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: 
(1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) - multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (1x256x136x136xf32) <- (1x192x136x136xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_10, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_548 - - # pd_op.batch_norm_: (1x256x136x136xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x136x136xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_547, - parameter_546, - parameter_545, - parameter_544, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_544, parameter_545, parameter_546, parameter_547 - - # pd_op.swish: (1x256x136x136xf32) <- (1x256x136x136xf32) - swish_29 = paddle._C_ops.swish(batch_norm__222) - - # pd_op.conv2d: (1x384x68x68xf32) <- (1x256x136x136xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_29, parameter_543, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_543 - - # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_542, - parameter_541, - parameter_540, - parameter_539, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_539, parameter_540, parameter_541, parameter_542 - - # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_538, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_538 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_537, - parameter_536, - parameter_535, - parameter_534, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_534, parameter_535, parameter_536, parameter_537 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_533 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - 
parameter_532, - parameter_531, - parameter_530, - parameter_529, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_529, parameter_530, parameter_531, parameter_532 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_528, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_528 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_527, - parameter_526, - parameter_525, - parameter_524, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_524, parameter_525, parameter_526, parameter_527 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_523 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_522, - parameter_521, - parameter_520, - parameter_519, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_519, parameter_520, parameter_521, parameter_522 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_518 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_517, - parameter_516, - parameter_515, - parameter_514, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_514, parameter_515, parameter_516, parameter_517 - - # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) - multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del data_9 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_34 = paddle._C_ops.swish(add_20) - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_21 = 
paddle._C_ops.add(swish_32, swish_34) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_513 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_512, - parameter_511, - parameter_510, - parameter_509, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_509, parameter_510, parameter_511, parameter_512 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_35 = paddle._C_ops.swish(batch_norm__264) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_35, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_508 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_507, - parameter_506, - parameter_505, - parameter_504, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_504, parameter_505, parameter_506, parameter_507 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_35, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_503 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_502, - parameter_501, - parameter_500, - parameter_499, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_499, parameter_500, parameter_501, parameter_502 - - # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) - multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) - del data_10 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_36 = paddle._C_ops.swish(add_22) - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_23 = paddle._C_ops.add(add_21, swish_36) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_498 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - 
batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_497, - parameter_496, - parameter_495, - parameter_494, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_494, parameter_495, parameter_496, parameter_497 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_37, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_493 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_492, - parameter_491, - parameter_490, - parameter_489, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_489, parameter_490, parameter_491, parameter_492 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_488 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_487, - parameter_486, - parameter_485, - parameter_484, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_484, parameter_485, parameter_486, parameter_487 - - # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) - multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) - del data_11 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_38 = paddle._C_ops.swish(add_24) - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_25 = paddle._C_ops.add(add_23, swish_38) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_483 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_482, - parameter_481, - parameter_480, - parameter_479, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, 
None, None, None, None), - ) - del parameter_479, parameter_480, parameter_481, parameter_482 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_39 = paddle._C_ops.swish(batch_norm__300) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_39, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_478 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_477, - parameter_476, - parameter_475, - parameter_474, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_474, parameter_475, parameter_476, parameter_477 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_39, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_473 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_472, - parameter_471, - parameter_470, - parameter_469, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_469, parameter_470, parameter_471, parameter_472 - - # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) - multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) - del data_12 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_40 = paddle._C_ops.swish(add_26) - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_27 = paddle._C_ops.add(add_25, swish_40) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_468 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_467, - parameter_466, - parameter_465, - parameter_464, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_464, parameter_465, parameter_466, parameter_467 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_41 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_41, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del 
parameter_463 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_462, - parameter_461, - parameter_460, - parameter_459, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_459, parameter_460, parameter_461, parameter_462 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_41, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_458 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_457, - parameter_456, - parameter_455, - parameter_454, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_454, parameter_455, parameter_456, parameter_457 - - # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) - multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) - del data_13 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_42 = paddle._C_ops.swish(add_28) - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_29 = paddle._C_ops.add(add_27, swish_42) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_453 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_452, - parameter_451, - parameter_450, - parameter_449, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_449, parameter_450, parameter_451, parameter_452 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_43 = paddle._C_ops.swish(batch_norm__336) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_43, parameter_448, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_448 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_447, - parameter_446, - parameter_445, - 
parameter_444, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_444, parameter_445, parameter_446, parameter_447 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_43, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_443 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_442, - parameter_441, - parameter_440, - parameter_439, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_439, parameter_440, parameter_441, parameter_442 - - # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) - multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) - del data_14 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_44 = paddle._C_ops.swish(add_30) - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_31 = paddle._C_ops.add(add_29, swish_44) - - # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) - combine_2 = [swish_31, add_31] - - # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (1x384x1x1xf32) <- (1x384x68x68xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_438, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_438 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_437, full_int_array_1) - del parameter_437 - - # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - - # pd_op.hardsigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) - multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (1x512x68x68xf32) <- (1x384x68x68xf32, 512x384x1x1xf32) - conv2d_62 = paddle._C_ops.conv2d( - multiply_17, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_436 - - # pd_op.batch_norm_: (1x512x68x68xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x68x68xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_435, - parameter_434, - parameter_433, - parameter_432, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, 
None, None, None), - ) - del parameter_432, parameter_433, parameter_434, parameter_435 - - # pd_op.swish: (1x512x68x68xf32) <- (1x512x68x68xf32) - swish_45 = paddle._C_ops.swish(batch_norm__354) - - # pd_op.conv2d: (1x768x34x34xf32) <- (1x512x68x68xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_431, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_431 - - # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_430, - parameter_429, - parameter_428, - parameter_427, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_427, parameter_428, parameter_429, parameter_430 - - # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) - swish_46 = paddle._C_ops.swish(batch_norm__360) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_46, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_426 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_425, - parameter_424, - parameter_423, - parameter_422, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_422, parameter_423, parameter_424, parameter_425 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_47 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_46, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_421 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_420, - parameter_419, - parameter_418, - parameter_417, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_417, parameter_418, parameter_419, parameter_420 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_48 = paddle._C_ops.swish(batch_norm__372) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_416 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda 
x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_415, - parameter_414, - parameter_413, - parameter_412, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_412, parameter_413, parameter_414, parameter_415 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_49 = paddle._C_ops.swish(batch_norm__378) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_49, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_411 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_410, - parameter_409, - parameter_408, - parameter_407, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_407, parameter_408, parameter_409, parameter_410 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_49, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_406 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_405, - parameter_404, - parameter_403, - parameter_402, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_402, parameter_403, parameter_404, parameter_405 - - # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) - multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) - del data_15 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_50 = paddle._C_ops.swish(add_33) - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_34 = paddle._C_ops.add(swish_48, swish_50) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_401 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_400, - parameter_399, - parameter_398, - parameter_397, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_397, parameter_398, parameter_399, parameter_400 - - # pd_op.swish: 
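# --- Illustrative reference (not part of the generated model file above) ---
# The graph above repeats one building block: conv2d -> batch_norm -> swish.
# The `(lambda x, f: f(x))(...)` wrapper only normalizes batch_norm's return
# value into a fixed-length tuple whether the op returns a tensor or a tuple.
# Minimal numpy sketch of inference-mode BN followed by swish; the helper name
# `bn_swish` and the toy random inputs are hypothetical, not from the graph.
import numpy as np

def bn_swish(x, gamma, beta, mean, var, eps=1e-5):
    # x is NCHW; normalize per channel, apply the affine, then swish(y) = y * sigmoid(y)
    x_hat = (x - mean[None, :, None, None]) / np.sqrt(var[None, :, None, None] + eps)
    y = gamma[None, :, None, None] * x_hat + beta[None, :, None, None]
    return y * (1.0 / (1.0 + np.exp(-y)))

x = np.random.rand(1, 384, 34, 34).astype("float32")
ones, zeros = np.ones(384, "float32"), np.zeros(384, "float32")
out = bn_swish(x, ones, zeros, zeros, ones)  # same shape as x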
(1x384x34x34xf32) <- (1x384x34x34xf32) - swish_51 = paddle._C_ops.swish(batch_norm__396) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_51, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_396 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_395, - parameter_394, - parameter_393, - parameter_392, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_392, parameter_393, parameter_394, parameter_395 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_51, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_391 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_390, - parameter_389, - parameter_388, - parameter_387, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_387, parameter_388, parameter_389, parameter_390 - - # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) - multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) - del data_16 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_52 = paddle._C_ops.swish(add_35) - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_36 = paddle._C_ops.add(add_34, swish_52) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_386, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_386 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_385, - parameter_384, - parameter_383, - parameter_382, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_382, parameter_383, parameter_384, parameter_385 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_53 = paddle._C_ops.swish(batch_norm__414) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_53, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_381 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_380, - parameter_379, - parameter_378, - parameter_377, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_377, parameter_378, parameter_379, parameter_380 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_53, parameter_376, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_376 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_375, - parameter_374, - parameter_373, - parameter_372, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_372, parameter_373, parameter_374, parameter_375 - - # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) - multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) - del data_17 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_54 = paddle._C_ops.swish(add_37) - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_38 = paddle._C_ops.add(add_36, swish_54) - - # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) - combine_3 = [swish_47, add_38] - - # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (1x768x1x1xf32) <- (1x768x34x34xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_371 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_370, full_int_array_1) - del full_int_array_1, parameter_370 - - # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - - # pd_op.hardsigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) - multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) - - # pd_op.conv2d: (1x1024x34x34xf32) <- (1x768x34x34xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_21, parameter_369, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_369 - - # pd_op.batch_norm_: (1x1024x34x34xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x34x34xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - 
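# --- Illustrative reference (not part of the generated model file above) ---
# The mean -> 1x1 conv -> hardsigmoid -> multiply sequence above acts as a
# squeeze-excitation style channel gate; hardsigmoid(x) = clip(x/6 + 0.5, 0, 1),
# matching the float("0.166667") / float("0.5") arguments.  The helper names
# and random toy tensors below are hypothetical.
import numpy as np

def hardsigmoid(x, slope=1.0 / 6.0, offset=0.5):
    return np.clip(slope * x + offset, 0.0, 1.0)

def channel_gate(feat, w, b):
    # feat: (1, C, H, W); w: (C, C, 1, 1) 1x1-conv weight; b: (C,) bias
    pooled = feat.mean(axis=(2, 3), keepdims=True)                       # (1, C, 1, 1)
    squeezed = np.einsum("nchw,ockl->nohw", pooled, w) + b[None, :, None, None]
    return feat * hardsigmoid(squeezed)                                  # broadcast over H, W

f = np.random.rand(1, 768, 34, 34).astype("float32")
gated = channel_gate(f, np.random.rand(768, 768, 1, 1).astype("float32"), np.zeros(768, "float32"))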
batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_368, - parameter_367, - parameter_366, - parameter_365, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_365, parameter_366, parameter_367, parameter_368 - - # pd_op.swish: (1x1024x34x34xf32) <- (1x1024x34x34xf32) - swish_55 = paddle._C_ops.swish(batch_norm__432) - - # pd_op.flatten: (1x1024x1156xf32) <- (1x1024x34x34xf32) - flatten_0 = paddle._C_ops.flatten(swish_55, 2, 3) - - # pd_op.transpose: (1x1156x1024xf32) <- (1x1024x1156xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("34"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (34xf32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32") - del full_2 - - # builtin.combine: ([34xf32, 34xf32]) <- (34xf32, 34xf32) - combine_4 = [arange_0, arange_0] - del arange_0 - - # pd_op.meshgrid: ([34x34xf32, 34x34xf32]) <- ([34xf32, 34xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_4) - del combine_4 - - # builtin.split: (34x34xf32, 34x34xf32) <- ([34x34xf32, 34x34xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("256"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32") - del full_1, full_3, full_4 - - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (256xf32) <- (256xf32, 1xf32) - scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True) - del arange_1, full_5 - - # pd_op.full: (256xf32) <- () - full_6 = paddle._C_ops.full( - [256], - float("10000"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) - elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0) - del full_6, scale_0 - - # pd_op.full: (256xf32) <- () - full_7 = paddle._C_ops.full( - [256], - float("1"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.divide: (256xf32) <- (256xf32, 256xf32) - divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0) - del elementwise_pow_0, full_7 - - # pd_op.flatten: (1156xf32) <- (34x34xf32) - flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) - del split_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [1] - - # pd_op.unsqueeze: (1156x1xf32) <- (1156xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2) - del flatten_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [0] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_16 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_17 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_18 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_19 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_20 = 
full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_21 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_22 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_23 = full_int_array_3 - - # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3) - del divide_0 - - # pd_op.matmul: (1156x256xf32) <- (1156x1xf32, 1x256xf32) - matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) - del unsqueeze_0 - - # pd_op.flatten: (1156xf32) <- (34x34xf32) - flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) - del split_1 - - # pd_op.unsqueeze: (1156x1xf32) <- (1156xf32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2) - del flatten_2, full_int_array_2 - - # pd_op.matmul: (1156x256xf32) <- (1156x1xf32, 1x256xf32) - matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) - del unsqueeze_1, unsqueeze_2 - - # pd_op.sin: (1156x256xf32) <- (1156x256xf32) - sin_0 = paddle._C_ops.sin(matmul_0) - - # pd_op.cos: (1156x256xf32) <- (1156x256xf32) - cos_0 = paddle._C_ops.cos(matmul_0) - del matmul_0 - - # pd_op.sin: (1156x256xf32) <- (1156x256xf32) - sin_1 = paddle._C_ops.sin(matmul_1) - - # pd_op.cos: (1156x256xf32) <- (1156x256xf32) - cos_1 = paddle._C_ops.cos(matmul_1) - del matmul_1 - - # builtin.combine: ([1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32]) <- (1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32) - combine_5 = [sin_0, cos_0, sin_1, cos_1] - del cos_0, cos_1, sin_0, sin_1 - - # pd_op.concat: (1156x1024xf32) <- ([1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_5, full_0) - del combine_5 - - # pd_op.unsqueeze: (1x1156x1024xf32) <- (1156x1024xf32, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(concat_4, full_int_array_3) - del concat_4 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_40 = paddle._C_ops.add(transpose_0, unsqueeze_3) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [1024] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_24 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_25 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_26 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_27 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_28 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_29 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_30 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_31 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_32 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_33 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_34 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_35 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_36 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_37 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_38 = full_int_array_4 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - data_18, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - data_19, [0], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_2 = 
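# --- Illustrative reference (not part of the generated model file above) ---
# The arange / meshgrid / sin / cos chain above builds a fixed 2-D sinusoidal
# position embedding of shape (34*34, 4*256) = (1156, 1024) that is added to
# the flattened feature map before the transformer layers.  Minimal numpy
# sketch; the function name and defaults are hypothetical.
import numpy as np

def sincos_pos_embed_2d(size=34, dim_quarter=256, temperature=10000.0):
    coords = np.arange(size, dtype="float32")
    gx, gy = np.meshgrid(coords, coords, indexing="ij")     # two (size, size) grids
    omega = 1.0 / temperature ** (np.arange(dim_quarter, dtype="float32") / dim_quarter)
    px = gx.reshape(-1, 1) * omega[None, :]                 # (size*size, dim_quarter)
    py = gy.reshape(-1, 1) * omega[None, :]
    return np.concatenate([np.sin(px), np.cos(px), np.sin(py), np.cos(py)], axis=1)

pos = sincos_pos_embed_2d()   # (1156, 1024), the analogue of concat_4 above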
paddle._C_ops.matmul(add_40, slice_0, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_41 = paddle._C_ops.add(matmul_2, slice_1) - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_5 = [0, 0, 4, 256] - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) - del reshape_4 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [2048] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_39 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_40 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_41 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_42 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_43 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_44 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_45 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_46 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_47 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_48 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_49 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_50 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_51 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_52 = full_int_array_6 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_53 = full_int_array_6 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - data_18, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - data_19, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_3 = paddle._C_ops.matmul(add_40, slice_2, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_42 = paddle._C_ops.add(matmul_3, slice_3) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) - del reshape_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [2147483647] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_54 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_55 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_56 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_57 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_58 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_59 = full_int_array_7 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_60 = full_int_array_7 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - data_18, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_18 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - data_19, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_19 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) - - # 
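# --- Illustrative reference (not part of the generated model file above) ---
# data_18 / data_19 hold a fused (1024, 3072) QKV projection weight and its
# (3072,) bias; the slice ops above split them at columns 0-1024, 1024-2048 and
# 2048-3072 (the [2147483647] end index simply means "to the end").  Stand-in
# random arrays below are hypothetical.
import numpy as np

w_qkv = np.random.rand(1024, 3072).astype("float32")   # stand-in for data_18
b_qkv = np.random.rand(3072).astype("float32")          # stand-in for data_19
w_q, w_k, w_v = np.split(w_qkv, 3, axis=1)              # each (1024, 1024)
b_q, b_k, b_v = np.split(b_qkv, 3)                      # each (1024,)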
pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_43 = paddle._C_ops.add(matmul_4, slice_5) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) - del reshape_6 - - # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) - matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) - - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_61 = full_8 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_62 = full_8 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_63 = full_8 - - # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) - scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) - del matmul_5 - - # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) - softmax_0 = paddle._C_ops.softmax(scale_1, -1) - del scale_1 - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_64 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_65 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_66 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_67 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_68 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_69 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_70 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_71 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_72 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_73 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_74 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_75 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_76 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_77 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_78 = full_9 - - # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_0, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) - matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) - - # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) - transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) - del matmul_6 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_8 = [0, 0, 1024] - - # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) - reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_364, False, False) - del parameter_364 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_44 = paddle._C_ops.add(matmul_7, parameter_363) - del parameter_363 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_44, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) 
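# --- Illustrative reference (not part of the generated model file above) ---
# The matmul / scale(0.0625) / softmax / dropout / matmul chain above is
# multi-head scaled dot-product attention with 4 heads of width 256
# (0.0625 == 1/sqrt(256)).  Minimal numpy sketch with dropout omitted;
# names and random inputs are hypothetical.
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def attention(q, k, v):
    # q, k, v: (batch, heads, tokens, head_dim), here (1, 4, 1156, 256)
    scores = q @ k.transpose(0, 1, 3, 2) / np.sqrt(q.shape[-1])   # (1, 4, 1156, 1156)
    return softmax(scores) @ v                                     # (1, 4, 1156, 256)

q = k = v = np.random.rand(1, 4, 1156, 256).astype("float32")
ctx = attention(q, k, v)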
else (out, None), - ) - del add_44 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_45 = paddle._C_ops.add(transpose_0, dropout_2) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_45, parameter_362, parameter_361, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_361, parameter_362 - - # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) - matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) - del parameter_360 - - # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) - add_46 = paddle._C_ops.add(matmul_8, parameter_359) - del parameter_359 - - # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) - gelu_0 = paddle._C_ops.gelu(add_46, False) - - # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_0, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_0 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) - matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) - del parameter_358 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_47 = paddle._C_ops.add(matmul_9, parameter_357) - del parameter_357 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_47, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_47 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_48, parameter_356, parameter_355, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_355, parameter_356 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_49 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - data_20, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - data_21, [0], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_10 = paddle._C_ops.matmul(add_49, slice_6, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_50 = paddle._C_ops.add(matmul_10, slice_7) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) - del reshape_8 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - data_20, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # 
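# --- Illustrative reference (not part of the generated model file above) ---
# Each encoder layer above follows attention with a 1024 -> 2048 -> 1024 GELU
# feed-forward block, a residual addition, and LayerNorm (eps 1e-5) over the
# channel dimension.  The sketch below drops dropout and uses the tanh GELU
# approximation (the generated op calls the exact erf form); names hypothetical.
import numpy as np

def layer_norm(x, gamma, beta, eps=1e-5):
    mu, var = x.mean(-1, keepdims=True), x.var(-1, keepdims=True)
    return gamma * (x - mu) / np.sqrt(var + eps) + beta

def gelu_tanh(x):
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

def ffn_block(x, w1, b1, w2, b2, g, b):
    # x: (1, 1156, 1024); w1: (1024, 2048); w2: (2048, 1024); g, b: (1024,)
    h = gelu_tanh(x @ w1 + b1)
    return layer_norm(x + (h @ w2 + b2), g, b)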
pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - data_21, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_11 = paddle._C_ops.matmul(add_49, slice_8, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_51 = paddle._C_ops.add(matmul_11, slice_9) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) - del reshape_9 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - data_20, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_20 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - data_21, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_21 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_52 = paddle._C_ops.add(matmul_12, slice_11) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) - del reshape_10 - - # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) - matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) - - # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) - scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) - del matmul_13 - - # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) - softmax_1 = paddle._C_ops.softmax(scale_2, -1) - del scale_2 - - # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) - matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) - - # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) - transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) - del matmul_14 - - # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) - reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_354, False, False) - del parameter_354 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_53 = paddle._C_ops.add(matmul_15, parameter_353) - del parameter_353 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_53, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_53 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) - - # pd_op.layer_norm: (1x1156x1024xf32, 
1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_54, parameter_352, parameter_351, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_351, parameter_352 - - # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) - matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) - del parameter_350 - - # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) - add_55 = paddle._C_ops.add(matmul_16, parameter_349) - del parameter_349 - - # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) - gelu_1 = paddle._C_ops.gelu(add_55, False) - - # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_1, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_1 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) - matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) - del parameter_348 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_56 = paddle._C_ops.add(matmul_17, parameter_347) - del parameter_347 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_56, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_56 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_57 = paddle._C_ops.add(layer_norm_6, dropout_14) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_57, parameter_346, parameter_345, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_345, parameter_346 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_58 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_12 = paddle._C_ops.slice( - data_22, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_13 = paddle._C_ops.slice( - data_23, [0], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_18 = paddle._C_ops.matmul(add_58, slice_12, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_59 = paddle._C_ops.add(matmul_18, slice_13) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) - del reshape_12 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_14 = paddle._C_ops.slice( - data_22, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_15 = paddle._C_ops.slice( - data_23, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- 
(1x1156x1024xf32, 1024x1024xf32) - matmul_19 = paddle._C_ops.matmul(add_58, slice_14, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_60 = paddle._C_ops.add(matmul_19, slice_15) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) - del reshape_13 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_16 = paddle._C_ops.slice( - data_22, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_22 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_17 = paddle._C_ops.slice( - data_23, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_23 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_61 = paddle._C_ops.add(matmul_20, slice_17) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) - del reshape_14 - - # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) - matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) - - # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) - scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) - del matmul_21 - - # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) - softmax_2 = paddle._C_ops.softmax(scale_3, -1) - del scale_3 - - # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) - dropout_16, dropout_17 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_2, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) - matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) - - # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) - transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) - del matmul_22 - - # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_344, False, False) - del parameter_344 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_62 = paddle._C_ops.add(matmul_23, parameter_343) - del parameter_343 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_18, dropout_19 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_62, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_62 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_63, parameter_342, 
parameter_341, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_341, parameter_342 - - # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) - matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) - del parameter_340 - - # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) - add_64 = paddle._C_ops.add(matmul_24, parameter_339) - del parameter_339 - - # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) - gelu_2 = paddle._C_ops.gelu(add_64, False) - - # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) - dropout_20, dropout_21 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_2, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_2 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) - matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) - del parameter_338 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_65 = paddle._C_ops.add(matmul_25, parameter_337) - del parameter_337 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_22, dropout_23 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_65, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_65 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_66, parameter_336, parameter_335, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_335, parameter_336 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_67 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_18 = paddle._C_ops.slice( - data_24, [1], full_int_array_3, full_int_array_4, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_19 = paddle._C_ops.slice( - data_25, [0], full_int_array_3, full_int_array_4, [1], [] - ) - del full_int_array_3 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_26 = paddle._C_ops.matmul(add_67, slice_18, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_68 = paddle._C_ops.add(matmul_26, slice_19) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) - del reshape_16 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_20 = paddle._C_ops.slice( - data_24, [1], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_21 = paddle._C_ops.slice( - data_25, [0], full_int_array_4, full_int_array_6, [1], [] - ) - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_27 = paddle._C_ops.matmul(add_67, slice_20, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 
1024xf32) - add_69 = paddle._C_ops.add(matmul_27, slice_21) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_5) - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) - del reshape_17 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_22 = paddle._C_ops.slice( - data_24, [1], full_int_array_6, full_int_array_7, [1], [] - ) - del data_24 - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_23 = paddle._C_ops.slice( - data_25, [0], full_int_array_6, full_int_array_7, [1], [] - ) - del data_25 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_70 = paddle._C_ops.add(matmul_28, slice_23) - - # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) - reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_5) - del full_int_array_5 - - # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) - transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) - del reshape_18 - - # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) - matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) - - # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) - scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) - del matmul_29 - - # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) - softmax_3 = paddle._C_ops.softmax(scale_4, -1) - del scale_4 - - # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) - dropout_24, dropout_25 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_3, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) - matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) - - # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) - transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) - del matmul_30 - - # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) - reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) - del full_int_array_8 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) - matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_334, False, False) - del parameter_334 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_71 = paddle._C_ops.add(matmul_31, parameter_333) - del parameter_333 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_26, dropout_27 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_71, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_71 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_72, parameter_332, parameter_331, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, 
None), - ) - del parameter_331, parameter_332 - - # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) - matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) - del parameter_330 - - # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) - add_73 = paddle._C_ops.add(matmul_32, parameter_329) - del parameter_329 - - # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) - gelu_3 = paddle._C_ops.gelu(add_73, False) - - # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) - dropout_28, dropout_29 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_3, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_3 - - # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) - matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) - del parameter_328 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) - add_74 = paddle._C_ops.add(matmul_33, parameter_327) - del parameter_327 - - # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) - dropout_30, dropout_31 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_74, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_74 - - # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) - add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) - - # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) - layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_75, parameter_326, parameter_325, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_325, parameter_326 - - # pd_op.transpose: (1x1024x1156xf32) <- (1x1156x1024xf32) - transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) - del layer_norm_21 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_9 = [1, 1024, 34, 34] - - # pd_op.reshape: (1x1024x34x34xf32) <- (1x1024x1156xf32, 4xi64) - reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) - del full_int_array_9 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x1024x34x34xf32, 384x1024x1x1xf32) - conv2d_77 = paddle._C_ops.conv2d( - reshape_20, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__438, - batch_norm__439, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_77, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_56 = paddle._C_ops.swish(batch_norm__438) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x1024x34x34xf32, 384x1024x1x1xf32) - conv2d_78 = paddle._C_ops.conv2d( - reshape_20, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_319 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 
384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_78, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_57 = paddle._C_ops.swish(batch_norm__444) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_79 = paddle._C_ops.conv2d( - swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_79, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_58 = paddle._C_ops.swish(batch_norm__450) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_80 = paddle._C_ops.conv2d( - swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__460, - batch_norm__461, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_80, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_81 = paddle._C_ops.conv2d( - swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_304 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_81, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_59 = 
paddle._C_ops.swish(add_76) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_82 = paddle._C_ops.conv2d( - swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__468, - batch_norm__469, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_82, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_60 = paddle._C_ops.swish(batch_norm__468) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_83 = paddle._C_ops.conv2d( - swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_83, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_84 = paddle._C_ops.conv2d( - swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_84, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_61 = paddle._C_ops.swish(add_77) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_10 = [5, 5] - - # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_61, - full_int_array_10, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_11 = [9, 9] - - # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_61, - full_int_array_11, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- 
() - full_int_array_12 = [13, 13] - - # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - swish_61, - full_int_array_12, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32) - combine_6 = [swish_61, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (1x1536x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_6, full_0) - del combine_6 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x1536x34x34xf32, 384x1536x1x1xf32) - conv2d_85 = paddle._C_ops.conv2d( - concat_5, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__490, - batch_norm__491, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_85, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_62 = paddle._C_ops.swish(batch_norm__486) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_86 = paddle._C_ops.conv2d( - swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_86, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_63 = paddle._C_ops.swish(batch_norm__492) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_87 = paddle._C_ops.conv2d( - swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__498, - batch_norm__499, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_87, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_88 = paddle._C_ops.conv2d( - 
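# --- Illustrative reference (not part of the generated model file above) ---
# The three stride-1 max pools above (kernels 5/9/13 with paddings 2/4/6), the
# concat, and the following 1x1 conv form an SPP-style block: the input and its
# three pooled copies keep the 34x34 resolution and stack to 4*384 = 1536
# channels.  Naive numpy reference; names and random data are hypothetical.
import numpy as np

def max_pool_same(x, k):
    # x: (C, H, W); stride-1 max pool, padding k // 2, output keeps H x W
    p = k // 2
    xp = np.pad(x, ((0, 0), (p, p), (p, p)), constant_values=-np.inf)
    out = np.empty_like(x)
    for i in range(x.shape[1]):
        for j in range(x.shape[2]):
            out[:, i, j] = xp[:, i:i + k, j:j + k].max(axis=(1, 2))
    return out

feat = np.random.rand(384, 34, 34).astype("float32")
spp = np.concatenate([feat] + [max_pool_same(feat, k) for k in (5, 9, 13)], axis=0)  # (1536, 34, 34)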
swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_88, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_64 = paddle._C_ops.swish(add_78) - - # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) - combine_7 = [swish_56, swish_64] - - # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_7, full_0) - del combine_7 - - # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) - conv2d_89 = paddle._C_ops.conv2d( - concat_6, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_89, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) - swish_65 = paddle._C_ops.swish(batch_norm__510) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) - conv2d_90 = paddle._C_ops.conv2d( - swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__520, - batch_norm__521, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_90, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_66 = paddle._C_ops.swish(batch_norm__516) - - # pd_op.nearest_interp: (1x384x68x68xf32) <- (1x384x34x34xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_66, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([1x384x68x68xf32, 1x512x68x68xf32]) <- (1x384x68x68xf32, 1x512x68x68xf32) - combine_8 = 
[nearest_interp_0, swish_45] - - # pd_op.concat: (1x896x68x68xf32) <- ([1x384x68x68xf32, 1x512x68x68xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_8, full_0) - del combine_8 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x896x68x68xf32, 192x896x1x1xf32) - conv2d_91 = paddle._C_ops.conv2d( - concat_7, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_254 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_91, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_67 = paddle._C_ops.swish(batch_norm__522) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x896x68x68xf32, 192x896x1x1xf32) - conv2d_92 = paddle._C_ops.conv2d( - concat_7, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__528, - batch_norm__529, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_92, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_68 = paddle._C_ops.swish(batch_norm__528) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_93 = paddle._C_ops.conv2d( - swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_93, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_69 = paddle._C_ops.swish(batch_norm__534) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_94 = paddle._C_ops.conv2d( - swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_239 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - ) = (lambda x, f: f(x))( 
- paddle._C_ops.batch_norm( - conv2d_94, - parameter_238, - parameter_237, - parameter_236, - parameter_235, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_95 = paddle._C_ops.conv2d( - swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__550, - batch_norm__551, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_95, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_70 = paddle._C_ops.swish(add_79) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_96 = paddle._C_ops.conv2d( - swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_96, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_71 = paddle._C_ops.swish(batch_norm__552) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_97 = paddle._C_ops.conv2d( - swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__558, - batch_norm__559, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_97, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_98 = paddle._C_ops.conv2d( - swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- 
(1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_98, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_72 = paddle._C_ops.swish(add_80) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_99 = paddle._C_ops.conv2d( - swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_99, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_73 = paddle._C_ops.swish(batch_norm__570) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_100 = paddle._C_ops.conv2d( - swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__580, - batch_norm__581, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_100, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_101 = paddle._C_ops.conv2d( - swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_101, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_81 = 
paddle._C_ops.add(batch_norm__576, batch_norm__582) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_74 = paddle._C_ops.swish(add_81) - - # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) - combine_9 = [swish_67, swish_74] - - # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_9, full_0) - del combine_9 - - # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) - conv2d_102 = paddle._C_ops.conv2d( - concat_8, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__588, - batch_norm__589, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_102, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) - swish_75 = paddle._C_ops.swish(batch_norm__588) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) - conv2d_103 = paddle._C_ops.conv2d( - swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_103, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_76 = paddle._C_ops.swish(batch_norm__594) - - # pd_op.nearest_interp: (1x192x136x136xf32) <- (1x192x68x68xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_76, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([1x192x136x136xf32, 1x256x136x136xf32]) <- (1x192x136x136xf32, 1x256x136x136xf32) - combine_10 = [nearest_interp_1, swish_29] - - # pd_op.concat: (1x448x136x136xf32) <- ([1x192x136x136xf32, 1x256x136x136xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_10, full_0) - del combine_10 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x448x136x136xf32, 96x448x1x1xf32) - conv2d_104 = paddle._C_ops.conv2d( - concat_9, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - batch_norm__604, - batch_norm__605, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_104, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - False, - 
float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_77 = paddle._C_ops.swish(batch_norm__600) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x448x136x136xf32, 96x448x1x1xf32) - conv2d_105 = paddle._C_ops.conv2d( - concat_9, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__610, - batch_norm__611, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_105, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_78 = paddle._C_ops.swish(batch_norm__606) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_106 = paddle._C_ops.conv2d( - swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_106, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_79 = paddle._C_ops.swish(batch_norm__612) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_107 = paddle._C_ops.conv2d( - swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__618, - batch_norm__619, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_107, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_108 = paddle._C_ops.conv2d( - swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - 
batch_norm__629, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_108, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_80 = paddle._C_ops.swish(add_82) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_109 = paddle._C_ops.conv2d( - swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_109, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_81 = paddle._C_ops.swish(batch_norm__630) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_110 = paddle._C_ops.conv2d( - swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__640, - batch_norm__641, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_110, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_111 = paddle._C_ops.conv2d( - swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_111, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_82 = paddle._C_ops.swish(add_83) - - # pd_op.conv2d: 
(1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_112 = paddle._C_ops.conv2d( - swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__648, - batch_norm__649, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_112, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_83 = paddle._C_ops.swish(batch_norm__648) - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) - conv2d_113 = paddle._C_ops.conv2d( - swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_113, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) - conv2d_114 = paddle._C_ops.conv2d( - swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_114, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) - add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) - - # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) - swish_84 = paddle._C_ops.swish(add_84) - - # builtin.combine: ([1x96x136x136xf32, 1x96x136x136xf32]) <- (1x96x136x136xf32, 1x96x136x136xf32) - combine_11 = [swish_77, swish_84] - - # pd_op.concat: (1x192x136x136xf32) <- ([1x96x136x136xf32, 1x96x136x136xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_11, full_0) - del combine_11 - - # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) - conv2d_115 = paddle._C_ops.conv2d( - concat_10, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__666, - 
batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__670, - batch_norm__671, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_115, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) - swish_85 = paddle._C_ops.swish(batch_norm__666) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x136x136xf32, 192x192x3x3xf32) - conv2d_116 = paddle._C_ops.conv2d( - swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_116, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_86 = paddle._C_ops.swish(batch_norm__672) - - # builtin.combine: ([1x192x68x68xf32, 1x384x68x68xf32]) <- (1x192x68x68xf32, 1x384x68x68xf32) - combine_12 = [swish_86, swish_75] - - # pd_op.concat: (1x576x68x68xf32) <- ([1x192x68x68xf32, 1x384x68x68xf32], 1xi32) - concat_11 = paddle._C_ops.concat(combine_12, full_0) - del combine_12 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x576x68x68xf32, 192x576x1x1xf32) - conv2d_117 = paddle._C_ops.conv2d( - concat_11, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__678, - batch_norm__679, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_117, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_87 = paddle._C_ops.swish(batch_norm__678) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x576x68x68xf32, 192x576x1x1xf32) - conv2d_118 = paddle._C_ops.conv2d( - concat_11, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_118, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, 
None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_88 = paddle._C_ops.swish(batch_norm__684) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_119 = paddle._C_ops.conv2d( - swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_119, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_89 = paddle._C_ops.swish(batch_norm__690) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_120 = paddle._C_ops.conv2d( - swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__700, - batch_norm__701, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_120, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_121 = paddle._C_ops.conv2d( - swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_121, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_90 = paddle._C_ops.swish(add_85) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_122 = paddle._C_ops.conv2d( - swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__708, - batch_norm__709, - batch_norm__710, - batch_norm__711, - 
batch_norm__712, - batch_norm__713, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_122, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_91 = paddle._C_ops.swish(batch_norm__708) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_123 = paddle._C_ops.conv2d( - swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_123, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_124 = paddle._C_ops.conv2d( - swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_124, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_92 = paddle._C_ops.swish(add_86) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_125 = paddle._C_ops.conv2d( - swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__730, - batch_norm__731, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_125, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_93 = paddle._C_ops.swish(batch_norm__726) - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) - conv2d_126 = paddle._C_ops.conv2d( - swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_126, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) - conv2d_127 = paddle._C_ops.conv2d( - swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__738, - batch_norm__739, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_127, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) - add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) - - # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) - swish_94 = paddle._C_ops.swish(add_87) - - # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) - combine_13 = [swish_87, swish_94] - - # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) - concat_12 = paddle._C_ops.concat(combine_13, full_0) - del combine_13 - - # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) - conv2d_128 = paddle._C_ops.conv2d( - concat_12, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_128, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) - swish_95 = paddle._C_ops.swish(batch_norm__744) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x68x68xf32, 384x384x3x3xf32) - conv2d_129 = paddle._C_ops.conv2d( - swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_129, - parameter_63, - 
parameter_62, - parameter_61, - parameter_60, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_96 = paddle._C_ops.swish(batch_norm__750) - - # builtin.combine: ([1x384x34x34xf32, 1x768x34x34xf32]) <- (1x384x34x34xf32, 1x768x34x34xf32) - combine_14 = [swish_96, swish_65] - - # pd_op.concat: (1x1152x34x34xf32) <- ([1x384x34x34xf32, 1x768x34x34xf32], 1xi32) - concat_13 = paddle._C_ops.concat(combine_14, full_0) - del combine_14 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x1152x34x34xf32, 384x1152x1x1xf32) - conv2d_130 = paddle._C_ops.conv2d( - concat_13, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__760, - batch_norm__761, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_130, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_97 = paddle._C_ops.swish(batch_norm__756) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x1152x34x34xf32, 384x1152x1x1xf32) - conv2d_131 = paddle._C_ops.conv2d( - concat_13, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_131, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_98 = paddle._C_ops.swish(batch_norm__762) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_132 = paddle._C_ops.conv2d( - swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__768, - batch_norm__769, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_132, - parameter_48, - parameter_47, - parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, parameter_48 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_99 = paddle._C_ops.swish(batch_norm__768) - - # 
pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_133 = paddle._C_ops.conv2d( - swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_133, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_134 = paddle._C_ops.conv2d( - swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_39 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_134, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_88 = paddle._C_ops.add(batch_norm__774, batch_norm__780) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_100 = paddle._C_ops.swish(add_88) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_135 = paddle._C_ops.conv2d( - swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__790, - batch_norm__791, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_135, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_101 = paddle._C_ops.swish(batch_norm__786) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_136 = paddle._C_ops.conv2d( - swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_136, - parameter_28, - parameter_27, - parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda 
out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_137 = paddle._C_ops.conv2d( - swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__798, - batch_norm__799, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_137, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_102 = paddle._C_ops.swish(add_89) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_138 = paddle._C_ops.conv2d( - swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_138, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_15, parameter_16, parameter_17, parameter_18 - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_103 = paddle._C_ops.swish(batch_norm__804) - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) - conv2d_139 = paddle._C_ops.conv2d( - swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_139, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 - - # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) - conv2d_140 = paddle._C_ops.conv2d( - swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__820, - batch_norm__821, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_140, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, parameter_8 - - # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) - add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) - - # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) - swish_104 = paddle._C_ops.swish(add_90) - - # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) - combine_15 = [swish_97, swish_104] - - # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) - concat_14 = paddle._C_ops.concat(combine_15, full_0) - del combine_15 - - # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) - conv2d_141 = paddle._C_ops.conv2d( - concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_141, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) - swish_0 = paddle._C_ops.swish(batch_norm__822) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_13, - add_14, - add_15, - add_16, - add_17, - add_18, - add_2, - add_20, - add_21, - add_22, - add_23, - add_24, - add_25, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_31, - add_33, - add_34, - add_35, - add_36, - add_37, - add_38, - add_4, - add_40, - add_41, - add_42, - add_43, - add_45, - add_46, - add_48, - add_49, - add_5, - add_50, - add_51, - add_52, - add_54, - add_55, - add_57, - add_58, - add_59, - add_60, - add_61, - add_63, - add_64, - add_66, - add_67, - add_68, - add_69, - add_7, - add_70, - add_72, - add_73, - add_75, - add_76, - add_77, - add_78, - add_79, - add_8, - add_80, - add_81, - add_82, - add_83, - add_84, - add_85, - add_86, - add_87, - add_88, - add_89, - add_9, - add_90, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_18, - assign_19, - assign_2, - assign_20, - assign_21, - assign_22, - assign_23, - assign_24, - assign_25, - assign_26, - assign_27, - assign_28, - assign_29, - assign_3, - assign_30, - assign_31, - assign_32, - assign_33, - assign_34, - assign_35, - assign_36, - assign_37, - assign_38, - assign_39, - assign_4, - assign_40, - assign_41, - assign_42, - assign_43, - assign_44, - assign_45, - assign_46, - assign_47, - assign_48, - assign_49, - assign_5, - assign_50, - assign_51, - assign_52, - assign_53, - assign_54, - assign_55, - assign_56, - assign_57, - assign_58, - assign_59, - assign_6, - assign_60, - assign_61, - assign_62, - assign_63, - assign_64, - assign_65, - assign_66, - assign_67, - assign_68, - assign_69, - assign_7, - assign_70, - assign_71, - assign_72, - assign_73, - assign_74, - assign_75, - assign_76, - assign_77, - 
assign_78, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - 
batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__432, - batch_norm__433, - batch_norm__434, - 
batch_norm__435, - batch_norm__436, - batch_norm__437, - batch_norm__438, - batch_norm__439, - batch_norm__44, - batch_norm__440, - batch_norm__441, - batch_norm__442, - batch_norm__443, - batch_norm__444, - batch_norm__445, - batch_norm__446, - batch_norm__447, - batch_norm__448, - batch_norm__449, - batch_norm__45, - batch_norm__450, - batch_norm__451, - batch_norm__452, - batch_norm__453, - batch_norm__454, - batch_norm__455, - batch_norm__456, - batch_norm__457, - batch_norm__458, - batch_norm__459, - batch_norm__46, - batch_norm__460, - batch_norm__461, - batch_norm__462, - batch_norm__463, - batch_norm__464, - batch_norm__465, - batch_norm__466, - batch_norm__467, - batch_norm__468, - batch_norm__469, - batch_norm__47, - batch_norm__470, - batch_norm__471, - batch_norm__472, - batch_norm__473, - batch_norm__474, - batch_norm__475, - batch_norm__476, - batch_norm__477, - batch_norm__478, - batch_norm__479, - batch_norm__48, - batch_norm__480, - batch_norm__481, - batch_norm__482, - batch_norm__483, - batch_norm__484, - batch_norm__485, - batch_norm__486, - batch_norm__487, - batch_norm__488, - batch_norm__489, - batch_norm__49, - batch_norm__490, - batch_norm__491, - batch_norm__492, - batch_norm__493, - batch_norm__494, - batch_norm__495, - batch_norm__496, - batch_norm__497, - batch_norm__498, - batch_norm__499, - batch_norm__5, - batch_norm__50, - batch_norm__500, - batch_norm__501, - batch_norm__502, - batch_norm__503, - batch_norm__504, - batch_norm__505, - batch_norm__506, - batch_norm__507, - batch_norm__508, - batch_norm__509, - batch_norm__51, - batch_norm__510, - batch_norm__511, - batch_norm__512, - batch_norm__513, - batch_norm__514, - batch_norm__515, - batch_norm__516, - batch_norm__517, - batch_norm__518, - batch_norm__519, - batch_norm__52, - batch_norm__520, - batch_norm__521, - batch_norm__522, - batch_norm__523, - batch_norm__524, - batch_norm__525, - batch_norm__526, - batch_norm__527, - batch_norm__528, - batch_norm__529, - batch_norm__53, - batch_norm__530, - batch_norm__531, - batch_norm__532, - batch_norm__533, - batch_norm__534, - batch_norm__535, - batch_norm__536, - batch_norm__537, - batch_norm__538, - batch_norm__539, - batch_norm__54, - batch_norm__540, - batch_norm__541, - batch_norm__542, - batch_norm__543, - batch_norm__544, - batch_norm__545, - batch_norm__546, - batch_norm__547, - batch_norm__548, - batch_norm__549, - batch_norm__55, - batch_norm__550, - batch_norm__551, - batch_norm__552, - batch_norm__553, - batch_norm__554, - batch_norm__555, - batch_norm__556, - batch_norm__557, - batch_norm__558, - batch_norm__559, - batch_norm__56, - batch_norm__560, - batch_norm__561, - batch_norm__562, - batch_norm__563, - batch_norm__564, - batch_norm__565, - batch_norm__566, - batch_norm__567, - batch_norm__568, - batch_norm__569, - batch_norm__57, - batch_norm__570, - batch_norm__571, - batch_norm__572, - batch_norm__573, - batch_norm__574, - batch_norm__575, - batch_norm__576, - batch_norm__577, - batch_norm__578, - batch_norm__579, - batch_norm__58, - batch_norm__580, - batch_norm__581, - batch_norm__582, - batch_norm__583, - batch_norm__584, - batch_norm__585, - batch_norm__586, - batch_norm__587, - batch_norm__588, - batch_norm__589, - batch_norm__59, - batch_norm__590, - batch_norm__591, - batch_norm__592, - batch_norm__593, - batch_norm__594, - batch_norm__595, - batch_norm__596, - batch_norm__597, - batch_norm__598, - batch_norm__599, - batch_norm__6, - batch_norm__60, - batch_norm__600, - batch_norm__601, - batch_norm__602, - batch_norm__603, - 
batch_norm__604, - batch_norm__605, - batch_norm__606, - batch_norm__607, - batch_norm__608, - batch_norm__609, - batch_norm__61, - batch_norm__610, - batch_norm__611, - batch_norm__612, - batch_norm__613, - batch_norm__614, - batch_norm__615, - batch_norm__616, - batch_norm__617, - batch_norm__618, - batch_norm__619, - batch_norm__62, - batch_norm__620, - batch_norm__621, - batch_norm__622, - batch_norm__623, - batch_norm__624, - batch_norm__625, - batch_norm__626, - batch_norm__627, - batch_norm__628, - batch_norm__629, - batch_norm__63, - batch_norm__630, - batch_norm__631, - batch_norm__632, - batch_norm__633, - batch_norm__634, - batch_norm__635, - batch_norm__636, - batch_norm__637, - batch_norm__638, - batch_norm__639, - batch_norm__64, - batch_norm__640, - batch_norm__641, - batch_norm__642, - batch_norm__643, - batch_norm__644, - batch_norm__645, - batch_norm__646, - batch_norm__647, - batch_norm__648, - batch_norm__649, - batch_norm__65, - batch_norm__650, - batch_norm__651, - batch_norm__652, - batch_norm__653, - batch_norm__654, - batch_norm__655, - batch_norm__656, - batch_norm__657, - batch_norm__658, - batch_norm__659, - batch_norm__66, - batch_norm__660, - batch_norm__661, - batch_norm__662, - batch_norm__663, - batch_norm__664, - batch_norm__665, - batch_norm__666, - batch_norm__667, - batch_norm__668, - batch_norm__669, - batch_norm__67, - batch_norm__670, - batch_norm__671, - batch_norm__672, - batch_norm__673, - batch_norm__674, - batch_norm__675, - batch_norm__676, - batch_norm__677, - batch_norm__678, - batch_norm__679, - batch_norm__68, - batch_norm__680, - batch_norm__681, - batch_norm__682, - batch_norm__683, - batch_norm__684, - batch_norm__685, - batch_norm__686, - batch_norm__687, - batch_norm__688, - batch_norm__689, - batch_norm__69, - batch_norm__690, - batch_norm__691, - batch_norm__692, - batch_norm__693, - batch_norm__694, - batch_norm__695, - batch_norm__696, - batch_norm__697, - batch_norm__698, - batch_norm__699, - batch_norm__7, - batch_norm__70, - batch_norm__700, - batch_norm__701, - batch_norm__702, - batch_norm__703, - batch_norm__704, - batch_norm__705, - batch_norm__706, - batch_norm__707, - batch_norm__708, - batch_norm__709, - batch_norm__71, - batch_norm__710, - batch_norm__711, - batch_norm__712, - batch_norm__713, - batch_norm__714, - batch_norm__715, - batch_norm__716, - batch_norm__717, - batch_norm__718, - batch_norm__719, - batch_norm__72, - batch_norm__720, - batch_norm__721, - batch_norm__722, - batch_norm__723, - batch_norm__724, - batch_norm__725, - batch_norm__726, - batch_norm__727, - batch_norm__728, - batch_norm__729, - batch_norm__73, - batch_norm__730, - batch_norm__731, - batch_norm__732, - batch_norm__733, - batch_norm__734, - batch_norm__735, - batch_norm__736, - batch_norm__737, - batch_norm__738, - batch_norm__739, - batch_norm__74, - batch_norm__740, - batch_norm__741, - batch_norm__742, - batch_norm__743, - batch_norm__744, - batch_norm__745, - batch_norm__746, - batch_norm__747, - batch_norm__748, - batch_norm__749, - batch_norm__75, - batch_norm__750, - batch_norm__751, - batch_norm__752, - batch_norm__753, - batch_norm__754, - batch_norm__755, - batch_norm__756, - batch_norm__757, - batch_norm__758, - batch_norm__759, - batch_norm__76, - batch_norm__760, - batch_norm__761, - batch_norm__762, - batch_norm__763, - batch_norm__764, - batch_norm__765, - batch_norm__766, - batch_norm__767, - batch_norm__768, - batch_norm__769, - batch_norm__77, - batch_norm__770, - batch_norm__771, - batch_norm__772, - batch_norm__773, - 
batch_norm__774, - batch_norm__775, - batch_norm__776, - batch_norm__777, - batch_norm__778, - batch_norm__779, - batch_norm__78, - batch_norm__780, - batch_norm__781, - batch_norm__782, - batch_norm__783, - batch_norm__784, - batch_norm__785, - batch_norm__786, - batch_norm__787, - batch_norm__788, - batch_norm__789, - batch_norm__79, - batch_norm__790, - batch_norm__791, - batch_norm__792, - batch_norm__793, - batch_norm__794, - batch_norm__795, - batch_norm__796, - batch_norm__797, - batch_norm__798, - batch_norm__799, - batch_norm__8, - batch_norm__80, - batch_norm__800, - batch_norm__801, - batch_norm__802, - batch_norm__803, - batch_norm__804, - batch_norm__805, - batch_norm__806, - batch_norm__807, - batch_norm__808, - batch_norm__809, - batch_norm__81, - batch_norm__810, - batch_norm__811, - batch_norm__812, - batch_norm__813, - batch_norm__814, - batch_norm__815, - batch_norm__816, - batch_norm__817, - batch_norm__818, - batch_norm__819, - batch_norm__82, - batch_norm__820, - batch_norm__821, - batch_norm__822, - batch_norm__823, - batch_norm__824, - batch_norm__825, - batch_norm__826, - batch_norm__827, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_10, - concat_11, - concat_12, - concat_13, - concat_14, - concat_2, - concat_3, - concat_5, - concat_6, - concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_100, - conv2d_101, - conv2d_102, - conv2d_103, - conv2d_104, - conv2d_105, - conv2d_106, - conv2d_107, - conv2d_108, - conv2d_109, - conv2d_11, - conv2d_110, - conv2d_111, - conv2d_112, - conv2d_113, - conv2d_114, - conv2d_115, - conv2d_116, - conv2d_117, - conv2d_118, - conv2d_119, - conv2d_12, - conv2d_120, - conv2d_121, - conv2d_122, - conv2d_123, - conv2d_124, - conv2d_125, - conv2d_126, - conv2d_127, - conv2d_128, - conv2d_129, - conv2d_13, - conv2d_130, - conv2d_131, - conv2d_132, - conv2d_133, - conv2d_134, - conv2d_135, - conv2d_136, - conv2d_137, - conv2d_138, - conv2d_139, - conv2d_14, - conv2d_140, - conv2d_141, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - conv2d_73, - conv2d_74, - conv2d_75, - conv2d_76, - conv2d_77, - conv2d_78, - conv2d_79, - conv2d_8, - conv2d_80, - conv2d_81, - conv2d_82, - conv2d_83, - conv2d_84, - conv2d_85, - conv2d_86, - conv2d_87, - conv2d_88, - conv2d_89, - conv2d_9, - conv2d_90, - conv2d_91, - conv2d_92, - conv2d_93, - conv2d_94, - conv2d_95, - conv2d_96, - conv2d_97, - conv2d_98, - conv2d_99, - dropout_0, - dropout_1, - dropout_10, - 
dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_16, - dropout_17, - dropout_18, - dropout_19, - dropout_2, - dropout_20, - dropout_21, - dropout_22, - dropout_23, - dropout_24, - dropout_25, - dropout_26, - dropout_27, - dropout_28, - dropout_29, - dropout_3, - dropout_30, - dropout_31, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_8, - dropout_9, - full_0, - full_8, - full_9, - full_int_array_0, - full_int_array_10, - full_int_array_11, - full_int_array_12, - full_int_array_4, - full_int_array_6, - full_int_array_7, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - layer_norm_0, - layer_norm_1, - layer_norm_10, - layer_norm_11, - layer_norm_12, - layer_norm_13, - layer_norm_14, - layer_norm_15, - layer_norm_16, - layer_norm_17, - layer_norm_18, - layer_norm_19, - layer_norm_2, - layer_norm_20, - layer_norm_22, - layer_norm_23, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_10, - matmul_11, - matmul_12, - matmul_15, - matmul_16, - matmul_17, - matmul_18, - matmul_19, - matmul_2, - matmul_20, - matmul_23, - matmul_24, - matmul_25, - matmul_26, - matmul_27, - matmul_28, - matmul_3, - matmul_31, - matmul_32, - matmul_33, - matmul_4, - matmul_7, - matmul_8, - matmul_9, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_10, - multiply_11, - multiply_12, - multiply_13, - multiply_14, - multiply_15, - multiply_16, - multiply_17, - multiply_18, - multiply_19, - multiply_2, - multiply_20, - multiply_21, - multiply_3, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - multiply_8, - multiply_9, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_1, - reshape_11, - reshape_15, - reshape_19, - reshape_2, - reshape_20, - reshape_3, - reshape_7, - slice_0, - slice_1, - slice_10, - slice_11, - slice_12, - slice_13, - slice_14, - slice_15, - slice_16, - slice_17, - slice_18, - slice_19, - slice_2, - slice_20, - slice_21, - slice_22, - slice_23, - slice_3, - slice_4, - slice_5, - slice_6, - slice_7, - slice_8, - slice_9, - softmax_0, - softmax_1, - softmax_2, - softmax_3, - swish_1, - swish_10, - swish_100, - swish_101, - swish_102, - swish_103, - swish_104, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_55, - swish_56, - swish_57, - swish_58, - swish_59, - swish_6, - swish_60, - swish_61, - swish_62, - swish_63, - swish_64, - swish_65, - swish_66, - swish_67, - swish_68, - swish_69, - swish_7, - swish_70, - swish_71, - swish_72, - swish_73, - swish_74, - swish_75, - swish_76, - swish_77, - swish_78, - swish_79, - swish_8, - swish_80, - swish_81, - swish_82, - swish_83, - swish_84, - swish_85, - swish_86, - swish_87, - swish_88, - swish_89, - swish_9, - swish_90, - swish_91, - swish_92, - swish_93, - swish_94, - swish_95, - swish_96, - swish_97, - swish_98, - swish_99, - transpose_0, - transpose_1, - transpose_10, - transpose_11, - transpose_12, - transpose_13, - 
transpose_14, - transpose_15, - transpose_16, - transpose_17, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - transpose_9, - unsqueeze_3, - ) - - return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py deleted file mode 100644 index 7bb3e9a3b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_11/weight_meta.py +++ /dev/null @@ -1,8004 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [768] - dtype = "float32" - min_val = float("-0.175875") - max_val = float("0.210823") - mean = float("0.0834695") - std = float("0.0566098") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [768] - dtype = "float32" - min_val = float("0.939895") - max_val = float("1.29826") - mean = float("1.06397") - std = float("0.0312259") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [768] - dtype = "float32" - min_val = float("0.00117681") - max_val = float("0.0493074") - mean = float("0.00765869") - std = float("0.0046166") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [768] - dtype = "float32" - min_val = float("-0.13338") - max_val = float("0.0572232") - mean = float("-0.0286564") - std = float("0.0288427") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0548133") - max_val = float("0.0388089") - mean = float("-0.000154658") - std = float("0.00249634") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [384] - dtype = "float32" - min_val = float("-0.14169") - max_val = float("0.0305817") - mean = float("-0.0188052") - std = float("0.0234504") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [384] - dtype = "float32" - min_val = float("0.945748") - max_val = float("1.04442") - mean = float("0.98666") - std = float("0.0105852") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [384] - dtype = "float32" - min_val = float("0.000824752") - max_val = float("0.0183885") - mean = float("0.00496541") - std = float("0.0033442") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = float("-0.0551455") - max_val = float("0.0628843") - mean = float("0.00283053") - std = float("0.0222857") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0299324") - max_val = float("0.020664") - mean = float("2.29272e-05") - std = float("0.00192338") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384] - dtype = "float32" - min_val = float("-0.14169") - max_val = float("0.0305817") - mean = float("-0.0188052") - std = float("0.0234504") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [384] - dtype = "float32" - min_val = float("0.968039") - max_val = float("1.13059") - mean = float("1.01544") - std = float("0.0171846") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = "float32" - min_val = 
float("0.00204949") - max_val = float("0.0543081") - mean = float("0.00785702") - std = float("0.00475222") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("-0.203105") - max_val = float("0.152445") - mean = float("-0.0430598") - std = float("0.036306") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.029908") - max_val = float("0.035511") - mean = float("-7.29867e-05") - std = float("0.00131195") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-0.170219") - max_val = float("0.0209993") - mean = float("-0.0348873") - std = float("0.0279313") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384] - dtype = "float32" - min_val = float("0.975222") - max_val = float("1.12587") - mean = float("1.015") - std = float("0.0240805") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("0.00543045") - max_val = float("0.191688") - mean = float("0.021541") - std = float("0.0159869") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384] - dtype = "float32" - min_val = float("-0.266668") - max_val = float("0.4144") - mean = float("-0.0370778") - std = float("0.0509328") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0331339") - max_val = float("0.0530855") - mean = float("-6.31513e-05") - std = float("0.00148047") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.105219") - max_val = float("0.0129843") - mean = float("-0.0358029") - std = float("0.0193236") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.945357") - max_val = float("1.04501") - mean = float("0.988631") - std = float("0.00984229") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.000719695") - max_val = float("0.0175524") - mean = float("0.00510978") - std = float("0.00307552") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.0846979") - max_val = float("0.0435371") - mean = float("-0.00255296") - std = float("0.0171291") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0267958") - max_val = float("0.025491") - mean = float("-5.37291e-05") - std = float("0.00203271") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.105219") - max_val = float("0.0129843") - mean = float("-0.0358029") - std = float("0.0193236") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384] - dtype = "float32" - min_val = float("0.959852") - max_val = float("1.10509") - mean = float("1.01609") - std = float("0.0177564") - data = None - - -class Program_weight_tensor_parameter_27: - name = 
"parameter_27" - shape = [384] - dtype = "float32" - min_val = float("0.0024927") - max_val = float("0.0541337") - mean = float("0.00984546") - std = float("0.0050589") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("-0.215525") - max_val = float("0.32115") - mean = float("-0.0500705") - std = float("0.0449912") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0363929") - max_val = float("0.0514823") - mean = float("-8.41934e-05") - std = float("0.00132563") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("-0.0896627") - max_val = float("0.0192839") - mean = float("-0.0360783") - std = float("0.0194692") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.933291") - max_val = float("1.11466") - mean = float("1.01167") - std = float("0.026589") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("0.00555425") - max_val = float("0.0674583") - mean = float("0.0182706") - std = float("0.00926606") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("-0.23163") - max_val = float("0.124775") - mean = float("-0.0274763") - std = float("0.0558182") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0397048") - max_val = float("0.0499731") - mean = float("-5.43157e-05") - std = float("0.00151173") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-0.116341") - max_val = float("0.0161185") - mean = float("-0.0373639") - std = float("0.0201507") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384] - dtype = "float32" - min_val = float("0.929383") - max_val = float("1.02791") - mean = float("0.98704") - std = float("0.0110296") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("0.00126812") - max_val = float("0.0117075") - mean = float("0.00454736") - std = float("0.00186229") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384] - dtype = "float32" - min_val = float("-0.0553816") - max_val = float("0.0350888") - mean = float("-0.00843188") - std = float("0.0133692") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0386337") - max_val = float("0.0282119") - mean = float("-0.000152706") - std = float("0.00204597") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("-0.116341") - max_val = float("0.0161185") - mean = float("-0.0373639") - std = float("0.0201507") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384] - dtype = "float32" - min_val = float("0.981354") - max_val = float("1.10683") - mean = float("1.01834") - std = float("0.0222205") - data = 
None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384] - dtype = "float32" - min_val = float("0.00525993") - max_val = float("0.0393454") - mean = float("0.0117862") - std = float("0.00495311") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384] - dtype = "float32" - min_val = float("-0.189916") - max_val = float("0.089502") - mean = float("-0.0267907") - std = float("0.0350026") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0360859") - max_val = float("0.0633791") - mean = float("-4.66788e-05") - std = float("0.00138059") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [384] - dtype = "float32" - min_val = float("-0.107113") - max_val = float("0.0239382") - mean = float("-0.0375215") - std = float("0.0214567") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [384] - dtype = "float32" - min_val = float("0.944795") - max_val = float("1.11465") - mean = float("1.01186") - std = float("0.0277861") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [384] - dtype = "float32" - min_val = float("0.0055232") - max_val = float("0.0636068") - mean = float("0.0154489") - std = float("0.00777637") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("-0.154281") - max_val = float("0.124624") - mean = float("-0.04841") - std = float("0.0507409") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0279282") - max_val = float("0.0439271") - mean = float("-7.87759e-05") - std = float("0.00153817") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("-0.10674") - max_val = float("0.046738") - mean = float("-0.026306") - std = float("0.0154157") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384] - dtype = "float32" - min_val = float("0.973756") - max_val = float("1.08653") - mean = float("1.00903") - std = float("0.0171142") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [384] - dtype = "float32" - min_val = float("0.00240361") - max_val = float("0.0172008") - mean = float("0.00539159") - std = float("0.0019353") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384] - dtype = "float32" - min_val = float("-0.100425") - max_val = float("0.0867517") - mean = float("-0.0196731") - std = float("0.0269084") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0619005") - max_val = float("0.0744808") - mean = float("-8.91799e-05") - std = float("0.00230778") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [384] - dtype = "float32" - min_val = float("-0.0424904") - max_val = float("0.0160654") - mean = float("-0.00899509") - std = float("0.00840798") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [384] - dtype = "float32" - min_val = float("0.959519") - max_val = float("1.05137") - 
mean = float("1.00788") - std = float("0.0115961") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [384] - dtype = "float32" - min_val = float("0.00126033") - max_val = float("0.03023") - mean = float("0.00451665") - std = float("0.00218518") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [384] - dtype = "float32" - min_val = float("-0.110881") - max_val = float("0.0925399") - mean = float("-0.0236355") - std = float("0.0234762") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0245473") - max_val = float("0.0425909") - mean = float("-0.000112646") - std = float("0.00208633") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.0529748") - max_val = float("0.0059538") - mean = float("-0.0166275") - std = float("0.00987957") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.988678") - max_val = float("1.10388") - mean = float("1.01957") - std = float("0.0168754") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [384] - dtype = "float32" - min_val = float("0.00462734") - max_val = float("0.0682865") - mean = float("0.0144132") - std = float("0.00829381") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - min_val = float("-0.443847") - max_val = float("0.193358") - mean = float("-0.047384") - std = float("0.0711895") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0212973") - max_val = float("0.0335283") - mean = float("-3.20307e-05") - std = float("0.00117985") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.222314") - max_val = float("0.492622") - mean = float("0.217344") - std = float("0.124262") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - min_val = float("0.919258") - max_val = float("1.4834") - mean = float("1.14101") - std = float("0.0738465") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val = float("0.00377408") - max_val = float("0.0757958") - mean = float("0.0118179") - std = float("0.00580441") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-0.129308") - max_val = float("0.0593208") - mean = float("-0.0372124") - std = float("0.0301462") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0788092") - max_val = float("0.0718439") - mean = float("-0.000420206") - std = float("0.00505348") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [192] - dtype = "float32" - min_val = float("-0.165903") - max_val = float("0.0468638") - mean = float("-0.0248091") - std = float("0.0394948") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [192] - dtype = "float32" - 
min_val = float("0.841187") - max_val = float("1.05089") - mean = float("0.972721") - std = float("0.0237726") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [192] - dtype = "float32" - min_val = float("0.00135964") - max_val = float("0.0214683") - mean = float("0.00615868") - std = float("0.00390446") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [192] - dtype = "float32" - min_val = float("-0.0635033") - max_val = float("0.0921305") - mean = float("-0.00577622") - std = float("0.0200987") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0496361") - max_val = float("0.0295852") - mean = float("-0.000179209") - std = float("0.00381061") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [192] - dtype = "float32" - min_val = float("-0.165903") - max_val = float("0.0468638") - mean = float("-0.0248091") - std = float("0.0394948") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [192] - dtype = "float32" - min_val = float("0.72984") - max_val = float("1.12263") - mean = float("1.02218") - std = float("0.0372419") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [192] - dtype = "float32" - min_val = float("0.00534163") - max_val = float("0.0562738") - mean = float("0.0136988") - std = float("0.0062901") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [192] - dtype = "float32" - min_val = float("-0.219364") - max_val = float("0.10138") - mean = float("-0.0376619") - std = float("0.0434336") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0430374") - max_val = float("0.049516") - mean = float("-0.000124454") - std = float("0.00256786") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [192] - dtype = "float32" - min_val = float("-0.191344") - max_val = float("0.0444996") - mean = float("-0.057942") - std = float("0.0491062") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [192] - dtype = "float32" - min_val = float("0.897737") - max_val = float("1.18792") - mean = float("1.01539") - std = float("0.0484046") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [192] - dtype = "float32" - min_val = float("0.0105665") - max_val = float("0.202383") - mean = float("0.0352004") - std = float("0.0227021") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [192] - dtype = "float32" - min_val = float("-0.295941") - max_val = float("0.513277") - mean = float("-0.0405879") - std = float("0.0633076") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.047378") - max_val = float("0.0557186") - mean = float("-0.000110452") - std = float("0.00285571") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [192] - dtype = "float32" - min_val = float("-0.191632") - max_val = float("0.00854012") - mean = float("-0.064207") - std = float("0.0334262") - data = None - - -class Program_weight_tensor_parameter_86: - name = 
"parameter_86" - shape = [192] - dtype = "float32" - min_val = float("0.922153") - max_val = float("1.04653") - mean = float("0.973445") - std = float("0.017956") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [192] - dtype = "float32" - min_val = float("0.00111369") - max_val = float("0.0150475") - mean = float("0.00521977") - std = float("0.00256485") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [192] - dtype = "float32" - min_val = float("-0.0705634") - max_val = float("0.0364988") - mean = float("-0.00792433") - std = float("0.0151559") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0386531") - max_val = float("0.0308172") - mean = float("-0.000343288") - std = float("0.00384278") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [192] - dtype = "float32" - min_val = float("-0.191632") - max_val = float("0.00854012") - mean = float("-0.064207") - std = float("0.0334262") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [192] - dtype = "float32" - min_val = float("0.968104") - max_val = float("1.14778") - mean = float("1.02415") - std = float("0.0294364") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [192] - dtype = "float32" - min_val = float("0.00435298") - max_val = float("0.0469628") - mean = float("0.0119993") - std = float("0.00625753") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [192] - dtype = "float32" - min_val = float("-0.186388") - max_val = float("0.140954") - mean = float("-0.0380294") - std = float("0.0385226") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0471114") - max_val = float("0.0550151") - mean = float("-0.00014067") - std = float("0.00262923") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [192] - dtype = "float32" - min_val = float("-0.188926") - max_val = float("0.062054") - mean = float("-0.0755775") - std = float("0.0405971") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [192] - dtype = "float32" - min_val = float("0.880419") - max_val = float("1.21878") - mean = float("1.01465") - std = float("0.050849") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [192] - dtype = "float32" - min_val = float("0.00811769") - max_val = float("0.0673723") - mean = float("0.021795") - std = float("0.0107691") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [192] - dtype = "float32" - min_val = float("-0.117015") - max_val = float("0.0523492") - mean = float("-0.0248809") - std = float("0.0343347") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0427984") - max_val = float("0.0615634") - mean = float("-0.000111452") - std = float("0.00299174") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [192] - dtype = "float32" - min_val = float("-0.229476") - max_val = float("-0.00962433") - mean = float("-0.0831852") - std = float("0.0422479") - data = 
None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [192] - dtype = "float32" - min_val = float("0.900428") - max_val = float("1.02666") - mean = float("0.975123") - std = float("0.0229582") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [192] - dtype = "float32" - min_val = float("0.00171033") - max_val = float("0.0153508") - mean = float("0.00574446") - std = float("0.00198723") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [192] - dtype = "float32" - min_val = float("-0.0390528") - max_val = float("0.0467988") - mean = float("-0.0106683") - std = float("0.0171761") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0436521") - max_val = float("0.0635251") - mean = float("-0.000488447") - std = float("0.00437095") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [192] - dtype = "float32" - min_val = float("-0.229476") - max_val = float("-0.00962433") - mean = float("-0.0831852") - std = float("0.0422479") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [192] - dtype = "float32" - min_val = float("0.947654") - max_val = float("1.11111") - mean = float("1.02112") - std = float("0.0306157") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [192] - dtype = "float32" - min_val = float("0.00720341") - max_val = float("0.0581605") - mean = float("0.0166222") - std = float("0.00832442") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [192] - dtype = "float32" - min_val = float("-0.129987") - max_val = float("0.0597114") - mean = float("-0.0235942") - std = float("0.0336059") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0485052") - max_val = float("0.0562451") - mean = float("-9.74246e-05") - std = float("0.00278606") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [192] - dtype = "float32" - min_val = float("-0.234305") - max_val = float("0.081368") - mean = float("-0.0947175") - std = float("0.0463051") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [192] - dtype = "float32" - min_val = float("0.886145") - max_val = float("1.20472") - mean = float("1.01666") - std = float("0.0540248") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [192] - dtype = "float32" - min_val = float("0.00887124") - max_val = float("0.100013") - mean = float("0.0211819") - std = float("0.0128206") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [192] - dtype = "float32" - min_val = float("-0.180694") - max_val = float("0.0963527") - mean = float("-0.0400641") - std = float("0.0435422") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0410156") - max_val = float("0.0751964") - mean = float("-0.000134498") - std = float("0.0032483") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [192] - dtype = "float32" - min_val = float("-0.199948") - 
max_val = float("0.0153484") - mean = float("-0.0662884") - std = float("0.031178") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [192] - dtype = "float32" - min_val = float("0.925493") - max_val = float("1.15259") - mean = float("1.01328") - std = float("0.0383643") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [192] - dtype = "float32" - min_val = float("0.0044011") - max_val = float("0.0246488") - mean = float("0.00855375") - std = float("0.00310702") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [192] - dtype = "float32" - min_val = float("-0.0887579") - max_val = float("0.122375") - mean = float("-0.0224875") - std = float("0.0291868") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0628757") - max_val = float("0.0645969") - mean = float("-0.000195496") - std = float("0.00467829") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [192] - dtype = "float32" - min_val = float("-0.0999632") - max_val = float("0.037411") - mean = float("-0.0139724") - std = float("0.0203964") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [192] - dtype = "float32" - min_val = float("0.923856") - max_val = float("1.19918") - mean = float("1.00277") - std = float("0.025885") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [192] - dtype = "float32" - min_val = float("0.00336859") - max_val = float("0.0385248") - mean = float("0.00844927") - std = float("0.00421125") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [192] - dtype = "float32" - min_val = float("-0.0725677") - max_val = float("0.0457479") - mean = float("-0.0168628") - std = float("0.0213133") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0557051") - max_val = float("0.0726466") - mean = float("-0.000148835") - std = float("0.00416084") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.15908") - max_val = float("-0.000555601") - mean = float("-0.038944") - std = float("0.0217257") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.921159") - max_val = float("1.24866") - mean = float("1.00725") - std = float("0.0301467") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.00433237") - max_val = float("0.0626783") - mean = float("0.0160168") - std = float("0.00846658") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.396741") - max_val = float("0.33475") - mean = float("-0.0359227") - std = float("0.0957987") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0350237") - max_val = float("0.0471653") - mean = float("-3.44387e-05") - std = float("0.00253963") - data = None - - -class Program_weight_tensor_parameter_130: - 
name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.552248") - max_val = float("1.14732") - mean = float("0.355898") - std = float("0.346059") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.541472") - max_val = float("1.57746") - mean = float("1.15098") - std = float("0.184373") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.00561378") - max_val = float("0.117481") - mean = float("0.0300174") - std = float("0.0177759") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.181834") - max_val = float("0.204641") - mean = float("-0.0498559") - std = float("0.0488345") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.140077") - max_val = float("0.117821") - mean = float("-0.0010577") - std = float("0.0117759") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [96] - dtype = "float32" - min_val = float("-0.457965") - max_val = float("0.231213") - mean = float("-0.00944132") - std = float("0.144606") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [96] - dtype = "float32" - min_val = float("0.762871") - max_val = float("1.23462") - mean = float("0.948542") - std = float("0.0712293") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [96] - dtype = "float32" - min_val = float("0.00298878") - max_val = float("0.0421928") - mean = float("0.0124331") - std = float("0.00821157") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [96] - dtype = "float32" - min_val = float("-0.0584852") - max_val = float("0.0908825") - mean = float("-0.0135007") - std = float("0.024126") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0753814") - max_val = float("0.0571343") - mean = float("-0.00127258") - std = float("0.00926422") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [96] - dtype = "float32" - min_val = float("-0.457965") - max_val = float("0.231213") - mean = float("-0.00944132") - std = float("0.144606") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [96] - dtype = "float32" - min_val = float("0.505007") - max_val = float("1.2709") - mean = float("1.02954") - std = float("0.0962551") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [96] - dtype = "float32" - min_val = float("0.00864011") - max_val = float("0.0819817") - mean = float("0.029707") - std = float("0.0154312") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [96] - dtype = "float32" - min_val = float("-0.235686") - max_val = float("0.133508") - mean = float("-0.023179") - std = float("0.060166") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0934252") - max_val = float("0.0953084") - mean = float("-0.000117725") - std = float("0.00631181") - data 
= None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [96] - dtype = "float32" - min_val = float("-0.703685") - max_val = float("0.495421") - mean = float("-0.112778") - std = float("0.198104") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [96] - dtype = "float32" - min_val = float("0.723217") - max_val = float("1.7117") - mean = float("0.995187") - std = float("0.133891") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [96] - dtype = "float32" - min_val = float("0.0126242") - max_val = float("0.189013") - mean = float("0.0406096") - std = float("0.0302792") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [96] - dtype = "float32" - min_val = float("-0.211007") - max_val = float("0.139458") - mean = float("-0.0274803") - std = float("0.0625687") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0919405") - max_val = float("0.0707093") - mean = float("-0.000470706") - std = float("0.00699141") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [96] - dtype = "float32" - min_val = float("-0.364151") - max_val = float("0.190267") - mean = float("-0.138622") - std = float("0.0960161") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [96] - dtype = "float32" - min_val = float("0.626997") - max_val = float("1.01953") - mean = float("0.906483") - std = float("0.0555602") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [96] - dtype = "float32" - min_val = float("0.00320113") - max_val = float("0.0231713") - mean = float("0.0111523") - std = float("0.00438468") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [96] - dtype = "float32" - min_val = float("-0.0654375") - max_val = float("0.0381209") - mean = float("-0.00860304") - std = float("0.0165133") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0710343") - max_val = float("0.0593522") - mean = float("-0.00106867") - std = float("0.00947732") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [96] - dtype = "float32" - min_val = float("-0.364151") - max_val = float("0.190267") - mean = float("-0.138622") - std = float("0.0960161") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [96] - dtype = "float32" - min_val = float("0.811163") - max_val = float("1.15777") - mean = float("1.02225") - std = float("0.060594") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [96] - dtype = "float32" - min_val = float("0.0112523") - max_val = float("0.110271") - mean = float("0.0288634") - std = float("0.0207594") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [96] - dtype = "float32" - min_val = float("-0.163525") - max_val = float("0.0372296") - mean = float("-0.0369433") - std = float("0.0331166") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0811922") - max_val = float("0.0768953") - mean 
= float("-0.000466623") - std = float("0.00651757") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [96] - dtype = "float32" - min_val = float("-0.486488") - max_val = float("0.169402") - mean = float("-0.16699") - std = float("0.131221") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [96] - dtype = "float32" - min_val = float("0.77745") - max_val = float("1.29252") - mean = float("0.963023") - std = float("0.0981107") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [96] - dtype = "float32" - min_val = float("0.0097702") - max_val = float("0.108736") - mean = float("0.0240622") - std = float("0.0138876") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [96] - dtype = "float32" - min_val = float("-0.150372") - max_val = float("0.0656242") - mean = float("0.00931518") - std = float("0.0377034") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0993825") - max_val = float("0.0757514") - mean = float("-0.000423273") - std = float("0.00766977") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [96] - dtype = "float32" - min_val = float("-0.489705") - max_val = float("0.065165") - mean = float("-0.168145") - std = float("0.114783") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [96] - dtype = "float32" - min_val = float("0.722939") - max_val = float("1.0022") - mean = float("0.918838") - std = float("0.0531756") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [96] - dtype = "float32" - min_val = float("0.00758991") - max_val = float("0.0378697") - mean = float("0.0164332") - std = float("0.00578739") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [96] - dtype = "float32" - min_val = float("-0.0561772") - max_val = float("0.0397966") - mean = float("-0.0196014") - std = float("0.0187438") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.103953") - max_val = float("0.0646149") - mean = float("-0.00221242") - std = float("0.0110162") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [96] - dtype = "float32" - min_val = float("-0.489705") - max_val = float("0.065165") - mean = float("-0.168145") - std = float("0.114783") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [96] - dtype = "float32" - min_val = float("0.766539") - max_val = float("1.15353") - mean = float("0.982409") - std = float("0.0579773") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [96] - dtype = "float32" - min_val = float("0.0171857") - max_val = float("0.220166") - mean = float("0.0443842") - std = float("0.0319483") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [96] - dtype = "float32" - min_val = float("-0.196226") - max_val = float("0.0865161") - mean = float("-0.0155778") - std = float("0.0409091") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = 
float("-0.0992682") - max_val = float("0.0973879") - mean = float("-0.000248799") - std = float("0.00741391") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [96] - dtype = "float32" - min_val = float("-0.564609") - max_val = float("0.347562") - mean = float("-0.179116") - std = float("0.173215") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [96] - dtype = "float32" - min_val = float("0.764459") - max_val = float("1.33669") - mean = float("0.954532") - std = float("0.110883") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [96] - dtype = "float32" - min_val = float("0.0145544") - max_val = float("0.11097") - mean = float("0.0319708") - std = float("0.0188345") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [96] - dtype = "float32" - min_val = float("-0.17302") - max_val = float("0.269018") - mean = float("-0.0215072") - std = float("0.0939673") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.142383") - max_val = float("0.117263") - mean = float("-0.000229517") - std = float("0.00873001") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [96] - dtype = "float32" - min_val = float("-0.625413") - max_val = float("0.597772") - mean = float("-0.0821868") - std = float("0.254375") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [96] - dtype = "float32" - min_val = float("0.647479") - max_val = float("1.22747") - mean = float("0.866594") - std = float("0.1146") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [96] - dtype = "float32" - min_val = float("0.0115736") - max_val = float("0.0790864") - mean = float("0.0255985") - std = float("0.0116053") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [96] - dtype = "float32" - min_val = float("-0.112932") - max_val = float("0.0913184") - mean = float("-0.0111621") - std = float("0.0404469") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.14903") - max_val = float("0.149062") - mean = float("-0.000519099") - std = float("0.0115778") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [96] - dtype = "float32" - min_val = float("-0.0986349") - max_val = float("0.227763") - mean = float("0.0619239") - std = float("0.0545689") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [96] - dtype = "float32" - min_val = float("0.703928") - max_val = float("1.12525") - mean = float("0.932492") - std = float("0.0634652") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [96] - dtype = "float32" - min_val = float("0.00519295") - max_val = float("0.0599625") - mean = float("0.0119181") - std = float("0.00693821") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [96] - dtype = "float32" - min_val = float("-0.0889976") - max_val = float("0.164161") - mean = float("-0.017344") - std = float("0.0389571") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - 
shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.0952125") - max_val = float("0.110914") - mean = float("-0.000272416") - std = float("0.00775169") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [192] - dtype = "float32" - min_val = float("-0.295367") - max_val = float("0.199876") - mean = float("-0.065903") - std = float("0.0695813") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [192] - dtype = "float32" - min_val = float("0.670697") - max_val = float("1.45276") - mean = float("0.885134") - std = float("0.0783825") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [192] - dtype = "float32" - min_val = float("0.00810165") - max_val = float("0.127755") - mean = float("0.0227973") - std = float("0.0122998") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [192] - dtype = "float32" - min_val = float("-0.147256") - max_val = float("0.0479519") - mean = float("-0.0364953") - std = float("0.0354482") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0959126") - max_val = float("0.117328") - mean = float("-0.000597241") - std = float("0.00788359") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [384] - dtype = "float32" - min_val = float("-0.201782") - max_val = float("0.241811") - mean = float("-0.0670364") - std = float("0.0416536") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [384] - dtype = "float32" - min_val = float("0.873178") - max_val = float("1.54065") - mean = float("1.01926") - std = float("0.0632841") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [384] - dtype = "float32" - min_val = float("0.00754976") - max_val = float("0.0799005") - mean = float("0.015547") - std = float("0.00760761") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [384] - dtype = "float32" - min_val = float("-0.335306") - max_val = float("0.119584") - mean = float("-0.0574114") - std = float("0.0470951") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.104641") - max_val = float("0.104081") - mean = float("-0.000725337") - std = float("0.00722264") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [192] - dtype = "float32" - min_val = float("-0.176949") - max_val = float("0.00590593") - mean = float("-0.0653774") - std = float("0.0325609") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [192] - dtype = "float32" - min_val = float("0.884903") - max_val = float("0.991186") - mean = float("0.949253") - std = float("0.016433") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [192] - dtype = "float32" - min_val = float("0.00345864") - max_val = float("0.0250397") - mean = float("0.0100895") - std = float("0.00362139") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [192] - dtype = "float32" - min_val = float("-0.0784978") - max_val = float("0.0700745") - mean = float("-0.0238174") - std = float("0.0311932") - data 
= None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0569075") - max_val = float("0.0369732") - mean = float("-0.000733351") - std = float("0.00540254") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [192] - dtype = "float32" - min_val = float("-0.176949") - max_val = float("0.00590593") - mean = float("-0.0653774") - std = float("0.0325609") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [192] - dtype = "float32" - min_val = float("0.945936") - max_val = float("1.03267") - mean = float("0.988143") - std = float("0.0166204") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [192] - dtype = "float32" - min_val = float("0.0155344") - max_val = float("0.0844419") - mean = float("0.0339711") - std = float("0.012502") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [192] - dtype = "float32" - min_val = float("-0.176762") - max_val = float("0.15265") - mean = float("-0.0230656") - std = float("0.0601937") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0444131") - max_val = float("0.0760357") - mean = float("-7.02524e-05") - std = float("0.00300584") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [192] - dtype = "float32" - min_val = float("-0.217095") - max_val = float("-0.00148108") - mean = float("-0.0741376") - std = float("0.0354109") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [192] - dtype = "float32" - min_val = float("0.939031") - max_val = float("1.15417") - mean = float("1.02943") - std = float("0.0431658") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [192] - dtype = "float32" - min_val = float("0.0364266") - max_val = float("0.231139") - mean = float("0.0631271") - std = float("0.0206699") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [192] - dtype = "float32" - min_val = float("-0.262217") - max_val = float("0.304208") - mean = float("-0.0426428") - std = float("0.0718369") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0622017") - max_val = float("0.0626879") - mean = float("-0.000102158") - std = float("0.00367047") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [192] - dtype = "float32" - min_val = float("-0.196617") - max_val = float("-0.00995737") - mean = float("-0.071187") - std = float("0.0319798") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [192] - dtype = "float32" - min_val = float("0.94411") - max_val = float("1.04693") - mean = float("0.987726") - std = float("0.0137706") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [192] - dtype = "float32" - min_val = float("0.00228676") - max_val = float("0.00961601") - mean = float("0.00480728") - std = float("0.00123428") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [192] - dtype = "float32" - min_val = float("-0.0953901") - 
max_val = float("0.0389215") - mean = float("-0.025087") - std = float("0.0209448") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0313923") - max_val = float("0.0416125") - mean = float("-0.000809335") - std = float("0.00570058") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [192] - dtype = "float32" - min_val = float("-0.196617") - max_val = float("-0.00995737") - mean = float("-0.071187") - std = float("0.0319798") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [192] - dtype = "float32" - min_val = float("0.953711") - max_val = float("1.11463") - mean = float("1.00472") - std = float("0.0265116") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [192] - dtype = "float32" - min_val = float("0.0101684") - max_val = float("0.0483375") - mean = float("0.0181984") - std = float("0.00551311") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [192] - dtype = "float32" - min_val = float("-0.187973") - max_val = float("0.143836") - mean = float("-0.0474657") - std = float("0.0465909") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0484376") - max_val = float("0.0812032") - mean = float("-0.000164179") - std = float("0.00306328") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [192] - dtype = "float32" - min_val = float("-0.232846") - max_val = float("-0.0185216") - mean = float("-0.0943343") - std = float("0.040046") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [192] - dtype = "float32" - min_val = float("0.946521") - max_val = float("1.19181") - mean = float("1.02411") - std = float("0.0460177") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [192] - dtype = "float32" - min_val = float("0.0361899") - max_val = float("0.141548") - mean = float("0.0649197") - std = float("0.0200832") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [192] - dtype = "float32" - min_val = float("-0.350006") - max_val = float("0.262728") - mean = float("-0.0865782") - std = float("0.0989383") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0611519") - max_val = float("0.0870387") - mean = float("-0.000165155") - std = float("0.00384626") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [192] - dtype = "float32" - min_val = float("-0.154886") - max_val = float("0.00333791") - mean = float("-0.0685634") - std = float("0.0234192") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [192] - dtype = "float32" - min_val = float("0.932342") - max_val = float("1.07188") - mean = float("0.99857") - std = float("0.0218607") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [192] - dtype = "float32" - min_val = float("0.0020288") - max_val = float("0.00959064") - mean = float("0.0040899") - std = float("0.0011474") - data = None - - -class Program_weight_tensor_parameter_233: - name = 
"parameter_233" - shape = [192] - dtype = "float32" - min_val = float("-0.0826953") - max_val = float("0.0992723") - mean = float("-0.0125266") - std = float("0.0204973") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0348635") - max_val = float("0.0478139") - mean = float("-0.000426004") - std = float("0.00642907") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [192] - dtype = "float32" - min_val = float("-0.154886") - max_val = float("0.0033379") - mean = float("-0.0685634") - std = float("0.0234192") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [192] - dtype = "float32" - min_val = float("0.936172") - max_val = float("1.11491") - mean = float("0.992553") - std = float("0.0259462") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [192] - dtype = "float32" - min_val = float("0.0092625") - max_val = float("0.0464439") - mean = float("0.0187122") - std = float("0.00577318") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [192] - dtype = "float32" - min_val = float("-0.280981") - max_val = float("0.146793") - mean = float("-0.0420046") - std = float("0.0462309") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0372398") - max_val = float("0.0656079") - mean = float("-0.000164107") - std = float("0.00303882") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [192] - dtype = "float32" - min_val = float("-0.289028") - max_val = float("0.0181015") - mean = float("-0.109759") - std = float("0.0400942") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [192] - dtype = "float32" - min_val = float("0.943873") - max_val = float("1.25886") - mean = float("1.02651") - std = float("0.0418277") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [192] - dtype = "float32" - min_val = float("0.0146559") - max_val = float("0.0682576") - mean = float("0.029378") - std = float("0.009291") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [192] - dtype = "float32" - min_val = float("-0.381607") - max_val = float("0.108223") - mean = float("-0.0546604") - std = float("0.0618861") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0566363") - max_val = float("0.0721231") - mean = float("-0.000213222") - std = float("0.00432259") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.257034") - max_val = float("-0.0134243") - mean = float("-0.121787") - std = float("0.0441916") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("0.916939") - max_val = float("1.13523") - mean = float("1.02431") - std = float("0.042227") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.00558617") - max_val = float("0.0215959") - mean = float("0.0106695") - std = 
float("0.00291488") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-0.121693") - max_val = float("0.105152") - mean = float("0.0154863") - std = float("0.0289093") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0812553") - max_val = float("0.103824") - mean = float("-0.000190188") - std = float("0.00606084") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [192] - dtype = "float32" - min_val = float("-0.176608") - max_val = float("0.214363") - mean = float("-0.00723538") - std = float("0.0506647") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [192] - dtype = "float32" - min_val = float("0.951166") - max_val = float("1.21791") - mean = float("1.05549") - std = float("0.0498194") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [192] - dtype = "float32" - min_val = float("0.00700942") - max_val = float("0.0590357") - mean = float("0.0141953") - std = float("0.00517131") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [192] - dtype = "float32" - min_val = float("-0.0747975") - max_val = float("0.0811747") - mean = float("-0.00053402") - std = float("0.0274613") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.055207") - max_val = float("0.102723") - mean = float("-0.000223052") - std = float("0.00619518") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [384] - dtype = "float32" - min_val = float("-0.249775") - max_val = float("-0.0568629") - mean = float("-0.125062") - std = float("0.0336773") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [384] - dtype = "float32" - min_val = float("0.814907") - max_val = float("1.01643") - mean = float("0.909518") - std = float("0.0258168") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [384] - dtype = "float32" - min_val = float("0.0100937") - max_val = float("0.0695395") - mean = float("0.022801") - std = float("0.00921208") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [384] - dtype = "float32" - min_val = float("-0.146181") - max_val = float("0.110285") - mean = float("-0.0346603") - std = float("0.0383019") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0364868") - max_val = float("0.0339674") - mean = float("-0.000277799") - std = float("0.00472355") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [768] - dtype = "float32" - min_val = float("-0.104276") - max_val = float("0.0723922") - mean = float("-0.0568764") - std = float("0.0153315") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [768] - dtype = "float32" - min_val = float("0.9523") - max_val = float("1.1435") - mean = float("1.02091") - std = float("0.0210274") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [768] - dtype = "float32" - min_val = 
float("0.00433515") - max_val = float("0.0356021") - mean = float("0.00969406") - std = float("0.00346217") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [768] - dtype = "float32" - min_val = float("-0.103914") - max_val = float("0.111571") - mean = float("-0.0347286") - std = float("0.0270313") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0581812") - max_val = float("0.113051") - mean = float("-0.000304548") - std = float("0.00402831") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [384] - dtype = "float32" - min_val = float("-0.158166") - max_val = float("0.0744681") - mean = float("-0.0400513") - std = float("0.0206673") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [384] - dtype = "float32" - min_val = float("0.888577") - max_val = float("1.07465") - mean = float("0.982117") - std = float("0.0132258") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [384] - dtype = "float32" - min_val = float("0.00600496") - max_val = float("0.0953219") - mean = float("0.0199657") - std = float("0.00942153") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [384] - dtype = "float32" - min_val = float("-0.0681594") - max_val = float("0.0604839") - mean = float("-0.00585837") - std = float("0.0270223") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0396804") - max_val = float("0.073742") - mean = float("-7.22725e-05") - std = float("0.00350095") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [384] - dtype = "float32" - min_val = float("-0.158166") - max_val = float("0.0744681") - mean = float("-0.0400513") - std = float("0.0206673") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [384] - dtype = "float32" - min_val = float("0.879914") - max_val = float("1.07681") - mean = float("0.993922") - std = float("0.0123427") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [384] - dtype = "float32" - min_val = float("0.0282422") - max_val = float("0.756467") - mean = float("0.146192") - std = float("0.0664623") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [384] - dtype = "float32" - min_val = float("-0.276936") - max_val = float("0.156867") - mean = float("-0.0841025") - std = float("0.0859055") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0424878") - max_val = float("0.0475734") - mean = float("-0.000126902") - std = float("0.00130674") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [384] - dtype = "float32" - min_val = float("-0.0801146") - max_val = float("0.116771") - mean = float("-0.0189931") - std = float("0.0160256") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [384] - dtype = "float32" - min_val = float("0.920205") - max_val = float("1.16667") - mean = float("1.01504") - std = float("0.0246966") - data = None - - -class 
Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [384] - dtype = "float32" - min_val = float("0.0223031") - max_val = float("0.220359") - mean = float("0.0742131") - std = float("0.0330541") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [384] - dtype = "float32" - min_val = float("-0.235368") - max_val = float("0.220698") - mean = float("-0.0231606") - std = float("0.0793647") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0274578") - max_val = float("0.0359223") - mean = float("-3.21913e-05") - std = float("0.00171791") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [384] - dtype = "float32" - min_val = float("-0.0739505") - max_val = float("0.0209991") - mean = float("-0.0234999") - std = float("0.0134887") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [384] - dtype = "float32" - min_val = float("0.946312") - max_val = float("1.16798") - mean = float("1.01467") - std = float("0.0273905") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [384] - dtype = "float32" - min_val = float("0.0666339") - max_val = float("0.525489") - mean = float("0.192642") - std = float("0.0813326") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [384] - dtype = "float32" - min_val = float("-1.5811") - max_val = float("1.58853") - mean = float("0.0464615") - std = float("0.567191") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-0.0467316") - max_val = float("0.0575595") - mean = float("8.55437e-05") - std = float("0.0030071") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [384] - dtype = "float32" - min_val = float("-0.0183804") - max_val = float("0.0258619") - mean = float("-0.00144525") - std = float("0.00680648") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [384] - dtype = "float32" - min_val = float("0.969538") - max_val = float("1.06054") - mean = float("0.993834") - std = float("0.0122522") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [384] - dtype = "float32" - min_val = float("0.00292493") - max_val = float("0.0159254") - mean = float("0.00705807") - std = float("0.00238091") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [384] - dtype = "float32" - min_val = float("-0.0970828") - max_val = float("0.0620531") - mean = float("-0.0420728") - std = float("0.0244223") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0333324") - max_val = float("0.0411053") - mean = float("-0.000526762") - std = float("0.00328183") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [384] - dtype = "float32" - min_val = float("-0.0183804") - max_val = float("0.0258619") - mean = float("-0.00144525") - std = float("0.00680648") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [384] - dtype = "float32" - min_val = float("0.972046") - max_val = 
float("1.08568") - mean = float("1.00364") - std = float("0.0181342") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [384] - dtype = "float32" - min_val = float("0.0169031") - max_val = float("0.138508") - mean = float("0.0431682") - std = float("0.0166625") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [384] - dtype = "float32" - min_val = float("-0.322591") - max_val = float("0.0931467") - mean = float("-0.131452") - std = float("0.0636301") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0285775") - max_val = float("0.0755074") - mean = float("-0.000191474") - std = float("0.0013728") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [384] - dtype = "float32" - min_val = float("-0.0498105") - max_val = float("0.00884065") - mean = float("-0.00838186") - std = float("0.00779167") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [384] - dtype = "float32" - min_val = float("0.953878") - max_val = float("1.13497") - mean = float("1.01253") - std = float("0.0201047") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [384] - dtype = "float32" - min_val = float("0.072012") - max_val = float("0.425232") - mean = float("0.171605") - std = float("0.0477212") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [384] - dtype = "float32" - min_val = float("-1.24821") - max_val = float("0.9224") - mean = float("-0.241954") - std = float("0.272039") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.024251") - max_val = float("0.0585297") - mean = float("-0.000141777") - std = float("0.00163037") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [384] - dtype = "float32" - min_val = float("-0.0360838") - max_val = float("0.0137949") - mean = float("-0.00769057") - std = float("0.00789116") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [384] - dtype = "float32" - min_val = float("0.984179") - max_val = float("1.03462") - mean = float("0.999922") - std = float("0.00715393") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [384] - dtype = "float32" - min_val = float("0.00227712") - max_val = float("0.00995773") - mean = float("0.00397087") - std = float("0.00113598") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [384] - dtype = "float32" - min_val = float("-0.0776737") - max_val = float("0.150078") - mean = float("-0.0200997") - std = float("0.0256007") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0209147") - max_val = float("0.0327192") - mean = float("-0.000264199") - std = float("0.00284316") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [384] - dtype = "float32" - min_val = float("-0.0360838") - max_val = float("0.0137949") - mean = float("-0.00769057") - std = float("0.00789116") - data = None - - -class Program_weight_tensor_parameter_306: - name = 
"parameter_306" - shape = [384] - dtype = "float32" - min_val = float("0.982136") - max_val = float("1.06749") - mean = float("1.00454") - std = float("0.0126701") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [384] - dtype = "float32" - min_val = float("0.00988707") - max_val = float("0.074172") - mean = float("0.0261233") - std = float("0.00883817") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [384] - dtype = "float32" - min_val = float("-0.234406") - max_val = float("0.373645") - mean = float("-0.0733953") - std = float("0.0700589") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0111228") - max_val = float("0.0376454") - mean = float("-0.000113878") - std = float("0.00115243") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-0.0529908") - max_val = float("0.00370586") - mean = float("-0.0207007") - std = float("0.00870238") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - min_val = float("0.976061") - max_val = float("1.08549") - mean = float("1.01199") - std = float("0.0159983") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.0127448") - max_val = float("0.0747085") - mean = float("0.0330308") - std = float("0.00982086") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-0.182538") - max_val = float("0.229487") - mean = float("-0.0382627") - std = float("0.0546861") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0155426") - max_val = float("0.0250033") - mean = float("-6.09821e-05") - std = float("0.00159019") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [384] - dtype = "float32" - min_val = float("-0.0699578") - max_val = float("0.0213472") - mean = float("-0.0334829") - std = float("0.0126426") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [384] - dtype = "float32" - min_val = float("0.981937") - max_val = float("1.05593") - mean = float("1.0134") - std = float("0.0107706") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [384] - dtype = "float32" - min_val = float("0.00885101") - max_val = float("0.0351366") - mean = float("0.0147251") - std = float("0.00337135") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [384] - dtype = "float32" - min_val = float("-0.11759") - max_val = float("0.125101") - mean = float("-0.0114268") - std = float("0.0398999") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0187213") - max_val = float("0.0462026") - mean = float("-0.000204289") - std = float("0.00328169") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.024099") - max_val = float("0.0209723") - mean = float("-0.000328398") - std = 
float("0.00796388") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("0.994048") - max_val = float("1.08372") - mean = float("1.04108") - std = float("0.0136738") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.0117538") - max_val = float("0.0620667") - mean = float("0.0197522") - std = float("0.00513232") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-0.154192") - max_val = float("0.134319") - mean = float("-0.0110652") - std = float("0.0497964") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0381973") - max_val = float("0.0298107") - mean = float("-0.00023944") - std = float("0.00387698") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [1024] - dtype = "float32" - min_val = float("-3.19596e-10") - max_val = float("2.57347e-10") - mean = float("-6.94228e-12") - std = float("8.15169e-11") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [1024] - dtype = "float32" - min_val = float("0.826159") - max_val = float("0.830526") - mean = float("0.828072") - std = float("0.00038843") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [1024] - dtype = "float32" - min_val = float("-0.0184725") - max_val = float("0.0186349") - mean = float("3.29491e-06") - std = float("0.0105958") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0186694") - max_val = float("0.0186323") - mean = float("-3.09482e-06") - std = float("0.0105631") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [2048] - dtype = "float32" - min_val = float("-0.0258373") - max_val = float("0.0258488") - mean = float("-0.000490033") - std = float("0.0147842") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0261231") - max_val = float("0.0262344") - mean = float("-1.26e-05") - std = float("0.0149406") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [1024] - dtype = "float32" - min_val = float("-0.000644078") - max_val = float("0.000416122") - mean = float("1.0367e-06") - std = float("0.000160918") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [1024] - dtype = "float32" - min_val = float("0.825075") - max_val = float("0.831152") - mean = float("0.828074") - std = float("0.000498935") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [1024] - dtype = "float32" - min_val = float("-0.000571568") - max_val = float("0.000431714") - mean = float("-6.059e-07") - std = float("0.000151099") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0452304") - max_val = float("0.0451715") - mean = float("2.40342e-05") - std = float("0.0258606") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = 
[1024] - dtype = "float32" - min_val = float("-0.000495841") - max_val = float("0.000502198") - mean = float("2.39512e-05") - std = float("0.000158431") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [1024] - dtype = "float32" - min_val = float("0.825239") - max_val = float("0.831385") - mean = float("0.8281") - std = float("0.000479393") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [1024] - dtype = "float32" - min_val = float("-0.0182544") - max_val = float("0.0183953") - mean = float("1.83012e-06") - std = float("0.0105888") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0185876") - max_val = float("0.0186055") - mean = float("-3.09823e-06") - std = float("0.010563") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [2048] - dtype = "float32" - min_val = float("-0.0258719") - max_val = float("0.025874") - mean = float("-0.00048855") - std = float("0.0147851") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0260955") - max_val = float("0.0261499") - mean = float("-1.26e-05") - std = float("0.0149406") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [1024] - dtype = "float32" - min_val = float("-0.000468908") - max_val = float("0.000411959") - mean = float("2.0632e-06") - std = float("0.000140162") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [1024] - dtype = "float32" - min_val = float("0.825683") - max_val = float("0.831193") - mean = float("0.828073") - std = float("0.000448511") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [1024] - dtype = "float32" - min_val = float("-0.000528411") - max_val = float("0.000383675") - mean = float("2.84251e-06") - std = float("0.000141636") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0450293") - max_val = float("0.0450631") - mean = float("2.40173e-05") - std = float("0.0258607") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [1024] - dtype = "float32" - min_val = float("-0.000544272") - max_val = float("0.000596298") - mean = float("2.42076e-05") - std = float("0.000181497") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [1024] - dtype = "float32" - min_val = float("0.825946") - max_val = float("0.831225") - mean = float("0.828119") - std = float("0.000435131") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [1024] - dtype = "float32" - min_val = float("-0.0184487") - max_val = float("0.0183801") - mean = float("4.31404e-06") - std = float("0.0105859") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0185587") - max_val = float("0.0185999") - mean = float("-2.97794e-06") - std = float("0.010563") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [2048] - dtype = "float32" - min_val = float("-0.0259392") - max_val = float("0.025878") - mean = 
float("-0.000488744") - std = float("0.014786") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0261446") - max_val = float("0.0261367") - mean = float("-1.26001e-05") - std = float("0.0149405") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [1024] - dtype = "float32" - min_val = float("-0.000525158") - max_val = float("0.000569597") - mean = float("1.84284e-06") - std = float("0.000180024") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [1024] - dtype = "float32" - min_val = float("0.826325") - max_val = float("0.831088") - mean = float("0.828071") - std = float("0.00042233") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [1024] - dtype = "float32" - min_val = float("-0.000560432") - max_val = float("0.000596894") - mean = float("2.2615e-06") - std = float("0.00018498") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0451116") - max_val = float("0.0451354") - mean = float("2.40528e-05") - std = float("0.0258608") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [1024] - dtype = "float32" - min_val = float("-0.000823759") - max_val = float("0.000904078") - mean = float("2.92117e-05") - std = float("0.000277537") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [1024] - dtype = "float32" - min_val = float("0.826283") - max_val = float("0.83082") - mean = float("0.828142") - std = float("0.000430459") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [1024] - dtype = "float32" - min_val = float("-0.0185659") - max_val = float("0.0186155") - mean = float("4.15762e-06") - std = float("0.0105906") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0186584") - max_val = float("0.0186457") - mean = float("-3.0236e-06") - std = float("0.0105631") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [2048] - dtype = "float32" - min_val = float("-0.0260158") - max_val = float("0.0259107") - mean = float("-0.000488165") - std = float("0.0147856") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.026139") - max_val = float("0.026125") - mean = float("-1.26002e-05") - std = float("0.0149405") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [1024] - dtype = "float32" - min_val = float("-0.000913026") - max_val = float("0.000860109") - mean = float("1.52616e-06") - std = float("0.000286932") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [1024] - dtype = "float32" - min_val = float("0.826227") - max_val = float("0.830736") - mean = float("0.828069") - std = float("0.000440282") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [1024] - dtype = "float32" - min_val = float("-0.00089386") - max_val = float("0.000983855") - mean = float("2.69912e-06") - std = float("0.000279129") - data = None - - -class 
Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0456631") - max_val = float("0.0456484") - mean = float("2.40399e-05") - std = float("0.0258625") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [1024] - dtype = "float32" - min_val = float("-3.75937") - max_val = float("-0.734") - mean = float("-2.18719") - std = float("0.428746") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [1024] - dtype = "float32" - min_val = float("1.61944") - max_val = float("4.44114") - mean = float("3.08041") - std = float("0.254214") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [1024] - dtype = "float32" - min_val = float("0.00515514") - max_val = float("0.0275054") - mean = float("0.00882973") - std = float("0.00191584") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [1024] - dtype = "float32" - min_val = float("-0.173492") - max_val = float("0.132414") - mean = float("-0.0625274") - std = float("0.0318422") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0420016") - max_val = float("0.0672891") - mean = float("-0.000434506") - std = float("0.00419984") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [768] - dtype = "float32" - min_val = float("-0.0144958") - max_val = float("0.00204154") - mean = float("-0.000784991") - std = float("0.00208566") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0809974") - max_val = float("0.144837") - mean = float("-0.000290719") - std = float("0.0016779") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [384] - dtype = "float32" - min_val = float("-1.77404") - max_val = float("0.318904") - mean = float("-0.31075") - std = float("0.291253") - data = None - - -class Program_weight_tensor_parameter_373: - name = "parameter_373" - shape = [384] - dtype = "float32" - min_val = float("0.188368") - max_val = float("1.82104") - mean = float("0.60964") - std = float("0.262596") - data = None - - -class Program_weight_tensor_parameter_374: - name = "parameter_374" - shape = [384] - dtype = "float32" - min_val = float("7.69323e-05") - max_val = float("0.00105931") - mean = float("0.000262139") - std = float("0.000132205") - data = None - - -class Program_weight_tensor_parameter_375: - name = "parameter_375" - shape = [384] - dtype = "float32" - min_val = float("-0.0656167") - max_val = float("0.0776953") - mean = float("0.0239193") - std = float("0.0176294") - data = None - - -class Program_weight_tensor_parameter_376: - name = "parameter_376" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.020871") - max_val = float("0.0273244") - mean = float("-0.000414716") - std = float("0.00284754") - data = None - - -class Program_weight_tensor_parameter_377: - name = "parameter_377" - shape = [384] - dtype = "float32" - min_val = float("-1.77405") - max_val = float("0.319251") - mean = float("-0.310681") - std = float("0.291275") - data = None - - -class Program_weight_tensor_parameter_378: - name = "parameter_378" - shape = [384] - dtype = "float32" - min_val = float("0.335122") - max_val = 
float("2.60483") - mean = float("1.02609") - std = float("0.290246") - data = None - - -class Program_weight_tensor_parameter_379: - name = "parameter_379" - shape = [384] - dtype = "float32" - min_val = float("0.000764026") - max_val = float("0.00789643") - mean = float("0.00239397") - std = float("0.000872399") - data = None - - -class Program_weight_tensor_parameter_380: - name = "parameter_380" - shape = [384] - dtype = "float32" - min_val = float("-0.229833") - max_val = float("0.162266") - mean = float("0.0349416") - std = float("0.0423478") - data = None - - -class Program_weight_tensor_parameter_381: - name = "parameter_381" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0185255") - max_val = float("0.0282844") - mean = float("-7.21101e-05") - std = float("0.00183304") - data = None - - -class Program_weight_tensor_parameter_382: - name = "parameter_382" - shape = [384] - dtype = "float32" - min_val = float("-2.58205") - max_val = float("0.0326997") - mean = float("-1.56844") - std = float("0.416017") - data = None - - -class Program_weight_tensor_parameter_383: - name = "parameter_383" - shape = [384] - dtype = "float32" - min_val = float("0.51894") - max_val = float("1.64424") - mean = float("1.13558") - std = float("0.149427") - data = None - - -class Program_weight_tensor_parameter_384: - name = "parameter_384" - shape = [384] - dtype = "float32" - min_val = float("0.0445179") - max_val = float("0.278452") - mean = float("0.101004") - std = float("0.0266658") - data = None - - -class Program_weight_tensor_parameter_385: - name = "parameter_385" - shape = [384] - dtype = "float32" - min_val = float("-1.05877") - max_val = float("0.500591") - mean = float("-0.285429") - std = float("0.144535") - data = None - - -class Program_weight_tensor_parameter_386: - name = "parameter_386" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0217847") - max_val = float("0.0601331") - mean = float("-0.000214232") - std = float("0.00242153") - data = None - - -class Program_weight_tensor_parameter_387: - name = "parameter_387" - shape = [384] - dtype = "float32" - min_val = float("-1.93932") - max_val = float("0.644238") - mean = float("-0.57485") - std = float("0.358678") - data = None - - -class Program_weight_tensor_parameter_388: - name = "parameter_388" - shape = [384] - dtype = "float32" - min_val = float("0.163976") - max_val = float("2.06584") - mean = float("0.56203") - std = float("0.227231") - data = None - - -class Program_weight_tensor_parameter_389: - name = "parameter_389" - shape = [384] - dtype = "float32" - min_val = float("8.46446e-05") - max_val = float("0.00181652") - mean = float("0.000300897") - std = float("0.000147903") - data = None - - -class Program_weight_tensor_parameter_390: - name = "parameter_390" - shape = [384] - dtype = "float32" - min_val = float("-0.0395058") - max_val = float("0.072267") - mean = float("0.0222665") - std = float("0.0153805") - data = None - - -class Program_weight_tensor_parameter_391: - name = "parameter_391" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0311026") - max_val = float("0.039225") - mean = float("-0.000409791") - std = float("0.00262815") - data = None - - -class Program_weight_tensor_parameter_392: - name = "parameter_392" - shape = [384] - dtype = "float32" - min_val = float("-1.9394") - max_val = float("0.644918") - mean = float("-0.574762") - std = float("0.358753") - data = None - - -class Program_weight_tensor_parameter_393: - name = "parameter_393" - shape 
= [384] - dtype = "float32" - min_val = float("0.583818") - max_val = float("2.15633") - mean = float("1.08411") - std = float("0.255713") - data = None - - -class Program_weight_tensor_parameter_394: - name = "parameter_394" - shape = [384] - dtype = "float32" - min_val = float("0.00151649") - max_val = float("0.011387") - mean = float("0.00363589") - std = float("0.00111629") - data = None - - -class Program_weight_tensor_parameter_395: - name = "parameter_395" - shape = [384] - dtype = "float32" - min_val = float("-0.114817") - max_val = float("0.168288") - mean = float("0.040355") - std = float("0.0413819") - data = None - - -class Program_weight_tensor_parameter_396: - name = "parameter_396" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0211861") - max_val = float("0.0312284") - mean = float("-9.86606e-05") - std = float("0.00198109") - data = None - - -class Program_weight_tensor_parameter_397: - name = "parameter_397" - shape = [384] - dtype = "float32" - min_val = float("-2.39618") - max_val = float("0.845899") - mean = float("-1.40537") - std = float("0.36063") - data = None - - -class Program_weight_tensor_parameter_398: - name = "parameter_398" - shape = [384] - dtype = "float32" - min_val = float("0.454223") - max_val = float("1.91875") - mean = float("1.16633") - std = float("0.147984") - data = None - - -class Program_weight_tensor_parameter_399: - name = "parameter_399" - shape = [384] - dtype = "float32" - min_val = float("0.0369914") - max_val = float("0.169613") - mean = float("0.067321") - std = float("0.0165547") - data = None - - -class Program_weight_tensor_parameter_400: - name = "parameter_400" - shape = [384] - dtype = "float32" - min_val = float("-0.916864") - max_val = float("0.834885") - mean = float("-0.197255") - std = float("0.118118") - data = None - - -class Program_weight_tensor_parameter_401: - name = "parameter_401" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0304568") - max_val = float("0.0446889") - mean = float("-0.000206096") - std = float("0.00245489") - data = None - - -class Program_weight_tensor_parameter_402: - name = "parameter_402" - shape = [384] - dtype = "float32" - min_val = float("-1.87628") - max_val = float("0.453077") - mean = float("-0.485305") - std = float("0.376481") - data = None - - -class Program_weight_tensor_parameter_403: - name = "parameter_403" - shape = [384] - dtype = "float32" - min_val = float("0.0771953") - max_val = float("2.11917") - mean = float("0.441977") - std = float("0.217648") - data = None - - -class Program_weight_tensor_parameter_404: - name = "parameter_404" - shape = [384] - dtype = "float32" - min_val = float("7.57603e-05") - max_val = float("0.00171771") - mean = float("0.00036293") - std = float("0.000186378") - data = None - - -class Program_weight_tensor_parameter_405: - name = "parameter_405" - shape = [384] - dtype = "float32" - min_val = float("-0.0528798") - max_val = float("0.0858378") - mean = float("0.0268765") - std = float("0.0175426") - data = None - - -class Program_weight_tensor_parameter_406: - name = "parameter_406" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0213328") - max_val = float("0.0283453") - mean = float("-0.000505242") - std = float("0.00224656") - data = None - - -class Program_weight_tensor_parameter_407: - name = "parameter_407" - shape = [384] - dtype = "float32" - min_val = float("-1.87669") - max_val = float("0.45341") - mean = float("-0.485211") - std = float("0.376586") - data = None - - -class 
Program_weight_tensor_parameter_408: - name = "parameter_408" - shape = [384] - dtype = "float32" - min_val = float("0.522977") - max_val = float("2.22431") - mean = float("1.05297") - std = float("0.260052") - data = None - - -class Program_weight_tensor_parameter_409: - name = "parameter_409" - shape = [384] - dtype = "float32" - min_val = float("0.00214087") - max_val = float("0.0106285") - mean = float("0.00466215") - std = float("0.00134646") - data = None - - -class Program_weight_tensor_parameter_410: - name = "parameter_410" - shape = [384] - dtype = "float32" - min_val = float("-0.272097") - max_val = float("0.182301") - mean = float("0.0462845") - std = float("0.0484542") - data = None - - -class Program_weight_tensor_parameter_411: - name = "parameter_411" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0214852") - max_val = float("0.0348977") - mean = float("-0.000101693") - std = float("0.00210424") - data = None - - -class Program_weight_tensor_parameter_412: - name = "parameter_412" - shape = [384] - dtype = "float32" - min_val = float("-2.1565") - max_val = float("0.418538") - mean = float("-1.36711") - std = float("0.277506") - data = None - - -class Program_weight_tensor_parameter_413: - name = "parameter_413" - shape = [384] - dtype = "float32" - min_val = float("0.707119") - max_val = float("1.63571") - mean = float("1.14297") - std = float("0.101612") - data = None - - -class Program_weight_tensor_parameter_414: - name = "parameter_414" - shape = [384] - dtype = "float32" - min_val = float("0.0267598") - max_val = float("0.120536") - mean = float("0.0531872") - std = float("0.0145039") - data = None - - -class Program_weight_tensor_parameter_415: - name = "parameter_415" - shape = [384] - dtype = "float32" - min_val = float("-0.737016") - max_val = float("0.211594") - mean = float("-0.135647") - std = float("0.0976005") - data = None - - -class Program_weight_tensor_parameter_416: - name = "parameter_416" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0300983") - max_val = float("0.05499") - mean = float("-0.000159015") - std = float("0.00235156") - data = None - - -class Program_weight_tensor_parameter_417: - name = "parameter_417" - shape = [384] - dtype = "float32" - min_val = float("-2.92344") - max_val = float("1.66439") - mean = float("-0.760407") - std = float("0.643554") - data = None - - -class Program_weight_tensor_parameter_418: - name = "parameter_418" - shape = [384] - dtype = "float32" - min_val = float("0.953228") - max_val = float("2.9182") - mean = float("1.86309") - std = float("0.276205") - data = None - - -class Program_weight_tensor_parameter_419: - name = "parameter_419" - shape = [384] - dtype = "float32" - min_val = float("0.00273562") - max_val = float("0.012939") - mean = float("0.00578831") - std = float("0.00145222") - data = None - - -class Program_weight_tensor_parameter_420: - name = "parameter_420" - shape = [384] - dtype = "float32" - min_val = float("-0.279172") - max_val = float("0.135794") - mean = float("0.0682701") - std = float("0.0329249") - data = None - - -class Program_weight_tensor_parameter_421: - name = "parameter_421" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0411036") - max_val = float("0.048141") - mean = float("-0.000774534") - std = float("0.00548625") - data = None - - -class Program_weight_tensor_parameter_422: - name = "parameter_422" - shape = [384] - dtype = "float32" - min_val = float("-2.24702") - max_val = float("0.681993") - mean = 
float("-0.777088") - std = float("0.472908") - data = None - - -class Program_weight_tensor_parameter_423: - name = "parameter_423" - shape = [384] - dtype = "float32" - min_val = float("0.965876") - max_val = float("2.89361") - mean = float("2.09705") - std = float("0.305445") - data = None - - -class Program_weight_tensor_parameter_424: - name = "parameter_424" - shape = [384] - dtype = "float32" - min_val = float("0.000839665") - max_val = float("0.00423233") - mean = float("0.00221563") - std = float("0.000537") - data = None - - -class Program_weight_tensor_parameter_425: - name = "parameter_425" - shape = [384] - dtype = "float32" - min_val = float("-0.0182533") - max_val = float("0.0914483") - mean = float("0.0419083") - std = float("0.0183649") - data = None - - -class Program_weight_tensor_parameter_426: - name = "parameter_426" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0837021") - max_val = float("0.0611426") - mean = float("-0.00045084") - std = float("0.00374174") - data = None - - -class Program_weight_tensor_parameter_427: - name = "parameter_427" - shape = [768] - dtype = "float32" - min_val = float("-2.40194") - max_val = float("0.642339") - mean = float("-0.908288") - std = float("0.339331") - data = None - - -class Program_weight_tensor_parameter_428: - name = "parameter_428" - shape = [768] - dtype = "float32" - min_val = float("0.53146") - max_val = float("1.90712") - mean = float("0.919684") - std = float("0.149212") - data = None - - -class Program_weight_tensor_parameter_429: - name = "parameter_429" - shape = [768] - dtype = "float32" - min_val = float("0.00745832") - max_val = float("0.0743865") - mean = float("0.0178485") - std = float("0.00551587") - data = None - - -class Program_weight_tensor_parameter_430: - name = "parameter_430" - shape = [768] - dtype = "float32" - min_val = float("-0.236023") - max_val = float("0.207751") - mean = float("0.041919") - std = float("0.0579014") - data = None - - -class Program_weight_tensor_parameter_431: - name = "parameter_431" - shape = [768, 512, 3, 3] - dtype = "float32" - min_val = float("-0.0383779") - max_val = float("0.0519002") - mean = float("-9.93933e-05") - std = float("0.00244217") - data = None - - -class Program_weight_tensor_parameter_432: - name = "parameter_432" - shape = [512] - dtype = "float32" - min_val = float("-3.39029") - max_val = float("1.66616") - mean = float("-1.16168") - std = float("0.513766") - data = None - - -class Program_weight_tensor_parameter_433: - name = "parameter_433" - shape = [512] - dtype = "float32" - min_val = float("0.520928") - max_val = float("1.67546") - mean = float("1.11104") - std = float("0.148384") - data = None - - -class Program_weight_tensor_parameter_434: - name = "parameter_434" - shape = [512] - dtype = "float32" - min_val = float("0.00230842") - max_val = float("0.0165448") - mean = float("0.00755702") - std = float("0.00192355") - data = None - - -class Program_weight_tensor_parameter_435: - name = "parameter_435" - shape = [512] - dtype = "float32" - min_val = float("-0.159179") - max_val = float("0.0723523") - mean = float("-0.0485061") - std = float("0.0412122") - data = None - - -class Program_weight_tensor_parameter_436: - name = "parameter_436" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.208779") - max_val = float("0.179911") - mean = float("-0.000606249") - std = float("0.0081171") - data = None - - -class Program_weight_tensor_parameter_437: - name = "parameter_437" - shape = [384] - dtype = "float32" 
- min_val = float("-0.0103559") - max_val = float("0.00155602") - mean = float("-0.00302775") - std = float("0.0023618") - data = None - - -class Program_weight_tensor_parameter_438: - name = "parameter_438" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.204999") - max_val = float("0.141306") - mean = float("-0.00211219") - std = float("0.00500511") - data = None - - -class Program_weight_tensor_parameter_439: - name = "parameter_439" - shape = [192] - dtype = "float32" - min_val = float("-1.97063") - max_val = float("0.41045") - mean = float("-0.348649") - std = float("0.333533") - data = None - - -class Program_weight_tensor_parameter_440: - name = "parameter_440" - shape = [192] - dtype = "float32" - min_val = float("0.0528508") - max_val = float("2.16013") - mean = float("0.581272") - std = float("0.419844") - data = None - - -class Program_weight_tensor_parameter_441: - name = "parameter_441" - shape = [192] - dtype = "float32" - min_val = float("9.94453e-05") - max_val = float("0.00123961") - mean = float("0.000476419") - std = float("0.000224006") - data = None - - -class Program_weight_tensor_parameter_442: - name = "parameter_442" - shape = [192] - dtype = "float32" - min_val = float("-0.0376085") - max_val = float("0.0570153") - mean = float("0.00573177") - std = float("0.0152437") - data = None - - -class Program_weight_tensor_parameter_443: - name = "parameter_443" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0210389") - max_val = float("0.0585363") - mean = float("-0.000352054") - std = float("0.00423892") - data = None - - -class Program_weight_tensor_parameter_444: - name = "parameter_444" - shape = [192] - dtype = "float32" - min_val = float("-1.97059") - max_val = float("0.411367") - mean = float("-0.348497") - std = float("0.333596") - data = None - - -class Program_weight_tensor_parameter_445: - name = "parameter_445" - shape = [192] - dtype = "float32" - min_val = float("0.372764") - max_val = float("2.70243") - mean = float("1.20208") - std = float("0.49364") - data = None - - -class Program_weight_tensor_parameter_446: - name = "parameter_446" - shape = [192] - dtype = "float32" - min_val = float("0.0014863") - max_val = float("0.020345") - mean = float("0.00560471") - std = float("0.00209367") - data = None - - -class Program_weight_tensor_parameter_447: - name = "parameter_447" - shape = [192] - dtype = "float32" - min_val = float("-0.115289") - max_val = float("0.163741") - mean = float("0.0194467") - std = float("0.0436225") - data = None - - -class Program_weight_tensor_parameter_448: - name = "parameter_448" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.031927") - max_val = float("0.0389496") - mean = float("-0.000144904") - std = float("0.00325908") - data = None - - -class Program_weight_tensor_parameter_449: - name = "parameter_449" - shape = [192] - dtype = "float32" - min_val = float("-2.89054") - max_val = float("-0.177595") - mean = float("-1.31446") - std = float("0.401195") - data = None - - -class Program_weight_tensor_parameter_450: - name = "parameter_450" - shape = [192] - dtype = "float32" - min_val = float("0.695074") - max_val = float("2.09481") - mean = float("1.17912") - std = float("0.169901") - data = None - - -class Program_weight_tensor_parameter_451: - name = "parameter_451" - shape = [192] - dtype = "float32" - min_val = float("0.0658237") - max_val = float("0.479229") - mean = float("0.138928") - std = float("0.0482074") - data = None - - -class 
Program_weight_tensor_parameter_452: - name = "parameter_452" - shape = [192] - dtype = "float32" - min_val = float("-2.47032") - max_val = float("1.83399") - mean = float("-0.227578") - std = float("0.394509") - data = None - - -class Program_weight_tensor_parameter_453: - name = "parameter_453" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0350379") - max_val = float("0.0468605") - mean = float("-0.000221381") - std = float("0.00388426") - data = None - - -class Program_weight_tensor_parameter_454: - name = "parameter_454" - shape = [192] - dtype = "float32" - min_val = float("-1.94031") - max_val = float("0.513263") - mean = float("-0.279273") - std = float("0.321486") - data = None - - -class Program_weight_tensor_parameter_455: - name = "parameter_455" - shape = [192] - dtype = "float32" - min_val = float("0.0449424") - max_val = float("1.76947") - mean = float("0.444383") - std = float("0.305669") - data = None - - -class Program_weight_tensor_parameter_456: - name = "parameter_456" - shape = [192] - dtype = "float32" - min_val = float("7.91667e-05") - max_val = float("0.00164061") - mean = float("0.00043007") - std = float("0.000226992") - data = None - - -class Program_weight_tensor_parameter_457: - name = "parameter_457" - shape = [192] - dtype = "float32" - min_val = float("-0.0363552") - max_val = float("0.0461841") - mean = float("0.00877747") - std = float("0.0120158") - data = None - - -class Program_weight_tensor_parameter_458: - name = "parameter_458" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.02483") - max_val = float("0.0404131") - mean = float("-0.000400917") - std = float("0.00391908") - data = None - - -class Program_weight_tensor_parameter_459: - name = "parameter_459" - shape = [192] - dtype = "float32" - min_val = float("-1.94031") - max_val = float("0.514903") - mean = float("-0.279015") - std = float("0.321709") - data = None - - -class Program_weight_tensor_parameter_460: - name = "parameter_460" - shape = [192] - dtype = "float32" - min_val = float("0.481654") - max_val = float("2.27026") - mean = float("1.13859") - std = float("0.375612") - data = None - - -class Program_weight_tensor_parameter_461: - name = "parameter_461" - shape = [192] - dtype = "float32" - min_val = float("0.00303177") - max_val = float("0.0146645") - mean = float("0.00648") - std = float("0.00181309") - data = None - - -class Program_weight_tensor_parameter_462: - name = "parameter_462" - shape = [192] - dtype = "float32" - min_val = float("-0.0803161") - max_val = float("0.116901") - mean = float("0.0359767") - std = float("0.0322211") - data = None - - -class Program_weight_tensor_parameter_463: - name = "parameter_463" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0229799") - max_val = float("0.0371751") - mean = float("-0.000196939") - std = float("0.00352878") - data = None - - -class Program_weight_tensor_parameter_464: - name = "parameter_464" - shape = [192] - dtype = "float32" - min_val = float("-2.50826") - max_val = float("-0.12355") - mean = float("-1.2887") - std = float("0.443822") - data = None - - -class Program_weight_tensor_parameter_465: - name = "parameter_465" - shape = [192] - dtype = "float32" - min_val = float("0.653803") - max_val = float("1.66962") - mean = float("1.19928") - std = float("0.166233") - data = None - - -class Program_weight_tensor_parameter_466: - name = "parameter_466" - shape = [192] - dtype = "float32" - min_val = float("0.0475951") - max_val = float("0.209951") - mean = 
float("0.0950332") - std = float("0.0248435") - data = None - - -class Program_weight_tensor_parameter_467: - name = "parameter_467" - shape = [192] - dtype = "float32" - min_val = float("-2.16167") - max_val = float("0.473341") - mean = float("-0.117492") - std = float("0.248865") - data = None - - -class Program_weight_tensor_parameter_468: - name = "parameter_468" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.038582") - max_val = float("0.0537646") - mean = float("-0.00026749") - std = float("0.0040656") - data = None - - -class Program_weight_tensor_parameter_469: - name = "parameter_469" - shape = [192] - dtype = "float32" - min_val = float("-1.75738") - max_val = float("0.468608") - mean = float("-0.262263") - std = float("0.335862") - data = None - - -class Program_weight_tensor_parameter_470: - name = "parameter_470" - shape = [192] - dtype = "float32" - min_val = float("0.00305103") - max_val = float("1.67905") - mean = float("0.351948") - std = float("0.251703") - data = None - - -class Program_weight_tensor_parameter_471: - name = "parameter_471" - shape = [192] - dtype = "float32" - min_val = float("1.02293e-06") - max_val = float("0.00228453") - mean = float("0.000400551") - std = float("0.000283282") - data = None - - -class Program_weight_tensor_parameter_472: - name = "parameter_472" - shape = [192] - dtype = "float32" - min_val = float("-0.031609") - max_val = float("0.0551924") - mean = float("0.0110783") - std = float("0.0123374") - data = None - - -class Program_weight_tensor_parameter_473: - name = "parameter_473" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0307534") - max_val = float("0.0384153") - mean = float("-0.00045859") - std = float("0.00377622") - data = None - - -class Program_weight_tensor_parameter_474: - name = "parameter_474" - shape = [192] - dtype = "float32" - min_val = float("-1.75744") - max_val = float("0.470024") - mean = float("-0.262025") - std = float("0.336099") - data = None - - -class Program_weight_tensor_parameter_475: - name = "parameter_475" - shape = [192] - dtype = "float32" - min_val = float("0.405457") - max_val = float("1.97843") - mean = float("1.06603") - std = float("0.334153") - data = None - - -class Program_weight_tensor_parameter_476: - name = "parameter_476" - shape = [192] - dtype = "float32" - min_val = float("0.00267969") - max_val = float("0.0141796") - mean = float("0.00700109") - std = float("0.00190313") - data = None - - -class Program_weight_tensor_parameter_477: - name = "parameter_477" - shape = [192] - dtype = "float32" - min_val = float("-0.0881741") - max_val = float("0.111433") - mean = float("0.0401956") - std = float("0.0325546") - data = None - - -class Program_weight_tensor_parameter_478: - name = "parameter_478" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0336081") - max_val = float("0.0420323") - mean = float("-0.000205836") - std = float("0.00368544") - data = None - - -class Program_weight_tensor_parameter_479: - name = "parameter_479" - shape = [192] - dtype = "float32" - min_val = float("-2.49703") - max_val = float("0.138789") - mean = float("-1.24309") - std = float("0.424468") - data = None - - -class Program_weight_tensor_parameter_480: - name = "parameter_480" - shape = [192] - dtype = "float32" - min_val = float("0.652493") - max_val = float("1.80896") - mean = float("1.16711") - std = float("0.165463") - data = None - - -class Program_weight_tensor_parameter_481: - name = "parameter_481" - shape = [192] - dtype = "float32" 
- min_val = float("0.0304637") - max_val = float("0.147553") - mean = float("0.067116") - std = float("0.0164386") - data = None - - -class Program_weight_tensor_parameter_482: - name = "parameter_482" - shape = [192] - dtype = "float32" - min_val = float("-1.70097") - max_val = float("0.305559") - mean = float("-0.0850748") - std = float("0.199213") - data = None - - -class Program_weight_tensor_parameter_483: - name = "parameter_483" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0472912") - max_val = float("0.0583976") - mean = float("-0.000284769") - std = float("0.00417002") - data = None - - -class Program_weight_tensor_parameter_484: - name = "parameter_484" - shape = [192] - dtype = "float32" - min_val = float("-2.07915") - max_val = float("0.533836") - mean = float("-0.272165") - std = float("0.375339") - data = None - - -class Program_weight_tensor_parameter_485: - name = "parameter_485" - shape = [192] - dtype = "float32" - min_val = float("0.000522804") - max_val = float("0.732366") - mean = float("0.21194") - std = float("0.136205") - data = None - - -class Program_weight_tensor_parameter_486: - name = "parameter_486" - shape = [192] - dtype = "float32" - min_val = float("5.96543e-08") - max_val = float("0.000937142") - mean = float("0.000261376") - std = float("0.000147877") - data = None - - -class Program_weight_tensor_parameter_487: - name = "parameter_487" - shape = [192] - dtype = "float32" - min_val = float("-0.0266706") - max_val = float("0.0357546") - mean = float("0.00698739") - std = float("0.0098736") - data = None - - -class Program_weight_tensor_parameter_488: - name = "parameter_488" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0207564") - max_val = float("0.0335475") - mean = float("-0.000292443") - std = float("0.00332227") - data = None - - -class Program_weight_tensor_parameter_489: - name = "parameter_489" - shape = [192] - dtype = "float32" - min_val = float("-2.07924") - max_val = float("0.535791") - mean = float("-0.271976") - std = float("0.375569") - data = None - - -class Program_weight_tensor_parameter_490: - name = "parameter_490" - shape = [192] - dtype = "float32" - min_val = float("0.395086") - max_val = float("1.96267") - mean = float("0.959008") - std = float("0.303814") - data = None - - -class Program_weight_tensor_parameter_491: - name = "parameter_491" - shape = [192] - dtype = "float32" - min_val = float("0.00302737") - max_val = float("0.0157952") - mean = float("0.00707016") - std = float("0.00211662") - data = None - - -class Program_weight_tensor_parameter_492: - name = "parameter_492" - shape = [192] - dtype = "float32" - min_val = float("-0.0788482") - max_val = float("0.119233") - mean = float("0.0430225") - std = float("0.0339838") - data = None - - -class Program_weight_tensor_parameter_493: - name = "parameter_493" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0340016") - max_val = float("0.0403474") - mean = float("-0.000216247") - std = float("0.00380285") - data = None - - -class Program_weight_tensor_parameter_494: - name = "parameter_494" - shape = [192] - dtype = "float32" - min_val = float("-2.74084") - max_val = float("-0.0805818") - mean = float("-1.23662") - std = float("0.434286") - data = None - - -class Program_weight_tensor_parameter_495: - name = "parameter_495" - shape = [192] - dtype = "float32" - min_val = float("0.761952") - max_val = float("1.62053") - mean = float("1.15094") - std = float("0.142444") - data = None - - -class 
Program_weight_tensor_parameter_496: - name = "parameter_496" - shape = [192] - dtype = "float32" - min_val = float("0.0278922") - max_val = float("0.0817439") - mean = float("0.0488411") - std = float("0.0102139") - data = None - - -class Program_weight_tensor_parameter_497: - name = "parameter_497" - shape = [192] - dtype = "float32" - min_val = float("-1.39522") - max_val = float("0.291819") - mean = float("-0.0734705") - std = float("0.166804") - data = None - - -class Program_weight_tensor_parameter_498: - name = "parameter_498" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0589398") - max_val = float("0.0606418") - mean = float("-0.000300541") - std = float("0.00415388") - data = None - - -class Program_weight_tensor_parameter_499: - name = "parameter_499" - shape = [192] - dtype = "float32" - min_val = float("-1.212") - max_val = float("0.447452") - mean = float("-0.232044") - std = float("0.339385") - data = None - - -class Program_weight_tensor_parameter_500: - name = "parameter_500" - shape = [192] - dtype = "float32" - min_val = float("-9.43381e-05") - max_val = float("0.678118") - mean = float("0.192025") - std = float("0.120758") - data = None - - -class Program_weight_tensor_parameter_501: - name = "parameter_501" - shape = [192] - dtype = "float32" - min_val = float("2.50564e-10") - max_val = float("0.000967586") - mean = float("0.000259696") - std = float("0.000158071") - data = None - - -class Program_weight_tensor_parameter_502: - name = "parameter_502" - shape = [192] - dtype = "float32" - min_val = float("-0.0445459") - max_val = float("0.0432653") - mean = float("0.00753396") - std = float("0.0124482") - data = None - - -class Program_weight_tensor_parameter_503: - name = "parameter_503" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0374404") - max_val = float("0.0395949") - mean = float("-0.000292615") - std = float("0.00342625") - data = None - - -class Program_weight_tensor_parameter_504: - name = "parameter_504" - shape = [192] - dtype = "float32" - min_val = float("-1.21197") - max_val = float("0.448806") - mean = float("-0.231853") - std = float("0.339659") - data = None - - -class Program_weight_tensor_parameter_505: - name = "parameter_505" - shape = [192] - dtype = "float32" - min_val = float("0.382853") - max_val = float("1.56358") - mean = float("0.852209") - std = float("0.259926") - data = None - - -class Program_weight_tensor_parameter_506: - name = "parameter_506" - shape = [192] - dtype = "float32" - min_val = float("0.00286492") - max_val = float("0.0140629") - mean = float("0.00682827") - std = float("0.00187941") - data = None - - -class Program_weight_tensor_parameter_507: - name = "parameter_507" - shape = [192] - dtype = "float32" - min_val = float("-0.0776134") - max_val = float("0.150128") - mean = float("0.0470268") - std = float("0.037034") - data = None - - -class Program_weight_tensor_parameter_508: - name = "parameter_508" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0368355") - max_val = float("0.0400254") - mean = float("-0.000211959") - std = float("0.00380574") - data = None - - -class Program_weight_tensor_parameter_509: - name = "parameter_509" - shape = [192] - dtype = "float32" - min_val = float("-2.48699") - max_val = float("-0.132487") - mean = float("-1.2498") - std = float("0.418473") - data = None - - -class Program_weight_tensor_parameter_510: - name = "parameter_510" - shape = [192] - dtype = "float32" - min_val = float("0.689021") - max_val = 
float("1.51961") - mean = float("1.12491") - std = float("0.134826") - data = None - - -class Program_weight_tensor_parameter_511: - name = "parameter_511" - shape = [192] - dtype = "float32" - min_val = float("0.0195954") - max_val = float("0.0657275") - mean = float("0.0354674") - std = float("0.00852691") - data = None - - -class Program_weight_tensor_parameter_512: - name = "parameter_512" - shape = [192] - dtype = "float32" - min_val = float("-0.841336") - max_val = float("0.288596") - mean = float("-0.0804588") - std = float("0.135459") - data = None - - -class Program_weight_tensor_parameter_513: - name = "parameter_513" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0647608") - max_val = float("0.0671244") - mean = float("-0.000301379") - std = float("0.00415559") - data = None - - -class Program_weight_tensor_parameter_514: - name = "parameter_514" - shape = [192] - dtype = "float32" - min_val = float("-1.21773") - max_val = float("0.49966") - mean = float("-0.167333") - std = float("0.293611") - data = None - - -class Program_weight_tensor_parameter_515: - name = "parameter_515" - shape = [192] - dtype = "float32" - min_val = float("0.00864435") - max_val = float("1.53701") - mean = float("0.238131") - std = float("0.21185") - data = None - - -class Program_weight_tensor_parameter_516: - name = "parameter_516" - shape = [192] - dtype = "float32" - min_val = float("2.28062e-05") - max_val = float("0.00680281") - mean = float("0.00052519") - std = float("0.000661192") - data = None - - -class Program_weight_tensor_parameter_517: - name = "parameter_517" - shape = [192] - dtype = "float32" - min_val = float("-0.0690564") - max_val = float("0.101614") - mean = float("0.0104971") - std = float("0.0186282") - data = None - - -class Program_weight_tensor_parameter_518: - name = "parameter_518" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0626678") - max_val = float("0.0382933") - mean = float("-0.000453582") - std = float("0.00413962") - data = None - - -class Program_weight_tensor_parameter_519: - name = "parameter_519" - shape = [192] - dtype = "float32" - min_val = float("-1.21774") - max_val = float("0.50078") - mean = float("-0.167049") - std = float("0.293829") - data = None - - -class Program_weight_tensor_parameter_520: - name = "parameter_520" - shape = [192] - dtype = "float32" - min_val = float("0.353208") - max_val = float("1.45018") - mean = float("0.756982") - std = float("0.216639") - data = None - - -class Program_weight_tensor_parameter_521: - name = "parameter_521" - shape = [192] - dtype = "float32" - min_val = float("0.00474286") - max_val = float("0.0202689") - mean = float("0.00952365") - std = float("0.00262046") - data = None - - -class Program_weight_tensor_parameter_522: - name = "parameter_522" - shape = [192] - dtype = "float32" - min_val = float("-0.102625") - max_val = float("0.150149") - mean = float("0.0567521") - std = float("0.0496463") - data = None - - -class Program_weight_tensor_parameter_523: - name = "parameter_523" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0712483") - max_val = float("0.0533123") - mean = float("-0.000260747") - std = float("0.00375359") - data = None - - -class Program_weight_tensor_parameter_524: - name = "parameter_524" - shape = [192] - dtype = "float32" - min_val = float("-1.87984") - max_val = float("-0.210289") - mean = float("-1.14605") - std = float("0.325945") - data = None - - -class Program_weight_tensor_parameter_525: - name = "parameter_525" - 
shape = [192] - dtype = "float32" - min_val = float("0.790161") - max_val = float("1.59635") - mean = float("1.12149") - std = float("0.129857") - data = None - - -class Program_weight_tensor_parameter_526: - name = "parameter_526" - shape = [192] - dtype = "float32" - min_val = float("0.0175701") - max_val = float("0.0649204") - mean = float("0.0311456") - std = float("0.00871316") - data = None - - -class Program_weight_tensor_parameter_527: - name = "parameter_527" - shape = [192] - dtype = "float32" - min_val = float("-0.857673") - max_val = float("0.269081") - mean = float("-0.0673534") - std = float("0.134064") - data = None - - -class Program_weight_tensor_parameter_528: - name = "parameter_528" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0680887") - max_val = float("0.0796042") - mean = float("-0.000244907") - std = float("0.0040245") - data = None - - -class Program_weight_tensor_parameter_529: - name = "parameter_529" - shape = [192] - dtype = "float32" - min_val = float("-2.86208") - max_val = float("1.58104") - mean = float("-0.027572") - std = float("0.747892") - data = None - - -class Program_weight_tensor_parameter_530: - name = "parameter_530" - shape = [192] - dtype = "float32" - min_val = float("0.490153") - max_val = float("2.07789") - mean = float("0.900423") - std = float("0.231981") - data = None - - -class Program_weight_tensor_parameter_531: - name = "parameter_531" - shape = [192] - dtype = "float32" - min_val = float("0.0121565") - max_val = float("0.0723228") - mean = float("0.0255903") - std = float("0.0100067") - data = None - - -class Program_weight_tensor_parameter_532: - name = "parameter_532" - shape = [192] - dtype = "float32" - min_val = float("-0.232643") - max_val = float("0.322942") - mean = float("-0.0434104") - std = float("0.0608082") - data = None - - -class Program_weight_tensor_parameter_533: - name = "parameter_533" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.112904") - max_val = float("0.101906") - mean = float("-0.000605477") - std = float("0.00869645") - data = None - - -class Program_weight_tensor_parameter_534: - name = "parameter_534" - shape = [192] - dtype = "float32" - min_val = float("-2.96795") - max_val = float("1.66848") - mean = float("0.0967615") - std = float("0.663297") - data = None - - -class Program_weight_tensor_parameter_535: - name = "parameter_535" - shape = [192] - dtype = "float32" - min_val = float("0.830405") - max_val = float("5.55794") - mean = float("1.91324") - std = float("0.933276") - data = None - - -class Program_weight_tensor_parameter_536: - name = "parameter_536" - shape = [192] - dtype = "float32" - min_val = float("0.00635322") - max_val = float("0.0445876") - mean = float("0.0175605") - std = float("0.00556344") - data = None - - -class Program_weight_tensor_parameter_537: - name = "parameter_537" - shape = [192] - dtype = "float32" - min_val = float("-0.144806") - max_val = float("0.154975") - mean = float("-0.022062") - std = float("0.0559356") - data = None - - -class Program_weight_tensor_parameter_538: - name = "parameter_538" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.100414") - max_val = float("0.0965722") - mean = float("-0.000481739") - std = float("0.00788359") - data = None - - -class Program_weight_tensor_parameter_539: - name = "parameter_539" - shape = [384] - dtype = "float32" - min_val = float("-2.9234") - max_val = float("1.32689") - mean = float("-0.300856") - std = float("0.563737") - data = None - - -class 
Program_weight_tensor_parameter_540: - name = "parameter_540" - shape = [384] - dtype = "float32" - min_val = float("0.633896") - max_val = float("2.47246") - mean = float("1.15988") - std = float("0.257349") - data = None - - -class Program_weight_tensor_parameter_541: - name = "parameter_541" - shape = [384] - dtype = "float32" - min_val = float("0.0117343") - max_val = float("0.113588") - mean = float("0.0270697") - std = float("0.0131501") - data = None - - -class Program_weight_tensor_parameter_542: - name = "parameter_542" - shape = [384] - dtype = "float32" - min_val = float("-0.269172") - max_val = float("0.242303") - mean = float("0.0298657") - std = float("0.0746447") - data = None - - -class Program_weight_tensor_parameter_543: - name = "parameter_543" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0777711") - max_val = float("0.0733026") - mean = float("-9.30129e-05") - std = float("0.00423326") - data = None - - -class Program_weight_tensor_parameter_544: - name = "parameter_544" - shape = [256] - dtype = "float32" - min_val = float("-2.04675") - max_val = float("1.2869") - mean = float("-0.92413") - std = float("0.542635") - data = None - - -class Program_weight_tensor_parameter_545: - name = "parameter_545" - shape = [256] - dtype = "float32" - min_val = float("0.509654") - max_val = float("1.69024") - mean = float("1.05364") - std = float("0.177449") - data = None - - -class Program_weight_tensor_parameter_546: - name = "parameter_546" - shape = [256] - dtype = "float32" - min_val = float("0.00164958") - max_val = float("0.0205898") - mean = float("0.00554979") - std = float("0.00243516") - data = None - - -class Program_weight_tensor_parameter_547: - name = "parameter_547" - shape = [256] - dtype = "float32" - min_val = float("-0.248048") - max_val = float("0.18055") - mean = float("-0.0481355") - std = float("0.0642407") - data = None - - -class Program_weight_tensor_parameter_548: - name = "parameter_548" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.211445") - max_val = float("0.154025") - mean = float("-0.00090718") - std = float("0.0139364") - data = None - - -class Program_weight_tensor_parameter_549: - name = "parameter_549" - shape = [192] - dtype = "float32" - min_val = float("-0.0146056") - max_val = float("0.00252242") - mean = float("-0.00513018") - std = float("0.00389486") - data = None - - -class Program_weight_tensor_parameter_550: - name = "parameter_550" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.340895") - max_val = float("0.243469") - mean = float("-0.00395929") - std = float("0.0107136") - data = None - - -class Program_weight_tensor_parameter_551: - name = "parameter_551" - shape = [96] - dtype = "float32" - min_val = float("-1.9141") - max_val = float("0.53448") - mean = float("-0.208812") - std = float("0.434585") - data = None - - -class Program_weight_tensor_parameter_552: - name = "parameter_552" - shape = [96] - dtype = "float32" - min_val = float("0.139627") - max_val = float("3.23019") - mean = float("0.63562") - std = float("0.668608") - data = None - - -class Program_weight_tensor_parameter_553: - name = "parameter_553" - shape = [96] - dtype = "float32" - min_val = float("9.44925e-05") - max_val = float("0.00259545") - mean = float("0.000627228") - std = float("0.0004663") - data = None - - -class Program_weight_tensor_parameter_554: - name = "parameter_554" - shape = [96] - dtype = "float32" - min_val = float("-0.0508301") - max_val = float("0.0646139") - mean = 
float("0.00729974") - std = float("0.022731") - data = None - - -class Program_weight_tensor_parameter_555: - name = "parameter_555" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0529209") - max_val = float("0.0938109") - mean = float("-0.00068654") - std = float("0.00780134") - data = None - - -class Program_weight_tensor_parameter_556: - name = "parameter_556" - shape = [96] - dtype = "float32" - min_val = float("-1.91385") - max_val = float("0.535947") - mean = float("-0.208472") - std = float("0.434758") - data = None - - -class Program_weight_tensor_parameter_557: - name = "parameter_557" - shape = [96] - dtype = "float32" - min_val = float("0.343945") - max_val = float("5.46861") - mean = float("1.08565") - std = float("0.883653") - data = None - - -class Program_weight_tensor_parameter_558: - name = "parameter_558" - shape = [96] - dtype = "float32" - min_val = float("0.000831351") - max_val = float("0.0140034") - mean = float("0.00502365") - std = float("0.0025539") - data = None - - -class Program_weight_tensor_parameter_559: - name = "parameter_559" - shape = [96] - dtype = "float32" - min_val = float("-0.135563") - max_val = float("0.206685") - mean = float("0.0107306") - std = float("0.0611442") - data = None - - -class Program_weight_tensor_parameter_560: - name = "parameter_560" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0417476") - max_val = float("0.0707409") - mean = float("-0.000200496") - std = float("0.00586268") - data = None - - -class Program_weight_tensor_parameter_561: - name = "parameter_561" - shape = [96] - dtype = "float32" - min_val = float("-2.46669") - max_val = float("-0.0188941") - mean = float("-1.22596") - std = float("0.444206") - data = None - - -class Program_weight_tensor_parameter_562: - name = "parameter_562" - shape = [96] - dtype = "float32" - min_val = float("0.540095") - max_val = float("1.63859") - mean = float("0.945542") - std = float("0.172479") - data = None - - -class Program_weight_tensor_parameter_563: - name = "parameter_563" - shape = [96] - dtype = "float32" - min_val = float("0.0343629") - max_val = float("0.225267") - mean = float("0.0825205") - std = float("0.0339352") - data = None - - -class Program_weight_tensor_parameter_564: - name = "parameter_564" - shape = [96] - dtype = "float32" - min_val = float("-2.59911") - max_val = float("2.14438") - mean = float("-0.187597") - std = float("0.479") - data = None - - -class Program_weight_tensor_parameter_565: - name = "parameter_565" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.159603") - max_val = float("0.105542") - mean = float("-0.000422661") - std = float("0.00713371") - data = None - - -class Program_weight_tensor_parameter_566: - name = "parameter_566" - shape = [96] - dtype = "float32" - min_val = float("-1.38744") - max_val = float("0.563004") - mean = float("-0.132441") - std = float("0.347447") - data = None - - -class Program_weight_tensor_parameter_567: - name = "parameter_567" - shape = [96] - dtype = "float32" - min_val = float("0.0452771") - max_val = float("1.86502") - mean = float("0.460871") - std = float("0.366358") - data = None - - -class Program_weight_tensor_parameter_568: - name = "parameter_568" - shape = [96] - dtype = "float32" - min_val = float("7.60148e-05") - max_val = float("0.00285319") - mean = float("0.000794421") - std = float("0.000636585") - data = None - - -class Program_weight_tensor_parameter_569: - name = "parameter_569" - shape = [96] - dtype = "float32" - min_val = 
float("-0.0497884") - max_val = float("0.0479867") - mean = float("0.00766729") - std = float("0.0176144") - data = None - - -class Program_weight_tensor_parameter_570: - name = "parameter_570" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0484855") - max_val = float("0.0469527") - mean = float("-0.000557248") - std = float("0.00696514") - data = None - - -class Program_weight_tensor_parameter_571: - name = "parameter_571" - shape = [96] - dtype = "float32" - min_val = float("-1.38716") - max_val = float("0.565575") - mean = float("-0.131901") - std = float("0.347951") - data = None - - -class Program_weight_tensor_parameter_572: - name = "parameter_572" - shape = [96] - dtype = "float32" - min_val = float("0.373276") - max_val = float("2.32827") - mean = float("0.902354") - std = float("0.426303") - data = None - - -class Program_weight_tensor_parameter_573: - name = "parameter_573" - shape = [96] - dtype = "float32" - min_val = float("0.00302635") - max_val = float("0.0233198") - mean = float("0.00879849") - std = float("0.00436968") - data = None - - -class Program_weight_tensor_parameter_574: - name = "parameter_574" - shape = [96] - dtype = "float32" - min_val = float("-0.106151") - max_val = float("0.119838") - mean = float("0.0358036") - std = float("0.043231") - data = None - - -class Program_weight_tensor_parameter_575: - name = "parameter_575" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0601192") - max_val = float("0.0479345") - mean = float("-0.000334461") - std = float("0.00588243") - data = None - - -class Program_weight_tensor_parameter_576: - name = "parameter_576" - shape = [96] - dtype = "float32" - min_val = float("-3.32059") - max_val = float("0.366033") - mean = float("-1.1777") - std = float("0.556588") - data = None - - -class Program_weight_tensor_parameter_577: - name = "parameter_577" - shape = [96] - dtype = "float32" - min_val = float("0.470758") - max_val = float("1.9813") - mean = float("1.03925") - std = float("0.238611") - data = None - - -class Program_weight_tensor_parameter_578: - name = "parameter_578" - shape = [96] - dtype = "float32" - min_val = float("0.0279788") - max_val = float("0.183449") - mean = float("0.0506417") - std = float("0.0183646") - data = None - - -class Program_weight_tensor_parameter_579: - name = "parameter_579" - shape = [96] - dtype = "float32" - min_val = float("-1.05837") - max_val = float("0.786092") - mean = float("-0.0424528") - std = float("0.278771") - data = None - - -class Program_weight_tensor_parameter_580: - name = "parameter_580" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.152735") - max_val = float("0.158912") - mean = float("-0.000426001") - std = float("0.00705743") - data = None - - -class Program_weight_tensor_parameter_581: - name = "parameter_581" - shape = [96] - dtype = "float32" - min_val = float("-1.24949") - max_val = float("0.583942") - mean = float("-0.109112") - std = float("0.292117") - data = None - - -class Program_weight_tensor_parameter_582: - name = "parameter_582" - shape = [96] - dtype = "float32" - min_val = float("0.0224878") - max_val = float("1.27796") - mean = float("0.324443") - std = float("0.192946") - data = None - - -class Program_weight_tensor_parameter_583: - name = "parameter_583" - shape = [96] - dtype = "float32" - min_val = float("2.50107e-05") - max_val = float("0.00308123") - mean = float("0.000650214") - std = float("0.000486492") - data = None - - -class Program_weight_tensor_parameter_584: - name = 
"parameter_584" - shape = [96] - dtype = "float32" - min_val = float("-0.0398841") - max_val = float("0.0533346") - mean = float("0.00424068") - std = float("0.0172095") - data = None - - -class Program_weight_tensor_parameter_585: - name = "parameter_585" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0406747") - max_val = float("0.0494878") - mean = float("-0.000325615") - std = float("0.0071059") - data = None - - -class Program_weight_tensor_parameter_586: - name = "parameter_586" - shape = [96] - dtype = "float32" - min_val = float("-1.24929") - max_val = float("0.586311") - mean = float("-0.108658") - std = float("0.29268") - data = None - - -class Program_weight_tensor_parameter_587: - name = "parameter_587" - shape = [96] - dtype = "float32" - min_val = float("0.311326") - max_val = float("1.67043") - mean = float("0.747441") - std = float("0.257878") - data = None - - -class Program_weight_tensor_parameter_588: - name = "parameter_588" - shape = [96] - dtype = "float32" - min_val = float("0.00299069") - max_val = float("0.0188881") - mean = float("0.00858598") - std = float("0.00338781") - data = None - - -class Program_weight_tensor_parameter_589: - name = "parameter_589" - shape = [96] - dtype = "float32" - min_val = float("-0.104806") - max_val = float("0.146672") - mean = float("0.0293301") - std = float("0.0382013") - data = None - - -class Program_weight_tensor_parameter_590: - name = "parameter_590" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0728298") - max_val = float("0.065903") - mean = float("-0.000300919") - std = float("0.00597289") - data = None - - -class Program_weight_tensor_parameter_591: - name = "parameter_591" - shape = [96] - dtype = "float32" - min_val = float("-3.5826") - max_val = float("0.291706") - mean = float("-1.12744") - std = float("0.572685") - data = None - - -class Program_weight_tensor_parameter_592: - name = "parameter_592" - shape = [96] - dtype = "float32" - min_val = float("0.511064") - max_val = float("2.19222") - mean = float("1.05217") - std = float("0.238287") - data = None - - -class Program_weight_tensor_parameter_593: - name = "parameter_593" - shape = [96] - dtype = "float32" - min_val = float("0.021583") - max_val = float("0.0772463") - mean = float("0.0393307") - std = float("0.00939936") - data = None - - -class Program_weight_tensor_parameter_594: - name = "parameter_594" - shape = [96] - dtype = "float32" - min_val = float("-0.95654") - max_val = float("0.644938") - mean = float("-0.042882") - std = float("0.216242") - data = None - - -class Program_weight_tensor_parameter_595: - name = "parameter_595" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0984925") - max_val = float("0.137263") - mean = float("-0.000483231") - std = float("0.00714155") - data = None - - -class Program_weight_tensor_parameter_596: - name = "parameter_596" - shape = [96] - dtype = "float32" - min_val = float("-0.891765") - max_val = float("0.530315") - mean = float("-0.160042") - std = float("0.28168") - data = None - - -class Program_weight_tensor_parameter_597: - name = "parameter_597" - shape = [96] - dtype = "float32" - min_val = float("0.0202036") - max_val = float("1.40549") - mean = float("0.324747") - std = float("0.213549") - data = None - - -class Program_weight_tensor_parameter_598: - name = "parameter_598" - shape = [96] - dtype = "float32" - min_val = float("5.2419e-05") - max_val = float("0.00309807") - mean = float("0.00068279") - std = float("0.000470997") - data = None - - 
-class Program_weight_tensor_parameter_599: - name = "parameter_599" - shape = [96] - dtype = "float32" - min_val = float("-0.0353761") - max_val = float("0.0539706") - mean = float("0.00757239") - std = float("0.0158983") - data = None - - -class Program_weight_tensor_parameter_600: - name = "parameter_600" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.050403") - max_val = float("0.0470333") - mean = float("-0.000602859") - std = float("0.00719125") - data = None - - -class Program_weight_tensor_parameter_601: - name = "parameter_601" - shape = [96] - dtype = "float32" - min_val = float("-0.891522") - max_val = float("0.532005") - mean = float("-0.15962") - std = float("0.282144") - data = None - - -class Program_weight_tensor_parameter_602: - name = "parameter_602" - shape = [96] - dtype = "float32" - min_val = float("0.170998") - max_val = float("1.78064") - mean = float("0.708933") - std = float("0.284476") - data = None - - -class Program_weight_tensor_parameter_603: - name = "parameter_603" - shape = [96] - dtype = "float32" - min_val = float("0.00186209") - max_val = float("0.0242538") - mean = float("0.00887548") - std = float("0.00332774") - data = None - - -class Program_weight_tensor_parameter_604: - name = "parameter_604" - shape = [96] - dtype = "float32" - min_val = float("-0.0317255") - max_val = float("0.148332") - mean = float("0.0439334") - std = float("0.0383947") - data = None - - -class Program_weight_tensor_parameter_605: - name = "parameter_605" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0673552") - max_val = float("0.0665555") - mean = float("-0.000406403") - std = float("0.00600122") - data = None - - -class Program_weight_tensor_parameter_606: - name = "parameter_606" - shape = [96] - dtype = "float32" - min_val = float("-2.65797") - max_val = float("0.0644665") - mean = float("-1.06329") - std = float("0.488575") - data = None - - -class Program_weight_tensor_parameter_607: - name = "parameter_607" - shape = [96] - dtype = "float32" - min_val = float("0.510122") - max_val = float("1.73722") - mean = float("1.01545") - std = float("0.193669") - data = None - - -class Program_weight_tensor_parameter_608: - name = "parameter_608" - shape = [96] - dtype = "float32" - min_val = float("0.0170441") - max_val = float("0.0592749") - mean = float("0.0303397") - std = float("0.00732367") - data = None - - -class Program_weight_tensor_parameter_609: - name = "parameter_609" - shape = [96] - dtype = "float32" - min_val = float("-0.802591") - max_val = float("0.759118") - mean = float("-0.0649493") - std = float("0.211368") - data = None - - -class Program_weight_tensor_parameter_610: - name = "parameter_610" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0799583") - max_val = float("0.12863") - mean = float("-0.000463251") - std = float("0.00696947") - data = None - - -class Program_weight_tensor_parameter_611: - name = "parameter_611" - shape = [96] - dtype = "float32" - min_val = float("-0.979363") - max_val = float("0.488329") - mean = float("-0.1357") - std = float("0.278693") - data = None - - -class Program_weight_tensor_parameter_612: - name = "parameter_612" - shape = [96] - dtype = "float32" - min_val = float("0.0499672") - max_val = float("1.15174") - mean = float("0.296075") - std = float("0.172795") - data = None - - -class Program_weight_tensor_parameter_613: - name = "parameter_613" - shape = [96] - dtype = "float32" - min_val = float("0.000124848") - max_val = float("0.00438819") - mean = 
float("0.00108131") - std = float("0.000696678") - data = None - - -class Program_weight_tensor_parameter_614: - name = "parameter_614" - shape = [96] - dtype = "float32" - min_val = float("-0.0427797") - max_val = float("0.06109") - mean = float("0.00673208") - std = float("0.0190435") - data = None - - -class Program_weight_tensor_parameter_615: - name = "parameter_615" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0730409") - max_val = float("0.0734237") - mean = float("-0.000668194") - std = float("0.00816827") - data = None - - -class Program_weight_tensor_parameter_616: - name = "parameter_616" - shape = [96] - dtype = "float32" - min_val = float("-0.979598") - max_val = float("0.490087") - mean = float("-0.135308") - std = float("0.279185") - data = None - - -class Program_weight_tensor_parameter_617: - name = "parameter_617" - shape = [96] - dtype = "float32" - min_val = float("0.240111") - max_val = float("1.69891") - mean = float("0.604647") - std = float("0.228294") - data = None - - -class Program_weight_tensor_parameter_618: - name = "parameter_618" - shape = [96] - dtype = "float32" - min_val = float("0.00479228") - max_val = float("0.0493428") - mean = float("0.0126094") - std = float("0.00558079") - data = None - - -class Program_weight_tensor_parameter_619: - name = "parameter_619" - shape = [96] - dtype = "float32" - min_val = float("-0.0884025") - max_val = float("0.162813") - mean = float("0.0330297") - std = float("0.0455641") - data = None - - -class Program_weight_tensor_parameter_620: - name = "parameter_620" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.070586") - max_val = float("0.053917") - mean = float("-0.000353734") - std = float("0.00603503") - data = None - - -class Program_weight_tensor_parameter_621: - name = "parameter_621" - shape = [96] - dtype = "float32" - min_val = float("-3.46749") - max_val = float("0.20134") - mean = float("-1.00429") - std = float("0.548683") - data = None - - -class Program_weight_tensor_parameter_622: - name = "parameter_622" - shape = [96] - dtype = "float32" - min_val = float("0.68469") - max_val = float("2.50521") - mean = float("1.07421") - std = float("0.212064") - data = None - - -class Program_weight_tensor_parameter_623: - name = "parameter_623" - shape = [96] - dtype = "float32" - min_val = float("0.0126502") - max_val = float("0.0593798") - mean = float("0.025404") - std = float("0.00851987") - data = None - - -class Program_weight_tensor_parameter_624: - name = "parameter_624" - shape = [96] - dtype = "float32" - min_val = float("-0.59646") - max_val = float("0.699113") - mean = float("-0.0602622") - std = float("0.200876") - data = None - - -class Program_weight_tensor_parameter_625: - name = "parameter_625" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0875016") - max_val = float("0.0958638") - mean = float("-0.000393602") - std = float("0.00713622") - data = None - - -class Program_weight_tensor_parameter_626: - name = "parameter_626" - shape = [96] - dtype = "float32" - min_val = float("-0.623249") - max_val = float("0.450355") - mean = float("-0.0811173") - std = float("0.25665") - data = None - - -class Program_weight_tensor_parameter_627: - name = "parameter_627" - shape = [96] - dtype = "float32" - min_val = float("0.0905173") - max_val = float("1.30172") - mean = float("0.309137") - std = float("0.196898") - data = None - - -class Program_weight_tensor_parameter_628: - name = "parameter_628" - shape = [96] - dtype = "float32" - min_val = 
float("0.000486077") - max_val = float("0.0206445") - mean = float("0.00387906") - std = float("0.00325823") - data = None - - -class Program_weight_tensor_parameter_629: - name = "parameter_629" - shape = [96] - dtype = "float32" - min_val = float("-0.0378971") - max_val = float("0.0272841") - mean = float("0.000360893") - std = float("0.0116936") - data = None - - -class Program_weight_tensor_parameter_630: - name = "parameter_630" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0967686") - max_val = float("0.0726096") - mean = float("-0.00111676") - std = float("0.00943776") - data = None - - -class Program_weight_tensor_parameter_631: - name = "parameter_631" - shape = [96] - dtype = "float32" - min_val = float("-0.62253") - max_val = float("0.451504") - mean = float("-0.0806935") - std = float("0.256953") - data = None - - -class Program_weight_tensor_parameter_632: - name = "parameter_632" - shape = [96] - dtype = "float32" - min_val = float("0.210918") - max_val = float("1.42997") - mean = float("0.527932") - std = float("0.258611") - data = None - - -class Program_weight_tensor_parameter_633: - name = "parameter_633" - shape = [96] - dtype = "float32" - min_val = float("0.0110923") - max_val = float("0.101379") - mean = float("0.0342554") - std = float("0.0175406") - data = None - - -class Program_weight_tensor_parameter_634: - name = "parameter_634" - shape = [96] - dtype = "float32" - min_val = float("-0.105783") - max_val = float("0.0988172") - mean = float("-0.00552355") - std = float("0.039398") - data = None - - -class Program_weight_tensor_parameter_635: - name = "parameter_635" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0996365") - max_val = float("0.0540305") - mean = float("-0.00042977") - std = float("0.00592197") - data = None - - -class Program_weight_tensor_parameter_636: - name = "parameter_636" - shape = [96] - dtype = "float32" - min_val = float("-2.4099") - max_val = float("0.510062") - mean = float("-0.827896") - std = float("0.467957") - data = None - - -class Program_weight_tensor_parameter_637: - name = "parameter_637" - shape = [96] - dtype = "float32" - min_val = float("0.855439") - max_val = float("2.18052") - mean = float("1.27541") - std = float("0.20896") - data = None - - -class Program_weight_tensor_parameter_638: - name = "parameter_638" - shape = [96] - dtype = "float32" - min_val = float("0.0104439") - max_val = float("0.0520779") - mean = float("0.0209799") - std = float("0.00859072") - data = None - - -class Program_weight_tensor_parameter_639: - name = "parameter_639" - shape = [96] - dtype = "float32" - min_val = float("-0.780626") - max_val = float("0.470779") - mean = float("-0.0616335") - std = float("0.196544") - data = None - - -class Program_weight_tensor_parameter_640: - name = "parameter_640" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.154701") - max_val = float("0.153806") - mean = float("-0.00026052") - std = float("0.00735431") - data = None - - -class Program_weight_tensor_parameter_641: - name = "parameter_641" - shape = [96] - dtype = "float32" - min_val = float("-3.15956") - max_val = float("1.89061") - mean = float("0.502181") - std = float("0.861277") - data = None - - -class Program_weight_tensor_parameter_642: - name = "parameter_642" - shape = [96] - dtype = "float32" - min_val = float("0.209789") - max_val = float("2.62802") - mean = float("0.557131") - std = float("0.318659") - data = None - - -class Program_weight_tensor_parameter_643: - name = 
"parameter_643" - shape = [96] - dtype = "float32" - min_val = float("0.00949005") - max_val = float("0.147612") - mean = float("0.0342476") - std = float("0.0235361") - data = None - - -class Program_weight_tensor_parameter_644: - name = "parameter_644" - shape = [96] - dtype = "float32" - min_val = float("-0.272514") - max_val = float("0.303684") - mean = float("-0.0269397") - std = float("0.0869885") - data = None - - -class Program_weight_tensor_parameter_645: - name = "parameter_645" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.190092") - max_val = float("0.235795") - mean = float("-0.00054682") - std = float("0.0152601") - data = None - - -class Program_weight_tensor_parameter_646: - name = "parameter_646" - shape = [96] - dtype = "float32" - min_val = float("-4.92412") - max_val = float("1.57941") - mean = float("0.384226") - std = float("1.04886") - data = None - - -class Program_weight_tensor_parameter_647: - name = "parameter_647" - shape = [96] - dtype = "float32" - min_val = float("0.411425") - max_val = float("6.77791") - mean = float("1.69479") - std = float("1.30749") - data = None - - -class Program_weight_tensor_parameter_648: - name = "parameter_648" - shape = [96] - dtype = "float32" - min_val = float("0.00569395") - max_val = float("0.186568") - mean = float("0.0312372") - std = float("0.0269306") - data = None - - -class Program_weight_tensor_parameter_649: - name = "parameter_649" - shape = [96] - dtype = "float32" - min_val = float("-0.123122") - max_val = float("0.396242") - mean = float("0.0353184") - std = float("0.0935304") - data = None - - -class Program_weight_tensor_parameter_650: - name = "parameter_650" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.115428") - max_val = float("0.143096") - mean = float("0.000288353") - std = float("0.0138526") - data = None - - -class Program_weight_tensor_parameter_651: - name = "parameter_651" - shape = [192] - dtype = "float32" - min_val = float("-2.27512") - max_val = float("1.75006") - mean = float("-0.125702") - std = float("0.740468") - data = None - - -class Program_weight_tensor_parameter_652: - name = "parameter_652" - shape = [192] - dtype = "float32" - min_val = float("0.632726") - max_val = float("2.96908") - mean = float("1.08749") - std = float("0.283555") - data = None - - -class Program_weight_tensor_parameter_653: - name = "parameter_653" - shape = [192] - dtype = "float32" - min_val = float("0.0128887") - max_val = float("0.306476") - mean = float("0.0430534") - std = float("0.0345139") - data = None - - -class Program_weight_tensor_parameter_654: - name = "parameter_654" - shape = [192] - dtype = "float32" - min_val = float("-0.476717") - max_val = float("0.27685") - mean = float("-0.0597992") - std = float("0.114967") - data = None - - -class Program_weight_tensor_parameter_655: - name = "parameter_655" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.0811233") - max_val = float("0.11238") - mean = float("-0.000121273") - std = float("0.00716338") - data = None - - -class Program_weight_tensor_parameter_656: - name = "parameter_656" - shape = [128] - dtype = "float32" - min_val = float("-2.81253") - max_val = float("1.96258") - mean = float("-0.709313") - std = float("0.64886") - data = None - - -class Program_weight_tensor_parameter_657: - name = "parameter_657" - shape = [128] - dtype = "float32" - min_val = float("0.302011") - max_val = float("2.86022") - mean = float("1.01859") - std = float("0.279425") - data = None - - -class 
Program_weight_tensor_parameter_658: - name = "parameter_658" - shape = [128] - dtype = "float32" - min_val = float("0.000683803") - max_val = float("0.0143901") - mean = float("0.00380984") - std = float("0.00196434") - data = None - - -class Program_weight_tensor_parameter_659: - name = "parameter_659" - shape = [128] - dtype = "float32" - min_val = float("-0.241007") - max_val = float("0.23083") - mean = float("0.00336445") - std = float("0.0801385") - data = None - - -class Program_weight_tensor_parameter_660: - name = "parameter_660" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.16828") - max_val = float("0.191318") - mean = float("-0.00143145") - std = float("0.0216253") - data = None - - -class Program_weight_tensor_parameter_661: - name = "parameter_661" - shape = [96] - dtype = "float32" - min_val = float("-0.0182017") - max_val = float("-0.00100735") - mean = float("-0.00761377") - std = float("0.00459165") - data = None - - -class Program_weight_tensor_parameter_662: - name = "parameter_662" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.297058") - max_val = float("0.124247") - mean = float("-0.00811798") - std = float("0.0180434") - data = None - - -class Program_weight_tensor_parameter_663: - name = "parameter_663" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_664: - name = "parameter_664" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_665: - name = "parameter_665" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_666: - name = "parameter_666" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_667: - name = "parameter_667" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0524219") - max_val = float("0.062819") - mean = float("-0.00145834") - std = float("0.0124603") - data = None - - -class Program_weight_tensor_parameter_668: - name = "parameter_668" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_669: - name = "parameter_669" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_670: - name = "parameter_670" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_671: - name = "parameter_671" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_672: - name = "parameter_672" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.053396") - max_val = float("0.0780475") - mean = float("-0.000432103") - std = float("0.0105215") - data = None - - -class Program_weight_tensor_parameter_673: - name = "parameter_673" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_674: - name = "parameter_674" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_675: - name = "parameter_675" - shape = [48] - dtype = "float32" - min_val = float("0") - 
max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_676: - name = "parameter_676" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_677: - name = "parameter_677" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0907736") - max_val = float("0.0889891") - mean = float("-0.000674195") - std = float("0.0115766") - data = None - - -class Program_weight_tensor_parameter_678: - name = "parameter_678" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_679: - name = "parameter_679" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_680: - name = "parameter_680" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_681: - name = "parameter_681" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_682: - name = "parameter_682" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0701343") - max_val = float("0.0744403") - mean = float("-0.000969115") - std = float("0.0132523") - data = None - - -class Program_weight_tensor_parameter_683: - name = "parameter_683" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_684: - name = "parameter_684" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_685: - name = "parameter_685" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_686: - name = "parameter_686" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_687: - name = "parameter_687" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0625249") - max_val = float("0.0628193") - mean = float("-0.000704405") - std = float("0.010522") - data = None - - -class Program_weight_tensor_parameter_688: - name = "parameter_688" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_689: - name = "parameter_689" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_690: - name = "parameter_690" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_691: - name = "parameter_691" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_692: - name = "parameter_692" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.105534") - max_val = float("0.0876318") - mean = float("-0.000291303") - std = float("0.0118198") - data = None - - -class Program_weight_tensor_parameter_693: - name = "parameter_693" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_694: - name = "parameter_694" - shape = [48] - dtype = "float32" - min_val = 
float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_695: - name = "parameter_695" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_696: - name = "parameter_696" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_697: - name = "parameter_697" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0927544") - max_val = float("0.067179") - mean = float("-0.00167319") - std = float("0.0164656") - data = None - - -class Program_weight_tensor_parameter_698: - name = "parameter_698" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_699: - name = "parameter_699" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_700: - name = "parameter_700" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_701: - name = "parameter_701" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_702: - name = "parameter_702" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0662936") - max_val = float("0.0926268") - mean = float("-0.000546134") - std = float("0.0110591") - data = None - - -class Program_weight_tensor_parameter_703: - name = "parameter_703" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_704: - name = "parameter_704" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_705: - name = "parameter_705" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_706: - name = "parameter_706" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_707: - name = "parameter_707" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.115861") - max_val = float("0.0843934") - mean = float("-0.000390165") - std = float("0.0126271") - data = None - - -class Program_weight_tensor_parameter_708: - name = "parameter_708" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_709: - name = "parameter_709" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_710: - name = "parameter_710" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_711: - name = "parameter_711" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_712: - name = "parameter_712" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.156722") - max_val = float("0.12438") - mean = float("-0.00240073") - std = float("0.0227151") - data = None - - -class Program_weight_tensor_parameter_713: - name = "parameter_713" - shape = [48] - dtype = "float32" - min_val = 
float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_714: - name = "parameter_714" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_715: - name = "parameter_715" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_716: - name = "parameter_716" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_717: - name = "parameter_717" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.133366") - max_val = float("0.190723") - mean = float("-0.000461332") - std = float("0.0215494") - data = None - - -class Program_weight_tensor_parameter_718: - name = "parameter_718" - shape = [96] - dtype = "float32" - min_val = float("-3.40388") - max_val = float("3.27594") - mean = float("0.331") - std = float("1.14502") - data = None - - -class Program_weight_tensor_parameter_719: - name = "parameter_719" - shape = [96] - dtype = "float32" - min_val = float("0.861639") - max_val = float("4.91749") - mean = float("1.91516") - std = float("0.75496") - data = None - - -class Program_weight_tensor_parameter_720: - name = "parameter_720" - shape = [96] - dtype = "float32" - min_val = float("0.68512") - max_val = float("19.942") - mean = float("2.38283") - std = float("2.38942") - data = None - - -class Program_weight_tensor_parameter_721: - name = "parameter_721" - shape = [96] - dtype = "float32" - min_val = float("-1.44893") - max_val = float("1.82311") - mean = float("-0.333309") - std = float("0.618856") - data = None - - -class Program_weight_tensor_parameter_722: - name = "parameter_722" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.115845") - max_val = float("0.115419") - mean = float("-0.000438744") - std = float("0.0120833") - data = None - - -class Program_weight_tensor_parameter_723: - name = "parameter_723" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_724: - name = "parameter_724" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_725: - name = "parameter_725" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_726: - name = "parameter_726" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_727: - name = "parameter_727" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.153743") - max_val = float("0.135272") - mean = float("-0.000740633") - std = float("0.0191711") - data = None - - -class Program_weight_tensor_parameter_728: - name = "parameter_728" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_729: - name = "parameter_729" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_730: - name = "parameter_730" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_731: - name = "parameter_731" - shape = [32] - dtype = "float32" - min_val = 
float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_732: - name = "parameter_732" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-0.307002") - max_val = float("0.202588") - mean = float("-4.43961e-05") - std = float("0.025069") - data = None - - -class Program_weight_tensor_parameter_733: - name = "parameter_733" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_734: - name = "parameter_734" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_735: - name = "parameter_735" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_736: - name = "parameter_736" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_737: - name = "parameter_737" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-0.297631") - max_val = float("0.278985") - mean = float("-0.00146872") - std = float("0.0683342") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt deleted file mode 100644 index 7248f3b80..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -80c6a3012fae16e53b556d8b6ef2a40e2378ccb66ef0a81269f362d7dab93afe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json deleted file mode 100644 index 381598f86..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-largesize-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py deleted file mode 100644 index 0bbda7212..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/input_meta.py +++ /dev/null @@ -1,102 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [1, 24276] - dtype = "float32" - max_val = float("2.0") - mean = float("0.0175482") - std = float("0.141858") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [1, 38, 24276] - dtype = "float32" - max_val = float("0.982337") - mean = float("0.000792632") - std = float("0.022185") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [1, 38, 24276] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000461795") - std = float("0.0214844") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [1, 1] - dtype = "int32" - data = [0] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [1, 38, 1] - dtype = "int32" - data = [ - 3, - 3, - 9, - 1, - 0, - 0, - 0, - 0, - 3, - 3, - 3, - 3, - 3, - 0, - 0, - 0, - 8, - 3, - 3, - 3, - 0, - 0, - 3, - 3, - 3, - 3, - 3, - 5, - 3, - 3, - 3, - 3, - 3, - 0, - 3, - 3, - 0, - 0, - ] - - -class 
Program_weight_tensor_data_5: - name = "data_5" - shape = [1, 38, 4] - dtype = "float32" - min_val = float("354.773") - max_val = float("1051.0") - mean = float("652.35") - std = float("193.013") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1, 38, 24276] - dtype = "float32" - max_val = float("0.73484") - mean = float("8.98923e-05") - std = float("0.00618669") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py deleted file mode 100644 index 88cd8833b..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/model.py +++ /dev/null @@ -1,244 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] - - # pd_op.unsqueeze: (1x1x24276xf32) <- (1x24276xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0, full_int_array_0 - - # pd_op.full: (xf32) <- () - full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (1x1x24276xb) <- (1x1x24276xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 38, 1] - - # pd_op.tile: (1x38x24276xb) <- (1x1x24276xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) - del full_int_array_1, greater_than_0 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - multiply_1 = paddle._C_ops.multiply(data_1, data_2) - - # pd_op.full: (1xi64) <- () - full_1 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) - del multiply_1 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("38"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (1x24276x38xf32) <- (1x24276xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 - ) - del argmax_0, full_2 - - # pd_op.transpose: (1x38x24276xf32) <- (1x24276x38xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 - - # pd_op.where: (1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) - del data_2, tile_0, transpose_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-2] - - # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - - # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) - del full_1 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) - scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) - del data_3, full_3 - - # pd_op.cast: (1x1xi64) <- (1x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 - - # pd_op.add: (1x24276xi64) <- (1x24276xi64, 1x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_0) - del argmax_1, 
cast_0 - - # pd_op.flatten: (38xi32) <- (1x38x1xi32) - flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) - del data_4 - - # pd_op.flatten: (24276xi64) <- (1x24276xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 - - # pd_op.full: (1xi32) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.gather: (24276xi32) <- (38xi32, 24276xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) - del flatten_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_3 = [1, 24276] - - # pd_op.reshape: (1x24276xi32) <- (24276xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) - del full_int_array_3, gather_0 - - # pd_op.full: (xf32) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (1x24276xb) <- (1x24276xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) - del full_5, sum_0 - - # pd_op.full: (1xf32) <- () - full_6 = paddle._C_ops.full( - [1], float("10"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_like: (1x24276xi32) <- (1x24276xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() - ) - del full_6 - - # pd_op.where: (1x24276xi32) <- (1x24276xb, 1x24276xi32, 1x24276xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [-1, 4] - - # pd_op.reshape: (38x4xf32) <- (1x38x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) - del data_5, full_int_array_4 - - # pd_op.gather: (24276x4xf32) <- (38x4xf32, 24276xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) - del flatten_1, full_4, reshape_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [1, 24276, 4] - - # pd_op.reshape: (1x24276x4xf32) <- (24276x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) - del full_int_array_5, gather_1 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (1x24276x11xf32) <- (1x24276xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_7, where_1.dtype), full_7 - ) - del full_7 - - # pd_op.full: (10xi64) <- () - full_8 = paddle._C_ops.full( - [10], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.assign_value_: (10xi64) <- (10xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_8, - [10], - paddle.int64, - [ - float("0"), - float("1"), - float("2"), - float("3"), - float("4"), - float("5"), - float("6"), - float("7"), - float("8"), - float("9"), - ], - paddle.framework._current_expected_place(), - ) - del full_8 - - # pd_op.index_select: (1x24276x10xf32) <- (1x24276x11xf32, 10xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - multiply_2 = paddle._C_ops.multiply(data_6, where_0) - del data_6 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [-1] - - # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - multiply_3 = 
paddle._C_ops.multiply(data_1, where_0) - del data_1, where_0 - - # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) - del multiply_3 - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (1x38x1xf32) <- (1x38x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) - del full_9, max_0 - - # pd_op.divide: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - divide_0 = paddle._C_ops.divide(multiply_2, scale_1) - del multiply_2, scale_1 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - multiply_4 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 - - # pd_op.max: (1x24276xf32) <- (1x38x24276xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) - del full_int_array_2, multiply_4 - - # pd_op.unsqueeze: (1x24276x1xf32) <- (1x24276xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) - del full_int_array_6, max_2 - - # pd_op.multiply: (1x24276x10xf32) <- (1x24276x10xf32, 1x24276x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 - - return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_12/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt deleted file mode 100644 index d26369202..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -c570d43f53acac4f3957a2ca875002b813fd4f0945c65adb5e86d7b2292f59e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json deleted file mode 100644 index 381598f86..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-largesize-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py deleted file mode 100644 index bb7813590..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/model.py +++ /dev/null @@ -1,514 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.cast: (1x24276xi32) <- (1x24276xb) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.unsqueeze: (1x24276x1xi32) <- (1x24276xi32, 1xi64) - unsqueeze_0 = 
paddle._C_ops.unsqueeze(cast_0, full_int_array_0) - del cast_0 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_1 = [1, 1, 4] - - # pd_op.tile: (1x24276x4xi32) <- (1x24276x1xi32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) - del full_int_array_1, unsqueeze_0 - - # pd_op.cast: (1x24276x4xb) <- (1x24276x4xi32) - cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) - del tile_0 - - # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) - masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) - del data_1 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [-1, 4] - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) - - # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) - masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) - del masked_select_1 - - # pd_op.sum: (1x24276xf32) <- (1x24276x10xf32, 1xi64) - sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) - del data_3 - - # pd_op.masked_select: (-1xf32) <- (1x24276xf32, 1x24276xb) - masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) - del sum_0 - - # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) - del masked_select_2 - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) - - # pd_op.abs: (-1x4xf32) <- (-1x4xf32) - abs_0 = paddle._C_ops.abs(subtract_0) - - # pd_op.mean_all: (xf32) <- (-1x4xf32) - mean_all_0 = paddle._C_ops.mean_all(abs_0) - - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_0, - split_1, - split_2, - split_3, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) - - # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) - ( - split_4, - split_5, - split_6, - split_7, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_0 = paddle._C_ops.maximum(split_0, split_4) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_1 = paddle._C_ops.maximum(split_1, split_5) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_0 = paddle._C_ops.minimum(split_2, split_6) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_1 = paddle._C_ops.minimum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_3 = full_1 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_4 = full_2 - - # pd_op.clip: (-1x1xf32) 
<- (-1x1xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - - # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_3 = paddle._C_ops.subtract(split_2, split_0) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_3, split_1) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_6, split_4) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_7, split_5) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) - del subtract_5, subtract_6 - - # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - add_0 = paddle._C_ops.add(multiply_1, multiply_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_3 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_6 = full_3 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) - del subtract_7 - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_2 = paddle._C_ops.divide(multiply_0, scale_0) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_2 = paddle._C_ops.minimum(split_0, split_4) - - # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - minimum_3 = paddle._C_ops.minimum(split_1, split_5) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_2 = paddle._C_ops.maximum(split_2, split_6) - - # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - maximum_3 = paddle._C_ops.maximum(split_3, split_7) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) - del multiply_3 - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) - - # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - divide_3 = paddle._C_ops.divide(subtract_10, scale_1) - - # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) - del subtract_11 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) - del scale_2 - - # 
pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_3 = [] - - # pd_op.assign: (0xi64) <- (0xi64) - assign_7 = full_int_array_3 - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_1, data_4) - - # pd_op.unsqueeze: (1x24276x1xb) <- (1x24276xb, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) - del data_0 - - # pd_op.cast: (1x24276x1xi32) <- (1x24276x1xb) - cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) - del unsqueeze_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_4 = [1, 1, 88] - - # pd_op.tile: (1x24276x88xi32) <- (1x24276x1xi32, 3xi64) - tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) - del cast_2, full_int_array_4 - - # pd_op.cast: (1x24276x88xb) <- (1x24276x88xi32) - cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) - del tile_1 - - # pd_op.masked_select: (-1xf32) <- (1x24276x88xf32, 1x24276x88xb) - masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) - del data_5 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [-1, 4, 22] - - # pd_op.reshape: (-1x4x22xf32) <- (-1xf32, 3xi64) - reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) - del full_int_array_5 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("2"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.split_with_num: ([1x24276x2xf32, 1x24276x2xf32]) <- (1x24276x4xf32, 1xi32) - split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) - del data_2, full_5 - - # builtin.split: (1x24276x2xf32, 1x24276x2xf32) <- ([1x24276x2xf32, 1x24276x2xf32]) - ( - split_8, - split_9, - ) = split_with_num_2 - del split_with_num_2 - - # pd_op.subtract: (1x24276x2xf32) <- (24276x2xf32, 1x24276x2xf32) - subtract_12 = paddle._C_ops.subtract(data_6, split_8) - del split_8 - - # pd_op.subtract: (1x24276x2xf32) <- (1x24276x2xf32, 24276x2xf32) - subtract_13 = paddle._C_ops.subtract(split_9, data_6) - del data_6, split_9 - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([1x24276x2xf32, 1x24276x2xf32]) <- (1x24276x2xf32, 1x24276x2xf32) - combine_0 = [subtract_12, subtract_13] - del subtract_12, subtract_13 - - # pd_op.concat: (1x24276x4xf32) <- ([1x24276x2xf32, 1x24276x2xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_6) - del combine_0, full_6 - - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("-2"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("18.99"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (1x24276x4xf32) <- (1x24276x4xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(concat_0, full_7, full_8) - del concat_0, full_7, full_8 - - # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) - masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) - del clip_2 - - # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) - del full_int_array_2, masked_select_4 - - # pd_op.floor: (-1x4xf32) <- (-1x4xf32) - floor_0 = paddle._C_ops.floor(reshape_3) - - # pd_op.cast: (-1x4xi64) <- (-1x4xf32) - cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) - del floor_0 - - # pd_op.scale: 
(-1x4xi64) <- (-1x4xi64, 1xf32) - scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) - - # pd_op.cast: (-1x4xf32) <- (-1x4xi64) - cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) - - # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) - del cast_5, reshape_3 - - # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) - scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_6 = paddle._C_ops.scale(cast_4, full_3, float("2"), True) - del cast_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) - del scale_6 - - # pd_op.cross_entropy_with_softmax: (-1x4x22xf32, -1x4x1xf32) <- (-1x4x22xf32, -1x4x1xi64) - cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_3, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_2, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) - - # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) - scale_7 = paddle._C_ops.scale(scale_4, full_3, float("2"), True) - del scale_4 - - # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) - del scale_7 - - # pd_op.cross_entropy_with_softmax: (-1x4x22xf32, -1x4x1xf32) <- (-1x4x22xf32, -1x4x1xi64) - cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( - lambda x, f: f(x) - )( - paddle._C_ops.cross_entropy_with_softmax( - reshape_2, unsqueeze_4, False, True, True, -100, -1 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del reshape_2 - - # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze( - cross_entropy_with_softmax_3, full_int_array_0 - ) - - # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) - - # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) - add_1 = paddle._C_ops.add(multiply_5, multiply_6) - - # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) - mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) - del full_int_array_0 - - # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) - multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) - - # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) - sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) - - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_2, data_4) - del ( - abs_0, - add_0, - add_1, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - cast_1, - cast_3, - clip_0, - clip_1, - cross_entropy_with_softmax_2, - cross_entropy_with_softmax_3, - data_4, - divide_2, - divide_3, - full_0, - full_1, - full_2, - full_3, - full_4, - full_int_array_3, - masked_select_0, - masked_select_3, - maximum_0, - maximum_1, - maximum_2, - maximum_3, - mean_0, - minimum_0, - minimum_1, - minimum_2, - minimum_3, - multiply_0, - multiply_1, - multiply_2, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - reshape_0, - reshape_1, - scale_0, - scale_1, - scale_3, - scale_5, - split_0, - split_1, - split_2, - split_3, - split_4, - split_5, - 
split_6, - split_7, - squeeze_0, - squeeze_1, - subtract_0, - subtract_1, - subtract_10, - subtract_14, - subtract_2, - subtract_3, - subtract_4, - subtract_8, - subtract_9, - sum_1, - sum_2, - unsqueeze_1, - unsqueeze_3, - unsqueeze_4, - ) - - return ( - cross_entropy_with_softmax_0, - cross_entropy_with_softmax_1, - mean_all_0, - divide_0, - divide_1, - ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_13/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt deleted file mode 100644 index 18e3bbc11..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -bbf0a5774c2acf6ee92cd237b35e2c556a5dfa443149969bd012be16856599a9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json deleted file mode 100644 index 381598f86..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "PP-YOLOE_plus_SOD-largesize-L", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py deleted file mode 100644 index 4fdfc39bb..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/input_meta.py +++ /dev/null @@ -1,145 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [1, 24276, 10] - dtype = "float32" - min_val = float("9.96627e-10") - max_val = float("0.919436") - mean = float("0.00348174") - std = float("0.0220349") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [1, 24276, 4] - dtype = "float32" - min_val = float("-272.612") - max_val = float("1371.86") - mean = float("544.298") - std = float("322.539") - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [24276, 2] - dtype = "float32" - min_val = float("4.0") - max_val = float("1084.0") - mean = float("544.0") - std = float("314.059") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [24276, 1] - dtype = "float32" - min_val = float("8.0") - max_val = float("32.0") - mean = float("10.6667") - std = float("5.70157") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [1, 38, 1] - dtype = "int32" - data = [ - 3, - 3, - 9, - 1, - 0, - 0, - 0, - 0, - 3, - 3, - 3, - 3, - 3, - 0, - 0, - 0, - 8, - 3, - 3, - 3, - 0, - 0, - 3, - 3, - 3, - 3, - 3, - 5, - 3, - 3, - 3, - 3, - 3, - 0, - 3, - 3, - 0, - 0, - ] - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [1, 38, 4] - dtype = "float32" - min_val = float("354.773") - max_val = float("1051.0") - mean = float("652.35") - std = float("193.013") - data = None - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1, 38, 1] - dtype = "float32" - data = [ - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, 
- 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py deleted file mode 100644 index 0aac88f68..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/model.py +++ /dev/null @@ -1,499 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [2] - - # pd_op.unsqueeze: (1x38x1x4xf32) <- (1x38x4xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) - del data_5 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.unsqueeze: (1x1x24276x4xf32) <- (1x24276x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) - del data_1, full_int_array_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.slice: (1x38x1x2xf32) <- (1x38x1x4xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2147483647] - - # pd_op.slice: (1x38x1x2xf32) <- (1x38x1x4xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] - ) - - # pd_op.slice: (1x1x24276x2xf32) <- (1x1x24276x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 - - # pd_op.slice: (1x1x24276x2xf32) <- (1x1x24276x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] - ) - del full_int_array_0, full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (1x38x24276x2xf32) <- (1x38x1x2xf32, 1x1x24276x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (1x38x24276x2xf32) <- (1x38x1x2xf32, 1x1x24276x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (1x38x24276x2xf32) <- (1x38x24276x2xf32, 1x38x24276x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (1x38x24276x2xf32) <- (1x38x24276x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) - del subtract_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: (1x38x24276xf32) <- (1x38x24276x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (1x38x1x2xf32) <- (1x38x1x2xf32, 1x38x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (1x38x1x2xf32) <- (1x38x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) - del subtract_1 - - # pd_op.prod: (1x38x1xf32) <- (1x38x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del clip_1 - - # 
pd_op.subtract: (1x1x24276x2xf32) <- (1x1x24276x2xf32, 1x1x24276x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (1x1x24276x2xf32) <- (1x1x24276x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) - del full_1, subtract_2 - - # pd_op.prod: (1x1x24276xf32) <- (1x1x24276x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 - - # pd_op.add: (1x38x24276xf32) <- (1x38x1xf32, 1x1x24276xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) - del subtract_3 - - # pd_op.divide: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (1x10x24276xf32) <- (1x24276x10xf32) - transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) - del data_0 - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_3, full_4, full_4, dtype="int32") - del full_3, full_4 - - # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [1, 38] - - # pd_op.tile: (1x38xi32) <- (1x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) - del full_int_array_5 - - # pd_op.squeeze: (1x38xi32) <- (1x38x1xi32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) - del data_4 - - # builtin.combine: ([1x38xi32, 1x38xi32]) <- (1x38xi32, 1x38xi32) - combine_0 = [tile_0, squeeze_0] - del squeeze_0, tile_0 - - # pd_op.stack: (1x38x2xi32) <- ([1x38xi32, 1x38xi32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 - - # pd_op.gather_nd: (1x38x24276xf32) <- (1x10x24276xf32, 1x38x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) - del stack_0, transpose_0 - - # pd_op.pow: (1x38x24276xf32) <- (1x38x24276xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) - del gather_nd_0 - - # pd_op.pow: (1x38x24276xf32) <- (1x38x24276xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) - del multiply_0 - - # pd_op.scale: (24276x1xf32) <- (24276x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) - del data_3, full_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_6 = [0, 1] - - # pd_op.unsqueeze: (1x1x24276x2xf32) <- (24276x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) - del data_2 - - # pd_op.full: (1xi32) <- () - full_5 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() - ) - - # 
pd_op.split_with_num: ([1x1x24276x1xf32, 1x1x24276x1xf32]) <- (1x1x24276x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_5) - del unsqueeze_3 - - # builtin.split: (1x1x24276x1xf32, 1x1x24276x1xf32) <- ([1x1x24276x1xf32, 1x1x24276x1xf32]) - ( - split_0, - split_1, - ) = split_with_num_0 - del split_with_num_0 - - # pd_op.split_with_num: ([1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32]) <- (1x38x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_5) - del full_5, unsqueeze_0 - - # builtin.split: (1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32) <- ([1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) - - # pd_op.full: (1xi32) <- () - full_6 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32]) <- (1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32) - combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 - - # pd_op.concat: (1x38x24276x4xf32) <- ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_6) - del combine_1 - - # pd_op.min: (1x38x24276xf32) <- (1x38x24276x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0 - - # pd_op.full: (xf32) <- () - full_7 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_7) - del min_0 - - # pd_op.unsqueeze: (1x1x24276x1xf32) <- (24276x1xf32, 2xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) - del full_int_array_6, scale_1 - - # pd_op.add: (1x38x1x1xf32) <- (1x38x1x1xf32, 1x38x1x1xf32) - add_1 = paddle._C_ops.add(split_2, split_4) - del split_2, split_4 - - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (1x38x1x1xf32) <- (1x38x1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(add_1, full_8, float("0"), True) - del add_1 - - # pd_op.add: (1x38x1x1xf32) <- (1x38x1x1xf32, 1x38x1x1xf32) - add_2 = paddle._C_ops.add(split_3, split_5) - del split_3, split_5 - - # pd_op.scale: (1x38x1x1xf32) <- (1x38x1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(add_2, full_8, float("0"), True) - del add_2, full_8 - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) - subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x24276x1xf32) - subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) - del subtract_8 - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 
1x1x24276x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x24276x1xf32) - subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) - del subtract_10 - - # pd_op.add: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) - add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) - del scale_2 - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x24276x1xf32, 1x1x24276x1xf32) - subtract_12 = paddle._C_ops.subtract(add_3, split_0) - del add_3, split_0 - - # pd_op.add: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) - add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) - del scale_3, unsqueeze_4 - - # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x24276x1xf32, 1x1x24276x1xf32) - subtract_13 = paddle._C_ops.subtract(add_4, split_1) - del add_4, split_1 - - # builtin.combine: ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32]) <- (1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32) - combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] - del subtract_11, subtract_12, subtract_13, subtract_9 - - # pd_op.concat: (1x38x24276x4xf32) <- ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_2, full_6) - del combine_2, full_6 - - # pd_op.min: (1x38x24276xf32) <- (1x38x24276x4xf32, 1xi64) - min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) - del concat_1 - - # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) - greater_than_2 = paddle._C_ops.greater_than(min_1, full_7) - del full_7, min_1 - - # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) - cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) - cast_1 = paddle._C_ops.cast(greater_than_2, paddle.float32) - del greater_than_2 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - multiply_2 = paddle._C_ops.multiply(cast_0, data_6) - del cast_0 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - multiply_3 = paddle._C_ops.multiply(cast_1, data_6) - del cast_1 - - # pd_op.sum: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) - del full_int_array_4 - - # pd_op.full: (xf32) <- () - full_9 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.equal: (1x38x1xb) <- (1x38x1xf32, xf32) - equal_0 = paddle._C_ops.equal(sum_0, full_9) - del sum_0 - - # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - add_5 = paddle._C_ops.add(multiply_1, multiply_3) - - # pd_op.full_like: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - add_5, full_0, paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.full_like: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) - full_like_1 = paddle._C_ops.full_like( - multiply_1, - full_0, - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full_like: (1x38x1xb) <- (1x38x1xb, 1xf32) - full_like_2 = paddle._C_ops.full_like( - equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() - ) - del full_0 - - # pd_op.cast: (1x38x1xf32) <- (1x38x1xb) - cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) - del full_like_2 - - # pd_op.cast: (1x38x1xf32) <- (1x38x1xb) - cast_3 = paddle._C_ops.cast(equal_0, paddle.float32) - del equal_0 - - # pd_op.add: (1x38x24276xf32) <- 
(1x38x24276xf32, 1x38x24276xf32) - add_6 = paddle._C_ops.add(full_like_0, full_like_1) - del full_like_0, full_like_1 - - # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - add_7 = paddle._C_ops.add(add_6, cast_2) - del add_6, cast_2 - - # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - add_8 = paddle._C_ops.add(add_5, add_7) - del add_5 - - # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - add_9 = paddle._C_ops.add(multiply_1, add_7) - - # pd_op.add: (1x38x24276xf32) <- (1x38x1xf32, 1x38x24276xf32) - add_10 = paddle._C_ops.add(cast_3, add_7) - del add_7, cast_3 - - # pd_op.cast: (1x38x24276xb) <- (1x38x24276xf32) - cast_4 = paddle._C_ops.cast(add_10, paddle.bool) - del add_10 - - # pd_op.where: (1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) - where_0 = paddle._C_ops.where(cast_4, add_8, add_9) - del add_8, add_9, cast_4 - - # pd_op.full: (1xi32) <- () - full_10 = paddle._C_ops.full( - [1], float("13"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.topk: (1x38x13xf32, 1x38x13xi64) <- (1x38x24276xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(where_0, full_10, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del full_10, where_0 - - # pd_op.full: (1xi32) <- () - full_11 = paddle._C_ops.full( - [1], float("24276"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.one_hot: (1x38x13x24276xf32) <- (1x38x13xi64, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(full_11, topk_1.dtype), full_11 - ) - del full_11, topk_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] - - # pd_op.sum: (1x38x24276xf32) <- (1x38x13x24276xf32, 1xi64) - sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) - multiply_4 = paddle._C_ops.multiply(sum_1, data_6) - del data_6, sum_1 - - # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) - greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_9) - del multiply_3 - - # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) - greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_9) - del full_9, multiply_2 - - # pd_op.bitwise_or: (1x38x24276xb) <- (1x38x24276xb, 1x38x24276xb) - bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) - del greater_than_3, greater_than_4 - - # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) - cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) - del bitwise_or_0 - - # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) - multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) - del cast_5, multiply_4 - - # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) - sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) - del full_int_array_7 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] - - # pd_op.max: (xf32) <- (1x24276xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) - del full_int_array_8 - - # pd_op.full: (xf32) <- () - full_12 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) - del divide_0, full_12, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 - - return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_14/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt index 3060b8e66..7d70180d4 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/graph_hash.txt @@ -1 +1 @@ -cfd03838c2e98747bf694d5196a2c44d4f61ead673e0d40c68b441b98f84ef34 \ No newline at end of file +6a8e990486f4d85b4371bedc5cecd294f81764cd153a3e25085e136f63ec707c \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py index 6b8c591d4..7d313afce 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/input_meta.py @@ -1,119 +1,174 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1024, 3072] - dtype = "float32" - min_val = float("-0.0337707") - max_val = float("0.03429") - mean = float("-1.71998e-05") - std = float("0.0182992") - data = None + shape = [] + dtype = "int64" + data = [49] class Program_weight_tensor_data_1: name = "data_1" - shape = [3072] + shape = [1, 48384, 10] dtype = "float32" - min_val = float("-0.000858163") - max_val = float("0.000895478") - mean = float("1.43758e-06") - std = float("0.000180847") + min_val = float("1.08574e-08") + max_val = float("0.85674") + mean = float("0.00225546") + std = float("0.010105") data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [1024, 3072] + shape = [1, 48384, 4] dtype = "float32" - min_val = float("-0.0324395") - max_val = float("0.0323104") - mean = float("-1.57215e-05") - std = float("0.0182981") + min_val = float("-253.983") + max_val = float("1811.4") + mean = float("768.345") + std = float("449.589") data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [3072] + shape = [48384, 2] dtype = "float32" - min_val = float("-0.000630245") - max_val = float("0.000514232") - mean = float("2.75956e-06") - std = float("0.000126901") + min_val = float("4.0") + max_val = float("1532.0") + mean = float("768.0") + std = float("443.391") data = None class Program_weight_tensor_data_4: name = "data_4" - shape = [1024, 3072] + shape = [48384, 1] dtype = "float32" - min_val = float("-0.0321875") - max_val = float("0.0321786") - mean = float("-1.59553e-05") - std = float("0.0182974") + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") data = None class Program_weight_tensor_data_5: name = "data_5" - shape = [3072] - dtype = "float32" - min_val = float("-0.000429817") - max_val = float("0.000427089") - mean = float("1.59167e-06") - std = float("8.84515e-05") - data = None + shape = [1, 49, 1] + dtype = "int32" + data = [ + 0, + 0, + 3, + 8, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 3, + 3, + 3, + 4, + 4, + 8, + 3, + 3, + 3, + 3, + 8, + 8, + 8, + 8, + 8, + 8, + 4, + 8, + 8, + 8, + 8, + 8, + 5, + 0, + 8, + 8, + 8, + 8, + 8, + 3, + 3, + 8, + 8, + 0, + 4, + ] class Program_weight_tensor_data_6: name = "data_6" - shape = [1024, 3072] + shape = [1, 49, 4] dtype = 
"float32" - min_val = float("-0.0321313") - max_val = float("0.0321203") - mean = float("-1.62062e-05") - std = float("0.018297") + min_val = float("830.542") + max_val = float("1214.81") + mean = float("996.371") + std = float("76.2156") data = None class Program_weight_tensor_data_7: name = "data_7" - shape = [3072] + shape = [1, 49, 1] dtype = "float32" - min_val = float("-0.000397465") - max_val = float("0.000488945") - mean = float("1.04525e-06") - std = float("8.30003e-05") - data = None - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1, 256, 240, 240] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("12.296") - mean = float("-0.0693066") - std = float("0.362327") - data = None - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [1, 512, 120, 120] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("10.8892") - mean = float("-0.0919699") - std = float("0.352598") - data = None - - -class Program_weight_tensor_data_10: - name = "data_10" - shape = [1, 1024, 60, 60] - dtype = "float32" - min_val = float("-0.278465") - max_val = float("28.4265") - mean = float("0.296413") - std = float("1.17347") - data = None + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py index 2377a8523..7403c5b74 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/model.py @@ -5,4853 +5,542 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - 
parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - 
parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - ): - # pd_op.flatten: (1x1024x3600xf32) <- (1x1024x60x60xf32) - flatten_0 = paddle._C_ops.flatten(data_10, 2, 3) - del data_10 - - # pd_op.transpose: (1x3600x1024xf32) <- (1x1024x3600xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.full: (1xf64) <- () + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): + # pd_op.full: (xi64) <- () full_0 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_1 = paddle._C_ops.full( - [1], float("60"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf64) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (60xf32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_0, full_1, full_2, dtype="float32") - del full_1 - - # builtin.combine: ([60xf32, 60xf32]) <- (60xf32, 60xf32) - combine_0 = [arange_0, arange_0] - del arange_0 - - # pd_op.meshgrid: ([60x60xf32, 60x60xf32]) <- ([60xf32, 60xf32]) - meshgrid_0 = paddle._C_ops.meshgrid(combine_0) - del combine_0 - - # builtin.split: (60x60xf32, 60x60xf32) <- ([60x60xf32, 60x60xf32]) - ( - split_0, - split_1, - ) = meshgrid_0 - del meshgrid_0 - - # pd_op.full: (1xf64) <- () - full_3 = paddle._C_ops.full( - [1], float("256"), paddle.float64, paddle.core.CPUPlace() - ) - - # pd_op.arange: (256xf32) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_0, full_3, full_2, dtype="float32") - del full_0, full_2, full_3 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + [], float("0"), paddle.int64, paddle.framework._current_expected_place() ) - # pd_op.scale: (256xf32) <- (256xf32, 1xf32) - scale_0 = paddle._C_ops.scale(arange_1, full_4, float("0"), True) - del arange_1, full_4 - - # pd_op.full: (256xf32) <- () - full_5 = paddle._C_ops.full( - [256], - float("10000"), - paddle.float32, - paddle.framework._current_expected_place(), - ) + # pd_op.equal: (xb) <- (xi64, xi64) + equal_0 = paddle._C_ops.equal(data_0, full_0) - # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) - elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_5, scale_0) - del full_5, scale_0 + # pd_op.cast: (xi64) <- (xb) + cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) + del equal_0 - # pd_op.full: 
(256xf32) <- () - full_6 = paddle._C_ops.full( - [256], - float("1"), - paddle.float32, - paddle.framework._current_expected_place(), - ) + # pd_op.not_equal: (xb) <- (xi64, xi64) + not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) + del cast_0 - # pd_op.divide: (256xf32) <- (256xf32, 256xf32) - divide_0 = paddle._C_ops.divide(full_6, elementwise_pow_0) - del elementwise_pow_0, full_6 + # pd_op.cast: (xi64) <- (xb) + cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) + del not_equal_0 - # pd_op.flatten: (3600xf32) <- (60x60xf32) - flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) - del split_0 + # pd_op.equal: (xb) <- (xi64, xi64) + equal_1 = paddle._C_ops.equal(cast_1, full_0) + del cast_1, full_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] + full_int_array_0 = [2] - # pd_op.unsqueeze: (3600x1xf32) <- (3600xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_0) - del flatten_1 + # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_6, full_int_array_0) + del data_6 # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [0] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_3 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_4 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_5 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_6 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_7 = full_int_array_1 - - # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_1) - del divide_0 - - # pd_op.matmul: (3600x256xf32) <- (3600x1xf32, 1x256xf32) - matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) - del unsqueeze_0 - - # pd_op.flatten: (3600xf32) <- (60x60xf32) - flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) - del split_1 - - # pd_op.unsqueeze: (3600x1xf32) <- (3600xf32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_0) - del flatten_2, full_int_array_0 - - # pd_op.matmul: (3600x256xf32) <- (3600x1xf32, 1x256xf32) - matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) - del unsqueeze_1, unsqueeze_2 - - # pd_op.sin: (3600x256xf32) <- (3600x256xf32) - sin_0 = paddle._C_ops.sin(matmul_0) - - # pd_op.cos: (3600x256xf32) <- (3600x256xf32) - cos_0 = paddle._C_ops.cos(matmul_0) - del matmul_0 - - # pd_op.sin: (3600x256xf32) <- (3600x256xf32) - sin_1 = paddle._C_ops.sin(matmul_1) - - # pd_op.cos: (3600x256xf32) <- (3600x256xf32) - cos_1 = paddle._C_ops.cos(matmul_1) - del matmul_1 - - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_8 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_9 = full_7 + full_int_array_1 = [1] - # pd_op.assign: (1xi32) <- (1xi32) - assign_10 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_11 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_12 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_13 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_14 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_15 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_16 = full_7 - - # pd_op.assign: (1xi32) <- (1xi32) - assign_17 = full_7 - 
- # builtin.combine: ([3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32]) <- (3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32) - combine_1 = [sin_0, cos_0, sin_1, cos_1] - del cos_0, cos_1, sin_0, sin_1 - - # pd_op.concat: (3600x1024xf32) <- ([3600x256xf32, 3600x256xf32, 3600x256xf32, 3600x256xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_7) - del combine_1 - - # pd_op.unsqueeze: (1x3600x1024xf32) <- (3600x1024xf32, 1xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(concat_0, full_int_array_1) - del concat_0 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_0 = paddle._C_ops.add(transpose_0, unsqueeze_3) + # pd_op.unsqueeze: (1x1x-1x4xf32) <- (1x-1x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) + del data_2 # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [1024] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_18 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_19 = full_int_array_2 + full_int_array_2 = [0] - # pd_op.assign: (1xi64) <- (1xi64) - assign_20 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_21 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_22 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_23 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_24 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_25 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_26 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_27 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_28 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_29 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_30 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_31 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_32 = full_int_array_2 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - data_0, [1], full_int_array_1, full_int_array_2, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - data_1, [0], full_int_array_1, full_int_array_2, [1], [] + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] ) - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_2 = paddle._C_ops.matmul(add_0, slice_0, False, False) - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_1 = paddle._C_ops.add(matmul_2, slice_1) - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_3 = [0, 0, 4, 256] - - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_3) - - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_1 = paddle._C_ops.transpose(reshape_0, [0, 2, 1, 3]) - del reshape_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2048] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_33 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_34 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_35 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_36 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_37 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_38 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_39 = full_int_array_4 - - # 
pd_op.assign: (1xi64) <- (1xi64) - assign_40 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_41 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_42 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_43 = full_int_array_4 + full_int_array_3 = [2147483647] - # pd_op.assign: (1xi64) <- (1xi64) - assign_44 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_45 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_46 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_47 = full_int_array_4 + # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] + ) - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) slice_2 = paddle._C_ops.slice( - data_0, [1], full_int_array_2, full_int_array_4, [1], [] + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] ) + del full_int_array_2 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) slice_3 = paddle._C_ops.slice( - data_1, [0], full_int_array_2, full_int_array_4, [1], [] + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] ) + del full_int_array_3, unsqueeze_1 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_3 = paddle._C_ops.matmul(add_0, slice_2, False, False) - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_2 = paddle._C_ops.add(matmul_3, slice_3) - - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(add_2, full_int_array_3) - - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_2 = paddle._C_ops.transpose(reshape_1, [0, 2, 1, 3]) - del reshape_1 + # pd_op.maximum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [2147483647] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_48 = full_int_array_5 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_49 = full_int_array_5 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_50 = full_int_array_5 + # pd_op.minimum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - # pd_op.assign: (1xi64) <- (1xi64) - assign_51 = full_int_array_5 + # pd_op.subtract: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1x-1x-1x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 - # pd_op.assign: (1xi64) <- (1xi64) - assign_52 = full_int_array_5 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_53 = full_int_array_5 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_54 = full_int_array_5 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - data_0, [1], full_int_array_4, full_int_array_5, [1], [] + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - del data_0 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - data_1, [0], full_int_array_4, full_int_array_5, [1], [] + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() ) - del data_1 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_4 = 
paddle._C_ops.matmul(transpose_0, slice_4, False, False) + # pd_op.clip: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) + del subtract_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_3 = paddle._C_ops.add(matmul_4, slice_5) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(add_3, full_int_array_3) + # pd_op.prod: (1x-1x-1xf32) <- (1x-1x-1x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_3 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) - del reshape_2 + # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 - # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) - matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) + del subtract_1 - # pd_op.full: (1xf32) <- () - full_8 = paddle._C_ops.full( - [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 - # pd_op.assign: (1xf32) <- (1xf32) - assign_55 = full_8 + # pd_op.subtract: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1x1x-1x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 - # pd_op.assign: (1xf32) <- (1xf32) - assign_56 = full_8 + # pd_op.clip: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) + del full_2, subtract_2 - # pd_op.assign: (1xf32) <- (1xf32) - assign_57 = full_8 + # pd_op.prod: (1x1x-1xf32) <- (1x1x-1x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 - # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) - scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) - del matmul_5 + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x1x-1xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 - # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) - softmax_0 = paddle._C_ops.softmax(scale_1, -1) - del scale_1 + # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_58 = full_9 + # pd_op.scale: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) + del subtract_3 - # pd_op.assign: (1xf32) <- (1xf32) - assign_59 = full_9 + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 - # pd_op.assign: (1xf32) <- (1xf32) - assign_60 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_61 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_62 = full_9 + # pd_op.transpose: (1x10x-1xf32) <- (1x-1x10xf32) + transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) + del data_1 - # pd_op.assign: (1xf32) <- 
(1xf32) - assign_63 = full_9 + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_64 = full_9 + # pd_op.full: (1xf64) <- () + full_5 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_65 = full_9 + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_4, full_5, full_5, dtype="int32") + del full_4, full_5 - # pd_op.assign: (1xf32) <- (1xf32) - assign_66 = full_9 + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 - # pd_op.assign: (1xf32) <- (1xf32) - assign_67 = full_9 + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("1"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.assign: (1xf32) <- (1xf32) - assign_68 = full_9 + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_0 = [full_6, data_0] + del data_0, full_6 - # pd_op.assign: (1xf32) <- (1xf32) - assign_69 = full_9 + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 - # pd_op.assign: (1xf32) <- (1xf32) - assign_70 = full_9 + # pd_op.tile: (1x-1xi32) <- (1x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) + del stack_0 - # pd_op.assign: (1xf32) <- (1xf32) - assign_71 = full_9 + # pd_op.squeeze: (1x-1xi32) <- (1x-1x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_5, full_int_array_4) + del data_5 - # pd_op.assign: (1xf32) <- (1xf32) - assign_72 = full_9 + # builtin.combine: ([1x-1xi32, 1x-1xi32]) <- (1x-1xi32, 1x-1xi32) + combine_1 = [tile_0, squeeze_0] + del squeeze_0, tile_0 - # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_0, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) + # pd_op.stack: (1x-1x2xi32) <- ([1x-1xi32, 1x-1xi32]) + stack_1 = paddle._C_ops.stack(combine_1, -1) + del combine_1 - # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) - matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + # pd_op.gather_nd: (1x-1x-1xf32) <- (1x10x-1xf32, 1x-1x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) + del stack_1, transpose_0 - # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) - transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) - del matmul_6 + # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 - # pd_op.full_int_array: (3xi64) <- () - full_int_array_6 = [0, 0, 1024] + # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) - # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) - reshape_3 = paddle._C_ops.reshape(transpose_4, full_int_array_6) + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_7 = paddle._C_ops.matmul(reshape_3, parameter_364, False, False) - del parameter_364 + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_7) + del multiply_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) 
- add_4 = paddle._C_ops.add(matmul_7, parameter_363) - del parameter_363 + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_4, full_3, float("0"), True) + del data_4, full_3 - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_4, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_4 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [0, 1] - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_5 = paddle._C_ops.add(transpose_0, dropout_2) + # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) + del data_3 - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_5, parameter_362, parameter_361, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_361, parameter_362 - - # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) - matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) - del parameter_360 - - # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) - add_6 = paddle._C_ops.add(matmul_8, parameter_359) - del parameter_359 - - # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) - gelu_0 = paddle._C_ops.gelu(add_6, False) - # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_0, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_0 + # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_7) + del unsqueeze_3 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) - matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) - del parameter_358 + # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_7 = paddle._C_ops.add(matmul_9, parameter_357) - del parameter_357 + # pd_op.split_with_num: ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) <- (1x-1x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_7) + del full_7, unsqueeze_0 - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_7, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_7 + # builtin.split: (1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32) <- ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_8 = paddle._C_ops.add(layer_norm_0, 
dropout_6) + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_8, parameter_356, parameter_355, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_355, parameter_356 + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_9 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - data_2, [1], full_int_array_1, full_int_array_2, [1], [] - ) + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - data_3, [0], full_int_array_1, full_int_array_2, [1], [] + # pd_op.full: (1xi32) <- () + full_8 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_10 = paddle._C_ops.matmul(add_9, slice_6, False, False) + # builtin.combine: ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32]) <- (1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32) + combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_10 = paddle._C_ops.add(matmul_10, slice_7) - - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(add_10, full_int_array_3) - - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_5 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) - del reshape_4 + # pd_op.concat: (1x-1x-1x4xf32) <- ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_2, full_8) + del combine_2 - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - data_2, [1], full_int_array_2, full_int_array_4, [1], [] - ) + # pd_op.min: (1x-1x-1xf32) <- (1x-1x-1x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - data_3, [0], full_int_array_2, full_int_array_4, [1], [] + # pd_op.full: (xf32) <- () + full_9 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), ) - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_11 = paddle._C_ops.matmul(add_9, slice_8, False, False) + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_9) + del min_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_11 = paddle._C_ops.add(matmul_11, slice_9) + # pd_op.unsqueeze: (1x1x-1x1xf32) <- (-1x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_5) + del full_int_array_5, scale_1 - # pd_op.reshape: 
(1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(add_11, full_int_array_3) + # pd_op.add: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1x-1x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_6 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) - del reshape_5 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - data_2, [1], full_int_array_4, full_int_array_5, [1], [] + # pd_op.full: (1xf32) <- () + full_10 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() ) - del data_2 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - data_3, [0], full_int_array_4, full_int_array_5, [1], [] - ) - del data_3 + # pd_op.scale: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_10, float("0"), True) + del add_1 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + # pd_op.add: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1x-1x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_12 = paddle._C_ops.add(matmul_12, slice_11) + # pd_op.scale: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_10, float("0"), True) + del add_2, full_10 - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(add_12, full_int_array_3) + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) - del reshape_6 + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x-1x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 - # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) - matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) - # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) - scale_2 = paddle._C_ops.scale(matmul_13, full_8, float("0"), True) - del matmul_13 + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x-1x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 - # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) - softmax_1 = paddle._C_ops.softmax(scale_2, -1) + # pd_op.add: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) del scale_2 - # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) - matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) - - # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) - transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) - del matmul_14 - - # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) - 
reshape_7 = paddle._C_ops.reshape(transpose_8, full_int_array_6) - - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_15 = paddle._C_ops.matmul(reshape_7, parameter_354, False, False) - del parameter_354 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_13 = paddle._C_ops.add(matmul_15, parameter_353) - del parameter_353 + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x-1x1xf32, 1x1x-1x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_13, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_13 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_14 = paddle._C_ops.add(layer_norm_3, dropout_10) - - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_14, parameter_352, parameter_351, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_351, parameter_352 - - # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) - matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) - del parameter_350 + # pd_op.add: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 - # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) - add_15 = paddle._C_ops.add(matmul_16, parameter_349) - del parameter_349 + # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x-1x1xf32, 1x1x-1x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 - # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) - gelu_1 = paddle._C_ops.gelu(add_15, False) + # builtin.combine: ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32]) <- (1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32) + combine_3 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 - # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_1, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_1 + # pd_op.concat: (1x-1x-1x4xf32) <- ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_3, full_8) + del combine_3, full_8 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) - matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) - del parameter_348 + # pd_op.min: (1x-1x-1xf32) <- (1x-1x-1x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_16 = paddle._C_ops.add(matmul_17, parameter_347) - del parameter_347 + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_9) + del full_9, min_1 - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_16, 
None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_16 + # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) + cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_17 = paddle._C_ops.add(layer_norm_6, dropout_14) + # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) + cast_3 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_17, parameter_346, parameter_345, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_345, parameter_346 + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_2, data_7) + del cast_2 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_18 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_3, data_7) + del cast_3 - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_12 = paddle._C_ops.slice( - data_4, [1], full_int_array_1, full_int_array_2, [1], [] - ) + # pd_op.sum: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_13 = paddle._C_ops.slice( - data_5, [0], full_int_array_1, full_int_array_2, [1], [] + # pd_op.full: (xf32) <- () + full_11 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_18 = paddle._C_ops.matmul(add_18, slice_12, False, False) + # pd_op.equal: (1x-1x1xb) <- (1x-1x1xf32, xf32) + equal_2 = paddle._C_ops.equal(sum_0, full_11) + del sum_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_19 = paddle._C_ops.add(matmul_18, slice_13) - - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(add_19, full_int_array_3) - - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_9 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) - del reshape_8 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_14 = paddle._C_ops.slice( - data_4, [1], full_int_array_2, full_int_array_4, [1], [] - ) + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_15 = paddle._C_ops.slice( - data_5, [0], full_int_array_2, full_int_array_4, [1], [] + # pd_op.full_like: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_1, paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_19 = paddle._C_ops.matmul(add_18, slice_14, False, False) - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_20 = paddle._C_ops.add(matmul_19, slice_15) - - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(add_20, full_int_array_3) - - # pd_op.transpose: (1x4x3600x256xf32) 
<- (1x3600x4x256xf32) - transpose_10 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) - del reshape_9 - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_16 = paddle._C_ops.slice( - data_4, [1], full_int_array_4, full_int_array_5, [1], [] + # pd_op.full_like: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( + multiply_1, + full_1, + paddle.float32, + paddle.framework._current_expected_place(), ) - del data_4 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_17 = paddle._C_ops.slice( - data_5, [0], full_int_array_4, full_int_array_5, [1], [] + # pd_op.full_like: (1x-1x1xb) <- (1x-1x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_2, full_1, paddle.bool, paddle.framework._current_expected_place() ) - del data_5 - - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_21 = paddle._C_ops.add(matmul_20, slice_17) - - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(add_21, full_int_array_3) + del full_1 - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_11 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) - del reshape_10 + # pd_op.cast: (1x-1x1xf32) <- (1x-1x1xb) + cast_4 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 - # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) - matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + # pd_op.cast: (1x-1x1xf32) <- (1x-1x1xb) + cast_5 = paddle._C_ops.cast(equal_2, paddle.float32) + del equal_2 - # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) - scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) - del matmul_21 + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 - # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) - softmax_2 = paddle._C_ops.softmax(scale_3, -1) - del scale_3 + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_4) + del add_6, cast_4 - # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) - dropout_16, dropout_17 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_2, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 - # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) - matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) - # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) - transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) - del matmul_22 + # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x-1x-1xf32) + add_10 = paddle._C_ops.add(cast_5, add_7) + del add_7, cast_5 - # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) - reshape_11 = paddle._C_ops.reshape(transpose_12, full_int_array_6) + # pd_op.cast: (1x-1x-1xb) <- (1x-1x-1xf32) + cast_6 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - 
matmul_23 = paddle._C_ops.matmul(reshape_11, parameter_344, False, False) - del parameter_344 + # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) + where_0 = paddle._C_ops.where(cast_6, add_8, add_9) + del add_8, add_9, cast_6 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_22 = paddle._C_ops.add(matmul_23, parameter_343) - del parameter_343 + # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(where_0) - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_18, dropout_19 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_22, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] ) - del add_22 + del full_int_array_1 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_23 = paddle._C_ops.add(layer_norm_9, dropout_18) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [3] - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_23, parameter_342, parameter_341, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] ) - del parameter_341, parameter_342 - - # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) - matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) - del parameter_340 - - # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) - add_24 = paddle._C_ops.add(matmul_24, parameter_339) - del parameter_339 + del full_int_array_0, full_int_array_6, shape64_0 - # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) - gelu_2 = paddle._C_ops.gelu(add_24, False) - - # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) - dropout_20, dropout_21 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_2, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + # pd_op.full: (1xi32) <- () + full_12 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() ) - del gelu_2 - - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) - matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) - del parameter_338 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_25 = paddle._C_ops.add(matmul_25, parameter_337) - del parameter_337 - - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_22, dropout_23 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_25, None, full_9, False, "upscale_in_train", 0, False - ), + # pd_op.topk: (1x-1x13xf32, 1x-1x13xi64) <- (1x-1x-1xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_12, -1, True, True), lambda out: out if isinstance(out, (list, tuple)) else (out, None), ) - del add_25 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_26 = paddle._C_ops.add(layer_norm_12, dropout_22) - - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- 
(1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_26, parameter_336, parameter_335, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_335, parameter_336 + del full_12, where_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_27 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) - - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_18 = paddle._C_ops.slice( - data_6, [1], full_int_array_1, full_int_array_2, [1], [] - ) - - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_19 = paddle._C_ops.slice( - data_7, [0], full_int_array_1, full_int_array_2, [1], [] + # pd_op.one_hot: (1x-1x13x-1xf32) <- (1x-1x13xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 ) - del full_int_array_1 + del slice_5, topk_1 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_26 = paddle._C_ops.matmul(add_27, slice_18, False, False) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_28 = paddle._C_ops.add(matmul_26, slice_19) + # pd_op.sum: (1x-1x-1xf32) <- (1x-1x13x-1xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(add_28, full_int_array_3) + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_7) + del data_7, sum_1 - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_13 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) - del reshape_12 + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_11) + del multiply_3 - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_20 = paddle._C_ops.slice( - data_6, [1], full_int_array_2, full_int_array_4, [1], [] - ) + # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_11) + del full_11, multiply_2 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_21 = paddle._C_ops.slice( - data_7, [0], full_int_array_2, full_int_array_4, [1], [] - ) + # pd_op.bitwise_or: (1x-1x-1xb) <- (1x-1x-1xb, 1x-1x-1xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_27 = paddle._C_ops.matmul(add_27, slice_20, False, False) + # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) + cast_7 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_29 = paddle._C_ops.add(matmul_27, slice_21) + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_7) + del cast_7, multiply_4 - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(add_29, full_int_array_3) + # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_14 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 
3]) - del reshape_13 + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] - # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) - slice_22 = paddle._C_ops.slice( - data_6, [1], full_int_array_4, full_int_array_5, [1], [] - ) - del data_6 + # pd_op.max: (xf32) <- (1x-1xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 - # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) - slice_23 = paddle._C_ops.slice( - data_7, [0], full_int_array_4, full_int_array_5, [1], [] + # pd_op.full: (xf32) <- () + full_13 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - del data_7 - - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_30 = paddle._C_ops.add(matmul_28, slice_23) - # pd_op.reshape: (1x3600x4x256xf32) <- (1x3600x1024xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(add_30, full_int_array_3) - del full_int_array_3 - - # pd_op.transpose: (1x4x3600x256xf32) <- (1x3600x4x256xf32) - transpose_15 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) - del reshape_14 - - # pd_op.matmul: (1x4x3600x3600xf32) <- (1x4x3600x256xf32, 1x4x3600x256xf32) - matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) - - # pd_op.scale: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32, 1xf32) - scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) - del matmul_29 - - # pd_op.softmax: (1x4x3600x3600xf32) <- (1x4x3600x3600xf32) - softmax_3 = paddle._C_ops.softmax(scale_4, -1) - del scale_4 - - # pd_op.dropout: (1x4x3600x3600xf32, 1x4x3600x3600xui8) <- (1x4x3600x3600xf32, None, 1xf32) - dropout_24, dropout_25 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_3, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (1x4x3600x256xf32) <- (1x4x3600x3600xf32, 1x4x3600x256xf32) - matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) - - # pd_op.transpose: (1x3600x4x256xf32) <- (1x4x3600x256xf32) - transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) - del matmul_30 - - # pd_op.reshape: (1x3600x1024xf32) <- (1x3600x4x256xf32, 3xi64) - reshape_15 = paddle._C_ops.reshape(transpose_16, full_int_array_6) - del full_int_array_6 - - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024x1024xf32) - matmul_31 = paddle._C_ops.matmul(reshape_15, parameter_334, False, False) - del parameter_334 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_31 = paddle._C_ops.add(matmul_31, parameter_333) - del parameter_333 - - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_26, dropout_27 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_31, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_31 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_32 = paddle._C_ops.add(layer_norm_15, dropout_26) - - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_32, parameter_332, parameter_331, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, 
None), - ) - del parameter_331, parameter_332 - - # pd_op.matmul: (1x3600x2048xf32) <- (1x3600x1024xf32, 1024x2048xf32) - matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) - del parameter_330 - - # pd_op.add: (1x3600x2048xf32) <- (1x3600x2048xf32, 2048xf32) - add_33 = paddle._C_ops.add(matmul_32, parameter_329) - del parameter_329 - - # pd_op.gelu: (1x3600x2048xf32) <- (1x3600x2048xf32) - gelu_3 = paddle._C_ops.gelu(add_33, False) - - # pd_op.dropout: (1x3600x2048xf32, 1x3600x2048xui8) <- (1x3600x2048xf32, None, 1xf32) - dropout_28, dropout_29 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - gelu_3, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del gelu_3 - - # pd_op.matmul: (1x3600x1024xf32) <- (1x3600x2048xf32, 2048x1024xf32) - matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) - del parameter_328 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1024xf32) - add_34 = paddle._C_ops.add(matmul_33, parameter_327) - del parameter_327 - - # pd_op.dropout: (1x3600x1024xf32, 1x3600x1024xui8) <- (1x3600x1024xf32, None, 1xf32) - dropout_30, dropout_31 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_34, None, full_9, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_34 - - # pd_op.add: (1x3600x1024xf32) <- (1x3600x1024xf32, 1x3600x1024xf32) - add_35 = paddle._C_ops.add(layer_norm_18, dropout_30) - - # pd_op.layer_norm: (1x3600x1024xf32, 1x3600xf32, 1x3600xf32) <- (1x3600x1024xf32, 1024xf32, 1024xf32) - layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_35, parameter_326, parameter_325, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_325, parameter_326 - - # pd_op.transpose: (1x1024x3600xf32) <- (1x3600x1024xf32) - transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) - del layer_norm_21 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_7 = [1, 1024, 60, 60] - - # pd_op.reshape: (1x1024x60x60xf32) <- (1x1024x3600xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(transpose_17, full_int_array_7) - del full_int_array_7 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x1024x60x60xf32, 384x1024x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - reshape_16, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_324 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_323, - parameter_322, - parameter_321, - parameter_320, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_320, parameter_321, parameter_322, parameter_323 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x1024x60x60xf32, 384x1024x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - reshape_16, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_319 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- 
(1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_318, - parameter_317, - parameter_316, - parameter_315, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_315, parameter_316, parameter_317, parameter_318 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_314 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_313, - parameter_312, - parameter_311, - parameter_310, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_310, parameter_311, parameter_312, parameter_313 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_309, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_309 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_308, - parameter_307, - parameter_306, - parameter_305, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_305, parameter_306, parameter_307, parameter_308 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_3, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_304 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_303, - parameter_302, - parameter_301, - parameter_300, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_300, parameter_301, parameter_302, parameter_303 - - # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) - add_36 = paddle._C_ops.add(batch_norm__18, batch_norm__24) - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_4 = paddle._C_ops.swish(add_36) - - # pd_op.conv2d: (1x384x60x60xf32) <- 
(1x384x60x60xf32, 384x384x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_299 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_298, - parameter_297, - parameter_296, - parameter_295, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_295, parameter_296, parameter_297, parameter_298 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_5, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_294 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_293, - parameter_292, - parameter_291, - parameter_290, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_290, parameter_291, parameter_292, parameter_293 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_5, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_289 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_288, - parameter_287, - parameter_286, - parameter_285, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_285, parameter_286, parameter_287, parameter_288 - - # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) - add_37 = paddle._C_ops.add(batch_norm__36, batch_norm__42) - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_6 = paddle._C_ops.swish(add_37) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_8 = [5, 5] - - # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - swish_6, - full_int_array_8, - [1, 1], - [2, 2], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_9 = [9, 9] - - # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - swish_6, - full_int_array_9, - [1, 1], - [4, 4], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_10 = [13, 13] - - # pd_op.pool2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 2xi64) - 
pool2d_2 = paddle._C_ops.pool2d( - swish_6, - full_int_array_10, - [1, 1], - [6, 6], - False, - True, - "NCHW", - "max", - False, - False, - "EXPLICIT", - ) - - # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32) - combine_2 = [swish_6, pool2d_0, pool2d_1, pool2d_2] - - # pd_op.concat: (1x1536x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32, 1x384x60x60xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_2, full_7) - del combine_2 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x1536x60x60xf32, 384x1536x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - concat_1, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_284 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_283, - parameter_282, - parameter_281, - parameter_280, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_280, parameter_281, parameter_282, parameter_283 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_7 = paddle._C_ops.swish(batch_norm__48) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - swish_7, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_279 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_278, - parameter_277, - parameter_276, - parameter_275, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_275, parameter_276, parameter_277, parameter_278 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_8 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_8, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_274 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_273, - parameter_272, - parameter_271, - parameter_270, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_270, parameter_271, parameter_272, parameter_273 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_8, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_269 - - # pd_op.batch_norm_: 
(1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_268, - parameter_267, - parameter_266, - parameter_265, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_265, parameter_266, parameter_267, parameter_268 - - # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) - add_38 = paddle._C_ops.add(batch_norm__60, batch_norm__66) - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_9 = paddle._C_ops.swish(add_38) - - # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32) - combine_3 = [swish_1, swish_9] - - # pd_op.concat: (1x768x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_3, full_7) - del combine_3 - - # pd_op.conv2d: (1x768x60x60xf32) <- (1x768x60x60xf32, 768x768x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - concat_2, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_264 - - # pd_op.batch_norm_: (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_263, - parameter_262, - parameter_261, - parameter_260, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_260, parameter_261, parameter_262, parameter_263 - - # pd_op.swish: (1x768x60x60xf32) <- (1x768x60x60xf32) - swish_10 = paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x768x60x60xf32, 384x768x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_10, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_259 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_258, - parameter_257, - parameter_256, - parameter_255, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_255, parameter_256, parameter_257, parameter_258 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_11 = paddle._C_ops.swish(batch_norm__78) - - # pd_op.nearest_interp: (1x384x120x120xf32) <- (1x384x60x60xf32, None, None, None) - nearest_interp_0 = paddle._C_ops.nearest_interp( - swish_11, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([1x384x120x120xf32, 1x512x-1x-1xf32]) <- (1x384x120x120xf32, 1x512x-1x-1xf32) - combine_4 = [nearest_interp_0, data_9] - del data_9 - - # pd_op.concat: (1x896x120x120xf32) <- ([1x384x120x120xf32, 1x512x-1x-1xf32], 1xi32) - concat_3 = 
paddle._C_ops.concat(combine_4, full_7) - del combine_4 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x896x120x120xf32, 192x896x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - concat_3, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_254 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_253, - parameter_252, - parameter_251, - parameter_250, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_250, parameter_251, parameter_252, parameter_253 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_12 = paddle._C_ops.swish(batch_norm__84) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x896x120x120xf32, 192x896x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - concat_3, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_249 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_15, - parameter_248, - parameter_247, - parameter_246, - parameter_245, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_245, parameter_246, parameter_247, parameter_248 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_16 = paddle._C_ops.conv2d( - swish_13, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_244 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_243, - parameter_242, - parameter_241, - parameter_240, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_240, parameter_241, parameter_242, parameter_243 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_14, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_239 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_238, - parameter_237, - parameter_236, - 
parameter_235, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_235, parameter_236, parameter_237, parameter_238 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_234 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_233, - parameter_232, - parameter_231, - parameter_230, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_230, parameter_231, parameter_232, parameter_233 - - # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) - add_39 = paddle._C_ops.add(batch_norm__102, batch_norm__108) - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_15 = paddle._C_ops.swish(add_39) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_15, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_229 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_228, - parameter_227, - parameter_226, - parameter_225, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_225, parameter_226, parameter_227, parameter_228 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_16 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_224 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_223, - parameter_222, - parameter_221, - parameter_220, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_220, parameter_221, parameter_222, parameter_223 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_16, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_219 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( 
- batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_218, - parameter_217, - parameter_216, - parameter_215, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_215, parameter_216, parameter_217, parameter_218 - - # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) - add_40 = paddle._C_ops.add(batch_norm__120, batch_norm__126) - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_17 = paddle._C_ops.swish(add_40) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_214 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_213, - parameter_212, - parameter_211, - parameter_210, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_210, parameter_211, parameter_212, parameter_213 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_18 = paddle._C_ops.swish(batch_norm__132) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - swish_18, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_209 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_208, - parameter_207, - parameter_206, - parameter_205, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_205, parameter_206, parameter_207, parameter_208 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_18, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_204 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_203, - parameter_202, - parameter_201, - parameter_200, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_200, parameter_201, parameter_202, parameter_203 - - # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) - add_41 = paddle._C_ops.add(batch_norm__138, 
batch_norm__144) - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_19 = paddle._C_ops.swish(add_41) - - # builtin.combine: ([1x192x120x120xf32, 1x192x120x120xf32]) <- (1x192x120x120xf32, 1x192x120x120xf32) - combine_5 = [swish_12, swish_19] - - # pd_op.concat: (1x384x120x120xf32) <- ([1x192x120x120xf32, 1x192x120x120xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_5, full_7) - del combine_5 - - # pd_op.conv2d: (1x384x120x120xf32) <- (1x384x120x120xf32, 384x384x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - concat_4, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_199 - - # pd_op.batch_norm_: (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_198, - parameter_197, - parameter_196, - parameter_195, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_195, parameter_196, parameter_197, parameter_198 - - # pd_op.swish: (1x384x120x120xf32) <- (1x384x120x120xf32) - swish_20 = paddle._C_ops.swish(batch_norm__150) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x384x120x120xf32, 192x384x1x1xf32) - conv2d_26 = paddle._C_ops.conv2d( - swish_20, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_194 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_193, - parameter_192, - parameter_191, - parameter_190, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_190, parameter_191, parameter_192, parameter_193 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_21 = paddle._C_ops.swish(batch_norm__156) - - # pd_op.nearest_interp: (1x192x240x240xf32) <- (1x192x120x120xf32, None, None, None) - nearest_interp_1 = paddle._C_ops.nearest_interp( - swish_21, - None, - None, - None, - "NCHW", - -1, - -1, - -1, - [float("2"), float("2")], - "nearest", - False, - 0, - ) - - # builtin.combine: ([1x192x240x240xf32, 1x256x-1x-1xf32]) <- (1x192x240x240xf32, 1x256x-1x-1xf32) - combine_6 = [nearest_interp_1, data_8] - del data_8 - - # pd_op.concat: (1x448x240x240xf32) <- ([1x192x240x240xf32, 1x256x-1x-1xf32], 1xi32) - concat_5 = paddle._C_ops.concat(combine_6, full_7) - del combine_6 - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x448x240x240xf32, 96x448x1x1xf32) - conv2d_27 = paddle._C_ops.conv2d( - concat_5, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_189 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_188, - parameter_187, - parameter_186, - parameter_185, - False, - 
float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_185, parameter_186, parameter_187, parameter_188 - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_22 = paddle._C_ops.swish(batch_norm__162) - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x448x240x240xf32, 96x448x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - concat_5, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_184 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_183, - parameter_182, - parameter_181, - parameter_180, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_180, parameter_181, parameter_182, parameter_183 - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - swish_23, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_179 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_178, - parameter_177, - parameter_176, - parameter_175, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_175, parameter_176, parameter_177, parameter_178 - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_24 = paddle._C_ops.swish(batch_norm__174) - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_24, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_174 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_173, - parameter_172, - parameter_171, - parameter_170, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_170, parameter_171, parameter_172, parameter_173 - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_24, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_169 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - 
batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_168, - parameter_167, - parameter_166, - parameter_165, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_165, parameter_166, parameter_167, parameter_168 - - # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) - add_42 = paddle._C_ops.add(batch_norm__180, batch_norm__186) - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_25 = paddle._C_ops.swish(add_42) - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - swish_25, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_164 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_163, - parameter_162, - parameter_161, - parameter_160, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_160, parameter_161, parameter_162, parameter_163 - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_26 = paddle._C_ops.swish(batch_norm__192) - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_26, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_159 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_158, - parameter_157, - parameter_156, - parameter_155, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_155, parameter_156, parameter_157, parameter_158 - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_26, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_154 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_153, - parameter_152, - parameter_151, - parameter_150, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_150, parameter_151, parameter_152, parameter_153 - - # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) - add_43 = paddle._C_ops.add(batch_norm__198, batch_norm__204) - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_27 = paddle._C_ops.swish(add_43) - - # pd_op.conv2d: 
(1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - swish_27, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_149 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_148, - parameter_147, - parameter_146, - parameter_145, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_145, parameter_146, parameter_147, parameter_148 - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_28 = paddle._C_ops.swish(batch_norm__210) - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_28, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_144 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_143, - parameter_142, - parameter_141, - parameter_140, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_140, parameter_141, parameter_142, parameter_143 - - # pd_op.conv2d: (1x96x240x240xf32) <- (1x96x240x240xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_28, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_139 - - # pd_op.batch_norm_: (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x240x240xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_138, - parameter_137, - parameter_136, - parameter_135, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_135, parameter_136, parameter_137, parameter_138 - - # pd_op.add: (1x96x240x240xf32) <- (1x96x240x240xf32, 1x96x240x240xf32) - add_44 = paddle._C_ops.add(batch_norm__216, batch_norm__222) - - # pd_op.swish: (1x96x240x240xf32) <- (1x96x240x240xf32) - swish_29 = paddle._C_ops.swish(add_44) - - # builtin.combine: ([1x96x240x240xf32, 1x96x240x240xf32]) <- (1x96x240x240xf32, 1x96x240x240xf32) - combine_7 = [swish_22, swish_29] - - # pd_op.concat: (1x192x240x240xf32) <- ([1x96x240x240xf32, 1x96x240x240xf32], 1xi32) - concat_6 = paddle._C_ops.concat(combine_7, full_7) - del combine_7 - - # pd_op.conv2d: (1x192x240x240xf32) <- (1x192x240x240xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - concat_6, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_134 - - # pd_op.batch_norm_: (1x192x240x240xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x240x240xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__228, - 
batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_38, - parameter_133, - parameter_132, - parameter_131, - parameter_130, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_130, parameter_131, parameter_132, parameter_133 - - # pd_op.swish: (1x192x240x240xf32) <- (1x192x240x240xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x240x240xf32, 192x192x3x3xf32) - conv2d_39 = paddle._C_ops.conv2d( - swish_30, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_129 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_128, - parameter_127, - parameter_126, - parameter_125, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_125, parameter_126, parameter_127, parameter_128 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) - - # builtin.combine: ([1x192x120x120xf32, 1x384x120x120xf32]) <- (1x192x120x120xf32, 1x384x120x120xf32) - combine_8 = [swish_31, swish_20] - - # pd_op.concat: (1x576x120x120xf32) <- ([1x192x120x120xf32, 1x384x120x120xf32], 1xi32) - concat_7 = paddle._C_ops.concat(combine_8, full_7) - del combine_8 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x576x120x120xf32, 192x576x1x1xf32) - conv2d_40 = paddle._C_ops.conv2d( - concat_7, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_124 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_123, - parameter_122, - parameter_121, - parameter_120, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_120, parameter_121, parameter_122, parameter_123 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x576x120x120xf32, 192x576x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - concat_7, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_119 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_118, - parameter_117, - parameter_116, - parameter_115, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, 
(list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_115, parameter_116, parameter_117, parameter_118 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_33, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_114 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_113, - parameter_112, - parameter_111, - parameter_110, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_110, parameter_111, parameter_112, parameter_113 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_34 = paddle._C_ops.swish(batch_norm__252) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_34, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_109 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_108, - parameter_107, - parameter_106, - parameter_105, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_105, parameter_106, parameter_107, parameter_108 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_34, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_104 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_103, - parameter_102, - parameter_101, - parameter_100, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_103 - - # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) - add_45 = paddle._C_ops.add(batch_norm__258, batch_norm__264) - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_35 = paddle._C_ops.swish(add_45) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_35, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_99 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - 
batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_98, - parameter_97, - parameter_96, - parameter_95, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_95, parameter_96, parameter_97, parameter_98 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_36 = paddle._C_ops.swish(batch_norm__270) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - swish_36, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_94 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_93, - parameter_92, - parameter_91, - parameter_90, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_90, parameter_91, parameter_92, parameter_93 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_36, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_89 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_88, - parameter_87, - parameter_86, - parameter_85, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_85, parameter_86, parameter_87, parameter_88 - - # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) - add_46 = paddle._C_ops.add(batch_norm__276, batch_norm__282) - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_37 = paddle._C_ops.swish(add_46) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x3x3xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_37, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_84 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_83, - parameter_82, - parameter_81, - parameter_80, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_80, parameter_81, parameter_82, parameter_83 - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_38 = paddle._C_ops.swish(batch_norm__288) - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 
192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - swish_38, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_79 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_78, - parameter_77, - parameter_76, - parameter_75, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_75, parameter_76, parameter_77, parameter_78 - - # pd_op.conv2d: (1x192x120x120xf32) <- (1x192x120x120xf32, 192x192x1x1xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_38, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_74 - - # pd_op.batch_norm_: (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x120x120xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_73, - parameter_72, - parameter_71, - parameter_70, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_70, parameter_71, parameter_72, parameter_73 - - # pd_op.add: (1x192x120x120xf32) <- (1x192x120x120xf32, 1x192x120x120xf32) - add_47 = paddle._C_ops.add(batch_norm__294, batch_norm__300) - - # pd_op.swish: (1x192x120x120xf32) <- (1x192x120x120xf32) - swish_39 = paddle._C_ops.swish(add_47) - - # builtin.combine: ([1x192x120x120xf32, 1x192x120x120xf32]) <- (1x192x120x120xf32, 1x192x120x120xf32) - combine_9 = [swish_32, swish_39] - - # pd_op.concat: (1x384x120x120xf32) <- ([1x192x120x120xf32, 1x192x120x120xf32], 1xi32) - concat_8 = paddle._C_ops.concat(combine_9, full_7) - del combine_9 - - # pd_op.conv2d: (1x384x120x120xf32) <- (1x384x120x120xf32, 384x384x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - concat_8, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_69 - - # pd_op.batch_norm_: (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x120x120xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_68, - parameter_67, - parameter_66, - parameter_65, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_65, parameter_66, parameter_67, parameter_68 - - # pd_op.swish: (1x384x120x120xf32) <- (1x384x120x120xf32) - swish_40 = paddle._C_ops.swish(batch_norm__306) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x120x120xf32, 384x384x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - swish_40, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_64 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - 
batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_63, - parameter_62, - parameter_61, - parameter_60, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_60, parameter_61, parameter_62, parameter_63 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_41 = paddle._C_ops.swish(batch_norm__312) - - # builtin.combine: ([1x384x60x60xf32, 1x768x60x60xf32]) <- (1x384x60x60xf32, 1x768x60x60xf32) - combine_10 = [swish_41, swish_10] - - # pd_op.concat: (1x1152x60x60xf32) <- ([1x384x60x60xf32, 1x768x60x60xf32], 1xi32) - concat_9 = paddle._C_ops.concat(combine_10, full_7) - del combine_10 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x1152x60x60xf32, 384x1152x1x1xf32) - conv2d_53 = paddle._C_ops.conv2d( - concat_9, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_59 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_58, - parameter_57, - parameter_56, - parameter_55, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_55, parameter_56, parameter_57, parameter_58 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_42 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x1152x60x60xf32, 384x1152x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - concat_9, parameter_54, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_54 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_53, - parameter_52, - parameter_51, - parameter_50, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_50, parameter_51, parameter_52, parameter_53 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_43 = paddle._C_ops.swish(batch_norm__324) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - swish_43, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_49 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_48, - parameter_47, - parameter_46, - parameter_45, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_45, parameter_46, parameter_47, 
parameter_48 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_44 = paddle._C_ops.swish(batch_norm__330) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_44, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_43, - parameter_42, - parameter_41, - parameter_40, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_40, parameter_41, parameter_42, parameter_43 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_44, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_39 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_38, - parameter_37, - parameter_36, - parameter_35, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_35, parameter_36, parameter_37, parameter_38 - - # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) - add_48 = paddle._C_ops.add(batch_norm__336, batch_norm__342) - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_45 = paddle._C_ops.swish(add_48) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - swish_45, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_34 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_33, - parameter_32, - parameter_31, - parameter_30, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_30, parameter_31, parameter_32, parameter_33 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_46 = paddle._C_ops.swish(batch_norm__348) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_46, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_29 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_28, - parameter_27, - 
parameter_26, - parameter_25, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_25, parameter_26, parameter_27, parameter_28 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_46, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 - - # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) - add_49 = paddle._C_ops.add(batch_norm__354, batch_norm__360) - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_47 = paddle._C_ops.swish(add_49) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_61 = paddle._C_ops.conv2d( - swish_47, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_61, - parameter_18, - parameter_17, - parameter_16, - parameter_15, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_15, parameter_16, parameter_17, parameter_18 - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_48 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x3x3xf32) - conv2d_62 = paddle._C_ops.conv2d( - swish_48, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_14 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_13, - parameter_12, - parameter_11, - parameter_10, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_11, parameter_12, parameter_13 - - # pd_op.conv2d: (1x384x60x60xf32) <- (1x384x60x60xf32, 384x384x1x1xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_48, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.batch_norm_: (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x60x60xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - 
batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_8, - parameter_7, - parameter_6, - parameter_5, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_5, parameter_6, parameter_7, parameter_8 - - # pd_op.add: (1x384x60x60xf32) <- (1x384x60x60xf32, 1x384x60x60xf32) - add_50 = paddle._C_ops.add(batch_norm__372, batch_norm__378) - - # pd_op.swish: (1x384x60x60xf32) <- (1x384x60x60xf32) - swish_49 = paddle._C_ops.swish(add_50) - - # builtin.combine: ([1x384x60x60xf32, 1x384x60x60xf32]) <- (1x384x60x60xf32, 1x384x60x60xf32) - combine_11 = [swish_42, swish_49] - - # pd_op.concat: (1x768x60x60xf32) <- ([1x384x60x60xf32, 1x384x60x60xf32], 1xi32) - concat_10 = paddle._C_ops.concat(combine_11, full_7) - del combine_11, full_7 - - # pd_op.conv2d: (1x768x60x60xf32) <- (1x768x60x60xf32, 768x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - concat_10, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x60x60xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (1x768x60x60xf32) <- (1x768x60x60xf32) - swish_0 = paddle._C_ops.swish(batch_norm__384) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_14, - add_15, - add_17, - add_18, - add_19, - add_2, - add_20, - add_21, - add_23, - add_24, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_32, - add_33, - add_35, - add_36, - add_37, - add_38, - add_39, - add_40, - add_41, - add_42, - add_43, - add_44, - add_45, - add_46, - add_47, - add_48, - add_49, - add_5, - add_50, - add_6, - add_8, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_18, - assign_19, - assign_2, - assign_20, - assign_21, - assign_22, - assign_23, - assign_24, - assign_25, - assign_26, - assign_27, - assign_28, - assign_29, - assign_3, - assign_30, - assign_31, - assign_32, - assign_33, - assign_34, - assign_35, - assign_36, - assign_37, - assign_38, - assign_39, - assign_4, - assign_40, - assign_41, - assign_42, - assign_43, - assign_44, - assign_45, - assign_46, - assign_47, - assign_48, - assign_49, - assign_5, - assign_50, - assign_51, - assign_52, - assign_53, - assign_54, - assign_55, - assign_56, - assign_57, - assign_58, - assign_59, - assign_6, - assign_60, - assign_61, - assign_62, - assign_63, - assign_64, - assign_65, - assign_66, - assign_67, - assign_68, - assign_69, - assign_7, - assign_70, - assign_71, - assign_72, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - 
batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - 
batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__4, - batch_norm__40, - batch_norm__41, - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - batch_norm__48, - batch_norm__49, - batch_norm__5, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - batch_norm__6, - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__7, - batch_norm__70, - batch_norm__71, - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - batch_norm__78, - batch_norm__79, - batch_norm__8, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_1, - concat_10, - concat_2, - concat_3, - concat_4, - concat_5, - concat_6, - 
concat_7, - concat_8, - concat_9, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_7, - conv2d_8, - conv2d_9, - dropout_0, - dropout_1, - dropout_10, - dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_16, - dropout_17, - dropout_18, - dropout_19, - dropout_2, - dropout_20, - dropout_21, - dropout_22, - dropout_23, - dropout_24, - dropout_25, - dropout_26, - dropout_27, - dropout_28, - dropout_29, - dropout_3, - dropout_30, - dropout_31, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_8, - dropout_9, - full_8, - full_9, - full_int_array_10, - full_int_array_2, - full_int_array_4, - full_int_array_5, - full_int_array_8, - full_int_array_9, - layer_norm_0, - layer_norm_1, - layer_norm_10, - layer_norm_11, - layer_norm_12, - layer_norm_13, - layer_norm_14, - layer_norm_15, - layer_norm_16, - layer_norm_17, - layer_norm_18, - layer_norm_19, - layer_norm_2, - layer_norm_20, - layer_norm_22, - layer_norm_23, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_10, - matmul_11, - matmul_12, - matmul_15, - matmul_16, - matmul_17, - matmul_18, - matmul_19, - matmul_2, - matmul_20, - matmul_23, - matmul_24, - matmul_25, - matmul_26, - matmul_27, - matmul_28, - matmul_3, - matmul_31, - matmul_32, - matmul_33, - matmul_4, - matmul_7, - matmul_8, - matmul_9, - nearest_interp_0, - nearest_interp_1, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_11, - reshape_15, - reshape_16, - reshape_3, - reshape_7, - slice_0, - slice_1, - slice_10, - slice_11, - slice_12, - slice_13, - slice_14, - slice_15, - slice_16, - slice_17, - slice_18, - slice_19, - slice_2, - slice_20, - slice_21, - slice_22, - slice_23, - slice_3, - slice_4, - slice_5, - slice_6, - slice_7, - slice_8, - slice_9, - softmax_0, - softmax_1, - softmax_2, - softmax_3, - swish_1, - swish_10, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_6, - swish_7, - swish_8, - swish_9, - transpose_0, - transpose_1, - transpose_10, - transpose_11, - transpose_12, - transpose_13, - transpose_14, - transpose_15, - transpose_16, - transpose_17, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - transpose_9, - unsqueeze_3, - ) + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = 
paddle._C_ops.greater_than(max_0, full_13) + del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 - return swish_0 + return greater_than_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py index bd82badb0..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_2/weight_meta.py @@ -1,4013 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [768] - dtype = "float32" - min_val = float("-0.175875") - max_val = float("0.210823") - mean = float("0.0834695") - std = float("0.0566098") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [768] - dtype = "float32" - min_val = float("0.939895") - max_val = float("1.29826") - mean = float("1.06397") - std = float("0.0312259") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [768] - dtype = "float32" - min_val = float("0.00114665") - max_val = float("0.0503602") - mean = float("0.00766089") - std = float("0.00465424") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [768] - dtype = "float32" - min_val = float("-0.134835") - max_val = float("0.0565135") - mean = float("-0.0288952") - std = float("0.0290584") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0548133") - max_val = float("0.0388088") - mean = float("-0.000154659") - std = float("0.00249634") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [384] - dtype = "float32" - min_val = float("-0.14169") - max_val = float("0.0305817") - mean = float("-0.0188052") - std = float("0.0234504") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [384] - dtype = "float32" - min_val = float("0.945748") - max_val = float("1.04442") - mean = float("0.98666") - std = float("0.0105852") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [384] - dtype = "float32" - min_val = float("0.000803951") - max_val = float("0.0187689") - mean = float("0.00492345") - std = float("0.00343028") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = float("-0.0551849") - max_val = float("0.062912") - mean = float("0.00285491") - std = float("0.0223164") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0299323") - max_val = float("0.020664") - mean = float("2.29275e-05") - std = float("0.00192338") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384] - dtype = "float32" - min_val = float("-0.14169") - max_val = float("0.0305817") - mean = float("-0.0188052") - std = float("0.0234504") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [384] - dtype = "float32" - min_val = float("0.968039") - max_val = float("1.13059") - mean = float("1.01544") - std = float("0.0171846") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = "float32" - min_val = float("0.00197981") - max_val 
= float("0.0503469") - mean = float("0.00764845") - std = float("0.00453187") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("-0.202706") - max_val = float("0.152191") - mean = float("-0.0431393") - std = float("0.0362812") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.029908") - max_val = float("0.035511") - mean = float("-7.29869e-05") - std = float("0.00131195") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-0.170219") - max_val = float("0.0209993") - mean = float("-0.0348873") - std = float("0.0279313") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384] - dtype = "float32" - min_val = float("0.975222") - max_val = float("1.12587") - mean = float("1.015") - std = float("0.0240805") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("0.00530043") - max_val = float("0.186183") - mean = float("0.0215775") - std = float("0.0159645") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384] - dtype = "float32" - min_val = float("-0.265254") - max_val = float("0.415528") - mean = float("-0.0379328") - std = float("0.0510841") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0331338") - max_val = float("0.0530854") - mean = float("-6.31513e-05") - std = float("0.00148047") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.105219") - max_val = float("0.0129843") - mean = float("-0.0358029") - std = float("0.0193236") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.945357") - max_val = float("1.04501") - mean = float("0.988631") - std = float("0.00984229") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("0.000690331") - max_val = float("0.0185684") - mean = float("0.00513745") - std = float("0.00318726") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.0849168") - max_val = float("0.0438217") - mean = float("-0.00259504") - std = float("0.017162") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0267959") - max_val = float("0.025491") - mean = float("-5.37283e-05") - std = float("0.00203271") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.105219") - max_val = float("0.0129843") - mean = float("-0.0358029") - std = float("0.0193236") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384] - dtype = "float32" - min_val = float("0.959852") - max_val = float("1.10509") - mean = float("1.01609") - std = float("0.0177564") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [384] - 
dtype = "float32" - min_val = float("0.00248164") - max_val = float("0.0491499") - mean = float("0.00954645") - std = float("0.00479825") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("-0.215332") - max_val = float("0.320794") - mean = float("-0.0502206") - std = float("0.044921") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0363929") - max_val = float("0.0514823") - mean = float("-8.4193e-05") - std = float("0.00132563") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("-0.0896627") - max_val = float("0.0192839") - mean = float("-0.0360783") - std = float("0.0194692") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.933291") - max_val = float("1.11466") - mean = float("1.01167") - std = float("0.026589") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("0.00571017") - max_val = float("0.0668329") - mean = float("0.0190313") - std = float("0.00954406") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("-0.241704") - max_val = float("0.126745") - mean = float("-0.0297335") - std = float("0.0569594") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0397047") - max_val = float("0.0499731") - mean = float("-5.43156e-05") - std = float("0.00151173") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-0.116341") - max_val = float("0.0161185") - mean = float("-0.0373639") - std = float("0.0201507") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384] - dtype = "float32" - min_val = float("0.929383") - max_val = float("1.02791") - mean = float("0.98704") - std = float("0.0110296") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("0.00125121") - max_val = float("0.0114154") - mean = float("0.00429573") - std = float("0.00176768") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384] - dtype = "float32" - min_val = float("-0.0558903") - max_val = float("0.0353347") - mean = float("-0.00854145") - std = float("0.0134779") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0386337") - max_val = float("0.028212") - mean = float("-0.000152706") - std = float("0.00204597") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("-0.116341") - max_val = float("0.0161185") - mean = float("-0.0373639") - std = float("0.0201507") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384] - dtype = "float32" - min_val = float("0.981354") - max_val = float("1.10683") - mean = float("1.01834") - std = float("0.0222205") - data = None - - -class 
Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384] - dtype = "float32" - min_val = float("0.00487818") - max_val = float("0.0360324") - mean = float("0.0114429") - std = float("0.00469479") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384] - dtype = "float32" - min_val = float("-0.191838") - max_val = float("0.0902951") - mean = float("-0.0270964") - std = float("0.0352644") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0360859") - max_val = float("0.0633791") - mean = float("-4.66789e-05") - std = float("0.00138059") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [384] - dtype = "float32" - min_val = float("-0.107113") - max_val = float("0.0239382") - mean = float("-0.0375215") - std = float("0.0214567") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [384] - dtype = "float32" - min_val = float("0.944795") - max_val = float("1.11465") - mean = float("1.01186") - std = float("0.0277861") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [384] - dtype = "float32" - min_val = float("0.00539047") - max_val = float("0.0596139") - mean = float("0.0152076") - std = float("0.0073805") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("-0.154913") - max_val = float("0.125914") - mean = float("-0.0486592") - std = float("0.0509119") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0279281") - max_val = float("0.0439271") - mean = float("-7.87755e-05") - std = float("0.00153817") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("-0.10674") - max_val = float("0.046738") - mean = float("-0.026306") - std = float("0.0154157") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384] - dtype = "float32" - min_val = float("0.973756") - max_val = float("1.08653") - mean = float("1.00903") - std = float("0.0171142") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [384] - dtype = "float32" - min_val = float("0.00231428") - max_val = float("0.0166757") - mean = float("0.00532256") - std = float("0.00190646") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384] - dtype = "float32" - min_val = float("-0.10048") - max_val = float("0.0869698") - mean = float("-0.0197301") - std = float("0.0269629") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0619005") - max_val = float("0.0744809") - mean = float("-8.91799e-05") - std = float("0.00230778") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [384] - dtype = "float32" - min_val = float("-0.0424904") - max_val = float("0.0160654") - mean = float("-0.00899509") - std = float("0.00840798") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [384] - dtype = "float32" - min_val = float("0.959519") - max_val = float("1.05137") - mean = 
float("1.00788") - std = float("0.0115961") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [384] - dtype = "float32" - min_val = float("0.00124549") - max_val = float("0.0304895") - mean = float("0.00442696") - std = float("0.00213864") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [384] - dtype = "float32" - min_val = float("-0.110999") - max_val = float("0.0924363") - mean = float("-0.0236762") - std = float("0.0234971") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [384, 1152, 1, 1] - dtype = "float32" - min_val = float("-0.0245473") - max_val = float("0.0425909") - mean = float("-0.000112646") - std = float("0.00208633") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.0529748") - max_val = float("0.0059538") - mean = float("-0.0166275") - std = float("0.00987957") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384] - dtype = "float32" - min_val = float("0.988678") - max_val = float("1.10388") - mean = float("1.01957") - std = float("0.0168754") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [384] - dtype = "float32" - min_val = float("0.00468338") - max_val = float("0.0641493") - mean = float("0.0144074") - std = float("0.0082444") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [384] - dtype = "float32" - min_val = float("-0.44327") - max_val = float("0.19537") - mean = float("-0.0473944") - std = float("0.0713197") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0212973") - max_val = float("0.0335283") - mean = float("-3.20311e-05") - std = float("0.00117985") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [384] - dtype = "float32" - min_val = float("-0.222314") - max_val = float("0.492622") - mean = float("0.217344") - std = float("0.124262") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [384] - dtype = "float32" - min_val = float("0.919259") - max_val = float("1.4834") - mean = float("1.14101") - std = float("0.0738465") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [384] - dtype = "float32" - min_val = float("0.00374913") - max_val = float("0.0753866") - mean = float("0.0117258") - std = float("0.00578106") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [384] - dtype = "float32" - min_val = float("-0.129657") - max_val = float("0.0597492") - mean = float("-0.037386") - std = float("0.0303036") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0788092") - max_val = float("0.0718385") - mean = float("-0.00042023") - std = float("0.00505347") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [192] - dtype = "float32" - min_val = float("-0.165903") - max_val = float("0.0468638") - mean = float("-0.0248091") - std = float("0.0394948") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [192] - dtype = "float32" - min_val = 
float("0.841187") - max_val = float("1.05089") - mean = float("0.972721") - std = float("0.0237726") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [192] - dtype = "float32" - min_val = float("0.00140171") - max_val = float("0.0211185") - mean = float("0.00615762") - std = float("0.00390072") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [192] - dtype = "float32" - min_val = float("-0.0638207") - max_val = float("0.0926642") - mean = float("-0.00576702") - std = float("0.0201979") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0496362") - max_val = float("0.0295849") - mean = float("-0.000179203") - std = float("0.00381061") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [192] - dtype = "float32" - min_val = float("-0.165903") - max_val = float("0.0468638") - mean = float("-0.0248091") - std = float("0.0394948") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [192] - dtype = "float32" - min_val = float("0.729841") - max_val = float("1.12263") - mean = float("1.02218") - std = float("0.0372419") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [192] - dtype = "float32" - min_val = float("0.00540078") - max_val = float("0.0575204") - mean = float("0.0136539") - std = float("0.00636345") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [192] - dtype = "float32" - min_val = float("-0.219646") - max_val = float("0.101609") - mean = float("-0.037761") - std = float("0.0435659") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0430374") - max_val = float("0.0495163") - mean = float("-0.000124453") - std = float("0.00256786") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [192] - dtype = "float32" - min_val = float("-0.191344") - max_val = float("0.0444996") - mean = float("-0.057942") - std = float("0.0491063") - data = None - - -class Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [192] - dtype = "float32" - min_val = float("0.897737") - max_val = float("1.18792") - mean = float("1.01539") - std = float("0.0484046") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [192] - dtype = "float32" - min_val = float("0.010406") - max_val = float("0.203638") - mean = float("0.0350038") - std = float("0.0227289") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [192] - dtype = "float32" - min_val = float("-0.296379") - max_val = float("0.516951") - mean = float("-0.0407173") - std = float("0.0634974") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0473781") - max_val = float("0.0557186") - mean = float("-0.00011045") - std = float("0.00285571") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [192] - dtype = "float32" - min_val = float("-0.191632") - max_val = float("0.00854023") - mean = float("-0.064207") - std = float("0.0334262") - data = None - - -class Program_weight_tensor_parameter_86: - name = 
"parameter_86" - shape = [192] - dtype = "float32" - min_val = float("0.922153") - max_val = float("1.04653") - mean = float("0.973445") - std = float("0.017956") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [192] - dtype = "float32" - min_val = float("0.00109672") - max_val = float("0.0152916") - mean = float("0.00524439") - std = float("0.00262094") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [192] - dtype = "float32" - min_val = float("-0.0707542") - max_val = float("0.0365569") - mean = float("-0.00798934") - std = float("0.0151914") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0386532") - max_val = float("0.0308154") - mean = float("-0.000343292") - std = float("0.00384278") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [192] - dtype = "float32" - min_val = float("-0.191632") - max_val = float("0.00854023") - mean = float("-0.064207") - std = float("0.0334262") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [192] - dtype = "float32" - min_val = float("0.968104") - max_val = float("1.14778") - mean = float("1.02415") - std = float("0.0294364") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [192] - dtype = "float32" - min_val = float("0.00424155") - max_val = float("0.04597") - mean = float("0.0118971") - std = float("0.00620602") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [192] - dtype = "float32" - min_val = float("-0.186844") - max_val = float("0.141321") - mean = float("-0.0381112") - std = float("0.0386452") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0471115") - max_val = float("0.0550156") - mean = float("-0.000140673") - std = float("0.00262922") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [192] - dtype = "float32" - min_val = float("-0.188926") - max_val = float("0.062054") - mean = float("-0.0755775") - std = float("0.0405971") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [192] - dtype = "float32" - min_val = float("0.880419") - max_val = float("1.21878") - mean = float("1.01465") - std = float("0.050849") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [192] - dtype = "float32" - min_val = float("0.00813457") - max_val = float("0.0698677") - mean = float("0.0225257") - std = float("0.0112607") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [192] - dtype = "float32" - min_val = float("-0.123249") - max_val = float("0.0530247") - mean = float("-0.0265596") - std = float("0.0349301") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0427988") - max_val = float("0.0615636") - mean = float("-0.000111453") - std = float("0.00299174") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [192] - dtype = "float32" - min_val = float("-0.229476") - max_val = float("-0.00962432") - mean = float("-0.0831852") - std = float("0.0422479") - data = 
None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [192] - dtype = "float32" - min_val = float("0.900428") - max_val = float("1.02666") - mean = float("0.975123") - std = float("0.0229582") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [192] - dtype = "float32" - min_val = float("0.00166927") - max_val = float("0.0149214") - mean = float("0.00548105") - std = float("0.00187349") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [192] - dtype = "float32" - min_val = float("-0.0393056") - max_val = float("0.0471334") - mean = float("-0.0107647") - std = float("0.0172946") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0436514") - max_val = float("0.0635205") - mean = float("-0.000488458") - std = float("0.00437095") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [192] - dtype = "float32" - min_val = float("-0.229476") - max_val = float("-0.00962433") - mean = float("-0.0831852") - std = float("0.0422479") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [192] - dtype = "float32" - min_val = float("0.947654") - max_val = float("1.11111") - mean = float("1.02112") - std = float("0.0306157") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [192] - dtype = "float32" - min_val = float("0.00719786") - max_val = float("0.0556434") - mean = float("0.0161388") - std = float("0.00787651") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [192] - dtype = "float32" - min_val = float("-0.131118") - max_val = float("0.0600064") - mean = float("-0.0237574") - std = float("0.0337413") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0485053") - max_val = float("0.0562451") - mean = float("-9.74267e-05") - std = float("0.00278606") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [192] - dtype = "float32" - min_val = float("-0.234305") - max_val = float("0.0813681") - mean = float("-0.0947175") - std = float("0.0463051") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [192] - dtype = "float32" - min_val = float("0.886145") - max_val = float("1.20472") - mean = float("1.01666") - std = float("0.0540248") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [192] - dtype = "float32" - min_val = float("0.00868237") - max_val = float("0.0982557") - mean = float("0.0208977") - std = float("0.0125654") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [192] - dtype = "float32" - min_val = float("-0.181299") - max_val = float("0.0965931") - mean = float("-0.0401902") - std = float("0.0436891") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0410138") - max_val = float("0.0751959") - mean = float("-0.000134497") - std = float("0.0032483") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [192] - dtype = "float32" - min_val = float("-0.199948") - 
max_val = float("0.0153483") - mean = float("-0.0662884") - std = float("0.031178") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [192] - dtype = "float32" - min_val = float("0.925493") - max_val = float("1.15259") - mean = float("1.01328") - std = float("0.0383643") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [192] - dtype = "float32" - min_val = float("0.00445121") - max_val = float("0.0245699") - mean = float("0.00852168") - std = float("0.00311132") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [192] - dtype = "float32" - min_val = float("-0.0890928") - max_val = float("0.122453") - mean = float("-0.0225451") - std = float("0.0292516") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0628757") - max_val = float("0.0645973") - mean = float("-0.000195493") - std = float("0.00467829") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [192] - dtype = "float32" - min_val = float("-0.099963") - max_val = float("0.0374111") - mean = float("-0.0139724") - std = float("0.0203964") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [192] - dtype = "float32" - min_val = float("0.923856") - max_val = float("1.19918") - mean = float("1.00277") - std = float("0.025885") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [192] - dtype = "float32" - min_val = float("0.00335112") - max_val = float("0.0376873") - mean = float("0.00846023") - std = float("0.00424043") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [192] - dtype = "float32" - min_val = float("-0.0728879") - max_val = float("0.0457376") - mean = float("-0.0169366") - std = float("0.0214102") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [192, 576, 1, 1] - dtype = "float32" - min_val = float("-0.0557051") - max_val = float("0.0726594") - mean = float("-0.000148829") - std = float("0.00416084") - data = None - - -class Program_weight_tensor_parameter_125: - name = "parameter_125" - shape = [192] - dtype = "float32" - min_val = float("-0.15908") - max_val = float("-0.000555524") - mean = float("-0.038944") - std = float("0.0217257") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.921159") - max_val = float("1.24866") - mean = float("1.00725") - std = float("0.0301467") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("0.00450918") - max_val = float("0.0608082") - mean = float("0.0161327") - std = float("0.00840885") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192] - dtype = "float32" - min_val = float("-0.400402") - max_val = float("0.338842") - mean = float("-0.0361684") - std = float("0.0966519") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0350247") - max_val = float("0.0471653") - mean = float("-3.44506e-05") - std = float("0.00253963") - data = None - - -class Program_weight_tensor_parameter_130: - 
name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("-0.552249") - max_val = float("1.14732") - mean = float("0.355898") - std = float("0.346059") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.541478") - max_val = float("1.57746") - mean = float("1.15098") - std = float("0.184373") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("0.00575627") - max_val = float("0.117619") - mean = float("0.0298341") - std = float("0.017588") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192] - dtype = "float32" - min_val = float("-0.182786") - max_val = float("0.205671") - mean = float("-0.0504974") - std = float("0.0491969") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.14008") - max_val = float("0.117816") - mean = float("-0.00105786") - std = float("0.0117759") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [96] - dtype = "float32" - min_val = float("-0.457965") - max_val = float("0.231213") - mean = float("-0.0094414") - std = float("0.144606") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [96] - dtype = "float32" - min_val = float("0.762871") - max_val = float("1.23462") - mean = float("0.948542") - std = float("0.0712293") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [96] - dtype = "float32" - min_val = float("0.00291973") - max_val = float("0.0418611") - mean = float("0.0123709") - std = float("0.00820581") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [96] - dtype = "float32" - min_val = float("-0.0589327") - max_val = float("0.0912759") - mean = float("-0.0136043") - std = float("0.0242705") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0753795") - max_val = float("0.0571307") - mean = float("-0.00127258") - std = float("0.00926422") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [96] - dtype = "float32" - min_val = float("-0.457965") - max_val = float("0.231213") - mean = float("-0.0094414") - std = float("0.144606") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [96] - dtype = "float32" - min_val = float("0.505008") - max_val = float("1.2709") - mean = float("1.02954") - std = float("0.096255") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [96] - dtype = "float32" - min_val = float("0.00864928") - max_val = float("0.0821514") - mean = float("0.0294604") - std = float("0.0153982") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [96] - dtype = "float32" - min_val = float("-0.235962") - max_val = float("0.133407") - mean = float("-0.023131") - std = float("0.0603207") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0934278") - max_val = float("0.0953093") - mean = float("-0.000117742") - std = float("0.00631181") - data = 
None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [96] - dtype = "float32" - min_val = float("-0.703686") - max_val = float("0.495423") - mean = float("-0.112778") - std = float("0.198105") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [96] - dtype = "float32" - min_val = float("0.723215") - max_val = float("1.7117") - mean = float("0.995187") - std = float("0.133891") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [96] - dtype = "float32" - min_val = float("0.01304") - max_val = float("0.194962") - mean = float("0.0419874") - std = float("0.0313694") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [96] - dtype = "float32" - min_val = float("-0.214376") - max_val = float("0.134518") - mean = float("-0.0301872") - std = float("0.0628285") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0919381") - max_val = float("0.0707172") - mean = float("-0.000470717") - std = float("0.00699141") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [96] - dtype = "float32" - min_val = float("-0.36415") - max_val = float("0.190267") - mean = float("-0.138622") - std = float("0.0960162") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [96] - dtype = "float32" - min_val = float("0.626999") - max_val = float("1.01953") - mean = float("0.906483") - std = float("0.05556") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [96] - dtype = "float32" - min_val = float("0.0031041") - max_val = float("0.0221224") - mean = float("0.0106635") - std = float("0.00413137") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [96] - dtype = "float32" - min_val = float("-0.0660527") - max_val = float("0.0390983") - mean = float("-0.0088487") - std = float("0.0169076") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0710399") - max_val = float("0.0593543") - mean = float("-0.00106871") - std = float("0.00947732") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [96] - dtype = "float32" - min_val = float("-0.36415") - max_val = float("0.190267") - mean = float("-0.138622") - std = float("0.0960162") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [96] - dtype = "float32" - min_val = float("0.811164") - max_val = float("1.15777") - mean = float("1.02225") - std = float("0.0605937") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [96] - dtype = "float32" - min_val = float("0.0111709") - max_val = float("0.105695") - mean = float("0.0280629") - std = float("0.0196981") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [96] - dtype = "float32" - min_val = float("-0.164873") - max_val = float("0.0372368") - mean = float("-0.0373236") - std = float("0.0335712") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0811913") - max_val = float("0.0769005") - mean = 
float("-0.000466698") - std = float("0.00651756") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [96] - dtype = "float32" - min_val = float("-0.486488") - max_val = float("0.169402") - mean = float("-0.166991") - std = float("0.131221") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [96] - dtype = "float32" - min_val = float("0.777448") - max_val = float("1.29252") - mean = float("0.963023") - std = float("0.0981105") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [96] - dtype = "float32" - min_val = float("0.00992224") - max_val = float("0.105847") - mean = float("0.0243501") - std = float("0.0137999") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [96] - dtype = "float32" - min_val = float("-0.154927") - max_val = float("0.0659024") - mean = float("0.00872401") - std = float("0.0386172") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0993817") - max_val = float("0.0757508") - mean = float("-0.000423259") - std = float("0.00766977") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [96] - dtype = "float32" - min_val = float("-0.489705") - max_val = float("0.0651658") - mean = float("-0.168146") - std = float("0.114783") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [96] - dtype = "float32" - min_val = float("0.722939") - max_val = float("1.0022") - mean = float("0.918838") - std = float("0.0531756") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [96] - dtype = "float32" - min_val = float("0.00775018") - max_val = float("0.036921") - mean = float("0.0160984") - std = float("0.0056277") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [96] - dtype = "float32" - min_val = float("-0.0570203") - max_val = float("0.0404215") - mean = float("-0.019887") - std = float("0.0190603") - data = None - - -class Program_weight_tensor_parameter_169: - name = "parameter_169" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.103947") - max_val = float("0.0646105") - mean = float("-0.00221249") - std = float("0.0110162") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [96] - dtype = "float32" - min_val = float("-0.489705") - max_val = float("0.0651658") - mean = float("-0.168146") - std = float("0.114783") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [96] - dtype = "float32" - min_val = float("0.766535") - max_val = float("1.15353") - mean = float("0.982409") - std = float("0.0579775") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [96] - dtype = "float32" - min_val = float("0.0171361") - max_val = float("0.213573") - mean = float("0.0436358") - std = float("0.0309431") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [96] - dtype = "float32" - min_val = float("-0.199015") - max_val = float("0.0871906") - mean = float("-0.0157018") - std = float("0.0414583") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = 
float("-0.099267") - max_val = float("0.0973901") - mean = float("-0.000248761") - std = float("0.00741391") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [96] - dtype = "float32" - min_val = float("-0.564609") - max_val = float("0.347562") - mean = float("-0.179116") - std = float("0.173215") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [96] - dtype = "float32" - min_val = float("0.764463") - max_val = float("1.33669") - mean = float("0.954532") - std = float("0.110883") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [96] - dtype = "float32" - min_val = float("0.0148204") - max_val = float("0.110617") - mean = float("0.0314773") - std = float("0.0184817") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [96] - dtype = "float32" - min_val = float("-0.172672") - max_val = float("0.269786") - mean = float("-0.0212967") - std = float("0.0939031") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.142387") - max_val = float("0.117261") - mean = float("-0.000229489") - std = float("0.00873001") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [96] - dtype = "float32" - min_val = float("-0.625413") - max_val = float("0.597772") - mean = float("-0.0821868") - std = float("0.254375") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [96] - dtype = "float32" - min_val = float("0.647481") - max_val = float("1.22746") - mean = float("0.866594") - std = float("0.1146") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [96] - dtype = "float32" - min_val = float("0.0117598") - max_val = float("0.0756791") - mean = float("0.0256735") - std = float("0.011465") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [96] - dtype = "float32" - min_val = float("-0.116141") - max_val = float("0.0942395") - mean = float("-0.0106851") - std = float("0.0412708") - data = None - - -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.149031") - max_val = float("0.14906") - mean = float("-0.000519018") - std = float("0.0115778") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [96] - dtype = "float32" - min_val = float("-0.0986348") - max_val = float("0.227765") - mean = float("0.0619239") - std = float("0.054569") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [96] - dtype = "float32" - min_val = float("0.703927") - max_val = float("1.12526") - mean = float("0.932492") - std = float("0.0634651") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [96] - dtype = "float32" - min_val = float("0.00537723") - max_val = float("0.0583092") - mean = float("0.0120102") - std = float("0.00693174") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [96] - dtype = "float32" - min_val = float("-0.0902684") - max_val = float("0.166922") - mean = float("-0.0175149") - std = float("0.0398964") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - 
shape = [96, 448, 1, 1] - dtype = "float32" - min_val = float("-0.0952113") - max_val = float("0.110912") - mean = float("-0.000272286") - std = float("0.00775169") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [192] - dtype = "float32" - min_val = float("-0.295368") - max_val = float("0.199876") - mean = float("-0.065903") - std = float("0.0695814") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [192] - dtype = "float32" - min_val = float("0.670697") - max_val = float("1.45276") - mean = float("0.885134") - std = float("0.0783824") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [192] - dtype = "float32" - min_val = float("0.00790397") - max_val = float("0.127787") - mean = float("0.0225856") - std = float("0.0123405") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [192] - dtype = "float32" - min_val = float("-0.148651") - max_val = float("0.0483928") - mean = float("-0.0369365") - std = float("0.0357253") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0959148") - max_val = float("0.117327") - mean = float("-0.000597284") - std = float("0.00788358") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [384] - dtype = "float32" - min_val = float("-0.201782") - max_val = float("0.241811") - mean = float("-0.0670364") - std = float("0.0416536") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [384] - dtype = "float32" - min_val = float("0.87318") - max_val = float("1.54065") - mean = float("1.01926") - std = float("0.0632841") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [384] - dtype = "float32" - min_val = float("0.00737718") - max_val = float("0.076491") - mean = float("0.0155636") - std = float("0.00762575") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [384] - dtype = "float32" - min_val = float("-0.339572") - max_val = float("0.121455") - mean = float("-0.0580769") - std = float("0.0477813") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.104641") - max_val = float("0.10408") - mean = float("-0.000725338") - std = float("0.00722264") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [192] - dtype = "float32" - min_val = float("-0.176949") - max_val = float("0.00590601") - mean = float("-0.0653774") - std = float("0.0325609") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [192] - dtype = "float32" - min_val = float("0.884903") - max_val = float("0.991186") - mean = float("0.949253") - std = float("0.016433") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [192] - dtype = "float32" - min_val = float("0.00353105") - max_val = float("0.0231426") - mean = float("0.0095938") - std = float("0.0033673") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [192] - dtype = "float32" - min_val = float("-0.0792437") - max_val = float("0.0708871") - mean = float("-0.0240779") - std = float("0.0316357") - data = 
None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0569077") - max_val = float("0.0369732") - mean = float("-0.000733357") - std = float("0.00540254") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [192] - dtype = "float32" - min_val = float("-0.176949") - max_val = float("0.00590601") - mean = float("-0.0653774") - std = float("0.0325609") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [192] - dtype = "float32" - min_val = float("0.945936") - max_val = float("1.03267") - mean = float("0.988143") - std = float("0.0166204") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [192] - dtype = "float32" - min_val = float("0.0150858") - max_val = float("0.0802088") - mean = float("0.0328899") - std = float("0.0117288") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [192] - dtype = "float32" - min_val = float("-0.177971") - max_val = float("0.153985") - mean = float("-0.0232557") - std = float("0.0607795") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0444125") - max_val = float("0.0760331") - mean = float("-7.02503e-05") - std = float("0.00300584") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [192] - dtype = "float32" - min_val = float("-0.217095") - max_val = float("-0.00148132") - mean = float("-0.0741376") - std = float("0.0354109") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [192] - dtype = "float32" - min_val = float("0.939032") - max_val = float("1.15417") - mean = float("1.02943") - std = float("0.0431659") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [192] - dtype = "float32" - min_val = float("0.0360784") - max_val = float("0.225914") - mean = float("0.0635349") - std = float("0.0203909") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [192] - dtype = "float32" - min_val = float("-0.264891") - max_val = float("0.307619") - mean = float("-0.0428129") - std = float("0.0726088") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0622024") - max_val = float("0.0626876") - mean = float("-0.000102155") - std = float("0.00367047") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [192] - dtype = "float32" - min_val = float("-0.196618") - max_val = float("-0.00995733") - mean = float("-0.071187") - std = float("0.0319798") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [192] - dtype = "float32" - min_val = float("0.94411") - max_val = float("1.04693") - mean = float("0.987726") - std = float("0.0137706") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [192] - dtype = "float32" - min_val = float("0.00227428") - max_val = float("0.00943763") - mean = float("0.00475154") - std = float("0.00122457") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [192] - dtype = "float32" - min_val = float("-0.0961104") - 
max_val = float("0.0392249") - mean = float("-0.0252723") - std = float("0.0210983") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0313925") - max_val = float("0.0416139") - mean = float("-0.000809328") - std = float("0.00570058") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [192] - dtype = "float32" - min_val = float("-0.196618") - max_val = float("-0.00995733") - mean = float("-0.071187") - std = float("0.0319798") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [192] - dtype = "float32" - min_val = float("0.953711") - max_val = float("1.11463") - mean = float("1.00472") - std = float("0.0265116") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [192] - dtype = "float32" - min_val = float("0.010102") - max_val = float("0.0479077") - mean = float("0.0179937") - std = float("0.00541134") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [192] - dtype = "float32" - min_val = float("-0.189335") - max_val = float("0.144594") - mean = float("-0.0477661") - std = float("0.0469458") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0484375") - max_val = float("0.0812037") - mean = float("-0.000164179") - std = float("0.00306328") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [192] - dtype = "float32" - min_val = float("-0.232846") - max_val = float("-0.0185216") - mean = float("-0.0943343") - std = float("0.040046") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [192] - dtype = "float32" - min_val = float("0.946521") - max_val = float("1.19181") - mean = float("1.02411") - std = float("0.0460177") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [192] - dtype = "float32" - min_val = float("0.0359278") - max_val = float("0.139671") - mean = float("0.0648104") - std = float("0.0197637") - data = None - - -class Program_weight_tensor_parameter_228: - name = "parameter_228" - shape = [192] - dtype = "float32" - min_val = float("-0.353487") - max_val = float("0.265509") - mean = float("-0.0870985") - std = float("0.100017") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0611518") - max_val = float("0.0870387") - mean = float("-0.000165144") - std = float("0.00384626") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [192] - dtype = "float32" - min_val = float("-0.154886") - max_val = float("0.00333786") - mean = float("-0.0685634") - std = float("0.0234192") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [192] - dtype = "float32" - min_val = float("0.932342") - max_val = float("1.07188") - mean = float("0.998569") - std = float("0.0218607") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [192] - dtype = "float32" - min_val = float("0.00201715") - max_val = float("0.00936504") - mean = float("0.00403713") - std = float("0.00113824") - data = None - - -class Program_weight_tensor_parameter_233: - name 
= "parameter_233" - shape = [192] - dtype = "float32" - min_val = float("-0.0829276") - max_val = float("0.0996937") - mean = float("-0.0125365") - std = float("0.0205666") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0348639") - max_val = float("0.0478114") - mean = float("-0.000426039") - std = float("0.00642907") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [192] - dtype = "float32" - min_val = float("-0.154886") - max_val = float("0.00333784") - mean = float("-0.0685634") - std = float("0.0234192") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [192] - dtype = "float32" - min_val = float("0.936173") - max_val = float("1.11491") - mean = float("0.992553") - std = float("0.0259462") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [192] - dtype = "float32" - min_val = float("0.00897904") - max_val = float("0.0431413") - mean = float("0.0183886") - std = float("0.00560404") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [192] - dtype = "float32" - min_val = float("-0.280815") - max_val = float("0.147007") - mean = float("-0.0420801") - std = float("0.0463274") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0372381") - max_val = float("0.0656086") - mean = float("-0.000164115") - std = float("0.00303882") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [192] - dtype = "float32" - min_val = float("-0.289029") - max_val = float("0.0181024") - mean = float("-0.109759") - std = float("0.0400942") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [192] - dtype = "float32" - min_val = float("0.943873") - max_val = float("1.25886") - mean = float("1.02651") - std = float("0.0418277") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [192] - dtype = "float32" - min_val = float("0.0146575") - max_val = float("0.0648707") - mean = float("0.0286275") - std = float("0.00896829") - data = None - - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [192] - dtype = "float32" - min_val = float("-0.381573") - max_val = float("0.107524") - mean = float("-0.0547241") - std = float("0.0618592") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0566325") - max_val = float("0.0721215") - mean = float("-0.000213243") - std = float("0.00432258") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [192] - dtype = "float32" - min_val = float("-0.257034") - max_val = float("-0.0134244") - mean = float("-0.121787") - std = float("0.0441916") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [192] - dtype = "float32" - min_val = float("0.916942") - max_val = float("1.13523") - mean = float("1.02431") - std = float("0.042227") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [192] - dtype = "float32" - min_val = float("0.00558811") - max_val = float("0.0227368") - mean = float("0.0107834") - std = 
float("0.00302476") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [192] - dtype = "float32" - min_val = float("-0.120986") - max_val = float("0.105939") - mean = float("0.0157535") - std = float("0.0292084") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0812543") - max_val = float("0.103822") - mean = float("-0.000190185") - std = float("0.00606084") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [192] - dtype = "float32" - min_val = float("-0.176609") - max_val = float("0.214363") - mean = float("-0.00723539") - std = float("0.0506647") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [192] - dtype = "float32" - min_val = float("0.951166") - max_val = float("1.2179") - mean = float("1.05549") - std = float("0.0498193") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [192] - dtype = "float32" - min_val = float("0.0068407") - max_val = float("0.0571011") - mean = float("0.0142387") - std = float("0.00513597") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [192] - dtype = "float32" - min_val = float("-0.076614") - max_val = float("0.0818422") - mean = float("-0.000474385") - std = float("0.0276303") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [192, 896, 1, 1] - dtype = "float32" - min_val = float("-0.0552042") - max_val = float("0.102734") - mean = float("-0.000223038") - std = float("0.00619518") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [384] - dtype = "float32" - min_val = float("-0.249775") - max_val = float("-0.0568627") - mean = float("-0.125062") - std = float("0.0336773") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [384] - dtype = "float32" - min_val = float("0.814907") - max_val = float("1.01643") - mean = float("0.909518") - std = float("0.0258168") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [384] - dtype = "float32" - min_val = float("0.00972713") - max_val = float("0.0669189") - mean = float("0.022077") - std = float("0.00889464") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [384] - dtype = "float32" - min_val = float("-0.146609") - max_val = float("0.11002") - mean = float("-0.0348074") - std = float("0.0384279") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0364879") - max_val = float("0.033967") - mean = float("-0.000277781") - std = float("0.00472355") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [768] - dtype = "float32" - min_val = float("-0.104277") - max_val = float("0.0723922") - mean = float("-0.0568764") - std = float("0.0153315") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [768] - dtype = "float32" - min_val = float("0.9523") - max_val = float("1.1435") - mean = float("1.02091") - std = float("0.0210274") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [768] - dtype = "float32" - min_val = 
float("0.00431744") - max_val = float("0.0370758") - mean = float("0.00970086") - std = float("0.00356025") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [768] - dtype = "float32" - min_val = float("-0.103952") - max_val = float("0.111506") - mean = float("-0.0349987") - std = float("0.0271815") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0581811") - max_val = float("0.113051") - mean = float("-0.000304542") - std = float("0.00402831") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [384] - dtype = "float32" - min_val = float("-0.158167") - max_val = float("0.0744682") - mean = float("-0.0400513") - std = float("0.0206673") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [384] - dtype = "float32" - min_val = float("0.888577") - max_val = float("1.07465") - mean = float("0.982117") - std = float("0.0132258") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [384] - dtype = "float32" - min_val = float("0.005659") - max_val = float("0.0930103") - mean = float("0.0194514") - std = float("0.0092216") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [384] - dtype = "float32" - min_val = float("-0.0685013") - max_val = float("0.0608002") - mean = float("-0.00592014") - std = float("0.0271757") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0396793") - max_val = float("0.0737422") - mean = float("-7.2276e-05") - std = float("0.00350095") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [384] - dtype = "float32" - min_val = float("-0.158167") - max_val = float("0.0744682") - mean = float("-0.0400513") - std = float("0.0206673") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [384] - dtype = "float32" - min_val = float("0.879914") - max_val = float("1.07681") - mean = float("0.993922") - std = float("0.0123427") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [384] - dtype = "float32" - min_val = float("0.0237413") - max_val = float("0.735274") - mean = float("0.139783") - std = float("0.0640231") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [384] - dtype = "float32" - min_val = float("-0.277276") - max_val = float("0.156692") - mean = float("-0.0840022") - std = float("0.0855229") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0424879") - max_val = float("0.0475735") - mean = float("-0.0001269") - std = float("0.00130674") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [384] - dtype = "float32" - min_val = float("-0.0801146") - max_val = float("0.116771") - mean = float("-0.0189931") - std = float("0.0160256") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [384] - dtype = "float32" - min_val = float("0.920205") - max_val = float("1.16667") - mean = float("1.01504") - std = float("0.0246966") - data = None - - -class 
Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [384] - dtype = "float32" - min_val = float("0.0222636") - max_val = float("0.202839") - mean = float("0.0725926") - std = float("0.0321105") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [384] - dtype = "float32" - min_val = float("-0.233438") - max_val = float("0.219682") - mean = float("-0.023274") - std = float("0.079013") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0274571") - max_val = float("0.0359229") - mean = float("-3.21889e-05") - std = float("0.00171791") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [384] - dtype = "float32" - min_val = float("-0.0739507") - max_val = float("0.020999") - mean = float("-0.0234999") - std = float("0.0134887") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [384] - dtype = "float32" - min_val = float("0.946312") - max_val = float("1.16798") - mean = float("1.01467") - std = float("0.0273906") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [384] - dtype = "float32" - min_val = float("0.0631221") - max_val = float("0.463548") - mean = float("0.173967") - std = float("0.0694937") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [384] - dtype = "float32" - min_val = float("-1.66842") - max_val = float("1.69585") - mean = float("0.0445105") - std = float("0.596448") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [384, 1536, 1, 1] - dtype = "float32" - min_val = float("-0.0467314") - max_val = float("0.0575585") - mean = float("8.5535e-05") - std = float("0.0030071") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [384] - dtype = "float32" - min_val = float("-0.0183803") - max_val = float("0.0258619") - mean = float("-0.00144525") - std = float("0.00680649") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [384] - dtype = "float32" - min_val = float("0.969538") - max_val = float("1.06054") - mean = float("0.993834") - std = float("0.0122522") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = [384] - dtype = "float32" - min_val = float("0.00292151") - max_val = float("0.0163042") - mean = float("0.00723264") - std = float("0.0025015") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [384] - dtype = "float32" - min_val = float("-0.0974501") - max_val = float("0.0623071") - mean = float("-0.0422498") - std = float("0.0245183") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0333324") - max_val = float("0.0411082") - mean = float("-0.000526783") - std = float("0.00328183") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [384] - dtype = "float32" - min_val = float("-0.0183803") - max_val = float("0.0258619") - mean = float("-0.00144524") - std = float("0.00680649") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [384] - dtype = "float32" - min_val = float("0.972046") - max_val = 
float("1.08568") - mean = float("1.00364") - std = float("0.0181342") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [384] - dtype = "float32" - min_val = float("0.0170031") - max_val = float("0.141069") - mean = float("0.0433458") - std = float("0.0174643") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [384] - dtype = "float32" - min_val = float("-0.324472") - max_val = float("0.0923265") - mean = float("-0.132281") - std = float("0.0638993") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0285762") - max_val = float("0.0755103") - mean = float("-0.00019148") - std = float("0.0013728") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [384] - dtype = "float32" - min_val = float("-0.0498104") - max_val = float("0.00884068") - mean = float("-0.00838186") - std = float("0.00779168") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [384] - dtype = "float32" - min_val = float("0.953878") - max_val = float("1.13497") - mean = float("1.01253") - std = float("0.0201047") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [384] - dtype = "float32" - min_val = float("0.0734313") - max_val = float("0.420989") - mean = float("0.16627") - std = float("0.0472493") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [384] - dtype = "float32" - min_val = float("-1.24929") - max_val = float("0.921608") - mean = float("-0.241972") - std = float("0.271836") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0242525") - max_val = float("0.0585337") - mean = float("-0.00014178") - std = float("0.00163037") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [384] - dtype = "float32" - min_val = float("-0.0360838") - max_val = float("0.0137949") - mean = float("-0.00769057") - std = float("0.00789116") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [384] - dtype = "float32" - min_val = float("0.984179") - max_val = float("1.03462") - mean = float("0.999922") - std = float("0.00715392") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [384] - dtype = "float32" - min_val = float("0.00218605") - max_val = float("0.0108072") - mean = float("0.00399253") - std = float("0.00118789") - data = None - - -class Program_weight_tensor_parameter_303: - name = "parameter_303" - shape = [384] - dtype = "float32" - min_val = float("-0.0778225") - max_val = float("0.150166") - mean = float("-0.020106") - std = float("0.0256109") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.020915") - max_val = float("0.0327181") - mean = float("-0.0002642") - std = float("0.00284316") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [384] - dtype = "float32" - min_val = float("-0.0360838") - max_val = float("0.0137949") - mean = float("-0.00769057") - std = float("0.00789116") - data = None - - -class Program_weight_tensor_parameter_306: - name = 
"parameter_306" - shape = [384] - dtype = "float32" - min_val = float("0.982136") - max_val = float("1.06749") - mean = float("1.00454") - std = float("0.0126701") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [384] - dtype = "float32" - min_val = float("0.009878") - max_val = float("0.0736381") - mean = float("0.025173") - std = float("0.0083744") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [384] - dtype = "float32" - min_val = float("-0.234125") - max_val = float("0.373004") - mean = float("-0.0733105") - std = float("0.0699428") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0111228") - max_val = float("0.0376461") - mean = float("-0.000113877") - std = float("0.00115243") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [384] - dtype = "float32" - min_val = float("-0.0529908") - max_val = float("0.00370578") - mean = float("-0.0207007") - std = float("0.00870238") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [384] - dtype = "float32" - min_val = float("0.976061") - max_val = float("1.08549") - mean = float("1.01199") - std = float("0.0159983") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [384] - dtype = "float32" - min_val = float("0.0131757") - max_val = float("0.0743652") - mean = float("0.0315857") - std = float("0.00914424") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [384] - dtype = "float32" - min_val = float("-0.181599") - max_val = float("0.227392") - mean = float("-0.0380446") - std = float("0.0543988") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0155422") - max_val = float("0.0250036") - mean = float("-6.09728e-05") - std = float("0.00159019") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [384] - dtype = "float32" - min_val = float("-0.0699577") - max_val = float("0.021347") - mean = float("-0.0334829") - std = float("0.0126426") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [384] - dtype = "float32" - min_val = float("0.981937") - max_val = float("1.05593") - mean = float("1.0134") - std = float("0.0107706") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [384] - dtype = "float32" - min_val = float("0.00841399") - max_val = float("0.0336046") - mean = float("0.0138806") - std = float("0.00321811") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [384] - dtype = "float32" - min_val = float("-0.130353") - max_val = float("0.129976") - mean = float("-0.0125698") - std = float("0.0429897") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.0187221") - max_val = float("0.0462025") - mean = float("-0.000204289") - std = float("0.00328169") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [384] - dtype = "float32" - min_val = float("-0.0240994") - max_val = float("0.0209722") - mean = float("-0.000328404") - std = 
float("0.00796388") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [384] - dtype = "float32" - min_val = float("0.994047") - max_val = float("1.08372") - mean = float("1.04108") - std = float("0.0136739") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [384] - dtype = "float32" - min_val = float("0.0107661") - max_val = float("0.0506047") - mean = float("0.0173147") - std = float("0.00413353") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [384] - dtype = "float32" - min_val = float("-0.201399") - max_val = float("0.140707") - mean = float("-0.0130836") - std = float("0.0556937") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [384, 1024, 1, 1] - dtype = "float32" - min_val = float("-0.038196") - max_val = float("0.0298097") - mean = float("-0.00023944") - std = float("0.00387698") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [1024] - dtype = "float32" - min_val = float("-3.19613e-10") - max_val = float("2.57341e-10") - mean = float("-6.94186e-12") - std = float("8.1518e-11") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [1024] - dtype = "float32" - min_val = float("0.826158") - max_val = float("0.830526") - mean = float("0.828072") - std = float("0.000388443") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [1024] - dtype = "float32" - min_val = float("-0.0184723") - max_val = float("0.0186349") - mean = float("3.29345e-06") - std = float("0.0105958") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0186692") - max_val = float("0.0186323") - mean = float("-3.0949e-06") - std = float("0.0105631") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [2048] - dtype = "float32" - min_val = float("-0.0258373") - max_val = float("0.0258489") - mean = float("-0.000490034") - std = float("0.0147842") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0261231") - max_val = float("0.0262344") - mean = float("-1.26e-05") - std = float("0.0149406") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [1024] - dtype = "float32" - min_val = float("-0.000644044") - max_val = float("0.000416141") - mean = float("1.03571e-06") - std = float("0.00016092") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [1024] - dtype = "float32" - min_val = float("0.825074") - max_val = float("0.831152") - mean = float("0.828074") - std = float("0.000498943") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [1024] - dtype = "float32" - min_val = float("-0.00057158") - max_val = float("0.000431758") - mean = float("-6.03783e-07") - std = float("0.000151099") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0452304") - max_val = float("0.0451715") - mean = float("2.40342e-05") - std = float("0.0258606") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = 
[1024] - dtype = "float32" - min_val = float("-0.000495877") - max_val = float("0.000502274") - mean = float("2.39519e-05") - std = float("0.000158429") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [1024] - dtype = "float32" - min_val = float("0.825239") - max_val = float("0.831385") - mean = float("0.828099") - std = float("0.000479399") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [1024] - dtype = "float32" - min_val = float("-0.0182543") - max_val = float("0.0183952") - mean = float("1.83687e-06") - std = float("0.0105888") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0185874") - max_val = float("0.0186053") - mean = float("-3.09786e-06") - std = float("0.010563") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [2048] - dtype = "float32" - min_val = float("-0.0258718") - max_val = float("0.025874") - mean = float("-0.00048855") - std = float("0.0147851") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0260955") - max_val = float("0.0261498") - mean = float("-1.26e-05") - std = float("0.0149406") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [1024] - dtype = "float32" - min_val = float("-0.000468906") - max_val = float("0.000412054") - mean = float("2.06222e-06") - std = float("0.000140159") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [1024] - dtype = "float32" - min_val = float("0.825682") - max_val = float("0.831193") - mean = float("0.828073") - std = float("0.000448518") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [1024] - dtype = "float32" - min_val = float("-0.000528327") - max_val = float("0.000383813") - mean = float("2.84058e-06") - std = float("0.000141633") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0450293") - max_val = float("0.0450631") - mean = float("2.40173e-05") - std = float("0.0258607") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [1024] - dtype = "float32" - min_val = float("-0.000544222") - max_val = float("0.000596302") - mean = float("2.4209e-05") - std = float("0.00018149") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [1024] - dtype = "float32" - min_val = float("0.825946") - max_val = float("0.831225") - mean = float("0.828119") - std = float("0.000435136") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [1024] - dtype = "float32" - min_val = float("-0.0184486") - max_val = float("0.01838") - mean = float("4.31468e-06") - std = float("0.0105859") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0185587") - max_val = float("0.0185999") - mean = float("-2.9779e-06") - std = float("0.010563") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [2048] - dtype = "float32" - min_val = float("-0.0259392") - max_val = float("0.025878") - mean = 
float("-0.000488745") - std = float("0.014786") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0261446") - max_val = float("0.0261368") - mean = float("-1.26001e-05") - std = float("0.0149405") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [1024] - dtype = "float32" - min_val = float("-0.000525085") - max_val = float("0.000569726") - mean = float("1.8424e-06") - std = float("0.000180016") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [1024] - dtype = "float32" - min_val = float("0.826325") - max_val = float("0.831088") - mean = float("0.828071") - std = float("0.000422331") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [1024] - dtype = "float32" - min_val = float("-0.000560374") - max_val = float("0.00059686") - mean = float("2.25972e-06") - std = float("0.000184975") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [1024, 1024] - dtype = "float32" - min_val = float("-0.0451116") - max_val = float("0.0451354") - mean = float("2.40528e-05") - std = float("0.0258608") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [1024] - dtype = "float32" - min_val = float("-0.000823618") - max_val = float("0.000904054") - mean = float("2.92117e-05") - std = float("0.000277524") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [1024] - dtype = "float32" - min_val = float("0.826282") - max_val = float("0.830821") - mean = float("0.828142") - std = float("0.000430458") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [1024] - dtype = "float32" - min_val = float("-0.0185659") - max_val = float("0.0186153") - mean = float("4.15863e-06") - std = float("0.0105906") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [2048, 1024] - dtype = "float32" - min_val = float("-0.0186583") - max_val = float("0.0186457") - mean = float("-3.02356e-06") - std = float("0.0105631") - data = None - - -class Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [2048] - dtype = "float32" - min_val = float("-0.0260157") - max_val = float("0.0259108") - mean = float("-0.000488166") - std = float("0.0147856") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [1024, 2048] - dtype = "float32" - min_val = float("-0.0261391") - max_val = float("0.026125") - mean = float("-1.26002e-05") - std = float("0.0149405") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [1024] - dtype = "float32" - min_val = float("-0.000912874") - max_val = float("0.000860046") - mean = float("1.52602e-06") - std = float("0.000286919") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [1024] - dtype = "float32" - min_val = float("0.826227") - max_val = float("0.830736") - mean = float("0.828069") - std = float("0.000440276") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [1024] - dtype = "float32" - min_val = float("-0.000894026") - max_val = float("0.000983702") - mean = float("2.69971e-06") - std = float("0.000279123") - data = None - - -class 
Program_weight_tensor_parameter_364:
-    name = "parameter_364"
-    shape = [1024, 1024]
-    dtype = "float32"
-    min_val = float("-0.0456631")
-    max_val = float("0.0456485")
-    mean = float("2.40398e-05")
-    std = float("0.0258625")
-    data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt
index 352c4c248..951232222 100644
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/graph_hash.txt
@@ -1 +1 @@
-16190cdee49c5a19612aa893cd02068a0d7cda8a3ce3ab04b10db216f5b07563
\ No newline at end of file
+7f6edfb359b9bae12be4cef48e9822a33ba3dfa983349265b87f2f8e000679cb
\ No newline at end of file
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py
index 380d3daa6..56cfb4b20 100644
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/input_meta.py
@@ -1,31 +1,127 @@
 class Program_weight_tensor_data_0:
     name = "data_0"
-    shape = [1, 768, 34, 34]
-    dtype = "float32"
-    min_val = float("-0.278465")
-    max_val = float("7.26132")
-    mean = float("0.265664")
-    std = float("0.60913")
-    data = None
+    shape = []
+    dtype = "int64"
+    data = [49]
 
 
 class Program_weight_tensor_data_1:
     name = "data_1"
-    shape = [1, 384, 68, 68]
-    dtype = "float32"
-    min_val = float("-0.278465")
-    max_val = float("9.84456")
-    mean = float("0.366383")
-    std = float("0.709092")
-    data = None
+    shape = []
+    dtype = "int64"
+    data = [48384]
 
 
 class Program_weight_tensor_data_2:
     name = "data_2"
-    shape = [1, 192, 136, 136]
+    shape = [1, 48384]
+    dtype = "float32"
+    max_val = float("8.0")
+    mean = float("0.00859788")
+    std = float("0.140155")
+    data = None
+
+
+class Program_weight_tensor_data_3:
+    name = "data_3"
+    shape = [1, 49, 48384]
+    dtype = "float32"
+    max_val = float("0.949472")
+    mean = float("8.67853e-05")
+    std = float("0.0068947")
+    data = None
+
+
+class Program_weight_tensor_data_4:
+    name = "data_4"
+    shape = [1, 49, 48384]
+    dtype = "float32"
+    max_val = float("1.0")
+    mean = float("0.000175467")
+    std = float("0.0132452")
+    data = None
+
+
+class Program_weight_tensor_data_5:
+    name = "data_5"
+    shape = [1, 1]
+    dtype = "int32"
+    data = [0]
+
+
+class Program_weight_tensor_data_6:
+    name = "data_6"
+    shape = [1, 49, 1]
+    dtype = "int32"
+    data = [
+        0,
+        0,
+        3,
+        8,
+        3,
+        3,
+        3,
+        3,
+        3,
+        3,
+        3,
+        3,
+        4,
+        4,
+        3,
+        3,
+        3,
+        4,
+        4,
+        8,
+        3,
+        3,
+        3,
+        3,
+        8,
+        8,
+        8,
+        8,
+        8,
+        8,
+        4,
+        8,
+        8,
+        8,
+        8,
+        8,
+        5,
+        0,
+        8,
+        8,
+        8,
+        8,
+        8,
+        3,
+        3,
+        8,
+        8,
+        0,
+        4,
+    ]
+
+
+class Program_weight_tensor_data_7:
+    name = "data_7"
+    shape = [1, 49, 4]
+    dtype = "float32"
+    min_val = float("830.542")
+    max_val = float("1214.81")
+    mean = float("996.371")
+    std = float("76.2156")
+    data = None
+
+
+class Program_weight_tensor_data_8:
+    name = "data_8"
+    shape = [1, 49, 48384]
     dtype = "float32"
-    min_val = float("-0.278465")
-    max_val = float("15.7599")
-    mean = float("0.450238")
-    std = float("0.719633")
+    max_val = float("0.514231")
+    mean = float("4.88935e-06")
+    std = float("0.000998645")
     data = None
diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py
index 0ee326a64..54f7a46c8 100644
--- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py
+++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/model.py
@@ -6,1045 +6,279 @@ def __init__(self):
         super().__init__()
 
     def forward(
-        self,
-        parameter_0,
-        parameter_1,
-        parameter_2,
-        parameter_3,
-        parameter_4,
-        parameter_5,
-        parameter_6,
-        parameter_7,
-        parameter_8,
-        parameter_9,
-        parameter_10,
-        parameter_11,
-        parameter_12,
-        parameter_13,
-        parameter_14,
-        parameter_15,
-        parameter_16,
-        parameter_17,
-        parameter_18,
-        parameter_19,
-        parameter_20,
-        parameter_21,
-        parameter_22,
-        parameter_23,
-        parameter_24,
-        parameter_25,
-        parameter_26,
-        parameter_27,
-        parameter_28,
-        parameter_29,
-        parameter_30,
-        parameter_31,
-        parameter_32,
-        parameter_33,
-        parameter_34,
-        parameter_35,
-        parameter_36,
-        parameter_37,
-        parameter_38,
-        parameter_39,
-        parameter_40,
-        parameter_41,
-        parameter_42,
-        parameter_43,
-        parameter_44,
-        parameter_45,
-        parameter_46,
-        parameter_47,
-        parameter_48,
-        parameter_49,
-        parameter_50,
-        parameter_51,
-        parameter_52,
-        parameter_53,
-        data_0,
-        data_1,
-        data_2,
+        self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8
     ):
-        # pd_op.full: (1xf64) <- ()
-        full_0 = paddle._C_ops.full(
-            [1], float("0"), paddle.float64, paddle.core.CPUPlace()
-        )
-
-        # pd_op.full: (1xf64) <- ()
-        full_1 = paddle._C_ops.full(
-            [1], float("34"), paddle.float64, paddle.core.CPUPlace()
-        )
+        # pd_op.full_int_array: (1xi64) <- ()
+        full_int_array_0 = [1]
 
-        # pd_op.full: (1xf64) <- ()
-        full_2 = paddle._C_ops.full(
-            [1], float("1"), paddle.float64, paddle.core.CPUPlace()
-        )
-
-        # pd_op.arange: (34xi64) <- (1xf64, 1xf64, 1xf64)
-        arange_0 = paddle.arange(full_0, full_1, full_2, dtype="int64")
-        del full_1
-
-        # pd_op.cast: (34xf32) <- (34xi64)
-        cast_0 = paddle._C_ops.cast(arange_0, paddle.float32)
-        del arange_0
+        # pd_op.unsqueeze: (1x1x-1xf32) <- (1x-1xf32, 1xi64)
+        unsqueeze_0 = paddle._C_ops.unsqueeze(data_2, full_int_array_0)
+        del data_2
 
-        # pd_op.full: (1xf32) <- ()
-        full_3 = paddle._C_ops.full(
-            [1], float("1"), paddle.float32, paddle.core.CPUPlace()
+        # pd_op.full: (xf32) <- ()
+        full_0 = paddle._C_ops.full(
+            [], float("1"), paddle.float32, paddle.framework._current_expected_place()
         )
 
-        # pd_op.scale: (34xf32) <- (34xf32, 1xf32)
-        scale_0 = paddle._C_ops.scale(cast_0, full_3, float("0.5"), True)
-        del cast_0
+        # pd_op.greater_than: (1x1x-1xb) <- (1x1x-1xf32, xf32)
+        greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0)
+        del full_0, unsqueeze_0
 
-        # pd_op.full: (1xf32) <- ()
-        full_4 = paddle._C_ops.full(
-            [1], float("32"), paddle.float32, paddle.core.CPUPlace()
+        # pd_op.full: (xi64) <- ()
+        full_1 = paddle._C_ops.full(
+            [], float("1"), paddle.int64, paddle.core.CPUPlace()
         )
 
-        # pd_op.scale: (34xf32) <- (34xf32, 1xf32)
-        scale_1 = paddle._C_ops.scale(scale_0, full_4, float("0"), True)
-        del full_4, scale_0
-
-        # builtin.combine: ([34xf32, 34xf32]) <- (34xf32, 34xf32)
-        combine_0 = [scale_1, scale_1]
-        del scale_1
+        # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64)
+        combine_0 = [full_1, data_0, full_1]
 
-        # pd_op.meshgrid: ([34x34xf32, 34x34xf32]) <- ([34xf32, 34xf32])
-        meshgrid_0 = paddle._C_ops.meshgrid(combine_0)
+        # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64])
+        stack_0 = paddle._C_ops.stack(combine_0, 0)
         del combine_0
 
-        # builtin.split: (34x34xf32, 34x34xf32) <- ([34x34xf32, 34x34xf32])
-        (
-            split_0,
-            split_1,
-        ) = meshgrid_0
-        del meshgrid_0
-
-        # pd_op.scale: (34x34xf32) <-
(34x34xf32, 1xf32) - scale_2 = paddle._C_ops.scale(split_1, full_3, float("-80"), True) - - # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) - scale_3 = paddle._C_ops.scale(split_0, full_3, float("-80"), True) - - # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) - scale_4 = paddle._C_ops.scale(split_1, full_3, float("80"), True) - - # pd_op.scale: (34x34xf32) <- (34x34xf32, 1xf32) - scale_5 = paddle._C_ops.scale(split_0, full_3, float("80"), True) - - # builtin.combine: ([34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32]) <- (34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32) - combine_1 = [scale_2, scale_3, scale_4, scale_5] - del scale_2, scale_3, scale_4, scale_5 - - # pd_op.stack: (34x34x4xf32) <- ([34x34xf32, 34x34xf32, 34x34xf32, 34x34xf32]) - stack_0 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # builtin.combine: ([34x34xf32, 34x34xf32]) <- (34x34xf32, 34x34xf32) - combine_2 = [split_1, split_0] - del split_0, split_1 - - # pd_op.stack: (34x34x2xf32) <- ([34x34xf32, 34x34xf32]) - stack_1 = paddle._C_ops.stack(combine_2, -1) - del combine_2 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [-1, 4] - - # pd_op.reshape: (1156x4xf32) <- (34x34x4xf32, 2xi64) - reshape_0 = paddle._C_ops.reshape(stack_0, full_int_array_0) - del stack_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 2] - - # pd_op.reshape: (1156x2xf32) <- (34x34x2xf32, 2xi64) - reshape_1 = paddle._C_ops.reshape(stack_1, full_int_array_1) - del stack_1 - - # pd_op.full: (1156x1xf32) <- () - full_5 = paddle._C_ops.full( - [1156, 1], - float("32"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full: (1xf64) <- () - full_6 = paddle._C_ops.full( - [1], float("68"), paddle.float64, paddle.core.CPUPlace() - ) + # pd_op.tile: (1x-1x-1xb) <- (1x1x-1xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) + del greater_than_0, stack_0 - # pd_op.arange: (68xi64) <- (1xf64, 1xf64, 1xf64) - arange_1 = paddle.arange(full_0, full_6, full_2, dtype="int64") - del full_6 + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_1 = paddle._C_ops.multiply(data_3, data_4) - # pd_op.cast: (68xf32) <- (68xi64) - cast_1 = paddle._C_ops.cast(arange_1, paddle.float32) - del arange_1 + # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) + shape64_0 = paddle._C_ops.shape64(multiply_1) - # pd_op.scale: (68xf32) <- (68xf32, 1xf32) - scale_6 = paddle._C_ops.scale(cast_1, full_3, float("0.5"), True) - del cast_1 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [2] - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("16"), paddle.float32, paddle.core.CPUPlace() + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] ) + del full_int_array_0 - # pd_op.scale: (68xf32) <- (68xf32, 1xf32) - scale_7 = paddle._C_ops.scale(scale_6, full_7, float("0"), True) - del full_7, scale_6 - - # builtin.combine: ([68xf32, 68xf32]) <- (68xf32, 68xf32) - combine_3 = [scale_7, scale_7] - del scale_7 - - # pd_op.meshgrid: ([68x68xf32, 68x68xf32]) <- ([68xf32, 68xf32]) - meshgrid_1 = paddle._C_ops.meshgrid(combine_3) - del combine_3 - - # builtin.split: (68x68xf32, 68x68xf32) <- ([68x68xf32, 68x68xf32]) - ( - split_2, - split_3, - ) = meshgrid_1 - del meshgrid_1 - - # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) - scale_8 = paddle._C_ops.scale(split_3, full_3, float("-40"), True) - - # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) - scale_9 = paddle._C_ops.scale(split_2, 
full_3, float("-40"), True) - - # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) - scale_10 = paddle._C_ops.scale(split_3, full_3, float("40"), True) - - # pd_op.scale: (68x68xf32) <- (68x68xf32, 1xf32) - scale_11 = paddle._C_ops.scale(split_2, full_3, float("40"), True) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [3] - # builtin.combine: ([68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32]) <- (68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32) - combine_4 = [scale_8, scale_9, scale_10, scale_11] - del scale_10, scale_11, scale_8, scale_9 - - # pd_op.stack: (68x68x4xf32) <- ([68x68xf32, 68x68xf32, 68x68xf32, 68x68xf32]) - stack_2 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # builtin.combine: ([68x68xf32, 68x68xf32]) <- (68x68xf32, 68x68xf32) - combine_5 = [split_3, split_2] - del split_2, split_3 - - # pd_op.stack: (68x68x2xf32) <- ([68x68xf32, 68x68xf32]) - stack_3 = paddle._C_ops.stack(combine_5, -1) - del combine_5 - - # pd_op.reshape: (4624x4xf32) <- (68x68x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(stack_2, full_int_array_0) - del stack_2 - - # pd_op.reshape: (4624x2xf32) <- (68x68x2xf32, 2xi64) - reshape_3 = paddle._C_ops.reshape(stack_3, full_int_array_1) - del stack_3 - - # pd_op.full: (4624x1xf32) <- () - full_8 = paddle._C_ops.full( - [4624, 1], - float("16"), - paddle.float32, - paddle.framework._current_expected_place(), + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] ) + del full_int_array_1, full_int_array_2, shape64_0 - # pd_op.full: (1xf64) <- () - full_9 = paddle._C_ops.full( - [1], float("136"), paddle.float64, paddle.core.CPUPlace() + # pd_op.full: (1xi64) <- () + full_2 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.arange: (136xi64) <- (1xf64, 1xf64, 1xf64) - arange_2 = paddle.arange(full_0, full_9, full_2, dtype="int64") - del full_0, full_2, full_9 + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_2, False, False, paddle.int64) + del multiply_1 - # pd_op.cast: (136xf32) <- (136xi64) - cast_2 = paddle._C_ops.cast(arange_2, paddle.float32) - del arange_2 - - # pd_op.scale: (136xf32) <- (136xf32, 1xf32) - scale_12 = paddle._C_ops.scale(cast_2, full_3, float("0.5"), True) - del cast_2 - - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("8"), paddle.float32, paddle.core.CPUPlace() + # pd_op.one_hot: (1x-1x-1xf32) <- (1x-1xi64, xi64) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(slice_0, argmax_0.dtype), slice_0 ) + del argmax_0, slice_0 - # pd_op.scale: (136xf32) <- (136xf32, 1xf32) - scale_13 = paddle._C_ops.scale(scale_12, full_10, float("0"), True) - del full_10, scale_12 - - # builtin.combine: ([136xf32, 136xf32]) <- (136xf32, 136xf32) - combine_6 = [scale_13, scale_13] - del scale_13 - - # pd_op.meshgrid: ([136x136xf32, 136x136xf32]) <- ([136xf32, 136xf32]) - meshgrid_2 = paddle._C_ops.meshgrid(combine_6) - del combine_6 + # pd_op.transpose: (1x-1x-1xf32) <- (1x-1x-1xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 - # builtin.split: (136x136xf32, 136x136xf32) <- ([136x136xf32, 136x136xf32]) - ( - split_4, - split_5, - ) = meshgrid_2 - del meshgrid_2 + # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) + del data_4, tile_0, transpose_0 - # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) - scale_14 
= paddle._C_ops.scale(split_5, full_3, float("-20"), True) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] - # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) - scale_15 = paddle._C_ops.scale(split_4, full_3, float("-20"), True) + # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_3, None, False) - # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) - scale_16 = paddle._C_ops.scale(split_5, full_3, float("20"), True) + # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) + del full_2 - # pd_op.scale: (136x136xf32) <- (136x136xf32, 1xf32) - scale_17 = paddle._C_ops.scale(split_4, full_3, float("20"), True) - del full_3 - - # builtin.combine: ([136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32]) <- (136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32) - combine_7 = [scale_14, scale_15, scale_16, scale_17] - del scale_14, scale_15, scale_16, scale_17 - - # pd_op.stack: (136x136x4xf32) <- ([136x136xf32, 136x136xf32, 136x136xf32, 136x136xf32]) - stack_4 = paddle._C_ops.stack(combine_7, -1) - del combine_7 + # pd_op.cast: (xi32) <- (xi64) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + del data_0 - # builtin.combine: ([136x136xf32, 136x136xf32]) <- (136x136xf32, 136x136xf32) - combine_8 = [split_5, split_4] - del split_4, split_5 + # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) + multiply_2 = paddle._C_ops.multiply(data_5, cast_0) + del cast_0, data_5 - # pd_op.stack: (136x136x2xf32) <- ([136x136xf32, 136x136xf32]) - stack_5 = paddle._C_ops.stack(combine_8, -1) - del combine_8 + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_1 = paddle._C_ops.cast(multiply_2, paddle.int64) + del multiply_2 - # pd_op.reshape: (18496x4xf32) <- (136x136x4xf32, 2xi64) - reshape_4 = paddle._C_ops.reshape(stack_4, full_int_array_0) - del full_int_array_0, stack_4 + # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_1) + del argmax_1, cast_1 - # pd_op.reshape: (18496x2xf32) <- (136x136x2xf32, 2xi64) - reshape_5 = paddle._C_ops.reshape(stack_5, full_int_array_1) - del full_int_array_1, stack_5 + # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) + flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + del data_6 - # pd_op.full: (18496x1xf32) <- () - full_11 = paddle._C_ops.full( - [18496, 1], - float("8"), - paddle.float32, - paddle.framework._current_expected_place(), - ) + # pd_op.flatten: (-1xi64) <- (1x-1xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( + full_3 = paddle._C_ops.full( [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - # builtin.combine: ([1156x4xf32, 4624x4xf32, 18496x4xf32]) <- (1156x4xf32, 4624x4xf32, 18496x4xf32) - combine_9 = [reshape_0, reshape_2, reshape_4] - - # pd_op.concat: (24276x4xf32) <- ([1156x4xf32, 4624x4xf32, 18496x4xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_9, full_12) - del combine_9 - - # builtin.combine: ([1156x2xf32, 4624x2xf32, 18496x2xf32]) <- (1156x2xf32, 4624x2xf32, 18496x2xf32) - combine_10 = [reshape_1, reshape_3, reshape_5] - del reshape_1, reshape_3, reshape_5 - - # pd_op.concat: (24276x2xf32) <- ([1156x2xf32, 4624x2xf32, 18496x2xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_10, full_12) - del combine_10 - - # builtin.combine: ([1156x1xf32, 4624x1xf32, 18496x1xf32]) <- (1156x1xf32, 4624x1xf32, 18496x1xf32) - combine_11 = [full_5, full_8, full_11] - del full_11, full_5, full_8 - - # pd_op.concat: 
(24276x1xf32) <- ([1156x1xf32, 4624x1xf32, 18496x1xf32], 1xi32) - concat_4 = paddle._C_ops.concat(combine_11, full_12) - del combine_11, full_12 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_2 = [1, 1] - - # pd_op.assign: (2xi64) <- (2xi64) - assign_0 = full_int_array_2 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_1 = full_int_array_2 - - # pd_op.pool2d: (1x768x1x1xf32) <- (1x768x34x34xf32, 2xi64) - pool2d_0 = paddle._C_ops.pool2d( - data_0, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - pool2d_0, parameter_53, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_53 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_3 = [1, -1, 1, 1] - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_52, full_int_array_3) - del parameter_52 - - # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_6) - - # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) - sigmoid_0 = paddle._C_ops.sigmoid(add_0) - del add_0 - - # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, sigmoid_0) - - # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - multiply_0, parameter_51, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.add: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x34x34xf32) - add_1 = paddle._C_ops.add(swish_0, data_0) - - # pd_op.conv2d: (1x10x34x34xf32) <- (1x768x34x34xf32, 10x768x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - add_1, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_45, full_int_array_3) - del parameter_45 - - # pd_op.add: (1x10x34x34xf32) <- (1x10x34x34xf32, 1x10x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_7) - - # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) - conv2d_3 = paddle._C_ops.conv2d( - pool2d_0, parameter_44, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_44 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_43, full_int_array_3) - del parameter_43 - - # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_8) - - # pd_op.sigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) - sigmoid_1 = paddle._C_ops.sigmoid(add_3) - del add_3 - - # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) - multiply_1 = 
paddle._C_ops.multiply(data_0, sigmoid_1) - del data_0 - - # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - multiply_1, parameter_42, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_42 - - # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_41, - parameter_40, - parameter_39, - parameter_38, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_38, parameter_39, parameter_40, parameter_41 - - # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (1x88x34x34xf32) <- (1x768x34x34xf32, 88x768x3x3xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_1, parameter_37, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_37 - - # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_36, full_int_array_3) - del parameter_36 - - # pd_op.add: (1x88x34x34xf32) <- (1x88x34x34xf32, 1x88x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_5, reshape_9) - - # pd_op.sigmoid: (1x10x34x34xf32) <- (1x10x34x34xf32) - sigmoid_2 = paddle._C_ops.sigmoid(add_2) - del add_2 - - # pd_op.flatten: (1x10x1156xf32) <- (1x10x34x34xf32) - flatten_0 = paddle._C_ops.flatten(sigmoid_2, 2, 3) - - # pd_op.transpose: (1x1156x10xf32) <- (1x10x1156xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) del flatten_0 - # pd_op.flatten: (1x88x1156xf32) <- (1x88x34x34xf32) - flatten_1 = paddle._C_ops.flatten(add_4, 2, 3) - - # pd_op.transpose: (1x1156x88xf32) <- (1x88x1156xf32) - transpose_1 = paddle._C_ops.transpose(flatten_1, [0, 2, 1]) - del flatten_1 - - # pd_op.pool2d: (1x384x1x1xf32) <- (1x384x68x68xf32, 2xi64) - pool2d_1 = paddle._C_ops.pool2d( - data_1, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) - - # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - pool2d_1, parameter_35, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_35 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_34, full_int_array_3) - del parameter_34 - - # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_6, reshape_10) + # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) + combine_1 = [full_1, data_1] - # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) - sigmoid_3 = paddle._C_ops.sigmoid(add_5) - del add_5 + # pd_op.stack: (2xi64) <- ([xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_1, 0) + del combine_1 - # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) - multiply_2 = paddle._C_ops.multiply(data_1, sigmoid_3) + # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) + del gather_0, stack_1 - # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) - conv2d_7 = paddle._C_ops.conv2d( - multiply_2, parameter_33, [1, 1], 
[0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_33 - - # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_32, - parameter_31, - parameter_30, - parameter_29, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (xf32) <- () + full_4 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_29, parameter_30, parameter_31, parameter_32 - # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) - swish_2 = paddle._C_ops.swish(batch_norm__12) + # pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_4) + del full_4, sum_0 - # pd_op.add: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x68x68xf32) - add_6 = paddle._C_ops.add(swish_2, data_1) - - # pd_op.conv2d: (1x10x68x68xf32) <- (1x384x68x68xf32, 10x384x3x3xf32) - conv2d_8 = paddle._C_ops.conv2d( - add_6, parameter_28, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_28 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_27, full_int_array_3) - del parameter_27 - - # pd_op.add: (1x10x68x68xf32) <- (1x10x68x68xf32, 1x10x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_8, reshape_11) - # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) - conv2d_9 = paddle._C_ops.conv2d( - pool2d_1, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() ) - del parameter_26 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_25, full_int_array_3) - del parameter_25 - - # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_9, reshape_12) + del full_5 - # pd_op.sigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) - sigmoid_4 = paddle._C_ops.sigmoid(add_8) - del add_8 + # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) - multiply_3 = paddle._C_ops.multiply(data_1, sigmoid_4) - del data_1 - - # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) - conv2d_10 = paddle._C_ops.conv2d( - multiply_3, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_24 - - # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_23, - parameter_22, - parameter_21, - parameter_20, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, 
tuple)) - else (out, None, None, None, None, None), - ) - del parameter_20, parameter_21, parameter_22, parameter_23 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] - # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) - swish_3 = paddle._C_ops.swish(batch_norm__18) + # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_4) + del data_7, full_int_array_4 - # pd_op.conv2d: (1x88x68x68xf32) <- (1x384x68x68xf32, 88x384x3x3xf32) - conv2d_11 = paddle._C_ops.conv2d( - swish_3, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) - reshape_13 = paddle._C_ops.reshape(parameter_18, full_int_array_3) - del parameter_18 - - # pd_op.add: (1x88x68x68xf32) <- (1x88x68x68xf32, 1x88x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_11, reshape_13) - - # pd_op.sigmoid: (1x10x68x68xf32) <- (1x10x68x68xf32) - sigmoid_5 = paddle._C_ops.sigmoid(add_7) - del add_7 - - # pd_op.flatten: (1x10x4624xf32) <- (1x10x68x68xf32) - flatten_2 = paddle._C_ops.flatten(sigmoid_5, 2, 3) - - # pd_op.transpose: (1x4624x10xf32) <- (1x10x4624xf32) - transpose_2 = paddle._C_ops.transpose(flatten_2, [0, 2, 1]) - del flatten_2 - - # pd_op.flatten: (1x88x4624xf32) <- (1x88x68x68xf32) - flatten_3 = paddle._C_ops.flatten(add_9, 2, 3) - - # pd_op.transpose: (1x4624x88xf32) <- (1x88x4624xf32) - transpose_3 = paddle._C_ops.transpose(flatten_3, [0, 2, 1]) - del flatten_3 - - # pd_op.pool2d: (1x192x1x1xf32) <- (1x192x136x136xf32, 2xi64) - pool2d_2 = paddle._C_ops.pool2d( - data_2, - full_int_array_2, - [1, 1], - [0, 0], - False, - True, - "NCHW", - "avg", - False, - True, - "EXPLICIT", - ) + # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) + del flatten_1, full_3, reshape_2 - # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) - conv2d_12 = paddle._C_ops.conv2d( - pool2d_2, parameter_17, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (xi64) <- () + full_6 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() ) - del parameter_17 - - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_14 = paddle._C_ops.reshape(parameter_16, full_int_array_3) - del parameter_16 - # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_12, reshape_14) + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_2 = [full_1, data_1, full_6] + del data_1, full_1, full_6 - # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) - sigmoid_6 = paddle._C_ops.sigmoid(add_10) - del add_10 + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_2 = paddle._C_ops.stack(combine_2, 0) + del combine_2 - # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) - multiply_4 = paddle._C_ops.multiply(data_2, sigmoid_6) + # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) + del gather_1, stack_2 - # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) - conv2d_13 = paddle._C_ops.conv2d( - multiply_4, parameter_15, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = 
(lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_14, - parameter_13, - parameter_12, - parameter_11, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_11, parameter_12, parameter_13, parameter_14 - - # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) - swish_4 = paddle._C_ops.swish(batch_norm__24) - # pd_op.add: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x136x136xf32) - add_11 = paddle._C_ops.add(swish_4, data_2) - - # pd_op.conv2d: (1x10x136x136xf32) <- (1x192x136x136xf32, 10x192x3x3xf32) - conv2d_14 = paddle._C_ops.conv2d( - add_11, parameter_10, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.one_hot: (1x-1x11xf32) <- (1x-1xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 ) - del parameter_10 - - # pd_op.reshape: (1x10x1x1xf32) <- (10xf32, 4xi64) - reshape_15 = paddle._C_ops.reshape(parameter_9, full_int_array_3) - del parameter_9 + del full_7 - # pd_op.add: (1x10x136x136xf32) <- (1x10x136x136xf32, 1x10x1x1xf32) - add_12 = paddle._C_ops.add(conv2d_14, reshape_15) - - # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - pool2d_2, parameter_8, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), ) - del parameter_8 + del full_8 - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_16 = paddle._C_ops.reshape(parameter_7, full_int_array_3) - del parameter_7 + # pd_op.index_select: (1x-1x10xf32) <- (1x-1x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 - # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) - add_13 = paddle._C_ops.add(conv2d_15, reshape_16) + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_3 = paddle._C_ops.multiply(data_8, where_0) + del data_8 - # pd_op.sigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) - sigmoid_7 = paddle._C_ops.sigmoid(add_13) - del add_13 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [-1] - # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) - multiply_5 = paddle._C_ops.multiply(data_2, sigmoid_7) - del data_2 + # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_3, full_int_array_5, True) - # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - 
conv2d_16, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) + multiply_4 = paddle._C_ops.multiply(data_3, where_0) + del data_3, where_0 - # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) - swish_5 = paddle._C_ops.swish(batch_norm__30) + # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_4, full_int_array_5, True) + del multiply_4 - # pd_op.conv2d: (1x88x136x136xf32) <- (1x192x136x136xf32, 88x192x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_5, parameter_1, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_1 - - # pd_op.reshape: (1x88x1x1xf32) <- (88xf32, 4xi64) - reshape_17 = paddle._C_ops.reshape(parameter_0, full_int_array_3) - del full_int_array_3, parameter_0 - # pd_op.add: (1x88x136x136xf32) <- (1x88x136x136xf32, 1x88x1x1xf32) - add_14 = paddle._C_ops.add(conv2d_17, reshape_17) + # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 - # pd_op.sigmoid: (1x10x136x136xf32) <- (1x10x136x136xf32) - sigmoid_8 = paddle._C_ops.sigmoid(add_12) - del add_12 + # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + divide_0 = paddle._C_ops.divide(multiply_3, scale_0) + del multiply_3, scale_0 - # pd_op.flatten: (1x10x18496xf32) <- (1x10x136x136xf32) - flatten_4 = paddle._C_ops.flatten(sigmoid_8, 2, 3) + # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) + multiply_5 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.transpose: (1x18496x10xf32) <- (1x10x18496xf32) - transpose_4 = paddle._C_ops.transpose(flatten_4, [0, 2, 1]) - del flatten_4 + # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_5, full_int_array_3, False) + del full_int_array_3, multiply_5 - # pd_op.flatten: (1x88x18496xf32) <- (1x88x136x136xf32) - flatten_5 = paddle._C_ops.flatten(add_14, 2, 3) + # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) + del full_int_array_5, max_2 - # pd_op.transpose: (1x18496x88xf32) <- (1x88x18496xf32) - transpose_5 = paddle._C_ops.transpose(flatten_5, [0, 2, 1]) - del flatten_5 - - # pd_op.full: (1xi32) <- () - full_13 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_13 - - # builtin.combine: ([1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32]) <- (1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32) - combine_12 = [transpose_0, transpose_2, transpose_4] - - # pd_op.concat: (1x24276x10xf32) <- ([1x1156x10xf32, 1x4624x10xf32, 1x18496x10xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_12, full_13) - del combine_12 - - # builtin.combine: ([1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32]) <- (1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32) - combine_13 = [transpose_1, transpose_3, transpose_5] - - # pd_op.concat: (1x24276x88xf32) <- ([1x1156x88xf32, 1x4624x88xf32, 1x18496x88xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_13, full_13) - del ( - add_1, - add_11, - add_14, - add_4, - add_6, - add_9, - assign_0, 
- assign_1, - assign_2, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - combine_13, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_13, - full_int_array_2, - multiply_0, - multiply_1, - multiply_2, - multiply_3, - multiply_4, - multiply_5, - pool2d_0, - pool2d_1, - pool2d_2, - reshape_0, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_14, - reshape_15, - reshape_16, - reshape_17, - reshape_2, - reshape_4, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - sigmoid_0, - sigmoid_1, - sigmoid_2, - sigmoid_3, - sigmoid_4, - sigmoid_5, - sigmoid_6, - sigmoid_7, - sigmoid_8, - swish_0, - swish_1, - swish_2, - swish_3, - swish_4, - swish_5, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - ) + # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - return concat_0, concat_1, concat_2, concat_3, concat_4 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py index 433ec9a1b..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_3/weight_meta.py @@ -1,586 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [88] - dtype = "float32" - min_val = float("0.825624") - max_val = float("0.846159") - mean = float("0.828073") - std = float("0.00356405") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [88, 192, 3, 3] - dtype = "float32" - min_val = float("-0.120061") - max_val = float("0.122726") - mean = float("1.20344e-08") - std = float("0.00589807") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [192] - dtype = "float32" - min_val = float("-0.0433758") - max_val = float("0.207094") - mean = float("0.0514628") - std = float("0.0402492") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [192] - dtype = "float32" - min_val = float("0.850872") - max_val = float("1.63127") - mean = float("1.22454") - std = float("0.145326") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [192] - dtype = "float32" - min_val = float("0.000189735") - max_val = float("0.00828383") - mean = float("0.00133599") - std = float("0.00117081") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [192] - dtype = "float32" - min_val = float("-0.0753393") - max_val = float("0.0318059") - mean = 
float("-0.0124341") - std = float("0.0175849") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0734168") - max_val = float("0.108512") - mean = float("-0.000444445") - std = float("0.00763924") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [192] - dtype = "float32" - min_val = float("-0.00613384") - max_val = float("0.00922103") - mean = float("-7.86384e-05") - std = float("0.00341788") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.00662369") - max_val = float("0.0119822") - mean = float("-0.000127685") - std = float("0.00177164") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [10, 192, 3, 3] - dtype = "float32" - min_val = float("-0.175474") - max_val = float("0.055091") - mean = float("-0.00147039") - std = float("0.00842565") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [192] - dtype = "float32" - min_val = float("-0.329693") - max_val = float("0.892228") - mean = float("0.356694") - std = float("0.271228") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [192] - dtype = "float32" - min_val = float("1.01538") - max_val = float("1.77428") - mean = float("1.31556") - std = float("0.143187") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [192] - dtype = "float32" - min_val = float("0.000388466") - max_val = float("0.011235") - mean = float("0.00179138") - std = float("0.00163858") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [192] - dtype = "float32" - min_val = float("-0.16391") - max_val = float("0.127577") - mean = float("-0.00419185") - std = float("0.0390567") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0620339") - max_val = float("0.0578527") - mean = float("-0.000609344") - std = float("0.0074149") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [192] - dtype = "float32" - min_val = float("-0.0053139") - max_val = float("0.0128966") - mean = float("-0.000148839") - std = float("0.00226529") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0108365") - max_val = float("0.0180944") - mean = float("-7.70563e-05") - std = float("0.0014898") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [88] - dtype = "float32" - min_val = float("0.826359") - max_val = float("0.837586") - mean = float("0.828071") - std = float("0.00217956") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [88, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0854456") - max_val = float("0.0873423") - mean = float("4.34375e-09") - std = float("0.0031191") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-0.00526138") - max_val 
= float("0.0696216") - mean = float("0.0259227") - std = float("0.0132331") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("0.99865") - max_val = float("1.23747") - mean = float("1.1069") - std = float("0.0410692") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("8.30341e-05") - max_val = float("0.00596526") - mean = float("0.000721317") - std = float("0.000758143") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.044286") - max_val = float("0.00828286") - mean = float("-0.00965973") - std = float("0.0093075") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0461624") - max_val = float("0.063746") - mean = float("-0.000138651") - std = float("0.00306682") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.00274369") - max_val = float("0.00511827") - mean = float("6.10625e-05") - std = float("0.00152895") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.00184752") - max_val = float("0.0048019") - mean = float("3.76045e-06") - std = float("0.000597256") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [10, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0721721") - max_val = float("0.0404456") - mean = float("-0.000999238") - std = float("0.00397589") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.152983") - max_val = float("0.452749") - mean = float("0.229344") - std = float("0.100245") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("1.00417") - max_val = float("1.40261") - mean = float("1.1866") - std = float("0.0603403") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384] - dtype = "float32" - min_val = float("0.000150697") - max_val = float("0.00568262") - mean = float("0.000831286") - std = float("0.000745356") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("-0.0985161") - max_val = float("0.0644014") - mean = float("-0.0146302") - std = float("0.022773") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0528263") - max_val = float("0.037782") - mean = float("-0.000246446") - std = float("0.00296888") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384] - dtype = "float32" - min_val = float("-0.00198673") - max_val = float("0.0108277") - mean = float("-1.89144e-05") - std = float("0.00104636") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val 
= float("-0.00490867") - max_val = float("0.00769719") - mean = float("-1.63033e-05") - std = float("0.00053086") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [88] - dtype = "float32" - min_val = float("0.827794") - max_val = float("0.828556") - mean = float("0.828072") - std = float("0.000199979") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [88, 768, 3, 3] - dtype = "float32" - min_val = float("-0.00645056") - max_val = float("0.0120405") - mean = float("4.87489e-10") - std = float("0.000843696") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [768] - dtype = "float32" - min_val = float("-0.0143031") - max_val = float("0.0478323") - mean = float("0.0113513") - std = float("0.0104705") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("1.00867") - max_val = float("1.20113") - mean = float("1.06607") - std = float("0.0224781") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [768] - dtype = "float32" - min_val = float("3.16922e-05") - max_val = float("0.00119994") - mean = float("0.000132026") - std = float("9.51389e-05") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [768] - dtype = "float32" - min_val = float("-0.0209402") - max_val = float("0.00348513") - mean = float("-0.00472064") - std = float("0.00312486") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.034299") - max_val = float("0.0337449") - mean = float("-4.66048e-05") - std = float("0.00140342") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [768] - dtype = "float32" - min_val = float("-0.00390782") - max_val = float("0.00260919") - mean = float("7.12606e-05") - std = float("0.000831057") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.00261376") - max_val = float("0.00228504") - mean = float("1.77784e-05") - std = float("0.000252276") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [10] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [10, 768, 3, 3] - dtype = "float32" - min_val = float("-0.015813") - max_val = float("0.00965711") - mean = float("-0.000546702") - std = float("0.00140653") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [768] - dtype = "float32" - min_val = float("-0.110932") - max_val = float("0.199913") - mean = float("0.0934025") - std = float("0.0422427") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [768] - dtype = "float32" - min_val = float("1.00786") - max_val = float("1.25519") - mean = float("1.07879") - std = float("0.0261974") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [768] - dtype = "float32" - min_val = float("7.42637e-05") - max_val = float("0.00246904") - mean = float("0.000537048") - std = float("0.000318384") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [768] - 
dtype = "float32" - min_val = float("-0.0725538") - max_val = float("0.0495499") - mean = float("-0.0155163") - std = float("0.0149963") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0538655") - max_val = float("0.0252785") - mean = float("-0.000146665") - std = float("0.00153735") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [768] - dtype = "float32" - min_val = float("-0.00117685") - max_val = float("0.00393971") - mean = float("3.40689e-06") - std = float("0.000473697") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0124531") - max_val = float("0.02256") - mean = float("2.28217e-06") - std = float("0.00024775") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt index ad81bd8c6..1ba099c96 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/graph_hash.txt @@ -1 +1 @@ -534511a45272e1a9267697d5f10a3014cf1cc0c049585f9685655501ff68f0b9 \ No newline at end of file +a1e871dca6015fd870e153211f3cd48512ab629d616889d648d8f93c88df3e51 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py index 86f4e8443..0488f0946 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/input_meta.py @@ -1,7 +1,108 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1, 24276] + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("1.0") + mean = float("0.000453165") + std = float("0.0212828") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 1] + dtype = "int32" + data = [0] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [1, 12, 1] dtype = "int32" - min_val = 0 - max_val = 10 + data = [4, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [1, 27216] + dtype = "float32" + max_val = float("1.0") + mean = float("0.00543798") + std = float("0.0735419") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 12, 4] + dtype = "float32" + data = [ + 810.02, + 1015.02, + 826.828, + 1104.59, + 803.556, + 862.244, + 821.01, + 939.512, + 685.253, + 848.195, + 696.242, + 913.171, + 707.232, + 783.219, + 720.162, + 865.756, + 705.293, + 614.634, + 718.869, + 688.39, + 622.545, + 934.244, + 636.768, + 1037.85, + 625.778, + 567.219, + 640.646, + 632.195, + 605.091, + 567.219, + 617.374, + 632.195, + 538.505, + 763.902, + 548.849, + 864.0, + 536.566, + 567.219, + 550.788, + 621.659, + 513.939, + 978.146, + 529.455, + 1074.73, + 789.98, + 570.732, + 806.788, + 637.463, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("0.365907") + mean = float("5.42289e-05") + std = float("0.00308233") + data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 12, 27216] + dtype = "float32" + max_val = float("0.95733") + mean = 
float("0.00162842") + std = float("0.0295307") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py index 24c271baa..8a1f2862b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_4/model.py @@ -5,30 +5,188 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0): - # pd_op.full: (xi32) <- () + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full: (1xi64) <- () full_0 = paddle._C_ops.full( - [], float("10"), paddle.int32, paddle.framework._current_expected_place() + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.not_equal: (1x24276xb) <- (1x24276xi32, xi32) - not_equal_0 = paddle._C_ops.not_equal(data_0, full_0) - del data_0, full_0 + # pd_op.argmax: (1x27216xi64) <- (1x12x27216xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) + del full_0 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_0 = [] + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("12"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.sum: (xi64) <- (1x24276xb, 0xi64) - sum_0 = paddle._C_ops.sum(not_equal_0, full_int_array_0, None, False) - del full_int_array_0 + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_1, full_1, float("0"), True) + del data_1, full_1 - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 + + # pd_op.add: (1x27216xi64) <- (1x27216xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_0, cast_0) + del argmax_0, cast_0 + + # pd_op.flatten: (12xi32) <- (1x12x1xi32) + flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) + del data_2 + + # pd_op.flatten: (27216xi64) <- (1x27216xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 + + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.gather: (27216xi32) <- (12xi32, 27216xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) + del flatten_0 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [1, 27216] + + # pd_op.reshape: (1x27216xi32) <- (27216xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) + del full_int_array_0, gather_0 + + # pd_op.full: (xf32) <- () + full_3 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.greater_than: (xb) <- (xi64, xi64) - greater_than_0 = paddle._C_ops.greater_than(sum_0, full_1) - del full_1, not_equal_0, sum_0 + # pd_op.greater_than: (1x27216xb) <- (1x27216xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) + del data_3, full_3 + + # pd_op.full: (1xf32) <- () + full_4 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_like: (1x27216xi32) <- (1x27216xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() + ) + del full_4 + + # pd_op.where: (1x27216xi32) <- (1x27216xb, 1x27216xi32, 1x27216xi32) + where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) + 
del full_like_0, greater_than_0, reshape_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [-1, 4] + + # pd_op.reshape: (12x4xf32) <- (1x12x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) + del data_4, full_int_array_1 + + # pd_op.gather: (27216x4xf32) <- (12x4xf32, 27216xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) + del flatten_1, full_2, reshape_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 27216, 4] + + # pd_op.reshape: (1x27216x4xf32) <- (27216x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) + del full_int_array_2, gather_1 + + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x27216x11xf32) <- (1x27216xi32, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + where_0 % paddle.cast(full_5, where_0.dtype), full_5 + ) + del full_5 + + # pd_op.full: (10xi64) <- () + full_6 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_6, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_6 + + # pd_op.index_select: (1x27216x10xf32) <- (1x27216x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) + del assign_value__0, one_hot_0 + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) + multiply_1 = paddle._C_ops.multiply(data_5, data_0) + del data_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-1] + + # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) + multiply_2 = paddle._C_ops.multiply(data_6, data_0) + del data_0, data_6 + + # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) + del multiply_2 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x12x1xf32) <- (1x12x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) + del full_7, max_0 + + # pd_op.divide: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) + divide_0 = paddle._C_ops.divide(multiply_1, scale_1) + del multiply_1, scale_1 + + # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) + multiply_3 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-2] + + # pd_op.max: (1x27216xf32) <- (1x12x27216xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) + del full_int_array_4, multiply_3 + + # pd_op.unsqueeze: (1x27216x1xf32) <- (1x27216xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) + del full_int_array_3, max_2 + + # pd_op.multiply: (1x27216x10xf32) <- (1x27216x10xf32, 1x27216x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_0) + del index_select_0, unsqueeze_0, where_0 - return greater_than_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt index 7d70180d4..7376f9083 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/graph_hash.txt @@ -1 +1 @@ -6a8e990486f4d85b4371bedc5cecd294f81764cd153a3e25085e136f63ec707c \ No newline at end of file +ecf2fbd10676ba1da33488b082d96ec1f1edbf59f2c45107f85006bf675778d2 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py index 7d313afce..aba4e15ed 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/input_meta.py @@ -1,12 +1,5 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [49] - - -class Program_weight_tensor_data_1: - name = "data_1" shape = [1, 48384, 10] dtype = "float32" min_val = float("1.08574e-08") @@ -16,19 +9,19 @@ class Program_weight_tensor_data_1: data = None -class Program_weight_tensor_data_2: - name = "data_2" - shape = [1, 48384, 4] +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 48384, 88] dtype = "float32" - min_val = float("-253.983") - max_val = float("1811.4") - mean = float("768.345") - std = float("449.589") + min_val = float("-3.34129") + max_val = float("13.1745") + mean = float("0.828078") + std = float("1.50377") data = None -class Program_weight_tensor_data_3: - name = "data_3" +class Program_weight_tensor_data_2: + name = "data_2" shape = [48384, 2] dtype = "float32" min_val = float("4.0") @@ -38,8 +31,8 @@ class Program_weight_tensor_data_3: data = None -class Program_weight_tensor_data_4: - name = "data_4" +class Program_weight_tensor_data_3: + name = "data_3" shape = [48384, 1] dtype = "float32" min_val = float("8.0") @@ -47,128 +40,3 @@ class Program_weight_tensor_data_4: mean = float("10.6667") std = float("5.70157") data = None - - -class Program_weight_tensor_data_5: - name = "data_5" - shape = [1, 49, 1] - dtype = "int32" - data = [ - 0, - 0, - 3, - 8, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 4, - 4, - 3, - 3, - 3, - 4, - 4, - 8, - 3, - 3, - 3, - 3, - 8, - 8, - 8, - 8, - 8, - 8, - 4, - 8, - 8, - 8, - 8, - 8, - 5, - 0, - 8, - 8, - 8, - 8, - 8, - 3, - 3, - 8, - 8, - 0, - 4, - ] - - -class Program_weight_tensor_data_6: - name = "data_6" - shape = [1, 49, 4] - dtype = "float32" - min_val = float("830.542") - max_val = float("1214.81") - mean = float("996.371") - std = float("76.2156") - data = None - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1, 49, 1] - dtype = "float32" - data = [ - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py index 7403c5b74..80a95179a 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/model.py @@ -5,542 +5,158 @@ class GraphModule(paddle.nn.Layer): def 
__init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7): - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.equal: (xb) <- (xi64, xi64) - equal_0 = paddle._C_ops.equal(data_0, full_0) - - # pd_op.cast: (xi64) <- (xb) - cast_0 = paddle._C_ops.cast(equal_0, paddle.int64) - del equal_0 - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(cast_0, full_0) - del cast_0 - - # pd_op.cast: (xi64) <- (xb) - cast_1 = paddle._C_ops.cast(not_equal_0, paddle.int64) - del not_equal_0 + def forward(self, parameter_0, data_0, data_1, data_2, data_3): + # pd_op.divide: (-1x2xf32) <- (-1x2xf32, -1x1xf32) + divide_0 = paddle._C_ops.divide(data_2, data_3) + del data_2 - # pd_op.equal: (xb) <- (xi64, xi64) - equal_1 = paddle._C_ops.equal(cast_1, full_0) - del cast_1, full_0 + # pd_op.shape64: (3xi64) <- (1x-1x88xf32) + shape64_0 = paddle._C_ops.shape64(data_1) # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [2] - - # pd_op.unsqueeze: (1x-1x1x4xf32) <- (1x-1x4xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(data_6, full_int_array_0) - del data_6 + full_int_array_0 = [0] # pd_op.full_int_array: (1xi64) <- () full_int_array_1 = [1] - # pd_op.unsqueeze: (1x1x-1x4xf32) <- (1x-1x4xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(data_2, full_int_array_1) - del data_2 + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_1 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] ) + del full_int_array_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2147483647] + full_int_array_2 = [2] - # pd_op.slice: (1x-1x1x2xf32) <- (1x-1x1x4xf32, 1xi64, 1xi64) + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) slice_1 = paddle._C_ops.slice( - unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] - ) - - # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] - ) - del full_int_array_2 - - # pd_op.slice: (1x1x-1x2xf32) <- (1x1x-1x4xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] ) - del full_int_array_3, unsqueeze_1 - - # pd_op.maximum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) - maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) - - # pd_op.minimum: (1x-1x-1x2xf32) <- (1x-1x1x2xf32, 1x1x-1x2xf32) - minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) - - # pd_op.subtract: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1x-1x-1x2xf32) - subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) - del maximum_0, minimum_0 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.clip: (1x-1x-1x2xf32) <- (1x-1x-1x2xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(subtract_0, full_1, full_2) - del subtract_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-1] - - # pd_op.prod: 
(1x-1x-1xf32) <- (1x-1x-1x2xf32, 1xi64) - prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) - del clip_0 - - # pd_op.subtract: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1x-1x1x2xf32) - subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) - del slice_0, slice_1 - - # pd_op.clip: (1x-1x1x2xf32) <- (1x-1x1x2xf32, 1xf32, 1xf32) - clip_1 = paddle._C_ops.clip(subtract_1, full_1, full_2) - del subtract_1 - - # pd_op.prod: (1x-1x1xf32) <- (1x-1x1x2xf32, 1xi64) - prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) - del clip_1 - - # pd_op.subtract: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1x1x-1x2xf32) - subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) - del slice_2, slice_3 - - # pd_op.clip: (1x1x-1x2xf32) <- (1x1x-1x2xf32, 1xf32, 1xf32) - clip_2 = paddle._C_ops.clip(subtract_2, full_1, full_2) - del full_2, subtract_2 - - # pd_op.prod: (1x1x-1xf32) <- (1x1x-1x2xf32, 1xi64) - prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) - del clip_2 + full_int_array_3 = [3] - # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x1x-1xf32) - add_0 = paddle._C_ops.add(prod_1, prod_2) - del prod_1, prod_2 - - # pd_op.subtract: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - subtract_3 = paddle._C_ops.subtract(add_0, prod_0) - del add_0 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] ) + del full_int_array_2, full_int_array_3, shape64_0 - # pd_op.scale: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) - scale_0 = paddle._C_ops.scale(subtract_3, full_3, float("1e-09"), True) - del subtract_3 - - # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - divide_0 = paddle._C_ops.divide(prod_0, scale_0) - del prod_0, scale_0 - - # pd_op.transpose: (1x10x-1xf32) <- (1x-1x10xf32) - transpose_0 = paddle._C_ops.transpose(data_1, [0, 2, 1]) - del data_1 - - # pd_op.full: (1xf64) <- () - full_4 = paddle._C_ops.full( - [1], float("0"), paddle.float64, paddle.core.CPUPlace() + # pd_op.full: (xi64) <- () + full_0 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.full: (1xf64) <- () - full_5 = paddle._C_ops.full( - [1], float("1"), paddle.float64, paddle.core.CPUPlace() + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("4"), paddle.int64, paddle.core.CPUPlace() ) - # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) - arange_0 = paddle.arange(full_4, full_5, full_5, dtype="int32") - del full_4, full_5 - - # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) - del arange_0 - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() + full_2 = paddle._C_ops.full( + [], float("22"), paddle.int64, paddle.core.CPUPlace() ) - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_0 = [full_6, data_0] - del data_0, full_6 + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_0 = [full_0, slice_1, full_1, full_2] + del full_0, full_1, full_2, slice_1 - # pd_op.stack: (2xi64) <- ([xi64, xi64]) + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) stack_0 = paddle._C_ops.stack(combine_0, 0) del combine_0 - # pd_op.tile: (1x-1xi32) <- (1x1xi32, 2xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_2, stack_0) - del stack_0 - - # pd_op.squeeze: (1x-1xi32) <- (1x-1x1xi32, 1xi64) - squeeze_0 = 
paddle._C_ops.squeeze(data_5, full_int_array_4) - del data_5 + # pd_op.reshape: (-1x-1x4x22xf32) <- (1x-1x88xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(data_1, stack_0) + del data_1, stack_0 - # builtin.combine: ([1x-1xi32, 1x-1xi32]) <- (1x-1xi32, 1x-1xi32) - combine_1 = [tile_0, squeeze_0] - del squeeze_0, tile_0 + # pd_op.softmax: (-1x-1x4x22xf32) <- (-1x-1x4x22xf32) + softmax_0 = paddle._C_ops.softmax(reshape_0, -1) + del reshape_0 - # pd_op.stack: (1x-1x2xi32) <- ([1x-1xi32, 1x-1xi32]) - stack_1 = paddle._C_ops.stack(combine_1, -1) - del combine_1 - - # pd_op.gather_nd: (1x-1x-1xf32) <- (1x10x-1xf32, 1x-1x2xi32) - gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_1) - del stack_1, transpose_0 - - # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) - pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) - del gather_nd_0 - - # pd_op.pow: (1x-1x-1xf32) <- (1x-1x-1xf32) - pow_1 = paddle._C_ops.pow(divide_0, float("6")) - - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) - del pow_0, pow_1 + # pd_op.transpose: (-1x22x-1x4xf32) <- (-1x-1x4x22xf32) + transpose_0 = paddle._C_ops.transpose(softmax_0, [0, 3, 1, 2]) - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - multiply_1 = paddle._C_ops.multiply(multiply_0, data_7) - del multiply_0 - - # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(data_4, full_3, float("0"), True) - del data_4, full_3 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [0, 1] + # pd_op.conv2d: (-1x1x-1x4xf32) <- (-1x22x-1x4xf32, 1x22x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_0, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_0 - # pd_op.unsqueeze: (1x1x-1x2xf32) <- (-1x2xf32, 2xi64) - unsqueeze_3 = paddle._C_ops.unsqueeze(data_3, full_int_array_5) - del data_3 + # pd_op.squeeze: (-1x-1x4xf32) <- (-1x1x-1x4xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(conv2d_0, full_int_array_1) + del full_int_array_1 # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() + full_3 = paddle._C_ops.full( + [1], float("2"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.split_with_num: ([1x1x-1x1xf32, 1x1x-1x1xf32]) <- (1x1x-1x2xf32, 1xi32) - split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_7) - del unsqueeze_3 + # pd_op.split_with_num: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(squeeze_0, 2, full_3) + del squeeze_0 - # builtin.split: (1x1x-1x1xf32, 1x1x-1x1xf32) <- ([1x1x-1x1xf32, 1x1x-1x1xf32]) + # builtin.split: (-1x-1x2xf32, -1x-1x2xf32) <- ([-1x-1x2xf32, -1x-1x2xf32]) ( split_0, split_1, ) = split_with_num_0 del split_with_num_0 - # pd_op.split_with_num: ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) <- (1x-1x1x4xf32, 1xi32) - split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_7) - del full_7, unsqueeze_0 - - # builtin.split: (1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32) <- ([1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32, 1x-1x1x1xf32]) - ( - split_2, - split_3, - split_4, - split_5, - ) = split_with_num_1 - del split_with_num_1 - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x1x1xf32) - subtract_4 = paddle._C_ops.subtract(split_0, split_2) - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x1x1xf32) - subtract_5 = paddle._C_ops.subtract(split_1, split_3) - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 
1x1x-1x1xf32) - subtract_6 = paddle._C_ops.subtract(split_4, split_0) - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) - subtract_7 = paddle._C_ops.subtract(split_5, split_1) - - # pd_op.full: (1xi32) <- () - full_8 = paddle._C_ops.full( - [1], float("-1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32]) <- (1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32) - combine_2 = [subtract_4, subtract_5, subtract_6, subtract_7] - del subtract_4, subtract_5, subtract_6, subtract_7 - - # pd_op.concat: (1x-1x-1x4xf32) <- ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_2, full_8) - del combine_2 - - # pd_op.min: (1x-1x-1xf32) <- (1x-1x-1x4xf32, 1xi64) - min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) - del concat_0 - - # pd_op.full: (xf32) <- () - full_9 = paddle._C_ops.full( - [], - float("1e-09"), - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(min_0, full_9) - del min_0 - - # pd_op.unsqueeze: (1x1x-1x1xf32) <- (-1x1xf32, 2xi64) - unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_5) - del full_int_array_5, scale_1 - - # pd_op.add: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1x-1x1x1xf32) - add_1 = paddle._C_ops.add(split_2, split_4) - del split_2, split_4 - # pd_op.full: (1xf32) <- () - full_10 = paddle._C_ops.full( - [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(add_1, full_10, float("0"), True) - del add_1 - - # pd_op.add: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1x-1x1x1xf32) - add_2 = paddle._C_ops.add(split_3, split_5) - del split_3, split_5 - - # pd_op.scale: (1x-1x1x1xf32) <- (1x-1x1x1xf32, 1xf32) - scale_3 = paddle._C_ops.scale(add_2, full_10, float("0"), True) - del add_2, full_10 - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) - subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x-1x1xf32) - subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) - del subtract_8 - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) - subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x1x-1x1xf32, 1x-1x-1x1xf32) - subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) - del subtract_10 - - # pd_op.add: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) - add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) - del scale_2 - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x-1x1xf32, 1x1x-1x1xf32) - subtract_12 = paddle._C_ops.subtract(add_3, split_0) - del add_3, split_0 - - # pd_op.add: (1x-1x-1x1xf32) <- (1x-1x1x1xf32, 1x1x-1x1xf32) - add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) - del scale_3, unsqueeze_4 - - # pd_op.subtract: (1x-1x-1x1xf32) <- (1x-1x-1x1xf32, 1x1x-1x1xf32) - subtract_13 = paddle._C_ops.subtract(add_4, split_1) - del add_4, split_1 - - # builtin.combine: ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32]) <- (1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32) - combine_3 = [subtract_9, subtract_11, subtract_12, subtract_13] - del subtract_11, subtract_12, subtract_13, subtract_9 - - # pd_op.concat: (1x-1x-1x4xf32) <- ([1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32, 1x-1x-1x1xf32], 1xi32) - concat_1 = 
paddle._C_ops.concat(combine_3, full_8) - del combine_3, full_8 - - # pd_op.min: (1x-1x-1xf32) <- (1x-1x-1x4xf32, 1xi64) - min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) - del concat_1 - - # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) - greater_than_2 = paddle._C_ops.greater_than(min_1, full_9) - del full_9, min_1 - - # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) - cast_2 = paddle._C_ops.cast(greater_than_1, paddle.float32) - del greater_than_1 - - # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) - cast_3 = paddle._C_ops.cast(greater_than_2, paddle.float32) - del greater_than_2 - - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - multiply_2 = paddle._C_ops.multiply(cast_2, data_7) - del cast_2 - - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - multiply_3 = paddle._C_ops.multiply(cast_3, data_7) - del cast_3 - - # pd_op.sum: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) - del full_int_array_4 - - # pd_op.full: (xf32) <- () - full_11 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.equal: (1x-1x1xb) <- (1x-1x1xf32, xf32) - equal_2 = paddle._C_ops.equal(sum_0, full_11) - del sum_0 - - # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - add_5 = paddle._C_ops.add(multiply_1, multiply_3) - - # pd_op.full_like: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - add_5, full_1, paddle.float32, paddle.framework._current_expected_place() - ) - - # pd_op.full_like: (1x-1x-1xf32) <- (1x-1x-1xf32, 1xf32) - full_like_1 = paddle._C_ops.full_like( - multiply_1, - full_1, - paddle.float32, - paddle.framework._current_expected_place(), - ) - - # pd_op.full_like: (1x-1x1xb) <- (1x-1x1xb, 1xf32) - full_like_2 = paddle._C_ops.full_like( - equal_2, full_1, paddle.bool, paddle.framework._current_expected_place() + full_4 = paddle._C_ops.full( + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() ) - del full_1 - - # pd_op.cast: (1x-1x1xf32) <- (1x-1x1xb) - cast_4 = paddle._C_ops.cast(full_like_2, paddle.float32) - del full_like_2 - - # pd_op.cast: (1x-1x1xf32) <- (1x-1x1xb) - cast_5 = paddle._C_ops.cast(equal_2, paddle.float32) - del equal_2 - - # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - add_6 = paddle._C_ops.add(full_like_0, full_like_1) - del full_like_0, full_like_1 - - # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - add_7 = paddle._C_ops.add(add_6, cast_4) - del add_6, cast_4 - - # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - add_8 = paddle._C_ops.add(add_5, add_7) - del add_5 - - # pd_op.add: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - add_9 = paddle._C_ops.add(multiply_1, add_7) - - # pd_op.add: (1x-1x-1xf32) <- (1x-1x1xf32, 1x-1x-1xf32) - add_10 = paddle._C_ops.add(cast_5, add_7) - del add_7, cast_5 - # pd_op.cast: (1x-1x-1xb) <- (1x-1x-1xf32) - cast_6 = paddle._C_ops.cast(add_10, paddle.bool) - del add_10 + # pd_op.scale: (-1x-1x2xf32) <- (-1x-1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(split_0, full_4, float("0"), True) + del split_0 - # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) - where_0 = paddle._C_ops.where(cast_6, add_8, add_9) - del add_8, add_9, cast_6 + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_0 = paddle._C_ops.add(scale_0, divide_0) - # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(where_0) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_4 = 
paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_0, [1], [0] - ) - del full_int_array_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_6, [1], [0] - ) - del full_int_array_0, full_int_array_6, shape64_0 + # pd_op.add: (-1x-1x2xf32) <- (-1x-1x2xf32, -1x2xf32) + add_1 = paddle._C_ops.add(split_1, divide_0) # pd_op.full: (1xi32) <- () - full_12 = paddle._C_ops.full( - [1], float("13"), paddle.int32, paddle.core.CPUPlace() - ) - - # pd_op.topk: (1x-1x13xf32, 1x-1x13xi64) <- (1x-1x-1xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(where_0, full_12, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del full_12, where_0 - - # pd_op.one_hot: (1x-1x13x-1xf32) <- (1x-1x13xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - topk_1 % paddle.cast(slice_5, topk_1.dtype), slice_5 + full_5 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - del slice_5, topk_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_7 = [-2] - - # pd_op.sum: (1x-1x-1xf32) <- (1x-1x13x-1xf32, 1xi64) - sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) - del one_hot_0 - - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - multiply_4 = paddle._C_ops.multiply(sum_1, data_7) - del data_7, sum_1 - # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) - greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_11) - del multiply_3 + # builtin.combine: ([-1x-1x2xf32, -1x-1x2xf32]) <- (-1x-1x2xf32, -1x-1x2xf32) + combine_1 = [add_0, add_1] - # pd_op.greater_than: (1x-1x-1xb) <- (1x-1x-1xf32, xf32) - greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_11) - del full_11, multiply_2 - - # pd_op.bitwise_or: (1x-1x-1xb) <- (1x-1x-1xb, 1x-1x-1xb) - bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) - del greater_than_3, greater_than_4 - - # pd_op.cast: (1x-1x-1xf32) <- (1x-1x-1xb) - cast_7 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) - del bitwise_or_0 - - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - multiply_5 = paddle._C_ops.multiply(multiply_4, cast_7) - del cast_7, multiply_4 - - # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) - sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) - del full_int_array_7 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_8 = [] - - # pd_op.max: (xf32) <- (1x-1xf32, 0xi64) - max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) - del full_int_array_8 + # pd_op.concat: (-1x-1x4xf32) <- ([-1x-1x2xf32, -1x-1x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_5) + del combine_1 - # pd_op.full: (xf32) <- () - full_13 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() + # pd_op.share_data_: (1x-1x10xf32) <- (1x-1x10xf32) + share_data__0 = data_0.detach() + del data_0 + + # pd_op.share_data_: (-1x-1x4xf32) <- (-1x-1x4xf32) + share_data__1 = concat_0.detach() + + # pd_op.multiply: (-1x-1x4xf32) <- (-1x-1x4xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(share_data__1, data_3) + del ( + add_0, + add_1, + assign_0, + concat_0, + conv2d_0, + data_3, + divide_0, + full_3, + full_4, + full_5, + scale_0, + share_data__1, + softmax_0, + split_1, + transpose_0, ) - # pd_op.greater_than: (xb) <- (xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(max_0, 
full_13) - del divide_0, full_13, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 - - return greater_than_0 + return share_data__0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py index 8b1378917..a3837d8b1 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_5/weight_meta.py @@ -1 +1,7 @@ - +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [1, 22, 1, 1] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt index 951232222..30aa43ee8 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/graph_hash.txt @@ -1 +1 @@ -7f6edfb359b9bae12be4cef48e9822a33ba3dfa983349265b87f2f8e000679cb \ No newline at end of file +88501beeaec43b439a11d2de2e1804c791b519dccd7cb4a81a7324f69c27681f \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py index 56cfb4b20..66d018686 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/input_meta.py @@ -1,127 +1,222 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [49] + shape = [1] + dtype = "float32" + data = [0.724553] class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "int64" - data = [48384] + shape = [1] + dtype = "float32" + data = [0.710696] class Program_weight_tensor_data_2: name = "data_2" - shape = [1, 48384] + shape = [1] dtype = "float32" - max_val = float("8.0") - mean = float("0.00859788") - std = float("0.140155") - data = None + data = [0.69274] class Program_weight_tensor_data_3: name = "data_3" - shape = [1, 49, 48384] + shape = [1] dtype = "float32" - max_val = float("0.949472") - mean = float("8.67853e-05") - std = float("0.0068947") - data = None + data = [0.697763] class Program_weight_tensor_data_4: name = "data_4" - shape = [1, 49, 48384] + shape = [1] dtype = "float32" - max_val = float("1.0") - mean = float("0.000175467") - std = float("0.0132452") - data = None + data = [0.67767] class Program_weight_tensor_data_5: name = "data_5" - shape = [1, 1] - dtype = "int32" - data = [0] + shape = [1] + dtype = "float32" + data = [0.628229] class Program_weight_tensor_data_6: name = "data_6" - shape = [1, 49, 1] - dtype = "int32" - data = [ - 0, - 0, - 3, - 8, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 4, - 4, - 3, - 3, - 3, - 4, - 4, - 8, - 3, - 3, - 3, - 3, - 8, - 8, - 8, - 8, - 8, - 8, - 4, - 8, - 8, - 8, - 8, - 8, - 5, - 0, - 8, - 8, - 8, - 8, - 8, - 3, - 3, - 8, - 8, - 0, - 4, - ] + shape = [1] + dtype = "float32" + data = [0.643942] class Program_weight_tensor_data_7: name = "data_7" - shape = [1, 49, 4] + shape = [1] dtype = "float32" - min_val = float("830.542") - max_val = float("1214.81") - mean = float("996.371") - std = float("76.2156") - data = None + data = [0.633569] class Program_weight_tensor_data_8: name = "data_8" - shape = [1, 49, 48384] + shape = [1] 
+ dtype = "float32" + data = [0.801205] + + +class Program_weight_tensor_data_9: + name = "data_9" + shape = [1] + dtype = "float32" + data = [0.652613] + + +class Program_weight_tensor_data_10: + name = "data_10" + shape = [1] + dtype = "float32" + data = [0.636874] + + +class Program_weight_tensor_data_11: + name = "data_11" + shape = [1] + dtype = "float32" + data = [0.631148] + + +class Program_weight_tensor_data_12: + name = "data_12" + shape = [1] + dtype = "float32" + data = [0.635341] + + +class Program_weight_tensor_data_13: + name = "data_13" + shape = [1] + dtype = "float32" + data = [0.640054] + + +class Program_weight_tensor_data_14: + name = "data_14" + shape = [1] + dtype = "float32" + data = [0.755822] + + +class Program_weight_tensor_data_15: + name = "data_15" + shape = [1] + dtype = "float32" + data = [0.575326] + + +class Program_weight_tensor_data_16: + name = "data_16" + shape = [1] + dtype = "float32" + data = [0.59257] + + +class Program_weight_tensor_data_17: + name = "data_17" + shape = [1] dtype = "float32" - max_val = float("0.514231") - mean = float("4.88935e-06") - std = float("0.000998645") + data = [0.72331] + + +class Program_weight_tensor_data_18: + name = "data_18" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.033771") + max_val = float("0.0342897") + mean = float("-1.71997e-05") + std = float("0.0182992") + data = None + + +class Program_weight_tensor_data_19: + name = "data_19" + shape = [3072] + dtype = "float32" + min_val = float("-0.000858009") + max_val = float("0.000895398") + mean = float("1.43686e-06") + std = float("0.000180851") + data = None + + +class Program_weight_tensor_data_20: + name = "data_20" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0324395") + max_val = float("0.0323104") + mean = float("-1.57215e-05") + std = float("0.0182981") + data = None + + +class Program_weight_tensor_data_21: + name = "data_21" + shape = [3072] + dtype = "float32" + min_val = float("-0.000630237") + max_val = float("0.000514313") + mean = float("2.76087e-06") + std = float("0.000126903") + data = None + + +class Program_weight_tensor_data_22: + name = "data_22" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321875") + max_val = float("0.0321786") + mean = float("-1.59553e-05") + std = float("0.0182975") + data = None + + +class Program_weight_tensor_data_23: + name = "data_23" + shape = [3072] + dtype = "float32" + min_val = float("-0.000429784") + max_val = float("0.00042718") + mean = float("1.59216e-06") + std = float("8.8453e-05") + data = None + + +class Program_weight_tensor_data_24: + name = "data_24" + shape = [1024, 3072] + dtype = "float32" + min_val = float("-0.0321313") + max_val = float("0.0321203") + mean = float("-1.62062e-05") + std = float("0.018297") + data = None + + +class Program_weight_tensor_data_25: + name = "data_25" + shape = [3072] + dtype = "float32" + min_val = float("-0.000397408") + max_val = float("0.000488986") + mean = float("1.04643e-06") + std = float("8.30004e-05") + data = None + + +class Program_weight_tensor_data_26: + name = "data_26" + shape = [1, 3, 1088, 1088] + dtype = "float32" + max_val = float("1.0") + mean = float("0.443477") + std = float("0.162527") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py index 54f7a46c8..55b437f95 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/model.py @@ -6,279 +6,8869 @@ def __init__(self): super().__init__() def forward( - self, data_0, data_1, data_2, data_3, data_4, data_5, data_6, data_7, data_8 + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + parameter_24, + parameter_25, + parameter_26, + parameter_27, + parameter_28, + parameter_29, + parameter_30, + parameter_31, + parameter_32, + parameter_33, + parameter_34, + parameter_35, + parameter_36, + parameter_37, + parameter_38, + parameter_39, + parameter_40, + parameter_41, + parameter_42, + parameter_43, + parameter_44, + parameter_45, + parameter_46, + parameter_47, + parameter_48, + parameter_49, + parameter_50, + parameter_51, + parameter_52, + parameter_53, + parameter_54, + parameter_55, + parameter_56, + parameter_57, + parameter_58, + parameter_59, + parameter_60, + parameter_61, + parameter_62, + parameter_63, + parameter_64, + parameter_65, + parameter_66, + parameter_67, + parameter_68, + parameter_69, + parameter_70, + parameter_71, + parameter_72, + parameter_73, + parameter_74, + parameter_75, + parameter_76, + parameter_77, + parameter_78, + parameter_79, + parameter_80, + parameter_81, + parameter_82, + parameter_83, + parameter_84, + parameter_85, + parameter_86, + parameter_87, + parameter_88, + parameter_89, + parameter_90, + parameter_91, + parameter_92, + parameter_93, + parameter_94, + parameter_95, + parameter_96, + parameter_97, + parameter_98, + parameter_99, + parameter_100, + parameter_101, + parameter_102, + parameter_103, + parameter_104, + parameter_105, + parameter_106, + parameter_107, + parameter_108, + parameter_109, + parameter_110, + parameter_111, + parameter_112, + parameter_113, + parameter_114, + parameter_115, + parameter_116, + parameter_117, + parameter_118, + parameter_119, + parameter_120, + parameter_121, + parameter_122, + parameter_123, + parameter_124, + parameter_125, + parameter_126, + parameter_127, + parameter_128, + parameter_129, + parameter_130, + parameter_131, + parameter_132, + parameter_133, + parameter_134, + parameter_135, + parameter_136, + parameter_137, + parameter_138, + parameter_139, + parameter_140, + parameter_141, + parameter_142, + parameter_143, + parameter_144, + parameter_145, + parameter_146, + parameter_147, + parameter_148, + parameter_149, + parameter_150, + parameter_151, + parameter_152, + parameter_153, + parameter_154, + parameter_155, + parameter_156, + parameter_157, + parameter_158, + parameter_159, + parameter_160, + parameter_161, + parameter_162, + parameter_163, + parameter_164, + parameter_165, + parameter_166, + parameter_167, + parameter_168, + parameter_169, + parameter_170, + parameter_171, + parameter_172, + parameter_173, + parameter_174, + parameter_175, + parameter_176, + parameter_177, + parameter_178, + parameter_179, + parameter_180, + parameter_181, + parameter_182, + parameter_183, + parameter_184, + parameter_185, + parameter_186, + parameter_187, + parameter_188, + parameter_189, + parameter_190, + parameter_191, + parameter_192, + parameter_193, + parameter_194, + parameter_195, + parameter_196, + parameter_197, + parameter_198, + parameter_199, + parameter_200, + 
parameter_201, + parameter_202, + parameter_203, + parameter_204, + parameter_205, + parameter_206, + parameter_207, + parameter_208, + parameter_209, + parameter_210, + parameter_211, + parameter_212, + parameter_213, + parameter_214, + parameter_215, + parameter_216, + parameter_217, + parameter_218, + parameter_219, + parameter_220, + parameter_221, + parameter_222, + parameter_223, + parameter_224, + parameter_225, + parameter_226, + parameter_227, + parameter_228, + parameter_229, + parameter_230, + parameter_231, + parameter_232, + parameter_233, + parameter_234, + parameter_235, + parameter_236, + parameter_237, + parameter_238, + parameter_239, + parameter_240, + parameter_241, + parameter_242, + parameter_243, + parameter_244, + parameter_245, + parameter_246, + parameter_247, + parameter_248, + parameter_249, + parameter_250, + parameter_251, + parameter_252, + parameter_253, + parameter_254, + parameter_255, + parameter_256, + parameter_257, + parameter_258, + parameter_259, + parameter_260, + parameter_261, + parameter_262, + parameter_263, + parameter_264, + parameter_265, + parameter_266, + parameter_267, + parameter_268, + parameter_269, + parameter_270, + parameter_271, + parameter_272, + parameter_273, + parameter_274, + parameter_275, + parameter_276, + parameter_277, + parameter_278, + parameter_279, + parameter_280, + parameter_281, + parameter_282, + parameter_283, + parameter_284, + parameter_285, + parameter_286, + parameter_287, + parameter_288, + parameter_289, + parameter_290, + parameter_291, + parameter_292, + parameter_293, + parameter_294, + parameter_295, + parameter_296, + parameter_297, + parameter_298, + parameter_299, + parameter_300, + parameter_301, + parameter_302, + parameter_303, + parameter_304, + parameter_305, + parameter_306, + parameter_307, + parameter_308, + parameter_309, + parameter_310, + parameter_311, + parameter_312, + parameter_313, + parameter_314, + parameter_315, + parameter_316, + parameter_317, + parameter_318, + parameter_319, + parameter_320, + parameter_321, + parameter_322, + parameter_323, + parameter_324, + parameter_325, + parameter_326, + parameter_327, + parameter_328, + parameter_329, + parameter_330, + parameter_331, + parameter_332, + parameter_333, + parameter_334, + parameter_335, + parameter_336, + parameter_337, + parameter_338, + parameter_339, + parameter_340, + parameter_341, + parameter_342, + parameter_343, + parameter_344, + parameter_345, + parameter_346, + parameter_347, + parameter_348, + parameter_349, + parameter_350, + parameter_351, + parameter_352, + parameter_353, + parameter_354, + parameter_355, + parameter_356, + parameter_357, + parameter_358, + parameter_359, + parameter_360, + parameter_361, + parameter_362, + parameter_363, + parameter_364, + parameter_365, + parameter_366, + parameter_367, + parameter_368, + parameter_369, + parameter_370, + parameter_371, + parameter_372, + parameter_373, + parameter_374, + parameter_375, + parameter_376, + parameter_377, + parameter_378, + parameter_379, + parameter_380, + parameter_381, + parameter_382, + parameter_383, + parameter_384, + parameter_385, + parameter_386, + parameter_387, + parameter_388, + parameter_389, + parameter_390, + parameter_391, + parameter_392, + parameter_393, + parameter_394, + parameter_395, + parameter_396, + parameter_397, + parameter_398, + parameter_399, + parameter_400, + parameter_401, + parameter_402, + parameter_403, + parameter_404, + parameter_405, + parameter_406, + parameter_407, + parameter_408, + parameter_409, + 
parameter_410, + parameter_411, + parameter_412, + parameter_413, + parameter_414, + parameter_415, + parameter_416, + parameter_417, + parameter_418, + parameter_419, + parameter_420, + parameter_421, + parameter_422, + parameter_423, + parameter_424, + parameter_425, + parameter_426, + parameter_427, + parameter_428, + parameter_429, + parameter_430, + parameter_431, + parameter_432, + parameter_433, + parameter_434, + parameter_435, + parameter_436, + parameter_437, + parameter_438, + parameter_439, + parameter_440, + parameter_441, + parameter_442, + parameter_443, + parameter_444, + parameter_445, + parameter_446, + parameter_447, + parameter_448, + parameter_449, + parameter_450, + parameter_451, + parameter_452, + parameter_453, + parameter_454, + parameter_455, + parameter_456, + parameter_457, + parameter_458, + parameter_459, + parameter_460, + parameter_461, + parameter_462, + parameter_463, + parameter_464, + parameter_465, + parameter_466, + parameter_467, + parameter_468, + parameter_469, + parameter_470, + parameter_471, + parameter_472, + parameter_473, + parameter_474, + parameter_475, + parameter_476, + parameter_477, + parameter_478, + parameter_479, + parameter_480, + parameter_481, + parameter_482, + parameter_483, + parameter_484, + parameter_485, + parameter_486, + parameter_487, + parameter_488, + parameter_489, + parameter_490, + parameter_491, + parameter_492, + parameter_493, + parameter_494, + parameter_495, + parameter_496, + parameter_497, + parameter_498, + parameter_499, + parameter_500, + parameter_501, + parameter_502, + parameter_503, + parameter_504, + parameter_505, + parameter_506, + parameter_507, + parameter_508, + parameter_509, + parameter_510, + parameter_511, + parameter_512, + parameter_513, + parameter_514, + parameter_515, + parameter_516, + parameter_517, + parameter_518, + parameter_519, + parameter_520, + parameter_521, + parameter_522, + parameter_523, + parameter_524, + parameter_525, + parameter_526, + parameter_527, + parameter_528, + parameter_529, + parameter_530, + parameter_531, + parameter_532, + parameter_533, + parameter_534, + parameter_535, + parameter_536, + parameter_537, + parameter_538, + parameter_539, + parameter_540, + parameter_541, + parameter_542, + parameter_543, + parameter_544, + parameter_545, + parameter_546, + parameter_547, + parameter_548, + parameter_549, + parameter_550, + parameter_551, + parameter_552, + parameter_553, + parameter_554, + parameter_555, + parameter_556, + parameter_557, + parameter_558, + parameter_559, + parameter_560, + parameter_561, + parameter_562, + parameter_563, + parameter_564, + parameter_565, + parameter_566, + parameter_567, + parameter_568, + parameter_569, + parameter_570, + parameter_571, + parameter_572, + parameter_573, + parameter_574, + parameter_575, + parameter_576, + parameter_577, + parameter_578, + parameter_579, + parameter_580, + parameter_581, + parameter_582, + parameter_583, + parameter_584, + parameter_585, + parameter_586, + parameter_587, + parameter_588, + parameter_589, + parameter_590, + parameter_591, + parameter_592, + parameter_593, + parameter_594, + parameter_595, + parameter_596, + parameter_597, + parameter_598, + parameter_599, + parameter_600, + parameter_601, + parameter_602, + parameter_603, + parameter_604, + parameter_605, + parameter_606, + parameter_607, + parameter_608, + parameter_609, + parameter_610, + parameter_611, + parameter_612, + parameter_613, + parameter_614, + parameter_615, + parameter_616, + parameter_617, + parameter_618, + 
parameter_619, + parameter_620, + parameter_621, + parameter_622, + parameter_623, + parameter_624, + parameter_625, + parameter_626, + parameter_627, + parameter_628, + parameter_629, + parameter_630, + parameter_631, + parameter_632, + parameter_633, + parameter_634, + parameter_635, + parameter_636, + parameter_637, + parameter_638, + parameter_639, + parameter_640, + parameter_641, + parameter_642, + parameter_643, + parameter_644, + parameter_645, + parameter_646, + parameter_647, + parameter_648, + parameter_649, + parameter_650, + parameter_651, + parameter_652, + parameter_653, + parameter_654, + parameter_655, + parameter_656, + parameter_657, + parameter_658, + parameter_659, + parameter_660, + parameter_661, + parameter_662, + parameter_663, + parameter_664, + parameter_665, + parameter_666, + parameter_667, + parameter_668, + parameter_669, + parameter_670, + parameter_671, + parameter_672, + parameter_673, + parameter_674, + parameter_675, + parameter_676, + parameter_677, + parameter_678, + parameter_679, + parameter_680, + parameter_681, + parameter_682, + parameter_683, + parameter_684, + parameter_685, + parameter_686, + parameter_687, + parameter_688, + parameter_689, + parameter_690, + parameter_691, + parameter_692, + parameter_693, + parameter_694, + parameter_695, + parameter_696, + parameter_697, + parameter_698, + parameter_699, + parameter_700, + parameter_701, + parameter_702, + parameter_703, + parameter_704, + parameter_705, + parameter_706, + parameter_707, + parameter_708, + parameter_709, + parameter_710, + parameter_711, + parameter_712, + parameter_713, + parameter_714, + parameter_715, + parameter_716, + parameter_717, + parameter_718, + parameter_719, + parameter_720, + parameter_721, + parameter_722, + parameter_723, + parameter_724, + parameter_725, + parameter_726, + parameter_727, + parameter_728, + parameter_729, + parameter_730, + parameter_731, + parameter_732, + parameter_733, + parameter_734, + parameter_735, + parameter_736, + parameter_737, + data_0, + data_1, + data_2, + data_3, + data_4, + data_5, + data_6, + data_7, + data_8, + data_9, + data_10, + data_11, + data_12, + data_13, + data_14, + data_15, + data_16, + data_17, + data_18, + data_19, + data_20, + data_21, + data_22, + data_23, + data_24, + data_25, + data_26, ): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] + # pd_op.conv2d: (1x32x544x544xf32) <- (1x3x1088x1088xf32, 32x3x3x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + data_26, parameter_737, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del data_26, parameter_737 + + # pd_op.batch_norm_: (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__0, + batch_norm__1, + batch_norm__2, + batch_norm__3, + batch_norm__4, + batch_norm__5, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_0, + parameter_736, + parameter_735, + parameter_734, + parameter_733, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_733, parameter_734, parameter_735, parameter_736 + + # pd_op.swish: (1x32x544x544xf32) <- (1x32x544x544xf32) + swish_1 = paddle._C_ops.swish(batch_norm__0) + + # pd_op.conv2d: (1x32x544x544xf32) <- (1x32x544x544xf32, 32x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + swish_1, parameter_732, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_732 + + # pd_op.batch_norm_: 
(1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x544x544xf32, 32xf32, 32xf32, 32xf32, 32xf32) + ( + batch_norm__6, + batch_norm__7, + batch_norm__8, + batch_norm__9, + batch_norm__10, + batch_norm__11, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_1, + parameter_731, + parameter_730, + parameter_729, + parameter_728, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_728, parameter_729, parameter_730, parameter_731 + + # pd_op.swish: (1x32x544x544xf32) <- (1x32x544x544xf32) + swish_2 = paddle._C_ops.swish(batch_norm__6) + + # pd_op.conv2d: (1x64x544x544xf32) <- (1x32x544x544xf32, 64x32x3x3xf32) + conv2d_2 = paddle._C_ops.conv2d( + swish_2, parameter_727, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_727 + + # pd_op.batch_norm_: (1x64x544x544xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x544x544xf32, 64xf32, 64xf32, 64xf32, 64xf32) + ( + batch_norm__12, + batch_norm__13, + batch_norm__14, + batch_norm__15, + batch_norm__16, + batch_norm__17, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_2, + parameter_726, + parameter_725, + parameter_724, + parameter_723, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_723, parameter_724, parameter_725, parameter_726 + + # pd_op.swish: (1x64x544x544xf32) <- (1x64x544x544xf32) + swish_3 = paddle._C_ops.swish(batch_norm__12) + + # pd_op.conv2d: (1x96x272x272xf32) <- (1x64x544x544xf32, 96x64x3x3xf32) + conv2d_3 = paddle._C_ops.conv2d( + swish_3, parameter_722, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_722 + + # pd_op.batch_norm_: (1x96x272x272xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x272x272xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__18, + batch_norm__19, + batch_norm__20, + batch_norm__21, + batch_norm__22, + batch_norm__23, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_3, + parameter_721, + parameter_720, + parameter_719, + parameter_718, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_718, parameter_719, parameter_720, parameter_721 + + # pd_op.swish: (1x96x272x272xf32) <- (1x96x272x272xf32) + swish_4 = paddle._C_ops.swish(batch_norm__18) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x96x272x272xf32, 48x96x1x1xf32) + conv2d_4 = paddle._C_ops.conv2d( + swish_4, parameter_717, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_717 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__24, + batch_norm__25, + batch_norm__26, + batch_norm__27, + batch_norm__28, + batch_norm__29, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_4, + parameter_716, + parameter_715, + parameter_714, + parameter_713, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_713, parameter_714, parameter_715, parameter_716 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_5 = paddle._C_ops.swish(batch_norm__24) + + # pd_op.conv2d: 
(1x48x272x272xf32) <- (1x96x272x272xf32, 48x96x1x1xf32) + conv2d_5 = paddle._C_ops.conv2d( + swish_4, parameter_712, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_712 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__30, + batch_norm__31, + batch_norm__32, + batch_norm__33, + batch_norm__34, + batch_norm__35, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_5, + parameter_711, + parameter_710, + parameter_709, + parameter_708, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_708, parameter_709, parameter_710, parameter_711 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_6 = paddle._C_ops.swish(batch_norm__30) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_6 = paddle._C_ops.conv2d( + swish_6, parameter_707, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_707 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__36, + batch_norm__37, + batch_norm__38, + batch_norm__39, + batch_norm__40, + batch_norm__41, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_6, + parameter_706, + parameter_705, + parameter_704, + parameter_703, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_703, parameter_704, parameter_705, parameter_706 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_7 = paddle._C_ops.swish(batch_norm__36) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + swish_7, parameter_702, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_702 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__42, + batch_norm__43, + batch_norm__44, + batch_norm__45, + batch_norm__46, + batch_norm__47, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_7, + parameter_701, + parameter_700, + parameter_699, + parameter_698, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_698, parameter_699, parameter_700, parameter_701 + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) + conv2d_8 = paddle._C_ops.conv2d( + swish_7, parameter_697, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_697 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__48, + batch_norm__49, + batch_norm__50, + batch_norm__51, + batch_norm__52, + batch_norm__53, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_8, + parameter_696, + parameter_695, + parameter_694, + parameter_693, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_693, parameter_694, parameter_695, parameter_696 + + # 
pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) + multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) + del data_0 + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_8 = paddle._C_ops.swish(add_0) + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_1 = paddle._C_ops.add(swish_6, swish_8) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_9 = paddle._C_ops.conv2d( + add_1, parameter_692, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_692 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__54, + batch_norm__55, + batch_norm__56, + batch_norm__57, + batch_norm__58, + batch_norm__59, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_9, + parameter_691, + parameter_690, + parameter_689, + parameter_688, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_688, parameter_689, parameter_690, parameter_691 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_9 = paddle._C_ops.swish(batch_norm__54) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_10 = paddle._C_ops.conv2d( + swish_9, parameter_687, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_687 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__60, + batch_norm__61, + batch_norm__62, + batch_norm__63, + batch_norm__64, + batch_norm__65, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_10, + parameter_686, + parameter_685, + parameter_684, + parameter_683, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_683, parameter_684, parameter_685, parameter_686 + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) + conv2d_11 = paddle._C_ops.conv2d( + swish_9, parameter_682, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_682 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__66, + batch_norm__67, + batch_norm__68, + batch_norm__69, + batch_norm__70, + batch_norm__71, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_11, + parameter_681, + parameter_680, + parameter_679, + parameter_678, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_678, parameter_679, parameter_680, parameter_681 + + # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) + multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) + del data_1 + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_10 = paddle._C_ops.swish(add_2) - # pd_op.unsqueeze: (1x1x-1xf32) <- (1x-1xf32, 1xi64) - unsqueeze_0 = 
paddle._C_ops.unsqueeze(data_2, full_int_array_0) + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_3 = paddle._C_ops.add(add_1, swish_10) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_12 = paddle._C_ops.conv2d( + add_3, parameter_677, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_677 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__72, + batch_norm__73, + batch_norm__74, + batch_norm__75, + batch_norm__76, + batch_norm__77, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_12, + parameter_676, + parameter_675, + parameter_674, + parameter_673, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_673, parameter_674, parameter_675, parameter_676 + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_11 = paddle._C_ops.swish(batch_norm__72) + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x3x3xf32) + conv2d_13 = paddle._C_ops.conv2d( + swish_11, parameter_672, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_672 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__78, + batch_norm__79, + batch_norm__80, + batch_norm__81, + batch_norm__82, + batch_norm__83, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_13, + parameter_671, + parameter_670, + parameter_669, + parameter_668, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_668, parameter_669, parameter_670, parameter_671 + + # pd_op.conv2d: (1x48x272x272xf32) <- (1x48x272x272xf32, 48x48x1x1xf32) + conv2d_14 = paddle._C_ops.conv2d( + swish_11, parameter_667, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_667 + + # pd_op.batch_norm_: (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x272x272xf32, 48xf32, 48xf32, 48xf32, 48xf32) + ( + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_14, + parameter_666, + parameter_665, + parameter_664, + parameter_663, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_663, parameter_664, parameter_665, parameter_666 + + # pd_op.multiply: (1x48x272x272xf32) <- (1xf32, 1x48x272x272xf32) + multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) del data_2 - # pd_op.full: (xf32) <- () + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) + + # pd_op.swish: (1x48x272x272xf32) <- (1x48x272x272xf32) + swish_12 = paddle._C_ops.swish(add_4) + + # pd_op.add: (1x48x272x272xf32) <- (1x48x272x272xf32, 1x48x272x272xf32) + add_5 = paddle._C_ops.add(add_3, swish_12) + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [], float("1"), paddle.float32, paddle.framework._current_expected_place() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.greater_than: (1x1x-1xb) <- 
(1x1x-1xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) - del full_0, unsqueeze_0 + # pd_op.assign: (1xi32) <- (1xi32) + assign_0 = full_0 - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("1"), paddle.int64, paddle.core.CPUPlace() - ) + # pd_op.assign: (1xi32) <- (1xi32) + assign_1 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_2 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_3 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_4 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_5 = full_0 - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_0 = [full_1, data_0, full_1] + # pd_op.assign: (1xi32) <- (1xi32) + assign_6 = full_0 - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) + # pd_op.assign: (1xi32) <- (1xi32) + assign_7 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_8 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_9 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_10 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_11 = full_0 + + # pd_op.assign: (1xi32) <- (1xi32) + assign_12 = full_0 + + # builtin.combine: ([1x48x272x272xf32, 1x48x272x272xf32]) <- (1x48x272x272xf32, 1x48x272x272xf32) + combine_0 = [swish_5, add_5] + + # pd_op.concat: (1x96x272x272xf32) <- ([1x48x272x272xf32, 1x48x272x272xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_0) del combine_0 - # pd_op.tile: (1x-1x-1xb) <- (1x1x-1xb, 3xi64) - tile_0 = paddle._C_ops.tile(greater_than_0, stack_0) - del greater_than_0, stack_0 + # pd_op.full_int_array: (2xi64) <- () + full_int_array_0 = [2, 3] + + # pd_op.assign: (2xi64) <- (2xi64) + assign_13 = full_int_array_0 - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(data_3, data_4) + # pd_op.assign: (2xi64) <- (2xi64) + assign_14 = full_int_array_0 - # pd_op.shape64: (3xi64) <- (1x-1x-1xf32) - shape64_0 = paddle._C_ops.shape64(multiply_1) + # pd_op.assign: (2xi64) <- (2xi64) + assign_15 = full_int_array_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [2] + # pd_op.mean: (1x96x1x1xf32) <- (1x96x272x272xf32, 2xi64) + mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] + # pd_op.conv2d: (1x96x1x1xf32) <- (1x96x1x1xf32, 96x96x1x1xf32) + conv2d_15 = paddle._C_ops.conv2d( + mean_0, parameter_662, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_int_array_0 + del parameter_662 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [3] + # pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, -1, 1, 1] - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_2, [1], [0] + # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(parameter_661, full_int_array_1) + del parameter_661 + + # pd_op.add: (1x96x1x1xf32) <- (1x96x1x1xf32, 1x96x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_15, reshape_0) + + # pd_op.hardsigmoid: (1x96x1x1xf32) <- (1x96x1x1xf32) + hardsigmoid_0 = paddle._C_ops.hardsigmoid( + add_6, float("0.166667"), float("0.5") ) - del full_int_array_1, full_int_array_2, shape64_0 + del add_6 - # pd_op.full: (1xi64) <- () - full_2 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + # pd_op.multiply: 
(1x96x272x272xf32) <- (1x96x272x272xf32, 1x96x1x1xf32) + multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) + + # pd_op.conv2d: (1x128x272x272xf32) <- (1x96x272x272xf32, 128x96x1x1xf32) + conv2d_16 = paddle._C_ops.conv2d( + multiply_3, parameter_660, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_660 + + # pd_op.batch_norm_: (1x128x272x272xf32, 128xf32, 128xf32, 128xf32, 128xf32, -1xui8) <- (1x128x272x272xf32, 128xf32, 128xf32, 128xf32, 128xf32) + ( + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_16, + parameter_659, + parameter_658, + parameter_657, + parameter_656, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_656, parameter_657, parameter_658, parameter_659 - # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(multiply_1, full_2, False, False, paddle.int64) - del multiply_1 + # pd_op.swish: (1x128x272x272xf32) <- (1x128x272x272xf32) + swish_13 = paddle._C_ops.swish(batch_norm__90) - # pd_op.one_hot: (1x-1x-1xf32) <- (1x-1xi64, xi64) - one_hot_0 = paddle._C_ops.one_hot( - argmax_0 % paddle.cast(slice_0, argmax_0.dtype), slice_0 + # pd_op.conv2d: (1x192x136x136xf32) <- (1x128x272x272xf32, 192x128x3x3xf32) + conv2d_17 = paddle._C_ops.conv2d( + swish_13, parameter_655, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del argmax_0, slice_0 + del parameter_655 - # pd_op.transpose: (1x-1x-1xf32) <- (1x-1x-1xf32) - transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) - del one_hot_0 + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + batch_norm__100, + batch_norm__101, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_17, + parameter_654, + parameter_653, + parameter_652, + parameter_651, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_651, parameter_652, parameter_653, parameter_654 - # pd_op.where: (1x-1x-1xf32) <- (1x-1x-1xb, 1x-1x-1xf32, 1x-1x-1xf32) - where_0 = paddle._C_ops.where(tile_0, transpose_0, data_4) - del data_4, tile_0, transpose_0 + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) + swish_14 = paddle._C_ops.swish(batch_norm__96) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-2] + # pd_op.conv2d: (1x96x136x136xf32) <- (1x192x136x136xf32, 96x192x1x1xf32) + conv2d_18 = paddle._C_ops.conv2d( + swish_14, parameter_650, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_650 - # pd_op.sum: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) - sum_0 = paddle._C_ops.sum(where_0, full_int_array_3, None, False) + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_18, + parameter_649, + parameter_648, + parameter_647, + parameter_646, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, 
(list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_646, parameter_647, parameter_648, parameter_649 - # pd_op.argmax: (1x-1xi64) <- (1x-1x-1xf32, 1xi64) - argmax_1 = paddle._C_ops.argmax(where_0, full_2, False, False, paddle.int64) - del full_2 + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_15 = paddle._C_ops.swish(batch_norm__102) - # pd_op.cast: (xi32) <- (xi64) - cast_0 = paddle._C_ops.cast(data_0, paddle.int32) - del data_0 + # pd_op.conv2d: (1x96x136x136xf32) <- (1x192x136x136xf32, 96x192x1x1xf32) + conv2d_19 = paddle._C_ops.conv2d( + swish_14, parameter_645, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_645 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__108, + batch_norm__109, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_19, + parameter_644, + parameter_643, + parameter_642, + parameter_641, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_641, parameter_642, parameter_643, parameter_644 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_16 = paddle._C_ops.swish(batch_norm__108) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_20 = paddle._C_ops.conv2d( + swish_16, parameter_640, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_640 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_20, + parameter_639, + parameter_638, + parameter_637, + parameter_636, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_636, parameter_637, parameter_638, parameter_639 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_17 = paddle._C_ops.swish(batch_norm__114) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_21 = paddle._C_ops.conv2d( + swish_17, parameter_635, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_635 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_21, + parameter_634, + parameter_633, + parameter_632, + parameter_631, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_631, parameter_632, parameter_633, parameter_634 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_22 = paddle._C_ops.conv2d( + swish_17, parameter_630, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_630 - # pd_op.multiply: (1x1xi32) <- (1x1xi32, xi32) - multiply_2 = paddle._C_ops.multiply(data_5, cast_0) - del 
cast_0, data_5 + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__130, + batch_norm__131, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_22, + parameter_629, + parameter_628, + parameter_627, + parameter_626, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_626, parameter_627, parameter_628, parameter_629 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) + del data_3 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_18 = paddle._C_ops.swish(add_7) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_8 = paddle._C_ops.add(swish_16, swish_18) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_23 = paddle._C_ops.conv2d( + add_8, parameter_625, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_625 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_23, + parameter_624, + parameter_623, + parameter_622, + parameter_621, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_621, parameter_622, parameter_623, parameter_624 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_19 = paddle._C_ops.swish(batch_norm__132) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_24 = paddle._C_ops.conv2d( + swish_19, parameter_620, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_620 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__138, + batch_norm__139, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_24, + parameter_619, + parameter_618, + parameter_617, + parameter_616, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_616, parameter_617, parameter_618, parameter_619 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_25 = paddle._C_ops.conv2d( + swish_19, parameter_615, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_615 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_25, + parameter_614, + parameter_613, + parameter_612, + parameter_611, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_611, parameter_612, parameter_613, parameter_614 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + del data_4 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_20 = paddle._C_ops.swish(add_9) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_10 = paddle._C_ops.add(add_8, swish_20) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_26 = paddle._C_ops.conv2d( + add_10, parameter_610, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_610 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_26, + parameter_609, + parameter_608, + parameter_607, + parameter_606, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_606, parameter_607, parameter_608, parameter_609 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_21 = paddle._C_ops.swish(batch_norm__150) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_27 = paddle._C_ops.conv2d( + swish_21, parameter_605, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_605 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__160, + batch_norm__161, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_27, + parameter_604, + parameter_603, + parameter_602, + parameter_601, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_601, parameter_602, parameter_603, parameter_604 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_28 = paddle._C_ops.conv2d( + swish_21, parameter_600, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_600 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_28, + parameter_599, + parameter_598, + parameter_597, + parameter_596, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_596, parameter_597, parameter_598, parameter_599 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) + del data_5 + + # pd_op.add: 
(1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_22 = paddle._C_ops.swish(add_11) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_12 = paddle._C_ops.add(add_10, swish_22) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_29 = paddle._C_ops.conv2d( + add_12, parameter_595, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_595 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__168, + batch_norm__169, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_29, + parameter_594, + parameter_593, + parameter_592, + parameter_591, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_591, parameter_592, parameter_593, parameter_594 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_23 = paddle._C_ops.swish(batch_norm__168) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_30 = paddle._C_ops.conv2d( + swish_23, parameter_590, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_590 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_30, + parameter_589, + parameter_588, + parameter_587, + parameter_586, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_586, parameter_587, parameter_588, parameter_589 - # pd_op.cast: (1x1xi64) <- (1x1xi32) - cast_1 = paddle._C_ops.cast(multiply_2, paddle.int64) - del multiply_2 + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_31 = paddle._C_ops.conv2d( + swish_23, parameter_585, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_585 - # pd_op.add: (1x-1xi64) <- (1x-1xi64, 1x1xi64) - add_0 = paddle._C_ops.add(argmax_1, cast_1) - del argmax_1, cast_1 + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__180, + batch_norm__181, + batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_31, + parameter_584, + parameter_583, + parameter_582, + parameter_581, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_581, parameter_582, parameter_583, parameter_584 - # pd_op.flatten: (-1xi32) <- (1x-1x1xi32) - flatten_0 = paddle._C_ops.flatten(data_6, 0, 2) + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) del data_6 - # pd_op.flatten: (-1xi64) <- (1x-1xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 + # 
pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_24 = paddle._C_ops.swish(add_13) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_14 = paddle._C_ops.add(add_12, swish_24) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_32 = paddle._C_ops.conv2d( + add_14, parameter_580, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_580 - # pd_op.gather: (-1xi32) <- (-1xi32, -1xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_3) - del flatten_0 + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__190, + batch_norm__191, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_32, + parameter_579, + parameter_578, + parameter_577, + parameter_576, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_576, parameter_577, parameter_578, parameter_579 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_25 = paddle._C_ops.swish(batch_norm__186) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_33 = paddle._C_ops.conv2d( + swish_25, parameter_575, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_575 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_33, + parameter_574, + parameter_573, + parameter_572, + parameter_571, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_571, parameter_572, parameter_573, parameter_574 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_34 = paddle._C_ops.conv2d( + swish_25, parameter_570, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_570 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__198, + batch_norm__199, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_34, + parameter_569, + parameter_568, + parameter_567, + parameter_566, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_566, parameter_567, parameter_568, parameter_569 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) + del data_7 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) + + # pd_op.swish: (1x96x136x136xf32) 
<- (1x96x136x136xf32) + swish_26 = paddle._C_ops.swish(add_15) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_16 = paddle._C_ops.add(add_14, swish_26) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_35 = paddle._C_ops.conv2d( + add_16, parameter_565, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_565 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_35, + parameter_564, + parameter_563, + parameter_562, + parameter_561, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_561, parameter_562, parameter_563, parameter_564 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_27 = paddle._C_ops.swish(batch_norm__204) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_36 = paddle._C_ops.conv2d( + swish_27, parameter_560, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_560 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_36, + parameter_559, + parameter_558, + parameter_557, + parameter_556, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_556, parameter_557, parameter_558, parameter_559 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_37 = paddle._C_ops.conv2d( + swish_27, parameter_555, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_555 - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_1 = [full_1, data_1] + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__220, + batch_norm__221, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_37, + parameter_554, + parameter_553, + parameter_552, + parameter_551, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_551, parameter_552, parameter_553, parameter_554 + + # pd_op.multiply: (1x96x136x136xf32) <- (1xf32, 1x96x136x136xf32) + multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) + del data_8 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_28 = paddle._C_ops.swish(add_17) + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_18 = paddle._C_ops.add(add_16, swish_28) + + # builtin.combine: ([1x96x136x136xf32, 
1x96x136x136xf32]) <- (1x96x136x136xf32, 1x96x136x136xf32) + combine_1 = [swish_15, add_18] + + # pd_op.concat: (1x192x136x136xf32) <- ([1x96x136x136xf32, 1x96x136x136xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_1, full_0) del combine_1 - # pd_op.reshape: (1x-1xi32) <- (-1xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, stack_1) - del gather_0, stack_1 + # pd_op.mean: (1x192x1x1xf32) <- (1x192x136x136xf32, 2xi64) + mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) - # pd_op.full: (xf32) <- () - full_4 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() + # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) + conv2d_38 = paddle._C_ops.conv2d( + mean_1, parameter_550, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_550 - # pd_op.greater_than: (1x-1xb) <- (1x-1xf32, xf32) - greater_than_1 = paddle._C_ops.greater_than(sum_0, full_4) - del full_4, sum_0 + # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_549, full_int_array_1) + del parameter_549 - # pd_op.full: (1xf32) <- () - full_5 = paddle._C_ops.full( - [1], float("10"), paddle.float32, paddle.core.CPUPlace() + # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) + add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + + # pd_op.hardsigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) + hardsigmoid_1 = paddle._C_ops.hardsigmoid( + add_19, float("0.166667"), float("0.5") + ) + del add_19 + + # pd_op.multiply: (1x192x136x136xf32) <- (1x192x136x136xf32, 1x192x1x1xf32) + multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) + + # pd_op.conv2d: (1x256x136x136xf32) <- (1x192x136x136xf32, 256x192x1x1xf32) + conv2d_39 = paddle._C_ops.conv2d( + multiply_10, parameter_548, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_548 - # pd_op.full_like: (1x-1xi32) <- (1x-1xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_5, paddle.int32, paddle.framework._current_expected_place() + # pd_op.batch_norm_: (1x256x136x136xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x136x136xf32, 256xf32, 256xf32, 256xf32, 256xf32) + ( + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_39, + parameter_547, + parameter_546, + parameter_545, + parameter_544, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) - del full_5 + del parameter_544, parameter_545, parameter_546, parameter_547 - # pd_op.where: (1x-1xi32) <- (1x-1xb, 1x-1xi32, 1x-1xi32) - where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) - del full_like_0, greater_than_1, reshape_1 + # pd_op.swish: (1x256x136x136xf32) <- (1x256x136x136xf32) + swish_29 = paddle._C_ops.swish(batch_norm__222) - # pd_op.full_int_array: (2xi64) <- () - full_int_array_4 = [-1, 4] + # pd_op.conv2d: (1x384x68x68xf32) <- (1x256x136x136xf32, 384x256x3x3xf32) + conv2d_40 = paddle._C_ops.conv2d( + swish_29, parameter_543, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_543 - # pd_op.reshape: (-1x4xf32) <- (1x-1x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_7, full_int_array_4) - del data_7, full_int_array_4 + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + 
( + batch_norm__228, + batch_norm__229, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_40, + parameter_542, + parameter_541, + parameter_540, + parameter_539, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_539, parameter_540, parameter_541, parameter_542 - # pd_op.gather: (-1x4xf32) <- (-1x4xf32, -1xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_3) - del flatten_1, full_3, reshape_2 + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_30 = paddle._C_ops.swish(batch_norm__228) - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("4"), paddle.int64, paddle.core.CPUPlace() + # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) + conv2d_41 = paddle._C_ops.conv2d( + swish_30, parameter_538, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_538 - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_2 = [full_1, data_1, full_6] - del data_1, full_1, full_6 + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_41, + parameter_537, + parameter_536, + parameter_535, + parameter_534, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_534, parameter_535, parameter_536, parameter_537 - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_2, 0) - del combine_2 + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_31 = paddle._C_ops.swish(batch_norm__234) - # pd_op.reshape: (1x-1x4xf32) <- (-1x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, stack_2) - del gather_1, stack_2 + # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) + conv2d_42 = paddle._C_ops.conv2d( + swish_30, parameter_533, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_533 - # pd_op.full: (1xi32) <- () - full_7 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_42, + parameter_532, + parameter_531, + parameter_530, + parameter_529, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_529, parameter_530, parameter_531, parameter_532 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_32 = paddle._C_ops.swish(batch_norm__240) - # pd_op.one_hot: (1x-1x11xf32) <- (1x-1xi32, 1xi32) - one_hot_1 = paddle._C_ops.one_hot( - where_1 % paddle.cast(full_7, where_1.dtype), full_7 + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_43 = paddle._C_ops.conv2d( + swish_32, parameter_528, [1, 
1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_7 + del parameter_528 - # pd_op.full: (10xi64) <- () - full_8 = paddle._C_ops.full( - [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__250, + batch_norm__251, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_43, + parameter_527, + parameter_526, + parameter_525, + parameter_524, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_524, parameter_525, parameter_526, parameter_527 - # pd_op.assign_value_: (10xi64) <- (10xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_8, - [10], - paddle.int64, - [ - float("0"), - float("1"), - float("2"), - float("3"), - float("4"), - float("5"), - float("6"), - float("7"), - float("8"), - float("9"), - ], - paddle.framework._current_expected_place(), + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_33 = paddle._C_ops.swish(batch_norm__246) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_44 = paddle._C_ops.conv2d( + swish_33, parameter_523, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_8 + del parameter_523 - # pd_op.index_select: (1x-1x10xf32) <- (1x-1x11xf32, 10xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) - del assign_value__0, one_hot_1 + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_44, + parameter_522, + parameter_521, + parameter_520, + parameter_519, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_519, parameter_520, parameter_521, parameter_522 - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - multiply_3 = paddle._C_ops.multiply(data_8, where_0) - del data_8 + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_45 = paddle._C_ops.conv2d( + swish_33, parameter_518, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_518 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [-1] + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__258, + batch_norm__259, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_45, + parameter_517, + parameter_516, + parameter_515, + parameter_514, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_514, parameter_515, parameter_516, parameter_517 - # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_3, full_int_array_5, True) + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + 
multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) + del data_9 - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x-1xf32) - multiply_4 = paddle._C_ops.multiply(data_3, where_0) - del data_3, where_0 + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) - # pd_op.max: (1x-1x1xf32) <- (1x-1x-1xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_4, full_int_array_5, True) - del multiply_4 + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_34 = paddle._C_ops.swish(add_20) - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_21 = paddle._C_ops.add(swish_32, swish_34) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_46 = paddle._C_ops.conv2d( + add_21, parameter_513, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_513 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_46, + parameter_512, + parameter_511, + parameter_510, + parameter_509, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_509, parameter_510, parameter_511, parameter_512 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_35 = paddle._C_ops.swish(batch_norm__264) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_47 = paddle._C_ops.conv2d( + swish_35, parameter_508, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_508 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_47, + parameter_507, + parameter_506, + parameter_505, + parameter_504, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_504, parameter_505, parameter_506, parameter_507 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_48 = paddle._C_ops.conv2d( + swish_35, parameter_503, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_503 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__280, + batch_norm__281, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_48, + parameter_502, + parameter_501, + parameter_500, + parameter_499, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), ) + del parameter_499, parameter_500, parameter_501, parameter_502 - # pd_op.scale: (1x-1x1xf32) <- (1x-1x1xf32, 1xf32) - scale_0 = 
paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) - del full_9, max_0 + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) + del data_10 - # pd_op.divide: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - divide_0 = paddle._C_ops.divide(multiply_3, scale_0) - del multiply_3, scale_0 + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) - # pd_op.multiply: (1x-1x-1xf32) <- (1x-1x-1xf32, 1x-1x1xf32) - multiply_5 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_36 = paddle._C_ops.swish(add_22) - # pd_op.max: (1x-1xf32) <- (1x-1x-1xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_5, full_int_array_3, False) - del full_int_array_3, multiply_5 + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_23 = paddle._C_ops.add(add_21, swish_36) - # pd_op.unsqueeze: (1x-1x1xf32) <- (1x-1xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_5) - del full_int_array_5, max_2 + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_49 = paddle._C_ops.conv2d( + add_23, parameter_498, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_498 - # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x1xf32) - multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) - del index_select_0, unsqueeze_1, where_1 + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_49, + parameter_497, + parameter_496, + parameter_495, + parameter_494, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_494, parameter_495, parameter_496, parameter_497 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_37 = paddle._C_ops.swish(batch_norm__282) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_50 = paddle._C_ops.conv2d( + swish_37, parameter_493, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_493 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__288, + batch_norm__289, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_50, + parameter_492, + parameter_491, + parameter_490, + parameter_489, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_489, parameter_490, parameter_491, parameter_492 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_51 = paddle._C_ops.conv2d( + swish_37, parameter_488, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_488 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__294, + batch_norm__295, + batch_norm__296, + 
batch_norm__297, + batch_norm__298, + batch_norm__299, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_51, + parameter_487, + parameter_486, + parameter_485, + parameter_484, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_484, parameter_485, parameter_486, parameter_487 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) + del data_11 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_38 = paddle._C_ops.swish(add_24) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_25 = paddle._C_ops.add(add_23, swish_38) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_52 = paddle._C_ops.conv2d( + add_25, parameter_483, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_483 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_52, + parameter_482, + parameter_481, + parameter_480, + parameter_479, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_479, parameter_480, parameter_481, parameter_482 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_39 = paddle._C_ops.swish(batch_norm__300) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_53 = paddle._C_ops.conv2d( + swish_39, parameter_478, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_478 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__310, + batch_norm__311, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_53, + parameter_477, + parameter_476, + parameter_475, + parameter_474, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_474, parameter_475, parameter_476, parameter_477 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_54 = paddle._C_ops.conv2d( + swish_39, parameter_473, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_473 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_54, + parameter_472, + parameter_471, + parameter_470, + parameter_469, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_469, 
parameter_470, parameter_471, parameter_472 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) + del data_12 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_40 = paddle._C_ops.swish(add_26) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_27 = paddle._C_ops.add(add_25, swish_40) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_55 = paddle._C_ops.conv2d( + add_27, parameter_468, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_468 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__318, + batch_norm__319, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_55, + parameter_467, + parameter_466, + parameter_465, + parameter_464, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_464, parameter_465, parameter_466, parameter_467 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_41 = paddle._C_ops.swish(batch_norm__318) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_56 = paddle._C_ops.conv2d( + swish_41, parameter_463, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_463 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_56, + parameter_462, + parameter_461, + parameter_460, + parameter_459, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_459, parameter_460, parameter_461, parameter_462 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_57 = paddle._C_ops.conv2d( + swish_41, parameter_458, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_458 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_57, + parameter_457, + parameter_456, + parameter_455, + parameter_454, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_454, parameter_455, parameter_456, parameter_457 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) + del data_13 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_42 = 
paddle._C_ops.swish(add_28) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_29 = paddle._C_ops.add(add_27, swish_42) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_58 = paddle._C_ops.conv2d( + add_29, parameter_453, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_453 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__340, + batch_norm__341, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_58, + parameter_452, + parameter_451, + parameter_450, + parameter_449, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_449, parameter_450, parameter_451, parameter_452 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_43 = paddle._C_ops.swish(batch_norm__336) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_59 = paddle._C_ops.conv2d( + swish_43, parameter_448, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_448 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_59, + parameter_447, + parameter_446, + parameter_445, + parameter_444, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_444, parameter_445, parameter_446, parameter_447 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_60 = paddle._C_ops.conv2d( + swish_43, parameter_443, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_443 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__348, + batch_norm__349, + batch_norm__350, + batch_norm__351, + batch_norm__352, + batch_norm__353, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_60, + parameter_442, + parameter_441, + parameter_440, + parameter_439, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_439, parameter_440, parameter_441, parameter_442 + + # pd_op.multiply: (1x192x68x68xf32) <- (1xf32, 1x192x68x68xf32) + multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) + del data_14 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_44 = paddle._C_ops.swish(add_30) + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_31 = paddle._C_ops.add(add_29, swish_44) + + # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) + combine_2 = [swish_31, add_31] + + # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) + concat_2 = 
paddle._C_ops.concat(combine_2, full_0) + del combine_2 + + # pd_op.mean: (1x384x1x1xf32) <- (1x384x68x68xf32, 2xi64) + mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) + + # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) + conv2d_61 = paddle._C_ops.conv2d( + mean_2, parameter_438, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_438 + + # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_437, full_int_array_1) + del parameter_437 + + # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) + add_32 = paddle._C_ops.add(conv2d_61, reshape_2) + + # pd_op.hardsigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) + hardsigmoid_2 = paddle._C_ops.hardsigmoid( + add_32, float("0.166667"), float("0.5") + ) + del add_32 + + # pd_op.multiply: (1x384x68x68xf32) <- (1x384x68x68xf32, 1x384x1x1xf32) + multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) + + # pd_op.conv2d: (1x512x68x68xf32) <- (1x384x68x68xf32, 512x384x1x1xf32) + conv2d_62 = paddle._C_ops.conv2d( + multiply_17, parameter_436, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_436 + + # pd_op.batch_norm_: (1x512x68x68xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x68x68xf32, 512xf32, 512xf32, 512xf32, 512xf32) + ( + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_62, + parameter_435, + parameter_434, + parameter_433, + parameter_432, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_432, parameter_433, parameter_434, parameter_435 + + # pd_op.swish: (1x512x68x68xf32) <- (1x512x68x68xf32) + swish_45 = paddle._C_ops.swish(batch_norm__354) + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x512x68x68xf32, 768x512x3x3xf32) + conv2d_63 = paddle._C_ops.conv2d( + swish_45, parameter_431, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_431 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_63, + parameter_430, + parameter_429, + parameter_428, + parameter_427, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_427, parameter_428, parameter_429, parameter_430 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_46 = paddle._C_ops.swish(batch_norm__360) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) + conv2d_64 = paddle._C_ops.conv2d( + swish_46, parameter_426, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_426 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__370, + batch_norm__371, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_64, + parameter_425, + parameter_424, + parameter_423, + parameter_422, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), 
+ lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_422, parameter_423, parameter_424, parameter_425 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_47 = paddle._C_ops.swish(batch_norm__366) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) + conv2d_65 = paddle._C_ops.conv2d( + swish_46, parameter_421, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_421 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_65, + parameter_420, + parameter_419, + parameter_418, + parameter_417, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_417, parameter_418, parameter_419, parameter_420 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_48 = paddle._C_ops.swish(batch_norm__372) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_66 = paddle._C_ops.conv2d( + swish_48, parameter_416, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_416 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__378, + batch_norm__379, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_66, + parameter_415, + parameter_414, + parameter_413, + parameter_412, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_412, parameter_413, parameter_414, parameter_415 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_49 = paddle._C_ops.swish(batch_norm__378) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_67 = paddle._C_ops.conv2d( + swish_49, parameter_411, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_411 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_67, + parameter_410, + parameter_409, + parameter_408, + parameter_407, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_407, parameter_408, parameter_409, parameter_410 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_68 = paddle._C_ops.conv2d( + swish_49, parameter_406, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_406 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + ) = (lambda x, f: f(x))( + 
paddle._C_ops.batch_norm( + conv2d_68, + parameter_405, + parameter_404, + parameter_403, + parameter_402, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_402, parameter_403, parameter_404, parameter_405 + + # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) + multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) + del data_15 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_50 = paddle._C_ops.swish(add_33) + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_34 = paddle._C_ops.add(swish_48, swish_50) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_69 = paddle._C_ops.conv2d( + add_34, parameter_401, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_401 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__400, + batch_norm__401, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_69, + parameter_400, + parameter_399, + parameter_398, + parameter_397, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_397, parameter_398, parameter_399, parameter_400 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_51 = paddle._C_ops.swish(batch_norm__396) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_70 = paddle._C_ops.conv2d( + swish_51, parameter_396, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_396 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_70, + parameter_395, + parameter_394, + parameter_393, + parameter_392, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_392, parameter_393, parameter_394, parameter_395 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_71 = paddle._C_ops.conv2d( + swish_51, parameter_391, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_391 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__408, + batch_norm__409, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_71, + parameter_390, + parameter_389, + parameter_388, + parameter_387, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_387, parameter_388, parameter_389, parameter_390 + + # pd_op.multiply: (1x384x34x34xf32) <- 
(1xf32, 1x384x34x34xf32) + multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) + del data_16 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_52 = paddle._C_ops.swish(add_35) + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_36 = paddle._C_ops.add(add_34, swish_52) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_72 = paddle._C_ops.conv2d( + add_36, parameter_386, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_386 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_72, + parameter_385, + parameter_384, + parameter_383, + parameter_382, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_382, parameter_383, parameter_384, parameter_385 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_53 = paddle._C_ops.swish(batch_norm__414) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_73 = paddle._C_ops.conv2d( + swish_53, parameter_381, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_381 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_73, + parameter_380, + parameter_379, + parameter_378, + parameter_377, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_377, parameter_378, parameter_379, parameter_380 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_74 = paddle._C_ops.conv2d( + swish_53, parameter_376, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_376 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__430, + batch_norm__431, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_74, + parameter_375, + parameter_374, + parameter_373, + parameter_372, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_372, parameter_373, parameter_374, parameter_375 + + # pd_op.multiply: (1x384x34x34xf32) <- (1xf32, 1x384x34x34xf32) + multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) + del data_17 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_54 = paddle._C_ops.swish(add_37) + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 
1x384x34x34xf32) + add_38 = paddle._C_ops.add(add_36, swish_54) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) + combine_3 = [swish_47, add_38] + + # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_3 = paddle._C_ops.concat(combine_3, full_0) + del combine_3 + + # pd_op.mean: (1x768x1x1xf32) <- (1x768x34x34xf32, 2xi64) + mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) + + # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) + conv2d_75 = paddle._C_ops.conv2d( + mean_3, parameter_371, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_371 + + # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_370, full_int_array_1) + del full_int_array_1, parameter_370 + + # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) + add_39 = paddle._C_ops.add(conv2d_75, reshape_3) + + # pd_op.hardsigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) + hardsigmoid_3 = paddle._C_ops.hardsigmoid( + add_39, float("0.166667"), float("0.5") + ) + del add_39 + + # pd_op.multiply: (1x768x34x34xf32) <- (1x768x34x34xf32, 1x768x1x1xf32) + multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) + + # pd_op.conv2d: (1x1024x34x34xf32) <- (1x768x34x34xf32, 1024x768x1x1xf32) + conv2d_76 = paddle._C_ops.conv2d( + multiply_21, parameter_369, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_369 + + # pd_op.batch_norm_: (1x1024x34x34xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x34x34xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) + ( + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_76, + parameter_368, + parameter_367, + parameter_366, + parameter_365, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_365, parameter_366, parameter_367, parameter_368 + + # pd_op.swish: (1x1024x34x34xf32) <- (1x1024x34x34xf32) + swish_55 = paddle._C_ops.swish(batch_norm__432) + + # pd_op.flatten: (1x1024x1156xf32) <- (1x1024x34x34xf32) + flatten_0 = paddle._C_ops.flatten(swish_55, 2, 3) + + # pd_op.transpose: (1x1156x1024xf32) <- (1x1024x1156xf32) + transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) + del flatten_0 + + # pd_op.full: (1xf64) <- () + full_1 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_2 = paddle._C_ops.full( + [1], float("34"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (34xf32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_1, full_2, full_3, dtype="float32") + del full_2 + + # builtin.combine: ([34xf32, 34xf32]) <- (34xf32, 34xf32) + combine_4 = [arange_0, arange_0] + del arange_0 + + # pd_op.meshgrid: ([34x34xf32, 34x34xf32]) <- ([34xf32, 34xf32]) + meshgrid_0 = paddle._C_ops.meshgrid(combine_4) + del combine_4 + + # builtin.split: (34x34xf32, 34x34xf32) <- ([34x34xf32, 34x34xf32]) + ( + split_0, + split_1, + ) = meshgrid_0 + del meshgrid_0 + + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("256"), paddle.float64, paddle.core.CPUPlace() + ) + + # pd_op.arange: (256xf32) <- (1xf64, 
1xf64, 1xf64) + arange_1 = paddle.arange(full_1, full_4, full_3, dtype="float32") + del full_1, full_3, full_4 + + # pd_op.full: (1xf32) <- () + full_5 = paddle._C_ops.full( + [1], float("0.00390625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (256xf32) <- (256xf32, 1xf32) + scale_0 = paddle._C_ops.scale(arange_1, full_5, float("0"), True) + del arange_1, full_5 + + # pd_op.full: (256xf32) <- () + full_6 = paddle._C_ops.full( + [256], + float("10000"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.elementwise_pow: (256xf32) <- (256xf32, 256xf32) + elementwise_pow_0 = paddle._C_ops.elementwise_pow(full_6, scale_0) + del full_6, scale_0 + + # pd_op.full: (256xf32) <- () + full_7 = paddle._C_ops.full( + [256], + float("1"), + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.divide: (256xf32) <- (256xf32, 256xf32) + divide_0 = paddle._C_ops.divide(full_7, elementwise_pow_0) + del elementwise_pow_0, full_7 + + # pd_op.flatten: (1156xf32) <- (34x34xf32) + flatten_1 = paddle._C_ops.flatten(split_0, 0, 1) + del split_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.unsqueeze: (1156x1xf32) <- (1156xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(flatten_1, full_int_array_2) + del flatten_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [0] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_16 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_17 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_18 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_19 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_20 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_21 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_22 = full_int_array_3 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_23 = full_int_array_3 + + # pd_op.unsqueeze: (1x256xf32) <- (256xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(divide_0, full_int_array_3) + del divide_0 + + # pd_op.matmul: (1156x256xf32) <- (1156x1xf32, 1x256xf32) + matmul_0 = paddle._C_ops.matmul(unsqueeze_0, unsqueeze_1, False, False) + del unsqueeze_0 + + # pd_op.flatten: (1156xf32) <- (34x34xf32) + flatten_2 = paddle._C_ops.flatten(split_1, 0, 1) + del split_1 + + # pd_op.unsqueeze: (1156x1xf32) <- (1156xf32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(flatten_2, full_int_array_2) + del flatten_2, full_int_array_2 + + # pd_op.matmul: (1156x256xf32) <- (1156x1xf32, 1x256xf32) + matmul_1 = paddle._C_ops.matmul(unsqueeze_2, unsqueeze_1, False, False) + del unsqueeze_1, unsqueeze_2 + + # pd_op.sin: (1156x256xf32) <- (1156x256xf32) + sin_0 = paddle._C_ops.sin(matmul_0) + + # pd_op.cos: (1156x256xf32) <- (1156x256xf32) + cos_0 = paddle._C_ops.cos(matmul_0) + del matmul_0 + + # pd_op.sin: (1156x256xf32) <- (1156x256xf32) + sin_1 = paddle._C_ops.sin(matmul_1) + + # pd_op.cos: (1156x256xf32) <- (1156x256xf32) + cos_1 = paddle._C_ops.cos(matmul_1) + del matmul_1 + + # builtin.combine: ([1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32]) <- (1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32) + combine_5 = [sin_0, cos_0, sin_1, cos_1] + del cos_0, cos_1, sin_0, sin_1 + + # pd_op.concat: (1156x1024xf32) <- ([1156x256xf32, 1156x256xf32, 1156x256xf32, 1156x256xf32], 1xi32) + concat_4 = paddle._C_ops.concat(combine_5, full_0) + del combine_5 + + # pd_op.unsqueeze: (1x1156x1024xf32) <- (1156x1024xf32, 1xi64) + unsqueeze_3 = 
paddle._C_ops.unsqueeze(concat_4, full_int_array_3) + del concat_4 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_40 = paddle._C_ops.add(transpose_0, unsqueeze_3) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [1024] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_24 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_25 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_26 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_27 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_28 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_29 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_30 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_31 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_32 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_33 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_34 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_35 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_36 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_37 = full_int_array_4 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_38 = full_int_array_4 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_18, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_19, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_2 = paddle._C_ops.matmul(add_40, slice_0, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_41 = paddle._C_ops.add(matmul_2, slice_1) + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_5 = [0, 0, 4, 256] + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(add_41, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_1 = paddle._C_ops.transpose(reshape_4, [0, 2, 1, 3]) + del reshape_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [2048] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_39 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_40 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_41 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_42 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_43 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_44 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_45 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_46 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_47 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_48 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_49 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_50 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_51 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_52 = full_int_array_6 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_53 = full_int_array_6 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_2 = paddle._C_ops.slice( + data_18, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: 
(1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + data_19, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_3 = paddle._C_ops.matmul(add_40, slice_2, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_42 = paddle._C_ops.add(matmul_3, slice_3) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(add_42, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_2 = paddle._C_ops.transpose(reshape_5, [0, 2, 1, 3]) + del reshape_5 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [2147483647] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_54 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_55 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_56 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_57 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_58 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_59 = full_int_array_7 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_60 = full_int_array_7 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_4 = paddle._C_ops.slice( + data_18, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_18 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_5 = paddle._C_ops.slice( + data_19, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_19 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_4 = paddle._C_ops.matmul(transpose_0, slice_4, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_43 = paddle._C_ops.add(matmul_4, slice_5) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(add_43, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_3 = paddle._C_ops.transpose(reshape_6, [0, 2, 1, 3]) + del reshape_6 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_5 = paddle._C_ops.matmul(transpose_1, transpose_2, False, True) + + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("0.0625"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_61 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_62 = full_8 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_63 = full_8 + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_1 = paddle._C_ops.scale(matmul_5, full_8, float("0"), True) + del matmul_5 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_0 = paddle._C_ops.softmax(scale_1, -1) + del scale_1 + + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.assign: (1xf32) <- (1xf32) + assign_64 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_65 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_66 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_67 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_68 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_69 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_70 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_71 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_72 = full_9 + + # pd_op.assign: (1xf32) <- 
(1xf32) + assign_73 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_74 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_75 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_76 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_77 = full_9 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_78 = full_9 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_6 = paddle._C_ops.matmul(dropout_0, transpose_3, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_4 = paddle._C_ops.transpose(matmul_6, [0, 2, 1, 3]) + del matmul_6 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_8 = [0, 0, 1024] + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_7 = paddle._C_ops.reshape(transpose_4, full_int_array_8) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_7 = paddle._C_ops.matmul(reshape_7, parameter_364, False, False) + del parameter_364 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_44 = paddle._C_ops.add(matmul_7, parameter_363) + del parameter_363 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_2, dropout_3 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_44, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_44 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_45 = paddle._C_ops.add(transpose_0, dropout_2) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_45, parameter_362, parameter_361, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_361, parameter_362 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_8 = paddle._C_ops.matmul(layer_norm_0, parameter_360, False, False) + del parameter_360 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_46 = paddle._C_ops.add(matmul_8, parameter_359) + del parameter_359 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_0 = paddle._C_ops.gelu(add_46, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_4, dropout_5 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_0, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_0 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_9 = paddle._C_ops.matmul(dropout_4, parameter_358, False, False) + del parameter_358 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_47 = paddle._C_ops.add(matmul_9, parameter_357) + del parameter_357 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_6, dropout_7 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_47, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if 
isinstance(out, (list, tuple)) else (out, None), + ) + del add_47 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_48 = paddle._C_ops.add(layer_norm_0, dropout_6) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_48, parameter_356, parameter_355, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_355, parameter_356 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_49 = paddle._C_ops.add(layer_norm_3, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_6 = paddle._C_ops.slice( + data_20, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_7 = paddle._C_ops.slice( + data_21, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_10 = paddle._C_ops.matmul(add_49, slice_6, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_50 = paddle._C_ops.add(matmul_10, slice_7) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(add_50, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_5 = paddle._C_ops.transpose(reshape_8, [0, 2, 1, 3]) + del reshape_8 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_8 = paddle._C_ops.slice( + data_20, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_9 = paddle._C_ops.slice( + data_21, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_11 = paddle._C_ops.matmul(add_49, slice_8, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_51 = paddle._C_ops.add(matmul_11, slice_9) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(add_51, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_6 = paddle._C_ops.transpose(reshape_9, [0, 2, 1, 3]) + del reshape_9 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_10 = paddle._C_ops.slice( + data_20, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_20 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_11 = paddle._C_ops.slice( + data_21, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_21 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_12 = paddle._C_ops.matmul(layer_norm_3, slice_10, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_52 = paddle._C_ops.add(matmul_12, slice_11) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(add_52, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_7 = paddle._C_ops.transpose(reshape_10, [0, 2, 1, 3]) + del reshape_10 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_13 = paddle._C_ops.matmul(transpose_5, transpose_6, False, True) + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_2 = 
paddle._C_ops.scale(matmul_13, full_8, float("0"), True) + del matmul_13 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_1 = paddle._C_ops.softmax(scale_2, -1) + del scale_2 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_8, dropout_9 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_14 = paddle._C_ops.matmul(dropout_8, transpose_7, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_8 = paddle._C_ops.transpose(matmul_14, [0, 2, 1, 3]) + del matmul_14 + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_11 = paddle._C_ops.reshape(transpose_8, full_int_array_8) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_15 = paddle._C_ops.matmul(reshape_11, parameter_354, False, False) + del parameter_354 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_53 = paddle._C_ops.add(matmul_15, parameter_353) + del parameter_353 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_10, dropout_11 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_53, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_53 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_54 = paddle._C_ops.add(layer_norm_3, dropout_10) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_54, parameter_352, parameter_351, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_351, parameter_352 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_16 = paddle._C_ops.matmul(layer_norm_6, parameter_350, False, False) + del parameter_350 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_55 = paddle._C_ops.add(matmul_16, parameter_349) + del parameter_349 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_1 = paddle._C_ops.gelu(add_55, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_12, dropout_13 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_1, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_1 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_17 = paddle._C_ops.matmul(dropout_12, parameter_348, False, False) + del parameter_348 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_56 = paddle._C_ops.add(matmul_17, parameter_347) + del parameter_347 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_14, dropout_15 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_56, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_56 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_57 = paddle._C_ops.add(layer_norm_6, 
dropout_14) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_57, parameter_346, parameter_345, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_345, parameter_346 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_58 = paddle._C_ops.add(layer_norm_9, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_12 = paddle._C_ops.slice( + data_22, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_13 = paddle._C_ops.slice( + data_23, [0], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_18 = paddle._C_ops.matmul(add_58, slice_12, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_59 = paddle._C_ops.add(matmul_18, slice_13) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(add_59, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_9 = paddle._C_ops.transpose(reshape_12, [0, 2, 1, 3]) + del reshape_12 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_14 = paddle._C_ops.slice( + data_22, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_15 = paddle._C_ops.slice( + data_23, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_19 = paddle._C_ops.matmul(add_58, slice_14, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_60 = paddle._C_ops.add(matmul_19, slice_15) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_13 = paddle._C_ops.reshape(add_60, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_10 = paddle._C_ops.transpose(reshape_13, [0, 2, 1, 3]) + del reshape_13 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_16 = paddle._C_ops.slice( + data_22, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_22 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_17 = paddle._C_ops.slice( + data_23, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_23 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_20 = paddle._C_ops.matmul(layer_norm_9, slice_16, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_61 = paddle._C_ops.add(matmul_20, slice_17) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_14 = paddle._C_ops.reshape(add_61, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_11 = paddle._C_ops.transpose(reshape_14, [0, 2, 1, 3]) + del reshape_14 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_21 = paddle._C_ops.matmul(transpose_9, transpose_10, False, True) + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_3 = paddle._C_ops.scale(matmul_21, full_8, float("0"), True) + del matmul_21 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_2 = paddle._C_ops.softmax(scale_3, -1) + 
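+    # The matmul -> scale -> softmax -> dropout -> matmul sequence repeated in
+    # this block (and in the surrounding encoder blocks) is ordinary multi-head
+    # scaled-dot-product attention, unrolled by the graph capture: 4 heads with
+    # head dim 256, so the 0.0625 scale is 1/sqrt(256), and the dropout rate is
+    # 0.1. A rough high-level sketch of the same computation, for orientation
+    # only (q, k, v stand in for the transpose_9/transpose_10/transpose_11
+    # tensors of this block; this sketch is not part of the captured graph):
+    #     attn = paddle.nn.functional.softmax(
+    #         paddle.matmul(q, k, transpose_y=True) * (256 ** -0.5), axis=-1
+    #     )
+    #     out = paddle.matmul(paddle.nn.functional.dropout(attn, p=0.1), v)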
del scale_3 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_16, dropout_17 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_22 = paddle._C_ops.matmul(dropout_16, transpose_11, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_12 = paddle._C_ops.transpose(matmul_22, [0, 2, 1, 3]) + del matmul_22 + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_15 = paddle._C_ops.reshape(transpose_12, full_int_array_8) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_23 = paddle._C_ops.matmul(reshape_15, parameter_344, False, False) + del parameter_344 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_62 = paddle._C_ops.add(matmul_23, parameter_343) + del parameter_343 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_18, dropout_19 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_62, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_62 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_63 = paddle._C_ops.add(layer_norm_9, dropout_18) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_63, parameter_342, parameter_341, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_341, parameter_342 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_24 = paddle._C_ops.matmul(layer_norm_12, parameter_340, False, False) + del parameter_340 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_64 = paddle._C_ops.add(matmul_24, parameter_339) + del parameter_339 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_2 = paddle._C_ops.gelu(add_64, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_20, dropout_21 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_2, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_2 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_25 = paddle._C_ops.matmul(dropout_20, parameter_338, False, False) + del parameter_338 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_65 = paddle._C_ops.add(matmul_25, parameter_337) + del parameter_337 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_22, dropout_23 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_65, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_65 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_66 = paddle._C_ops.add(layer_norm_12, dropout_22) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: 
f(x))( + paddle._C_ops.layer_norm( + add_66, parameter_336, parameter_335, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_335, parameter_336 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_67 = paddle._C_ops.add(layer_norm_15, unsqueeze_3) + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_18 = paddle._C_ops.slice( + data_24, [1], full_int_array_3, full_int_array_4, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_19 = paddle._C_ops.slice( + data_25, [0], full_int_array_3, full_int_array_4, [1], [] + ) + del full_int_array_3 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_26 = paddle._C_ops.matmul(add_67, slice_18, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_68 = paddle._C_ops.add(matmul_26, slice_19) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_16 = paddle._C_ops.reshape(add_68, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_13 = paddle._C_ops.transpose(reshape_16, [0, 2, 1, 3]) + del reshape_16 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_20 = paddle._C_ops.slice( + data_24, [1], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_21 = paddle._C_ops.slice( + data_25, [0], full_int_array_4, full_int_array_6, [1], [] + ) + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_27 = paddle._C_ops.matmul(add_67, slice_20, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_69 = paddle._C_ops.add(matmul_27, slice_21) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_17 = paddle._C_ops.reshape(add_69, full_int_array_5) + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_14 = paddle._C_ops.transpose(reshape_17, [0, 2, 1, 3]) + del reshape_17 + + # pd_op.slice: (1024x1024xf32) <- (1024x3072xf32, 1xi64, 1xi64) + slice_22 = paddle._C_ops.slice( + data_24, [1], full_int_array_6, full_int_array_7, [1], [] + ) + del data_24 + + # pd_op.slice: (1024xf32) <- (3072xf32, 1xi64, 1xi64) + slice_23 = paddle._C_ops.slice( + data_25, [0], full_int_array_6, full_int_array_7, [1], [] + ) + del data_25 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_28 = paddle._C_ops.matmul(layer_norm_15, slice_22, False, False) + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_70 = paddle._C_ops.add(matmul_28, slice_23) + + # pd_op.reshape: (1x1156x4x256xf32) <- (1x1156x1024xf32, 4xi64) + reshape_18 = paddle._C_ops.reshape(add_70, full_int_array_5) + del full_int_array_5 + + # pd_op.transpose: (1x4x1156x256xf32) <- (1x1156x4x256xf32) + transpose_15 = paddle._C_ops.transpose(reshape_18, [0, 2, 1, 3]) + del reshape_18 + + # pd_op.matmul: (1x4x1156x1156xf32) <- (1x4x1156x256xf32, 1x4x1156x256xf32) + matmul_29 = paddle._C_ops.matmul(transpose_13, transpose_14, False, True) + + # pd_op.scale: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32, 1xf32) + scale_4 = paddle._C_ops.scale(matmul_29, full_8, float("0"), True) + del matmul_29 + + # pd_op.softmax: (1x4x1156x1156xf32) <- (1x4x1156x1156xf32) + softmax_3 = paddle._C_ops.softmax(scale_4, -1) + del scale_4 + + # pd_op.dropout: (1x4x1156x1156xf32, 1x4x1156x1156xui8) <- (1x4x1156x1156xf32, None, 1xf32) + dropout_24, 
dropout_25 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + softmax_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + + # pd_op.matmul: (1x4x1156x256xf32) <- (1x4x1156x1156xf32, 1x4x1156x256xf32) + matmul_30 = paddle._C_ops.matmul(dropout_24, transpose_15, False, False) + + # pd_op.transpose: (1x1156x4x256xf32) <- (1x4x1156x256xf32) + transpose_16 = paddle._C_ops.transpose(matmul_30, [0, 2, 1, 3]) + del matmul_30 + + # pd_op.reshape: (1x1156x1024xf32) <- (1x1156x4x256xf32, 3xi64) + reshape_19 = paddle._C_ops.reshape(transpose_16, full_int_array_8) + del full_int_array_8 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024x1024xf32) + matmul_31 = paddle._C_ops.matmul(reshape_19, parameter_334, False, False) + del parameter_334 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_71 = paddle._C_ops.add(matmul_31, parameter_333) + del parameter_333 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_26, dropout_27 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_71, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_71 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_72 = paddle._C_ops.add(layer_norm_15, dropout_26) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_18, layer_norm_19, layer_norm_20 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_72, parameter_332, parameter_331, float("1e-05"), 2 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_331, parameter_332 + + # pd_op.matmul: (1x1156x2048xf32) <- (1x1156x1024xf32, 1024x2048xf32) + matmul_32 = paddle._C_ops.matmul(layer_norm_18, parameter_330, False, False) + del parameter_330 + + # pd_op.add: (1x1156x2048xf32) <- (1x1156x2048xf32, 2048xf32) + add_73 = paddle._C_ops.add(matmul_32, parameter_329) + del parameter_329 + + # pd_op.gelu: (1x1156x2048xf32) <- (1x1156x2048xf32) + gelu_3 = paddle._C_ops.gelu(add_73, False) + + # pd_op.dropout: (1x1156x2048xf32, 1x1156x2048xui8) <- (1x1156x2048xf32, None, 1xf32) + dropout_28, dropout_29 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + gelu_3, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del gelu_3 + + # pd_op.matmul: (1x1156x1024xf32) <- (1x1156x2048xf32, 2048x1024xf32) + matmul_33 = paddle._C_ops.matmul(dropout_28, parameter_328, False, False) + del parameter_328 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1024xf32) + add_74 = paddle._C_ops.add(matmul_33, parameter_327) + del parameter_327 + + # pd_op.dropout: (1x1156x1024xf32, 1x1156x1024xui8) <- (1x1156x1024xf32, None, 1xf32) + dropout_30, dropout_31 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_74, None, full_9, False, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del add_74 + + # pd_op.add: (1x1156x1024xf32) <- (1x1156x1024xf32, 1x1156x1024xf32) + add_75 = paddle._C_ops.add(layer_norm_18, dropout_30) + + # pd_op.layer_norm: (1x1156x1024xf32, 1x1156xf32, 1x1156xf32) <- (1x1156x1024xf32, 1024xf32, 1024xf32) + layer_norm_21, layer_norm_22, layer_norm_23 = (lambda x, f: f(x))( + paddle._C_ops.layer_norm( + add_75, parameter_326, parameter_325, float("1e-05"), 2 + ), + 
lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), + ) + del parameter_325, parameter_326 + + # pd_op.transpose: (1x1024x1156xf32) <- (1x1156x1024xf32) + transpose_17 = paddle._C_ops.transpose(layer_norm_21, [0, 2, 1]) + del layer_norm_21 + + # pd_op.full_int_array: (4xi64) <- () + full_int_array_9 = [1, 1024, 34, 34] + + # pd_op.reshape: (1x1024x34x34xf32) <- (1x1024x1156xf32, 4xi64) + reshape_20 = paddle._C_ops.reshape(transpose_17, full_int_array_9) + del full_int_array_9 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1024x34x34xf32, 384x1024x1x1xf32) + conv2d_77 = paddle._C_ops.conv2d( + reshape_20, parameter_324, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_324 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__438, + batch_norm__439, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_77, + parameter_323, + parameter_322, + parameter_321, + parameter_320, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_320, parameter_321, parameter_322, parameter_323 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_56 = paddle._C_ops.swish(batch_norm__438) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1024x34x34xf32, 384x1024x1x1xf32) + conv2d_78 = paddle._C_ops.conv2d( + reshape_20, parameter_319, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_319 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_78, + parameter_318, + parameter_317, + parameter_316, + parameter_315, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_315, parameter_316, parameter_317, parameter_318 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_57 = paddle._C_ops.swish(batch_norm__444) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_79 = paddle._C_ops.conv2d( + swish_57, parameter_314, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_314 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_79, + parameter_313, + parameter_312, + parameter_311, + parameter_310, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_310, parameter_311, parameter_312, parameter_313 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_58 = paddle._C_ops.swish(batch_norm__450) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_80 = paddle._C_ops.conv2d( + swish_58, parameter_309, [1, 1], [1, 1], "EXPLICIT", 
[1, 1], 1, "NCHW" + ) + del parameter_309 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__460, + batch_norm__461, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_80, + parameter_308, + parameter_307, + parameter_306, + parameter_305, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_305, parameter_306, parameter_307, parameter_308 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_81 = paddle._C_ops.conv2d( + swish_58, parameter_304, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_304 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_81, + parameter_303, + parameter_302, + parameter_301, + parameter_300, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_300, parameter_301, parameter_302, parameter_303 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_76 = paddle._C_ops.add(batch_norm__456, batch_norm__462) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_59 = paddle._C_ops.swish(add_76) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_82 = paddle._C_ops.conv2d( + swish_59, parameter_299, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_299 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__468, + batch_norm__469, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_82, + parameter_298, + parameter_297, + parameter_296, + parameter_295, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_295, parameter_296, parameter_297, parameter_298 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_60 = paddle._C_ops.swish(batch_norm__468) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_83 = paddle._C_ops.conv2d( + swish_60, parameter_294, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_294 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_83, + parameter_293, + parameter_292, + parameter_291, + parameter_290, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_290, parameter_291, 
parameter_292, parameter_293 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_84 = paddle._C_ops.conv2d( + swish_60, parameter_289, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_289 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_84, + parameter_288, + parameter_287, + parameter_286, + parameter_285, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_285, parameter_286, parameter_287, parameter_288 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_77 = paddle._C_ops.add(batch_norm__474, batch_norm__480) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_61 = paddle._C_ops.swish(add_77) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_10 = [5, 5] + + # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) + pool2d_0 = paddle._C_ops.pool2d( + swish_61, + full_int_array_10, + [1, 1], + [2, 2], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_11 = [9, 9] + + # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) + pool2d_1 = paddle._C_ops.pool2d( + swish_61, + full_int_array_11, + [1, 1], + [4, 4], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_12 = [13, 13] + + # pd_op.pool2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 2xi64) + pool2d_2 = paddle._C_ops.pool2d( + swish_61, + full_int_array_12, + [1, 1], + [6, 6], + False, + True, + "NCHW", + "max", + False, + False, + "EXPLICIT", + ) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32) + combine_6 = [swish_61, pool2d_0, pool2d_1, pool2d_2] + + # pd_op.concat: (1x1536x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_5 = paddle._C_ops.concat(combine_6, full_0) + del combine_6 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1536x34x34xf32, 384x1536x1x1xf32) + conv2d_85 = paddle._C_ops.conv2d( + concat_5, parameter_284, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_284 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__490, + batch_norm__491, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_85, + parameter_283, + parameter_282, + parameter_281, + parameter_280, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_280, parameter_281, parameter_282, parameter_283 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_62 = paddle._C_ops.swish(batch_norm__486) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_86 = paddle._C_ops.conv2d( + swish_62, parameter_279, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, 
"NCHW" + ) + del parameter_279 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_86, + parameter_278, + parameter_277, + parameter_276, + parameter_275, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_275, parameter_276, parameter_277, parameter_278 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_63 = paddle._C_ops.swish(batch_norm__492) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_87 = paddle._C_ops.conv2d( + swish_63, parameter_274, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_274 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__498, + batch_norm__499, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_87, + parameter_273, + parameter_272, + parameter_271, + parameter_270, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_270, parameter_271, parameter_272, parameter_273 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_88 = paddle._C_ops.conv2d( + swish_63, parameter_269, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_269 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_88, + parameter_268, + parameter_267, + parameter_266, + parameter_265, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_265, parameter_266, parameter_267, parameter_268 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_78 = paddle._C_ops.add(batch_norm__498, batch_norm__504) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_64 = paddle._C_ops.swish(add_78) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) + combine_7 = [swish_56, swish_64] + + # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_6 = paddle._C_ops.concat(combine_7, full_0) + del combine_7 + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) + conv2d_89 = paddle._C_ops.conv2d( + concat_6, parameter_264, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_264 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_89, + 
parameter_263, + parameter_262, + parameter_261, + parameter_260, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_260, parameter_261, parameter_262, parameter_263 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_65 = paddle._C_ops.swish(batch_norm__510) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x768x34x34xf32, 384x768x1x1xf32) + conv2d_90 = paddle._C_ops.conv2d( + swish_65, parameter_259, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_259 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__520, + batch_norm__521, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_90, + parameter_258, + parameter_257, + parameter_256, + parameter_255, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_255, parameter_256, parameter_257, parameter_258 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_66 = paddle._C_ops.swish(batch_norm__516) + + # pd_op.nearest_interp: (1x384x68x68xf32) <- (1x384x34x34xf32, None, None, None) + nearest_interp_0 = paddle._C_ops.nearest_interp( + swish_66, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x384x68x68xf32, 1x512x68x68xf32]) <- (1x384x68x68xf32, 1x512x68x68xf32) + combine_8 = [nearest_interp_0, swish_45] + + # pd_op.concat: (1x896x68x68xf32) <- ([1x384x68x68xf32, 1x512x68x68xf32], 1xi32) + concat_7 = paddle._C_ops.concat(combine_8, full_0) + del combine_8 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x896x68x68xf32, 192x896x1x1xf32) + conv2d_91 = paddle._C_ops.conv2d( + concat_7, parameter_254, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_254 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_91, + parameter_253, + parameter_252, + parameter_251, + parameter_250, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_250, parameter_251, parameter_252, parameter_253 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_67 = paddle._C_ops.swish(batch_norm__522) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x896x68x68xf32, 192x896x1x1xf32) + conv2d_92 = paddle._C_ops.conv2d( + concat_7, parameter_249, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_249 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__528, + batch_norm__529, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_92, + parameter_248, + parameter_247, + parameter_246, + parameter_245, + False, + float("0.9"), + float("1e-05"), + "NCHW", + 
False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_245, parameter_246, parameter_247, parameter_248 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_68 = paddle._C_ops.swish(batch_norm__528) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_93 = paddle._C_ops.conv2d( + swish_68, parameter_244, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_244 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_93, + parameter_243, + parameter_242, + parameter_241, + parameter_240, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_240, parameter_241, parameter_242, parameter_243 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_69 = paddle._C_ops.swish(batch_norm__534) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_94 = paddle._C_ops.conv2d( + swish_69, parameter_239, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_239 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_94, + parameter_238, + parameter_237, + parameter_236, + parameter_235, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_235, parameter_236, parameter_237, parameter_238 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_95 = paddle._C_ops.conv2d( + swish_69, parameter_234, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_234 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__550, + batch_norm__551, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_95, + parameter_233, + parameter_232, + parameter_231, + parameter_230, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_230, parameter_231, parameter_232, parameter_233 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_79 = paddle._C_ops.add(batch_norm__540, batch_norm__546) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_70 = paddle._C_ops.swish(add_79) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_96 = paddle._C_ops.conv2d( + swish_70, parameter_229, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_229 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + 
batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_96, + parameter_228, + parameter_227, + parameter_226, + parameter_225, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_225, parameter_226, parameter_227, parameter_228 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_71 = paddle._C_ops.swish(batch_norm__552) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_97 = paddle._C_ops.conv2d( + swish_71, parameter_224, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_224 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__558, + batch_norm__559, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_97, + parameter_223, + parameter_222, + parameter_221, + parameter_220, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_220, parameter_221, parameter_222, parameter_223 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_98 = paddle._C_ops.conv2d( + swish_71, parameter_219, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_219 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_98, + parameter_218, + parameter_217, + parameter_216, + parameter_215, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_215, parameter_216, parameter_217, parameter_218 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_80 = paddle._C_ops.add(batch_norm__558, batch_norm__564) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_72 = paddle._C_ops.swish(add_80) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_99 = paddle._C_ops.conv2d( + swish_72, parameter_214, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_214 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_99, + parameter_213, + parameter_212, + parameter_211, + parameter_210, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_210, parameter_211, parameter_212, parameter_213 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_73 = paddle._C_ops.swish(batch_norm__570) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 
192x192x3x3xf32) + conv2d_100 = paddle._C_ops.conv2d( + swish_73, parameter_209, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_209 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__580, + batch_norm__581, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_100, + parameter_208, + parameter_207, + parameter_206, + parameter_205, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_205, parameter_206, parameter_207, parameter_208 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_101 = paddle._C_ops.conv2d( + swish_73, parameter_204, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_204 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_101, + parameter_203, + parameter_202, + parameter_201, + parameter_200, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_200, parameter_201, parameter_202, parameter_203 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_81 = paddle._C_ops.add(batch_norm__576, batch_norm__582) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_74 = paddle._C_ops.swish(add_81) + + # builtin.combine: ([1x192x68x68xf32, 1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) + combine_9 = [swish_67, swish_74] + + # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) + concat_8 = paddle._C_ops.concat(combine_9, full_0) + del combine_9 + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) + conv2d_102 = paddle._C_ops.conv2d( + concat_8, parameter_199, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_199 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__588, + batch_norm__589, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_102, + parameter_198, + parameter_197, + parameter_196, + parameter_195, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_195, parameter_196, parameter_197, parameter_198 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_75 = paddle._C_ops.swish(batch_norm__588) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x384x68x68xf32, 192x384x1x1xf32) + conv2d_103 = paddle._C_ops.conv2d( + swish_75, parameter_194, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_194 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__594, + batch_norm__595, + batch_norm__596, + 
batch_norm__597, + batch_norm__598, + batch_norm__599, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_103, + parameter_193, + parameter_192, + parameter_191, + parameter_190, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_190, parameter_191, parameter_192, parameter_193 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_76 = paddle._C_ops.swish(batch_norm__594) + + # pd_op.nearest_interp: (1x192x136x136xf32) <- (1x192x68x68xf32, None, None, None) + nearest_interp_1 = paddle._C_ops.nearest_interp( + swish_76, + None, + None, + None, + "NCHW", + -1, + -1, + -1, + [float("2"), float("2")], + "nearest", + False, + 0, + ) + + # builtin.combine: ([1x192x136x136xf32, 1x256x136x136xf32]) <- (1x192x136x136xf32, 1x256x136x136xf32) + combine_10 = [nearest_interp_1, swish_29] + + # pd_op.concat: (1x448x136x136xf32) <- ([1x192x136x136xf32, 1x256x136x136xf32], 1xi32) + concat_9 = paddle._C_ops.concat(combine_10, full_0) + del combine_10 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x448x136x136xf32, 96x448x1x1xf32) + conv2d_104 = paddle._C_ops.conv2d( + concat_9, parameter_189, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_189 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_104, + parameter_188, + parameter_187, + parameter_186, + parameter_185, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_185, parameter_186, parameter_187, parameter_188 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_77 = paddle._C_ops.swish(batch_norm__600) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x448x136x136xf32, 96x448x1x1xf32) + conv2d_105 = paddle._C_ops.conv2d( + concat_9, parameter_184, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_184 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__610, + batch_norm__611, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_105, + parameter_183, + parameter_182, + parameter_181, + parameter_180, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_180, parameter_181, parameter_182, parameter_183 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_78 = paddle._C_ops.swish(batch_norm__606) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_106 = paddle._C_ops.conv2d( + swish_78, parameter_179, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_179 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + 
conv2d_106, + parameter_178, + parameter_177, + parameter_176, + parameter_175, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_175, parameter_176, parameter_177, parameter_178 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_79 = paddle._C_ops.swish(batch_norm__612) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_107 = paddle._C_ops.conv2d( + swish_79, parameter_174, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_174 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__618, + batch_norm__619, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_107, + parameter_173, + parameter_172, + parameter_171, + parameter_170, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_170, parameter_171, parameter_172, parameter_173 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_108 = paddle._C_ops.conv2d( + swish_79, parameter_169, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_169 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_108, + parameter_168, + parameter_167, + parameter_166, + parameter_165, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_165, parameter_166, parameter_167, parameter_168 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_82 = paddle._C_ops.add(batch_norm__618, batch_norm__624) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_80 = paddle._C_ops.swish(add_82) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_109 = paddle._C_ops.conv2d( + swish_80, parameter_164, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_164 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_109, + parameter_163, + parameter_162, + parameter_161, + parameter_160, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_160, parameter_161, parameter_162, parameter_163 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_81 = paddle._C_ops.swish(batch_norm__630) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_110 = paddle._C_ops.conv2d( + swish_81, parameter_159, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_159 + + # 
pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__640, + batch_norm__641, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_110, + parameter_158, + parameter_157, + parameter_156, + parameter_155, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_155, parameter_156, parameter_157, parameter_158 + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_111 = paddle._C_ops.conv2d( + swish_81, parameter_154, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_154 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_111, + parameter_153, + parameter_152, + parameter_151, + parameter_150, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_150, parameter_151, parameter_152, parameter_153 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_83 = paddle._C_ops.add(batch_norm__636, batch_norm__642) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_82 = paddle._C_ops.swish(add_83) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_112 = paddle._C_ops.conv2d( + swish_82, parameter_149, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_149 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__648, + batch_norm__649, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_112, + parameter_148, + parameter_147, + parameter_146, + parameter_145, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_145, parameter_146, parameter_147, parameter_148 + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_83 = paddle._C_ops.swish(batch_norm__648) + + # pd_op.conv2d: (1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x3x3xf32) + conv2d_113 = paddle._C_ops.conv2d( + swish_83, parameter_144, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_144 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_113, + parameter_143, + parameter_142, + parameter_141, + parameter_140, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_140, parameter_141, parameter_142, parameter_143 + + # pd_op.conv2d: 
(1x96x136x136xf32) <- (1x96x136x136xf32, 96x96x1x1xf32) + conv2d_114 = paddle._C_ops.conv2d( + swish_83, parameter_139, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_139 + + # pd_op.batch_norm_: (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x136x136xf32, 96xf32, 96xf32, 96xf32, 96xf32) + ( + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_114, + parameter_138, + parameter_137, + parameter_136, + parameter_135, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_135, parameter_136, parameter_137, parameter_138 + + # pd_op.add: (1x96x136x136xf32) <- (1x96x136x136xf32, 1x96x136x136xf32) + add_84 = paddle._C_ops.add(batch_norm__654, batch_norm__660) + + # pd_op.swish: (1x96x136x136xf32) <- (1x96x136x136xf32) + swish_84 = paddle._C_ops.swish(add_84) + + # builtin.combine: ([1x96x136x136xf32, 1x96x136x136xf32]) <- (1x96x136x136xf32, 1x96x136x136xf32) + combine_11 = [swish_77, swish_84] + + # pd_op.concat: (1x192x136x136xf32) <- ([1x96x136x136xf32, 1x96x136x136xf32], 1xi32) + concat_10 = paddle._C_ops.concat(combine_11, full_0) + del combine_11 + + # pd_op.conv2d: (1x192x136x136xf32) <- (1x192x136x136xf32, 192x192x1x1xf32) + conv2d_115 = paddle._C_ops.conv2d( + concat_10, parameter_134, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_134 + + # pd_op.batch_norm_: (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x136x136xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__670, + batch_norm__671, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_115, + parameter_133, + parameter_132, + parameter_131, + parameter_130, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_130, parameter_131, parameter_132, parameter_133 + + # pd_op.swish: (1x192x136x136xf32) <- (1x192x136x136xf32) + swish_85 = paddle._C_ops.swish(batch_norm__666) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x136x136xf32, 192x192x3x3xf32) + conv2d_116 = paddle._C_ops.conv2d( + swish_85, parameter_129, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_129 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_116, + parameter_128, + parameter_127, + parameter_126, + parameter_125, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_125, parameter_126, parameter_127, parameter_128 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_86 = paddle._C_ops.swish(batch_norm__672) + + # builtin.combine: ([1x192x68x68xf32, 1x384x68x68xf32]) <- (1x192x68x68xf32, 1x384x68x68xf32) + combine_12 = [swish_86, swish_75] + + # pd_op.concat: (1x576x68x68xf32) <- ([1x192x68x68xf32, 1x384x68x68xf32], 1xi32) + concat_11 = 
paddle._C_ops.concat(combine_12, full_0) + del combine_12 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x576x68x68xf32, 192x576x1x1xf32) + conv2d_117 = paddle._C_ops.conv2d( + concat_11, parameter_124, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_124 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__678, + batch_norm__679, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_117, + parameter_123, + parameter_122, + parameter_121, + parameter_120, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_120, parameter_121, parameter_122, parameter_123 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_87 = paddle._C_ops.swish(batch_norm__678) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x576x68x68xf32, 192x576x1x1xf32) + conv2d_118 = paddle._C_ops.conv2d( + concat_11, parameter_119, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_119 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_118, + parameter_118, + parameter_117, + parameter_116, + parameter_115, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_115, parameter_116, parameter_117, parameter_118 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_88 = paddle._C_ops.swish(batch_norm__684) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_119 = paddle._C_ops.conv2d( + swish_88, parameter_114, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_114 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_119, + parameter_113, + parameter_112, + parameter_111, + parameter_110, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_110, parameter_111, parameter_112, parameter_113 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_89 = paddle._C_ops.swish(batch_norm__690) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_120 = paddle._C_ops.conv2d( + swish_89, parameter_109, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_109 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__700, + batch_norm__701, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_120, + parameter_108, + parameter_107, + parameter_106, + parameter_105, + 
False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_105, parameter_106, parameter_107, parameter_108 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_121 = paddle._C_ops.conv2d( + swish_89, parameter_104, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_104 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_121, + parameter_103, + parameter_102, + parameter_101, + parameter_100, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_100, parameter_101, parameter_102, parameter_103 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_85 = paddle._C_ops.add(batch_norm__696, batch_norm__702) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_90 = paddle._C_ops.swish(add_85) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_122 = paddle._C_ops.conv2d( + swish_90, parameter_99, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_99 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__708, + batch_norm__709, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_122, + parameter_98, + parameter_97, + parameter_96, + parameter_95, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_95, parameter_96, parameter_97, parameter_98 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_91 = paddle._C_ops.swish(batch_norm__708) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_123 = paddle._C_ops.conv2d( + swish_91, parameter_94, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_94 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_123, + parameter_93, + parameter_92, + parameter_91, + parameter_90, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_90, parameter_91, parameter_92, parameter_93 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_124 = paddle._C_ops.conv2d( + swish_91, parameter_89, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_89 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + 
batch_norm__724, + batch_norm__725, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_124, + parameter_88, + parameter_87, + parameter_86, + parameter_85, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_85, parameter_86, parameter_87, parameter_88 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_86 = paddle._C_ops.add(batch_norm__714, batch_norm__720) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_92 = paddle._C_ops.swish(add_86) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_125 = paddle._C_ops.conv2d( + swish_92, parameter_84, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_84 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__730, + batch_norm__731, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_125, + parameter_83, + parameter_82, + parameter_81, + parameter_80, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_80, parameter_81, parameter_82, parameter_83 + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_93 = paddle._C_ops.swish(batch_norm__726) + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x3x3xf32) + conv2d_126 = paddle._C_ops.conv2d( + swish_93, parameter_79, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_79 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_126, + parameter_78, + parameter_77, + parameter_76, + parameter_75, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_75, parameter_76, parameter_77, parameter_78 + + # pd_op.conv2d: (1x192x68x68xf32) <- (1x192x68x68xf32, 192x192x1x1xf32) + conv2d_127 = paddle._C_ops.conv2d( + swish_93, parameter_74, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_74 + + # pd_op.batch_norm_: (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x68x68xf32, 192xf32, 192xf32, 192xf32, 192xf32) + ( + batch_norm__738, + batch_norm__739, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_127, + parameter_73, + parameter_72, + parameter_71, + parameter_70, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_70, parameter_71, parameter_72, parameter_73 + + # pd_op.add: (1x192x68x68xf32) <- (1x192x68x68xf32, 1x192x68x68xf32) + add_87 = paddle._C_ops.add(batch_norm__732, batch_norm__738) + + # pd_op.swish: (1x192x68x68xf32) <- (1x192x68x68xf32) + swish_94 = paddle._C_ops.swish(add_87) + + # builtin.combine: ([1x192x68x68xf32, 
1x192x68x68xf32]) <- (1x192x68x68xf32, 1x192x68x68xf32) + combine_13 = [swish_87, swish_94] + + # pd_op.concat: (1x384x68x68xf32) <- ([1x192x68x68xf32, 1x192x68x68xf32], 1xi32) + concat_12 = paddle._C_ops.concat(combine_13, full_0) + del combine_13 + + # pd_op.conv2d: (1x384x68x68xf32) <- (1x384x68x68xf32, 384x384x1x1xf32) + conv2d_128 = paddle._C_ops.conv2d( + concat_12, parameter_69, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_69 + + # pd_op.batch_norm_: (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x68x68xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_128, + parameter_68, + parameter_67, + parameter_66, + parameter_65, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_65, parameter_66, parameter_67, parameter_68 + + # pd_op.swish: (1x384x68x68xf32) <- (1x384x68x68xf32) + swish_95 = paddle._C_ops.swish(batch_norm__744) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x68x68xf32, 384x384x3x3xf32) + conv2d_129 = paddle._C_ops.conv2d( + swish_95, parameter_64, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_64 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_129, + parameter_63, + parameter_62, + parameter_61, + parameter_60, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_60, parameter_61, parameter_62, parameter_63 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_96 = paddle._C_ops.swish(batch_norm__750) + + # builtin.combine: ([1x384x34x34xf32, 1x768x34x34xf32]) <- (1x384x34x34xf32, 1x768x34x34xf32) + combine_14 = [swish_96, swish_65] + + # pd_op.concat: (1x1152x34x34xf32) <- ([1x384x34x34xf32, 1x768x34x34xf32], 1xi32) + concat_13 = paddle._C_ops.concat(combine_14, full_0) + del combine_14 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1152x34x34xf32, 384x1152x1x1xf32) + conv2d_130 = paddle._C_ops.conv2d( + concat_13, parameter_59, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_59 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__760, + batch_norm__761, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_130, + parameter_58, + parameter_57, + parameter_56, + parameter_55, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_55, parameter_56, parameter_57, parameter_58 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_97 = paddle._C_ops.swish(batch_norm__756) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x1152x34x34xf32, 384x1152x1x1xf32) + conv2d_131 = paddle._C_ops.conv2d( + concat_13, parameter_54, [1, 1], [0, 0], 
"EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_54 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_131, + parameter_53, + parameter_52, + parameter_51, + parameter_50, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_50, parameter_51, parameter_52, parameter_53 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_98 = paddle._C_ops.swish(batch_norm__762) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_132 = paddle._C_ops.conv2d( + swish_98, parameter_49, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_49 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__768, + batch_norm__769, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_132, + parameter_48, + parameter_47, + parameter_46, + parameter_45, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_45, parameter_46, parameter_47, parameter_48 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_99 = paddle._C_ops.swish(batch_norm__768) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_133 = paddle._C_ops.conv2d( + swish_99, parameter_44, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_44 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_133, + parameter_43, + parameter_42, + parameter_41, + parameter_40, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_40, parameter_41, parameter_42, parameter_43 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_134 = paddle._C_ops.conv2d( + swish_99, parameter_39, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_39 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_134, + parameter_38, + parameter_37, + parameter_36, + parameter_35, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_35, parameter_36, parameter_37, parameter_38 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_88 = paddle._C_ops.add(batch_norm__774, 
batch_norm__780) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_100 = paddle._C_ops.swish(add_88) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_135 = paddle._C_ops.conv2d( + swish_100, parameter_34, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_34 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__790, + batch_norm__791, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_135, + parameter_33, + parameter_32, + parameter_31, + parameter_30, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_30, parameter_31, parameter_32, parameter_33 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_101 = paddle._C_ops.swish(batch_norm__786) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_136 = paddle._C_ops.conv2d( + swish_101, parameter_29, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_29 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_136, + parameter_28, + parameter_27, + parameter_26, + parameter_25, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_25, parameter_26, parameter_27, parameter_28 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_137 = paddle._C_ops.conv2d( + swish_101, parameter_24, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_24 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__798, + batch_norm__799, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_137, + parameter_23, + parameter_22, + parameter_21, + parameter_20, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_20, parameter_21, parameter_22, parameter_23 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_89 = paddle._C_ops.add(batch_norm__792, batch_norm__798) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_102 = paddle._C_ops.swish(add_89) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_138 = paddle._C_ops.conv2d( + swish_102, parameter_19, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_138, + parameter_18, + 
parameter_17, + parameter_16, + parameter_15, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_15, parameter_16, parameter_17, parameter_18 + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_103 = paddle._C_ops.swish(batch_norm__804) + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x3x3xf32) + conv2d_139 = paddle._C_ops.conv2d( + swish_103, parameter_14, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_14 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_139, + parameter_13, + parameter_12, + parameter_11, + parameter_10, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_10, parameter_11, parameter_12, parameter_13 + + # pd_op.conv2d: (1x384x34x34xf32) <- (1x384x34x34xf32, 384x384x1x1xf32) + conv2d_140 = paddle._C_ops.conv2d( + swish_103, parameter_9, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 + + # pd_op.batch_norm_: (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x34x34xf32, 384xf32, 384xf32, 384xf32, 384xf32) + ( + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__820, + batch_norm__821, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_140, + parameter_8, + parameter_7, + parameter_6, + parameter_5, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_5, parameter_6, parameter_7, parameter_8 + + # pd_op.add: (1x384x34x34xf32) <- (1x384x34x34xf32, 1x384x34x34xf32) + add_90 = paddle._C_ops.add(batch_norm__810, batch_norm__816) + + # pd_op.swish: (1x384x34x34xf32) <- (1x384x34x34xf32) + swish_104 = paddle._C_ops.swish(add_90) + + # builtin.combine: ([1x384x34x34xf32, 1x384x34x34xf32]) <- (1x384x34x34xf32, 1x384x34x34xf32) + combine_15 = [swish_97, swish_104] + + # pd_op.concat: (1x768x34x34xf32) <- ([1x384x34x34xf32, 1x384x34x34xf32], 1xi32) + concat_14 = paddle._C_ops.concat(combine_15, full_0) + del combine_15 + + # pd_op.conv2d: (1x768x34x34xf32) <- (1x768x34x34xf32, 768x768x1x1xf32) + conv2d_141 = paddle._C_ops.conv2d( + concat_14, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_4 + + # pd_op.batch_norm_: (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x34x34xf32, 768xf32, 768xf32, 768xf32, 768xf32) + ( + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + ) = (lambda x, f: f(x))( + paddle._C_ops.batch_norm( + conv2d_141, + parameter_3, + parameter_2, + parameter_1, + parameter_0, + False, + float("0.9"), + float("1e-05"), + "NCHW", + False, + False, + ), + lambda out: out + if isinstance(out, (list, tuple)) + else (out, None, None, None, None, None), + ) + del parameter_0, parameter_1, parameter_2, parameter_3 + + # pd_op.swish: (1x768x34x34xf32) <- (1x768x34x34xf32) + swish_0 = paddle._C_ops.swish(batch_norm__822) + del ( + add_0, 
+ add_1, + add_10, + add_11, + add_12, + add_13, + add_14, + add_15, + add_16, + add_17, + add_18, + add_2, + add_20, + add_21, + add_22, + add_23, + add_24, + add_25, + add_26, + add_27, + add_28, + add_29, + add_3, + add_30, + add_31, + add_33, + add_34, + add_35, + add_36, + add_37, + add_38, + add_4, + add_40, + add_41, + add_42, + add_43, + add_45, + add_46, + add_48, + add_49, + add_5, + add_50, + add_51, + add_52, + add_54, + add_55, + add_57, + add_58, + add_59, + add_60, + add_61, + add_63, + add_64, + add_66, + add_67, + add_68, + add_69, + add_7, + add_70, + add_72, + add_73, + add_75, + add_76, + add_77, + add_78, + add_79, + add_8, + add_80, + add_81, + add_82, + add_83, + add_84, + add_85, + add_86, + add_87, + add_88, + add_89, + add_9, + add_90, + assign_0, + assign_1, + assign_10, + assign_11, + assign_12, + assign_13, + assign_14, + assign_15, + assign_16, + assign_17, + assign_18, + assign_19, + assign_2, + assign_20, + assign_21, + assign_22, + assign_23, + assign_24, + assign_25, + assign_26, + assign_27, + assign_28, + assign_29, + assign_3, + assign_30, + assign_31, + assign_32, + assign_33, + assign_34, + assign_35, + assign_36, + assign_37, + assign_38, + assign_39, + assign_4, + assign_40, + assign_41, + assign_42, + assign_43, + assign_44, + assign_45, + assign_46, + assign_47, + assign_48, + assign_49, + assign_5, + assign_50, + assign_51, + assign_52, + assign_53, + assign_54, + assign_55, + assign_56, + assign_57, + assign_58, + assign_59, + assign_6, + assign_60, + assign_61, + assign_62, + assign_63, + assign_64, + assign_65, + assign_66, + assign_67, + assign_68, + assign_69, + assign_7, + assign_70, + assign_71, + assign_72, + assign_73, + assign_74, + assign_75, + assign_76, + assign_77, + assign_78, + assign_8, + assign_9, + batch_norm__0, + batch_norm__1, + batch_norm__10, + batch_norm__100, + batch_norm__101, + batch_norm__102, + batch_norm__103, + batch_norm__104, + batch_norm__105, + batch_norm__106, + batch_norm__107, + batch_norm__108, + batch_norm__109, + batch_norm__11, + batch_norm__110, + batch_norm__111, + batch_norm__112, + batch_norm__113, + batch_norm__114, + batch_norm__115, + batch_norm__116, + batch_norm__117, + batch_norm__118, + batch_norm__119, + batch_norm__12, + batch_norm__120, + batch_norm__121, + batch_norm__122, + batch_norm__123, + batch_norm__124, + batch_norm__125, + batch_norm__126, + batch_norm__127, + batch_norm__128, + batch_norm__129, + batch_norm__13, + batch_norm__130, + batch_norm__131, + batch_norm__132, + batch_norm__133, + batch_norm__134, + batch_norm__135, + batch_norm__136, + batch_norm__137, + batch_norm__138, + batch_norm__139, + batch_norm__14, + batch_norm__140, + batch_norm__141, + batch_norm__142, + batch_norm__143, + batch_norm__144, + batch_norm__145, + batch_norm__146, + batch_norm__147, + batch_norm__148, + batch_norm__149, + batch_norm__15, + batch_norm__150, + batch_norm__151, + batch_norm__152, + batch_norm__153, + batch_norm__154, + batch_norm__155, + batch_norm__156, + batch_norm__157, + batch_norm__158, + batch_norm__159, + batch_norm__16, + batch_norm__160, + batch_norm__161, + batch_norm__162, + batch_norm__163, + batch_norm__164, + batch_norm__165, + batch_norm__166, + batch_norm__167, + batch_norm__168, + batch_norm__169, + batch_norm__17, + batch_norm__170, + batch_norm__171, + batch_norm__172, + batch_norm__173, + batch_norm__174, + batch_norm__175, + batch_norm__176, + batch_norm__177, + batch_norm__178, + batch_norm__179, + batch_norm__18, + batch_norm__180, + batch_norm__181, + 
batch_norm__182, + batch_norm__183, + batch_norm__184, + batch_norm__185, + batch_norm__186, + batch_norm__187, + batch_norm__188, + batch_norm__189, + batch_norm__19, + batch_norm__190, + batch_norm__191, + batch_norm__192, + batch_norm__193, + batch_norm__194, + batch_norm__195, + batch_norm__196, + batch_norm__197, + batch_norm__198, + batch_norm__199, + batch_norm__2, + batch_norm__20, + batch_norm__200, + batch_norm__201, + batch_norm__202, + batch_norm__203, + batch_norm__204, + batch_norm__205, + batch_norm__206, + batch_norm__207, + batch_norm__208, + batch_norm__209, + batch_norm__21, + batch_norm__210, + batch_norm__211, + batch_norm__212, + batch_norm__213, + batch_norm__214, + batch_norm__215, + batch_norm__216, + batch_norm__217, + batch_norm__218, + batch_norm__219, + batch_norm__22, + batch_norm__220, + batch_norm__221, + batch_norm__222, + batch_norm__223, + batch_norm__224, + batch_norm__225, + batch_norm__226, + batch_norm__227, + batch_norm__228, + batch_norm__229, + batch_norm__23, + batch_norm__230, + batch_norm__231, + batch_norm__232, + batch_norm__233, + batch_norm__234, + batch_norm__235, + batch_norm__236, + batch_norm__237, + batch_norm__238, + batch_norm__239, + batch_norm__24, + batch_norm__240, + batch_norm__241, + batch_norm__242, + batch_norm__243, + batch_norm__244, + batch_norm__245, + batch_norm__246, + batch_norm__247, + batch_norm__248, + batch_norm__249, + batch_norm__25, + batch_norm__250, + batch_norm__251, + batch_norm__252, + batch_norm__253, + batch_norm__254, + batch_norm__255, + batch_norm__256, + batch_norm__257, + batch_norm__258, + batch_norm__259, + batch_norm__26, + batch_norm__260, + batch_norm__261, + batch_norm__262, + batch_norm__263, + batch_norm__264, + batch_norm__265, + batch_norm__266, + batch_norm__267, + batch_norm__268, + batch_norm__269, + batch_norm__27, + batch_norm__270, + batch_norm__271, + batch_norm__272, + batch_norm__273, + batch_norm__274, + batch_norm__275, + batch_norm__276, + batch_norm__277, + batch_norm__278, + batch_norm__279, + batch_norm__28, + batch_norm__280, + batch_norm__281, + batch_norm__282, + batch_norm__283, + batch_norm__284, + batch_norm__285, + batch_norm__286, + batch_norm__287, + batch_norm__288, + batch_norm__289, + batch_norm__29, + batch_norm__290, + batch_norm__291, + batch_norm__292, + batch_norm__293, + batch_norm__294, + batch_norm__295, + batch_norm__296, + batch_norm__297, + batch_norm__298, + batch_norm__299, + batch_norm__3, + batch_norm__30, + batch_norm__300, + batch_norm__301, + batch_norm__302, + batch_norm__303, + batch_norm__304, + batch_norm__305, + batch_norm__306, + batch_norm__307, + batch_norm__308, + batch_norm__309, + batch_norm__31, + batch_norm__310, + batch_norm__311, + batch_norm__312, + batch_norm__313, + batch_norm__314, + batch_norm__315, + batch_norm__316, + batch_norm__317, + batch_norm__318, + batch_norm__319, + batch_norm__32, + batch_norm__320, + batch_norm__321, + batch_norm__322, + batch_norm__323, + batch_norm__324, + batch_norm__325, + batch_norm__326, + batch_norm__327, + batch_norm__328, + batch_norm__329, + batch_norm__33, + batch_norm__330, + batch_norm__331, + batch_norm__332, + batch_norm__333, + batch_norm__334, + batch_norm__335, + batch_norm__336, + batch_norm__337, + batch_norm__338, + batch_norm__339, + batch_norm__34, + batch_norm__340, + batch_norm__341, + batch_norm__342, + batch_norm__343, + batch_norm__344, + batch_norm__345, + batch_norm__346, + batch_norm__347, + batch_norm__348, + batch_norm__349, + batch_norm__35, + batch_norm__350, + 
batch_norm__351, + batch_norm__352, + batch_norm__353, + batch_norm__354, + batch_norm__355, + batch_norm__356, + batch_norm__357, + batch_norm__358, + batch_norm__359, + batch_norm__36, + batch_norm__360, + batch_norm__361, + batch_norm__362, + batch_norm__363, + batch_norm__364, + batch_norm__365, + batch_norm__366, + batch_norm__367, + batch_norm__368, + batch_norm__369, + batch_norm__37, + batch_norm__370, + batch_norm__371, + batch_norm__372, + batch_norm__373, + batch_norm__374, + batch_norm__375, + batch_norm__376, + batch_norm__377, + batch_norm__378, + batch_norm__379, + batch_norm__38, + batch_norm__380, + batch_norm__381, + batch_norm__382, + batch_norm__383, + batch_norm__384, + batch_norm__385, + batch_norm__386, + batch_norm__387, + batch_norm__388, + batch_norm__389, + batch_norm__39, + batch_norm__390, + batch_norm__391, + batch_norm__392, + batch_norm__393, + batch_norm__394, + batch_norm__395, + batch_norm__396, + batch_norm__397, + batch_norm__398, + batch_norm__399, + batch_norm__4, + batch_norm__40, + batch_norm__400, + batch_norm__401, + batch_norm__402, + batch_norm__403, + batch_norm__404, + batch_norm__405, + batch_norm__406, + batch_norm__407, + batch_norm__408, + batch_norm__409, + batch_norm__41, + batch_norm__410, + batch_norm__411, + batch_norm__412, + batch_norm__413, + batch_norm__414, + batch_norm__415, + batch_norm__416, + batch_norm__417, + batch_norm__418, + batch_norm__419, + batch_norm__42, + batch_norm__420, + batch_norm__421, + batch_norm__422, + batch_norm__423, + batch_norm__424, + batch_norm__425, + batch_norm__426, + batch_norm__427, + batch_norm__428, + batch_norm__429, + batch_norm__43, + batch_norm__430, + batch_norm__431, + batch_norm__432, + batch_norm__433, + batch_norm__434, + batch_norm__435, + batch_norm__436, + batch_norm__437, + batch_norm__438, + batch_norm__439, + batch_norm__44, + batch_norm__440, + batch_norm__441, + batch_norm__442, + batch_norm__443, + batch_norm__444, + batch_norm__445, + batch_norm__446, + batch_norm__447, + batch_norm__448, + batch_norm__449, + batch_norm__45, + batch_norm__450, + batch_norm__451, + batch_norm__452, + batch_norm__453, + batch_norm__454, + batch_norm__455, + batch_norm__456, + batch_norm__457, + batch_norm__458, + batch_norm__459, + batch_norm__46, + batch_norm__460, + batch_norm__461, + batch_norm__462, + batch_norm__463, + batch_norm__464, + batch_norm__465, + batch_norm__466, + batch_norm__467, + batch_norm__468, + batch_norm__469, + batch_norm__47, + batch_norm__470, + batch_norm__471, + batch_norm__472, + batch_norm__473, + batch_norm__474, + batch_norm__475, + batch_norm__476, + batch_norm__477, + batch_norm__478, + batch_norm__479, + batch_norm__48, + batch_norm__480, + batch_norm__481, + batch_norm__482, + batch_norm__483, + batch_norm__484, + batch_norm__485, + batch_norm__486, + batch_norm__487, + batch_norm__488, + batch_norm__489, + batch_norm__49, + batch_norm__490, + batch_norm__491, + batch_norm__492, + batch_norm__493, + batch_norm__494, + batch_norm__495, + batch_norm__496, + batch_norm__497, + batch_norm__498, + batch_norm__499, + batch_norm__5, + batch_norm__50, + batch_norm__500, + batch_norm__501, + batch_norm__502, + batch_norm__503, + batch_norm__504, + batch_norm__505, + batch_norm__506, + batch_norm__507, + batch_norm__508, + batch_norm__509, + batch_norm__51, + batch_norm__510, + batch_norm__511, + batch_norm__512, + batch_norm__513, + batch_norm__514, + batch_norm__515, + batch_norm__516, + batch_norm__517, + batch_norm__518, + batch_norm__519, + batch_norm__52, + 
batch_norm__520, + batch_norm__521, + batch_norm__522, + batch_norm__523, + batch_norm__524, + batch_norm__525, + batch_norm__526, + batch_norm__527, + batch_norm__528, + batch_norm__529, + batch_norm__53, + batch_norm__530, + batch_norm__531, + batch_norm__532, + batch_norm__533, + batch_norm__534, + batch_norm__535, + batch_norm__536, + batch_norm__537, + batch_norm__538, + batch_norm__539, + batch_norm__54, + batch_norm__540, + batch_norm__541, + batch_norm__542, + batch_norm__543, + batch_norm__544, + batch_norm__545, + batch_norm__546, + batch_norm__547, + batch_norm__548, + batch_norm__549, + batch_norm__55, + batch_norm__550, + batch_norm__551, + batch_norm__552, + batch_norm__553, + batch_norm__554, + batch_norm__555, + batch_norm__556, + batch_norm__557, + batch_norm__558, + batch_norm__559, + batch_norm__56, + batch_norm__560, + batch_norm__561, + batch_norm__562, + batch_norm__563, + batch_norm__564, + batch_norm__565, + batch_norm__566, + batch_norm__567, + batch_norm__568, + batch_norm__569, + batch_norm__57, + batch_norm__570, + batch_norm__571, + batch_norm__572, + batch_norm__573, + batch_norm__574, + batch_norm__575, + batch_norm__576, + batch_norm__577, + batch_norm__578, + batch_norm__579, + batch_norm__58, + batch_norm__580, + batch_norm__581, + batch_norm__582, + batch_norm__583, + batch_norm__584, + batch_norm__585, + batch_norm__586, + batch_norm__587, + batch_norm__588, + batch_norm__589, + batch_norm__59, + batch_norm__590, + batch_norm__591, + batch_norm__592, + batch_norm__593, + batch_norm__594, + batch_norm__595, + batch_norm__596, + batch_norm__597, + batch_norm__598, + batch_norm__599, + batch_norm__6, + batch_norm__60, + batch_norm__600, + batch_norm__601, + batch_norm__602, + batch_norm__603, + batch_norm__604, + batch_norm__605, + batch_norm__606, + batch_norm__607, + batch_norm__608, + batch_norm__609, + batch_norm__61, + batch_norm__610, + batch_norm__611, + batch_norm__612, + batch_norm__613, + batch_norm__614, + batch_norm__615, + batch_norm__616, + batch_norm__617, + batch_norm__618, + batch_norm__619, + batch_norm__62, + batch_norm__620, + batch_norm__621, + batch_norm__622, + batch_norm__623, + batch_norm__624, + batch_norm__625, + batch_norm__626, + batch_norm__627, + batch_norm__628, + batch_norm__629, + batch_norm__63, + batch_norm__630, + batch_norm__631, + batch_norm__632, + batch_norm__633, + batch_norm__634, + batch_norm__635, + batch_norm__636, + batch_norm__637, + batch_norm__638, + batch_norm__639, + batch_norm__64, + batch_norm__640, + batch_norm__641, + batch_norm__642, + batch_norm__643, + batch_norm__644, + batch_norm__645, + batch_norm__646, + batch_norm__647, + batch_norm__648, + batch_norm__649, + batch_norm__65, + batch_norm__650, + batch_norm__651, + batch_norm__652, + batch_norm__653, + batch_norm__654, + batch_norm__655, + batch_norm__656, + batch_norm__657, + batch_norm__658, + batch_norm__659, + batch_norm__66, + batch_norm__660, + batch_norm__661, + batch_norm__662, + batch_norm__663, + batch_norm__664, + batch_norm__665, + batch_norm__666, + batch_norm__667, + batch_norm__668, + batch_norm__669, + batch_norm__67, + batch_norm__670, + batch_norm__671, + batch_norm__672, + batch_norm__673, + batch_norm__674, + batch_norm__675, + batch_norm__676, + batch_norm__677, + batch_norm__678, + batch_norm__679, + batch_norm__68, + batch_norm__680, + batch_norm__681, + batch_norm__682, + batch_norm__683, + batch_norm__684, + batch_norm__685, + batch_norm__686, + batch_norm__687, + batch_norm__688, + batch_norm__689, + batch_norm__69, + 
batch_norm__690, + batch_norm__691, + batch_norm__692, + batch_norm__693, + batch_norm__694, + batch_norm__695, + batch_norm__696, + batch_norm__697, + batch_norm__698, + batch_norm__699, + batch_norm__7, + batch_norm__70, + batch_norm__700, + batch_norm__701, + batch_norm__702, + batch_norm__703, + batch_norm__704, + batch_norm__705, + batch_norm__706, + batch_norm__707, + batch_norm__708, + batch_norm__709, + batch_norm__71, + batch_norm__710, + batch_norm__711, + batch_norm__712, + batch_norm__713, + batch_norm__714, + batch_norm__715, + batch_norm__716, + batch_norm__717, + batch_norm__718, + batch_norm__719, + batch_norm__72, + batch_norm__720, + batch_norm__721, + batch_norm__722, + batch_norm__723, + batch_norm__724, + batch_norm__725, + batch_norm__726, + batch_norm__727, + batch_norm__728, + batch_norm__729, + batch_norm__73, + batch_norm__730, + batch_norm__731, + batch_norm__732, + batch_norm__733, + batch_norm__734, + batch_norm__735, + batch_norm__736, + batch_norm__737, + batch_norm__738, + batch_norm__739, + batch_norm__74, + batch_norm__740, + batch_norm__741, + batch_norm__742, + batch_norm__743, + batch_norm__744, + batch_norm__745, + batch_norm__746, + batch_norm__747, + batch_norm__748, + batch_norm__749, + batch_norm__75, + batch_norm__750, + batch_norm__751, + batch_norm__752, + batch_norm__753, + batch_norm__754, + batch_norm__755, + batch_norm__756, + batch_norm__757, + batch_norm__758, + batch_norm__759, + batch_norm__76, + batch_norm__760, + batch_norm__761, + batch_norm__762, + batch_norm__763, + batch_norm__764, + batch_norm__765, + batch_norm__766, + batch_norm__767, + batch_norm__768, + batch_norm__769, + batch_norm__77, + batch_norm__770, + batch_norm__771, + batch_norm__772, + batch_norm__773, + batch_norm__774, + batch_norm__775, + batch_norm__776, + batch_norm__777, + batch_norm__778, + batch_norm__779, + batch_norm__78, + batch_norm__780, + batch_norm__781, + batch_norm__782, + batch_norm__783, + batch_norm__784, + batch_norm__785, + batch_norm__786, + batch_norm__787, + batch_norm__788, + batch_norm__789, + batch_norm__79, + batch_norm__790, + batch_norm__791, + batch_norm__792, + batch_norm__793, + batch_norm__794, + batch_norm__795, + batch_norm__796, + batch_norm__797, + batch_norm__798, + batch_norm__799, + batch_norm__8, + batch_norm__80, + batch_norm__800, + batch_norm__801, + batch_norm__802, + batch_norm__803, + batch_norm__804, + batch_norm__805, + batch_norm__806, + batch_norm__807, + batch_norm__808, + batch_norm__809, + batch_norm__81, + batch_norm__810, + batch_norm__811, + batch_norm__812, + batch_norm__813, + batch_norm__814, + batch_norm__815, + batch_norm__816, + batch_norm__817, + batch_norm__818, + batch_norm__819, + batch_norm__82, + batch_norm__820, + batch_norm__821, + batch_norm__822, + batch_norm__823, + batch_norm__824, + batch_norm__825, + batch_norm__826, + batch_norm__827, + batch_norm__83, + batch_norm__84, + batch_norm__85, + batch_norm__86, + batch_norm__87, + batch_norm__88, + batch_norm__89, + batch_norm__9, + batch_norm__90, + batch_norm__91, + batch_norm__92, + batch_norm__93, + batch_norm__94, + batch_norm__95, + batch_norm__96, + batch_norm__97, + batch_norm__98, + batch_norm__99, + concat_0, + concat_1, + concat_10, + concat_11, + concat_12, + concat_13, + concat_14, + concat_2, + concat_3, + concat_5, + concat_6, + concat_7, + concat_8, + concat_9, + conv2d_0, + conv2d_1, + conv2d_10, + conv2d_100, + conv2d_101, + conv2d_102, + conv2d_103, + conv2d_104, + conv2d_105, + conv2d_106, + conv2d_107, + conv2d_108, + 
conv2d_109, + conv2d_11, + conv2d_110, + conv2d_111, + conv2d_112, + conv2d_113, + conv2d_114, + conv2d_115, + conv2d_116, + conv2d_117, + conv2d_118, + conv2d_119, + conv2d_12, + conv2d_120, + conv2d_121, + conv2d_122, + conv2d_123, + conv2d_124, + conv2d_125, + conv2d_126, + conv2d_127, + conv2d_128, + conv2d_129, + conv2d_13, + conv2d_130, + conv2d_131, + conv2d_132, + conv2d_133, + conv2d_134, + conv2d_135, + conv2d_136, + conv2d_137, + conv2d_138, + conv2d_139, + conv2d_14, + conv2d_140, + conv2d_141, + conv2d_15, + conv2d_16, + conv2d_17, + conv2d_18, + conv2d_19, + conv2d_2, + conv2d_20, + conv2d_21, + conv2d_22, + conv2d_23, + conv2d_24, + conv2d_25, + conv2d_26, + conv2d_27, + conv2d_28, + conv2d_29, + conv2d_3, + conv2d_30, + conv2d_31, + conv2d_32, + conv2d_33, + conv2d_34, + conv2d_35, + conv2d_36, + conv2d_37, + conv2d_38, + conv2d_39, + conv2d_4, + conv2d_40, + conv2d_41, + conv2d_42, + conv2d_43, + conv2d_44, + conv2d_45, + conv2d_46, + conv2d_47, + conv2d_48, + conv2d_49, + conv2d_5, + conv2d_50, + conv2d_51, + conv2d_52, + conv2d_53, + conv2d_54, + conv2d_55, + conv2d_56, + conv2d_57, + conv2d_58, + conv2d_59, + conv2d_6, + conv2d_60, + conv2d_61, + conv2d_62, + conv2d_63, + conv2d_64, + conv2d_65, + conv2d_66, + conv2d_67, + conv2d_68, + conv2d_69, + conv2d_7, + conv2d_70, + conv2d_71, + conv2d_72, + conv2d_73, + conv2d_74, + conv2d_75, + conv2d_76, + conv2d_77, + conv2d_78, + conv2d_79, + conv2d_8, + conv2d_80, + conv2d_81, + conv2d_82, + conv2d_83, + conv2d_84, + conv2d_85, + conv2d_86, + conv2d_87, + conv2d_88, + conv2d_89, + conv2d_9, + conv2d_90, + conv2d_91, + conv2d_92, + conv2d_93, + conv2d_94, + conv2d_95, + conv2d_96, + conv2d_97, + conv2d_98, + conv2d_99, + dropout_0, + dropout_1, + dropout_10, + dropout_11, + dropout_12, + dropout_13, + dropout_14, + dropout_15, + dropout_16, + dropout_17, + dropout_18, + dropout_19, + dropout_2, + dropout_20, + dropout_21, + dropout_22, + dropout_23, + dropout_24, + dropout_25, + dropout_26, + dropout_27, + dropout_28, + dropout_29, + dropout_3, + dropout_30, + dropout_31, + dropout_4, + dropout_5, + dropout_6, + dropout_7, + dropout_8, + dropout_9, + full_0, + full_8, + full_9, + full_int_array_0, + full_int_array_10, + full_int_array_11, + full_int_array_12, + full_int_array_4, + full_int_array_6, + full_int_array_7, + hardsigmoid_0, + hardsigmoid_1, + hardsigmoid_2, + hardsigmoid_3, + layer_norm_0, + layer_norm_1, + layer_norm_10, + layer_norm_11, + layer_norm_12, + layer_norm_13, + layer_norm_14, + layer_norm_15, + layer_norm_16, + layer_norm_17, + layer_norm_18, + layer_norm_19, + layer_norm_2, + layer_norm_20, + layer_norm_22, + layer_norm_23, + layer_norm_3, + layer_norm_4, + layer_norm_5, + layer_norm_6, + layer_norm_7, + layer_norm_8, + layer_norm_9, + matmul_10, + matmul_11, + matmul_12, + matmul_15, + matmul_16, + matmul_17, + matmul_18, + matmul_19, + matmul_2, + matmul_20, + matmul_23, + matmul_24, + matmul_25, + matmul_26, + matmul_27, + matmul_28, + matmul_3, + matmul_31, + matmul_32, + matmul_33, + matmul_4, + matmul_7, + matmul_8, + matmul_9, + mean_0, + mean_1, + mean_2, + mean_3, + multiply_0, + multiply_1, + multiply_10, + multiply_11, + multiply_12, + multiply_13, + multiply_14, + multiply_15, + multiply_16, + multiply_17, + multiply_18, + multiply_19, + multiply_2, + multiply_20, + multiply_21, + multiply_3, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + multiply_8, + multiply_9, + nearest_interp_0, + nearest_interp_1, + pool2d_0, + pool2d_1, + pool2d_2, + reshape_0, + reshape_1, + reshape_11, 
+ reshape_15, + reshape_19, + reshape_2, + reshape_20, + reshape_3, + reshape_7, + slice_0, + slice_1, + slice_10, + slice_11, + slice_12, + slice_13, + slice_14, + slice_15, + slice_16, + slice_17, + slice_18, + slice_19, + slice_2, + slice_20, + slice_21, + slice_22, + slice_23, + slice_3, + slice_4, + slice_5, + slice_6, + slice_7, + slice_8, + slice_9, + softmax_0, + softmax_1, + softmax_2, + softmax_3, + swish_1, + swish_10, + swish_100, + swish_101, + swish_102, + swish_103, + swish_104, + swish_11, + swish_12, + swish_13, + swish_14, + swish_15, + swish_16, + swish_17, + swish_18, + swish_19, + swish_2, + swish_20, + swish_21, + swish_22, + swish_23, + swish_24, + swish_25, + swish_26, + swish_27, + swish_28, + swish_29, + swish_3, + swish_30, + swish_31, + swish_32, + swish_33, + swish_34, + swish_35, + swish_36, + swish_37, + swish_38, + swish_39, + swish_4, + swish_40, + swish_41, + swish_42, + swish_43, + swish_44, + swish_45, + swish_46, + swish_47, + swish_48, + swish_49, + swish_5, + swish_50, + swish_51, + swish_52, + swish_53, + swish_54, + swish_55, + swish_56, + swish_57, + swish_58, + swish_59, + swish_6, + swish_60, + swish_61, + swish_62, + swish_63, + swish_64, + swish_65, + swish_66, + swish_67, + swish_68, + swish_69, + swish_7, + swish_70, + swish_71, + swish_72, + swish_73, + swish_74, + swish_75, + swish_76, + swish_77, + swish_78, + swish_79, + swish_8, + swish_80, + swish_81, + swish_82, + swish_83, + swish_84, + swish_85, + swish_86, + swish_87, + swish_88, + swish_89, + swish_9, + swish_90, + swish_91, + swish_92, + swish_93, + swish_94, + swish_95, + swish_96, + swish_97, + swish_98, + swish_99, + transpose_0, + transpose_1, + transpose_10, + transpose_11, + transpose_12, + transpose_13, + transpose_14, + transpose_15, + transpose_16, + transpose_17, + transpose_2, + transpose_3, + transpose_4, + transpose_5, + transpose_6, + transpose_7, + transpose_8, + transpose_9, + unsqueeze_3, + ) - return reshape_0, multiply_0 + return swish_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py index 8b1378917..7bb3e9a3b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_6/weight_meta.py @@ -1 +1,8004 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [768] + dtype = "float32" + min_val = float("-0.175875") + max_val = float("0.210823") + mean = float("0.0834695") + std = float("0.0566098") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [768] + dtype = "float32" + min_val = float("0.939895") + max_val = float("1.29826") + mean = float("1.06397") + std = float("0.0312259") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [768] + dtype = "float32" + min_val = float("0.00117681") + max_val = float("0.0493074") + mean = float("0.00765869") + std = float("0.0046166") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [768] + dtype = "float32" + min_val = float("-0.13338") + max_val = float("0.0572232") + mean = float("-0.0286564") + std = float("0.0288427") + data = None + + +class Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0548133") + max_val = float("0.0388089") + mean = float("-0.000154658") + std = 
float("0.00249634") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [384] + dtype = "float32" + min_val = float("-0.14169") + max_val = float("0.0305817") + mean = float("-0.0188052") + std = float("0.0234504") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [384] + dtype = "float32" + min_val = float("0.945748") + max_val = float("1.04442") + mean = float("0.98666") + std = float("0.0105852") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [384] + dtype = "float32" + min_val = float("0.000824752") + max_val = float("0.0183885") + mean = float("0.00496541") + std = float("0.0033442") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [384] + dtype = "float32" + min_val = float("-0.0551455") + max_val = float("0.0628843") + mean = float("0.00283053") + std = float("0.0222857") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0299324") + max_val = float("0.020664") + mean = float("2.29272e-05") + std = float("0.00192338") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [384] + dtype = "float32" + min_val = float("-0.14169") + max_val = float("0.0305817") + mean = float("-0.0188052") + std = float("0.0234504") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [384] + dtype = "float32" + min_val = float("0.968039") + max_val = float("1.13059") + mean = float("1.01544") + std = float("0.0171846") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [384] + dtype = "float32" + min_val = float("0.00204949") + max_val = float("0.0543081") + mean = float("0.00785702") + std = float("0.00475222") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [384] + dtype = "float32" + min_val = float("-0.203105") + max_val = float("0.152445") + mean = float("-0.0430598") + std = float("0.036306") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.029908") + max_val = float("0.035511") + mean = float("-7.29867e-05") + std = float("0.00131195") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [384] + dtype = "float32" + min_val = float("-0.170219") + max_val = float("0.0209993") + mean = float("-0.0348873") + std = float("0.0279313") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [384] + dtype = "float32" + min_val = float("0.975222") + max_val = float("1.12587") + mean = float("1.015") + std = float("0.0240805") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [384] + dtype = "float32" + min_val = float("0.00543045") + max_val = float("0.191688") + mean = float("0.021541") + std = float("0.0159869") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [384] + dtype = "float32" + min_val = float("-0.266668") + max_val = float("0.4144") + mean = float("-0.0370778") + std = float("0.0509328") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0331339") + max_val = float("0.0530855") + 
mean = float("-6.31513e-05") + std = float("0.00148047") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = [384] + dtype = "float32" + min_val = float("0.945357") + max_val = float("1.04501") + mean = float("0.988631") + std = float("0.00984229") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [384] + dtype = "float32" + min_val = float("0.000719695") + max_val = float("0.0175524") + mean = float("0.00510978") + std = float("0.00307552") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [384] + dtype = "float32" + min_val = float("-0.0846979") + max_val = float("0.0435371") + mean = float("-0.00255296") + std = float("0.0171291") + data = None + + +class Program_weight_tensor_parameter_24: + name = "parameter_24" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0267958") + max_val = float("0.025491") + mean = float("-5.37291e-05") + std = float("0.00203271") + data = None + + +class Program_weight_tensor_parameter_25: + name = "parameter_25" + shape = [384] + dtype = "float32" + min_val = float("-0.105219") + max_val = float("0.0129843") + mean = float("-0.0358029") + std = float("0.0193236") + data = None + + +class Program_weight_tensor_parameter_26: + name = "parameter_26" + shape = [384] + dtype = "float32" + min_val = float("0.959852") + max_val = float("1.10509") + mean = float("1.01609") + std = float("0.0177564") + data = None + + +class Program_weight_tensor_parameter_27: + name = "parameter_27" + shape = [384] + dtype = "float32" + min_val = float("0.0024927") + max_val = float("0.0541337") + mean = float("0.00984546") + std = float("0.0050589") + data = None + + +class Program_weight_tensor_parameter_28: + name = "parameter_28" + shape = [384] + dtype = "float32" + min_val = float("-0.215525") + max_val = float("0.32115") + mean = float("-0.0500705") + std = float("0.0449912") + data = None + + +class Program_weight_tensor_parameter_29: + name = "parameter_29" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0363929") + max_val = float("0.0514823") + mean = float("-8.41934e-05") + std = float("0.00132563") + data = None + + +class Program_weight_tensor_parameter_30: + name = "parameter_30" + shape = [384] + dtype = "float32" + min_val = float("-0.0896627") + max_val = float("0.0192839") + mean = float("-0.0360783") + std = float("0.0194692") + data = None + + +class Program_weight_tensor_parameter_31: + name = "parameter_31" + shape = [384] + dtype = "float32" + min_val = float("0.933291") + max_val = float("1.11466") + mean = float("1.01167") + std = float("0.026589") + data = None + + +class Program_weight_tensor_parameter_32: + name = "parameter_32" + shape = [384] + dtype = "float32" + min_val = float("0.00555425") + max_val = float("0.0674583") + mean = float("0.0182706") + std = float("0.00926606") + data = None + + +class Program_weight_tensor_parameter_33: + name = "parameter_33" + shape = [384] + dtype = "float32" + min_val = float("-0.23163") + max_val = float("0.124775") + mean = float("-0.0274763") + std = float("0.0558182") + data = None + + +class Program_weight_tensor_parameter_34: + name = "parameter_34" + shape = [384, 384, 3, 3] + dtype = "float32" + 
min_val = float("-0.0397048") + max_val = float("0.0499731") + mean = float("-5.43157e-05") + std = float("0.00151173") + data = None + + +class Program_weight_tensor_parameter_35: + name = "parameter_35" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_36: + name = "parameter_36" + shape = [384] + dtype = "float32" + min_val = float("0.929383") + max_val = float("1.02791") + mean = float("0.98704") + std = float("0.0110296") + data = None + + +class Program_weight_tensor_parameter_37: + name = "parameter_37" + shape = [384] + dtype = "float32" + min_val = float("0.00126812") + max_val = float("0.0117075") + mean = float("0.00454736") + std = float("0.00186229") + data = None + + +class Program_weight_tensor_parameter_38: + name = "parameter_38" + shape = [384] + dtype = "float32" + min_val = float("-0.0553816") + max_val = float("0.0350888") + mean = float("-0.00843188") + std = float("0.0133692") + data = None + + +class Program_weight_tensor_parameter_39: + name = "parameter_39" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0386337") + max_val = float("0.0282119") + mean = float("-0.000152706") + std = float("0.00204597") + data = None + + +class Program_weight_tensor_parameter_40: + name = "parameter_40" + shape = [384] + dtype = "float32" + min_val = float("-0.116341") + max_val = float("0.0161185") + mean = float("-0.0373639") + std = float("0.0201507") + data = None + + +class Program_weight_tensor_parameter_41: + name = "parameter_41" + shape = [384] + dtype = "float32" + min_val = float("0.981354") + max_val = float("1.10683") + mean = float("1.01834") + std = float("0.0222205") + data = None + + +class Program_weight_tensor_parameter_42: + name = "parameter_42" + shape = [384] + dtype = "float32" + min_val = float("0.00525993") + max_val = float("0.0393454") + mean = float("0.0117862") + std = float("0.00495311") + data = None + + +class Program_weight_tensor_parameter_43: + name = "parameter_43" + shape = [384] + dtype = "float32" + min_val = float("-0.189916") + max_val = float("0.089502") + mean = float("-0.0267907") + std = float("0.0350026") + data = None + + +class Program_weight_tensor_parameter_44: + name = "parameter_44" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0360859") + max_val = float("0.0633791") + mean = float("-4.66788e-05") + std = float("0.00138059") + data = None + + +class Program_weight_tensor_parameter_45: + name = "parameter_45" + shape = [384] + dtype = "float32" + min_val = float("-0.107113") + max_val = float("0.0239382") + mean = float("-0.0375215") + std = float("0.0214567") + data = None + + +class Program_weight_tensor_parameter_46: + name = "parameter_46" + shape = [384] + dtype = "float32" + min_val = float("0.944795") + max_val = float("1.11465") + mean = float("1.01186") + std = float("0.0277861") + data = None + + +class Program_weight_tensor_parameter_47: + name = "parameter_47" + shape = [384] + dtype = "float32" + min_val = float("0.0055232") + max_val = float("0.0636068") + mean = float("0.0154489") + std = float("0.00777637") + data = None + + +class Program_weight_tensor_parameter_48: + name = "parameter_48" + shape = [384] + dtype = "float32" + min_val = float("-0.154281") + max_val = float("0.124624") + mean = float("-0.04841") + std = float("0.0507409") + data = None + + +class Program_weight_tensor_parameter_49: + name = 
"parameter_49" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0279282") + max_val = float("0.0439271") + mean = float("-7.87759e-05") + std = float("0.00153817") + data = None + + +class Program_weight_tensor_parameter_50: + name = "parameter_50" + shape = [384] + dtype = "float32" + min_val = float("-0.10674") + max_val = float("0.046738") + mean = float("-0.026306") + std = float("0.0154157") + data = None + + +class Program_weight_tensor_parameter_51: + name = "parameter_51" + shape = [384] + dtype = "float32" + min_val = float("0.973756") + max_val = float("1.08653") + mean = float("1.00903") + std = float("0.0171142") + data = None + + +class Program_weight_tensor_parameter_52: + name = "parameter_52" + shape = [384] + dtype = "float32" + min_val = float("0.00240361") + max_val = float("0.0172008") + mean = float("0.00539159") + std = float("0.0019353") + data = None + + +class Program_weight_tensor_parameter_53: + name = "parameter_53" + shape = [384] + dtype = "float32" + min_val = float("-0.100425") + max_val = float("0.0867517") + mean = float("-0.0196731") + std = float("0.0269084") + data = None + + +class Program_weight_tensor_parameter_54: + name = "parameter_54" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0619005") + max_val = float("0.0744808") + mean = float("-8.91799e-05") + std = float("0.00230778") + data = None + + +class Program_weight_tensor_parameter_55: + name = "parameter_55" + shape = [384] + dtype = "float32" + min_val = float("-0.0424904") + max_val = float("0.0160654") + mean = float("-0.00899509") + std = float("0.00840798") + data = None + + +class Program_weight_tensor_parameter_56: + name = "parameter_56" + shape = [384] + dtype = "float32" + min_val = float("0.959519") + max_val = float("1.05137") + mean = float("1.00788") + std = float("0.0115961") + data = None + + +class Program_weight_tensor_parameter_57: + name = "parameter_57" + shape = [384] + dtype = "float32" + min_val = float("0.00126033") + max_val = float("0.03023") + mean = float("0.00451665") + std = float("0.00218518") + data = None + + +class Program_weight_tensor_parameter_58: + name = "parameter_58" + shape = [384] + dtype = "float32" + min_val = float("-0.110881") + max_val = float("0.0925399") + mean = float("-0.0236355") + std = float("0.0234762") + data = None + + +class Program_weight_tensor_parameter_59: + name = "parameter_59" + shape = [384, 1152, 1, 1] + dtype = "float32" + min_val = float("-0.0245473") + max_val = float("0.0425909") + mean = float("-0.000112646") + std = float("0.00208633") + data = None + + +class Program_weight_tensor_parameter_60: + name = "parameter_60" + shape = [384] + dtype = "float32" + min_val = float("-0.0529748") + max_val = float("0.0059538") + mean = float("-0.0166275") + std = float("0.00987957") + data = None + + +class Program_weight_tensor_parameter_61: + name = "parameter_61" + shape = [384] + dtype = "float32" + min_val = float("0.988678") + max_val = float("1.10388") + mean = float("1.01957") + std = float("0.0168754") + data = None + + +class Program_weight_tensor_parameter_62: + name = "parameter_62" + shape = [384] + dtype = "float32" + min_val = float("0.00462734") + max_val = float("0.0682865") + mean = float("0.0144132") + std = float("0.00829381") + data = None + + +class Program_weight_tensor_parameter_63: + name = "parameter_63" + shape = [384] + dtype = "float32" + min_val = float("-0.443847") + max_val = float("0.193358") + mean = float("-0.047384") + std = float("0.0711895") + data = 
None + + +class Program_weight_tensor_parameter_64: + name = "parameter_64" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0212973") + max_val = float("0.0335283") + mean = float("-3.20307e-05") + std = float("0.00117985") + data = None + + +class Program_weight_tensor_parameter_65: + name = "parameter_65" + shape = [384] + dtype = "float32" + min_val = float("-0.222314") + max_val = float("0.492622") + mean = float("0.217344") + std = float("0.124262") + data = None + + +class Program_weight_tensor_parameter_66: + name = "parameter_66" + shape = [384] + dtype = "float32" + min_val = float("0.919258") + max_val = float("1.4834") + mean = float("1.14101") + std = float("0.0738465") + data = None + + +class Program_weight_tensor_parameter_67: + name = "parameter_67" + shape = [384] + dtype = "float32" + min_val = float("0.00377408") + max_val = float("0.0757958") + mean = float("0.0118179") + std = float("0.00580441") + data = None + + +class Program_weight_tensor_parameter_68: + name = "parameter_68" + shape = [384] + dtype = "float32" + min_val = float("-0.129308") + max_val = float("0.0593208") + mean = float("-0.0372124") + std = float("0.0301462") + data = None + + +class Program_weight_tensor_parameter_69: + name = "parameter_69" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0788092") + max_val = float("0.0718439") + mean = float("-0.000420206") + std = float("0.00505348") + data = None + + +class Program_weight_tensor_parameter_70: + name = "parameter_70" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_71: + name = "parameter_71" + shape = [192] + dtype = "float32" + min_val = float("0.841187") + max_val = float("1.05089") + mean = float("0.972721") + std = float("0.0237726") + data = None + + +class Program_weight_tensor_parameter_72: + name = "parameter_72" + shape = [192] + dtype = "float32" + min_val = float("0.00135964") + max_val = float("0.0214683") + mean = float("0.00615868") + std = float("0.00390446") + data = None + + +class Program_weight_tensor_parameter_73: + name = "parameter_73" + shape = [192] + dtype = "float32" + min_val = float("-0.0635033") + max_val = float("0.0921305") + mean = float("-0.00577622") + std = float("0.0200987") + data = None + + +class Program_weight_tensor_parameter_74: + name = "parameter_74" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0496361") + max_val = float("0.0295852") + mean = float("-0.000179209") + std = float("0.00381061") + data = None + + +class Program_weight_tensor_parameter_75: + name = "parameter_75" + shape = [192] + dtype = "float32" + min_val = float("-0.165903") + max_val = float("0.0468638") + mean = float("-0.0248091") + std = float("0.0394948") + data = None + + +class Program_weight_tensor_parameter_76: + name = "parameter_76" + shape = [192] + dtype = "float32" + min_val = float("0.72984") + max_val = float("1.12263") + mean = float("1.02218") + std = float("0.0372419") + data = None + + +class Program_weight_tensor_parameter_77: + name = "parameter_77" + shape = [192] + dtype = "float32" + min_val = float("0.00534163") + max_val = float("0.0562738") + mean = float("0.0136988") + std = float("0.0062901") + data = None + + +class Program_weight_tensor_parameter_78: + name = "parameter_78" + shape = [192] + dtype = "float32" + min_val = float("-0.219364") + max_val = float("0.10138") + mean = 
float("-0.0376619") + std = float("0.0434336") + data = None + + +class Program_weight_tensor_parameter_79: + name = "parameter_79" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0430374") + max_val = float("0.049516") + mean = float("-0.000124454") + std = float("0.00256786") + data = None + + +class Program_weight_tensor_parameter_80: + name = "parameter_80" + shape = [192] + dtype = "float32" + min_val = float("-0.191344") + max_val = float("0.0444996") + mean = float("-0.057942") + std = float("0.0491062") + data = None + + +class Program_weight_tensor_parameter_81: + name = "parameter_81" + shape = [192] + dtype = "float32" + min_val = float("0.897737") + max_val = float("1.18792") + mean = float("1.01539") + std = float("0.0484046") + data = None + + +class Program_weight_tensor_parameter_82: + name = "parameter_82" + shape = [192] + dtype = "float32" + min_val = float("0.0105665") + max_val = float("0.202383") + mean = float("0.0352004") + std = float("0.0227021") + data = None + + +class Program_weight_tensor_parameter_83: + name = "parameter_83" + shape = [192] + dtype = "float32" + min_val = float("-0.295941") + max_val = float("0.513277") + mean = float("-0.0405879") + std = float("0.0633076") + data = None + + +class Program_weight_tensor_parameter_84: + name = "parameter_84" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.047378") + max_val = float("0.0557186") + mean = float("-0.000110452") + std = float("0.00285571") + data = None + + +class Program_weight_tensor_parameter_85: + name = "parameter_85" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854012") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class Program_weight_tensor_parameter_86: + name = "parameter_86" + shape = [192] + dtype = "float32" + min_val = float("0.922153") + max_val = float("1.04653") + mean = float("0.973445") + std = float("0.017956") + data = None + + +class Program_weight_tensor_parameter_87: + name = "parameter_87" + shape = [192] + dtype = "float32" + min_val = float("0.00111369") + max_val = float("0.0150475") + mean = float("0.00521977") + std = float("0.00256485") + data = None + + +class Program_weight_tensor_parameter_88: + name = "parameter_88" + shape = [192] + dtype = "float32" + min_val = float("-0.0705634") + max_val = float("0.0364988") + mean = float("-0.00792433") + std = float("0.0151559") + data = None + + +class Program_weight_tensor_parameter_89: + name = "parameter_89" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0386531") + max_val = float("0.0308172") + mean = float("-0.000343288") + std = float("0.00384278") + data = None + + +class Program_weight_tensor_parameter_90: + name = "parameter_90" + shape = [192] + dtype = "float32" + min_val = float("-0.191632") + max_val = float("0.00854012") + mean = float("-0.064207") + std = float("0.0334262") + data = None + + +class Program_weight_tensor_parameter_91: + name = "parameter_91" + shape = [192] + dtype = "float32" + min_val = float("0.968104") + max_val = float("1.14778") + mean = float("1.02415") + std = float("0.0294364") + data = None + + +class Program_weight_tensor_parameter_92: + name = "parameter_92" + shape = [192] + dtype = "float32" + min_val = float("0.00435298") + max_val = float("0.0469628") + mean = float("0.0119993") + std = float("0.00625753") + data = None + + +class Program_weight_tensor_parameter_93: + name = "parameter_93" + shape = [192] + dtype = "float32" + min_val = 
float("-0.186388") + max_val = float("0.140954") + mean = float("-0.0380294") + std = float("0.0385226") + data = None + + +class Program_weight_tensor_parameter_94: + name = "parameter_94" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0471114") + max_val = float("0.0550151") + mean = float("-0.00014067") + std = float("0.00262923") + data = None + + +class Program_weight_tensor_parameter_95: + name = "parameter_95" + shape = [192] + dtype = "float32" + min_val = float("-0.188926") + max_val = float("0.062054") + mean = float("-0.0755775") + std = float("0.0405971") + data = None + + +class Program_weight_tensor_parameter_96: + name = "parameter_96" + shape = [192] + dtype = "float32" + min_val = float("0.880419") + max_val = float("1.21878") + mean = float("1.01465") + std = float("0.050849") + data = None + + +class Program_weight_tensor_parameter_97: + name = "parameter_97" + shape = [192] + dtype = "float32" + min_val = float("0.00811769") + max_val = float("0.0673723") + mean = float("0.021795") + std = float("0.0107691") + data = None + + +class Program_weight_tensor_parameter_98: + name = "parameter_98" + shape = [192] + dtype = "float32" + min_val = float("-0.117015") + max_val = float("0.0523492") + mean = float("-0.0248809") + std = float("0.0343347") + data = None + + +class Program_weight_tensor_parameter_99: + name = "parameter_99" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0427984") + max_val = float("0.0615634") + mean = float("-0.000111452") + std = float("0.00299174") + data = None + + +class Program_weight_tensor_parameter_100: + name = "parameter_100" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962433") + mean = float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_101: + name = "parameter_101" + shape = [192] + dtype = "float32" + min_val = float("0.900428") + max_val = float("1.02666") + mean = float("0.975123") + std = float("0.0229582") + data = None + + +class Program_weight_tensor_parameter_102: + name = "parameter_102" + shape = [192] + dtype = "float32" + min_val = float("0.00171033") + max_val = float("0.0153508") + mean = float("0.00574446") + std = float("0.00198723") + data = None + + +class Program_weight_tensor_parameter_103: + name = "parameter_103" + shape = [192] + dtype = "float32" + min_val = float("-0.0390528") + max_val = float("0.0467988") + mean = float("-0.0106683") + std = float("0.0171761") + data = None + + +class Program_weight_tensor_parameter_104: + name = "parameter_104" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0436521") + max_val = float("0.0635251") + mean = float("-0.000488447") + std = float("0.00437095") + data = None + + +class Program_weight_tensor_parameter_105: + name = "parameter_105" + shape = [192] + dtype = "float32" + min_val = float("-0.229476") + max_val = float("-0.00962433") + mean = float("-0.0831852") + std = float("0.0422479") + data = None + + +class Program_weight_tensor_parameter_106: + name = "parameter_106" + shape = [192] + dtype = "float32" + min_val = float("0.947654") + max_val = float("1.11111") + mean = float("1.02112") + std = float("0.0306157") + data = None + + +class Program_weight_tensor_parameter_107: + name = "parameter_107" + shape = [192] + dtype = "float32" + min_val = float("0.00720341") + max_val = float("0.0581605") + mean = float("0.0166222") + std = float("0.00832442") + data = None + + +class 
Program_weight_tensor_parameter_108: + name = "parameter_108" + shape = [192] + dtype = "float32" + min_val = float("-0.129987") + max_val = float("0.0597114") + mean = float("-0.0235942") + std = float("0.0336059") + data = None + + +class Program_weight_tensor_parameter_109: + name = "parameter_109" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0485052") + max_val = float("0.0562451") + mean = float("-9.74246e-05") + std = float("0.00278606") + data = None + + +class Program_weight_tensor_parameter_110: + name = "parameter_110" + shape = [192] + dtype = "float32" + min_val = float("-0.234305") + max_val = float("0.081368") + mean = float("-0.0947175") + std = float("0.0463051") + data = None + + +class Program_weight_tensor_parameter_111: + name = "parameter_111" + shape = [192] + dtype = "float32" + min_val = float("0.886145") + max_val = float("1.20472") + mean = float("1.01666") + std = float("0.0540248") + data = None + + +class Program_weight_tensor_parameter_112: + name = "parameter_112" + shape = [192] + dtype = "float32" + min_val = float("0.00887124") + max_val = float("0.100013") + mean = float("0.0211819") + std = float("0.0128206") + data = None + + +class Program_weight_tensor_parameter_113: + name = "parameter_113" + shape = [192] + dtype = "float32" + min_val = float("-0.180694") + max_val = float("0.0963527") + mean = float("-0.0400641") + std = float("0.0435422") + data = None + + +class Program_weight_tensor_parameter_114: + name = "parameter_114" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0410156") + max_val = float("0.0751964") + mean = float("-0.000134498") + std = float("0.0032483") + data = None + + +class Program_weight_tensor_parameter_115: + name = "parameter_115" + shape = [192] + dtype = "float32" + min_val = float("-0.199948") + max_val = float("0.0153484") + mean = float("-0.0662884") + std = float("0.031178") + data = None + + +class Program_weight_tensor_parameter_116: + name = "parameter_116" + shape = [192] + dtype = "float32" + min_val = float("0.925493") + max_val = float("1.15259") + mean = float("1.01328") + std = float("0.0383643") + data = None + + +class Program_weight_tensor_parameter_117: + name = "parameter_117" + shape = [192] + dtype = "float32" + min_val = float("0.0044011") + max_val = float("0.0246488") + mean = float("0.00855375") + std = float("0.00310702") + data = None + + +class Program_weight_tensor_parameter_118: + name = "parameter_118" + shape = [192] + dtype = "float32" + min_val = float("-0.0887579") + max_val = float("0.122375") + mean = float("-0.0224875") + std = float("0.0291868") + data = None + + +class Program_weight_tensor_parameter_119: + name = "parameter_119" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0628757") + max_val = float("0.0645969") + mean = float("-0.000195496") + std = float("0.00467829") + data = None + + +class Program_weight_tensor_parameter_120: + name = "parameter_120" + shape = [192] + dtype = "float32" + min_val = float("-0.0999632") + max_val = float("0.037411") + mean = float("-0.0139724") + std = float("0.0203964") + data = None + + +class Program_weight_tensor_parameter_121: + name = "parameter_121" + shape = [192] + dtype = "float32" + min_val = float("0.923856") + max_val = float("1.19918") + mean = float("1.00277") + std = float("0.025885") + data = None + + +class Program_weight_tensor_parameter_122: + name = "parameter_122" + shape = [192] + dtype = "float32" + min_val = float("0.00336859") + max_val = 
float("0.0385248") + mean = float("0.00844927") + std = float("0.00421125") + data = None + + +class Program_weight_tensor_parameter_123: + name = "parameter_123" + shape = [192] + dtype = "float32" + min_val = float("-0.0725677") + max_val = float("0.0457479") + mean = float("-0.0168628") + std = float("0.0213133") + data = None + + +class Program_weight_tensor_parameter_124: + name = "parameter_124" + shape = [192, 576, 1, 1] + dtype = "float32" + min_val = float("-0.0557051") + max_val = float("0.0726466") + mean = float("-0.000148835") + std = float("0.00416084") + data = None + + +class Program_weight_tensor_parameter_125: + name = "parameter_125" + shape = [192] + dtype = "float32" + min_val = float("-0.15908") + max_val = float("-0.000555601") + mean = float("-0.038944") + std = float("0.0217257") + data = None + + +class Program_weight_tensor_parameter_126: + name = "parameter_126" + shape = [192] + dtype = "float32" + min_val = float("0.921159") + max_val = float("1.24866") + mean = float("1.00725") + std = float("0.0301467") + data = None + + +class Program_weight_tensor_parameter_127: + name = "parameter_127" + shape = [192] + dtype = "float32" + min_val = float("0.00433237") + max_val = float("0.0626783") + mean = float("0.0160168") + std = float("0.00846658") + data = None + + +class Program_weight_tensor_parameter_128: + name = "parameter_128" + shape = [192] + dtype = "float32" + min_val = float("-0.396741") + max_val = float("0.33475") + mean = float("-0.0359227") + std = float("0.0957987") + data = None + + +class Program_weight_tensor_parameter_129: + name = "parameter_129" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350237") + max_val = float("0.0471653") + mean = float("-3.44387e-05") + std = float("0.00253963") + data = None + + +class Program_weight_tensor_parameter_130: + name = "parameter_130" + shape = [192] + dtype = "float32" + min_val = float("-0.552248") + max_val = float("1.14732") + mean = float("0.355898") + std = float("0.346059") + data = None + + +class Program_weight_tensor_parameter_131: + name = "parameter_131" + shape = [192] + dtype = "float32" + min_val = float("0.541472") + max_val = float("1.57746") + mean = float("1.15098") + std = float("0.184373") + data = None + + +class Program_weight_tensor_parameter_132: + name = "parameter_132" + shape = [192] + dtype = "float32" + min_val = float("0.00561378") + max_val = float("0.117481") + mean = float("0.0300174") + std = float("0.0177759") + data = None + + +class Program_weight_tensor_parameter_133: + name = "parameter_133" + shape = [192] + dtype = "float32" + min_val = float("-0.181834") + max_val = float("0.204641") + mean = float("-0.0498559") + std = float("0.0488345") + data = None + + +class Program_weight_tensor_parameter_134: + name = "parameter_134" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.140077") + max_val = float("0.117821") + mean = float("-0.0010577") + std = float("0.0117759") + data = None + + +class Program_weight_tensor_parameter_135: + name = "parameter_135" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.00944132") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_136: + name = "parameter_136" + shape = [96] + dtype = "float32" + min_val = float("0.762871") + max_val = float("1.23462") + mean = float("0.948542") + std = float("0.0712293") + data = None + + +class Program_weight_tensor_parameter_137: + name = "parameter_137" + 
shape = [96] + dtype = "float32" + min_val = float("0.00298878") + max_val = float("0.0421928") + mean = float("0.0124331") + std = float("0.00821157") + data = None + + +class Program_weight_tensor_parameter_138: + name = "parameter_138" + shape = [96] + dtype = "float32" + min_val = float("-0.0584852") + max_val = float("0.0908825") + mean = float("-0.0135007") + std = float("0.024126") + data = None + + +class Program_weight_tensor_parameter_139: + name = "parameter_139" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0753814") + max_val = float("0.0571343") + mean = float("-0.00127258") + std = float("0.00926422") + data = None + + +class Program_weight_tensor_parameter_140: + name = "parameter_140" + shape = [96] + dtype = "float32" + min_val = float("-0.457965") + max_val = float("0.231213") + mean = float("-0.00944132") + std = float("0.144606") + data = None + + +class Program_weight_tensor_parameter_141: + name = "parameter_141" + shape = [96] + dtype = "float32" + min_val = float("0.505007") + max_val = float("1.2709") + mean = float("1.02954") + std = float("0.0962551") + data = None + + +class Program_weight_tensor_parameter_142: + name = "parameter_142" + shape = [96] + dtype = "float32" + min_val = float("0.00864011") + max_val = float("0.0819817") + mean = float("0.029707") + std = float("0.0154312") + data = None + + +class Program_weight_tensor_parameter_143: + name = "parameter_143" + shape = [96] + dtype = "float32" + min_val = float("-0.235686") + max_val = float("0.133508") + mean = float("-0.023179") + std = float("0.060166") + data = None + + +class Program_weight_tensor_parameter_144: + name = "parameter_144" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0934252") + max_val = float("0.0953084") + mean = float("-0.000117725") + std = float("0.00631181") + data = None + + +class Program_weight_tensor_parameter_145: + name = "parameter_145" + shape = [96] + dtype = "float32" + min_val = float("-0.703685") + max_val = float("0.495421") + mean = float("-0.112778") + std = float("0.198104") + data = None + + +class Program_weight_tensor_parameter_146: + name = "parameter_146" + shape = [96] + dtype = "float32" + min_val = float("0.723217") + max_val = float("1.7117") + mean = float("0.995187") + std = float("0.133891") + data = None + + +class Program_weight_tensor_parameter_147: + name = "parameter_147" + shape = [96] + dtype = "float32" + min_val = float("0.0126242") + max_val = float("0.189013") + mean = float("0.0406096") + std = float("0.0302792") + data = None + + +class Program_weight_tensor_parameter_148: + name = "parameter_148" + shape = [96] + dtype = "float32" + min_val = float("-0.211007") + max_val = float("0.139458") + mean = float("-0.0274803") + std = float("0.0625687") + data = None + + +class Program_weight_tensor_parameter_149: + name = "parameter_149" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0919405") + max_val = float("0.0707093") + mean = float("-0.000470706") + std = float("0.00699141") + data = None + + +class Program_weight_tensor_parameter_150: + name = "parameter_150" + shape = [96] + dtype = "float32" + min_val = float("-0.364151") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960161") + data = None + + +class Program_weight_tensor_parameter_151: + name = "parameter_151" + shape = [96] + dtype = "float32" + min_val = float("0.626997") + max_val = float("1.01953") + mean = float("0.906483") + std = float("0.0555602") + data = None + + +class 
Program_weight_tensor_parameter_152: + name = "parameter_152" + shape = [96] + dtype = "float32" + min_val = float("0.00320113") + max_val = float("0.0231713") + mean = float("0.0111523") + std = float("0.00438468") + data = None + + +class Program_weight_tensor_parameter_153: + name = "parameter_153" + shape = [96] + dtype = "float32" + min_val = float("-0.0654375") + max_val = float("0.0381209") + mean = float("-0.00860304") + std = float("0.0165133") + data = None + + +class Program_weight_tensor_parameter_154: + name = "parameter_154" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0710343") + max_val = float("0.0593522") + mean = float("-0.00106867") + std = float("0.00947732") + data = None + + +class Program_weight_tensor_parameter_155: + name = "parameter_155" + shape = [96] + dtype = "float32" + min_val = float("-0.364151") + max_val = float("0.190267") + mean = float("-0.138622") + std = float("0.0960161") + data = None + + +class Program_weight_tensor_parameter_156: + name = "parameter_156" + shape = [96] + dtype = "float32" + min_val = float("0.811163") + max_val = float("1.15777") + mean = float("1.02225") + std = float("0.060594") + data = None + + +class Program_weight_tensor_parameter_157: + name = "parameter_157" + shape = [96] + dtype = "float32" + min_val = float("0.0112523") + max_val = float("0.110271") + mean = float("0.0288634") + std = float("0.0207594") + data = None + + +class Program_weight_tensor_parameter_158: + name = "parameter_158" + shape = [96] + dtype = "float32" + min_val = float("-0.163525") + max_val = float("0.0372296") + mean = float("-0.0369433") + std = float("0.0331166") + data = None + + +class Program_weight_tensor_parameter_159: + name = "parameter_159" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0811922") + max_val = float("0.0768953") + mean = float("-0.000466623") + std = float("0.00651757") + data = None + + +class Program_weight_tensor_parameter_160: + name = "parameter_160" + shape = [96] + dtype = "float32" + min_val = float("-0.486488") + max_val = float("0.169402") + mean = float("-0.16699") + std = float("0.131221") + data = None + + +class Program_weight_tensor_parameter_161: + name = "parameter_161" + shape = [96] + dtype = "float32" + min_val = float("0.77745") + max_val = float("1.29252") + mean = float("0.963023") + std = float("0.0981107") + data = None + + +class Program_weight_tensor_parameter_162: + name = "parameter_162" + shape = [96] + dtype = "float32" + min_val = float("0.0097702") + max_val = float("0.108736") + mean = float("0.0240622") + std = float("0.0138876") + data = None + + +class Program_weight_tensor_parameter_163: + name = "parameter_163" + shape = [96] + dtype = "float32" + min_val = float("-0.150372") + max_val = float("0.0656242") + mean = float("0.00931518") + std = float("0.0377034") + data = None + + +class Program_weight_tensor_parameter_164: + name = "parameter_164" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0993825") + max_val = float("0.0757514") + mean = float("-0.000423273") + std = float("0.00766977") + data = None + + +class Program_weight_tensor_parameter_165: + name = "parameter_165" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.065165") + mean = float("-0.168145") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_166: + name = "parameter_166" + shape = [96] + dtype = "float32" + min_val = float("0.722939") + max_val = float("1.0022") + mean = 
float("0.918838") + std = float("0.0531756") + data = None + + +class Program_weight_tensor_parameter_167: + name = "parameter_167" + shape = [96] + dtype = "float32" + min_val = float("0.00758991") + max_val = float("0.0378697") + mean = float("0.0164332") + std = float("0.00578739") + data = None + + +class Program_weight_tensor_parameter_168: + name = "parameter_168" + shape = [96] + dtype = "float32" + min_val = float("-0.0561772") + max_val = float("0.0397966") + mean = float("-0.0196014") + std = float("0.0187438") + data = None + + +class Program_weight_tensor_parameter_169: + name = "parameter_169" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.103953") + max_val = float("0.0646149") + mean = float("-0.00221242") + std = float("0.0110162") + data = None + + +class Program_weight_tensor_parameter_170: + name = "parameter_170" + shape = [96] + dtype = "float32" + min_val = float("-0.489705") + max_val = float("0.065165") + mean = float("-0.168145") + std = float("0.114783") + data = None + + +class Program_weight_tensor_parameter_171: + name = "parameter_171" + shape = [96] + dtype = "float32" + min_val = float("0.766539") + max_val = float("1.15353") + mean = float("0.982409") + std = float("0.0579773") + data = None + + +class Program_weight_tensor_parameter_172: + name = "parameter_172" + shape = [96] + dtype = "float32" + min_val = float("0.0171857") + max_val = float("0.220166") + mean = float("0.0443842") + std = float("0.0319483") + data = None + + +class Program_weight_tensor_parameter_173: + name = "parameter_173" + shape = [96] + dtype = "float32" + min_val = float("-0.196226") + max_val = float("0.0865161") + mean = float("-0.0155778") + std = float("0.0409091") + data = None + + +class Program_weight_tensor_parameter_174: + name = "parameter_174" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0992682") + max_val = float("0.0973879") + mean = float("-0.000248799") + std = float("0.00741391") + data = None + + +class Program_weight_tensor_parameter_175: + name = "parameter_175" + shape = [96] + dtype = "float32" + min_val = float("-0.564609") + max_val = float("0.347562") + mean = float("-0.179116") + std = float("0.173215") + data = None + + +class Program_weight_tensor_parameter_176: + name = "parameter_176" + shape = [96] + dtype = "float32" + min_val = float("0.764459") + max_val = float("1.33669") + mean = float("0.954532") + std = float("0.110883") + data = None + + +class Program_weight_tensor_parameter_177: + name = "parameter_177" + shape = [96] + dtype = "float32" + min_val = float("0.0145544") + max_val = float("0.11097") + mean = float("0.0319708") + std = float("0.0188345") + data = None + + +class Program_weight_tensor_parameter_178: + name = "parameter_178" + shape = [96] + dtype = "float32" + min_val = float("-0.17302") + max_val = float("0.269018") + mean = float("-0.0215072") + std = float("0.0939673") + data = None + + +class Program_weight_tensor_parameter_179: + name = "parameter_179" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.142383") + max_val = float("0.117263") + mean = float("-0.000229517") + std = float("0.00873001") + data = None + + +class Program_weight_tensor_parameter_180: + name = "parameter_180" + shape = [96] + dtype = "float32" + min_val = float("-0.625413") + max_val = float("0.597772") + mean = float("-0.0821868") + std = float("0.254375") + data = None + + +class Program_weight_tensor_parameter_181: + name = "parameter_181" + shape = [96] + dtype = "float32" + min_val = 
float("0.647479") + max_val = float("1.22747") + mean = float("0.866594") + std = float("0.1146") + data = None + + +class Program_weight_tensor_parameter_182: + name = "parameter_182" + shape = [96] + dtype = "float32" + min_val = float("0.0115736") + max_val = float("0.0790864") + mean = float("0.0255985") + std = float("0.0116053") + data = None + + +class Program_weight_tensor_parameter_183: + name = "parameter_183" + shape = [96] + dtype = "float32" + min_val = float("-0.112932") + max_val = float("0.0913184") + mean = float("-0.0111621") + std = float("0.0404469") + data = None + + +class Program_weight_tensor_parameter_184: + name = "parameter_184" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.14903") + max_val = float("0.149062") + mean = float("-0.000519099") + std = float("0.0115778") + data = None + + +class Program_weight_tensor_parameter_185: + name = "parameter_185" + shape = [96] + dtype = "float32" + min_val = float("-0.0986349") + max_val = float("0.227763") + mean = float("0.0619239") + std = float("0.0545689") + data = None + + +class Program_weight_tensor_parameter_186: + name = "parameter_186" + shape = [96] + dtype = "float32" + min_val = float("0.703928") + max_val = float("1.12525") + mean = float("0.932492") + std = float("0.0634652") + data = None + + +class Program_weight_tensor_parameter_187: + name = "parameter_187" + shape = [96] + dtype = "float32" + min_val = float("0.00519295") + max_val = float("0.0599625") + mean = float("0.0119181") + std = float("0.00693821") + data = None + + +class Program_weight_tensor_parameter_188: + name = "parameter_188" + shape = [96] + dtype = "float32" + min_val = float("-0.0889976") + max_val = float("0.164161") + mean = float("-0.017344") + std = float("0.0389571") + data = None + + +class Program_weight_tensor_parameter_189: + name = "parameter_189" + shape = [96, 448, 1, 1] + dtype = "float32" + min_val = float("-0.0952125") + max_val = float("0.110914") + mean = float("-0.000272416") + std = float("0.00775169") + data = None + + +class Program_weight_tensor_parameter_190: + name = "parameter_190" + shape = [192] + dtype = "float32" + min_val = float("-0.295367") + max_val = float("0.199876") + mean = float("-0.065903") + std = float("0.0695813") + data = None + + +class Program_weight_tensor_parameter_191: + name = "parameter_191" + shape = [192] + dtype = "float32" + min_val = float("0.670697") + max_val = float("1.45276") + mean = float("0.885134") + std = float("0.0783825") + data = None + + +class Program_weight_tensor_parameter_192: + name = "parameter_192" + shape = [192] + dtype = "float32" + min_val = float("0.00810165") + max_val = float("0.127755") + mean = float("0.0227973") + std = float("0.0122998") + data = None + + +class Program_weight_tensor_parameter_193: + name = "parameter_193" + shape = [192] + dtype = "float32" + min_val = float("-0.147256") + max_val = float("0.0479519") + mean = float("-0.0364953") + std = float("0.0354482") + data = None + + +class Program_weight_tensor_parameter_194: + name = "parameter_194" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0959126") + max_val = float("0.117328") + mean = float("-0.000597241") + std = float("0.00788359") + data = None + + +class Program_weight_tensor_parameter_195: + name = "parameter_195" + shape = [384] + dtype = "float32" + min_val = float("-0.201782") + max_val = float("0.241811") + mean = float("-0.0670364") + std = float("0.0416536") + data = None + + +class Program_weight_tensor_parameter_196: + 
name = "parameter_196" + shape = [384] + dtype = "float32" + min_val = float("0.873178") + max_val = float("1.54065") + mean = float("1.01926") + std = float("0.0632841") + data = None + + +class Program_weight_tensor_parameter_197: + name = "parameter_197" + shape = [384] + dtype = "float32" + min_val = float("0.00754976") + max_val = float("0.0799005") + mean = float("0.015547") + std = float("0.00760761") + data = None + + +class Program_weight_tensor_parameter_198: + name = "parameter_198" + shape = [384] + dtype = "float32" + min_val = float("-0.335306") + max_val = float("0.119584") + mean = float("-0.0574114") + std = float("0.0470951") + data = None + + +class Program_weight_tensor_parameter_199: + name = "parameter_199" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.104641") + max_val = float("0.104081") + mean = float("-0.000725337") + std = float("0.00722264") + data = None + + +class Program_weight_tensor_parameter_200: + name = "parameter_200" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590593") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_201: + name = "parameter_201" + shape = [192] + dtype = "float32" + min_val = float("0.884903") + max_val = float("0.991186") + mean = float("0.949253") + std = float("0.016433") + data = None + + +class Program_weight_tensor_parameter_202: + name = "parameter_202" + shape = [192] + dtype = "float32" + min_val = float("0.00345864") + max_val = float("0.0250397") + mean = float("0.0100895") + std = float("0.00362139") + data = None + + +class Program_weight_tensor_parameter_203: + name = "parameter_203" + shape = [192] + dtype = "float32" + min_val = float("-0.0784978") + max_val = float("0.0700745") + mean = float("-0.0238174") + std = float("0.0311932") + data = None + + +class Program_weight_tensor_parameter_204: + name = "parameter_204" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0569075") + max_val = float("0.0369732") + mean = float("-0.000733351") + std = float("0.00540254") + data = None + + +class Program_weight_tensor_parameter_205: + name = "parameter_205" + shape = [192] + dtype = "float32" + min_val = float("-0.176949") + max_val = float("0.00590593") + mean = float("-0.0653774") + std = float("0.0325609") + data = None + + +class Program_weight_tensor_parameter_206: + name = "parameter_206" + shape = [192] + dtype = "float32" + min_val = float("0.945936") + max_val = float("1.03267") + mean = float("0.988143") + std = float("0.0166204") + data = None + + +class Program_weight_tensor_parameter_207: + name = "parameter_207" + shape = [192] + dtype = "float32" + min_val = float("0.0155344") + max_val = float("0.0844419") + mean = float("0.0339711") + std = float("0.012502") + data = None + + +class Program_weight_tensor_parameter_208: + name = "parameter_208" + shape = [192] + dtype = "float32" + min_val = float("-0.176762") + max_val = float("0.15265") + mean = float("-0.0230656") + std = float("0.0601937") + data = None + + +class Program_weight_tensor_parameter_209: + name = "parameter_209" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0444131") + max_val = float("0.0760357") + mean = float("-7.02524e-05") + std = float("0.00300584") + data = None + + +class Program_weight_tensor_parameter_210: + name = "parameter_210" + shape = [192] + dtype = "float32" + min_val = float("-0.217095") + max_val = float("-0.00148108") + mean = float("-0.0741376") + 
std = float("0.0354109") + data = None + + +class Program_weight_tensor_parameter_211: + name = "parameter_211" + shape = [192] + dtype = "float32" + min_val = float("0.939031") + max_val = float("1.15417") + mean = float("1.02943") + std = float("0.0431658") + data = None + + +class Program_weight_tensor_parameter_212: + name = "parameter_212" + shape = [192] + dtype = "float32" + min_val = float("0.0364266") + max_val = float("0.231139") + mean = float("0.0631271") + std = float("0.0206699") + data = None + + +class Program_weight_tensor_parameter_213: + name = "parameter_213" + shape = [192] + dtype = "float32" + min_val = float("-0.262217") + max_val = float("0.304208") + mean = float("-0.0426428") + std = float("0.0718369") + data = None + + +class Program_weight_tensor_parameter_214: + name = "parameter_214" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0622017") + max_val = float("0.0626879") + mean = float("-0.000102158") + std = float("0.00367047") + data = None + + +class Program_weight_tensor_parameter_215: + name = "parameter_215" + shape = [192] + dtype = "float32" + min_val = float("-0.196617") + max_val = float("-0.00995737") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_216: + name = "parameter_216" + shape = [192] + dtype = "float32" + min_val = float("0.94411") + max_val = float("1.04693") + mean = float("0.987726") + std = float("0.0137706") + data = None + + +class Program_weight_tensor_parameter_217: + name = "parameter_217" + shape = [192] + dtype = "float32" + min_val = float("0.00228676") + max_val = float("0.00961601") + mean = float("0.00480728") + std = float("0.00123428") + data = None + + +class Program_weight_tensor_parameter_218: + name = "parameter_218" + shape = [192] + dtype = "float32" + min_val = float("-0.0953901") + max_val = float("0.0389215") + mean = float("-0.025087") + std = float("0.0209448") + data = None + + +class Program_weight_tensor_parameter_219: + name = "parameter_219" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0313923") + max_val = float("0.0416125") + mean = float("-0.000809335") + std = float("0.00570058") + data = None + + +class Program_weight_tensor_parameter_220: + name = "parameter_220" + shape = [192] + dtype = "float32" + min_val = float("-0.196617") + max_val = float("-0.00995737") + mean = float("-0.071187") + std = float("0.0319798") + data = None + + +class Program_weight_tensor_parameter_221: + name = "parameter_221" + shape = [192] + dtype = "float32" + min_val = float("0.953711") + max_val = float("1.11463") + mean = float("1.00472") + std = float("0.0265116") + data = None + + +class Program_weight_tensor_parameter_222: + name = "parameter_222" + shape = [192] + dtype = "float32" + min_val = float("0.0101684") + max_val = float("0.0483375") + mean = float("0.0181984") + std = float("0.00551311") + data = None + + +class Program_weight_tensor_parameter_223: + name = "parameter_223" + shape = [192] + dtype = "float32" + min_val = float("-0.187973") + max_val = float("0.143836") + mean = float("-0.0474657") + std = float("0.0465909") + data = None + + +class Program_weight_tensor_parameter_224: + name = "parameter_224" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0484376") + max_val = float("0.0812032") + mean = float("-0.000164179") + std = float("0.00306328") + data = None + + +class Program_weight_tensor_parameter_225: + name = "parameter_225" + shape = [192] + dtype = "float32" + 
min_val = float("-0.232846") + max_val = float("-0.0185216") + mean = float("-0.0943343") + std = float("0.040046") + data = None + + +class Program_weight_tensor_parameter_226: + name = "parameter_226" + shape = [192] + dtype = "float32" + min_val = float("0.946521") + max_val = float("1.19181") + mean = float("1.02411") + std = float("0.0460177") + data = None + + +class Program_weight_tensor_parameter_227: + name = "parameter_227" + shape = [192] + dtype = "float32" + min_val = float("0.0361899") + max_val = float("0.141548") + mean = float("0.0649197") + std = float("0.0200832") + data = None + + +class Program_weight_tensor_parameter_228: + name = "parameter_228" + shape = [192] + dtype = "float32" + min_val = float("-0.350006") + max_val = float("0.262728") + mean = float("-0.0865782") + std = float("0.0989383") + data = None + + +class Program_weight_tensor_parameter_229: + name = "parameter_229" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0611519") + max_val = float("0.0870387") + mean = float("-0.000165155") + std = float("0.00384626") + data = None + + +class Program_weight_tensor_parameter_230: + name = "parameter_230" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.00333791") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_231: + name = "parameter_231" + shape = [192] + dtype = "float32" + min_val = float("0.932342") + max_val = float("1.07188") + mean = float("0.99857") + std = float("0.0218607") + data = None + + +class Program_weight_tensor_parameter_232: + name = "parameter_232" + shape = [192] + dtype = "float32" + min_val = float("0.0020288") + max_val = float("0.00959064") + mean = float("0.0040899") + std = float("0.0011474") + data = None + + +class Program_weight_tensor_parameter_233: + name = "parameter_233" + shape = [192] + dtype = "float32" + min_val = float("-0.0826953") + max_val = float("0.0992723") + mean = float("-0.0125266") + std = float("0.0204973") + data = None + + +class Program_weight_tensor_parameter_234: + name = "parameter_234" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0348635") + max_val = float("0.0478139") + mean = float("-0.000426004") + std = float("0.00642907") + data = None + + +class Program_weight_tensor_parameter_235: + name = "parameter_235" + shape = [192] + dtype = "float32" + min_val = float("-0.154886") + max_val = float("0.0033379") + mean = float("-0.0685634") + std = float("0.0234192") + data = None + + +class Program_weight_tensor_parameter_236: + name = "parameter_236" + shape = [192] + dtype = "float32" + min_val = float("0.936172") + max_val = float("1.11491") + mean = float("0.992553") + std = float("0.0259462") + data = None + + +class Program_weight_tensor_parameter_237: + name = "parameter_237" + shape = [192] + dtype = "float32" + min_val = float("0.0092625") + max_val = float("0.0464439") + mean = float("0.0187122") + std = float("0.00577318") + data = None + + +class Program_weight_tensor_parameter_238: + name = "parameter_238" + shape = [192] + dtype = "float32" + min_val = float("-0.280981") + max_val = float("0.146793") + mean = float("-0.0420046") + std = float("0.0462309") + data = None + + +class Program_weight_tensor_parameter_239: + name = "parameter_239" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0372398") + max_val = float("0.0656079") + mean = float("-0.000164107") + std = float("0.00303882") + data = None + + +class 
Program_weight_tensor_parameter_240: + name = "parameter_240" + shape = [192] + dtype = "float32" + min_val = float("-0.289028") + max_val = float("0.0181015") + mean = float("-0.109759") + std = float("0.0400942") + data = None + + +class Program_weight_tensor_parameter_241: + name = "parameter_241" + shape = [192] + dtype = "float32" + min_val = float("0.943873") + max_val = float("1.25886") + mean = float("1.02651") + std = float("0.0418277") + data = None + + +class Program_weight_tensor_parameter_242: + name = "parameter_242" + shape = [192] + dtype = "float32" + min_val = float("0.0146559") + max_val = float("0.0682576") + mean = float("0.029378") + std = float("0.009291") + data = None + + +class Program_weight_tensor_parameter_243: + name = "parameter_243" + shape = [192] + dtype = "float32" + min_val = float("-0.381607") + max_val = float("0.108223") + mean = float("-0.0546604") + std = float("0.0618861") + data = None + + +class Program_weight_tensor_parameter_244: + name = "parameter_244" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0566363") + max_val = float("0.0721231") + mean = float("-0.000213222") + std = float("0.00432259") + data = None + + +class Program_weight_tensor_parameter_245: + name = "parameter_245" + shape = [192] + dtype = "float32" + min_val = float("-0.257034") + max_val = float("-0.0134243") + mean = float("-0.121787") + std = float("0.0441916") + data = None + + +class Program_weight_tensor_parameter_246: + name = "parameter_246" + shape = [192] + dtype = "float32" + min_val = float("0.916939") + max_val = float("1.13523") + mean = float("1.02431") + std = float("0.042227") + data = None + + +class Program_weight_tensor_parameter_247: + name = "parameter_247" + shape = [192] + dtype = "float32" + min_val = float("0.00558617") + max_val = float("0.0215959") + mean = float("0.0106695") + std = float("0.00291488") + data = None + + +class Program_weight_tensor_parameter_248: + name = "parameter_248" + shape = [192] + dtype = "float32" + min_val = float("-0.121693") + max_val = float("0.105152") + mean = float("0.0154863") + std = float("0.0289093") + data = None + + +class Program_weight_tensor_parameter_249: + name = "parameter_249" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.0812553") + max_val = float("0.103824") + mean = float("-0.000190188") + std = float("0.00606084") + data = None + + +class Program_weight_tensor_parameter_250: + name = "parameter_250" + shape = [192] + dtype = "float32" + min_val = float("-0.176608") + max_val = float("0.214363") + mean = float("-0.00723538") + std = float("0.0506647") + data = None + + +class Program_weight_tensor_parameter_251: + name = "parameter_251" + shape = [192] + dtype = "float32" + min_val = float("0.951166") + max_val = float("1.21791") + mean = float("1.05549") + std = float("0.0498194") + data = None + + +class Program_weight_tensor_parameter_252: + name = "parameter_252" + shape = [192] + dtype = "float32" + min_val = float("0.00700942") + max_val = float("0.0590357") + mean = float("0.0141953") + std = float("0.00517131") + data = None + + +class Program_weight_tensor_parameter_253: + name = "parameter_253" + shape = [192] + dtype = "float32" + min_val = float("-0.0747975") + max_val = float("0.0811747") + mean = float("-0.00053402") + std = float("0.0274613") + data = None + + +class Program_weight_tensor_parameter_254: + name = "parameter_254" + shape = [192, 896, 1, 1] + dtype = "float32" + min_val = float("-0.055207") + max_val = float("0.102723") + 
mean = float("-0.000223052") + std = float("0.00619518") + data = None + + +class Program_weight_tensor_parameter_255: + name = "parameter_255" + shape = [384] + dtype = "float32" + min_val = float("-0.249775") + max_val = float("-0.0568629") + mean = float("-0.125062") + std = float("0.0336773") + data = None + + +class Program_weight_tensor_parameter_256: + name = "parameter_256" + shape = [384] + dtype = "float32" + min_val = float("0.814907") + max_val = float("1.01643") + mean = float("0.909518") + std = float("0.0258168") + data = None + + +class Program_weight_tensor_parameter_257: + name = "parameter_257" + shape = [384] + dtype = "float32" + min_val = float("0.0100937") + max_val = float("0.0695395") + mean = float("0.022801") + std = float("0.00921208") + data = None + + +class Program_weight_tensor_parameter_258: + name = "parameter_258" + shape = [384] + dtype = "float32" + min_val = float("-0.146181") + max_val = float("0.110285") + mean = float("-0.0346603") + std = float("0.0383019") + data = None + + +class Program_weight_tensor_parameter_259: + name = "parameter_259" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0364868") + max_val = float("0.0339674") + mean = float("-0.000277799") + std = float("0.00472355") + data = None + + +class Program_weight_tensor_parameter_260: + name = "parameter_260" + shape = [768] + dtype = "float32" + min_val = float("-0.104276") + max_val = float("0.0723922") + mean = float("-0.0568764") + std = float("0.0153315") + data = None + + +class Program_weight_tensor_parameter_261: + name = "parameter_261" + shape = [768] + dtype = "float32" + min_val = float("0.9523") + max_val = float("1.1435") + mean = float("1.02091") + std = float("0.0210274") + data = None + + +class Program_weight_tensor_parameter_262: + name = "parameter_262" + shape = [768] + dtype = "float32" + min_val = float("0.00433515") + max_val = float("0.0356021") + mean = float("0.00969406") + std = float("0.00346217") + data = None + + +class Program_weight_tensor_parameter_263: + name = "parameter_263" + shape = [768] + dtype = "float32" + min_val = float("-0.103914") + max_val = float("0.111571") + mean = float("-0.0347286") + std = float("0.0270313") + data = None + + +class Program_weight_tensor_parameter_264: + name = "parameter_264" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0581812") + max_val = float("0.113051") + mean = float("-0.000304548") + std = float("0.00402831") + data = None + + +class Program_weight_tensor_parameter_265: + name = "parameter_265" + shape = [384] + dtype = "float32" + min_val = float("-0.158166") + max_val = float("0.0744681") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_266: + name = "parameter_266" + shape = [384] + dtype = "float32" + min_val = float("0.888577") + max_val = float("1.07465") + mean = float("0.982117") + std = float("0.0132258") + data = None + + +class Program_weight_tensor_parameter_267: + name = "parameter_267" + shape = [384] + dtype = "float32" + min_val = float("0.00600496") + max_val = float("0.0953219") + mean = float("0.0199657") + std = float("0.00942153") + data = None + + +class Program_weight_tensor_parameter_268: + name = "parameter_268" + shape = [384] + dtype = "float32" + min_val = float("-0.0681594") + max_val = float("0.0604839") + mean = float("-0.00585837") + std = float("0.0270223") + data = None + + +class Program_weight_tensor_parameter_269: + name = "parameter_269" + shape = [384, 384, 1, 
1] + dtype = "float32" + min_val = float("-0.0396804") + max_val = float("0.073742") + mean = float("-7.22725e-05") + std = float("0.00350095") + data = None + + +class Program_weight_tensor_parameter_270: + name = "parameter_270" + shape = [384] + dtype = "float32" + min_val = float("-0.158166") + max_val = float("0.0744681") + mean = float("-0.0400513") + std = float("0.0206673") + data = None + + +class Program_weight_tensor_parameter_271: + name = "parameter_271" + shape = [384] + dtype = "float32" + min_val = float("0.879914") + max_val = float("1.07681") + mean = float("0.993922") + std = float("0.0123427") + data = None + + +class Program_weight_tensor_parameter_272: + name = "parameter_272" + shape = [384] + dtype = "float32" + min_val = float("0.0282422") + max_val = float("0.756467") + mean = float("0.146192") + std = float("0.0664623") + data = None + + +class Program_weight_tensor_parameter_273: + name = "parameter_273" + shape = [384] + dtype = "float32" + min_val = float("-0.276936") + max_val = float("0.156867") + mean = float("-0.0841025") + std = float("0.0859055") + data = None + + +class Program_weight_tensor_parameter_274: + name = "parameter_274" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0424878") + max_val = float("0.0475734") + mean = float("-0.000126902") + std = float("0.00130674") + data = None + + +class Program_weight_tensor_parameter_275: + name = "parameter_275" + shape = [384] + dtype = "float32" + min_val = float("-0.0801146") + max_val = float("0.116771") + mean = float("-0.0189931") + std = float("0.0160256") + data = None + + +class Program_weight_tensor_parameter_276: + name = "parameter_276" + shape = [384] + dtype = "float32" + min_val = float("0.920205") + max_val = float("1.16667") + mean = float("1.01504") + std = float("0.0246966") + data = None + + +class Program_weight_tensor_parameter_277: + name = "parameter_277" + shape = [384] + dtype = "float32" + min_val = float("0.0223031") + max_val = float("0.220359") + mean = float("0.0742131") + std = float("0.0330541") + data = None + + +class Program_weight_tensor_parameter_278: + name = "parameter_278" + shape = [384] + dtype = "float32" + min_val = float("-0.235368") + max_val = float("0.220698") + mean = float("-0.0231606") + std = float("0.0793647") + data = None + + +class Program_weight_tensor_parameter_279: + name = "parameter_279" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0274578") + max_val = float("0.0359223") + mean = float("-3.21913e-05") + std = float("0.00171791") + data = None + + +class Program_weight_tensor_parameter_280: + name = "parameter_280" + shape = [384] + dtype = "float32" + min_val = float("-0.0739505") + max_val = float("0.0209991") + mean = float("-0.0234999") + std = float("0.0134887") + data = None + + +class Program_weight_tensor_parameter_281: + name = "parameter_281" + shape = [384] + dtype = "float32" + min_val = float("0.946312") + max_val = float("1.16798") + mean = float("1.01467") + std = float("0.0273905") + data = None + + +class Program_weight_tensor_parameter_282: + name = "parameter_282" + shape = [384] + dtype = "float32" + min_val = float("0.0666339") + max_val = float("0.525489") + mean = float("0.192642") + std = float("0.0813326") + data = None + + +class Program_weight_tensor_parameter_283: + name = "parameter_283" + shape = [384] + dtype = "float32" + min_val = float("-1.5811") + max_val = float("1.58853") + mean = float("0.0464615") + std = float("0.567191") + data = None + + +class 
Program_weight_tensor_parameter_284: + name = "parameter_284" + shape = [384, 1536, 1, 1] + dtype = "float32" + min_val = float("-0.0467316") + max_val = float("0.0575595") + mean = float("8.55437e-05") + std = float("0.0030071") + data = None + + +class Program_weight_tensor_parameter_285: + name = "parameter_285" + shape = [384] + dtype = "float32" + min_val = float("-0.0183804") + max_val = float("0.0258619") + mean = float("-0.00144525") + std = float("0.00680648") + data = None + + +class Program_weight_tensor_parameter_286: + name = "parameter_286" + shape = [384] + dtype = "float32" + min_val = float("0.969538") + max_val = float("1.06054") + mean = float("0.993834") + std = float("0.0122522") + data = None + + +class Program_weight_tensor_parameter_287: + name = "parameter_287" + shape = [384] + dtype = "float32" + min_val = float("0.00292493") + max_val = float("0.0159254") + mean = float("0.00705807") + std = float("0.00238091") + data = None + + +class Program_weight_tensor_parameter_288: + name = "parameter_288" + shape = [384] + dtype = "float32" + min_val = float("-0.0970828") + max_val = float("0.0620531") + mean = float("-0.0420728") + std = float("0.0244223") + data = None + + +class Program_weight_tensor_parameter_289: + name = "parameter_289" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0333324") + max_val = float("0.0411053") + mean = float("-0.000526762") + std = float("0.00328183") + data = None + + +class Program_weight_tensor_parameter_290: + name = "parameter_290" + shape = [384] + dtype = "float32" + min_val = float("-0.0183804") + max_val = float("0.0258619") + mean = float("-0.00144525") + std = float("0.00680648") + data = None + + +class Program_weight_tensor_parameter_291: + name = "parameter_291" + shape = [384] + dtype = "float32" + min_val = float("0.972046") + max_val = float("1.08568") + mean = float("1.00364") + std = float("0.0181342") + data = None + + +class Program_weight_tensor_parameter_292: + name = "parameter_292" + shape = [384] + dtype = "float32" + min_val = float("0.0169031") + max_val = float("0.138508") + mean = float("0.0431682") + std = float("0.0166625") + data = None + + +class Program_weight_tensor_parameter_293: + name = "parameter_293" + shape = [384] + dtype = "float32" + min_val = float("-0.322591") + max_val = float("0.0931467") + mean = float("-0.131452") + std = float("0.0636301") + data = None + + +class Program_weight_tensor_parameter_294: + name = "parameter_294" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0285775") + max_val = float("0.0755074") + mean = float("-0.000191474") + std = float("0.0013728") + data = None + + +class Program_weight_tensor_parameter_295: + name = "parameter_295" + shape = [384] + dtype = "float32" + min_val = float("-0.0498105") + max_val = float("0.00884065") + mean = float("-0.00838186") + std = float("0.00779167") + data = None + + +class Program_weight_tensor_parameter_296: + name = "parameter_296" + shape = [384] + dtype = "float32" + min_val = float("0.953878") + max_val = float("1.13497") + mean = float("1.01253") + std = float("0.0201047") + data = None + + +class Program_weight_tensor_parameter_297: + name = "parameter_297" + shape = [384] + dtype = "float32" + min_val = float("0.072012") + max_val = float("0.425232") + mean = float("0.171605") + std = float("0.0477212") + data = None + + +class Program_weight_tensor_parameter_298: + name = "parameter_298" + shape = [384] + dtype = "float32" + min_val = float("-1.24821") + max_val = 
float("0.9224") + mean = float("-0.241954") + std = float("0.272039") + data = None + + +class Program_weight_tensor_parameter_299: + name = "parameter_299" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.024251") + max_val = float("0.0585297") + mean = float("-0.000141777") + std = float("0.00163037") + data = None + + +class Program_weight_tensor_parameter_300: + name = "parameter_300" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_301: + name = "parameter_301" + shape = [384] + dtype = "float32" + min_val = float("0.984179") + max_val = float("1.03462") + mean = float("0.999922") + std = float("0.00715393") + data = None + + +class Program_weight_tensor_parameter_302: + name = "parameter_302" + shape = [384] + dtype = "float32" + min_val = float("0.00227712") + max_val = float("0.00995773") + mean = float("0.00397087") + std = float("0.00113598") + data = None + + +class Program_weight_tensor_parameter_303: + name = "parameter_303" + shape = [384] + dtype = "float32" + min_val = float("-0.0776737") + max_val = float("0.150078") + mean = float("-0.0200997") + std = float("0.0256007") + data = None + + +class Program_weight_tensor_parameter_304: + name = "parameter_304" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0209147") + max_val = float("0.0327192") + mean = float("-0.000264199") + std = float("0.00284316") + data = None + + +class Program_weight_tensor_parameter_305: + name = "parameter_305" + shape = [384] + dtype = "float32" + min_val = float("-0.0360838") + max_val = float("0.0137949") + mean = float("-0.00769057") + std = float("0.00789116") + data = None + + +class Program_weight_tensor_parameter_306: + name = "parameter_306" + shape = [384] + dtype = "float32" + min_val = float("0.982136") + max_val = float("1.06749") + mean = float("1.00454") + std = float("0.0126701") + data = None + + +class Program_weight_tensor_parameter_307: + name = "parameter_307" + shape = [384] + dtype = "float32" + min_val = float("0.00988707") + max_val = float("0.074172") + mean = float("0.0261233") + std = float("0.00883817") + data = None + + +class Program_weight_tensor_parameter_308: + name = "parameter_308" + shape = [384] + dtype = "float32" + min_val = float("-0.234406") + max_val = float("0.373645") + mean = float("-0.0733953") + std = float("0.0700589") + data = None + + +class Program_weight_tensor_parameter_309: + name = "parameter_309" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0111228") + max_val = float("0.0376454") + mean = float("-0.000113878") + std = float("0.00115243") + data = None + + +class Program_weight_tensor_parameter_310: + name = "parameter_310" + shape = [384] + dtype = "float32" + min_val = float("-0.0529908") + max_val = float("0.00370586") + mean = float("-0.0207007") + std = float("0.00870238") + data = None + + +class Program_weight_tensor_parameter_311: + name = "parameter_311" + shape = [384] + dtype = "float32" + min_val = float("0.976061") + max_val = float("1.08549") + mean = float("1.01199") + std = float("0.0159983") + data = None + + +class Program_weight_tensor_parameter_312: + name = "parameter_312" + shape = [384] + dtype = "float32" + min_val = float("0.0127448") + max_val = float("0.0747085") + mean = float("0.0330308") + std = float("0.00982086") + data = None + + +class Program_weight_tensor_parameter_313: + name = 
"parameter_313" + shape = [384] + dtype = "float32" + min_val = float("-0.182538") + max_val = float("0.229487") + mean = float("-0.0382627") + std = float("0.0546861") + data = None + + +class Program_weight_tensor_parameter_314: + name = "parameter_314" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0155426") + max_val = float("0.0250033") + mean = float("-6.09821e-05") + std = float("0.00159019") + data = None + + +class Program_weight_tensor_parameter_315: + name = "parameter_315" + shape = [384] + dtype = "float32" + min_val = float("-0.0699578") + max_val = float("0.0213472") + mean = float("-0.0334829") + std = float("0.0126426") + data = None + + +class Program_weight_tensor_parameter_316: + name = "parameter_316" + shape = [384] + dtype = "float32" + min_val = float("0.981937") + max_val = float("1.05593") + mean = float("1.0134") + std = float("0.0107706") + data = None + + +class Program_weight_tensor_parameter_317: + name = "parameter_317" + shape = [384] + dtype = "float32" + min_val = float("0.00885101") + max_val = float("0.0351366") + mean = float("0.0147251") + std = float("0.00337135") + data = None + + +class Program_weight_tensor_parameter_318: + name = "parameter_318" + shape = [384] + dtype = "float32" + min_val = float("-0.11759") + max_val = float("0.125101") + mean = float("-0.0114268") + std = float("0.0398999") + data = None + + +class Program_weight_tensor_parameter_319: + name = "parameter_319" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0187213") + max_val = float("0.0462026") + mean = float("-0.000204289") + std = float("0.00328169") + data = None + + +class Program_weight_tensor_parameter_320: + name = "parameter_320" + shape = [384] + dtype = "float32" + min_val = float("-0.024099") + max_val = float("0.0209723") + mean = float("-0.000328398") + std = float("0.00796388") + data = None + + +class Program_weight_tensor_parameter_321: + name = "parameter_321" + shape = [384] + dtype = "float32" + min_val = float("0.994048") + max_val = float("1.08372") + mean = float("1.04108") + std = float("0.0136738") + data = None + + +class Program_weight_tensor_parameter_322: + name = "parameter_322" + shape = [384] + dtype = "float32" + min_val = float("0.0117538") + max_val = float("0.0620667") + mean = float("0.0197522") + std = float("0.00513232") + data = None + + +class Program_weight_tensor_parameter_323: + name = "parameter_323" + shape = [384] + dtype = "float32" + min_val = float("-0.154192") + max_val = float("0.134319") + mean = float("-0.0110652") + std = float("0.0497964") + data = None + + +class Program_weight_tensor_parameter_324: + name = "parameter_324" + shape = [384, 1024, 1, 1] + dtype = "float32" + min_val = float("-0.0381973") + max_val = float("0.0298107") + mean = float("-0.00023944") + std = float("0.00387698") + data = None + + +class Program_weight_tensor_parameter_325: + name = "parameter_325" + shape = [1024] + dtype = "float32" + min_val = float("-3.19596e-10") + max_val = float("2.57347e-10") + mean = float("-6.94228e-12") + std = float("8.15169e-11") + data = None + + +class Program_weight_tensor_parameter_326: + name = "parameter_326" + shape = [1024] + dtype = "float32" + min_val = float("0.826159") + max_val = float("0.830526") + mean = float("0.828072") + std = float("0.00038843") + data = None + + +class Program_weight_tensor_parameter_327: + name = "parameter_327" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184725") + max_val = float("0.0186349") + mean = 
float("3.29491e-06") + std = float("0.0105958") + data = None + + +class Program_weight_tensor_parameter_328: + name = "parameter_328" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186694") + max_val = float("0.0186323") + mean = float("-3.09482e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_329: + name = "parameter_329" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258373") + max_val = float("0.0258488") + mean = float("-0.000490033") + std = float("0.0147842") + data = None + + +class Program_weight_tensor_parameter_330: + name = "parameter_330" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261231") + max_val = float("0.0262344") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_331: + name = "parameter_331" + shape = [1024] + dtype = "float32" + min_val = float("-0.000644078") + max_val = float("0.000416122") + mean = float("1.0367e-06") + std = float("0.000160918") + data = None + + +class Program_weight_tensor_parameter_332: + name = "parameter_332" + shape = [1024] + dtype = "float32" + min_val = float("0.825075") + max_val = float("0.831152") + mean = float("0.828074") + std = float("0.000498935") + data = None + + +class Program_weight_tensor_parameter_333: + name = "parameter_333" + shape = [1024] + dtype = "float32" + min_val = float("-0.000571568") + max_val = float("0.000431714") + mean = float("-6.059e-07") + std = float("0.000151099") + data = None + + +class Program_weight_tensor_parameter_334: + name = "parameter_334" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0452304") + max_val = float("0.0451715") + mean = float("2.40342e-05") + std = float("0.0258606") + data = None + + +class Program_weight_tensor_parameter_335: + name = "parameter_335" + shape = [1024] + dtype = "float32" + min_val = float("-0.000495841") + max_val = float("0.000502198") + mean = float("2.39512e-05") + std = float("0.000158431") + data = None + + +class Program_weight_tensor_parameter_336: + name = "parameter_336" + shape = [1024] + dtype = "float32" + min_val = float("0.825239") + max_val = float("0.831385") + mean = float("0.8281") + std = float("0.000479393") + data = None + + +class Program_weight_tensor_parameter_337: + name = "parameter_337" + shape = [1024] + dtype = "float32" + min_val = float("-0.0182544") + max_val = float("0.0183953") + mean = float("1.83012e-06") + std = float("0.0105888") + data = None + + +class Program_weight_tensor_parameter_338: + name = "parameter_338" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185876") + max_val = float("0.0186055") + mean = float("-3.09823e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_339: + name = "parameter_339" + shape = [2048] + dtype = "float32" + min_val = float("-0.0258719") + max_val = float("0.025874") + mean = float("-0.00048855") + std = float("0.0147851") + data = None + + +class Program_weight_tensor_parameter_340: + name = "parameter_340" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0260955") + max_val = float("0.0261499") + mean = float("-1.26e-05") + std = float("0.0149406") + data = None + + +class Program_weight_tensor_parameter_341: + name = "parameter_341" + shape = [1024] + dtype = "float32" + min_val = float("-0.000468908") + max_val = float("0.000411959") + mean = float("2.0632e-06") + std = float("0.000140162") + data = None + + +class 
Program_weight_tensor_parameter_342: + name = "parameter_342" + shape = [1024] + dtype = "float32" + min_val = float("0.825683") + max_val = float("0.831193") + mean = float("0.828073") + std = float("0.000448511") + data = None + + +class Program_weight_tensor_parameter_343: + name = "parameter_343" + shape = [1024] + dtype = "float32" + min_val = float("-0.000528411") + max_val = float("0.000383675") + mean = float("2.84251e-06") + std = float("0.000141636") + data = None + + +class Program_weight_tensor_parameter_344: + name = "parameter_344" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0450293") + max_val = float("0.0450631") + mean = float("2.40173e-05") + std = float("0.0258607") + data = None + + +class Program_weight_tensor_parameter_345: + name = "parameter_345" + shape = [1024] + dtype = "float32" + min_val = float("-0.000544272") + max_val = float("0.000596298") + mean = float("2.42076e-05") + std = float("0.000181497") + data = None + + +class Program_weight_tensor_parameter_346: + name = "parameter_346" + shape = [1024] + dtype = "float32" + min_val = float("0.825946") + max_val = float("0.831225") + mean = float("0.828119") + std = float("0.000435131") + data = None + + +class Program_weight_tensor_parameter_347: + name = "parameter_347" + shape = [1024] + dtype = "float32" + min_val = float("-0.0184487") + max_val = float("0.0183801") + mean = float("4.31404e-06") + std = float("0.0105859") + data = None + + +class Program_weight_tensor_parameter_348: + name = "parameter_348" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0185587") + max_val = float("0.0185999") + mean = float("-2.97794e-06") + std = float("0.010563") + data = None + + +class Program_weight_tensor_parameter_349: + name = "parameter_349" + shape = [2048] + dtype = "float32" + min_val = float("-0.0259392") + max_val = float("0.025878") + mean = float("-0.000488744") + std = float("0.014786") + data = None + + +class Program_weight_tensor_parameter_350: + name = "parameter_350" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.0261446") + max_val = float("0.0261367") + mean = float("-1.26001e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_351: + name = "parameter_351" + shape = [1024] + dtype = "float32" + min_val = float("-0.000525158") + max_val = float("0.000569597") + mean = float("1.84284e-06") + std = float("0.000180024") + data = None + + +class Program_weight_tensor_parameter_352: + name = "parameter_352" + shape = [1024] + dtype = "float32" + min_val = float("0.826325") + max_val = float("0.831088") + mean = float("0.828071") + std = float("0.00042233") + data = None + + +class Program_weight_tensor_parameter_353: + name = "parameter_353" + shape = [1024] + dtype = "float32" + min_val = float("-0.000560432") + max_val = float("0.000596894") + mean = float("2.2615e-06") + std = float("0.00018498") + data = None + + +class Program_weight_tensor_parameter_354: + name = "parameter_354" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0451116") + max_val = float("0.0451354") + mean = float("2.40528e-05") + std = float("0.0258608") + data = None + + +class Program_weight_tensor_parameter_355: + name = "parameter_355" + shape = [1024] + dtype = "float32" + min_val = float("-0.000823759") + max_val = float("0.000904078") + mean = float("2.92117e-05") + std = float("0.000277537") + data = None + + +class Program_weight_tensor_parameter_356: + name = "parameter_356" + shape = [1024] + dtype = "float32" 
+ min_val = float("0.826283") + max_val = float("0.83082") + mean = float("0.828142") + std = float("0.000430459") + data = None + + +class Program_weight_tensor_parameter_357: + name = "parameter_357" + shape = [1024] + dtype = "float32" + min_val = float("-0.0185659") + max_val = float("0.0186155") + mean = float("4.15762e-06") + std = float("0.0105906") + data = None + + +class Program_weight_tensor_parameter_358: + name = "parameter_358" + shape = [2048, 1024] + dtype = "float32" + min_val = float("-0.0186584") + max_val = float("0.0186457") + mean = float("-3.0236e-06") + std = float("0.0105631") + data = None + + +class Program_weight_tensor_parameter_359: + name = "parameter_359" + shape = [2048] + dtype = "float32" + min_val = float("-0.0260158") + max_val = float("0.0259107") + mean = float("-0.000488165") + std = float("0.0147856") + data = None + + +class Program_weight_tensor_parameter_360: + name = "parameter_360" + shape = [1024, 2048] + dtype = "float32" + min_val = float("-0.026139") + max_val = float("0.026125") + mean = float("-1.26002e-05") + std = float("0.0149405") + data = None + + +class Program_weight_tensor_parameter_361: + name = "parameter_361" + shape = [1024] + dtype = "float32" + min_val = float("-0.000913026") + max_val = float("0.000860109") + mean = float("1.52616e-06") + std = float("0.000286932") + data = None + + +class Program_weight_tensor_parameter_362: + name = "parameter_362" + shape = [1024] + dtype = "float32" + min_val = float("0.826227") + max_val = float("0.830736") + mean = float("0.828069") + std = float("0.000440282") + data = None + + +class Program_weight_tensor_parameter_363: + name = "parameter_363" + shape = [1024] + dtype = "float32" + min_val = float("-0.00089386") + max_val = float("0.000983855") + mean = float("2.69912e-06") + std = float("0.000279129") + data = None + + +class Program_weight_tensor_parameter_364: + name = "parameter_364" + shape = [1024, 1024] + dtype = "float32" + min_val = float("-0.0456631") + max_val = float("0.0456484") + mean = float("2.40399e-05") + std = float("0.0258625") + data = None + + +class Program_weight_tensor_parameter_365: + name = "parameter_365" + shape = [1024] + dtype = "float32" + min_val = float("-3.75937") + max_val = float("-0.734") + mean = float("-2.18719") + std = float("0.428746") + data = None + + +class Program_weight_tensor_parameter_366: + name = "parameter_366" + shape = [1024] + dtype = "float32" + min_val = float("1.61944") + max_val = float("4.44114") + mean = float("3.08041") + std = float("0.254214") + data = None + + +class Program_weight_tensor_parameter_367: + name = "parameter_367" + shape = [1024] + dtype = "float32" + min_val = float("0.00515514") + max_val = float("0.0275054") + mean = float("0.00882973") + std = float("0.00191584") + data = None + + +class Program_weight_tensor_parameter_368: + name = "parameter_368" + shape = [1024] + dtype = "float32" + min_val = float("-0.173492") + max_val = float("0.132414") + mean = float("-0.0625274") + std = float("0.0318422") + data = None + + +class Program_weight_tensor_parameter_369: + name = "parameter_369" + shape = [1024, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0420016") + max_val = float("0.0672891") + mean = float("-0.000434506") + std = float("0.00419984") + data = None + + +class Program_weight_tensor_parameter_370: + name = "parameter_370" + shape = [768] + dtype = "float32" + min_val = float("-0.0144958") + max_val = float("0.00204154") + mean = float("-0.000784991") + std = float("0.00208566") + 
data = None + + +class Program_weight_tensor_parameter_371: + name = "parameter_371" + shape = [768, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0809974") + max_val = float("0.144837") + mean = float("-0.000290719") + std = float("0.0016779") + data = None + + +class Program_weight_tensor_parameter_372: + name = "parameter_372" + shape = [384] + dtype = "float32" + min_val = float("-1.77404") + max_val = float("0.318904") + mean = float("-0.31075") + std = float("0.291253") + data = None + + +class Program_weight_tensor_parameter_373: + name = "parameter_373" + shape = [384] + dtype = "float32" + min_val = float("0.188368") + max_val = float("1.82104") + mean = float("0.60964") + std = float("0.262596") + data = None + + +class Program_weight_tensor_parameter_374: + name = "parameter_374" + shape = [384] + dtype = "float32" + min_val = float("7.69323e-05") + max_val = float("0.00105931") + mean = float("0.000262139") + std = float("0.000132205") + data = None + + +class Program_weight_tensor_parameter_375: + name = "parameter_375" + shape = [384] + dtype = "float32" + min_val = float("-0.0656167") + max_val = float("0.0776953") + mean = float("0.0239193") + std = float("0.0176294") + data = None + + +class Program_weight_tensor_parameter_376: + name = "parameter_376" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.020871") + max_val = float("0.0273244") + mean = float("-0.000414716") + std = float("0.00284754") + data = None + + +class Program_weight_tensor_parameter_377: + name = "parameter_377" + shape = [384] + dtype = "float32" + min_val = float("-1.77405") + max_val = float("0.319251") + mean = float("-0.310681") + std = float("0.291275") + data = None + + +class Program_weight_tensor_parameter_378: + name = "parameter_378" + shape = [384] + dtype = "float32" + min_val = float("0.335122") + max_val = float("2.60483") + mean = float("1.02609") + std = float("0.290246") + data = None + + +class Program_weight_tensor_parameter_379: + name = "parameter_379" + shape = [384] + dtype = "float32" + min_val = float("0.000764026") + max_val = float("0.00789643") + mean = float("0.00239397") + std = float("0.000872399") + data = None + + +class Program_weight_tensor_parameter_380: + name = "parameter_380" + shape = [384] + dtype = "float32" + min_val = float("-0.229833") + max_val = float("0.162266") + mean = float("0.0349416") + std = float("0.0423478") + data = None + + +class Program_weight_tensor_parameter_381: + name = "parameter_381" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0185255") + max_val = float("0.0282844") + mean = float("-7.21101e-05") + std = float("0.00183304") + data = None + + +class Program_weight_tensor_parameter_382: + name = "parameter_382" + shape = [384] + dtype = "float32" + min_val = float("-2.58205") + max_val = float("0.0326997") + mean = float("-1.56844") + std = float("0.416017") + data = None + + +class Program_weight_tensor_parameter_383: + name = "parameter_383" + shape = [384] + dtype = "float32" + min_val = float("0.51894") + max_val = float("1.64424") + mean = float("1.13558") + std = float("0.149427") + data = None + + +class Program_weight_tensor_parameter_384: + name = "parameter_384" + shape = [384] + dtype = "float32" + min_val = float("0.0445179") + max_val = float("0.278452") + mean = float("0.101004") + std = float("0.0266658") + data = None + + +class Program_weight_tensor_parameter_385: + name = "parameter_385" + shape = [384] + dtype = "float32" + min_val = float("-1.05877") + max_val = 
float("0.500591") + mean = float("-0.285429") + std = float("0.144535") + data = None + + +class Program_weight_tensor_parameter_386: + name = "parameter_386" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0217847") + max_val = float("0.0601331") + mean = float("-0.000214232") + std = float("0.00242153") + data = None + + +class Program_weight_tensor_parameter_387: + name = "parameter_387" + shape = [384] + dtype = "float32" + min_val = float("-1.93932") + max_val = float("0.644238") + mean = float("-0.57485") + std = float("0.358678") + data = None + + +class Program_weight_tensor_parameter_388: + name = "parameter_388" + shape = [384] + dtype = "float32" + min_val = float("0.163976") + max_val = float("2.06584") + mean = float("0.56203") + std = float("0.227231") + data = None + + +class Program_weight_tensor_parameter_389: + name = "parameter_389" + shape = [384] + dtype = "float32" + min_val = float("8.46446e-05") + max_val = float("0.00181652") + mean = float("0.000300897") + std = float("0.000147903") + data = None + + +class Program_weight_tensor_parameter_390: + name = "parameter_390" + shape = [384] + dtype = "float32" + min_val = float("-0.0395058") + max_val = float("0.072267") + mean = float("0.0222665") + std = float("0.0153805") + data = None + + +class Program_weight_tensor_parameter_391: + name = "parameter_391" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0311026") + max_val = float("0.039225") + mean = float("-0.000409791") + std = float("0.00262815") + data = None + + +class Program_weight_tensor_parameter_392: + name = "parameter_392" + shape = [384] + dtype = "float32" + min_val = float("-1.9394") + max_val = float("0.644918") + mean = float("-0.574762") + std = float("0.358753") + data = None + + +class Program_weight_tensor_parameter_393: + name = "parameter_393" + shape = [384] + dtype = "float32" + min_val = float("0.583818") + max_val = float("2.15633") + mean = float("1.08411") + std = float("0.255713") + data = None + + +class Program_weight_tensor_parameter_394: + name = "parameter_394" + shape = [384] + dtype = "float32" + min_val = float("0.00151649") + max_val = float("0.011387") + mean = float("0.00363589") + std = float("0.00111629") + data = None + + +class Program_weight_tensor_parameter_395: + name = "parameter_395" + shape = [384] + dtype = "float32" + min_val = float("-0.114817") + max_val = float("0.168288") + mean = float("0.040355") + std = float("0.0413819") + data = None + + +class Program_weight_tensor_parameter_396: + name = "parameter_396" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0211861") + max_val = float("0.0312284") + mean = float("-9.86606e-05") + std = float("0.00198109") + data = None + + +class Program_weight_tensor_parameter_397: + name = "parameter_397" + shape = [384] + dtype = "float32" + min_val = float("-2.39618") + max_val = float("0.845899") + mean = float("-1.40537") + std = float("0.36063") + data = None + + +class Program_weight_tensor_parameter_398: + name = "parameter_398" + shape = [384] + dtype = "float32" + min_val = float("0.454223") + max_val = float("1.91875") + mean = float("1.16633") + std = float("0.147984") + data = None + + +class Program_weight_tensor_parameter_399: + name = "parameter_399" + shape = [384] + dtype = "float32" + min_val = float("0.0369914") + max_val = float("0.169613") + mean = float("0.067321") + std = float("0.0165547") + data = None + + +class Program_weight_tensor_parameter_400: + name = "parameter_400" + shape = 
[384] + dtype = "float32" + min_val = float("-0.916864") + max_val = float("0.834885") + mean = float("-0.197255") + std = float("0.118118") + data = None + + +class Program_weight_tensor_parameter_401: + name = "parameter_401" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0304568") + max_val = float("0.0446889") + mean = float("-0.000206096") + std = float("0.00245489") + data = None + + +class Program_weight_tensor_parameter_402: + name = "parameter_402" + shape = [384] + dtype = "float32" + min_val = float("-1.87628") + max_val = float("0.453077") + mean = float("-0.485305") + std = float("0.376481") + data = None + + +class Program_weight_tensor_parameter_403: + name = "parameter_403" + shape = [384] + dtype = "float32" + min_val = float("0.0771953") + max_val = float("2.11917") + mean = float("0.441977") + std = float("0.217648") + data = None + + +class Program_weight_tensor_parameter_404: + name = "parameter_404" + shape = [384] + dtype = "float32" + min_val = float("7.57603e-05") + max_val = float("0.00171771") + mean = float("0.00036293") + std = float("0.000186378") + data = None + + +class Program_weight_tensor_parameter_405: + name = "parameter_405" + shape = [384] + dtype = "float32" + min_val = float("-0.0528798") + max_val = float("0.0858378") + mean = float("0.0268765") + std = float("0.0175426") + data = None + + +class Program_weight_tensor_parameter_406: + name = "parameter_406" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.0213328") + max_val = float("0.0283453") + mean = float("-0.000505242") + std = float("0.00224656") + data = None + + +class Program_weight_tensor_parameter_407: + name = "parameter_407" + shape = [384] + dtype = "float32" + min_val = float("-1.87669") + max_val = float("0.45341") + mean = float("-0.485211") + std = float("0.376586") + data = None + + +class Program_weight_tensor_parameter_408: + name = "parameter_408" + shape = [384] + dtype = "float32" + min_val = float("0.522977") + max_val = float("2.22431") + mean = float("1.05297") + std = float("0.260052") + data = None + + +class Program_weight_tensor_parameter_409: + name = "parameter_409" + shape = [384] + dtype = "float32" + min_val = float("0.00214087") + max_val = float("0.0106285") + mean = float("0.00466215") + std = float("0.00134646") + data = None + + +class Program_weight_tensor_parameter_410: + name = "parameter_410" + shape = [384] + dtype = "float32" + min_val = float("-0.272097") + max_val = float("0.182301") + mean = float("0.0462845") + std = float("0.0484542") + data = None + + +class Program_weight_tensor_parameter_411: + name = "parameter_411" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0214852") + max_val = float("0.0348977") + mean = float("-0.000101693") + std = float("0.00210424") + data = None + + +class Program_weight_tensor_parameter_412: + name = "parameter_412" + shape = [384] + dtype = "float32" + min_val = float("-2.1565") + max_val = float("0.418538") + mean = float("-1.36711") + std = float("0.277506") + data = None + + +class Program_weight_tensor_parameter_413: + name = "parameter_413" + shape = [384] + dtype = "float32" + min_val = float("0.707119") + max_val = float("1.63571") + mean = float("1.14297") + std = float("0.101612") + data = None + + +class Program_weight_tensor_parameter_414: + name = "parameter_414" + shape = [384] + dtype = "float32" + min_val = float("0.0267598") + max_val = float("0.120536") + mean = float("0.0531872") + std = float("0.0145039") + data = None + + +class 
Program_weight_tensor_parameter_415: + name = "parameter_415" + shape = [384] + dtype = "float32" + min_val = float("-0.737016") + max_val = float("0.211594") + mean = float("-0.135647") + std = float("0.0976005") + data = None + + +class Program_weight_tensor_parameter_416: + name = "parameter_416" + shape = [384, 384, 3, 3] + dtype = "float32" + min_val = float("-0.0300983") + max_val = float("0.05499") + mean = float("-0.000159015") + std = float("0.00235156") + data = None + + +class Program_weight_tensor_parameter_417: + name = "parameter_417" + shape = [384] + dtype = "float32" + min_val = float("-2.92344") + max_val = float("1.66439") + mean = float("-0.760407") + std = float("0.643554") + data = None + + +class Program_weight_tensor_parameter_418: + name = "parameter_418" + shape = [384] + dtype = "float32" + min_val = float("0.953228") + max_val = float("2.9182") + mean = float("1.86309") + std = float("0.276205") + data = None + + +class Program_weight_tensor_parameter_419: + name = "parameter_419" + shape = [384] + dtype = "float32" + min_val = float("0.00273562") + max_val = float("0.012939") + mean = float("0.00578831") + std = float("0.00145222") + data = None + + +class Program_weight_tensor_parameter_420: + name = "parameter_420" + shape = [384] + dtype = "float32" + min_val = float("-0.279172") + max_val = float("0.135794") + mean = float("0.0682701") + std = float("0.0329249") + data = None + + +class Program_weight_tensor_parameter_421: + name = "parameter_421" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0411036") + max_val = float("0.048141") + mean = float("-0.000774534") + std = float("0.00548625") + data = None + + +class Program_weight_tensor_parameter_422: + name = "parameter_422" + shape = [384] + dtype = "float32" + min_val = float("-2.24702") + max_val = float("0.681993") + mean = float("-0.777088") + std = float("0.472908") + data = None + + +class Program_weight_tensor_parameter_423: + name = "parameter_423" + shape = [384] + dtype = "float32" + min_val = float("0.965876") + max_val = float("2.89361") + mean = float("2.09705") + std = float("0.305445") + data = None + + +class Program_weight_tensor_parameter_424: + name = "parameter_424" + shape = [384] + dtype = "float32" + min_val = float("0.000839665") + max_val = float("0.00423233") + mean = float("0.00221563") + std = float("0.000537") + data = None + + +class Program_weight_tensor_parameter_425: + name = "parameter_425" + shape = [384] + dtype = "float32" + min_val = float("-0.0182533") + max_val = float("0.0914483") + mean = float("0.0419083") + std = float("0.0183649") + data = None + + +class Program_weight_tensor_parameter_426: + name = "parameter_426" + shape = [384, 768, 1, 1] + dtype = "float32" + min_val = float("-0.0837021") + max_val = float("0.0611426") + mean = float("-0.00045084") + std = float("0.00374174") + data = None + + +class Program_weight_tensor_parameter_427: + name = "parameter_427" + shape = [768] + dtype = "float32" + min_val = float("-2.40194") + max_val = float("0.642339") + mean = float("-0.908288") + std = float("0.339331") + data = None + + +class Program_weight_tensor_parameter_428: + name = "parameter_428" + shape = [768] + dtype = "float32" + min_val = float("0.53146") + max_val = float("1.90712") + mean = float("0.919684") + std = float("0.149212") + data = None + + +class Program_weight_tensor_parameter_429: + name = "parameter_429" + shape = [768] + dtype = "float32" + min_val = float("0.00745832") + max_val = float("0.0743865") + mean = 
float("0.0178485") + std = float("0.00551587") + data = None + + +class Program_weight_tensor_parameter_430: + name = "parameter_430" + shape = [768] + dtype = "float32" + min_val = float("-0.236023") + max_val = float("0.207751") + mean = float("0.041919") + std = float("0.0579014") + data = None + + +class Program_weight_tensor_parameter_431: + name = "parameter_431" + shape = [768, 512, 3, 3] + dtype = "float32" + min_val = float("-0.0383779") + max_val = float("0.0519002") + mean = float("-9.93933e-05") + std = float("0.00244217") + data = None + + +class Program_weight_tensor_parameter_432: + name = "parameter_432" + shape = [512] + dtype = "float32" + min_val = float("-3.39029") + max_val = float("1.66616") + mean = float("-1.16168") + std = float("0.513766") + data = None + + +class Program_weight_tensor_parameter_433: + name = "parameter_433" + shape = [512] + dtype = "float32" + min_val = float("0.520928") + max_val = float("1.67546") + mean = float("1.11104") + std = float("0.148384") + data = None + + +class Program_weight_tensor_parameter_434: + name = "parameter_434" + shape = [512] + dtype = "float32" + min_val = float("0.00230842") + max_val = float("0.0165448") + mean = float("0.00755702") + std = float("0.00192355") + data = None + + +class Program_weight_tensor_parameter_435: + name = "parameter_435" + shape = [512] + dtype = "float32" + min_val = float("-0.159179") + max_val = float("0.0723523") + mean = float("-0.0485061") + std = float("0.0412122") + data = None + + +class Program_weight_tensor_parameter_436: + name = "parameter_436" + shape = [512, 384, 1, 1] + dtype = "float32" + min_val = float("-0.208779") + max_val = float("0.179911") + mean = float("-0.000606249") + std = float("0.0081171") + data = None + + +class Program_weight_tensor_parameter_437: + name = "parameter_437" + shape = [384] + dtype = "float32" + min_val = float("-0.0103559") + max_val = float("0.00155602") + mean = float("-0.00302775") + std = float("0.0023618") + data = None + + +class Program_weight_tensor_parameter_438: + name = "parameter_438" + shape = [384, 384, 1, 1] + dtype = "float32" + min_val = float("-0.204999") + max_val = float("0.141306") + mean = float("-0.00211219") + std = float("0.00500511") + data = None + + +class Program_weight_tensor_parameter_439: + name = "parameter_439" + shape = [192] + dtype = "float32" + min_val = float("-1.97063") + max_val = float("0.41045") + mean = float("-0.348649") + std = float("0.333533") + data = None + + +class Program_weight_tensor_parameter_440: + name = "parameter_440" + shape = [192] + dtype = "float32" + min_val = float("0.0528508") + max_val = float("2.16013") + mean = float("0.581272") + std = float("0.419844") + data = None + + +class Program_weight_tensor_parameter_441: + name = "parameter_441" + shape = [192] + dtype = "float32" + min_val = float("9.94453e-05") + max_val = float("0.00123961") + mean = float("0.000476419") + std = float("0.000224006") + data = None + + +class Program_weight_tensor_parameter_442: + name = "parameter_442" + shape = [192] + dtype = "float32" + min_val = float("-0.0376085") + max_val = float("0.0570153") + mean = float("0.00573177") + std = float("0.0152437") + data = None + + +class Program_weight_tensor_parameter_443: + name = "parameter_443" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0210389") + max_val = float("0.0585363") + mean = float("-0.000352054") + std = float("0.00423892") + data = None + + +class Program_weight_tensor_parameter_444: + name = "parameter_444" + 
shape = [192] + dtype = "float32" + min_val = float("-1.97059") + max_val = float("0.411367") + mean = float("-0.348497") + std = float("0.333596") + data = None + + +class Program_weight_tensor_parameter_445: + name = "parameter_445" + shape = [192] + dtype = "float32" + min_val = float("0.372764") + max_val = float("2.70243") + mean = float("1.20208") + std = float("0.49364") + data = None + + +class Program_weight_tensor_parameter_446: + name = "parameter_446" + shape = [192] + dtype = "float32" + min_val = float("0.0014863") + max_val = float("0.020345") + mean = float("0.00560471") + std = float("0.00209367") + data = None + + +class Program_weight_tensor_parameter_447: + name = "parameter_447" + shape = [192] + dtype = "float32" + min_val = float("-0.115289") + max_val = float("0.163741") + mean = float("0.0194467") + std = float("0.0436225") + data = None + + +class Program_weight_tensor_parameter_448: + name = "parameter_448" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.031927") + max_val = float("0.0389496") + mean = float("-0.000144904") + std = float("0.00325908") + data = None + + +class Program_weight_tensor_parameter_449: + name = "parameter_449" + shape = [192] + dtype = "float32" + min_val = float("-2.89054") + max_val = float("-0.177595") + mean = float("-1.31446") + std = float("0.401195") + data = None + + +class Program_weight_tensor_parameter_450: + name = "parameter_450" + shape = [192] + dtype = "float32" + min_val = float("0.695074") + max_val = float("2.09481") + mean = float("1.17912") + std = float("0.169901") + data = None + + +class Program_weight_tensor_parameter_451: + name = "parameter_451" + shape = [192] + dtype = "float32" + min_val = float("0.0658237") + max_val = float("0.479229") + mean = float("0.138928") + std = float("0.0482074") + data = None + + +class Program_weight_tensor_parameter_452: + name = "parameter_452" + shape = [192] + dtype = "float32" + min_val = float("-2.47032") + max_val = float("1.83399") + mean = float("-0.227578") + std = float("0.394509") + data = None + + +class Program_weight_tensor_parameter_453: + name = "parameter_453" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0350379") + max_val = float("0.0468605") + mean = float("-0.000221381") + std = float("0.00388426") + data = None + + +class Program_weight_tensor_parameter_454: + name = "parameter_454" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.513263") + mean = float("-0.279273") + std = float("0.321486") + data = None + + +class Program_weight_tensor_parameter_455: + name = "parameter_455" + shape = [192] + dtype = "float32" + min_val = float("0.0449424") + max_val = float("1.76947") + mean = float("0.444383") + std = float("0.305669") + data = None + + +class Program_weight_tensor_parameter_456: + name = "parameter_456" + shape = [192] + dtype = "float32" + min_val = float("7.91667e-05") + max_val = float("0.00164061") + mean = float("0.00043007") + std = float("0.000226992") + data = None + + +class Program_weight_tensor_parameter_457: + name = "parameter_457" + shape = [192] + dtype = "float32" + min_val = float("-0.0363552") + max_val = float("0.0461841") + mean = float("0.00877747") + std = float("0.0120158") + data = None + + +class Program_weight_tensor_parameter_458: + name = "parameter_458" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.02483") + max_val = float("0.0404131") + mean = float("-0.000400917") + std = float("0.00391908") + data = None + + 
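The generated weight_meta entries in this file record only summary statistics for each parameter (shape, dtype, min/max, mean, std) and leave data = None. Below is a minimal sketch of how such an entry could be turned into a synthetic tensor when replaying a subgraph without the original checkpoint; the materialize_weight helper, the _Meta stand-in (mirroring parameter_458 above), and the use of NumPy are illustrative assumptions, not part of this patch or of any existing tooling.

    # Illustrative only: rebuild a synthetic tensor from one stats-only entry.
    import numpy as np

    def materialize_weight(meta, seed=0):
        """Sample weights around the recorded mean/std, clipped to the recorded range."""
        rng = np.random.default_rng(seed)
        sample = rng.normal(meta.mean, meta.std, size=meta.shape).astype(meta.dtype)
        return np.clip(sample, meta.min_val, meta.max_val)

    class _Meta:  # stand-in mirroring the fields of the generated classes
        name = "parameter_458"
        shape = [192, 192, 1, 1]
        dtype = "float32"
        min_val = -0.02483
        max_val = 0.0404131
        mean = -0.000400917
        std = 0.00391908
        data = None

    w = materialize_weight(_Meta)
    print(w.shape, w.dtype)  # (192, 192, 1, 1) float32

Clipping keeps the fake weights inside the observed envelope so batch-norm style parameters (e.g. the [192] scale/shift entries nearby) stay in a numerically plausible range; whether exact statistics matter for a given replay is left to the consumer of these files.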
+class Program_weight_tensor_parameter_459: + name = "parameter_459" + shape = [192] + dtype = "float32" + min_val = float("-1.94031") + max_val = float("0.514903") + mean = float("-0.279015") + std = float("0.321709") + data = None + + +class Program_weight_tensor_parameter_460: + name = "parameter_460" + shape = [192] + dtype = "float32" + min_val = float("0.481654") + max_val = float("2.27026") + mean = float("1.13859") + std = float("0.375612") + data = None + + +class Program_weight_tensor_parameter_461: + name = "parameter_461" + shape = [192] + dtype = "float32" + min_val = float("0.00303177") + max_val = float("0.0146645") + mean = float("0.00648") + std = float("0.00181309") + data = None + + +class Program_weight_tensor_parameter_462: + name = "parameter_462" + shape = [192] + dtype = "float32" + min_val = float("-0.0803161") + max_val = float("0.116901") + mean = float("0.0359767") + std = float("0.0322211") + data = None + + +class Program_weight_tensor_parameter_463: + name = "parameter_463" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0229799") + max_val = float("0.0371751") + mean = float("-0.000196939") + std = float("0.00352878") + data = None + + +class Program_weight_tensor_parameter_464: + name = "parameter_464" + shape = [192] + dtype = "float32" + min_val = float("-2.50826") + max_val = float("-0.12355") + mean = float("-1.2887") + std = float("0.443822") + data = None + + +class Program_weight_tensor_parameter_465: + name = "parameter_465" + shape = [192] + dtype = "float32" + min_val = float("0.653803") + max_val = float("1.66962") + mean = float("1.19928") + std = float("0.166233") + data = None + + +class Program_weight_tensor_parameter_466: + name = "parameter_466" + shape = [192] + dtype = "float32" + min_val = float("0.0475951") + max_val = float("0.209951") + mean = float("0.0950332") + std = float("0.0248435") + data = None + + +class Program_weight_tensor_parameter_467: + name = "parameter_467" + shape = [192] + dtype = "float32" + min_val = float("-2.16167") + max_val = float("0.473341") + mean = float("-0.117492") + std = float("0.248865") + data = None + + +class Program_weight_tensor_parameter_468: + name = "parameter_468" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.038582") + max_val = float("0.0537646") + mean = float("-0.00026749") + std = float("0.0040656") + data = None + + +class Program_weight_tensor_parameter_469: + name = "parameter_469" + shape = [192] + dtype = "float32" + min_val = float("-1.75738") + max_val = float("0.468608") + mean = float("-0.262263") + std = float("0.335862") + data = None + + +class Program_weight_tensor_parameter_470: + name = "parameter_470" + shape = [192] + dtype = "float32" + min_val = float("0.00305103") + max_val = float("1.67905") + mean = float("0.351948") + std = float("0.251703") + data = None + + +class Program_weight_tensor_parameter_471: + name = "parameter_471" + shape = [192] + dtype = "float32" + min_val = float("1.02293e-06") + max_val = float("0.00228453") + mean = float("0.000400551") + std = float("0.000283282") + data = None + + +class Program_weight_tensor_parameter_472: + name = "parameter_472" + shape = [192] + dtype = "float32" + min_val = float("-0.031609") + max_val = float("0.0551924") + mean = float("0.0110783") + std = float("0.0123374") + data = None + + +class Program_weight_tensor_parameter_473: + name = "parameter_473" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0307534") + max_val = float("0.0384153") + mean = 
float("-0.00045859") + std = float("0.00377622") + data = None + + +class Program_weight_tensor_parameter_474: + name = "parameter_474" + shape = [192] + dtype = "float32" + min_val = float("-1.75744") + max_val = float("0.470024") + mean = float("-0.262025") + std = float("0.336099") + data = None + + +class Program_weight_tensor_parameter_475: + name = "parameter_475" + shape = [192] + dtype = "float32" + min_val = float("0.405457") + max_val = float("1.97843") + mean = float("1.06603") + std = float("0.334153") + data = None + + +class Program_weight_tensor_parameter_476: + name = "parameter_476" + shape = [192] + dtype = "float32" + min_val = float("0.00267969") + max_val = float("0.0141796") + mean = float("0.00700109") + std = float("0.00190313") + data = None + + +class Program_weight_tensor_parameter_477: + name = "parameter_477" + shape = [192] + dtype = "float32" + min_val = float("-0.0881741") + max_val = float("0.111433") + mean = float("0.0401956") + std = float("0.0325546") + data = None + + +class Program_weight_tensor_parameter_478: + name = "parameter_478" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0336081") + max_val = float("0.0420323") + mean = float("-0.000205836") + std = float("0.00368544") + data = None + + +class Program_weight_tensor_parameter_479: + name = "parameter_479" + shape = [192] + dtype = "float32" + min_val = float("-2.49703") + max_val = float("0.138789") + mean = float("-1.24309") + std = float("0.424468") + data = None + + +class Program_weight_tensor_parameter_480: + name = "parameter_480" + shape = [192] + dtype = "float32" + min_val = float("0.652493") + max_val = float("1.80896") + mean = float("1.16711") + std = float("0.165463") + data = None + + +class Program_weight_tensor_parameter_481: + name = "parameter_481" + shape = [192] + dtype = "float32" + min_val = float("0.0304637") + max_val = float("0.147553") + mean = float("0.067116") + std = float("0.0164386") + data = None + + +class Program_weight_tensor_parameter_482: + name = "parameter_482" + shape = [192] + dtype = "float32" + min_val = float("-1.70097") + max_val = float("0.305559") + mean = float("-0.0850748") + std = float("0.199213") + data = None + + +class Program_weight_tensor_parameter_483: + name = "parameter_483" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0472912") + max_val = float("0.0583976") + mean = float("-0.000284769") + std = float("0.00417002") + data = None + + +class Program_weight_tensor_parameter_484: + name = "parameter_484" + shape = [192] + dtype = "float32" + min_val = float("-2.07915") + max_val = float("0.533836") + mean = float("-0.272165") + std = float("0.375339") + data = None + + +class Program_weight_tensor_parameter_485: + name = "parameter_485" + shape = [192] + dtype = "float32" + min_val = float("0.000522804") + max_val = float("0.732366") + mean = float("0.21194") + std = float("0.136205") + data = None + + +class Program_weight_tensor_parameter_486: + name = "parameter_486" + shape = [192] + dtype = "float32" + min_val = float("5.96543e-08") + max_val = float("0.000937142") + mean = float("0.000261376") + std = float("0.000147877") + data = None + + +class Program_weight_tensor_parameter_487: + name = "parameter_487" + shape = [192] + dtype = "float32" + min_val = float("-0.0266706") + max_val = float("0.0357546") + mean = float("0.00698739") + std = float("0.0098736") + data = None + + +class Program_weight_tensor_parameter_488: + name = "parameter_488" + shape = [192, 192, 1, 1] + dtype = 
"float32" + min_val = float("-0.0207564") + max_val = float("0.0335475") + mean = float("-0.000292443") + std = float("0.00332227") + data = None + + +class Program_weight_tensor_parameter_489: + name = "parameter_489" + shape = [192] + dtype = "float32" + min_val = float("-2.07924") + max_val = float("0.535791") + mean = float("-0.271976") + std = float("0.375569") + data = None + + +class Program_weight_tensor_parameter_490: + name = "parameter_490" + shape = [192] + dtype = "float32" + min_val = float("0.395086") + max_val = float("1.96267") + mean = float("0.959008") + std = float("0.303814") + data = None + + +class Program_weight_tensor_parameter_491: + name = "parameter_491" + shape = [192] + dtype = "float32" + min_val = float("0.00302737") + max_val = float("0.0157952") + mean = float("0.00707016") + std = float("0.00211662") + data = None + + +class Program_weight_tensor_parameter_492: + name = "parameter_492" + shape = [192] + dtype = "float32" + min_val = float("-0.0788482") + max_val = float("0.119233") + mean = float("0.0430225") + std = float("0.0339838") + data = None + + +class Program_weight_tensor_parameter_493: + name = "parameter_493" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0340016") + max_val = float("0.0403474") + mean = float("-0.000216247") + std = float("0.00380285") + data = None + + +class Program_weight_tensor_parameter_494: + name = "parameter_494" + shape = [192] + dtype = "float32" + min_val = float("-2.74084") + max_val = float("-0.0805818") + mean = float("-1.23662") + std = float("0.434286") + data = None + + +class Program_weight_tensor_parameter_495: + name = "parameter_495" + shape = [192] + dtype = "float32" + min_val = float("0.761952") + max_val = float("1.62053") + mean = float("1.15094") + std = float("0.142444") + data = None + + +class Program_weight_tensor_parameter_496: + name = "parameter_496" + shape = [192] + dtype = "float32" + min_val = float("0.0278922") + max_val = float("0.0817439") + mean = float("0.0488411") + std = float("0.0102139") + data = None + + +class Program_weight_tensor_parameter_497: + name = "parameter_497" + shape = [192] + dtype = "float32" + min_val = float("-1.39522") + max_val = float("0.291819") + mean = float("-0.0734705") + std = float("0.166804") + data = None + + +class Program_weight_tensor_parameter_498: + name = "parameter_498" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0589398") + max_val = float("0.0606418") + mean = float("-0.000300541") + std = float("0.00415388") + data = None + + +class Program_weight_tensor_parameter_499: + name = "parameter_499" + shape = [192] + dtype = "float32" + min_val = float("-1.212") + max_val = float("0.447452") + mean = float("-0.232044") + std = float("0.339385") + data = None + + +class Program_weight_tensor_parameter_500: + name = "parameter_500" + shape = [192] + dtype = "float32" + min_val = float("-9.43381e-05") + max_val = float("0.678118") + mean = float("0.192025") + std = float("0.120758") + data = None + + +class Program_weight_tensor_parameter_501: + name = "parameter_501" + shape = [192] + dtype = "float32" + min_val = float("2.50564e-10") + max_val = float("0.000967586") + mean = float("0.000259696") + std = float("0.000158071") + data = None + + +class Program_weight_tensor_parameter_502: + name = "parameter_502" + shape = [192] + dtype = "float32" + min_val = float("-0.0445459") + max_val = float("0.0432653") + mean = float("0.00753396") + std = float("0.0124482") + data = None + + +class 
Program_weight_tensor_parameter_503: + name = "parameter_503" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0374404") + max_val = float("0.0395949") + mean = float("-0.000292615") + std = float("0.00342625") + data = None + + +class Program_weight_tensor_parameter_504: + name = "parameter_504" + shape = [192] + dtype = "float32" + min_val = float("-1.21197") + max_val = float("0.448806") + mean = float("-0.231853") + std = float("0.339659") + data = None + + +class Program_weight_tensor_parameter_505: + name = "parameter_505" + shape = [192] + dtype = "float32" + min_val = float("0.382853") + max_val = float("1.56358") + mean = float("0.852209") + std = float("0.259926") + data = None + + +class Program_weight_tensor_parameter_506: + name = "parameter_506" + shape = [192] + dtype = "float32" + min_val = float("0.00286492") + max_val = float("0.0140629") + mean = float("0.00682827") + std = float("0.00187941") + data = None + + +class Program_weight_tensor_parameter_507: + name = "parameter_507" + shape = [192] + dtype = "float32" + min_val = float("-0.0776134") + max_val = float("0.150128") + mean = float("0.0470268") + std = float("0.037034") + data = None + + +class Program_weight_tensor_parameter_508: + name = "parameter_508" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0368355") + max_val = float("0.0400254") + mean = float("-0.000211959") + std = float("0.00380574") + data = None + + +class Program_weight_tensor_parameter_509: + name = "parameter_509" + shape = [192] + dtype = "float32" + min_val = float("-2.48699") + max_val = float("-0.132487") + mean = float("-1.2498") + std = float("0.418473") + data = None + + +class Program_weight_tensor_parameter_510: + name = "parameter_510" + shape = [192] + dtype = "float32" + min_val = float("0.689021") + max_val = float("1.51961") + mean = float("1.12491") + std = float("0.134826") + data = None + + +class Program_weight_tensor_parameter_511: + name = "parameter_511" + shape = [192] + dtype = "float32" + min_val = float("0.0195954") + max_val = float("0.0657275") + mean = float("0.0354674") + std = float("0.00852691") + data = None + + +class Program_weight_tensor_parameter_512: + name = "parameter_512" + shape = [192] + dtype = "float32" + min_val = float("-0.841336") + max_val = float("0.288596") + mean = float("-0.0804588") + std = float("0.135459") + data = None + + +class Program_weight_tensor_parameter_513: + name = "parameter_513" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0647608") + max_val = float("0.0671244") + mean = float("-0.000301379") + std = float("0.00415559") + data = None + + +class Program_weight_tensor_parameter_514: + name = "parameter_514" + shape = [192] + dtype = "float32" + min_val = float("-1.21773") + max_val = float("0.49966") + mean = float("-0.167333") + std = float("0.293611") + data = None + + +class Program_weight_tensor_parameter_515: + name = "parameter_515" + shape = [192] + dtype = "float32" + min_val = float("0.00864435") + max_val = float("1.53701") + mean = float("0.238131") + std = float("0.21185") + data = None + + +class Program_weight_tensor_parameter_516: + name = "parameter_516" + shape = [192] + dtype = "float32" + min_val = float("2.28062e-05") + max_val = float("0.00680281") + mean = float("0.00052519") + std = float("0.000661192") + data = None + + +class Program_weight_tensor_parameter_517: + name = "parameter_517" + shape = [192] + dtype = "float32" + min_val = float("-0.0690564") + max_val = float("0.101614") + 
mean = float("0.0104971") + std = float("0.0186282") + data = None + + +class Program_weight_tensor_parameter_518: + name = "parameter_518" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.0626678") + max_val = float("0.0382933") + mean = float("-0.000453582") + std = float("0.00413962") + data = None + + +class Program_weight_tensor_parameter_519: + name = "parameter_519" + shape = [192] + dtype = "float32" + min_val = float("-1.21774") + max_val = float("0.50078") + mean = float("-0.167049") + std = float("0.293829") + data = None + + +class Program_weight_tensor_parameter_520: + name = "parameter_520" + shape = [192] + dtype = "float32" + min_val = float("0.353208") + max_val = float("1.45018") + mean = float("0.756982") + std = float("0.216639") + data = None + + +class Program_weight_tensor_parameter_521: + name = "parameter_521" + shape = [192] + dtype = "float32" + min_val = float("0.00474286") + max_val = float("0.0202689") + mean = float("0.00952365") + std = float("0.00262046") + data = None + + +class Program_weight_tensor_parameter_522: + name = "parameter_522" + shape = [192] + dtype = "float32" + min_val = float("-0.102625") + max_val = float("0.150149") + mean = float("0.0567521") + std = float("0.0496463") + data = None + + +class Program_weight_tensor_parameter_523: + name = "parameter_523" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0712483") + max_val = float("0.0533123") + mean = float("-0.000260747") + std = float("0.00375359") + data = None + + +class Program_weight_tensor_parameter_524: + name = "parameter_524" + shape = [192] + dtype = "float32" + min_val = float("-1.87984") + max_val = float("-0.210289") + mean = float("-1.14605") + std = float("0.325945") + data = None + + +class Program_weight_tensor_parameter_525: + name = "parameter_525" + shape = [192] + dtype = "float32" + min_val = float("0.790161") + max_val = float("1.59635") + mean = float("1.12149") + std = float("0.129857") + data = None + + +class Program_weight_tensor_parameter_526: + name = "parameter_526" + shape = [192] + dtype = "float32" + min_val = float("0.0175701") + max_val = float("0.0649204") + mean = float("0.0311456") + std = float("0.00871316") + data = None + + +class Program_weight_tensor_parameter_527: + name = "parameter_527" + shape = [192] + dtype = "float32" + min_val = float("-0.857673") + max_val = float("0.269081") + mean = float("-0.0673534") + std = float("0.134064") + data = None + + +class Program_weight_tensor_parameter_528: + name = "parameter_528" + shape = [192, 192, 3, 3] + dtype = "float32" + min_val = float("-0.0680887") + max_val = float("0.0796042") + mean = float("-0.000244907") + std = float("0.0040245") + data = None + + +class Program_weight_tensor_parameter_529: + name = "parameter_529" + shape = [192] + dtype = "float32" + min_val = float("-2.86208") + max_val = float("1.58104") + mean = float("-0.027572") + std = float("0.747892") + data = None + + +class Program_weight_tensor_parameter_530: + name = "parameter_530" + shape = [192] + dtype = "float32" + min_val = float("0.490153") + max_val = float("2.07789") + mean = float("0.900423") + std = float("0.231981") + data = None + + +class Program_weight_tensor_parameter_531: + name = "parameter_531" + shape = [192] + dtype = "float32" + min_val = float("0.0121565") + max_val = float("0.0723228") + mean = float("0.0255903") + std = float("0.0100067") + data = None + + +class Program_weight_tensor_parameter_532: + name = "parameter_532" + shape = [192] + dtype = 
"float32" + min_val = float("-0.232643") + max_val = float("0.322942") + mean = float("-0.0434104") + std = float("0.0608082") + data = None + + +class Program_weight_tensor_parameter_533: + name = "parameter_533" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.112904") + max_val = float("0.101906") + mean = float("-0.000605477") + std = float("0.00869645") + data = None + + +class Program_weight_tensor_parameter_534: + name = "parameter_534" + shape = [192] + dtype = "float32" + min_val = float("-2.96795") + max_val = float("1.66848") + mean = float("0.0967615") + std = float("0.663297") + data = None + + +class Program_weight_tensor_parameter_535: + name = "parameter_535" + shape = [192] + dtype = "float32" + min_val = float("0.830405") + max_val = float("5.55794") + mean = float("1.91324") + std = float("0.933276") + data = None + + +class Program_weight_tensor_parameter_536: + name = "parameter_536" + shape = [192] + dtype = "float32" + min_val = float("0.00635322") + max_val = float("0.0445876") + mean = float("0.0175605") + std = float("0.00556344") + data = None + + +class Program_weight_tensor_parameter_537: + name = "parameter_537" + shape = [192] + dtype = "float32" + min_val = float("-0.144806") + max_val = float("0.154975") + mean = float("-0.022062") + std = float("0.0559356") + data = None + + +class Program_weight_tensor_parameter_538: + name = "parameter_538" + shape = [192, 384, 1, 1] + dtype = "float32" + min_val = float("-0.100414") + max_val = float("0.0965722") + mean = float("-0.000481739") + std = float("0.00788359") + data = None + + +class Program_weight_tensor_parameter_539: + name = "parameter_539" + shape = [384] + dtype = "float32" + min_val = float("-2.9234") + max_val = float("1.32689") + mean = float("-0.300856") + std = float("0.563737") + data = None + + +class Program_weight_tensor_parameter_540: + name = "parameter_540" + shape = [384] + dtype = "float32" + min_val = float("0.633896") + max_val = float("2.47246") + mean = float("1.15988") + std = float("0.257349") + data = None + + +class Program_weight_tensor_parameter_541: + name = "parameter_541" + shape = [384] + dtype = "float32" + min_val = float("0.0117343") + max_val = float("0.113588") + mean = float("0.0270697") + std = float("0.0131501") + data = None + + +class Program_weight_tensor_parameter_542: + name = "parameter_542" + shape = [384] + dtype = "float32" + min_val = float("-0.269172") + max_val = float("0.242303") + mean = float("0.0298657") + std = float("0.0746447") + data = None + + +class Program_weight_tensor_parameter_543: + name = "parameter_543" + shape = [384, 256, 3, 3] + dtype = "float32" + min_val = float("-0.0777711") + max_val = float("0.0733026") + mean = float("-9.30129e-05") + std = float("0.00423326") + data = None + + +class Program_weight_tensor_parameter_544: + name = "parameter_544" + shape = [256] + dtype = "float32" + min_val = float("-2.04675") + max_val = float("1.2869") + mean = float("-0.92413") + std = float("0.542635") + data = None + + +class Program_weight_tensor_parameter_545: + name = "parameter_545" + shape = [256] + dtype = "float32" + min_val = float("0.509654") + max_val = float("1.69024") + mean = float("1.05364") + std = float("0.177449") + data = None + + +class Program_weight_tensor_parameter_546: + name = "parameter_546" + shape = [256] + dtype = "float32" + min_val = float("0.00164958") + max_val = float("0.0205898") + mean = float("0.00554979") + std = float("0.00243516") + data = None + + +class 
Program_weight_tensor_parameter_547: + name = "parameter_547" + shape = [256] + dtype = "float32" + min_val = float("-0.248048") + max_val = float("0.18055") + mean = float("-0.0481355") + std = float("0.0642407") + data = None + + +class Program_weight_tensor_parameter_548: + name = "parameter_548" + shape = [256, 192, 1, 1] + dtype = "float32" + min_val = float("-0.211445") + max_val = float("0.154025") + mean = float("-0.00090718") + std = float("0.0139364") + data = None + + +class Program_weight_tensor_parameter_549: + name = "parameter_549" + shape = [192] + dtype = "float32" + min_val = float("-0.0146056") + max_val = float("0.00252242") + mean = float("-0.00513018") + std = float("0.00389486") + data = None + + +class Program_weight_tensor_parameter_550: + name = "parameter_550" + shape = [192, 192, 1, 1] + dtype = "float32" + min_val = float("-0.340895") + max_val = float("0.243469") + mean = float("-0.00395929") + std = float("0.0107136") + data = None + + +class Program_weight_tensor_parameter_551: + name = "parameter_551" + shape = [96] + dtype = "float32" + min_val = float("-1.9141") + max_val = float("0.53448") + mean = float("-0.208812") + std = float("0.434585") + data = None + + +class Program_weight_tensor_parameter_552: + name = "parameter_552" + shape = [96] + dtype = "float32" + min_val = float("0.139627") + max_val = float("3.23019") + mean = float("0.63562") + std = float("0.668608") + data = None + + +class Program_weight_tensor_parameter_553: + name = "parameter_553" + shape = [96] + dtype = "float32" + min_val = float("9.44925e-05") + max_val = float("0.00259545") + mean = float("0.000627228") + std = float("0.0004663") + data = None + + +class Program_weight_tensor_parameter_554: + name = "parameter_554" + shape = [96] + dtype = "float32" + min_val = float("-0.0508301") + max_val = float("0.0646139") + mean = float("0.00729974") + std = float("0.022731") + data = None + + +class Program_weight_tensor_parameter_555: + name = "parameter_555" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0529209") + max_val = float("0.0938109") + mean = float("-0.00068654") + std = float("0.00780134") + data = None + + +class Program_weight_tensor_parameter_556: + name = "parameter_556" + shape = [96] + dtype = "float32" + min_val = float("-1.91385") + max_val = float("0.535947") + mean = float("-0.208472") + std = float("0.434758") + data = None + + +class Program_weight_tensor_parameter_557: + name = "parameter_557" + shape = [96] + dtype = "float32" + min_val = float("0.343945") + max_val = float("5.46861") + mean = float("1.08565") + std = float("0.883653") + data = None + + +class Program_weight_tensor_parameter_558: + name = "parameter_558" + shape = [96] + dtype = "float32" + min_val = float("0.000831351") + max_val = float("0.0140034") + mean = float("0.00502365") + std = float("0.0025539") + data = None + + +class Program_weight_tensor_parameter_559: + name = "parameter_559" + shape = [96] + dtype = "float32" + min_val = float("-0.135563") + max_val = float("0.206685") + mean = float("0.0107306") + std = float("0.0611442") + data = None + + +class Program_weight_tensor_parameter_560: + name = "parameter_560" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0417476") + max_val = float("0.0707409") + mean = float("-0.000200496") + std = float("0.00586268") + data = None + + +class Program_weight_tensor_parameter_561: + name = "parameter_561" + shape = [96] + dtype = "float32" + min_val = float("-2.46669") + max_val = float("-0.0188941") 
+ mean = float("-1.22596") + std = float("0.444206") + data = None + + +class Program_weight_tensor_parameter_562: + name = "parameter_562" + shape = [96] + dtype = "float32" + min_val = float("0.540095") + max_val = float("1.63859") + mean = float("0.945542") + std = float("0.172479") + data = None + + +class Program_weight_tensor_parameter_563: + name = "parameter_563" + shape = [96] + dtype = "float32" + min_val = float("0.0343629") + max_val = float("0.225267") + mean = float("0.0825205") + std = float("0.0339352") + data = None + + +class Program_weight_tensor_parameter_564: + name = "parameter_564" + shape = [96] + dtype = "float32" + min_val = float("-2.59911") + max_val = float("2.14438") + mean = float("-0.187597") + std = float("0.479") + data = None + + +class Program_weight_tensor_parameter_565: + name = "parameter_565" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.159603") + max_val = float("0.105542") + mean = float("-0.000422661") + std = float("0.00713371") + data = None + + +class Program_weight_tensor_parameter_566: + name = "parameter_566" + shape = [96] + dtype = "float32" + min_val = float("-1.38744") + max_val = float("0.563004") + mean = float("-0.132441") + std = float("0.347447") + data = None + + +class Program_weight_tensor_parameter_567: + name = "parameter_567" + shape = [96] + dtype = "float32" + min_val = float("0.0452771") + max_val = float("1.86502") + mean = float("0.460871") + std = float("0.366358") + data = None + + +class Program_weight_tensor_parameter_568: + name = "parameter_568" + shape = [96] + dtype = "float32" + min_val = float("7.60148e-05") + max_val = float("0.00285319") + mean = float("0.000794421") + std = float("0.000636585") + data = None + + +class Program_weight_tensor_parameter_569: + name = "parameter_569" + shape = [96] + dtype = "float32" + min_val = float("-0.0497884") + max_val = float("0.0479867") + mean = float("0.00766729") + std = float("0.0176144") + data = None + + +class Program_weight_tensor_parameter_570: + name = "parameter_570" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0484855") + max_val = float("0.0469527") + mean = float("-0.000557248") + std = float("0.00696514") + data = None + + +class Program_weight_tensor_parameter_571: + name = "parameter_571" + shape = [96] + dtype = "float32" + min_val = float("-1.38716") + max_val = float("0.565575") + mean = float("-0.131901") + std = float("0.347951") + data = None + + +class Program_weight_tensor_parameter_572: + name = "parameter_572" + shape = [96] + dtype = "float32" + min_val = float("0.373276") + max_val = float("2.32827") + mean = float("0.902354") + std = float("0.426303") + data = None + + +class Program_weight_tensor_parameter_573: + name = "parameter_573" + shape = [96] + dtype = "float32" + min_val = float("0.00302635") + max_val = float("0.0233198") + mean = float("0.00879849") + std = float("0.00436968") + data = None + + +class Program_weight_tensor_parameter_574: + name = "parameter_574" + shape = [96] + dtype = "float32" + min_val = float("-0.106151") + max_val = float("0.119838") + mean = float("0.0358036") + std = float("0.043231") + data = None + + +class Program_weight_tensor_parameter_575: + name = "parameter_575" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0601192") + max_val = float("0.0479345") + mean = float("-0.000334461") + std = float("0.00588243") + data = None + + +class Program_weight_tensor_parameter_576: + name = "parameter_576" + shape = [96] + dtype = "float32" + 
min_val = float("-3.32059") + max_val = float("0.366033") + mean = float("-1.1777") + std = float("0.556588") + data = None + + +class Program_weight_tensor_parameter_577: + name = "parameter_577" + shape = [96] + dtype = "float32" + min_val = float("0.470758") + max_val = float("1.9813") + mean = float("1.03925") + std = float("0.238611") + data = None + + +class Program_weight_tensor_parameter_578: + name = "parameter_578" + shape = [96] + dtype = "float32" + min_val = float("0.0279788") + max_val = float("0.183449") + mean = float("0.0506417") + std = float("0.0183646") + data = None + + +class Program_weight_tensor_parameter_579: + name = "parameter_579" + shape = [96] + dtype = "float32" + min_val = float("-1.05837") + max_val = float("0.786092") + mean = float("-0.0424528") + std = float("0.278771") + data = None + + +class Program_weight_tensor_parameter_580: + name = "parameter_580" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.152735") + max_val = float("0.158912") + mean = float("-0.000426001") + std = float("0.00705743") + data = None + + +class Program_weight_tensor_parameter_581: + name = "parameter_581" + shape = [96] + dtype = "float32" + min_val = float("-1.24949") + max_val = float("0.583942") + mean = float("-0.109112") + std = float("0.292117") + data = None + + +class Program_weight_tensor_parameter_582: + name = "parameter_582" + shape = [96] + dtype = "float32" + min_val = float("0.0224878") + max_val = float("1.27796") + mean = float("0.324443") + std = float("0.192946") + data = None + + +class Program_weight_tensor_parameter_583: + name = "parameter_583" + shape = [96] + dtype = "float32" + min_val = float("2.50107e-05") + max_val = float("0.00308123") + mean = float("0.000650214") + std = float("0.000486492") + data = None + + +class Program_weight_tensor_parameter_584: + name = "parameter_584" + shape = [96] + dtype = "float32" + min_val = float("-0.0398841") + max_val = float("0.0533346") + mean = float("0.00424068") + std = float("0.0172095") + data = None + + +class Program_weight_tensor_parameter_585: + name = "parameter_585" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0406747") + max_val = float("0.0494878") + mean = float("-0.000325615") + std = float("0.0071059") + data = None + + +class Program_weight_tensor_parameter_586: + name = "parameter_586" + shape = [96] + dtype = "float32" + min_val = float("-1.24929") + max_val = float("0.586311") + mean = float("-0.108658") + std = float("0.29268") + data = None + + +class Program_weight_tensor_parameter_587: + name = "parameter_587" + shape = [96] + dtype = "float32" + min_val = float("0.311326") + max_val = float("1.67043") + mean = float("0.747441") + std = float("0.257878") + data = None + + +class Program_weight_tensor_parameter_588: + name = "parameter_588" + shape = [96] + dtype = "float32" + min_val = float("0.00299069") + max_val = float("0.0188881") + mean = float("0.00858598") + std = float("0.00338781") + data = None + + +class Program_weight_tensor_parameter_589: + name = "parameter_589" + shape = [96] + dtype = "float32" + min_val = float("-0.104806") + max_val = float("0.146672") + mean = float("0.0293301") + std = float("0.0382013") + data = None + + +class Program_weight_tensor_parameter_590: + name = "parameter_590" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0728298") + max_val = float("0.065903") + mean = float("-0.000300919") + std = float("0.00597289") + data = None + + +class Program_weight_tensor_parameter_591: + name = 
"parameter_591" + shape = [96] + dtype = "float32" + min_val = float("-3.5826") + max_val = float("0.291706") + mean = float("-1.12744") + std = float("0.572685") + data = None + + +class Program_weight_tensor_parameter_592: + name = "parameter_592" + shape = [96] + dtype = "float32" + min_val = float("0.511064") + max_val = float("2.19222") + mean = float("1.05217") + std = float("0.238287") + data = None + + +class Program_weight_tensor_parameter_593: + name = "parameter_593" + shape = [96] + dtype = "float32" + min_val = float("0.021583") + max_val = float("0.0772463") + mean = float("0.0393307") + std = float("0.00939936") + data = None + + +class Program_weight_tensor_parameter_594: + name = "parameter_594" + shape = [96] + dtype = "float32" + min_val = float("-0.95654") + max_val = float("0.644938") + mean = float("-0.042882") + std = float("0.216242") + data = None + + +class Program_weight_tensor_parameter_595: + name = "parameter_595" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0984925") + max_val = float("0.137263") + mean = float("-0.000483231") + std = float("0.00714155") + data = None + + +class Program_weight_tensor_parameter_596: + name = "parameter_596" + shape = [96] + dtype = "float32" + min_val = float("-0.891765") + max_val = float("0.530315") + mean = float("-0.160042") + std = float("0.28168") + data = None + + +class Program_weight_tensor_parameter_597: + name = "parameter_597" + shape = [96] + dtype = "float32" + min_val = float("0.0202036") + max_val = float("1.40549") + mean = float("0.324747") + std = float("0.213549") + data = None + + +class Program_weight_tensor_parameter_598: + name = "parameter_598" + shape = [96] + dtype = "float32" + min_val = float("5.2419e-05") + max_val = float("0.00309807") + mean = float("0.00068279") + std = float("0.000470997") + data = None + + +class Program_weight_tensor_parameter_599: + name = "parameter_599" + shape = [96] + dtype = "float32" + min_val = float("-0.0353761") + max_val = float("0.0539706") + mean = float("0.00757239") + std = float("0.0158983") + data = None + + +class Program_weight_tensor_parameter_600: + name = "parameter_600" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.050403") + max_val = float("0.0470333") + mean = float("-0.000602859") + std = float("0.00719125") + data = None + + +class Program_weight_tensor_parameter_601: + name = "parameter_601" + shape = [96] + dtype = "float32" + min_val = float("-0.891522") + max_val = float("0.532005") + mean = float("-0.15962") + std = float("0.282144") + data = None + + +class Program_weight_tensor_parameter_602: + name = "parameter_602" + shape = [96] + dtype = "float32" + min_val = float("0.170998") + max_val = float("1.78064") + mean = float("0.708933") + std = float("0.284476") + data = None + + +class Program_weight_tensor_parameter_603: + name = "parameter_603" + shape = [96] + dtype = "float32" + min_val = float("0.00186209") + max_val = float("0.0242538") + mean = float("0.00887548") + std = float("0.00332774") + data = None + + +class Program_weight_tensor_parameter_604: + name = "parameter_604" + shape = [96] + dtype = "float32" + min_val = float("-0.0317255") + max_val = float("0.148332") + mean = float("0.0439334") + std = float("0.0383947") + data = None + + +class Program_weight_tensor_parameter_605: + name = "parameter_605" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0673552") + max_val = float("0.0665555") + mean = float("-0.000406403") + std = float("0.00600122") + data = None + 
+ +class Program_weight_tensor_parameter_606: + name = "parameter_606" + shape = [96] + dtype = "float32" + min_val = float("-2.65797") + max_val = float("0.0644665") + mean = float("-1.06329") + std = float("0.488575") + data = None + + +class Program_weight_tensor_parameter_607: + name = "parameter_607" + shape = [96] + dtype = "float32" + min_val = float("0.510122") + max_val = float("1.73722") + mean = float("1.01545") + std = float("0.193669") + data = None + + +class Program_weight_tensor_parameter_608: + name = "parameter_608" + shape = [96] + dtype = "float32" + min_val = float("0.0170441") + max_val = float("0.0592749") + mean = float("0.0303397") + std = float("0.00732367") + data = None + + +class Program_weight_tensor_parameter_609: + name = "parameter_609" + shape = [96] + dtype = "float32" + min_val = float("-0.802591") + max_val = float("0.759118") + mean = float("-0.0649493") + std = float("0.211368") + data = None + + +class Program_weight_tensor_parameter_610: + name = "parameter_610" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0799583") + max_val = float("0.12863") + mean = float("-0.000463251") + std = float("0.00696947") + data = None + + +class Program_weight_tensor_parameter_611: + name = "parameter_611" + shape = [96] + dtype = "float32" + min_val = float("-0.979363") + max_val = float("0.488329") + mean = float("-0.1357") + std = float("0.278693") + data = None + + +class Program_weight_tensor_parameter_612: + name = "parameter_612" + shape = [96] + dtype = "float32" + min_val = float("0.0499672") + max_val = float("1.15174") + mean = float("0.296075") + std = float("0.172795") + data = None + + +class Program_weight_tensor_parameter_613: + name = "parameter_613" + shape = [96] + dtype = "float32" + min_val = float("0.000124848") + max_val = float("0.00438819") + mean = float("0.00108131") + std = float("0.000696678") + data = None + + +class Program_weight_tensor_parameter_614: + name = "parameter_614" + shape = [96] + dtype = "float32" + min_val = float("-0.0427797") + max_val = float("0.06109") + mean = float("0.00673208") + std = float("0.0190435") + data = None + + +class Program_weight_tensor_parameter_615: + name = "parameter_615" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0730409") + max_val = float("0.0734237") + mean = float("-0.000668194") + std = float("0.00816827") + data = None + + +class Program_weight_tensor_parameter_616: + name = "parameter_616" + shape = [96] + dtype = "float32" + min_val = float("-0.979598") + max_val = float("0.490087") + mean = float("-0.135308") + std = float("0.279185") + data = None + + +class Program_weight_tensor_parameter_617: + name = "parameter_617" + shape = [96] + dtype = "float32" + min_val = float("0.240111") + max_val = float("1.69891") + mean = float("0.604647") + std = float("0.228294") + data = None + + +class Program_weight_tensor_parameter_618: + name = "parameter_618" + shape = [96] + dtype = "float32" + min_val = float("0.00479228") + max_val = float("0.0493428") + mean = float("0.0126094") + std = float("0.00558079") + data = None + + +class Program_weight_tensor_parameter_619: + name = "parameter_619" + shape = [96] + dtype = "float32" + min_val = float("-0.0884025") + max_val = float("0.162813") + mean = float("0.0330297") + std = float("0.0455641") + data = None + + +class Program_weight_tensor_parameter_620: + name = "parameter_620" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.070586") + max_val = float("0.053917") + mean = 
float("-0.000353734") + std = float("0.00603503") + data = None + + +class Program_weight_tensor_parameter_621: + name = "parameter_621" + shape = [96] + dtype = "float32" + min_val = float("-3.46749") + max_val = float("0.20134") + mean = float("-1.00429") + std = float("0.548683") + data = None + + +class Program_weight_tensor_parameter_622: + name = "parameter_622" + shape = [96] + dtype = "float32" + min_val = float("0.68469") + max_val = float("2.50521") + mean = float("1.07421") + std = float("0.212064") + data = None + + +class Program_weight_tensor_parameter_623: + name = "parameter_623" + shape = [96] + dtype = "float32" + min_val = float("0.0126502") + max_val = float("0.0593798") + mean = float("0.025404") + std = float("0.00851987") + data = None + + +class Program_weight_tensor_parameter_624: + name = "parameter_624" + shape = [96] + dtype = "float32" + min_val = float("-0.59646") + max_val = float("0.699113") + mean = float("-0.0602622") + std = float("0.200876") + data = None + + +class Program_weight_tensor_parameter_625: + name = "parameter_625" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.0875016") + max_val = float("0.0958638") + mean = float("-0.000393602") + std = float("0.00713622") + data = None + + +class Program_weight_tensor_parameter_626: + name = "parameter_626" + shape = [96] + dtype = "float32" + min_val = float("-0.623249") + max_val = float("0.450355") + mean = float("-0.0811173") + std = float("0.25665") + data = None + + +class Program_weight_tensor_parameter_627: + name = "parameter_627" + shape = [96] + dtype = "float32" + min_val = float("0.0905173") + max_val = float("1.30172") + mean = float("0.309137") + std = float("0.196898") + data = None + + +class Program_weight_tensor_parameter_628: + name = "parameter_628" + shape = [96] + dtype = "float32" + min_val = float("0.000486077") + max_val = float("0.0206445") + mean = float("0.00387906") + std = float("0.00325823") + data = None + + +class Program_weight_tensor_parameter_629: + name = "parameter_629" + shape = [96] + dtype = "float32" + min_val = float("-0.0378971") + max_val = float("0.0272841") + mean = float("0.000360893") + std = float("0.0116936") + data = None + + +class Program_weight_tensor_parameter_630: + name = "parameter_630" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.0967686") + max_val = float("0.0726096") + mean = float("-0.00111676") + std = float("0.00943776") + data = None + + +class Program_weight_tensor_parameter_631: + name = "parameter_631" + shape = [96] + dtype = "float32" + min_val = float("-0.62253") + max_val = float("0.451504") + mean = float("-0.0806935") + std = float("0.256953") + data = None + + +class Program_weight_tensor_parameter_632: + name = "parameter_632" + shape = [96] + dtype = "float32" + min_val = float("0.210918") + max_val = float("1.42997") + mean = float("0.527932") + std = float("0.258611") + data = None + + +class Program_weight_tensor_parameter_633: + name = "parameter_633" + shape = [96] + dtype = "float32" + min_val = float("0.0110923") + max_val = float("0.101379") + mean = float("0.0342554") + std = float("0.0175406") + data = None + + +class Program_weight_tensor_parameter_634: + name = "parameter_634" + shape = [96] + dtype = "float32" + min_val = float("-0.105783") + max_val = float("0.0988172") + mean = float("-0.00552355") + std = float("0.039398") + data = None + + +class Program_weight_tensor_parameter_635: + name = "parameter_635" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = 
float("-0.0996365") + max_val = float("0.0540305") + mean = float("-0.00042977") + std = float("0.00592197") + data = None + + +class Program_weight_tensor_parameter_636: + name = "parameter_636" + shape = [96] + dtype = "float32" + min_val = float("-2.4099") + max_val = float("0.510062") + mean = float("-0.827896") + std = float("0.467957") + data = None + + +class Program_weight_tensor_parameter_637: + name = "parameter_637" + shape = [96] + dtype = "float32" + min_val = float("0.855439") + max_val = float("2.18052") + mean = float("1.27541") + std = float("0.20896") + data = None + + +class Program_weight_tensor_parameter_638: + name = "parameter_638" + shape = [96] + dtype = "float32" + min_val = float("0.0104439") + max_val = float("0.0520779") + mean = float("0.0209799") + std = float("0.00859072") + data = None + + +class Program_weight_tensor_parameter_639: + name = "parameter_639" + shape = [96] + dtype = "float32" + min_val = float("-0.780626") + max_val = float("0.470779") + mean = float("-0.0616335") + std = float("0.196544") + data = None + + +class Program_weight_tensor_parameter_640: + name = "parameter_640" + shape = [96, 96, 3, 3] + dtype = "float32" + min_val = float("-0.154701") + max_val = float("0.153806") + mean = float("-0.00026052") + std = float("0.00735431") + data = None + + +class Program_weight_tensor_parameter_641: + name = "parameter_641" + shape = [96] + dtype = "float32" + min_val = float("-3.15956") + max_val = float("1.89061") + mean = float("0.502181") + std = float("0.861277") + data = None + + +class Program_weight_tensor_parameter_642: + name = "parameter_642" + shape = [96] + dtype = "float32" + min_val = float("0.209789") + max_val = float("2.62802") + mean = float("0.557131") + std = float("0.318659") + data = None + + +class Program_weight_tensor_parameter_643: + name = "parameter_643" + shape = [96] + dtype = "float32" + min_val = float("0.00949005") + max_val = float("0.147612") + mean = float("0.0342476") + std = float("0.0235361") + data = None + + +class Program_weight_tensor_parameter_644: + name = "parameter_644" + shape = [96] + dtype = "float32" + min_val = float("-0.272514") + max_val = float("0.303684") + mean = float("-0.0269397") + std = float("0.0869885") + data = None + + +class Program_weight_tensor_parameter_645: + name = "parameter_645" + shape = [96, 192, 1, 1] + dtype = "float32" + min_val = float("-0.190092") + max_val = float("0.235795") + mean = float("-0.00054682") + std = float("0.0152601") + data = None + + +class Program_weight_tensor_parameter_646: + name = "parameter_646" + shape = [96] + dtype = "float32" + min_val = float("-4.92412") + max_val = float("1.57941") + mean = float("0.384226") + std = float("1.04886") + data = None + + +class Program_weight_tensor_parameter_647: + name = "parameter_647" + shape = [96] + dtype = "float32" + min_val = float("0.411425") + max_val = float("6.77791") + mean = float("1.69479") + std = float("1.30749") + data = None + + +class Program_weight_tensor_parameter_648: + name = "parameter_648" + shape = [96] + dtype = "float32" + min_val = float("0.00569395") + max_val = float("0.186568") + mean = float("0.0312372") + std = float("0.0269306") + data = None + + +class Program_weight_tensor_parameter_649: + name = "parameter_649" + shape = [96] + dtype = "float32" + min_val = float("-0.123122") + max_val = float("0.396242") + mean = float("0.0353184") + std = float("0.0935304") + data = None + + +class Program_weight_tensor_parameter_650: + name = "parameter_650" + shape = [96, 192, 1, 
1] + dtype = "float32" + min_val = float("-0.115428") + max_val = float("0.143096") + mean = float("0.000288353") + std = float("0.0138526") + data = None + + +class Program_weight_tensor_parameter_651: + name = "parameter_651" + shape = [192] + dtype = "float32" + min_val = float("-2.27512") + max_val = float("1.75006") + mean = float("-0.125702") + std = float("0.740468") + data = None + + +class Program_weight_tensor_parameter_652: + name = "parameter_652" + shape = [192] + dtype = "float32" + min_val = float("0.632726") + max_val = float("2.96908") + mean = float("1.08749") + std = float("0.283555") + data = None + + +class Program_weight_tensor_parameter_653: + name = "parameter_653" + shape = [192] + dtype = "float32" + min_val = float("0.0128887") + max_val = float("0.306476") + mean = float("0.0430534") + std = float("0.0345139") + data = None + + +class Program_weight_tensor_parameter_654: + name = "parameter_654" + shape = [192] + dtype = "float32" + min_val = float("-0.476717") + max_val = float("0.27685") + mean = float("-0.0597992") + std = float("0.114967") + data = None + + +class Program_weight_tensor_parameter_655: + name = "parameter_655" + shape = [192, 128, 3, 3] + dtype = "float32" + min_val = float("-0.0811233") + max_val = float("0.11238") + mean = float("-0.000121273") + std = float("0.00716338") + data = None + + +class Program_weight_tensor_parameter_656: + name = "parameter_656" + shape = [128] + dtype = "float32" + min_val = float("-2.81253") + max_val = float("1.96258") + mean = float("-0.709313") + std = float("0.64886") + data = None + + +class Program_weight_tensor_parameter_657: + name = "parameter_657" + shape = [128] + dtype = "float32" + min_val = float("0.302011") + max_val = float("2.86022") + mean = float("1.01859") + std = float("0.279425") + data = None + + +class Program_weight_tensor_parameter_658: + name = "parameter_658" + shape = [128] + dtype = "float32" + min_val = float("0.000683803") + max_val = float("0.0143901") + mean = float("0.00380984") + std = float("0.00196434") + data = None + + +class Program_weight_tensor_parameter_659: + name = "parameter_659" + shape = [128] + dtype = "float32" + min_val = float("-0.241007") + max_val = float("0.23083") + mean = float("0.00336445") + std = float("0.0801385") + data = None + + +class Program_weight_tensor_parameter_660: + name = "parameter_660" + shape = [128, 96, 1, 1] + dtype = "float32" + min_val = float("-0.16828") + max_val = float("0.191318") + mean = float("-0.00143145") + std = float("0.0216253") + data = None + + +class Program_weight_tensor_parameter_661: + name = "parameter_661" + shape = [96] + dtype = "float32" + min_val = float("-0.0182017") + max_val = float("-0.00100735") + mean = float("-0.00761377") + std = float("0.00459165") + data = None + + +class Program_weight_tensor_parameter_662: + name = "parameter_662" + shape = [96, 96, 1, 1] + dtype = "float32" + min_val = float("-0.297058") + max_val = float("0.124247") + mean = float("-0.00811798") + std = float("0.0180434") + data = None + + +class Program_weight_tensor_parameter_663: + name = "parameter_663" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_664: + name = "parameter_664" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_665: + name = "parameter_665" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + 
data = None + + +class Program_weight_tensor_parameter_666: + name = "parameter_666" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_667: + name = "parameter_667" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0524219") + max_val = float("0.062819") + mean = float("-0.00145834") + std = float("0.0124603") + data = None + + +class Program_weight_tensor_parameter_668: + name = "parameter_668" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_669: + name = "parameter_669" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_670: + name = "parameter_670" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_671: + name = "parameter_671" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_672: + name = "parameter_672" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.053396") + max_val = float("0.0780475") + mean = float("-0.000432103") + std = float("0.0105215") + data = None + + +class Program_weight_tensor_parameter_673: + name = "parameter_673" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_674: + name = "parameter_674" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_675: + name = "parameter_675" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_676: + name = "parameter_676" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_677: + name = "parameter_677" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0907736") + max_val = float("0.0889891") + mean = float("-0.000674195") + std = float("0.0115766") + data = None + + +class Program_weight_tensor_parameter_678: + name = "parameter_678" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_679: + name = "parameter_679" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_680: + name = "parameter_680" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_681: + name = "parameter_681" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_682: + name = "parameter_682" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0701343") + max_val = float("0.0744403") + mean = float("-0.000969115") + std = float("0.0132523") + data = None + + +class Program_weight_tensor_parameter_683: + name = "parameter_683" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_684: + name = "parameter_684" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = 
float("0.5") + data = None + + +class Program_weight_tensor_parameter_685: + name = "parameter_685" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_686: + name = "parameter_686" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_687: + name = "parameter_687" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0625249") + max_val = float("0.0628193") + mean = float("-0.000704405") + std = float("0.010522") + data = None + + +class Program_weight_tensor_parameter_688: + name = "parameter_688" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_689: + name = "parameter_689" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_690: + name = "parameter_690" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_691: + name = "parameter_691" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_692: + name = "parameter_692" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.105534") + max_val = float("0.0876318") + mean = float("-0.000291303") + std = float("0.0118198") + data = None + + +class Program_weight_tensor_parameter_693: + name = "parameter_693" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_694: + name = "parameter_694" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_695: + name = "parameter_695" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_696: + name = "parameter_696" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_697: + name = "parameter_697" + shape = [48, 48, 1, 1] + dtype = "float32" + min_val = float("-0.0927544") + max_val = float("0.067179") + mean = float("-0.00167319") + std = float("0.0164656") + data = None + + +class Program_weight_tensor_parameter_698: + name = "parameter_698" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_699: + name = "parameter_699" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_700: + name = "parameter_700" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_701: + name = "parameter_701" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_702: + name = "parameter_702" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.0662936") + max_val = float("0.0926268") + mean = float("-0.000546134") + std = float("0.0110591") + data = None + + +class Program_weight_tensor_parameter_703: + name = "parameter_703" + shape = [48] + dtype = "float32" + min_val = float("0") + 
max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_704: + name = "parameter_704" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_705: + name = "parameter_705" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_706: + name = "parameter_706" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_707: + name = "parameter_707" + shape = [48, 48, 3, 3] + dtype = "float32" + min_val = float("-0.115861") + max_val = float("0.0843934") + mean = float("-0.000390165") + std = float("0.0126271") + data = None + + +class Program_weight_tensor_parameter_708: + name = "parameter_708" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_709: + name = "parameter_709" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_710: + name = "parameter_710" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_711: + name = "parameter_711" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_712: + name = "parameter_712" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.156722") + max_val = float("0.12438") + mean = float("-0.00240073") + std = float("0.0227151") + data = None + + +class Program_weight_tensor_parameter_713: + name = "parameter_713" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_714: + name = "parameter_714" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_715: + name = "parameter_715" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_716: + name = "parameter_716" + shape = [48] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_717: + name = "parameter_717" + shape = [48, 96, 1, 1] + dtype = "float32" + min_val = float("-0.133366") + max_val = float("0.190723") + mean = float("-0.000461332") + std = float("0.0215494") + data = None + + +class Program_weight_tensor_parameter_718: + name = "parameter_718" + shape = [96] + dtype = "float32" + min_val = float("-3.40388") + max_val = float("3.27594") + mean = float("0.331") + std = float("1.14502") + data = None + + +class Program_weight_tensor_parameter_719: + name = "parameter_719" + shape = [96] + dtype = "float32" + min_val = float("0.861639") + max_val = float("4.91749") + mean = float("1.91516") + std = float("0.75496") + data = None + + +class Program_weight_tensor_parameter_720: + name = "parameter_720" + shape = [96] + dtype = "float32" + min_val = float("0.68512") + max_val = float("19.942") + mean = float("2.38283") + std = float("2.38942") + data = None + + +class Program_weight_tensor_parameter_721: + name = "parameter_721" + shape = [96] + dtype = "float32" + min_val = float("-1.44893") + max_val = float("1.82311") + mean = float("-0.333309") + std = 
float("0.618856") + data = None + + +class Program_weight_tensor_parameter_722: + name = "parameter_722" + shape = [96, 64, 3, 3] + dtype = "float32" + min_val = float("-0.115845") + max_val = float("0.115419") + mean = float("-0.000438744") + std = float("0.0120833") + data = None + + +class Program_weight_tensor_parameter_723: + name = "parameter_723" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_724: + name = "parameter_724" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_725: + name = "parameter_725" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_726: + name = "parameter_726" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_727: + name = "parameter_727" + shape = [64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.153743") + max_val = float("0.135272") + mean = float("-0.000740633") + std = float("0.0191711") + data = None + + +class Program_weight_tensor_parameter_728: + name = "parameter_728" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_729: + name = "parameter_729" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_730: + name = "parameter_730" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_731: + name = "parameter_731" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_732: + name = "parameter_732" + shape = [32, 32, 3, 3] + dtype = "float32" + min_val = float("-0.307002") + max_val = float("0.202588") + mean = float("-4.43961e-05") + std = float("0.025069") + data = None + + +class Program_weight_tensor_parameter_733: + name = "parameter_733" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_734: + name = "parameter_734" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_735: + name = "parameter_735" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_736: + name = "parameter_736" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_737: + name = "parameter_737" + shape = [32, 3, 3, 3] + dtype = "float32" + min_val = float("-0.297631") + max_val = float("0.278985") + mean = float("-0.00146872") + std = float("0.0683342") + data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt index 6fe7297b8..7248f3b80 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/graph_hash.txt @@ -1 +1 @@ -8b11cddc56e8bf2fc7551237b756b7f8f8a4e9dd2f556be8d328af948ebf41da \ No newline at end of 
file +80c6a3012fae16e53b556d8b6ef2a40e2378ccb66ef0a81269f362d7dab93afe \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py index 62a914daf..0bbda7212 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/input_meta.py @@ -1,135 +1,102 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1] + shape = [1, 24276] dtype = "float32" - data = [0.724553] + max_val = float("2.0") + mean = float("0.0175482") + std = float("0.141858") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [1] + shape = [1, 38, 24276] dtype = "float32" - data = [0.710696] + max_val = float("0.982337") + mean = float("0.000792632") + std = float("0.022185") + data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [1] + shape = [1, 38, 24276] dtype = "float32" - data = [0.69274] + max_val = float("1.0") + mean = float("0.000461795") + std = float("0.0214844") + data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [1] - dtype = "float32" - data = [0.697763] + shape = [1, 1] + dtype = "int32" + data = [0] class Program_weight_tensor_data_4: name = "data_4" - shape = [1] - dtype = "float32" - data = [0.67767] + shape = [1, 38, 1] + dtype = "int32" + data = [ + 3, + 3, + 9, + 1, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 0, + 0, + 0, + 8, + 3, + 3, + 3, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 5, + 3, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 0, + 0, + ] class Program_weight_tensor_data_5: name = "data_5" - shape = [1] + shape = [1, 38, 4] dtype = "float32" - data = [0.628229] + min_val = float("354.773") + max_val = float("1051.0") + mean = float("652.35") + std = float("193.013") + data = None class Program_weight_tensor_data_6: name = "data_6" - shape = [1] - dtype = "float32" - data = [0.643942] - - -class Program_weight_tensor_data_7: - name = "data_7" - shape = [1] - dtype = "float32" - data = [0.633569] - - -class Program_weight_tensor_data_8: - name = "data_8" - shape = [1] - dtype = "float32" - data = [0.801205] - - -class Program_weight_tensor_data_9: - name = "data_9" - shape = [1] - dtype = "float32" - data = [0.652613] - - -class Program_weight_tensor_data_10: - name = "data_10" - shape = [1] - dtype = "float32" - data = [0.636874] - - -class Program_weight_tensor_data_11: - name = "data_11" - shape = [1] - dtype = "float32" - data = [0.631148] - - -class Program_weight_tensor_data_12: - name = "data_12" - shape = [1] - dtype = "float32" - data = [0.635341] - - -class Program_weight_tensor_data_13: - name = "data_13" - shape = [1] - dtype = "float32" - data = [0.640054] - - -class Program_weight_tensor_data_14: - name = "data_14" - shape = [1] - dtype = "float32" - data = [0.755822] - - -class Program_weight_tensor_data_15: - name = "data_15" - shape = [1] - dtype = "float32" - data = [0.575326] - - -class Program_weight_tensor_data_16: - name = "data_16" - shape = [1] - dtype = "float32" - data = [0.59257] - - -class Program_weight_tensor_data_17: - name = "data_17" - shape = [1] - dtype = "float32" - data = [0.72331] - - -class Program_weight_tensor_data_18: - name = "data_18" - shape = [1, 3, 1536, 1536] + shape = [1, 38, 24276] dtype = "float32" - min_val = float("0.0218883") - max_val = float("0.663022") - mean = float("0.428838") - std = float("0.0832401") + max_val = float("0.73484") + 
mean = float("8.98923e-05") + std = float("0.00618669") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py index c5ebef41f..88cd8833b 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/model.py @@ -5,4036 +5,240 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - parameter_53, - parameter_54, - parameter_55, - parameter_56, - parameter_57, - parameter_58, - parameter_59, - parameter_60, - parameter_61, - parameter_62, - parameter_63, - parameter_64, - parameter_65, - parameter_66, - parameter_67, - parameter_68, - parameter_69, - parameter_70, - parameter_71, - parameter_72, - parameter_73, - parameter_74, - parameter_75, - parameter_76, - parameter_77, - parameter_78, - parameter_79, - parameter_80, - parameter_81, - parameter_82, - parameter_83, - parameter_84, - parameter_85, - parameter_86, - parameter_87, - parameter_88, - parameter_89, - parameter_90, - parameter_91, - parameter_92, - parameter_93, - parameter_94, - parameter_95, - parameter_96, - parameter_97, - parameter_98, - parameter_99, - parameter_100, - parameter_101, - parameter_102, - parameter_103, - parameter_104, - parameter_105, - parameter_106, - parameter_107, - parameter_108, - parameter_109, - parameter_110, - parameter_111, - parameter_112, - parameter_113, - parameter_114, - parameter_115, - parameter_116, - parameter_117, - parameter_118, - parameter_119, - parameter_120, - parameter_121, - parameter_122, - parameter_123, - parameter_124, - parameter_125, - parameter_126, - parameter_127, - parameter_128, - parameter_129, - parameter_130, - parameter_131, - parameter_132, - parameter_133, - parameter_134, - parameter_135, - parameter_136, - parameter_137, - parameter_138, - parameter_139, - parameter_140, - parameter_141, - parameter_142, - parameter_143, - parameter_144, - parameter_145, - parameter_146, - parameter_147, - parameter_148, - parameter_149, - parameter_150, - parameter_151, - parameter_152, - parameter_153, - parameter_154, - parameter_155, - parameter_156, - parameter_157, - parameter_158, - parameter_159, - parameter_160, - parameter_161, - parameter_162, - parameter_163, - parameter_164, - parameter_165, - parameter_166, - parameter_167, - parameter_168, - parameter_169, - parameter_170, - parameter_171, - parameter_172, - parameter_173, - parameter_174, - parameter_175, - parameter_176, - parameter_177, - parameter_178, - parameter_179, - parameter_180, - parameter_181, - parameter_182, - 
parameter_183, - parameter_184, - parameter_185, - parameter_186, - parameter_187, - parameter_188, - parameter_189, - parameter_190, - parameter_191, - parameter_192, - parameter_193, - parameter_194, - parameter_195, - parameter_196, - parameter_197, - parameter_198, - parameter_199, - parameter_200, - parameter_201, - parameter_202, - parameter_203, - parameter_204, - parameter_205, - parameter_206, - parameter_207, - parameter_208, - parameter_209, - parameter_210, - parameter_211, - parameter_212, - parameter_213, - parameter_214, - parameter_215, - parameter_216, - parameter_217, - parameter_218, - parameter_219, - parameter_220, - parameter_221, - parameter_222, - parameter_223, - parameter_224, - parameter_225, - parameter_226, - parameter_227, - parameter_228, - parameter_229, - parameter_230, - parameter_231, - parameter_232, - parameter_233, - parameter_234, - parameter_235, - parameter_236, - parameter_237, - parameter_238, - parameter_239, - parameter_240, - parameter_241, - parameter_242, - parameter_243, - parameter_244, - parameter_245, - parameter_246, - parameter_247, - parameter_248, - parameter_249, - parameter_250, - parameter_251, - parameter_252, - parameter_253, - parameter_254, - parameter_255, - parameter_256, - parameter_257, - parameter_258, - parameter_259, - parameter_260, - parameter_261, - parameter_262, - parameter_263, - parameter_264, - parameter_265, - parameter_266, - parameter_267, - parameter_268, - parameter_269, - parameter_270, - parameter_271, - parameter_272, - parameter_273, - parameter_274, - parameter_275, - parameter_276, - parameter_277, - parameter_278, - parameter_279, - parameter_280, - parameter_281, - parameter_282, - parameter_283, - parameter_284, - parameter_285, - parameter_286, - parameter_287, - parameter_288, - parameter_289, - parameter_290, - parameter_291, - parameter_292, - parameter_293, - parameter_294, - parameter_295, - parameter_296, - parameter_297, - parameter_298, - parameter_299, - parameter_300, - parameter_301, - parameter_302, - parameter_303, - parameter_304, - parameter_305, - parameter_306, - parameter_307, - parameter_308, - parameter_309, - parameter_310, - parameter_311, - parameter_312, - parameter_313, - parameter_314, - parameter_315, - parameter_316, - parameter_317, - parameter_318, - parameter_319, - parameter_320, - parameter_321, - parameter_322, - parameter_323, - parameter_324, - parameter_325, - parameter_326, - parameter_327, - parameter_328, - parameter_329, - parameter_330, - parameter_331, - parameter_332, - parameter_333, - parameter_334, - parameter_335, - parameter_336, - parameter_337, - parameter_338, - parameter_339, - parameter_340, - parameter_341, - parameter_342, - parameter_343, - parameter_344, - parameter_345, - parameter_346, - parameter_347, - parameter_348, - parameter_349, - parameter_350, - parameter_351, - parameter_352, - parameter_353, - parameter_354, - parameter_355, - parameter_356, - parameter_357, - parameter_358, - parameter_359, - parameter_360, - parameter_361, - parameter_362, - parameter_363, - parameter_364, - parameter_365, - parameter_366, - parameter_367, - parameter_368, - parameter_369, - parameter_370, - parameter_371, - parameter_372, - data_0, - data_1, - data_2, - data_3, - data_4, - data_5, - data_6, - data_7, - data_8, - data_9, - data_10, - data_11, - data_12, - data_13, - data_14, - data_15, - data_16, - data_17, - data_18, - ): - # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x3x-1x-1xf32, 32x3x3x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - data_18, 
parameter_372, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del data_18, parameter_372 - - # pd_op.batch_norm_: (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_371, - parameter_370, - parameter_369, - parameter_368, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_368, parameter_369, parameter_370, parameter_371 - - # pd_op.swish: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32) - swish_1 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32, 32x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_1, parameter_367, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_367 - - # pd_op.batch_norm_: (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32, -1xui8) <- (1x32x-1x-1xf32, 32xf32, 32xf32, 32xf32, 32xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_366, - parameter_365, - parameter_364, - parameter_363, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_363, parameter_364, parameter_365, parameter_366 - - # pd_op.swish: (1x32x-1x-1xf32) <- (1x32x-1x-1xf32) - swish_2 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.conv2d: (1x64x-1x-1xf32) <- (1x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_2 = paddle._C_ops.conv2d( - swish_2, parameter_362, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_362 - - # pd_op.batch_norm_: (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32, -1xui8) <- (1x64x-1x-1xf32, 64xf32, 64xf32, 64xf32, 64xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_361, - parameter_360, - parameter_359, - parameter_358, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_358, parameter_359, parameter_360, parameter_361 + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] - # pd_op.swish: (1x64x-1x-1xf32) <- (1x64x-1x-1xf32) - swish_3 = paddle._C_ops.swish(batch_norm__12) + # pd_op.unsqueeze: (1x1x24276xf32) <- (1x24276xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0, full_int_array_0 - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x64x-1x-1xf32, 96x64x3x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - swish_3, parameter_357, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_357 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_356, - parameter_355, - parameter_354, - parameter_353, - 
False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_353, parameter_354, parameter_355, parameter_356 - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_4 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_4, parameter_352, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_352 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_351, - parameter_350, - parameter_349, - parameter_348, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_348, parameter_349, parameter_350, parameter_351 - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_5 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x96x-1x-1xf32, 48x96x1x1xf32) - conv2d_5 = paddle._C_ops.conv2d( - swish_4, parameter_347, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_347 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__30, - batch_norm__31, - batch_norm__32, - batch_norm__33, - batch_norm__34, - batch_norm__35, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_5, - parameter_346, - parameter_345, - parameter_344, - parameter_343, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_343, parameter_344, parameter_345, parameter_346 - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_6 = paddle._C_ops.swish(batch_norm__30) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_6 = paddle._C_ops.conv2d( - swish_6, parameter_342, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_342 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__36, - batch_norm__37, - batch_norm__38, - batch_norm__39, - batch_norm__40, - batch_norm__41, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_6, - parameter_341, - parameter_340, - parameter_339, - parameter_338, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_338, parameter_339, parameter_340, parameter_341 - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_7 = paddle._C_ops.swish(batch_norm__36) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - swish_7, parameter_337, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_337 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__42, - batch_norm__43, - batch_norm__44, - batch_norm__45, - 
batch_norm__46, - batch_norm__47, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_7, - parameter_336, - parameter_335, - parameter_334, - parameter_333, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_333, parameter_334, parameter_335, parameter_336 - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_8 = paddle._C_ops.conv2d( - swish_7, parameter_332, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_332 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__48, - batch_norm__49, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_8, - parameter_331, - parameter_330, - parameter_329, - parameter_328, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_328, parameter_329, parameter_330, parameter_331 - - # pd_op.multiply: (1x48x-1x-1xf32) <- (1xf32, 1x48x-1x-1xf32) - multiply_0 = paddle._C_ops.multiply(data_0, batch_norm__48) - del data_0 - - # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - add_0 = paddle._C_ops.add(batch_norm__42, multiply_0) - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_8 = paddle._C_ops.swish(add_0) - - # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - add_1 = paddle._C_ops.add(swish_6, swish_8) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_9 = paddle._C_ops.conv2d( - add_1, parameter_327, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_327 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_9, - parameter_326, - parameter_325, - parameter_324, - parameter_323, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_323, parameter_324, parameter_325, parameter_326 - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_9 = paddle._C_ops.swish(batch_norm__54) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_10 = paddle._C_ops.conv2d( - swish_9, parameter_322, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_322 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_10, - parameter_321, - parameter_320, - parameter_319, - parameter_318, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_318, parameter_319, parameter_320, parameter_321 - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x1x1xf32) - 
conv2d_11 = paddle._C_ops.conv2d( - swish_9, parameter_317, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_317 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__70, - batch_norm__71, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_11, - parameter_316, - parameter_315, - parameter_314, - parameter_313, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_313, parameter_314, parameter_315, parameter_316 - - # pd_op.multiply: (1x48x-1x-1xf32) <- (1xf32, 1x48x-1x-1xf32) - multiply_1 = paddle._C_ops.multiply(data_1, batch_norm__66) - del data_1 - - # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - add_2 = paddle._C_ops.add(batch_norm__60, multiply_1) - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_10 = paddle._C_ops.swish(add_2) - - # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - add_3 = paddle._C_ops.add(add_1, swish_10) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_12 = paddle._C_ops.conv2d( - add_3, parameter_312, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_312 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_12, - parameter_311, - parameter_310, - parameter_309, - parameter_308, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_308, parameter_309, parameter_310, parameter_311 - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_11 = paddle._C_ops.swish(batch_norm__72) - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x3x3xf32) - conv2d_13 = paddle._C_ops.conv2d( - swish_11, parameter_307, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_307 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__78, - batch_norm__79, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_13, - parameter_306, - parameter_305, - parameter_304, - parameter_303, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_303, parameter_304, parameter_305, parameter_306 - - # pd_op.conv2d: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 48x48x1x1xf32) - conv2d_14 = paddle._C_ops.conv2d( - swish_11, parameter_302, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_302 - - # pd_op.batch_norm_: (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (1x48x-1x-1xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_14, - parameter_301, - parameter_300, - 
parameter_299, - parameter_298, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_298, parameter_299, parameter_300, parameter_301 - - # pd_op.multiply: (1x48x-1x-1xf32) <- (1xf32, 1x48x-1x-1xf32) - multiply_2 = paddle._C_ops.multiply(data_2, batch_norm__84) - del data_2 - - # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - add_4 = paddle._C_ops.add(batch_norm__78, multiply_2) - - # pd_op.swish: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32) - swish_12 = paddle._C_ops.swish(add_4) - - # pd_op.add: (1x48x-1x-1xf32) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - add_5 = paddle._C_ops.add(add_3, swish_12) - - # pd_op.full: (1xi32) <- () + # pd_op.full: (xf32) <- () full_0 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() + [], float("1"), paddle.float32, paddle.framework._current_expected_place() ) - # pd_op.assign: (1xi32) <- (1xi32) - assign_0 = full_0 + # pd_op.greater_than: (1x1x24276xb) <- (1x1x24276xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(unsqueeze_0, full_0) + del full_0, unsqueeze_0 - # pd_op.assign: (1xi32) <- (1xi32) - assign_1 = full_0 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 38, 1] - # pd_op.assign: (1xi32) <- (1xi32) - assign_2 = full_0 + # pd_op.tile: (1x38x24276xb) <- (1x1x24276xb, 3xi64) + tile_0 = paddle._C_ops.tile(greater_than_0, full_int_array_1) + del full_int_array_1, greater_than_0 - # builtin.combine: ([1x48x-1x-1xf32, 1x48x-1x-1xf32]) <- (1x48x-1x-1xf32, 1x48x-1x-1xf32) - combine_0 = [swish_5, add_5] - - # pd_op.concat: (1x96x-1x-1xf32) <- ([1x48x-1x-1xf32, 1x48x-1x-1xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_0) - del combine_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [2, 3] + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_1 = paddle._C_ops.multiply(data_1, data_2) - # pd_op.assign: (2xi64) <- (2xi64) - assign_3 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_4 = full_int_array_0 - - # pd_op.assign: (2xi64) <- (2xi64) - assign_5 = full_int_array_0 - - # pd_op.mean: (1x96x1x1xf32) <- (1x96x-1x-1xf32, 2xi64) - mean_0 = paddle._C_ops.mean(concat_0, full_int_array_0, True) - - # pd_op.conv2d: (1x96x1x1xf32) <- (1x96x1x1xf32, 96x96x1x1xf32) - conv2d_15 = paddle._C_ops.conv2d( - mean_0, parameter_297, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi64) <- () + full_1 = paddle._C_ops.full( + [1], float("-2"), paddle.int64, paddle.core.CPUPlace() ) - del parameter_297 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, -1, 1, 1] - - # pd_op.reshape: (1x96x1x1xf32) <- (96xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_296, full_int_array_1) - del parameter_296 - # pd_op.add: (1x96x1x1xf32) <- (1x96x1x1xf32, 1x96x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_15, reshape_0) - - # pd_op.hardsigmoid: (1x96x1x1xf32) <- (1x96x1x1xf32) - hardsigmoid_0 = paddle._C_ops.hardsigmoid( - add_6, float("0.166667"), float("0.5") - ) - del add_6 - - # pd_op.multiply: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x1x1xf32) - multiply_3 = paddle._C_ops.multiply(concat_0, hardsigmoid_0) - - # pd_op.conv2d: (1x128x-1x-1xf32) <- (1x96x-1x-1xf32, 128x96x1x1xf32) - conv2d_16 = paddle._C_ops.conv2d( - multiply_3, parameter_295, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_295 - - # pd_op.batch_norm_: (1x128x-1x-1xf32, 128xf32, 128xf32, 
128xf32, 128xf32, -1xui8) <- (1x128x-1x-1xf32, 128xf32, 128xf32, 128xf32, 128xf32) - ( - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_16, - parameter_294, - parameter_293, - parameter_292, - parameter_291, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_291, parameter_292, parameter_293, parameter_294 - - # pd_op.swish: (1x128x-1x-1xf32) <- (1x128x-1x-1xf32) - swish_13 = paddle._C_ops.swish(batch_norm__90) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x128x-1x-1xf32, 192x128x3x3xf32) - conv2d_17 = paddle._C_ops.conv2d( - swish_13, parameter_290, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_290 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - batch_norm__100, - batch_norm__101, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_17, - parameter_289, - parameter_288, - parameter_287, - parameter_286, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_286, parameter_287, parameter_288, parameter_289 + # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) + argmax_0 = paddle._C_ops.argmax(multiply_1, full_1, False, False, paddle.int64) + del multiply_1 - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_14 = paddle._C_ops.swish(batch_norm__96) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_18 = paddle._C_ops.conv2d( - swish_14, parameter_285, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_285 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_18, - parameter_284, - parameter_283, - parameter_282, - parameter_281, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_281, parameter_282, parameter_283, parameter_284 - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_15 = paddle._C_ops.swish(batch_norm__102) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x192x-1x-1xf32, 96x192x1x1xf32) - conv2d_19 = paddle._C_ops.conv2d( - swish_14, parameter_280, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_280 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__108, - batch_norm__109, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_19, - parameter_279, - parameter_278, - parameter_277, - parameter_276, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_276, parameter_277, 
parameter_278, parameter_279 - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_16 = paddle._C_ops.swish(batch_norm__108) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_20 = paddle._C_ops.conv2d( - swish_16, parameter_275, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_275 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_20, - parameter_274, - parameter_273, - parameter_272, - parameter_271, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_271, parameter_272, parameter_273, parameter_274 - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_17 = paddle._C_ops.swish(batch_norm__114) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_21 = paddle._C_ops.conv2d( - swish_17, parameter_270, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_270 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_21, - parameter_269, - parameter_268, - parameter_267, - parameter_266, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_266, parameter_267, parameter_268, parameter_269 - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_22 = paddle._C_ops.conv2d( - swish_17, parameter_265, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_2 = paddle._C_ops.full( + [1], float("38"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_265 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__130, - batch_norm__131, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_22, - parameter_264, - parameter_263, - parameter_262, - parameter_261, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.one_hot: (1x24276x38xf32) <- (1x24276xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + argmax_0 % paddle.cast(full_2, argmax_0.dtype), full_2 ) - del parameter_261, parameter_262, parameter_263, parameter_264 - - # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) - multiply_4 = paddle._C_ops.multiply(data_3, batch_norm__126) - del data_3 + del argmax_0, full_2 - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_7 = paddle._C_ops.add(batch_norm__120, multiply_4) + # pd_op.transpose: (1x38x24276xf32) <- (1x24276x38xf32) + transpose_0 = paddle._C_ops.transpose(one_hot_0, [0, 2, 1]) + del one_hot_0 - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_18 = paddle._C_ops.swish(add_7) + # pd_op.where: 
(1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) + where_0 = paddle._C_ops.where(tile_0, transpose_0, data_2) + del data_2, tile_0, transpose_0 - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_8 = paddle._C_ops.add(swish_16, swish_18) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [-2] - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_23 = paddle._C_ops.conv2d( - add_8, parameter_260, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_260 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_23, - parameter_259, - parameter_258, - parameter_257, - parameter_256, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_256, parameter_257, parameter_258, parameter_259 + # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + sum_0 = paddle._C_ops.sum(where_0, full_int_array_2, None, False) - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_19 = paddle._C_ops.swish(batch_norm__132) + # pd_op.argmax: (1x24276xi64) <- (1x38x24276xf32, 1xi64) + argmax_1 = paddle._C_ops.argmax(where_0, full_1, False, False, paddle.int64) + del full_1 - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_24 = paddle._C_ops.conv2d( - swish_19, parameter_255, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_3 = paddle._C_ops.full( + [1], float("38"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_255 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__138, - batch_norm__139, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_24, - parameter_254, - parameter_253, - parameter_252, - parameter_251, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_251, parameter_252, parameter_253, parameter_254 + # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) + scale_0 = paddle._C_ops.scale(data_3, full_3, float("0"), True) + del data_3, full_3 - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_25 = paddle._C_ops.conv2d( - swish_19, parameter_250, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_250 + # pd_op.cast: (1x1xi64) <- (1x1xi32) + cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) + del scale_0 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_25, - parameter_249, - parameter_248, - parameter_247, - parameter_246, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_246, parameter_247, 
parameter_248, parameter_249 + # pd_op.add: (1x24276xi64) <- (1x24276xi64, 1x1xi64) + add_0 = paddle._C_ops.add(argmax_1, cast_0) + del argmax_1, cast_0 - # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) - multiply_5 = paddle._C_ops.multiply(data_4, batch_norm__144) + # pd_op.flatten: (38xi32) <- (1x38x1xi32) + flatten_0 = paddle._C_ops.flatten(data_4, 0, 2) del data_4 - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_9 = paddle._C_ops.add(batch_norm__138, multiply_5) - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_20 = paddle._C_ops.swish(add_9) + # pd_op.flatten: (24276xi64) <- (1x24276xi64) + flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) + del add_0 - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_10 = paddle._C_ops.add(add_8, swish_20) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_26 = paddle._C_ops.conv2d( - add_10, parameter_245, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xi32) <- () + full_4 = paddle._C_ops.full( + [1], float("0"), paddle.int32, paddle.core.CPUPlace() ) - del parameter_245 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_26, - parameter_244, - parameter_243, - parameter_242, - parameter_241, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_241, parameter_242, parameter_243, parameter_244 + # pd_op.gather: (24276xi32) <- (38xi32, 24276xi64, 1xi32) + gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_4) + del flatten_0 - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_21 = paddle._C_ops.swish(batch_norm__150) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_3 = [1, 24276] - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_27 = paddle._C_ops.conv2d( - swish_21, parameter_240, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_240 + # pd_op.reshape: (1x24276xi32) <- (24276xi32, 2xi64) + reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_3) + del full_int_array_3, gather_0 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__160, - batch_norm__161, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_27, - parameter_239, - parameter_238, - parameter_237, - parameter_236, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (xf32) <- () + full_5 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() ) - del parameter_236, parameter_237, parameter_238, parameter_239 - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_28 = paddle._C_ops.conv2d( - swish_21, parameter_235, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_235 + # pd_op.greater_than: (1x24276xb) <- (1x24276xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(sum_0, full_5) + del full_5, sum_0 
- # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_28, - parameter_234, - parameter_233, - parameter_232, - parameter_231, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), + # pd_op.full: (1xf32) <- () + full_6 = paddle._C_ops.full( + [1], float("10"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_231, parameter_232, parameter_233, parameter_234 - - # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) - multiply_6 = paddle._C_ops.multiply(data_5, batch_norm__162) - del data_5 - - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_11 = paddle._C_ops.add(batch_norm__156, multiply_6) - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_22 = paddle._C_ops.swish(add_11) - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_12 = paddle._C_ops.add(add_10, swish_22) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_29 = paddle._C_ops.conv2d( - add_12, parameter_230, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full_like: (1x24276xi32) <- (1x24276xi32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + reshape_1, full_6, paddle.int32, paddle.framework._current_expected_place() ) - del parameter_230 + del full_6 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__168, - batch_norm__169, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_29, - parameter_229, - parameter_228, - parameter_227, - parameter_226, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_226, parameter_227, parameter_228, parameter_229 + # pd_op.where: (1x24276xi32) <- (1x24276xb, 1x24276xi32, 1x24276xi32) + where_1 = paddle._C_ops.where(greater_than_1, reshape_1, full_like_0) + del full_like_0, greater_than_1, reshape_1 - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_23 = paddle._C_ops.swish(batch_norm__168) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_4 = [-1, 4] - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_30 = paddle._C_ops.conv2d( - swish_23, parameter_225, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_225 + # pd_op.reshape: (38x4xf32) <- (1x38x4xf32, 2xi64) + reshape_2 = paddle._C_ops.reshape(data_5, full_int_array_4) + del data_5, full_int_array_4 - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_30, - parameter_224, - parameter_223, - parameter_222, - parameter_221, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_221, 
parameter_222, parameter_223, parameter_224 + # pd_op.gather: (24276x4xf32) <- (38x4xf32, 24276xi64, 1xi32) + gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_4) + del flatten_1, full_4, reshape_2 - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_31 = paddle._C_ops.conv2d( - swish_23, parameter_220, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_220 + # pd_op.full_int_array: (3xi64) <- () + full_int_array_5 = [1, 24276, 4] - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_31, - parameter_219, - parameter_218, - parameter_217, - parameter_216, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_216, parameter_217, parameter_218, parameter_219 + # pd_op.reshape: (1x24276x4xf32) <- (24276x4xf32, 3xi64) + reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_5) + del full_int_array_5, gather_1 - # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) - multiply_7 = paddle._C_ops.multiply(data_6, batch_norm__180) + # pd_op.full: (1xi32) <- () + full_7 = paddle._C_ops.full( + [1], float("11"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x24276x11xf32) <- (1x24276xi32, 1xi32) + one_hot_1 = paddle._C_ops.one_hot( + where_1 % paddle.cast(full_7, where_1.dtype), full_7 + ) + del full_7 + + # pd_op.full: (10xi64) <- () + full_8 = paddle._C_ops.full( + [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + ) + + # pd_op.assign_value_: (10xi64) <- (10xi64) + assign_value__0 = paddle._C_ops.assign_value_( + full_8, + [10], + paddle.int64, + [ + float("0"), + float("1"), + float("2"), + float("3"), + float("4"), + float("5"), + float("6"), + float("7"), + float("8"), + float("9"), + ], + paddle.framework._current_expected_place(), + ) + del full_8 + + # pd_op.index_select: (1x24276x10xf32) <- (1x24276x11xf32, 10xi64) + index_select_0 = paddle._C_ops.index_select(one_hot_1, assign_value__0, -1) + del assign_value__0, one_hot_1 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_2 = paddle._C_ops.multiply(data_6, where_0) del data_6 - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_13 = paddle._C_ops.add(batch_norm__174, multiply_7) - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_24 = paddle._C_ops.swish(add_13) - - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_14 = paddle._C_ops.add(add_12, swish_24) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_32 = paddle._C_ops.conv2d( - add_14, parameter_215, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_215 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__190, - batch_norm__191, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_32, - parameter_214, - parameter_213, - parameter_212, - parameter_211, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, 
tuple)) - else (out, None, None, None, None, None), - ) - del parameter_211, parameter_212, parameter_213, parameter_214 - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_25 = paddle._C_ops.swish(batch_norm__186) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_33 = paddle._C_ops.conv2d( - swish_25, parameter_210, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_210 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_33, - parameter_209, - parameter_208, - parameter_207, - parameter_206, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_206, parameter_207, parameter_208, parameter_209 - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_34 = paddle._C_ops.conv2d( - swish_25, parameter_205, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_205 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__198, - batch_norm__199, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_34, - parameter_204, - parameter_203, - parameter_202, - parameter_201, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_201, parameter_202, parameter_203, parameter_204 - - # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) - multiply_8 = paddle._C_ops.multiply(data_7, batch_norm__198) - del data_7 - - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_15 = paddle._C_ops.add(batch_norm__192, multiply_8) - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_26 = paddle._C_ops.swish(add_15) - - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_16 = paddle._C_ops.add(add_14, swish_26) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_35 = paddle._C_ops.conv2d( - add_16, parameter_200, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_200 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_35, - parameter_199, - parameter_198, - parameter_197, - parameter_196, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_196, parameter_197, parameter_198, parameter_199 - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_27 = paddle._C_ops.swish(batch_norm__204) - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x3x3xf32) - conv2d_36 = paddle._C_ops.conv2d( - swish_27, parameter_195, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_195 - - # pd_op.batch_norm_: 
(1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_36, - parameter_194, - parameter_193, - parameter_192, - parameter_191, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_191, parameter_192, parameter_193, parameter_194 - - # pd_op.conv2d: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 96x96x1x1xf32) - conv2d_37 = paddle._C_ops.conv2d( - swish_27, parameter_190, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_190 - - # pd_op.batch_norm_: (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32, -1xui8) <- (1x96x-1x-1xf32, 96xf32, 96xf32, 96xf32, 96xf32) - ( - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__220, - batch_norm__221, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_37, - parameter_189, - parameter_188, - parameter_187, - parameter_186, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_186, parameter_187, parameter_188, parameter_189 - - # pd_op.multiply: (1x96x-1x-1xf32) <- (1xf32, 1x96x-1x-1xf32) - multiply_9 = paddle._C_ops.multiply(data_8, batch_norm__216) - del data_8 - - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_17 = paddle._C_ops.add(batch_norm__210, multiply_9) - - # pd_op.swish: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32) - swish_28 = paddle._C_ops.swish(add_17) - - # pd_op.add: (1x96x-1x-1xf32) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - add_18 = paddle._C_ops.add(add_16, swish_28) + # pd_op.full_int_array: (1xi64) <- () + full_int_array_6 = [-1] - # builtin.combine: ([1x96x-1x-1xf32, 1x96x-1x-1xf32]) <- (1x96x-1x-1xf32, 1x96x-1x-1xf32) - combine_1 = [swish_15, add_18] + # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + max_0 = paddle._C_ops.max(multiply_2, full_int_array_6, True) - # pd_op.concat: (1x192x-1x-1xf32) <- ([1x96x-1x-1xf32, 1x96x-1x-1xf32], 1xi32) - concat_1 = paddle._C_ops.concat(combine_1, full_0) - del combine_1 + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_3 = paddle._C_ops.multiply(data_1, where_0) + del data_1, where_0 - # pd_op.mean: (1x192x1x1xf32) <- (1x192x-1x-1xf32, 2xi64) - mean_1 = paddle._C_ops.mean(concat_1, full_int_array_0, True) + # pd_op.max: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + max_1 = paddle._C_ops.max(multiply_3, full_int_array_6, True) + del multiply_3 - # pd_op.conv2d: (1x192x1x1xf32) <- (1x192x1x1xf32, 192x192x1x1xf32) - conv2d_38 = paddle._C_ops.conv2d( - mean_1, parameter_185, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.full: (1xf32) <- () + full_9 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - del parameter_185 - # pd_op.reshape: (1x192x1x1xf32) <- (192xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_184, full_int_array_1) - del parameter_184 + # pd_op.scale: (1x38x1xf32) <- (1x38x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(max_0, full_9, float("1e-09"), True) + del full_9, max_0 - # pd_op.add: (1x192x1x1xf32) <- (1x192x1x1xf32, 1x192x1x1xf32) - add_19 = paddle._C_ops.add(conv2d_38, reshape_1) + # pd_op.divide: (1x38x24276xf32) <- 
(1x38x24276xf32, 1x38x1xf32) + divide_0 = paddle._C_ops.divide(multiply_2, scale_1) + del multiply_2, scale_1 - # pd_op.hardsigmoid: (1x192x1x1xf32) <- (1x192x1x1xf32) - hardsigmoid_1 = paddle._C_ops.hardsigmoid( - add_19, float("0.166667"), float("0.5") - ) - del add_19 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x1x1xf32) - multiply_10 = paddle._C_ops.multiply(concat_1, hardsigmoid_1) - - # pd_op.conv2d: (1x256x-1x-1xf32) <- (1x192x-1x-1xf32, 256x192x1x1xf32) - conv2d_39 = paddle._C_ops.conv2d( - multiply_10, parameter_183, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_183 + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_4 = paddle._C_ops.multiply(divide_0, max_1) + del divide_0, max_1 - # pd_op.batch_norm_: (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (1x256x-1x-1xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_39, - parameter_182, - parameter_181, - parameter_180, - parameter_179, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_179, parameter_180, parameter_181, parameter_182 + # pd_op.max: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + max_2 = paddle._C_ops.max(multiply_4, full_int_array_2, False) + del full_int_array_2, multiply_4 - # pd_op.swish: (1x256x-1x-1xf32) <- (1x256x-1x-1xf32) - swish_29 = paddle._C_ops.swish(batch_norm__222) + # pd_op.unsqueeze: (1x24276x1xf32) <- (1x24276xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(max_2, full_int_array_6) + del full_int_array_6, max_2 - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x256x-1x-1xf32, 384x256x3x3xf32) - conv2d_40 = paddle._C_ops.conv2d( - swish_29, parameter_178, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_178 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__228, - batch_norm__229, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_40, - parameter_177, - parameter_176, - parameter_175, - parameter_174, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_174, parameter_175, parameter_176, parameter_177 - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_30 = paddle._C_ops.swish(batch_norm__228) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_41 = paddle._C_ops.conv2d( - swish_30, parameter_173, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_173 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_41, - parameter_172, - parameter_171, - parameter_170, - parameter_169, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, 
None, None), - ) - del parameter_169, parameter_170, parameter_171, parameter_172 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_31 = paddle._C_ops.swish(batch_norm__234) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x384x-1x-1xf32, 192x384x1x1xf32) - conv2d_42 = paddle._C_ops.conv2d( - swish_30, parameter_168, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_168 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_42, - parameter_167, - parameter_166, - parameter_165, - parameter_164, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_164, parameter_165, parameter_166, parameter_167 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_32 = paddle._C_ops.swish(batch_norm__240) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_43 = paddle._C_ops.conv2d( - swish_32, parameter_163, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_163 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__250, - batch_norm__251, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_43, - parameter_162, - parameter_161, - parameter_160, - parameter_159, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_159, parameter_160, parameter_161, parameter_162 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_33 = paddle._C_ops.swish(batch_norm__246) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_44 = paddle._C_ops.conv2d( - swish_33, parameter_158, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_158 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_44, - parameter_157, - parameter_156, - parameter_155, - parameter_154, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_154, parameter_155, parameter_156, parameter_157 - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_45 = paddle._C_ops.conv2d( - swish_33, parameter_153, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_153 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__258, - batch_norm__259, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_45, - parameter_152, - parameter_151, - parameter_150, - 
parameter_149, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_149, parameter_150, parameter_151, parameter_152 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) - multiply_11 = paddle._C_ops.multiply(data_9, batch_norm__258) - del data_9 - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_20 = paddle._C_ops.add(batch_norm__252, multiply_11) - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_34 = paddle._C_ops.swish(add_20) - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_21 = paddle._C_ops.add(swish_32, swish_34) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_46 = paddle._C_ops.conv2d( - add_21, parameter_148, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_148 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_46, - parameter_147, - parameter_146, - parameter_145, - parameter_144, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_144, parameter_145, parameter_146, parameter_147 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_35 = paddle._C_ops.swish(batch_norm__264) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_47 = paddle._C_ops.conv2d( - swish_35, parameter_143, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_143 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_47, - parameter_142, - parameter_141, - parameter_140, - parameter_139, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_139, parameter_140, parameter_141, parameter_142 - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_48 = paddle._C_ops.conv2d( - swish_35, parameter_138, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_138 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__280, - batch_norm__281, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_48, - parameter_137, - parameter_136, - parameter_135, - parameter_134, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_134, parameter_135, parameter_136, parameter_137 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) - multiply_12 = paddle._C_ops.multiply(data_10, batch_norm__276) - del 
data_10 - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_22 = paddle._C_ops.add(batch_norm__270, multiply_12) - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_36 = paddle._C_ops.swish(add_22) - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_23 = paddle._C_ops.add(add_21, swish_36) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_49 = paddle._C_ops.conv2d( - add_23, parameter_133, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_133 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_49, - parameter_132, - parameter_131, - parameter_130, - parameter_129, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_129, parameter_130, parameter_131, parameter_132 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_37 = paddle._C_ops.swish(batch_norm__282) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_50 = paddle._C_ops.conv2d( - swish_37, parameter_128, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_128 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__288, - batch_norm__289, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_50, - parameter_127, - parameter_126, - parameter_125, - parameter_124, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_124, parameter_125, parameter_126, parameter_127 - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_51 = paddle._C_ops.conv2d( - swish_37, parameter_123, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_123 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_51, - parameter_122, - parameter_121, - parameter_120, - parameter_119, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_119, parameter_120, parameter_121, parameter_122 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) - multiply_13 = paddle._C_ops.multiply(data_11, batch_norm__294) - del data_11 - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_24 = paddle._C_ops.add(batch_norm__288, multiply_13) - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_38 = paddle._C_ops.swish(add_24) - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_25 = paddle._C_ops.add(add_23, swish_38) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- 
(1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_52 = paddle._C_ops.conv2d( - add_25, parameter_118, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_118 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_52, - parameter_117, - parameter_116, - parameter_115, - parameter_114, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_114, parameter_115, parameter_116, parameter_117 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_39 = paddle._C_ops.swish(batch_norm__300) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_53 = paddle._C_ops.conv2d( - swish_39, parameter_113, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_113 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__310, - batch_norm__311, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_53, - parameter_112, - parameter_111, - parameter_110, - parameter_109, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_109, parameter_110, parameter_111, parameter_112 - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_54 = paddle._C_ops.conv2d( - swish_39, parameter_108, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_108 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_54, - parameter_107, - parameter_106, - parameter_105, - parameter_104, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_104, parameter_105, parameter_106, parameter_107 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) - multiply_14 = paddle._C_ops.multiply(data_12, batch_norm__312) - del data_12 - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_26 = paddle._C_ops.add(batch_norm__306, multiply_14) - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_40 = paddle._C_ops.swish(add_26) - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_27 = paddle._C_ops.add(add_25, swish_40) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_55 = paddle._C_ops.conv2d( - add_27, parameter_103, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_103 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__318, - batch_norm__319, - batch_norm__320, - batch_norm__321, - 
batch_norm__322, - batch_norm__323, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_55, - parameter_102, - parameter_101, - parameter_100, - parameter_99, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_100, parameter_101, parameter_102, parameter_99 - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_41 = paddle._C_ops.swish(batch_norm__318) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_56 = paddle._C_ops.conv2d( - swish_41, parameter_98, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_98 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_56, - parameter_97, - parameter_96, - parameter_95, - parameter_94, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_94, parameter_95, parameter_96, parameter_97 - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_57 = paddle._C_ops.conv2d( - swish_41, parameter_93, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_93 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_57, - parameter_92, - parameter_91, - parameter_90, - parameter_89, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_89, parameter_90, parameter_91, parameter_92 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) - multiply_15 = paddle._C_ops.multiply(data_13, batch_norm__330) - del data_13 - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_28 = paddle._C_ops.add(batch_norm__324, multiply_15) - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_42 = paddle._C_ops.swish(add_28) - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_29 = paddle._C_ops.add(add_27, swish_42) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_58 = paddle._C_ops.conv2d( - add_29, parameter_88, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_88 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__340, - batch_norm__341, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_58, - parameter_87, - parameter_86, - parameter_85, - parameter_84, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_84, parameter_85, parameter_86, parameter_87 - - # 
pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_43 = paddle._C_ops.swish(batch_norm__336) - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x3x3xf32) - conv2d_59 = paddle._C_ops.conv2d( - swish_43, parameter_83, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_83 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_59, - parameter_82, - parameter_81, - parameter_80, - parameter_79, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_79, parameter_80, parameter_81, parameter_82 - - # pd_op.conv2d: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 192x192x1x1xf32) - conv2d_60 = paddle._C_ops.conv2d( - swish_43, parameter_78, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_78 - - # pd_op.batch_norm_: (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32, -1xui8) <- (1x192x-1x-1xf32, 192xf32, 192xf32, 192xf32, 192xf32) - ( - batch_norm__348, - batch_norm__349, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_60, - parameter_77, - parameter_76, - parameter_75, - parameter_74, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_74, parameter_75, parameter_76, parameter_77 - - # pd_op.multiply: (1x192x-1x-1xf32) <- (1xf32, 1x192x-1x-1xf32) - multiply_16 = paddle._C_ops.multiply(data_14, batch_norm__348) - del data_14 - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_30 = paddle._C_ops.add(batch_norm__342, multiply_16) - - # pd_op.swish: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32) - swish_44 = paddle._C_ops.swish(add_30) - - # pd_op.add: (1x192x-1x-1xf32) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - add_31 = paddle._C_ops.add(add_29, swish_44) - - # builtin.combine: ([1x192x-1x-1xf32, 1x192x-1x-1xf32]) <- (1x192x-1x-1xf32, 1x192x-1x-1xf32) - combine_2 = [swish_31, add_31] - - # pd_op.concat: (1x384x-1x-1xf32) <- ([1x192x-1x-1xf32, 1x192x-1x-1xf32], 1xi32) - concat_2 = paddle._C_ops.concat(combine_2, full_0) - del combine_2 - - # pd_op.mean: (1x384x1x1xf32) <- (1x384x-1x-1xf32, 2xi64) - mean_2 = paddle._C_ops.mean(concat_2, full_int_array_0, True) - - # pd_op.conv2d: (1x384x1x1xf32) <- (1x384x1x1xf32, 384x384x1x1xf32) - conv2d_61 = paddle._C_ops.conv2d( - mean_2, parameter_73, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_73 - - # pd_op.reshape: (1x384x1x1xf32) <- (384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_72, full_int_array_1) - del parameter_72 - - # pd_op.add: (1x384x1x1xf32) <- (1x384x1x1xf32, 1x384x1x1xf32) - add_32 = paddle._C_ops.add(conv2d_61, reshape_2) - - # pd_op.hardsigmoid: (1x384x1x1xf32) <- (1x384x1x1xf32) - hardsigmoid_2 = paddle._C_ops.hardsigmoid( - add_32, float("0.166667"), float("0.5") - ) - del add_32 - - # pd_op.multiply: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x1x1xf32) - multiply_17 = paddle._C_ops.multiply(concat_2, hardsigmoid_2) - - # pd_op.conv2d: (1x512x-1x-1xf32) <- (1x384x-1x-1xf32, 512x384x1x1xf32) - conv2d_62 = 
paddle._C_ops.conv2d( - multiply_17, parameter_71, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_71 - - # pd_op.batch_norm_: (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32, -1xui8) <- (1x512x-1x-1xf32, 512xf32, 512xf32, 512xf32, 512xf32) - ( - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_62, - parameter_70, - parameter_69, - parameter_68, - parameter_67, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_67, parameter_68, parameter_69, parameter_70 - - # pd_op.swish: (1x512x-1x-1xf32) <- (1x512x-1x-1xf32) - swish_45 = paddle._C_ops.swish(batch_norm__354) - - # pd_op.conv2d: (1x768x-1x-1xf32) <- (1x512x-1x-1xf32, 768x512x3x3xf32) - conv2d_63 = paddle._C_ops.conv2d( - swish_45, parameter_66, [2, 2], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_66 - - # pd_op.batch_norm_: (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32, -1xui8) <- (1x768x-1x-1xf32, 768xf32, 768xf32, 768xf32, 768xf32) - ( - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_63, - parameter_65, - parameter_64, - parameter_63, - parameter_62, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_62, parameter_63, parameter_64, parameter_65 - - # pd_op.swish: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32) - swish_46 = paddle._C_ops.swish(batch_norm__360) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_64 = paddle._C_ops.conv2d( - swish_46, parameter_61, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_61 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__366, - batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__370, - batch_norm__371, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_64, - parameter_60, - parameter_59, - parameter_58, - parameter_57, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_57, parameter_58, parameter_59, parameter_60 - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_47 = paddle._C_ops.swish(batch_norm__366) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x768x-1x-1xf32, 384x768x1x1xf32) - conv2d_65 = paddle._C_ops.conv2d( - swish_46, parameter_56, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_56 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_65, - parameter_55, - parameter_54, - parameter_53, - parameter_52, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del 
parameter_52, parameter_53, parameter_54, parameter_55 - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_48 = paddle._C_ops.swish(batch_norm__372) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_66 = paddle._C_ops.conv2d( - swish_48, parameter_51, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_51 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__378, - batch_norm__379, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_66, - parameter_50, - parameter_49, - parameter_48, - parameter_47, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_47, parameter_48, parameter_49, parameter_50 - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_49 = paddle._C_ops.swish(batch_norm__378) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_67 = paddle._C_ops.conv2d( - swish_49, parameter_46, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_46 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_67, - parameter_45, - parameter_44, - parameter_43, - parameter_42, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_42, parameter_43, parameter_44, parameter_45 - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_68 = paddle._C_ops.conv2d( - swish_49, parameter_41, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_41 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_68, - parameter_40, - parameter_39, - parameter_38, - parameter_37, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_37, parameter_38, parameter_39, parameter_40 - - # pd_op.multiply: (1x384x-1x-1xf32) <- (1xf32, 1x384x-1x-1xf32) - multiply_18 = paddle._C_ops.multiply(data_15, batch_norm__390) - del data_15 - - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_33 = paddle._C_ops.add(batch_norm__384, multiply_18) - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_50 = paddle._C_ops.swish(add_33) - - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_34 = paddle._C_ops.add(swish_48, swish_50) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_69 = paddle._C_ops.conv2d( - add_34, parameter_36, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_36 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 
384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__400, - batch_norm__401, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_69, - parameter_35, - parameter_34, - parameter_33, - parameter_32, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_32, parameter_33, parameter_34, parameter_35 - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_51 = paddle._C_ops.swish(batch_norm__396) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_70 = paddle._C_ops.conv2d( - swish_51, parameter_31, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_31 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_70, - parameter_30, - parameter_29, - parameter_28, - parameter_27, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_27, parameter_28, parameter_29, parameter_30 - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_71 = paddle._C_ops.conv2d( - swish_51, parameter_26, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_26 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__408, - batch_norm__409, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_71, - parameter_25, - parameter_24, - parameter_23, - parameter_22, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_22, parameter_23, parameter_24, parameter_25 - - # pd_op.multiply: (1x384x-1x-1xf32) <- (1xf32, 1x384x-1x-1xf32) - multiply_19 = paddle._C_ops.multiply(data_16, batch_norm__408) - del data_16 - - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_35 = paddle._C_ops.add(batch_norm__402, multiply_19) - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_52 = paddle._C_ops.swish(add_35) - - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_36 = paddle._C_ops.add(add_34, swish_52) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_72 = paddle._C_ops.conv2d( - add_36, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_21 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_72, - parameter_20, - parameter_19, - parameter_18, - parameter_17, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda 
out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_17, parameter_18, parameter_19, parameter_20 - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_53 = paddle._C_ops.swish(batch_norm__414) - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x3x3xf32) - conv2d_73 = paddle._C_ops.conv2d( - swish_53, parameter_16, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_16 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_73, - parameter_15, - parameter_14, - parameter_13, - parameter_12, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_12, parameter_13, parameter_14, parameter_15 - - # pd_op.conv2d: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 384x384x1x1xf32) - conv2d_74 = paddle._C_ops.conv2d( - swish_53, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_11 - - # pd_op.batch_norm_: (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (1x384x-1x-1xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__430, - batch_norm__431, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_74, - parameter_10, - parameter_9, - parameter_8, - parameter_7, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_7, parameter_8, parameter_9 - - # pd_op.multiply: (1x384x-1x-1xf32) <- (1xf32, 1x384x-1x-1xf32) - multiply_20 = paddle._C_ops.multiply(data_17, batch_norm__426) - del data_17 - - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_37 = paddle._C_ops.add(batch_norm__420, multiply_20) - - # pd_op.swish: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32) - swish_54 = paddle._C_ops.swish(add_37) - - # pd_op.add: (1x384x-1x-1xf32) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - add_38 = paddle._C_ops.add(add_36, swish_54) - - # builtin.combine: ([1x384x-1x-1xf32, 1x384x-1x-1xf32]) <- (1x384x-1x-1xf32, 1x384x-1x-1xf32) - combine_3 = [swish_47, add_38] - - # pd_op.concat: (1x768x-1x-1xf32) <- ([1x384x-1x-1xf32, 1x384x-1x-1xf32], 1xi32) - concat_3 = paddle._C_ops.concat(combine_3, full_0) - del combine_3 - - # pd_op.mean: (1x768x1x1xf32) <- (1x768x-1x-1xf32, 2xi64) - mean_3 = paddle._C_ops.mean(concat_3, full_int_array_0, True) - - # pd_op.conv2d: (1x768x1x1xf32) <- (1x768x1x1xf32, 768x768x1x1xf32) - conv2d_75 = paddle._C_ops.conv2d( - mean_3, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.reshape: (1x768x1x1xf32) <- (768xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_5, full_int_array_1) - del full_int_array_1, parameter_5 - - # pd_op.add: (1x768x1x1xf32) <- (1x768x1x1xf32, 1x768x1x1xf32) - add_39 = paddle._C_ops.add(conv2d_75, reshape_3) - - # pd_op.hardsigmoid: (1x768x1x1xf32) <- (1x768x1x1xf32) - hardsigmoid_3 = paddle._C_ops.hardsigmoid( - add_39, float("0.166667"), float("0.5") - ) - del add_39 - - # pd_op.multiply: (1x768x-1x-1xf32) <- (1x768x-1x-1xf32, 
1x768x1x1xf32) - multiply_21 = paddle._C_ops.multiply(concat_3, hardsigmoid_3) - - # pd_op.conv2d: (1x1024x-1x-1xf32) <- (1x768x-1x-1xf32, 1024x768x1x1xf32) - conv2d_76 = paddle._C_ops.conv2d( - multiply_21, parameter_4, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_4 - - # pd_op.batch_norm_: (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32, -1xui8) <- (1x1024x-1x-1xf32, 1024xf32, 1024xf32, 1024xf32, 1024xf32) - ( - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_76, - parameter_3, - parameter_2, - parameter_1, - parameter_0, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_0, parameter_1, parameter_2, parameter_3 - - # pd_op.swish: (1x1024x-1x-1xf32) <- (1x1024x-1x-1xf32) - swish_0 = paddle._C_ops.swish(batch_norm__432) - del ( - add_0, - add_1, - add_10, - add_11, - add_12, - add_13, - add_14, - add_15, - add_16, - add_17, - add_18, - add_2, - add_20, - add_21, - add_22, - add_23, - add_24, - add_25, - add_26, - add_27, - add_28, - add_29, - add_3, - add_30, - add_31, - add_33, - add_34, - add_35, - add_36, - add_37, - add_38, - add_4, - add_5, - add_7, - add_8, - add_9, - assign_0, - assign_1, - assign_2, - assign_3, - assign_4, - assign_5, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__100, - batch_norm__101, - batch_norm__102, - batch_norm__103, - batch_norm__104, - batch_norm__105, - batch_norm__106, - batch_norm__107, - batch_norm__108, - batch_norm__109, - batch_norm__11, - batch_norm__110, - batch_norm__111, - batch_norm__112, - batch_norm__113, - batch_norm__114, - batch_norm__115, - batch_norm__116, - batch_norm__117, - batch_norm__118, - batch_norm__119, - batch_norm__12, - batch_norm__120, - batch_norm__121, - batch_norm__122, - batch_norm__123, - batch_norm__124, - batch_norm__125, - batch_norm__126, - batch_norm__127, - batch_norm__128, - batch_norm__129, - batch_norm__13, - batch_norm__130, - batch_norm__131, - batch_norm__132, - batch_norm__133, - batch_norm__134, - batch_norm__135, - batch_norm__136, - batch_norm__137, - batch_norm__138, - batch_norm__139, - batch_norm__14, - batch_norm__140, - batch_norm__141, - batch_norm__142, - batch_norm__143, - batch_norm__144, - batch_norm__145, - batch_norm__146, - batch_norm__147, - batch_norm__148, - batch_norm__149, - batch_norm__15, - batch_norm__150, - batch_norm__151, - batch_norm__152, - batch_norm__153, - batch_norm__154, - batch_norm__155, - batch_norm__156, - batch_norm__157, - batch_norm__158, - batch_norm__159, - batch_norm__16, - batch_norm__160, - batch_norm__161, - batch_norm__162, - batch_norm__163, - batch_norm__164, - batch_norm__165, - batch_norm__166, - batch_norm__167, - batch_norm__168, - batch_norm__169, - batch_norm__17, - batch_norm__170, - batch_norm__171, - batch_norm__172, - batch_norm__173, - batch_norm__174, - batch_norm__175, - batch_norm__176, - batch_norm__177, - batch_norm__178, - batch_norm__179, - batch_norm__18, - batch_norm__180, - batch_norm__181, - batch_norm__182, - batch_norm__183, - batch_norm__184, - batch_norm__185, - batch_norm__186, - batch_norm__187, - batch_norm__188, - batch_norm__189, - batch_norm__19, - batch_norm__190, - batch_norm__191, - batch_norm__192, - batch_norm__193, - batch_norm__194, - batch_norm__195, - batch_norm__196, - batch_norm__197, - 
batch_norm__198, - batch_norm__199, - batch_norm__2, - batch_norm__20, - batch_norm__200, - batch_norm__201, - batch_norm__202, - batch_norm__203, - batch_norm__204, - batch_norm__205, - batch_norm__206, - batch_norm__207, - batch_norm__208, - batch_norm__209, - batch_norm__21, - batch_norm__210, - batch_norm__211, - batch_norm__212, - batch_norm__213, - batch_norm__214, - batch_norm__215, - batch_norm__216, - batch_norm__217, - batch_norm__218, - batch_norm__219, - batch_norm__22, - batch_norm__220, - batch_norm__221, - batch_norm__222, - batch_norm__223, - batch_norm__224, - batch_norm__225, - batch_norm__226, - batch_norm__227, - batch_norm__228, - batch_norm__229, - batch_norm__23, - batch_norm__230, - batch_norm__231, - batch_norm__232, - batch_norm__233, - batch_norm__234, - batch_norm__235, - batch_norm__236, - batch_norm__237, - batch_norm__238, - batch_norm__239, - batch_norm__24, - batch_norm__240, - batch_norm__241, - batch_norm__242, - batch_norm__243, - batch_norm__244, - batch_norm__245, - batch_norm__246, - batch_norm__247, - batch_norm__248, - batch_norm__249, - batch_norm__25, - batch_norm__250, - batch_norm__251, - batch_norm__252, - batch_norm__253, - batch_norm__254, - batch_norm__255, - batch_norm__256, - batch_norm__257, - batch_norm__258, - batch_norm__259, - batch_norm__26, - batch_norm__260, - batch_norm__261, - batch_norm__262, - batch_norm__263, - batch_norm__264, - batch_norm__265, - batch_norm__266, - batch_norm__267, - batch_norm__268, - batch_norm__269, - batch_norm__27, - batch_norm__270, - batch_norm__271, - batch_norm__272, - batch_norm__273, - batch_norm__274, - batch_norm__275, - batch_norm__276, - batch_norm__277, - batch_norm__278, - batch_norm__279, - batch_norm__28, - batch_norm__280, - batch_norm__281, - batch_norm__282, - batch_norm__283, - batch_norm__284, - batch_norm__285, - batch_norm__286, - batch_norm__287, - batch_norm__288, - batch_norm__289, - batch_norm__29, - batch_norm__290, - batch_norm__291, - batch_norm__292, - batch_norm__293, - batch_norm__294, - batch_norm__295, - batch_norm__296, - batch_norm__297, - batch_norm__298, - batch_norm__299, - batch_norm__3, - batch_norm__30, - batch_norm__300, - batch_norm__301, - batch_norm__302, - batch_norm__303, - batch_norm__304, - batch_norm__305, - batch_norm__306, - batch_norm__307, - batch_norm__308, - batch_norm__309, - batch_norm__31, - batch_norm__310, - batch_norm__311, - batch_norm__312, - batch_norm__313, - batch_norm__314, - batch_norm__315, - batch_norm__316, - batch_norm__317, - batch_norm__318, - batch_norm__319, - batch_norm__32, - batch_norm__320, - batch_norm__321, - batch_norm__322, - batch_norm__323, - batch_norm__324, - batch_norm__325, - batch_norm__326, - batch_norm__327, - batch_norm__328, - batch_norm__329, - batch_norm__33, - batch_norm__330, - batch_norm__331, - batch_norm__332, - batch_norm__333, - batch_norm__334, - batch_norm__335, - batch_norm__336, - batch_norm__337, - batch_norm__338, - batch_norm__339, - batch_norm__34, - batch_norm__340, - batch_norm__341, - batch_norm__342, - batch_norm__343, - batch_norm__344, - batch_norm__345, - batch_norm__346, - batch_norm__347, - batch_norm__348, - batch_norm__349, - batch_norm__35, - batch_norm__350, - batch_norm__351, - batch_norm__352, - batch_norm__353, - batch_norm__354, - batch_norm__355, - batch_norm__356, - batch_norm__357, - batch_norm__358, - batch_norm__359, - batch_norm__36, - batch_norm__360, - batch_norm__361, - batch_norm__362, - batch_norm__363, - batch_norm__364, - batch_norm__365, - batch_norm__366, - 
batch_norm__367, - batch_norm__368, - batch_norm__369, - batch_norm__37, - batch_norm__370, - batch_norm__371, - batch_norm__372, - batch_norm__373, - batch_norm__374, - batch_norm__375, - batch_norm__376, - batch_norm__377, - batch_norm__378, - batch_norm__379, - batch_norm__38, - batch_norm__380, - batch_norm__381, - batch_norm__382, - batch_norm__383, - batch_norm__384, - batch_norm__385, - batch_norm__386, - batch_norm__387, - batch_norm__388, - batch_norm__389, - batch_norm__39, - batch_norm__390, - batch_norm__391, - batch_norm__392, - batch_norm__393, - batch_norm__394, - batch_norm__395, - batch_norm__396, - batch_norm__397, - batch_norm__398, - batch_norm__399, - batch_norm__4, - batch_norm__40, - batch_norm__400, - batch_norm__401, - batch_norm__402, - batch_norm__403, - batch_norm__404, - batch_norm__405, - batch_norm__406, - batch_norm__407, - batch_norm__408, - batch_norm__409, - batch_norm__41, - batch_norm__410, - batch_norm__411, - batch_norm__412, - batch_norm__413, - batch_norm__414, - batch_norm__415, - batch_norm__416, - batch_norm__417, - batch_norm__418, - batch_norm__419, - batch_norm__42, - batch_norm__420, - batch_norm__421, - batch_norm__422, - batch_norm__423, - batch_norm__424, - batch_norm__425, - batch_norm__426, - batch_norm__427, - batch_norm__428, - batch_norm__429, - batch_norm__43, - batch_norm__430, - batch_norm__431, - batch_norm__432, - batch_norm__433, - batch_norm__434, - batch_norm__435, - batch_norm__436, - batch_norm__437, - batch_norm__44, - batch_norm__45, - batch_norm__46, - batch_norm__47, - batch_norm__48, - batch_norm__49, - batch_norm__5, - batch_norm__50, - batch_norm__51, - batch_norm__52, - batch_norm__53, - batch_norm__54, - batch_norm__55, - batch_norm__56, - batch_norm__57, - batch_norm__58, - batch_norm__59, - batch_norm__6, - batch_norm__60, - batch_norm__61, - batch_norm__62, - batch_norm__63, - batch_norm__64, - batch_norm__65, - batch_norm__66, - batch_norm__67, - batch_norm__68, - batch_norm__69, - batch_norm__7, - batch_norm__70, - batch_norm__71, - batch_norm__72, - batch_norm__73, - batch_norm__74, - batch_norm__75, - batch_norm__76, - batch_norm__77, - batch_norm__78, - batch_norm__79, - batch_norm__8, - batch_norm__80, - batch_norm__81, - batch_norm__82, - batch_norm__83, - batch_norm__84, - batch_norm__85, - batch_norm__86, - batch_norm__87, - batch_norm__88, - batch_norm__89, - batch_norm__9, - batch_norm__90, - batch_norm__91, - batch_norm__92, - batch_norm__93, - batch_norm__94, - batch_norm__95, - batch_norm__96, - batch_norm__97, - batch_norm__98, - batch_norm__99, - concat_0, - concat_1, - concat_2, - concat_3, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_12, - conv2d_13, - conv2d_14, - conv2d_15, - conv2d_16, - conv2d_17, - conv2d_18, - conv2d_19, - conv2d_2, - conv2d_20, - conv2d_21, - conv2d_22, - conv2d_23, - conv2d_24, - conv2d_25, - conv2d_26, - conv2d_27, - conv2d_28, - conv2d_29, - conv2d_3, - conv2d_30, - conv2d_31, - conv2d_32, - conv2d_33, - conv2d_34, - conv2d_35, - conv2d_36, - conv2d_37, - conv2d_38, - conv2d_39, - conv2d_4, - conv2d_40, - conv2d_41, - conv2d_42, - conv2d_43, - conv2d_44, - conv2d_45, - conv2d_46, - conv2d_47, - conv2d_48, - conv2d_49, - conv2d_5, - conv2d_50, - conv2d_51, - conv2d_52, - conv2d_53, - conv2d_54, - conv2d_55, - conv2d_56, - conv2d_57, - conv2d_58, - conv2d_59, - conv2d_6, - conv2d_60, - conv2d_61, - conv2d_62, - conv2d_63, - conv2d_64, - conv2d_65, - conv2d_66, - conv2d_67, - conv2d_68, - conv2d_69, - conv2d_7, - conv2d_70, - conv2d_71, - conv2d_72, - 
conv2d_73, - conv2d_74, - conv2d_75, - conv2d_76, - conv2d_8, - conv2d_9, - full_0, - full_int_array_0, - hardsigmoid_0, - hardsigmoid_1, - hardsigmoid_2, - hardsigmoid_3, - mean_0, - mean_1, - mean_2, - mean_3, - multiply_0, - multiply_1, - multiply_10, - multiply_11, - multiply_12, - multiply_13, - multiply_14, - multiply_15, - multiply_16, - multiply_17, - multiply_18, - multiply_19, - multiply_2, - multiply_20, - multiply_21, - multiply_3, - multiply_4, - multiply_5, - multiply_6, - multiply_7, - multiply_8, - multiply_9, - reshape_0, - reshape_1, - reshape_2, - reshape_3, - swish_1, - swish_10, - swish_11, - swish_12, - swish_13, - swish_14, - swish_15, - swish_16, - swish_17, - swish_18, - swish_19, - swish_2, - swish_20, - swish_21, - swish_22, - swish_23, - swish_24, - swish_25, - swish_26, - swish_27, - swish_28, - swish_29, - swish_3, - swish_30, - swish_31, - swish_32, - swish_33, - swish_34, - swish_35, - swish_36, - swish_37, - swish_38, - swish_39, - swish_4, - swish_40, - swish_41, - swish_42, - swish_43, - swish_44, - swish_45, - swish_46, - swish_47, - swish_48, - swish_49, - swish_5, - swish_50, - swish_51, - swish_52, - swish_53, - swish_54, - swish_6, - swish_7, - swish_8, - swish_9, - ) + # pd_op.multiply: (1x24276x10xf32) <- (1x24276x10xf32, 1x24276x1xf32) + multiply_0 = paddle._C_ops.multiply(index_select_0, unsqueeze_1) + del index_select_0, unsqueeze_1, where_1 - return swish_0 + return reshape_0, multiply_0 diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py index a4133e248..8b1378917 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_7/weight_meta.py @@ -1,3989 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [1024] - dtype = "float32" - min_val = float("-3.75937") - max_val = float("-0.734") - mean = float("-2.18719") - std = float("0.428746") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [1024] - dtype = "float32" - min_val = float("1.61944") - max_val = float("4.44114") - mean = float("3.08041") - std = float("0.254214") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [1024] - dtype = "float32" - min_val = float("0.0050978") - max_val = float("0.0274139") - mean = float("0.00876083") - std = float("0.00191502") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [1024] - dtype = "float32" - min_val = float("-0.173441") - max_val = float("0.132182") - mean = float("-0.0624446") - std = float("0.0318177") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [1024, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0420016") - max_val = float("0.0672891") - mean = float("-0.000434506") - std = float("0.00419984") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [768] - dtype = "float32" - min_val = float("-0.0144958") - max_val = float("0.00204154") - mean = float("-0.000784991") - std = float("0.00208566") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [768, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0809974") - max_val = float("0.144837") - mean = float("-0.000290719") - std = float("0.0016779") - data = None - - -class 
Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [384] - dtype = "float32" - min_val = float("-1.77404") - max_val = float("0.318904") - mean = float("-0.31075") - std = float("0.291253") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = float("0.188368") - max_val = float("1.82104") - mean = float("0.60964") - std = float("0.262596") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384] - dtype = "float32" - min_val = float("7.63933e-05") - max_val = float("0.00106861") - mean = float("0.000259708") - std = float("0.000131578") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384] - dtype = "float32" - min_val = float("-0.0655344") - max_val = float("0.0775217") - mean = float("0.0238682") - std = float("0.0176001") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.020871") - max_val = float("0.0273244") - mean = float("-0.000414716") - std = float("0.00284754") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = "float32" - min_val = float("-1.77405") - max_val = float("0.319251") - mean = float("-0.310681") - std = float("0.291275") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("0.335122") - max_val = float("2.60483") - mean = float("1.02609") - std = float("0.290246") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384] - dtype = "float32" - min_val = float("0.000763408") - max_val = float("0.00774847") - mean = float("0.0023501") - std = float("0.000855015") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-0.228802") - max_val = float("0.161783") - mean = float("0.0348261") - std = float("0.0422183") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0185255") - max_val = float("0.0282844") - mean = float("-7.21101e-05") - std = float("0.00183304") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("-2.58205") - max_val = float("0.0326997") - mean = float("-1.56844") - std = float("0.416017") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384] - dtype = "float32" - min_val = float("0.51894") - max_val = float("1.64424") - mean = float("1.13558") - std = float("0.149427") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [384] - dtype = "float32" - min_val = float("0.0432612") - max_val = float("0.263912") - mean = float("0.0990143") - std = float("0.0258689") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384] - dtype = "float32" - min_val = float("-1.05647") - max_val = float("0.500171") - mean = float("-0.284757") - std = float("0.144218") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0217847") - max_val = float("0.0601331") - mean = float("-0.000214232") - std = 
float("0.00242153") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("-1.93932") - max_val = float("0.644238") - mean = float("-0.57485") - std = float("0.358678") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("0.163976") - max_val = float("2.06584") - mean = float("0.56203") - std = float("0.227231") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [384] - dtype = "float32" - min_val = float("8.27966e-05") - max_val = float("0.00179396") - mean = float("0.000297678") - std = float("0.000146921") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [384] - dtype = "float32" - min_val = float("-0.039417") - max_val = float("0.0723179") - mean = float("0.0222404") - std = float("0.0153684") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0311026") - max_val = float("0.039225") - mean = float("-0.000409791") - std = float("0.00262815") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [384] - dtype = "float32" - min_val = float("-1.9394") - max_val = float("0.644918") - mean = float("-0.574762") - std = float("0.358753") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("0.583818") - max_val = float("2.15633") - mean = float("1.08411") - std = float("0.255713") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("0.00147808") - max_val = float("0.0112958") - mean = float("0.00356599") - std = float("0.00110113") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384] - dtype = "float32" - min_val = float("-0.114487") - max_val = float("0.168596") - mean = float("0.0403135") - std = float("0.0412827") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0211861") - max_val = float("0.0312284") - mean = float("-9.86606e-05") - std = float("0.00198109") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384] - dtype = "float32" - min_val = float("-2.39618") - max_val = float("0.845899") - mean = float("-1.40537") - std = float("0.36063") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("0.454223") - max_val = float("1.91875") - mean = float("1.16633") - std = float("0.147984") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384] - dtype = "float32" - min_val = float("0.0366463") - max_val = float("0.164533") - mean = float("0.0661917") - std = float("0.0162349") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-0.915528") - max_val = float("0.831942") - mean = float("-0.196761") - std = float("0.117911") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0304568") - max_val = float("0.0446889") - 
mean = float("-0.000206096") - std = float("0.00245489") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("-1.87628") - max_val = float("0.453077") - mean = float("-0.485305") - std = float("0.376481") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384] - dtype = "float32" - min_val = float("0.0771953") - max_val = float("2.11917") - mean = float("0.441977") - std = float("0.217648") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [384] - dtype = "float32" - min_val = float("7.36916e-05") - max_val = float("0.00170445") - mean = float("0.000357372") - std = float("0.000182226") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384] - dtype = "float32" - min_val = float("-0.0529189") - max_val = float("0.0858856") - mean = float("0.0268843") - std = float("0.0175464") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.0213328") - max_val = float("0.0283453") - mean = float("-0.000505242") - std = float("0.00224656") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384] - dtype = "float32" - min_val = float("-1.87669") - max_val = float("0.45341") - mean = float("-0.485211") - std = float("0.376586") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [384] - dtype = "float32" - min_val = float("0.522977") - max_val = float("2.22431") - mean = float("1.05297") - std = float("0.260052") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [384] - dtype = "float32" - min_val = float("0.0021093") - max_val = float("0.0103458") - mean = float("0.00457088") - std = float("0.00131851") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [384] - dtype = "float32" - min_val = float("-0.272542") - max_val = float("0.182129") - mean = float("0.0462809") - std = float("0.0484224") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = float("-0.0214852") - max_val = float("0.0348977") - mean = float("-0.000101693") - std = float("0.00210424") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [384] - dtype = "float32" - min_val = float("-2.1565") - max_val = float("0.418538") - mean = float("-1.36711") - std = float("0.277506") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [384] - dtype = "float32" - min_val = float("0.707119") - max_val = float("1.63571") - mean = float("1.14297") - std = float("0.101612") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [384] - dtype = "float32" - min_val = float("0.027003") - max_val = float("0.119021") - mean = float("0.0524081") - std = float("0.0141785") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [384] - dtype = "float32" - min_val = float("-0.735058") - max_val = float("0.211464") - mean = float("-0.135262") - std = float("0.0973352") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [384, 384, 3, 3] - dtype = "float32" - min_val = 
float("-0.0300983") - max_val = float("0.05499") - mean = float("-0.000159015") - std = float("0.00235156") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [384] - dtype = "float32" - min_val = float("-2.92344") - max_val = float("1.66439") - mean = float("-0.760407") - std = float("0.643554") - data = None - - -class Program_weight_tensor_parameter_53: - name = "parameter_53" - shape = [384] - dtype = "float32" - min_val = float("0.953228") - max_val = float("2.9182") - mean = float("1.86309") - std = float("0.276205") - data = None - - -class Program_weight_tensor_parameter_54: - name = "parameter_54" - shape = [384] - dtype = "float32" - min_val = float("0.00273344") - max_val = float("0.0130488") - mean = float("0.00578892") - std = float("0.00146091") - data = None - - -class Program_weight_tensor_parameter_55: - name = "parameter_55" - shape = [384] - dtype = "float32" - min_val = float("-0.279522") - max_val = float("0.136057") - mean = float("0.068312") - std = float("0.0329566") - data = None - - -class Program_weight_tensor_parameter_56: - name = "parameter_56" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0411036") - max_val = float("0.048141") - mean = float("-0.000774534") - std = float("0.00548625") - data = None - - -class Program_weight_tensor_parameter_57: - name = "parameter_57" - shape = [384] - dtype = "float32" - min_val = float("-2.24702") - max_val = float("0.681993") - mean = float("-0.777088") - std = float("0.472908") - data = None - - -class Program_weight_tensor_parameter_58: - name = "parameter_58" - shape = [384] - dtype = "float32" - min_val = float("0.965876") - max_val = float("2.89361") - mean = float("2.09705") - std = float("0.305445") - data = None - - -class Program_weight_tensor_parameter_59: - name = "parameter_59" - shape = [384] - dtype = "float32" - min_val = float("0.000836446") - max_val = float("0.0043118") - mean = float("0.00221644") - std = float("0.000544507") - data = None - - -class Program_weight_tensor_parameter_60: - name = "parameter_60" - shape = [384] - dtype = "float32" - min_val = float("-0.0181609") - max_val = float("0.0915652") - mean = float("0.0419498") - std = float("0.0183738") - data = None - - -class Program_weight_tensor_parameter_61: - name = "parameter_61" - shape = [384, 768, 1, 1] - dtype = "float32" - min_val = float("-0.0837021") - max_val = float("0.0611426") - mean = float("-0.00045084") - std = float("0.00374174") - data = None - - -class Program_weight_tensor_parameter_62: - name = "parameter_62" - shape = [768] - dtype = "float32" - min_val = float("-2.40194") - max_val = float("0.642339") - mean = float("-0.908288") - std = float("0.339331") - data = None - - -class Program_weight_tensor_parameter_63: - name = "parameter_63" - shape = [768] - dtype = "float32" - min_val = float("0.53146") - max_val = float("1.90712") - mean = float("0.919684") - std = float("0.149212") - data = None - - -class Program_weight_tensor_parameter_64: - name = "parameter_64" - shape = [768] - dtype = "float32" - min_val = float("0.00736934") - max_val = float("0.074494") - mean = float("0.0176525") - std = float("0.00547046") - data = None - - -class Program_weight_tensor_parameter_65: - name = "parameter_65" - shape = [768] - dtype = "float32" - min_val = float("-0.236448") - max_val = float("0.209185") - mean = float("0.0420968") - std = float("0.0580626") - data = None - - -class Program_weight_tensor_parameter_66: - name = "parameter_66" - shape = [768, 512, 
3, 3] - dtype = "float32" - min_val = float("-0.0383779") - max_val = float("0.0519002") - mean = float("-9.93933e-05") - std = float("0.00244217") - data = None - - -class Program_weight_tensor_parameter_67: - name = "parameter_67" - shape = [512] - dtype = "float32" - min_val = float("-3.39029") - max_val = float("1.66616") - mean = float("-1.16168") - std = float("0.513766") - data = None - - -class Program_weight_tensor_parameter_68: - name = "parameter_68" - shape = [512] - dtype = "float32" - min_val = float("0.520928") - max_val = float("1.67546") - mean = float("1.11104") - std = float("0.148384") - data = None - - -class Program_weight_tensor_parameter_69: - name = "parameter_69" - shape = [512] - dtype = "float32" - min_val = float("0.00220886") - max_val = float("0.0162899") - mean = float("0.00755366") - std = float("0.00191954") - data = None - - -class Program_weight_tensor_parameter_70: - name = "parameter_70" - shape = [512] - dtype = "float32" - min_val = float("-0.159233") - max_val = float("0.0720554") - mean = float("-0.0485279") - std = float("0.0411912") - data = None - - -class Program_weight_tensor_parameter_71: - name = "parameter_71" - shape = [512, 384, 1, 1] - dtype = "float32" - min_val = float("-0.208779") - max_val = float("0.179911") - mean = float("-0.000606249") - std = float("0.0081171") - data = None - - -class Program_weight_tensor_parameter_72: - name = "parameter_72" - shape = [384] - dtype = "float32" - min_val = float("-0.0103559") - max_val = float("0.00155602") - mean = float("-0.00302775") - std = float("0.0023618") - data = None - - -class Program_weight_tensor_parameter_73: - name = "parameter_73" - shape = [384, 384, 1, 1] - dtype = "float32" - min_val = float("-0.204999") - max_val = float("0.141306") - mean = float("-0.00211219") - std = float("0.00500511") - data = None - - -class Program_weight_tensor_parameter_74: - name = "parameter_74" - shape = [192] - dtype = "float32" - min_val = float("-1.97063") - max_val = float("0.41045") - mean = float("-0.348649") - std = float("0.333533") - data = None - - -class Program_weight_tensor_parameter_75: - name = "parameter_75" - shape = [192] - dtype = "float32" - min_val = float("0.0528508") - max_val = float("2.16013") - mean = float("0.581272") - std = float("0.419844") - data = None - - -class Program_weight_tensor_parameter_76: - name = "parameter_76" - shape = [192] - dtype = "float32" - min_val = float("9.84565e-05") - max_val = float("0.00122402") - mean = float("0.000477939") - std = float("0.000224956") - data = None - - -class Program_weight_tensor_parameter_77: - name = "parameter_77" - shape = [192] - dtype = "float32" - min_val = float("-0.0376647") - max_val = float("0.0569873") - mean = float("0.00567798") - std = float("0.015222") - data = None - - -class Program_weight_tensor_parameter_78: - name = "parameter_78" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0210389") - max_val = float("0.0585363") - mean = float("-0.000352054") - std = float("0.00423892") - data = None - - -class Program_weight_tensor_parameter_79: - name = "parameter_79" - shape = [192] - dtype = "float32" - min_val = float("-1.97059") - max_val = float("0.411367") - mean = float("-0.348497") - std = float("0.333596") - data = None - - -class Program_weight_tensor_parameter_80: - name = "parameter_80" - shape = [192] - dtype = "float32" - min_val = float("0.372764") - max_val = float("2.70243") - mean = float("1.20208") - std = float("0.49364") - data = None - - -class 
Program_weight_tensor_parameter_81: - name = "parameter_81" - shape = [192] - dtype = "float32" - min_val = float("0.0014624") - max_val = float("0.0202289") - mean = float("0.00559275") - std = float("0.0020797") - data = None - - -class Program_weight_tensor_parameter_82: - name = "parameter_82" - shape = [192] - dtype = "float32" - min_val = float("-0.115196") - max_val = float("0.163529") - mean = float("0.0192204") - std = float("0.0435021") - data = None - - -class Program_weight_tensor_parameter_83: - name = "parameter_83" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.031927") - max_val = float("0.0389496") - mean = float("-0.000144904") - std = float("0.00325908") - data = None - - -class Program_weight_tensor_parameter_84: - name = "parameter_84" - shape = [192] - dtype = "float32" - min_val = float("-2.89054") - max_val = float("-0.177595") - mean = float("-1.31446") - std = float("0.401195") - data = None - - -class Program_weight_tensor_parameter_85: - name = "parameter_85" - shape = [192] - dtype = "float32" - min_val = float("0.695074") - max_val = float("2.09481") - mean = float("1.17912") - std = float("0.169901") - data = None - - -class Program_weight_tensor_parameter_86: - name = "parameter_86" - shape = [192] - dtype = "float32" - min_val = float("0.0654421") - max_val = float("0.471484") - mean = float("0.138461") - std = float("0.0475155") - data = None - - -class Program_weight_tensor_parameter_87: - name = "parameter_87" - shape = [192] - dtype = "float32" - min_val = float("-2.47419") - max_val = float("1.83595") - mean = float("-0.229004") - std = float("0.395047") - data = None - - -class Program_weight_tensor_parameter_88: - name = "parameter_88" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0350379") - max_val = float("0.0468605") - mean = float("-0.000221381") - std = float("0.00388426") - data = None - - -class Program_weight_tensor_parameter_89: - name = "parameter_89" - shape = [192] - dtype = "float32" - min_val = float("-1.94031") - max_val = float("0.513263") - mean = float("-0.279273") - std = float("0.321486") - data = None - - -class Program_weight_tensor_parameter_90: - name = "parameter_90" - shape = [192] - dtype = "float32" - min_val = float("0.0449424") - max_val = float("1.76947") - mean = float("0.444383") - std = float("0.305669") - data = None - - -class Program_weight_tensor_parameter_91: - name = "parameter_91" - shape = [192] - dtype = "float32" - min_val = float("7.96339e-05") - max_val = float("0.00168176") - mean = float("0.000430774") - std = float("0.000230126") - data = None - - -class Program_weight_tensor_parameter_92: - name = "parameter_92" - shape = [192] - dtype = "float32" - min_val = float("-0.0362367") - max_val = float("0.0459797") - mean = float("0.0087194") - std = float("0.0119612") - data = None - - -class Program_weight_tensor_parameter_93: - name = "parameter_93" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.02483") - max_val = float("0.0404131") - mean = float("-0.000400917") - std = float("0.00391908") - data = None - - -class Program_weight_tensor_parameter_94: - name = "parameter_94" - shape = [192] - dtype = "float32" - min_val = float("-1.94031") - max_val = float("0.514903") - mean = float("-0.279015") - std = float("0.321709") - data = None - - -class Program_weight_tensor_parameter_95: - name = "parameter_95" - shape = [192] - dtype = "float32" - min_val = float("0.481654") - max_val = float("2.27026") - mean = float("1.13859") - std = 
float("0.375612") - data = None - - -class Program_weight_tensor_parameter_96: - name = "parameter_96" - shape = [192] - dtype = "float32" - min_val = float("0.00304728") - max_val = float("0.0144724") - mean = float("0.00647186") - std = float("0.00181328") - data = None - - -class Program_weight_tensor_parameter_97: - name = "parameter_97" - shape = [192] - dtype = "float32" - min_val = float("-0.0801327") - max_val = float("0.116547") - mean = float("0.0356733") - std = float("0.0320593") - data = None - - -class Program_weight_tensor_parameter_98: - name = "parameter_98" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0229799") - max_val = float("0.0371751") - mean = float("-0.000196939") - std = float("0.00352878") - data = None - - -class Program_weight_tensor_parameter_99: - name = "parameter_99" - shape = [192] - dtype = "float32" - min_val = float("-2.50826") - max_val = float("-0.12355") - mean = float("-1.2887") - std = float("0.443822") - data = None - - -class Program_weight_tensor_parameter_100: - name = "parameter_100" - shape = [192] - dtype = "float32" - min_val = float("0.653803") - max_val = float("1.66962") - mean = float("1.19928") - std = float("0.166233") - data = None - - -class Program_weight_tensor_parameter_101: - name = "parameter_101" - shape = [192] - dtype = "float32" - min_val = float("0.0475495") - max_val = float("0.208235") - mean = float("0.0948451") - std = float("0.0245631") - data = None - - -class Program_weight_tensor_parameter_102: - name = "parameter_102" - shape = [192] - dtype = "float32" - min_val = float("-2.1632") - max_val = float("0.473042") - mean = float("-0.118896") - std = float("0.249139") - data = None - - -class Program_weight_tensor_parameter_103: - name = "parameter_103" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.038582") - max_val = float("0.0537646") - mean = float("-0.00026749") - std = float("0.0040656") - data = None - - -class Program_weight_tensor_parameter_104: - name = "parameter_104" - shape = [192] - dtype = "float32" - min_val = float("-1.75738") - max_val = float("0.468608") - mean = float("-0.262263") - std = float("0.335862") - data = None - - -class Program_weight_tensor_parameter_105: - name = "parameter_105" - shape = [192] - dtype = "float32" - min_val = float("0.00305103") - max_val = float("1.67905") - mean = float("0.351948") - std = float("0.251703") - data = None - - -class Program_weight_tensor_parameter_106: - name = "parameter_106" - shape = [192] - dtype = "float32" - min_val = float("1.01992e-06") - max_val = float("0.00222302") - mean = float("0.000398674") - std = float("0.000279493") - data = None - - -class Program_weight_tensor_parameter_107: - name = "parameter_107" - shape = [192] - dtype = "float32" - min_val = float("-0.0314916") - max_val = float("0.0548995") - mean = float("0.0110299") - std = float("0.0122915") - data = None - - -class Program_weight_tensor_parameter_108: - name = "parameter_108" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0307534") - max_val = float("0.0384153") - mean = float("-0.00045859") - std = float("0.00377622") - data = None - - -class Program_weight_tensor_parameter_109: - name = "parameter_109" - shape = [192] - dtype = "float32" - min_val = float("-1.75744") - max_val = float("0.470024") - mean = float("-0.262025") - std = float("0.336099") - data = None - - -class Program_weight_tensor_parameter_110: - name = "parameter_110" - shape = [192] - dtype = "float32" - min_val = float("0.405457") - 
max_val = float("1.97843") - mean = float("1.06603") - std = float("0.334153") - data = None - - -class Program_weight_tensor_parameter_111: - name = "parameter_111" - shape = [192] - dtype = "float32" - min_val = float("0.00267407") - max_val = float("0.0142805") - mean = float("0.00698013") - std = float("0.0019104") - data = None - - -class Program_weight_tensor_parameter_112: - name = "parameter_112" - shape = [192] - dtype = "float32" - min_val = float("-0.0878738") - max_val = float("0.110839") - mean = float("0.0399626") - std = float("0.0323914") - data = None - - -class Program_weight_tensor_parameter_113: - name = "parameter_113" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0336081") - max_val = float("0.0420323") - mean = float("-0.000205836") - std = float("0.00368544") - data = None - - -class Program_weight_tensor_parameter_114: - name = "parameter_114" - shape = [192] - dtype = "float32" - min_val = float("-2.49703") - max_val = float("0.138789") - mean = float("-1.24309") - std = float("0.424468") - data = None - - -class Program_weight_tensor_parameter_115: - name = "parameter_115" - shape = [192] - dtype = "float32" - min_val = float("0.652493") - max_val = float("1.80896") - mean = float("1.16711") - std = float("0.165463") - data = None - - -class Program_weight_tensor_parameter_116: - name = "parameter_116" - shape = [192] - dtype = "float32" - min_val = float("0.0303129") - max_val = float("0.14633") - mean = float("0.0670479") - std = float("0.0163216") - data = None - - -class Program_weight_tensor_parameter_117: - name = "parameter_117" - shape = [192] - dtype = "float32" - min_val = float("-1.70247") - max_val = float("0.30536") - mean = float("-0.0862267") - std = float("0.199355") - data = None - - -class Program_weight_tensor_parameter_118: - name = "parameter_118" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0472912") - max_val = float("0.0583976") - mean = float("-0.000284769") - std = float("0.00417002") - data = None - - -class Program_weight_tensor_parameter_119: - name = "parameter_119" - shape = [192] - dtype = "float32" - min_val = float("-2.07915") - max_val = float("0.533836") - mean = float("-0.272165") - std = float("0.375339") - data = None - - -class Program_weight_tensor_parameter_120: - name = "parameter_120" - shape = [192] - dtype = "float32" - min_val = float("0.000522804") - max_val = float("0.732366") - mean = float("0.21194") - std = float("0.136205") - data = None - - -class Program_weight_tensor_parameter_121: - name = "parameter_121" - shape = [192] - dtype = "float32" - min_val = float("5.9055e-08") - max_val = float("0.000953757") - mean = float("0.000261566") - std = float("0.000147906") - data = None - - -class Program_weight_tensor_parameter_122: - name = "parameter_122" - shape = [192] - dtype = "float32" - min_val = float("-0.0264134") - max_val = float("0.0356786") - mean = float("0.00695978") - std = float("0.00983596") - data = None - - -class Program_weight_tensor_parameter_123: - name = "parameter_123" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0207564") - max_val = float("0.0335475") - mean = float("-0.000292443") - std = float("0.00332227") - data = None - - -class Program_weight_tensor_parameter_124: - name = "parameter_124" - shape = [192] - dtype = "float32" - min_val = float("-2.07924") - max_val = float("0.535791") - mean = float("-0.271976") - std = float("0.375569") - data = None - - -class Program_weight_tensor_parameter_125: - name = 
"parameter_125" - shape = [192] - dtype = "float32" - min_val = float("0.395086") - max_val = float("1.96267") - mean = float("0.959008") - std = float("0.303814") - data = None - - -class Program_weight_tensor_parameter_126: - name = "parameter_126" - shape = [192] - dtype = "float32" - min_val = float("0.00304751") - max_val = float("0.015787") - mean = float("0.00706292") - std = float("0.00213169") - data = None - - -class Program_weight_tensor_parameter_127: - name = "parameter_127" - shape = [192] - dtype = "float32" - min_val = float("-0.078765") - max_val = float("0.118653") - mean = float("0.0428106") - std = float("0.0338285") - data = None - - -class Program_weight_tensor_parameter_128: - name = "parameter_128" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0340016") - max_val = float("0.0403474") - mean = float("-0.000216247") - std = float("0.00380285") - data = None - - -class Program_weight_tensor_parameter_129: - name = "parameter_129" - shape = [192] - dtype = "float32" - min_val = float("-2.74084") - max_val = float("-0.0805818") - mean = float("-1.23662") - std = float("0.434286") - data = None - - -class Program_weight_tensor_parameter_130: - name = "parameter_130" - shape = [192] - dtype = "float32" - min_val = float("0.761952") - max_val = float("1.62053") - mean = float("1.15094") - std = float("0.142444") - data = None - - -class Program_weight_tensor_parameter_131: - name = "parameter_131" - shape = [192] - dtype = "float32" - min_val = float("0.0276638") - max_val = float("0.0803679") - mean = float("0.0486605") - std = float("0.0101769") - data = None - - -class Program_weight_tensor_parameter_132: - name = "parameter_132" - shape = [192] - dtype = "float32" - min_val = float("-1.39612") - max_val = float("0.291383") - mean = float("-0.0742001") - std = float("0.166863") - data = None - - -class Program_weight_tensor_parameter_133: - name = "parameter_133" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0589398") - max_val = float("0.0606418") - mean = float("-0.000300541") - std = float("0.00415388") - data = None - - -class Program_weight_tensor_parameter_134: - name = "parameter_134" - shape = [192] - dtype = "float32" - min_val = float("-1.212") - max_val = float("0.447452") - mean = float("-0.232044") - std = float("0.339385") - data = None - - -class Program_weight_tensor_parameter_135: - name = "parameter_135" - shape = [192] - dtype = "float32" - min_val = float("-9.43381e-05") - max_val = float("0.678118") - mean = float("0.192025") - std = float("0.120758") - data = None - - -class Program_weight_tensor_parameter_136: - name = "parameter_136" - shape = [192] - dtype = "float32" - min_val = float("2.4814e-10") - max_val = float("0.000962865") - mean = float("0.000259823") - std = float("0.000158281") - data = None - - -class Program_weight_tensor_parameter_137: - name = "parameter_137" - shape = [192] - dtype = "float32" - min_val = float("-0.0444415") - max_val = float("0.0432657") - mean = float("0.00752981") - std = float("0.0124547") - data = None - - -class Program_weight_tensor_parameter_138: - name = "parameter_138" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0374404") - max_val = float("0.0395949") - mean = float("-0.000292615") - std = float("0.00342625") - data = None - - -class Program_weight_tensor_parameter_139: - name = "parameter_139" - shape = [192] - dtype = "float32" - min_val = float("-1.21197") - max_val = float("0.448806") - mean = float("-0.231853") - std = 
float("0.339659") - data = None - - -class Program_weight_tensor_parameter_140: - name = "parameter_140" - shape = [192] - dtype = "float32" - min_val = float("0.382853") - max_val = float("1.56358") - mean = float("0.852209") - std = float("0.259926") - data = None - - -class Program_weight_tensor_parameter_141: - name = "parameter_141" - shape = [192] - dtype = "float32" - min_val = float("0.00286251") - max_val = float("0.0142248") - mean = float("0.00680236") - std = float("0.00188027") - data = None - - -class Program_weight_tensor_parameter_142: - name = "parameter_142" - shape = [192] - dtype = "float32" - min_val = float("-0.0777897") - max_val = float("0.150363") - mean = float("0.0469745") - std = float("0.0370425") - data = None - - -class Program_weight_tensor_parameter_143: - name = "parameter_143" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0368355") - max_val = float("0.0400254") - mean = float("-0.000211959") - std = float("0.00380574") - data = None - - -class Program_weight_tensor_parameter_144: - name = "parameter_144" - shape = [192] - dtype = "float32" - min_val = float("-2.48699") - max_val = float("-0.132487") - mean = float("-1.2498") - std = float("0.418473") - data = None - - -class Program_weight_tensor_parameter_145: - name = "parameter_145" - shape = [192] - dtype = "float32" - min_val = float("0.689021") - max_val = float("1.51961") - mean = float("1.12491") - std = float("0.134826") - data = None - - -class Program_weight_tensor_parameter_146: - name = "parameter_146" - shape = [192] - dtype = "float32" - min_val = float("0.0194344") - max_val = float("0.0647326") - mean = float("0.0353335") - std = float("0.00848713") - data = None - - -class Program_weight_tensor_parameter_147: - name = "parameter_147" - shape = [192] - dtype = "float32" - min_val = float("-0.842031") - max_val = float("0.288259") - mean = float("-0.0809481") - std = float("0.135503") - data = None - - -class Program_weight_tensor_parameter_148: - name = "parameter_148" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0647608") - max_val = float("0.0671244") - mean = float("-0.000301379") - std = float("0.00415559") - data = None - - -class Program_weight_tensor_parameter_149: - name = "parameter_149" - shape = [192] - dtype = "float32" - min_val = float("-1.21773") - max_val = float("0.49966") - mean = float("-0.167333") - std = float("0.293611") - data = None - - -class Program_weight_tensor_parameter_150: - name = "parameter_150" - shape = [192] - dtype = "float32" - min_val = float("0.00864435") - max_val = float("1.53701") - mean = float("0.238131") - std = float("0.21185") - data = None - - -class Program_weight_tensor_parameter_151: - name = "parameter_151" - shape = [192] - dtype = "float32" - min_val = float("2.34858e-05") - max_val = float("0.00710491") - mean = float("0.000531262") - std = float("0.00068873") - data = None - - -class Program_weight_tensor_parameter_152: - name = "parameter_152" - shape = [192] - dtype = "float32" - min_val = float("-0.0691024") - max_val = float("0.101541") - mean = float("0.0105168") - std = float("0.0186603") - data = None - - -class Program_weight_tensor_parameter_153: - name = "parameter_153" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.0626678") - max_val = float("0.0382933") - mean = float("-0.000453582") - std = float("0.00413962") - data = None - - -class Program_weight_tensor_parameter_154: - name = "parameter_154" - shape = [192] - dtype = "float32" - min_val = 
float("-1.21774") - max_val = float("0.50078") - mean = float("-0.167049") - std = float("0.293829") - data = None - - -class Program_weight_tensor_parameter_155: - name = "parameter_155" - shape = [192] - dtype = "float32" - min_val = float("0.353208") - max_val = float("1.45018") - mean = float("0.756982") - std = float("0.216639") - data = None - - -class Program_weight_tensor_parameter_156: - name = "parameter_156" - shape = [192] - dtype = "float32" - min_val = float("0.00481832") - max_val = float("0.0211758") - mean = float("0.00953731") - std = float("0.00267146") - data = None - - -class Program_weight_tensor_parameter_157: - name = "parameter_157" - shape = [192] - dtype = "float32" - min_val = float("-0.103005") - max_val = float("0.150479") - mean = float("0.0568873") - std = float("0.0497249") - data = None - - -class Program_weight_tensor_parameter_158: - name = "parameter_158" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0712483") - max_val = float("0.0533123") - mean = float("-0.000260747") - std = float("0.00375359") - data = None - - -class Program_weight_tensor_parameter_159: - name = "parameter_159" - shape = [192] - dtype = "float32" - min_val = float("-1.87984") - max_val = float("-0.210289") - mean = float("-1.14605") - std = float("0.325945") - data = None - - -class Program_weight_tensor_parameter_160: - name = "parameter_160" - shape = [192] - dtype = "float32" - min_val = float("0.790161") - max_val = float("1.59635") - mean = float("1.12149") - std = float("0.129857") - data = None - - -class Program_weight_tensor_parameter_161: - name = "parameter_161" - shape = [192] - dtype = "float32" - min_val = float("0.0174547") - max_val = float("0.0659133") - mean = float("0.031237") - std = float("0.00884456") - data = None - - -class Program_weight_tensor_parameter_162: - name = "parameter_162" - shape = [192] - dtype = "float32" - min_val = float("-0.857208") - max_val = float("0.269781") - mean = float("-0.0676028") - std = float("0.134013") - data = None - - -class Program_weight_tensor_parameter_163: - name = "parameter_163" - shape = [192, 192, 3, 3] - dtype = "float32" - min_val = float("-0.0680887") - max_val = float("0.0796042") - mean = float("-0.000244907") - std = float("0.0040245") - data = None - - -class Program_weight_tensor_parameter_164: - name = "parameter_164" - shape = [192] - dtype = "float32" - min_val = float("-2.86208") - max_val = float("1.58104") - mean = float("-0.027572") - std = float("0.747892") - data = None - - -class Program_weight_tensor_parameter_165: - name = "parameter_165" - shape = [192] - dtype = "float32" - min_val = float("0.490153") - max_val = float("2.07789") - mean = float("0.900423") - std = float("0.231981") - data = None - - -class Program_weight_tensor_parameter_166: - name = "parameter_166" - shape = [192] - dtype = "float32" - min_val = float("0.012085") - max_val = float("0.0729411") - mean = float("0.0254063") - std = float("0.00999328") - data = None - - -class Program_weight_tensor_parameter_167: - name = "parameter_167" - shape = [192] - dtype = "float32" - min_val = float("-0.232877") - max_val = float("0.322739") - mean = float("-0.043425") - std = float("0.0608633") - data = None - - -class Program_weight_tensor_parameter_168: - name = "parameter_168" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.112904") - max_val = float("0.101906") - mean = float("-0.000605477") - std = float("0.00869645") - data = None - - -class Program_weight_tensor_parameter_169: - name = 
"parameter_169" - shape = [192] - dtype = "float32" - min_val = float("-2.96795") - max_val = float("1.66848") - mean = float("0.0967615") - std = float("0.663297") - data = None - - -class Program_weight_tensor_parameter_170: - name = "parameter_170" - shape = [192] - dtype = "float32" - min_val = float("0.830405") - max_val = float("5.55794") - mean = float("1.91324") - std = float("0.933276") - data = None - - -class Program_weight_tensor_parameter_171: - name = "parameter_171" - shape = [192] - dtype = "float32" - min_val = float("0.00638727") - max_val = float("0.0461032") - mean = float("0.0175233") - std = float("0.00555475") - data = None - - -class Program_weight_tensor_parameter_172: - name = "parameter_172" - shape = [192] - dtype = "float32" - min_val = float("-0.14477") - max_val = float("0.154899") - mean = float("-0.0220724") - std = float("0.0559826") - data = None - - -class Program_weight_tensor_parameter_173: - name = "parameter_173" - shape = [192, 384, 1, 1] - dtype = "float32" - min_val = float("-0.100414") - max_val = float("0.0965722") - mean = float("-0.000481739") - std = float("0.00788359") - data = None - - -class Program_weight_tensor_parameter_174: - name = "parameter_174" - shape = [384] - dtype = "float32" - min_val = float("-2.9234") - max_val = float("1.32689") - mean = float("-0.300856") - std = float("0.563737") - data = None - - -class Program_weight_tensor_parameter_175: - name = "parameter_175" - shape = [384] - dtype = "float32" - min_val = float("0.633896") - max_val = float("2.47246") - mean = float("1.15988") - std = float("0.257349") - data = None - - -class Program_weight_tensor_parameter_176: - name = "parameter_176" - shape = [384] - dtype = "float32" - min_val = float("0.0120681") - max_val = float("0.111573") - mean = float("0.027173") - std = float("0.0132211") - data = None - - -class Program_weight_tensor_parameter_177: - name = "parameter_177" - shape = [384] - dtype = "float32" - min_val = float("-0.269578") - max_val = float("0.241792") - mean = float("0.0299257") - std = float("0.0746028") - data = None - - -class Program_weight_tensor_parameter_178: - name = "parameter_178" - shape = [384, 256, 3, 3] - dtype = "float32" - min_val = float("-0.0777711") - max_val = float("0.0733026") - mean = float("-9.30129e-05") - std = float("0.00423326") - data = None - - -class Program_weight_tensor_parameter_179: - name = "parameter_179" - shape = [256] - dtype = "float32" - min_val = float("-2.04675") - max_val = float("1.2869") - mean = float("-0.92413") - std = float("0.542635") - data = None - - -class Program_weight_tensor_parameter_180: - name = "parameter_180" - shape = [256] - dtype = "float32" - min_val = float("0.509654") - max_val = float("1.69024") - mean = float("1.05364") - std = float("0.177449") - data = None - - -class Program_weight_tensor_parameter_181: - name = "parameter_181" - shape = [256] - dtype = "float32" - min_val = float("0.0016847") - max_val = float("0.0202013") - mean = float("0.00552268") - std = float("0.00242365") - data = None - - -class Program_weight_tensor_parameter_182: - name = "parameter_182" - shape = [256] - dtype = "float32" - min_val = float("-0.247824") - max_val = float("0.180174") - mean = float("-0.0483161") - std = float("0.064182") - data = None - - -class Program_weight_tensor_parameter_183: - name = "parameter_183" - shape = [256, 192, 1, 1] - dtype = "float32" - min_val = float("-0.211445") - max_val = float("0.154025") - mean = float("-0.00090718") - std = float("0.0139364") - data = None - 
- -class Program_weight_tensor_parameter_184: - name = "parameter_184" - shape = [192] - dtype = "float32" - min_val = float("-0.0146056") - max_val = float("0.00252242") - mean = float("-0.00513018") - std = float("0.00389486") - data = None - - -class Program_weight_tensor_parameter_185: - name = "parameter_185" - shape = [192, 192, 1, 1] - dtype = "float32" - min_val = float("-0.340895") - max_val = float("0.243469") - mean = float("-0.00395929") - std = float("0.0107136") - data = None - - -class Program_weight_tensor_parameter_186: - name = "parameter_186" - shape = [96] - dtype = "float32" - min_val = float("-1.9141") - max_val = float("0.53448") - mean = float("-0.208812") - std = float("0.434585") - data = None - - -class Program_weight_tensor_parameter_187: - name = "parameter_187" - shape = [96] - dtype = "float32" - min_val = float("0.139627") - max_val = float("3.23019") - mean = float("0.63562") - std = float("0.668608") - data = None - - -class Program_weight_tensor_parameter_188: - name = "parameter_188" - shape = [96] - dtype = "float32" - min_val = float("9.81546e-05") - max_val = float("0.00262635") - mean = float("0.000631594") - std = float("0.000470416") - data = None - - -class Program_weight_tensor_parameter_189: - name = "parameter_189" - shape = [96] - dtype = "float32" - min_val = float("-0.0508496") - max_val = float("0.0645139") - mean = float("0.0073241") - std = float("0.0226978") - data = None - - -class Program_weight_tensor_parameter_190: - name = "parameter_190" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0529209") - max_val = float("0.0938109") - mean = float("-0.00068654") - std = float("0.00780134") - data = None - - -class Program_weight_tensor_parameter_191: - name = "parameter_191" - shape = [96] - dtype = "float32" - min_val = float("-1.91385") - max_val = float("0.535947") - mean = float("-0.208472") - std = float("0.434758") - data = None - - -class Program_weight_tensor_parameter_192: - name = "parameter_192" - shape = [96] - dtype = "float32" - min_val = float("0.343945") - max_val = float("5.46861") - mean = float("1.08565") - std = float("0.883653") - data = None - - -class Program_weight_tensor_parameter_193: - name = "parameter_193" - shape = [96] - dtype = "float32" - min_val = float("0.000857905") - max_val = float("0.0144521") - mean = float("0.00502113") - std = float("0.0025215") - data = None - - -class Program_weight_tensor_parameter_194: - name = "parameter_194" - shape = [96] - dtype = "float32" - min_val = float("-0.134633") - max_val = float("0.206261") - mean = float("0.0108598") - std = float("0.0610727") - data = None - - -class Program_weight_tensor_parameter_195: - name = "parameter_195" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0417476") - max_val = float("0.0707409") - mean = float("-0.000200496") - std = float("0.00586268") - data = None - - -class Program_weight_tensor_parameter_196: - name = "parameter_196" - shape = [96] - dtype = "float32" - min_val = float("-2.46669") - max_val = float("-0.0188941") - mean = float("-1.22596") - std = float("0.444206") - data = None - - -class Program_weight_tensor_parameter_197: - name = "parameter_197" - shape = [96] - dtype = "float32" - min_val = float("0.540095") - max_val = float("1.63859") - mean = float("0.945542") - std = float("0.172479") - data = None - - -class Program_weight_tensor_parameter_198: - name = "parameter_198" - shape = [96] - dtype = "float32" - min_val = float("0.0347183") - max_val = float("0.227627") - mean = 
float("0.082417") - std = float("0.0336491") - data = None - - -class Program_weight_tensor_parameter_199: - name = "parameter_199" - shape = [96] - dtype = "float32" - min_val = float("-2.59922") - max_val = float("2.15076") - mean = float("-0.188655") - std = float("0.479579") - data = None - - -class Program_weight_tensor_parameter_200: - name = "parameter_200" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.159603") - max_val = float("0.105542") - mean = float("-0.000422661") - std = float("0.00713371") - data = None - - -class Program_weight_tensor_parameter_201: - name = "parameter_201" - shape = [96] - dtype = "float32" - min_val = float("-1.38744") - max_val = float("0.563004") - mean = float("-0.132441") - std = float("0.347447") - data = None - - -class Program_weight_tensor_parameter_202: - name = "parameter_202" - shape = [96] - dtype = "float32" - min_val = float("0.0452771") - max_val = float("1.86502") - mean = float("0.460871") - std = float("0.366358") - data = None - - -class Program_weight_tensor_parameter_203: - name = "parameter_203" - shape = [96] - dtype = "float32" - min_val = float("7.20046e-05") - max_val = float("0.00271049") - mean = float("0.000780889") - std = float("0.000618051") - data = None - - -class Program_weight_tensor_parameter_204: - name = "parameter_204" - shape = [96] - dtype = "float32" - min_val = float("-0.0499407") - max_val = float("0.0480118") - mean = float("0.00767865") - std = float("0.0176588") - data = None - - -class Program_weight_tensor_parameter_205: - name = "parameter_205" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0484855") - max_val = float("0.0469527") - mean = float("-0.000557248") - std = float("0.00696514") - data = None - - -class Program_weight_tensor_parameter_206: - name = "parameter_206" - shape = [96] - dtype = "float32" - min_val = float("-1.38716") - max_val = float("0.565575") - mean = float("-0.131901") - std = float("0.347951") - data = None - - -class Program_weight_tensor_parameter_207: - name = "parameter_207" - shape = [96] - dtype = "float32" - min_val = float("0.373276") - max_val = float("2.32827") - mean = float("0.902354") - std = float("0.426303") - data = None - - -class Program_weight_tensor_parameter_208: - name = "parameter_208" - shape = [96] - dtype = "float32" - min_val = float("0.00300443") - max_val = float("0.0229962") - mean = float("0.00858887") - std = float("0.00415652") - data = None - - -class Program_weight_tensor_parameter_209: - name = "parameter_209" - shape = [96] - dtype = "float32" - min_val = float("-0.106265") - max_val = float("0.119063") - mean = float("0.0359685") - std = float("0.0431121") - data = None - - -class Program_weight_tensor_parameter_210: - name = "parameter_210" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0601192") - max_val = float("0.0479345") - mean = float("-0.000334461") - std = float("0.00588243") - data = None - - -class Program_weight_tensor_parameter_211: - name = "parameter_211" - shape = [96] - dtype = "float32" - min_val = float("-3.32059") - max_val = float("0.366033") - mean = float("-1.1777") - std = float("0.556588") - data = None - - -class Program_weight_tensor_parameter_212: - name = "parameter_212" - shape = [96] - dtype = "float32" - min_val = float("0.470758") - max_val = float("1.9813") - mean = float("1.03925") - std = float("0.238611") - data = None - - -class Program_weight_tensor_parameter_213: - name = "parameter_213" - shape = [96] - dtype = "float32" - min_val = 
float("0.0279332") - max_val = float("0.176668") - mean = float("0.0504175") - std = float("0.0177105") - data = None - - -class Program_weight_tensor_parameter_214: - name = "parameter_214" - shape = [96] - dtype = "float32" - min_val = float("-1.05972") - max_val = float("0.787961") - mean = float("-0.0421876") - std = float("0.278962") - data = None - - -class Program_weight_tensor_parameter_215: - name = "parameter_215" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.152735") - max_val = float("0.158912") - mean = float("-0.000426001") - std = float("0.00705743") - data = None - - -class Program_weight_tensor_parameter_216: - name = "parameter_216" - shape = [96] - dtype = "float32" - min_val = float("-1.24949") - max_val = float("0.583942") - mean = float("-0.109112") - std = float("0.292117") - data = None - - -class Program_weight_tensor_parameter_217: - name = "parameter_217" - shape = [96] - dtype = "float32" - min_val = float("0.0224878") - max_val = float("1.27796") - mean = float("0.324443") - std = float("0.192946") - data = None - - -class Program_weight_tensor_parameter_218: - name = "parameter_218" - shape = [96] - dtype = "float32" - min_val = float("2.48592e-05") - max_val = float("0.00308798") - mean = float("0.000656812") - std = float("0.000490412") - data = None - - -class Program_weight_tensor_parameter_219: - name = "parameter_219" - shape = [96] - dtype = "float32" - min_val = float("-0.0398012") - max_val = float("0.0538955") - mean = float("0.00423704") - std = float("0.0172967") - data = None - - -class Program_weight_tensor_parameter_220: - name = "parameter_220" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0406747") - max_val = float("0.0494878") - mean = float("-0.000325615") - std = float("0.0071059") - data = None - - -class Program_weight_tensor_parameter_221: - name = "parameter_221" - shape = [96] - dtype = "float32" - min_val = float("-1.24929") - max_val = float("0.586311") - mean = float("-0.108658") - std = float("0.29268") - data = None - - -class Program_weight_tensor_parameter_222: - name = "parameter_222" - shape = [96] - dtype = "float32" - min_val = float("0.311326") - max_val = float("1.67043") - mean = float("0.747441") - std = float("0.257878") - data = None - - -class Program_weight_tensor_parameter_223: - name = "parameter_223" - shape = [96] - dtype = "float32" - min_val = float("0.00302674") - max_val = float("0.0184666") - mean = float("0.00857766") - std = float("0.0033254") - data = None - - -class Program_weight_tensor_parameter_224: - name = "parameter_224" - shape = [96] - dtype = "float32" - min_val = float("-0.105385") - max_val = float("0.147591") - mean = float("0.0293962") - std = float("0.0383478") - data = None - - -class Program_weight_tensor_parameter_225: - name = "parameter_225" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0728298") - max_val = float("0.065903") - mean = float("-0.000300919") - std = float("0.00597289") - data = None - - -class Program_weight_tensor_parameter_226: - name = "parameter_226" - shape = [96] - dtype = "float32" - min_val = float("-3.5826") - max_val = float("0.291706") - mean = float("-1.12744") - std = float("0.572685") - data = None - - -class Program_weight_tensor_parameter_227: - name = "parameter_227" - shape = [96] - dtype = "float32" - min_val = float("0.511064") - max_val = float("2.19222") - mean = float("1.05217") - std = float("0.238287") - data = None - - -class Program_weight_tensor_parameter_228: - name = 
"parameter_228" - shape = [96] - dtype = "float32" - min_val = float("0.021508") - max_val = float("0.0772456") - mean = float("0.0390789") - std = float("0.00924531") - data = None - - -class Program_weight_tensor_parameter_229: - name = "parameter_229" - shape = [96] - dtype = "float32" - min_val = float("-0.95569") - max_val = float("0.64461") - mean = float("-0.0425366") - std = float("0.216225") - data = None - - -class Program_weight_tensor_parameter_230: - name = "parameter_230" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0984925") - max_val = float("0.137263") - mean = float("-0.000483231") - std = float("0.00714155") - data = None - - -class Program_weight_tensor_parameter_231: - name = "parameter_231" - shape = [96] - dtype = "float32" - min_val = float("-0.891765") - max_val = float("0.530315") - mean = float("-0.160042") - std = float("0.28168") - data = None - - -class Program_weight_tensor_parameter_232: - name = "parameter_232" - shape = [96] - dtype = "float32" - min_val = float("0.0202036") - max_val = float("1.40549") - mean = float("0.324747") - std = float("0.213549") - data = None - - -class Program_weight_tensor_parameter_233: - name = "parameter_233" - shape = [96] - dtype = "float32" - min_val = float("5.1999e-05") - max_val = float("0.00308025") - mean = float("0.000681748") - std = float("0.000468256") - data = None - - -class Program_weight_tensor_parameter_234: - name = "parameter_234" - shape = [96] - dtype = "float32" - min_val = float("-0.0356116") - max_val = float("0.0543912") - mean = float("0.00763867") - std = float("0.0160098") - data = None - - -class Program_weight_tensor_parameter_235: - name = "parameter_235" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.050403") - max_val = float("0.0470333") - mean = float("-0.000602859") - std = float("0.00719125") - data = None - - -class Program_weight_tensor_parameter_236: - name = "parameter_236" - shape = [96] - dtype = "float32" - min_val = float("-0.891522") - max_val = float("0.532005") - mean = float("-0.15962") - std = float("0.282144") - data = None - - -class Program_weight_tensor_parameter_237: - name = "parameter_237" - shape = [96] - dtype = "float32" - min_val = float("0.170998") - max_val = float("1.78064") - mean = float("0.708933") - std = float("0.284476") - data = None - - -class Program_weight_tensor_parameter_238: - name = "parameter_238" - shape = [96] - dtype = "float32" - min_val = float("0.00181135") - max_val = float("0.0235388") - mean = float("0.00884351") - std = float("0.00329263") - data = None - - -class Program_weight_tensor_parameter_239: - name = "parameter_239" - shape = [96] - dtype = "float32" - min_val = float("-0.0317818") - max_val = float("0.148669") - mean = float("0.0443214") - std = float("0.0385248") - data = None - - -class Program_weight_tensor_parameter_240: - name = "parameter_240" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0673552") - max_val = float("0.0665555") - mean = float("-0.000406403") - std = float("0.00600122") - data = None - - -class Program_weight_tensor_parameter_241: - name = "parameter_241" - shape = [96] - dtype = "float32" - min_val = float("-2.65797") - max_val = float("0.0644665") - mean = float("-1.06329") - std = float("0.488575") - data = None - - -class Program_weight_tensor_parameter_242: - name = "parameter_242" - shape = [96] - dtype = "float32" - min_val = float("0.510122") - max_val = float("1.73722") - mean = float("1.01545") - std = float("0.193669") - data = None 
- - -class Program_weight_tensor_parameter_243: - name = "parameter_243" - shape = [96] - dtype = "float32" - min_val = float("0.0172563") - max_val = float("0.0595435") - mean = float("0.0301509") - std = float("0.00732214") - data = None - - -class Program_weight_tensor_parameter_244: - name = "parameter_244" - shape = [96] - dtype = "float32" - min_val = float("-0.801324") - max_val = float("0.759004") - mean = float("-0.0646748") - std = float("0.211257") - data = None - - -class Program_weight_tensor_parameter_245: - name = "parameter_245" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0799583") - max_val = float("0.12863") - mean = float("-0.000463251") - std = float("0.00696947") - data = None - - -class Program_weight_tensor_parameter_246: - name = "parameter_246" - shape = [96] - dtype = "float32" - min_val = float("-0.979363") - max_val = float("0.488329") - mean = float("-0.1357") - std = float("0.278693") - data = None - - -class Program_weight_tensor_parameter_247: - name = "parameter_247" - shape = [96] - dtype = "float32" - min_val = float("0.0499672") - max_val = float("1.15174") - mean = float("0.296075") - std = float("0.172795") - data = None - - -class Program_weight_tensor_parameter_248: - name = "parameter_248" - shape = [96] - dtype = "float32" - min_val = float("0.000124111") - max_val = float("0.00434228") - mean = float("0.00108239") - std = float("0.000694533") - data = None - - -class Program_weight_tensor_parameter_249: - name = "parameter_249" - shape = [96] - dtype = "float32" - min_val = float("-0.0430023") - max_val = float("0.0614512") - mean = float("0.00682349") - std = float("0.019208") - data = None - - -class Program_weight_tensor_parameter_250: - name = "parameter_250" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0730409") - max_val = float("0.0734237") - mean = float("-0.000668194") - std = float("0.00816827") - data = None - - -class Program_weight_tensor_parameter_251: - name = "parameter_251" - shape = [96] - dtype = "float32" - min_val = float("-0.979598") - max_val = float("0.490087") - mean = float("-0.135308") - std = float("0.279185") - data = None - - -class Program_weight_tensor_parameter_252: - name = "parameter_252" - shape = [96] - dtype = "float32" - min_val = float("0.240111") - max_val = float("1.69891") - mean = float("0.604647") - std = float("0.228294") - data = None - - -class Program_weight_tensor_parameter_253: - name = "parameter_253" - shape = [96] - dtype = "float32" - min_val = float("0.00464956") - max_val = float("0.0447737") - mean = float("0.0124628") - std = float("0.00526411") - data = None - - -class Program_weight_tensor_parameter_254: - name = "parameter_254" - shape = [96] - dtype = "float32" - min_val = float("-0.088988") - max_val = float("0.163347") - mean = float("0.0332765") - std = float("0.0457333") - data = None - - -class Program_weight_tensor_parameter_255: - name = "parameter_255" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.070586") - max_val = float("0.053917") - mean = float("-0.000353734") - std = float("0.00603503") - data = None - - -class Program_weight_tensor_parameter_256: - name = "parameter_256" - shape = [96] - dtype = "float32" - min_val = float("-3.46749") - max_val = float("0.20134") - mean = float("-1.00429") - std = float("0.548683") - data = None - - -class Program_weight_tensor_parameter_257: - name = "parameter_257" - shape = [96] - dtype = "float32" - min_val = float("0.68469") - max_val = float("2.50521") - mean = 
float("1.07421") - std = float("0.212064") - data = None - - -class Program_weight_tensor_parameter_258: - name = "parameter_258" - shape = [96] - dtype = "float32" - min_val = float("0.0128335") - max_val = float("0.0562505") - mean = float("0.0252273") - std = float("0.00835494") - data = None - - -class Program_weight_tensor_parameter_259: - name = "parameter_259" - shape = [96] - dtype = "float32" - min_val = float("-0.594873") - max_val = float("0.694291") - mean = float("-0.0599848") - std = float("0.200504") - data = None - - -class Program_weight_tensor_parameter_260: - name = "parameter_260" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0875016") - max_val = float("0.0958638") - mean = float("-0.000393602") - std = float("0.00713622") - data = None - - -class Program_weight_tensor_parameter_261: - name = "parameter_261" - shape = [96] - dtype = "float32" - min_val = float("-0.623249") - max_val = float("0.450355") - mean = float("-0.0811173") - std = float("0.25665") - data = None - - -class Program_weight_tensor_parameter_262: - name = "parameter_262" - shape = [96] - dtype = "float32" - min_val = float("0.0905173") - max_val = float("1.30172") - mean = float("0.309137") - std = float("0.196898") - data = None - - -class Program_weight_tensor_parameter_263: - name = "parameter_263" - shape = [96] - dtype = "float32" - min_val = float("0.000482307") - max_val = float("0.0212544") - mean = float("0.00391036") - std = float("0.00335167") - data = None - - -class Program_weight_tensor_parameter_264: - name = "parameter_264" - shape = [96] - dtype = "float32" - min_val = float("-0.0380137") - max_val = float("0.0274317") - mean = float("0.000597392") - std = float("0.0117867") - data = None - - -class Program_weight_tensor_parameter_265: - name = "parameter_265" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.0967686") - max_val = float("0.0726096") - mean = float("-0.00111676") - std = float("0.00943776") - data = None - - -class Program_weight_tensor_parameter_266: - name = "parameter_266" - shape = [96] - dtype = "float32" - min_val = float("-0.62253") - max_val = float("0.451504") - mean = float("-0.0806935") - std = float("0.256953") - data = None - - -class Program_weight_tensor_parameter_267: - name = "parameter_267" - shape = [96] - dtype = "float32" - min_val = float("0.210918") - max_val = float("1.42997") - mean = float("0.527932") - std = float("0.258611") - data = None - - -class Program_weight_tensor_parameter_268: - name = "parameter_268" - shape = [96] - dtype = "float32" - min_val = float("0.0108854") - max_val = float("0.101724") - mean = float("0.0340185") - std = float("0.0173202") - data = None - - -class Program_weight_tensor_parameter_269: - name = "parameter_269" - shape = [96] - dtype = "float32" - min_val = float("-0.10483") - max_val = float("0.0991255") - mean = float("-0.00462957") - std = float("0.0392523") - data = None - - -class Program_weight_tensor_parameter_270: - name = "parameter_270" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.0996365") - max_val = float("0.0540305") - mean = float("-0.00042977") - std = float("0.00592197") - data = None - - -class Program_weight_tensor_parameter_271: - name = "parameter_271" - shape = [96] - dtype = "float32" - min_val = float("-2.4099") - max_val = float("0.510062") - mean = float("-0.827896") - std = float("0.467957") - data = None - - -class Program_weight_tensor_parameter_272: - name = "parameter_272" - shape = [96] - dtype = "float32" - min_val 
= float("0.855439") - max_val = float("2.18052") - mean = float("1.27541") - std = float("0.20896") - data = None - - -class Program_weight_tensor_parameter_273: - name = "parameter_273" - shape = [96] - dtype = "float32" - min_val = float("0.0103972") - max_val = float("0.0527158") - mean = float("0.0209256") - std = float("0.00862648") - data = None - - -class Program_weight_tensor_parameter_274: - name = "parameter_274" - shape = [96] - dtype = "float32" - min_val = float("-0.780321") - max_val = float("0.470817") - mean = float("-0.061274") - std = float("0.196346") - data = None - - -class Program_weight_tensor_parameter_275: - name = "parameter_275" - shape = [96, 96, 3, 3] - dtype = "float32" - min_val = float("-0.154701") - max_val = float("0.153806") - mean = float("-0.00026052") - std = float("0.00735431") - data = None - - -class Program_weight_tensor_parameter_276: - name = "parameter_276" - shape = [96] - dtype = "float32" - min_val = float("-3.15956") - max_val = float("1.89061") - mean = float("0.502181") - std = float("0.861277") - data = None - - -class Program_weight_tensor_parameter_277: - name = "parameter_277" - shape = [96] - dtype = "float32" - min_val = float("0.209789") - max_val = float("2.62802") - mean = float("0.557131") - std = float("0.318659") - data = None - - -class Program_weight_tensor_parameter_278: - name = "parameter_278" - shape = [96] - dtype = "float32" - min_val = float("0.00944476") - max_val = float("0.145226") - mean = float("0.0342646") - std = float("0.0234271") - data = None - - -class Program_weight_tensor_parameter_279: - name = "parameter_279" - shape = [96] - dtype = "float32" - min_val = float("-0.271688") - max_val = float("0.303077") - mean = float("-0.0264941") - std = float("0.0868152") - data = None - - -class Program_weight_tensor_parameter_280: - name = "parameter_280" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.190092") - max_val = float("0.235795") - mean = float("-0.00054682") - std = float("0.0152601") - data = None - - -class Program_weight_tensor_parameter_281: - name = "parameter_281" - shape = [96] - dtype = "float32" - min_val = float("-4.92412") - max_val = float("1.57941") - mean = float("0.384226") - std = float("1.04886") - data = None - - -class Program_weight_tensor_parameter_282: - name = "parameter_282" - shape = [96] - dtype = "float32" - min_val = float("0.411425") - max_val = float("6.77791") - mean = float("1.69479") - std = float("1.30749") - data = None - - -class Program_weight_tensor_parameter_283: - name = "parameter_283" - shape = [96] - dtype = "float32" - min_val = float("0.0059326") - max_val = float("0.187703") - mean = float("0.0313027") - std = float("0.0270184") - data = None - - -class Program_weight_tensor_parameter_284: - name = "parameter_284" - shape = [96] - dtype = "float32" - min_val = float("-0.122136") - max_val = float("0.395194") - mean = float("0.0355431") - std = float("0.0933339") - data = None - - -class Program_weight_tensor_parameter_285: - name = "parameter_285" - shape = [96, 192, 1, 1] - dtype = "float32" - min_val = float("-0.115428") - max_val = float("0.143096") - mean = float("0.000288353") - std = float("0.0138526") - data = None - - -class Program_weight_tensor_parameter_286: - name = "parameter_286" - shape = [192] - dtype = "float32" - min_val = float("-2.27512") - max_val = float("1.75006") - mean = float("-0.125702") - std = float("0.740468") - data = None - - -class Program_weight_tensor_parameter_287: - name = "parameter_287" - shape = 
[192] - dtype = "float32" - min_val = float("0.632726") - max_val = float("2.96908") - mean = float("1.08749") - std = float("0.283555") - data = None - - -class Program_weight_tensor_parameter_288: - name = "parameter_288" - shape = [192] - dtype = "float32" - min_val = float("0.0130979") - max_val = float("0.31876") - mean = float("0.04291") - std = float("0.035214") - data = None - - -class Program_weight_tensor_parameter_289: - name = "parameter_289" - shape = [192] - dtype = "float32" - min_val = float("-0.47354") - max_val = float("0.278468") - mean = float("-0.0584653") - std = float("0.115063") - data = None - - -class Program_weight_tensor_parameter_290: - name = "parameter_290" - shape = [192, 128, 3, 3] - dtype = "float32" - min_val = float("-0.0811233") - max_val = float("0.11238") - mean = float("-0.000121273") - std = float("0.00716338") - data = None - - -class Program_weight_tensor_parameter_291: - name = "parameter_291" - shape = [128] - dtype = "float32" - min_val = float("-2.81253") - max_val = float("1.96258") - mean = float("-0.709313") - std = float("0.64886") - data = None - - -class Program_weight_tensor_parameter_292: - name = "parameter_292" - shape = [128] - dtype = "float32" - min_val = float("0.302011") - max_val = float("2.86022") - mean = float("1.01859") - std = float("0.279425") - data = None - - -class Program_weight_tensor_parameter_293: - name = "parameter_293" - shape = [128] - dtype = "float32" - min_val = float("0.000689708") - max_val = float("0.0143167") - mean = float("0.00379586") - std = float("0.00196197") - data = None - - -class Program_weight_tensor_parameter_294: - name = "parameter_294" - shape = [128] - dtype = "float32" - min_val = float("-0.240616") - max_val = float("0.230863") - mean = float("0.00348518") - std = float("0.0801109") - data = None - - -class Program_weight_tensor_parameter_295: - name = "parameter_295" - shape = [128, 96, 1, 1] - dtype = "float32" - min_val = float("-0.16828") - max_val = float("0.191318") - mean = float("-0.00143145") - std = float("0.0216253") - data = None - - -class Program_weight_tensor_parameter_296: - name = "parameter_296" - shape = [96] - dtype = "float32" - min_val = float("-0.0182017") - max_val = float("-0.00100735") - mean = float("-0.00761377") - std = float("0.00459165") - data = None - - -class Program_weight_tensor_parameter_297: - name = "parameter_297" - shape = [96, 96, 1, 1] - dtype = "float32" - min_val = float("-0.297058") - max_val = float("0.124247") - mean = float("-0.00811798") - std = float("0.0180434") - data = None - - -class Program_weight_tensor_parameter_298: - name = "parameter_298" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_299: - name = "parameter_299" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_300: - name = "parameter_300" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_301: - name = "parameter_301" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_302: - name = "parameter_302" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0524219") - max_val = float("0.062819") - mean = float("-0.00145834") - std = float("0.0124603") - data = None - - -class Program_weight_tensor_parameter_303: 
- name = "parameter_303" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_304: - name = "parameter_304" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_305: - name = "parameter_305" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_306: - name = "parameter_306" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_307: - name = "parameter_307" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.053396") - max_val = float("0.0780475") - mean = float("-0.000432103") - std = float("0.0105215") - data = None - - -class Program_weight_tensor_parameter_308: - name = "parameter_308" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_309: - name = "parameter_309" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_310: - name = "parameter_310" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_311: - name = "parameter_311" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_312: - name = "parameter_312" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0907736") - max_val = float("0.0889891") - mean = float("-0.000674195") - std = float("0.0115766") - data = None - - -class Program_weight_tensor_parameter_313: - name = "parameter_313" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_314: - name = "parameter_314" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_315: - name = "parameter_315" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_316: - name = "parameter_316" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_317: - name = "parameter_317" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0701343") - max_val = float("0.0744403") - mean = float("-0.000969115") - std = float("0.0132523") - data = None - - -class Program_weight_tensor_parameter_318: - name = "parameter_318" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_319: - name = "parameter_319" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_320: - name = "parameter_320" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_321: - name = "parameter_321" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_322: - name = "parameter_322" - shape = [48, 48, 3, 3] - dtype = "float32" - 
min_val = float("-0.0625249") - max_val = float("0.0628193") - mean = float("-0.000704405") - std = float("0.010522") - data = None - - -class Program_weight_tensor_parameter_323: - name = "parameter_323" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_324: - name = "parameter_324" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_325: - name = "parameter_325" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_326: - name = "parameter_326" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_327: - name = "parameter_327" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.105534") - max_val = float("0.0876318") - mean = float("-0.000291303") - std = float("0.0118198") - data = None - - -class Program_weight_tensor_parameter_328: - name = "parameter_328" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_329: - name = "parameter_329" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_330: - name = "parameter_330" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_331: - name = "parameter_331" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_332: - name = "parameter_332" - shape = [48, 48, 1, 1] - dtype = "float32" - min_val = float("-0.0927544") - max_val = float("0.067179") - mean = float("-0.00167319") - std = float("0.0164656") - data = None - - -class Program_weight_tensor_parameter_333: - name = "parameter_333" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_334: - name = "parameter_334" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_335: - name = "parameter_335" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_336: - name = "parameter_336" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_337: - name = "parameter_337" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.0662936") - max_val = float("0.0926268") - mean = float("-0.000546134") - std = float("0.0110591") - data = None - - -class Program_weight_tensor_parameter_338: - name = "parameter_338" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_339: - name = "parameter_339" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_340: - name = "parameter_340" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_341: - name = "parameter_341" - shape = [48] - dtype = "float32" - 
min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_342: - name = "parameter_342" - shape = [48, 48, 3, 3] - dtype = "float32" - min_val = float("-0.115861") - max_val = float("0.0843934") - mean = float("-0.000390165") - std = float("0.0126271") - data = None - - -class Program_weight_tensor_parameter_343: - name = "parameter_343" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_344: - name = "parameter_344" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_345: - name = "parameter_345" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_346: - name = "parameter_346" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_347: - name = "parameter_347" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.156722") - max_val = float("0.12438") - mean = float("-0.00240073") - std = float("0.0227151") - data = None - - -class Program_weight_tensor_parameter_348: - name = "parameter_348" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_349: - name = "parameter_349" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_350: - name = "parameter_350" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_351: - name = "parameter_351" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_352: - name = "parameter_352" - shape = [48, 96, 1, 1] - dtype = "float32" - min_val = float("-0.133366") - max_val = float("0.190723") - mean = float("-0.000461332") - std = float("0.0215494") - data = None - - -class Program_weight_tensor_parameter_353: - name = "parameter_353" - shape = [96] - dtype = "float32" - min_val = float("-3.40388") - max_val = float("3.27594") - mean = float("0.331") - std = float("1.14502") - data = None - - -class Program_weight_tensor_parameter_354: - name = "parameter_354" - shape = [96] - dtype = "float32" - min_val = float("0.861639") - max_val = float("4.91749") - mean = float("1.91516") - std = float("0.75496") - data = None - - -class Program_weight_tensor_parameter_355: - name = "parameter_355" - shape = [96] - dtype = "float32" - min_val = float("0.674644") - max_val = float("20.4484") - mean = float("2.3946") - std = float("2.42082") - data = None - - -class Program_weight_tensor_parameter_356: - name = "parameter_356" - shape = [96] - dtype = "float32" - min_val = float("-1.41455") - max_val = float("1.80091") - mean = float("-0.328594") - std = float("0.607956") - data = None - - -class Program_weight_tensor_parameter_357: - name = "parameter_357" - shape = [96, 64, 3, 3] - dtype = "float32" - min_val = float("-0.115845") - max_val = float("0.115419") - mean = float("-0.000438744") - std = float("0.0120833") - data = None - - -class Program_weight_tensor_parameter_358: - name = "parameter_358" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_359: - name = "parameter_359" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_360: - name = "parameter_360" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_361: - name = "parameter_361" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_362: - name = "parameter_362" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.153743") - max_val = float("0.135272") - mean = float("-0.000740633") - std = float("0.0191711") - data = None - - -class Program_weight_tensor_parameter_363: - name = "parameter_363" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_364: - name = "parameter_364" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_365: - name = "parameter_365" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_366: - name = "parameter_366" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_367: - name = "parameter_367" - shape = [32, 32, 3, 3] - dtype = "float32" - min_val = float("-0.307002") - max_val = float("0.202588") - mean = float("-4.43961e-05") - std = float("0.025069") - data = None - - -class Program_weight_tensor_parameter_368: - name = "parameter_368" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_369: - name = "parameter_369" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_370: - name = "parameter_370" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_371: - name = "parameter_371" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_372: - name = "parameter_372" - shape = [32, 3, 3, 3] - dtype = "float32" - min_val = float("-0.297631") - max_val = float("0.278985") - mean = float("-0.00146872") - std = float("0.0683342") - data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt index 1ba099c96..d26369202 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/graph_hash.txt @@ -1 +1 @@ -a1e871dca6015fd870e153211f3cd48512ab629d616889d648d8f93c88df3e51 \ No newline at end of file +c570d43f53acac4f3957a2ca875002b813fd4f0945c65adb5e86d7b2292f59e3 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py index 0488f0946..59f05b8d6 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py +++ 
b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/input_meta.py @@ -1,108 +1,68 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1, 12, 27216] - dtype = "float32" - max_val = float("1.0") - mean = float("0.000453165") - std = float("0.0212828") + shape = [1, 24276] + dtype = "bool" + min_val = 0 + max_val = 2 data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [1, 1] - dtype = "int32" - data = [0] + shape = [1, 24276, 4] + dtype = "float32" + min_val = float("-8.51911") + max_val = float("141.539") + mean = float("59.1174") + std = float("39.293") + data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [1, 12, 1] - dtype = "int32" - data = [4, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3] + shape = [1, 24276, 4] + dtype = "float32" + min_val = float("11.9244") + max_val = float("131.375") + mean = float("50.6359") + std = float("16.0055") + data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [1, 27216] + shape = [1, 24276, 10] dtype = "float32" - max_val = float("1.0") - mean = float("0.00543798") - std = float("0.0735419") + max_val = float("0.981539") + mean = float("0.000708745") + std = float("0.0222415") data = None class Program_weight_tensor_data_4: name = "data_4" - shape = [1, 12, 4] + shape = [] dtype = "float32" - data = [ - 810.02, - 1015.02, - 826.828, - 1104.59, - 803.556, - 862.244, - 821.01, - 939.512, - 685.253, - 848.195, - 696.242, - 913.171, - 707.232, - 783.219, - 720.162, - 865.756, - 705.293, - 614.634, - 718.869, - 688.39, - 622.545, - 934.244, - 636.768, - 1037.85, - 625.778, - 567.219, - 640.646, - 632.195, - 605.091, - 567.219, - 617.374, - 632.195, - 538.505, - 763.902, - 548.849, - 864.0, - 536.566, - 567.219, - 550.788, - 621.659, - 513.939, - 978.146, - 529.455, - 1074.73, - 789.98, - 570.732, - 806.788, - 637.463, - ] + data = [172.055] class Program_weight_tensor_data_5: name = "data_5" - shape = [1, 12, 27216] + shape = [1, 24276, 88] dtype = "float32" - max_val = float("0.365907") - mean = float("5.42289e-05") - std = float("0.00308233") + min_val = float("-3.36394") + max_val = float("13.3402") + mean = float("0.828078") + std = float("1.48853") data = None class Program_weight_tensor_data_6: name = "data_6" - shape = [1, 12, 27216] + shape = [24276, 2] dtype = "float32" - max_val = float("0.95733") - mean = float("0.00162842") - std = float("0.0295307") + min_val = float("0.5") + max_val = float("135.5") + mean = float("59.0952") + std = float("38.9487") data = None diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py index 8a1f2862b..bb7813590 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_8/model.py @@ -6,187 +6,509 @@ def __init__(self): super().__init__() def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): - # pd_op.full: (1xi64) <- () + # pd_op.cast: (1x24276xi32) <- (1x24276xb) + cast_0 = paddle._C_ops.cast(data_0, paddle.int32) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [-1] + + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_0 + + # pd_op.assign: (1xi64) <- (1xi64) + assign_2 = full_int_array_0 + + # pd_op.unsqueeze: (1x24276x1xi32) <- (1x24276xi32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(cast_0, full_int_array_0) + del 
cast_0 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_1 = [1, 1, 4] + + # pd_op.tile: (1x24276x4xi32) <- (1x24276x1xi32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_1) + del full_int_array_1, unsqueeze_0 + + # pd_op.cast: (1x24276x4xb) <- (1x24276x4xi32) + cast_1 = paddle._C_ops.cast(tile_0, paddle.bool) + del tile_0 + + # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) + masked_select_0 = paddle._C_ops.masked_select(data_1, cast_1) + del data_1 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_2 = [-1, 4] + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_0 = paddle._C_ops.reshape(masked_select_0, full_int_array_2) + + # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) + masked_select_1 = paddle._C_ops.masked_select(data_2, cast_1) + + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_1 = paddle._C_ops.reshape(masked_select_1, full_int_array_2) + del masked_select_1 + + # pd_op.sum: (1x24276xf32) <- (1x24276x10xf32, 1xi64) + sum_0 = paddle._C_ops.sum(data_3, full_int_array_0, None, False) + del data_3 + + # pd_op.masked_select: (-1xf32) <- (1x24276xf32, 1x24276xb) + masked_select_2 = paddle._C_ops.masked_select(sum_0, data_0) + del sum_0 + + # pd_op.unsqueeze: (-1x1xf32) <- (-1xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(masked_select_2, full_int_array_0) + del masked_select_2 + + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_0 = paddle._C_ops.subtract(reshape_0, reshape_1) + + # pd_op.abs: (-1x4xf32) <- (-1x4xf32) + abs_0 = paddle._C_ops.abs(subtract_0) + + # pd_op.mean_all: (xf32) <- (-1x4xf32) + mean_all_0 = paddle._C_ops.mean_all(abs_0) + + # pd_op.full: (1xi32) <- () full_0 = paddle._C_ops.full( - [1], float("-2"), paddle.int64, paddle.core.CPUPlace() + [1], float("1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.argmax: (1x27216xi64) <- (1x12x27216xf32, 1xi64) - argmax_0 = paddle._C_ops.argmax(data_0, full_0, False, False, paddle.int64) - del full_0 + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(reshape_0, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_0, + split_1, + split_2, + split_3, + ) = split_with_num_0 + del split_with_num_0 + + # pd_op.split_with_num: ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) <- (-1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(reshape_1, 4, full_0) + + # builtin.split: (-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32) <- ([-1x1xf32, -1x1xf32, -1x1xf32, -1x1xf32]) + ( + split_4, + split_5, + split_6, + split_7, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_0 = paddle._C_ops.maximum(split_0, split_4) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_1 = paddle._C_ops.maximum(split_1, split_5) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_0 = paddle._C_ops.minimum(split_2, split_6) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_1 = paddle._C_ops.minimum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_1 = paddle._C_ops.subtract(minimum_0, maximum_0) # pd_op.full: (1xf32) <- () full_1 = paddle._C_ops.full( - [1], float("12"), paddle.float32, paddle.core.CPUPlace() + [1], float("0"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (1x1xi32) <- (1x1xi32, 1xf32) - scale_0 = 
paddle._C_ops.scale(data_1, full_1, float("0"), True) - del data_1, full_1 + # pd_op.assign: (1xf32) <- (1xf32) + assign_3 = full_1 - # pd_op.cast: (1x1xi64) <- (1x1xi32) - cast_0 = paddle._C_ops.cast(scale_0, paddle.int64) - del scale_0 + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.add: (1x27216xi64) <- (1x27216xi64, 1x1xi64) - add_0 = paddle._C_ops.add(argmax_0, cast_0) - del argmax_0, cast_0 + # pd_op.assign: (1xf32) <- (1xf32) + assign_4 = full_2 - # pd_op.flatten: (12xi32) <- (1x12x1xi32) - flatten_0 = paddle._C_ops.flatten(data_2, 0, 2) - del data_2 + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_1, full_1, full_2) - # pd_op.flatten: (27216xi64) <- (1x27216xi64) - flatten_1 = paddle._C_ops.flatten(add_0, 0, 1) - del add_0 + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_2 = paddle._C_ops.subtract(minimum_1, maximum_1) - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("0"), paddle.int32, paddle.core.CPUPlace() - ) + # pd_op.clip: (-1x1xf32) <- (-1x1xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_2, full_1, full_2) - # pd_op.gather: (27216xi32) <- (12xi32, 27216xi64, 1xi32) - gather_0 = paddle._C_ops.gather(flatten_0, flatten_1, full_2) - del flatten_0 + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_0 = paddle._C_ops.multiply(clip_0, clip_1) - # pd_op.full_int_array: (2xi64) <- () - full_int_array_0 = [1, 27216] + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_3 = paddle._C_ops.subtract(split_2, split_0) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_3, split_1) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_1 = paddle._C_ops.multiply(subtract_3, subtract_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_6, split_4) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_6 = paddle._C_ops.subtract(split_7, split_5) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_2 = paddle._C_ops.multiply(subtract_5, subtract_6) + del subtract_5, subtract_6 - # pd_op.reshape: (1x27216xi32) <- (27216xi32, 2xi64) - reshape_1 = paddle._C_ops.reshape(gather_0, full_int_array_0) - del full_int_array_0, gather_0 + # pd_op.add: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + add_0 = paddle._C_ops.add(multiply_1, multiply_2) - # pd_op.full: (xf32) <- () + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_7 = paddle._C_ops.subtract(add_0, multiply_0) + + # pd_op.full: (1xf32) <- () full_3 = paddle._C_ops.full( - [], float("0"), paddle.float32, paddle.framework._current_expected_place() + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.greater_than: (1x27216xb) <- (1x27216xf32, xf32) - greater_than_0 = paddle._C_ops.greater_than(data_3, full_3) - del data_3, full_3 + # pd_op.assign: (1xf32) <- (1xf32) + assign_5 = full_3 + + # pd_op.assign: (1xf32) <- (1xf32) + assign_6 = full_3 + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_7, full_3, float("1e-10"), True) + del subtract_7 + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_2 = paddle._C_ops.divide(multiply_0, scale_0) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_2 = paddle._C_ops.minimum(split_0, split_4) + + # pd_op.minimum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + minimum_3 = 
paddle._C_ops.minimum(split_1, split_5) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_2 = paddle._C_ops.maximum(split_2, split_6) + + # pd_op.maximum: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + maximum_3 = paddle._C_ops.maximum(split_3, split_7) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_8 = paddle._C_ops.subtract(maximum_2, minimum_2) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_9 = paddle._C_ops.subtract(maximum_3, minimum_3) + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_3 = paddle._C_ops.multiply(subtract_8, subtract_9) + + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(multiply_3, full_3, float("1e-10"), True) + del multiply_3 + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_1, scale_0) + + # pd_op.divide: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + divide_3 = paddle._C_ops.divide(subtract_10, scale_1) + + # pd_op.subtract: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + subtract_11 = paddle._C_ops.subtract(divide_2, divide_3) # pd_op.full: (1xf32) <- () full_4 = paddle._C_ops.full( - [1], float("10"), paddle.float32, paddle.core.CPUPlace() + [1], float("-1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.full_like: (1x27216xi32) <- (1x27216xi32, 1xf32) - full_like_0 = paddle._C_ops.full_like( - reshape_1, full_4, paddle.int32, paddle.framework._current_expected_place() - ) - del full_4 + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(subtract_11, full_4, float("1"), True) + del subtract_11 - # pd_op.where: (1x27216xi32) <- (1x27216xb, 1x27216xi32, 1x27216xi32) - where_0 = paddle._C_ops.where(greater_than_0, reshape_1, full_like_0) - del full_like_0, greater_than_0, reshape_1 + # pd_op.scale: (-1x1xf32) <- (-1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(scale_2, full_3, float("0"), True) + del scale_2 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [-1, 4] + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_4 = paddle._C_ops.multiply(scale_3, unsqueeze_1) - # pd_op.reshape: (12x4xf32) <- (1x12x4xf32, 2xi64) - reshape_2 = paddle._C_ops.reshape(data_4, full_int_array_1) - del data_4, full_int_array_1 + # pd_op.full_int_array: (0xi64) <- () + full_int_array_3 = [] - # pd_op.gather: (27216x4xf32) <- (12x4xf32, 27216xi64, 1xi32) - gather_1 = paddle._C_ops.gather(reshape_2, flatten_1, full_2) - del flatten_1, full_2, reshape_2 + # pd_op.assign: (0xi64) <- (0xi64) + assign_7 = full_int_array_3 + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_1 = paddle._C_ops.sum(multiply_4, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(sum_1, data_4) + + # pd_op.unsqueeze: (1x24276x1xb) <- (1x24276xb, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(data_0, full_int_array_0) + del data_0 + + # pd_op.cast: (1x24276x1xi32) <- (1x24276x1xb) + cast_2 = paddle._C_ops.cast(unsqueeze_2, paddle.int32) + del unsqueeze_2 + + # pd_op.full_int_array: (3xi64) <- () + full_int_array_4 = [1, 1, 88] + + # pd_op.tile: (1x24276x88xi32) <- (1x24276x1xi32, 3xi64) + tile_1 = paddle._C_ops.tile(cast_2, full_int_array_4) + del cast_2, full_int_array_4 + + # pd_op.cast: (1x24276x88xb) <- (1x24276x88xi32) + cast_3 = paddle._C_ops.cast(tile_1, paddle.bool) + del tile_1 + + # pd_op.masked_select: (-1xf32) <- (1x24276x88xf32, 1x24276x88xb) + masked_select_3 = paddle._C_ops.masked_select(data_5, cast_3) + del data_5 # pd_op.full_int_array: (3xi64) <- () - 
full_int_array_2 = [1, 27216, 4] + full_int_array_5 = [-1, 4, 22] - # pd_op.reshape: (1x27216x4xf32) <- (27216x4xf32, 3xi64) - reshape_0 = paddle._C_ops.reshape(gather_1, full_int_array_2) - del full_int_array_2, gather_1 + # pd_op.reshape: (-1x4x22xf32) <- (-1xf32, 3xi64) + reshape_2 = paddle._C_ops.reshape(masked_select_3, full_int_array_5) + del full_int_array_5 # pd_op.full: (1xi32) <- () full_5 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() + [1], float("2"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.one_hot: (1x27216x11xf32) <- (1x27216xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - where_0 % paddle.cast(full_5, where_0.dtype), full_5 - ) - del full_5 + # pd_op.split_with_num: ([1x24276x2xf32, 1x24276x2xf32]) <- (1x24276x4xf32, 1xi32) + split_with_num_2 = paddle._C_ops.split_with_num(data_2, 2, full_5) + del data_2, full_5 + + # builtin.split: (1x24276x2xf32, 1x24276x2xf32) <- ([1x24276x2xf32, 1x24276x2xf32]) + ( + split_8, + split_9, + ) = split_with_num_2 + del split_with_num_2 - # pd_op.full: (10xi64) <- () + # pd_op.subtract: (1x24276x2xf32) <- (24276x2xf32, 1x24276x2xf32) + subtract_12 = paddle._C_ops.subtract(data_6, split_8) + del split_8 + + # pd_op.subtract: (1x24276x2xf32) <- (1x24276x2xf32, 24276x2xf32) + subtract_13 = paddle._C_ops.subtract(split_9, data_6) + del data_6, split_9 + + # pd_op.full: (1xi32) <- () full_6 = paddle._C_ops.full( - [10], float("0"), paddle.int64, paddle.framework._current_expected_place() + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() ) - # pd_op.assign_value_: (10xi64) <- (10xi64) - assign_value__0 = paddle._C_ops.assign_value_( - full_6, - [10], - paddle.int64, - [ - float("0"), - float("1"), - float("2"), - float("3"), - float("4"), - float("5"), - float("6"), - float("7"), - float("8"), - float("9"), - ], - paddle.framework._current_expected_place(), + # builtin.combine: ([1x24276x2xf32, 1x24276x2xf32]) <- (1x24276x2xf32, 1x24276x2xf32) + combine_0 = [subtract_12, subtract_13] + del subtract_12, subtract_13 + + # pd_op.concat: (1x24276x4xf32) <- ([1x24276x2xf32, 1x24276x2xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_0, full_6) + del combine_0, full_6 + + # pd_op.full: (1xf32) <- () + full_7 = paddle._C_ops.full( + [1], float("-2"), paddle.float32, paddle.core.CPUPlace() ) - del full_6 - # pd_op.index_select: (1x27216x10xf32) <- (1x27216x11xf32, 10xi64) - index_select_0 = paddle._C_ops.index_select(one_hot_0, assign_value__0, -1) - del assign_value__0, one_hot_0 + # pd_op.full: (1xf32) <- () + full_8 = paddle._C_ops.full( + [1], float("18.99"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) - multiply_1 = paddle._C_ops.multiply(data_5, data_0) - del data_5 + # pd_op.clip: (1x24276x4xf32) <- (1x24276x4xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(concat_0, full_7, full_8) + del concat_0, full_7, full_8 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-1] + # pd_op.masked_select: (-1xf32) <- (1x24276x4xf32, 1x24276x4xb) + masked_select_4 = paddle._C_ops.masked_select(clip_2, cast_1) + del clip_2 - # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) - max_0 = paddle._C_ops.max(multiply_1, full_int_array_3, True) + # pd_op.reshape: (-1x4xf32) <- (-1xf32, 2xi64) + reshape_3 = paddle._C_ops.reshape(masked_select_4, full_int_array_2) + del full_int_array_2, masked_select_4 - # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x27216xf32) - multiply_2 = paddle._C_ops.multiply(data_6, data_0) - del 
data_0, data_6 + # pd_op.floor: (-1x4xf32) <- (-1x4xf32) + floor_0 = paddle._C_ops.floor(reshape_3) - # pd_op.max: (1x12x1xf32) <- (1x12x27216xf32, 1xi64) - max_1 = paddle._C_ops.max(multiply_2, full_int_array_3, True) - del multiply_2 + # pd_op.cast: (-1x4xi64) <- (-1x4xf32) + cast_4 = paddle._C_ops.cast(floor_0, paddle.int64) + del floor_0 - # pd_op.full: (1xf32) <- () - full_7 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_4 = paddle._C_ops.scale(cast_4, full_3, float("1"), True) - # pd_op.scale: (1x12x1xf32) <- (1x12x1xf32, 1xf32) - scale_1 = paddle._C_ops.scale(max_0, full_7, float("1e-09"), True) - del full_7, max_0 + # pd_op.cast: (-1x4xf32) <- (-1x4xi64) + cast_5 = paddle._C_ops.cast(scale_4, paddle.float32) - # pd_op.divide: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) - divide_0 = paddle._C_ops.divide(multiply_1, scale_1) - del multiply_1, scale_1 + # pd_op.subtract: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + subtract_14 = paddle._C_ops.subtract(cast_5, reshape_3) + del cast_5, reshape_3 - # pd_op.multiply: (1x12x27216xf32) <- (1x12x27216xf32, 1x12x1xf32) - multiply_3 = paddle._C_ops.multiply(divide_0, max_1) - del divide_0, max_1 + # pd_op.scale: (-1x4xf32) <- (-1x4xf32, 1xf32) + scale_5 = paddle._C_ops.scale(subtract_14, full_4, float("1"), True) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [-2] + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_6 = paddle._C_ops.scale(cast_4, full_3, float("2"), True) + del cast_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(scale_6, full_int_array_0) + del scale_6 + + # pd_op.cross_entropy_with_softmax: (-1x4x22xf32, -1x4x1xf32) <- (-1x4x22xf32, -1x4x1xi64) + cross_entropy_with_softmax_0, cross_entropy_with_softmax_2 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_3, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) - # pd_op.max: (1x27216xf32) <- (1x12x27216xf32, 1xi64) - max_2 = paddle._C_ops.max(multiply_3, full_int_array_4, False) - del full_int_array_4, multiply_3 + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_2, full_int_array_0 + ) + + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_5 = paddle._C_ops.multiply(squeeze_0, subtract_14) + + # pd_op.scale: (-1x4xi64) <- (-1x4xi64, 1xf32) + scale_7 = paddle._C_ops.scale(scale_4, full_3, float("2"), True) + del scale_4 + + # pd_op.unsqueeze: (-1x4x1xi64) <- (-1x4xi64, 1xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_7, full_int_array_0) + del scale_7 + + # pd_op.cross_entropy_with_softmax: (-1x4x22xf32, -1x4x1xf32) <- (-1x4x22xf32, -1x4x1xi64) + cross_entropy_with_softmax_1, cross_entropy_with_softmax_3 = ( + lambda x, f: f(x) + )( + paddle._C_ops.cross_entropy_with_softmax( + reshape_2, unsqueeze_4, False, True, True, -100, -1 + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del reshape_2 - # pd_op.unsqueeze: (1x27216x1xf32) <- (1x27216xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(max_2, full_int_array_3) - del full_int_array_3, max_2 + # pd_op.squeeze: (-1x4xf32) <- (-1x4x1xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze( + cross_entropy_with_softmax_3, full_int_array_0 + ) - # pd_op.multiply: (1x27216x10xf32) <- (1x27216x10xf32, 1x27216x1xf32) - multiply_0 = 
paddle._C_ops.multiply(index_select_0, unsqueeze_0) - del index_select_0, unsqueeze_0, where_0 + # pd_op.multiply: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + multiply_6 = paddle._C_ops.multiply(squeeze_1, scale_5) + + # pd_op.add: (-1x4xf32) <- (-1x4xf32, -1x4xf32) + add_1 = paddle._C_ops.add(multiply_5, multiply_6) + + # pd_op.mean: (-1x1xf32) <- (-1x4xf32, 1xi64) + mean_0 = paddle._C_ops.mean(add_1, full_int_array_0, True) + del full_int_array_0 + + # pd_op.multiply: (-1x1xf32) <- (-1x1xf32, -1x1xf32) + multiply_7 = paddle._C_ops.multiply(mean_0, unsqueeze_1) + + # pd_op.sum: (xf32) <- (-1x1xf32, 0xi64) + sum_2 = paddle._C_ops.sum(multiply_7, full_int_array_3, None, False) + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_2, data_4) + del ( + abs_0, + add_0, + add_1, + assign_0, + assign_1, + assign_2, + assign_3, + assign_4, + assign_5, + assign_6, + assign_7, + cast_1, + cast_3, + clip_0, + clip_1, + cross_entropy_with_softmax_2, + cross_entropy_with_softmax_3, + data_4, + divide_2, + divide_3, + full_0, + full_1, + full_2, + full_3, + full_4, + full_int_array_3, + masked_select_0, + masked_select_3, + maximum_0, + maximum_1, + maximum_2, + maximum_3, + mean_0, + minimum_0, + minimum_1, + minimum_2, + minimum_3, + multiply_0, + multiply_1, + multiply_2, + multiply_4, + multiply_5, + multiply_6, + multiply_7, + reshape_0, + reshape_1, + scale_0, + scale_1, + scale_3, + scale_5, + split_0, + split_1, + split_2, + split_3, + split_4, + split_5, + split_6, + split_7, + squeeze_0, + squeeze_1, + subtract_0, + subtract_1, + subtract_10, + subtract_14, + subtract_2, + subtract_3, + subtract_4, + subtract_8, + subtract_9, + sum_1, + sum_2, + unsqueeze_1, + unsqueeze_3, + unsqueeze_4, + ) - return reshape_0, multiply_0 + return ( + cross_entropy_with_softmax_0, + cross_entropy_with_softmax_1, + mean_all_0, + divide_0, + divide_1, + ) diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt index f1aa15364..18e3bbc11 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/graph_hash.txt @@ -1 +1 @@ -09f7308dca33192a680fa6e253963eb73d874927eb1eaddbb7fe31eeed376574 \ No newline at end of file +bbf0a5774c2acf6ee92cd237b35e2c556a5dfa443149969bd012be16856599a9 \ No newline at end of file diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py index d17aba14e..4fdfc39bb 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/input_meta.py @@ -1,28 +1,145 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [1, 48384, 10] + shape = [1, 24276, 10] dtype = "float32" - min_val = float("1.08574e-08") - max_val = float("0.85674") - mean = float("0.00225546") - std = float("0.010105") + min_val = float("9.96627e-10") + max_val = float("0.919436") + mean = float("0.00348174") + std = float("0.0220349") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [1, 48384] - dtype = "int32" - min_val = 0 - max_val = 10 + shape = [1, 24276, 4] + dtype = "float32" + min_val = float("-272.612") + max_val = float("1371.86") + mean = float("544.298") + std = float("322.539") data = None class Program_weight_tensor_data_2: name = 
"data_2" - shape = [1, 48384, 10] + shape = [24276, 2] + dtype = "float32" + min_val = float("4.0") + max_val = float("1084.0") + mean = float("544.0") + std = float("314.059") + data = None + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [24276, 1] + dtype = "float32" + min_val = float("8.0") + max_val = float("32.0") + mean = float("10.6667") + std = float("5.70157") + data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [1, 38, 1] + dtype = "int32" + data = [ + 3, + 3, + 9, + 1, + 0, + 0, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 0, + 0, + 0, + 8, + 3, + 3, + 3, + 0, + 0, + 3, + 3, + 3, + 3, + 3, + 5, + 3, + 3, + 3, + 3, + 3, + 0, + 3, + 3, + 0, + 0, + ] + + +class Program_weight_tensor_data_5: + name = "data_5" + shape = [1, 38, 4] dtype = "float32" - max_val = float("0.949472") - mean = float("0.000146427") - std = float("0.0089493") + min_val = float("354.773") + max_val = float("1051.0") + mean = float("652.35") + std = float("193.013") data = None + + +class Program_weight_tensor_data_6: + name = "data_6" + shape = [1, 38, 1] + dtype = "float32" + data = [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + ] diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py index d4c2470ef..0aac88f68 100644 --- a/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py +++ b/paddle_samples/PaddleX/PP-YOLOE_plus_SOD-largesize-L/subgraph_9/model.py @@ -5,106 +5,495 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xi32) <- () - full_0 = paddle._C_ops.full( - [1], float("11"), paddle.int32, paddle.core.CPUPlace() - ) + def forward(self, data_0, data_1, data_2, data_3, data_4, data_5, data_6): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [2] - # pd_op.one_hot: (1x-1x11xf32) <- (1x-1xi32, 1xi32) - one_hot_0 = paddle._C_ops.one_hot( - data_1 % paddle.cast(full_0, data_1.dtype), full_0 - ) - del data_1, full_0 + # pd_op.unsqueeze: (1x38x1x4xf32) <- (1x38x4xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(data_5, full_int_array_0) + del data_5 # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + full_int_array_1 = [1] + + # pd_op.unsqueeze: (1x1x24276x4xf32) <- (1x24276x4xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(data_1, full_int_array_1) + del data_1, full_int_array_1 # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + full_int_array_2 = [0] - # pd_op.slice: (1x-1x10xf32) <- (1x-1x11xf32, 1xi64, 1xi64) + # pd_op.slice: (1x38x1x2xf32) <- (1x38x1x4xf32, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - one_hot_0, [2], full_int_array_0, full_int_array_1, [1], [] + unsqueeze_0, [3], full_int_array_2, full_int_array_0, [1], [] + ) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2147483647] + + # pd_op.slice: (1x38x1x2xf32) <- (1x38x1x4xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + unsqueeze_0, [3], full_int_array_0, full_int_array_3, [1], [] ) - del full_int_array_0, full_int_array_1, one_hot_0 - # pd_op.pow: (1x-1x10xf32) <- (1x-1x10xf32) - pow_0 = paddle._C_ops.pow(data_0, float("2")) + # pd_op.slice: (1x1x24276x2xf32) <- (1x1x24276x4xf32, 1xi64, 1xi64) + slice_2 
= paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_2, full_int_array_0, [1], [] + ) + del full_int_array_2 + + # pd_op.slice: (1x1x24276x2xf32) <- (1x1x24276x4xf32, 1xi64, 1xi64) + slice_3 = paddle._C_ops.slice( + unsqueeze_1, [3], full_int_array_0, full_int_array_3, [1], [] + ) + del full_int_array_0, full_int_array_3, unsqueeze_1 + + # pd_op.maximum: (1x38x24276x2xf32) <- (1x38x1x2xf32, 1x1x24276x2xf32) + maximum_0 = paddle._C_ops.maximum(slice_0, slice_2) + + # pd_op.minimum: (1x38x24276x2xf32) <- (1x38x1x2xf32, 1x1x24276x2xf32) + minimum_0 = paddle._C_ops.minimum(slice_1, slice_3) + + # pd_op.subtract: (1x38x24276x2xf32) <- (1x38x24276x2xf32, 1x38x24276x2xf32) + subtract_0 = paddle._C_ops.subtract(minimum_0, maximum_0) + del maximum_0, minimum_0 + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) # pd_op.full: (1xf32) <- () full_1 = paddle._C_ops.full( - [1], float("0.75"), paddle.float32, paddle.core.CPUPlace() + [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (1x-1x10xf32) <- (1x-1x10xf32, 1xf32) - scale_0 = paddle._C_ops.scale(pow_0, full_1, float("0"), True) - del pow_0 + # pd_op.clip: (1x38x24276x2xf32) <- (1x38x24276x2xf32, 1xf32, 1xf32) + clip_0 = paddle._C_ops.clip(subtract_0, full_0, full_1) + del subtract_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_4 = [-1] + + # pd_op.prod: (1x38x24276xf32) <- (1x38x24276x2xf32, 1xi64) + prod_0 = paddle._C_ops.prod(clip_0, full_int_array_4, False, False) + del clip_0 + + # pd_op.subtract: (1x38x1x2xf32) <- (1x38x1x2xf32, 1x38x1x2xf32) + subtract_1 = paddle._C_ops.subtract(slice_1, slice_0) + del slice_0, slice_1 + + # pd_op.clip: (1x38x1x2xf32) <- (1x38x1x2xf32, 1xf32, 1xf32) + clip_1 = paddle._C_ops.clip(subtract_1, full_0, full_1) + del subtract_1 + + # pd_op.prod: (1x38x1xf32) <- (1x38x1x2xf32, 1xi64) + prod_1 = paddle._C_ops.prod(clip_1, full_int_array_4, False, False) + del clip_1 + + # pd_op.subtract: (1x1x24276x2xf32) <- (1x1x24276x2xf32, 1x1x24276x2xf32) + subtract_2 = paddle._C_ops.subtract(slice_3, slice_2) + del slice_2, slice_3 + + # pd_op.clip: (1x1x24276x2xf32) <- (1x1x24276x2xf32, 1xf32, 1xf32) + clip_2 = paddle._C_ops.clip(subtract_2, full_0, full_1) + del full_1, subtract_2 + + # pd_op.prod: (1x1x24276xf32) <- (1x1x24276x2xf32, 1xi64) + prod_2 = paddle._C_ops.prod(clip_2, full_int_array_4, False, False) + del clip_2 + + # pd_op.add: (1x38x24276xf32) <- (1x38x1xf32, 1x1x24276xf32) + add_0 = paddle._C_ops.add(prod_1, prod_2) + del prod_1, prod_2 + + # pd_op.subtract: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + subtract_3 = paddle._C_ops.subtract(add_0, prod_0) + del add_0 # pd_op.full: (1xf32) <- () full_2 = paddle._C_ops.full( - [1], float("-1"), paddle.float32, paddle.core.CPUPlace() + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) + scale_0 = paddle._C_ops.scale(subtract_3, full_2, float("1e-09"), True) + del subtract_3 + + # pd_op.divide: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + divide_0 = paddle._C_ops.divide(prod_0, scale_0) + del prod_0, scale_0 + + # pd_op.transpose: (1x10x24276xf32) <- (1x24276x10xf32) + transpose_0 = paddle._C_ops.transpose(data_0, [0, 2, 1]) + del data_0 + + # pd_op.full: (1xf64) <- () + full_3 = paddle._C_ops.full( + [1], float("0"), paddle.float64, paddle.core.CPUPlace() ) - # pd_op.scale: (1x-1x10xf32) <- (1x-1x10xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_0, 
full_2, float("1"), True) - del full_2 + # pd_op.full: (1xf64) <- () + full_4 = paddle._C_ops.full( + [1], float("1"), paddle.float64, paddle.core.CPUPlace() + ) - # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) - multiply_0 = paddle._C_ops.multiply(scale_0, scale_1) + # pd_op.arange: (1xi32) <- (1xf64, 1xf64, 1xf64) + arange_0 = paddle.arange(full_3, full_4, full_4, dtype="int32") + del full_3, full_4 - # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) - multiply_1 = paddle._C_ops.multiply(data_2, slice_0) - del slice_0 + # pd_op.unsqueeze: (1x1xi32) <- (1xi32, 1xi64) + unsqueeze_2 = paddle._C_ops.unsqueeze(arange_0, full_int_array_4) + del arange_0 - # pd_op.add: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) - add_0 = paddle._C_ops.add(multiply_0, multiply_1) + # pd_op.full_int_array: (2xi64) <- () + full_int_array_5 = [1, 38] - # pd_op.bce_loss: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) - bce_loss_0 = paddle._C_ops.bce_loss(data_0, data_2) - del data_0 + # pd_op.tile: (1x38xi32) <- (1x1xi32, 2xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_2, full_int_array_5) + del full_int_array_5 - # pd_op.multiply: (1x-1x10xf32) <- (1x-1x10xf32, 1x-1x10xf32) - multiply_2 = paddle._C_ops.multiply(bce_loss_0, add_0) + # pd_op.squeeze: (1x38xi32) <- (1x38x1xi32, 1xi64) + squeeze_0 = paddle._C_ops.squeeze(data_4, full_int_array_4) + del data_4 - # pd_op.full_int_array: (0xi64) <- () - full_int_array_2 = [] + # builtin.combine: ([1x38xi32, 1x38xi32]) <- (1x38xi32, 1x38xi32) + combine_0 = [tile_0, squeeze_0] + del squeeze_0, tile_0 + + # pd_op.stack: (1x38x2xi32) <- ([1x38xi32, 1x38xi32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) + del combine_0 + + # pd_op.gather_nd: (1x38x24276xf32) <- (1x10x24276xf32, 1x38x2xi32) + gather_nd_0 = paddle._C_ops.gather_nd(transpose_0, stack_0) + del stack_0, transpose_0 + + # pd_op.pow: (1x38x24276xf32) <- (1x38x24276xf32) + pow_0 = paddle._C_ops.pow(gather_nd_0, float("1")) + del gather_nd_0 + + # pd_op.pow: (1x38x24276xf32) <- (1x38x24276xf32) + pow_1 = paddle._C_ops.pow(divide_0, float("6")) - # pd_op.sum: (xf32) <- (1x-1x10xf32, 0xi64) - sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_2, None, False) + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_0 = paddle._C_ops.multiply(pow_0, pow_1) + del pow_0, pow_1 - # pd_op.sum: (xf32) <- (1x-1x10xf32, 0xi64) - sum_1 = paddle._C_ops.sum(data_2, full_int_array_2, None, False) + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_1 = paddle._C_ops.multiply(multiply_0, data_6) + del multiply_0 + + # pd_op.scale: (24276x1xf32) <- (24276x1xf32, 1xf32) + scale_1 = paddle._C_ops.scale(data_3, full_2, float("0"), True) + del data_3, full_2 + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_6 = [0, 1] + + # pd_op.unsqueeze: (1x1x24276x2xf32) <- (24276x2xf32, 2xi64) + unsqueeze_3 = paddle._C_ops.unsqueeze(data_2, full_int_array_6) del data_2 - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.full: (1xi32) <- () + full_5 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.split_with_num: ([1x1x24276x1xf32, 1x1x24276x1xf32]) <- (1x1x24276x2xf32, 1xi32) + split_with_num_0 = paddle._C_ops.split_with_num(unsqueeze_3, 2, full_5) + del unsqueeze_3 + + # builtin.split: (1x1x24276x1xf32, 1x1x24276x1xf32) <- ([1x1x24276x1xf32, 1x1x24276x1xf32]) + ( + split_0, + split_1, + ) = split_with_num_0 + del split_with_num_0 + + # 
pd_op.split_with_num: ([1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32]) <- (1x38x1x4xf32, 1xi32) + split_with_num_1 = paddle._C_ops.split_with_num(unsqueeze_0, 4, full_5) + del full_5, unsqueeze_0 + + # builtin.split: (1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32) <- ([1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32, 1x38x1x1xf32]) + ( + split_2, + split_3, + split_4, + split_5, + ) = split_with_num_1 + del split_with_num_1 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x1x1xf32) + subtract_4 = paddle._C_ops.subtract(split_0, split_2) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x1x1xf32) + subtract_5 = paddle._C_ops.subtract(split_1, split_3) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_6 = paddle._C_ops.subtract(split_4, split_0) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_7 = paddle._C_ops.subtract(split_5, split_1) + + # pd_op.full: (1xi32) <- () + full_6 = paddle._C_ops.full( + [1], float("-1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32]) <- (1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32) + combine_1 = [subtract_4, subtract_5, subtract_6, subtract_7] + del subtract_4, subtract_5, subtract_6, subtract_7 + + # pd_op.concat: (1x38x24276x4xf32) <- ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_6) + del combine_1 + + # pd_op.min: (1x38x24276xf32) <- (1x38x24276x4xf32, 1xi64) + min_0 = paddle._C_ops.min(concat_0, full_int_array_4, False) + del concat_0 + + # pd_op.full: (xf32) <- () + full_7 = paddle._C_ops.full( + [], + float("1e-09"), + paddle.float32, + paddle.framework._current_expected_place(), ) + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_1 = paddle._C_ops.greater_than(min_0, full_7) + del min_0 + + # pd_op.unsqueeze: (1x1x24276x1xf32) <- (24276x1xf32, 2xi64) + unsqueeze_4 = paddle._C_ops.unsqueeze(scale_1, full_int_array_6) + del full_int_array_6, scale_1 + + # pd_op.add: (1x38x1x1xf32) <- (1x38x1x1xf32, 1x38x1x1xf32) + add_1 = paddle._C_ops.add(split_2, split_4) + del split_2, split_4 + # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("3.40282e+38"), paddle.float32, paddle.core.CPUPlace() + full_8 = paddle._C_ops.full( + [1], float("0.5"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.clip: (xf32) <- (xf32, 1xf32, 1xf32) - clip_0 = paddle._C_ops.clip(sum_1, full_3, full_4) - del full_3, full_4, sum_1 + # pd_op.scale: (1x38x1x1xf32) <- (1x38x1x1xf32, 1xf32) + scale_2 = paddle._C_ops.scale(add_1, full_8, float("0"), True) + del add_1 + + # pd_op.add: (1x38x1x1xf32) <- (1x38x1x1xf32, 1x38x1x1xf32) + add_2 = paddle._C_ops.add(split_3, split_5) + del split_3, split_5 + + # pd_op.scale: (1x38x1x1xf32) <- (1x38x1x1xf32, 1xf32) + scale_3 = paddle._C_ops.scale(add_2, full_8, float("0"), True) + del add_2, full_8 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_8 = paddle._C_ops.subtract(scale_2, unsqueeze_4) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 1x38x24276x1xf32) + subtract_9 = paddle._C_ops.subtract(split_0, subtract_8) + del subtract_8 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + subtract_10 = paddle._C_ops.subtract(scale_3, unsqueeze_4) + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x1x24276x1xf32, 
1x38x24276x1xf32) + subtract_11 = paddle._C_ops.subtract(split_1, subtract_10) + del subtract_10 + + # pd_op.add: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + add_3 = paddle._C_ops.add(scale_2, unsqueeze_4) + del scale_2 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x24276x1xf32, 1x1x24276x1xf32) + subtract_12 = paddle._C_ops.subtract(add_3, split_0) + del add_3, split_0 + + # pd_op.add: (1x38x24276x1xf32) <- (1x38x1x1xf32, 1x1x24276x1xf32) + add_4 = paddle._C_ops.add(scale_3, unsqueeze_4) + del scale_3, unsqueeze_4 + + # pd_op.subtract: (1x38x24276x1xf32) <- (1x38x24276x1xf32, 1x1x24276x1xf32) + subtract_13 = paddle._C_ops.subtract(add_4, split_1) + del add_4, split_1 + + # builtin.combine: ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32]) <- (1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32) + combine_2 = [subtract_9, subtract_11, subtract_12, subtract_13] + del subtract_11, subtract_12, subtract_13, subtract_9 - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(sum_0, clip_0) - del ( - add_0, - bce_loss_0, - clip_0, - full_1, - full_int_array_2, - multiply_0, + # pd_op.concat: (1x38x24276x4xf32) <- ([1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32, 1x38x24276x1xf32], 1xi32) + concat_1 = paddle._C_ops.concat(combine_2, full_6) + del combine_2, full_6 + + # pd_op.min: (1x38x24276xf32) <- (1x38x24276x4xf32, 1xi64) + min_1 = paddle._C_ops.min(concat_1, full_int_array_4, False) + del concat_1 + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_2 = paddle._C_ops.greater_than(min_1, full_7) + del full_7, min_1 + + # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) + cast_0 = paddle._C_ops.cast(greater_than_1, paddle.float32) + del greater_than_1 + + # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) + cast_1 = paddle._C_ops.cast(greater_than_2, paddle.float32) + del greater_than_2 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_2 = paddle._C_ops.multiply(cast_0, data_6) + del cast_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_3 = paddle._C_ops.multiply(cast_1, data_6) + del cast_1 + + # pd_op.sum: (1x38x1xf32) <- (1x38x24276xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_2, full_int_array_4, None, True) + del full_int_array_4 + + # pd_op.full: (xf32) <- () + full_9 = paddle._C_ops.full( + [], float("0"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.equal: (1x38x1xb) <- (1x38x1xf32, xf32) + equal_0 = paddle._C_ops.equal(sum_0, full_9) + del sum_0 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_5 = paddle._C_ops.add(multiply_1, multiply_3) + + # pd_op.full_like: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) + full_like_0 = paddle._C_ops.full_like( + add_5, full_0, paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.full_like: (1x38x24276xf32) <- (1x38x24276xf32, 1xf32) + full_like_1 = paddle._C_ops.full_like( multiply_1, - multiply_2, - scale_0, - scale_1, - sum_0, + full_0, + paddle.float32, + paddle.framework._current_expected_place(), + ) + + # pd_op.full_like: (1x38x1xb) <- (1x38x1xb, 1xf32) + full_like_2 = paddle._C_ops.full_like( + equal_0, full_0, paddle.bool, paddle.framework._current_expected_place() + ) + del full_0 + + # pd_op.cast: (1x38x1xf32) <- (1x38x1xb) + cast_2 = paddle._C_ops.cast(full_like_2, paddle.float32) + del full_like_2 + + # pd_op.cast: (1x38x1xf32) <- (1x38x1xb) + cast_3 = paddle._C_ops.cast(equal_0, 
paddle.float32) + del equal_0 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_6 = paddle._C_ops.add(full_like_0, full_like_1) + del full_like_0, full_like_1 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + add_7 = paddle._C_ops.add(add_6, cast_2) + del add_6, cast_2 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_8 = paddle._C_ops.add(add_5, add_7) + del add_5 + + # pd_op.add: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + add_9 = paddle._C_ops.add(multiply_1, add_7) + + # pd_op.add: (1x38x24276xf32) <- (1x38x1xf32, 1x38x24276xf32) + add_10 = paddle._C_ops.add(cast_3, add_7) + del add_7, cast_3 + + # pd_op.cast: (1x38x24276xb) <- (1x38x24276xf32) + cast_4 = paddle._C_ops.cast(add_10, paddle.bool) + del add_10 + + # pd_op.where: (1x38x24276xf32) <- (1x38x24276xb, 1x38x24276xf32, 1x38x24276xf32) + where_0 = paddle._C_ops.where(cast_4, add_8, add_9) + del add_8, add_9, cast_4 + + # pd_op.full: (1xi32) <- () + full_10 = paddle._C_ops.full( + [1], float("13"), paddle.int32, paddle.core.CPUPlace() ) - return divide_0 + # pd_op.topk: (1x38x13xf32, 1x38x13xi64) <- (1x38x24276xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(where_0, full_10, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_10, where_0 + + # pd_op.full: (1xi32) <- () + full_11 = paddle._C_ops.full( + [1], float("24276"), paddle.int32, paddle.core.CPUPlace() + ) + + # pd_op.one_hot: (1x38x13x24276xf32) <- (1x38x13xi64, 1xi32) + one_hot_0 = paddle._C_ops.one_hot( + topk_1 % paddle.cast(full_11, topk_1.dtype), full_11 + ) + del full_11, topk_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_7 = [-2] + + # pd_op.sum: (1x38x24276xf32) <- (1x38x13x24276xf32, 1xi64) + sum_1 = paddle._C_ops.sum(one_hot_0, full_int_array_7, None, False) + del one_hot_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x1xf32) + multiply_4 = paddle._C_ops.multiply(sum_1, data_6) + del data_6, sum_1 + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_3 = paddle._C_ops.greater_than(multiply_3, full_9) + del multiply_3 + + # pd_op.greater_than: (1x38x24276xb) <- (1x38x24276xf32, xf32) + greater_than_4 = paddle._C_ops.greater_than(multiply_2, full_9) + del full_9, multiply_2 + + # pd_op.bitwise_or: (1x38x24276xb) <- (1x38x24276xb, 1x38x24276xb) + bitwise_or_0 = paddle._C_ops.bitwise_or(greater_than_3, greater_than_4) + del greater_than_3, greater_than_4 + + # pd_op.cast: (1x38x24276xf32) <- (1x38x24276xb) + cast_5 = paddle._C_ops.cast(bitwise_or_0, paddle.float32) + del bitwise_or_0 + + # pd_op.multiply: (1x38x24276xf32) <- (1x38x24276xf32, 1x38x24276xf32) + multiply_5 = paddle._C_ops.multiply(multiply_4, cast_5) + del cast_5, multiply_4 + + # pd_op.sum: (1x24276xf32) <- (1x38x24276xf32, 1xi64) + sum_2 = paddle._C_ops.sum(multiply_5, full_int_array_7, None, False) + del full_int_array_7 + + # pd_op.full_int_array: (0xi64) <- () + full_int_array_8 = [] + + # pd_op.max: (xf32) <- (1x24276xf32, 0xi64) + max_0 = paddle._C_ops.max(sum_2, full_int_array_8, False) + del full_int_array_8 + + # pd_op.full: (xf32) <- () + full_12 = paddle._C_ops.full( + [], float("1"), paddle.float32, paddle.framework._current_expected_place() + ) + + # pd_op.greater_than: (xb) <- (xf32, xf32) + greater_than_0 = paddle._C_ops.greater_than(max_0, full_12) + del divide_0, full_12, max_0, multiply_1, multiply_5, sum_2, unsqueeze_2 + + return greater_than_0 diff --git 
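The assigner subgraph that ends here encodes, op by op, the familiar pairwise box-IoU pattern: element-wise maximum of top-left corners, minimum of bottom-right corners, widths/heights clipped at zero and multiplied into an intersection area, then division by the union plus a small epsilon. A minimal plain-Paddle sketch of that pattern (function and tensor names, shapes, and the eps value are illustrative, not taken from the captured graph) is:

import paddle


def pairwise_iou(boxes_a, boxes_b, eps=1e-9):
    """boxes_a: [N, 4], boxes_b: [M, 4], both as (x1, y1, x2, y2)."""
    a = boxes_a.unsqueeze(1)                          # [N, 1, 4]
    b = boxes_b.unsqueeze(0)                          # [1, M, 4]
    lt = paddle.maximum(a[:, :, :2], b[:, :, :2])     # intersection top-left
    rb = paddle.minimum(a[:, :, 2:], b[:, :, 2:])     # intersection bottom-right
    wh = (rb - lt).clip(min=0)                        # clamp negative overlaps to zero
    inter = paddle.prod(wh, axis=-1)                  # [N, M] intersection areas
    area_a = paddle.prod((a[:, :, 2:] - a[:, :, :2]).clip(min=0), axis=-1)
    area_b = paddle.prod((b[:, :, 2:] - b[:, :, :2]).clip(min=0), axis=-1)
    return inter / (area_a + area_b - inter + eps)


# Example (hypothetical boxes):
# gt = paddle.to_tensor([[0., 0., 10., 10.]])
# anchors = paddle.to_tensor([[5., 5., 15., 15.], [20., 20., 30., 30.]])
# pairwise_iou(gt, anchors)  -> approximately [[0.1429, 0.0]]

The captured graph keeps the [1, num_gt, num_anchors, 2] broadcast layout explicit and calls paddle._C_ops primitives directly, but the arithmetic is the same.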
a/paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/input_meta.py new file mode 100644 index 000000000..d10b7338b --- /dev/null +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/input_meta.py @@ -0,0 +1,9 @@ +class Program_weight_tensor_data_0: + name = "data_0" + shape = [16, 32, 1, 96] + dtype = "float32" + min_val = float("-10.2965") + max_val = float("14.1976") + mean = float("0.342926") + std = float("1.27794") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/weight_meta.py similarity index 71% rename from paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py rename to paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/weight_meta.py index 5fd23c522..00a1adb0c 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet/subgraph_10/shape_patches_TimesNet_ad/weight_meta.py @@ -11,10 +11,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [32, 64, 11, 11] dtype = "float32" - min_val = float("-0.171178") - max_val = float("0.28641") - mean = float("0.000138819") - std = float("0.0208469") + min_val = float("-0.378676") + max_val = float("0.365137") + mean = float("-0.000197318") + std = float("0.0303417") data = None @@ -31,10 +31,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [32, 64, 9, 9] dtype = "float32" - min_val = float("-0.155314") - max_val = float("0.280429") - mean = float("0.000198439") - std = float("0.0244335") + min_val = float("-0.400279") + max_val = float("0.379996") + mean = float("-0.000243122") + std = float("0.0345614") data = None @@ -51,10 +51,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [32, 64, 7, 7] dtype = "float32" - min_val = float("-0.167556") - max_val = float("0.280153") - mean = float("0.000259141") - std = float("0.0301798") + min_val = float("-0.411308") + max_val = float("0.37641") + mean = float("-8.07878e-05") + std = float("0.0410653") data = None @@ -71,10 +71,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [32, 64, 5, 5] dtype = "float32" - min_val = float("-0.169913") - max_val = float("0.283049") - mean = float("0.000434627") - std = float("0.0407218") + min_val = float("-0.37956") + max_val = float("0.382445") + mean = float("-0.000167806") + std = float("0.0525399") data = None @@ -91,10 +91,10 @@ class Program_weight_tensor_parameter_9: name = "parameter_9" shape = [32, 64, 3, 3] dtype = "float32" - min_val = float("-0.246544") - max_val = float("0.304018") - mean = float("0.00110634") - std = float("0.0640675") + min_val = float("-0.43568") + max_val = float("0.373109") + mean = float("0.00107606") + std = float("0.0743243") data = None @@ -111,10 +111,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [32, 64, 1, 1] dtype = "float32" - min_val = float("-0.612299") - max_val = float("0.645148") - mean = float("-0.00132418") - std = float("0.186337") + min_val = float("-0.606749") + max_val = float("0.716587") + mean = float("0.00986633") + std = float("0.187626") data = None @@ -131,10 +131,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [64, 32, 11, 11] dtype = "float32" - min_val = float("-0.397206") - max_val = float("0.384341") - mean = 
float("0.000531751") - std = float("0.0285274") + min_val = float("-0.705811") + max_val = float("0.38403") + mean = float("-0.00554413") + std = float("0.0356425") data = None @@ -151,10 +151,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [64, 32, 9, 9] dtype = "float32" - min_val = float("-0.353936") - max_val = float("0.406931") - mean = float("0.000740455") - std = float("0.0337338") + min_val = float("-0.771232") + max_val = float("0.366518") + mean = float("-0.0068774") + std = float("0.0408845") data = None @@ -171,10 +171,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [64, 32, 7, 7] dtype = "float32" - min_val = float("-0.347076") - max_val = float("0.378609") - mean = float("0.000666987") - std = float("0.0419889") + min_val = float("-0.695085") + max_val = float("0.354321") + mean = float("-0.00856062") + std = float("0.0491656") data = None @@ -191,10 +191,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [64, 32, 5, 5] dtype = "float32" - min_val = float("-0.398976") - max_val = float("0.349175") - mean = float("0.00116756") - std = float("0.0565579") + min_val = float("-0.730604") + max_val = float("0.393171") + mean = float("-0.0118981") + std = float("0.0648549") data = None @@ -211,10 +211,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [64, 32, 3, 3] dtype = "float32" - min_val = float("-0.498519") - max_val = float("0.47824") - mean = float("0.00144987") - std = float("0.0900388") + min_val = float("-0.728807") + max_val = float("0.600694") + mean = float("-0.0154007") + std = float("0.0968162") data = None @@ -231,8 +231,8 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [64, 32, 1, 1] dtype = "float32" - min_val = float("-0.766112") - max_val = float("0.852336") - mean = float("-0.00652652") - std = float("0.263183") + min_val = float("-0.938968") + max_val = float("0.799272") + mean = float("-0.0267053") + std = float("0.263792") data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_11/shape_patches_TimesNet_ad/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/TimesNet_ad/subgraph_12/input_meta.py rename to paddle_samples/PaddleX/TimesNet/subgraph_11/shape_patches_TimesNet_ad/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_11/shape_patches_TimesNet_ad/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_10/weight_meta.py rename to paddle_samples/PaddleX/TimesNet/subgraph_11/shape_patches_TimesNet_ad/weight_meta.py diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/input_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_5/shape_patches_TimesNet_ad/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/TimesNet_ad/subgraph_10/input_meta.py rename to paddle_samples/PaddleX/TimesNet/subgraph_5/shape_patches_TimesNet_ad/input_meta.py diff --git a/paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/weight_meta.py b/paddle_samples/PaddleX/TimesNet/subgraph_5/shape_patches_TimesNet_ad/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/PP-YOLOE_plus-L/subgraph_13/weight_meta.py rename to paddle_samples/PaddleX/TimesNet/subgraph_5/shape_patches_TimesNet_ad/weight_meta.py diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt 
b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt deleted file mode 100644 index ca2e06a53..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -f6b0e12402c532fc463303fcca490174c52b7807942b1dd2b2476c67c8d00bb1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json deleted file mode 100644 index 026782ffa..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "TimesNet_ad", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py deleted file mode 100644 index 59c3e41f0..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/model.py +++ /dev/null @@ -1,37 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.slice: (xi32) <- (3xi32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - data_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del data_0, full_int_array_0, full_int_array_1 - - # pd_op.cast: (xi64) <- (xi32) - cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) - - # pd_op.remainder: (xi64) <- (xi64, xi64) - remainder_0 = paddle._C_ops.remainder(data_1, cast_0) - del cast_0, data_1 - - # pd_op.full: (xi64) <- () - full_0 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_0) - del full_0, remainder_0, slice_0 - - return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_10/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt deleted file mode 100644 index a969e5089..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -25c8345fc2b36e75b1aaf987fe048c9788132c20411101c06ace6f50aebe689e \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json deleted file mode 100644 index 026782ffa..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "TimesNet_ad", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py deleted file mode 100644 index 91524202f..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/input_meta.py +++ /dev/null @@ -1,37 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [] - dtype = "int64" - data = [2] - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = 
[] - dtype = "int64" - data = [2] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [] - dtype = "int32" - data = [19] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 96, 32] - dtype = "float32" - min_val = float("-2.85988") - max_val = float("4.34883") - mean = float("0.0198675") - std = float("1.02037") - data = None - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [] - dtype = "int64" - data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py deleted file mode 100644 index c8f6ecea3..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_11/model.py +++ /dev/null @@ -1,401 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - data_0, - data_1, - data_2, - data_3, - data_4, - ): - # pd_op.cast: (xi64) <- (xi32) - cast_0 = paddle._C_ops.cast(data_2, paddle.int64) - del data_2 - - # pd_op.floor_divide: (xi64) <- (xi64, xi64) - floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xi64) <- (xi64, 1xf32) - scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) - del floor_divide_0, full_0 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) - del scale_0 - - # pd_op.subtract: (xi64) <- (xi64, xi64) - subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) - del data_4 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("32"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_0 = [data_1, subtract_0, full_1] - del data_1, subtract_0 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_2, stack_0, paddle.float32 - ) - del full_2, stack_0 - - # pd_op.cast: (-1x96x32xf32) <- (-1x96x32xf32) - cast_1 = paddle._C_ops.cast(data_3, paddle.float32) - del data_3 - - # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) - cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x96x32xf32, -1x-1x32xf32]) <- (-1x96x32xf32, -1x-1x32xf32) - combine_1 = [cast_1, cast_2] - - # pd_op.concat: (-1x-1x32xf32) <- ([-1x96x32xf32, -1x-1x32xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_3) - del combine_1 - - # pd_op.floor_divide: (xi64) <- (xi64, xi64) - floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_2 = [data_0, floor_divide_1, cast_0, full_1] - del 
cast_0, floor_divide_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) - del stack_1 - - # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) - transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) - del reshape_0 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_23 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_0 = [1, -1, 1, 1] - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) - del parameter_22 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_1) - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_21 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) - del parameter_20 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_1 = paddle._C_ops.add(conv2d_1, reshape_2) - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) - conv2d_2 = paddle._C_ops.conv2d( - transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_19 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) - del parameter_18 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_3) - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) - conv2d_3 = paddle._C_ops.conv2d( - transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) - del parameter_16 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_4) - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) - conv2d_4 = paddle._C_ops.conv2d( - transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) - del parameter_14 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_4, reshape_5) - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) - conv2d_5 = paddle._C_ops.conv2d( - transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_13 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) - del parameter_12 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_5, reshape_6) - - # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) - combine_3 = 
[add_0, add_1, add_2, add_3, add_4, add_5] - - # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) - stack_2 = paddle._C_ops.stack(combine_3, -1) - del combine_3 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) - mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) - - # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) - gelu_0 = paddle._C_ops.gelu(mean_0, False) - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_11 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) - del parameter_10 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_6, reshape_7) - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) - del parameter_8 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_7, reshape_8) - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) - conv2d_8 = paddle._C_ops.conv2d( - gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_7 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) - del parameter_6 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_8, reshape_9) - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) - conv2d_9 = paddle._C_ops.conv2d( - gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_5 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) - del parameter_4 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_9, reshape_10) - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) - conv2d_10 = paddle._C_ops.conv2d( - gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_3 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) - del parameter_2 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_10, reshape_11) - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) - conv2d_11 = paddle._C_ops.conv2d( - gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_1 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) - del full_int_array_0, parameter_0 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_11 = paddle._C_ops.add(conv2d_11, reshape_12) - - # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- 
(-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) - combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] - - # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) - mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) - - # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) - transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) - del mean_1 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_5 = [data_0, full_4, full_1] - del data_0, full_1, full_4 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_5, 0) - del combine_5 - - # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) - reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) - del stack_4 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [96] - - # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] - ) - del ( - add_0, - add_1, - add_10, - add_11, - add_2, - add_3, - add_4, - add_5, - add_6, - add_7, - add_8, - add_9, - assign_0, - cast_1, - cast_2, - concat_0, - conv2d_0, - conv2d_1, - conv2d_10, - conv2d_11, - conv2d_2, - conv2d_3, - conv2d_4, - conv2d_5, - conv2d_6, - conv2d_7, - conv2d_8, - conv2d_9, - full_3, - full_int_array_1, - full_int_array_2, - full_int_array_3, - full_with_tensor_0, - gelu_0, - mean_0, - multiply_0, - reshape_1, - reshape_10, - reshape_11, - reshape_12, - reshape_13, - reshape_2, - reshape_3, - reshape_4, - reshape_5, - reshape_6, - reshape_7, - reshape_8, - reshape_9, - stack_2, - stack_3, - transpose_0, - transpose_1, - ) - - return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt deleted file mode 100644 index de0a7a5ae..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -8da99a4a821e3b37fa37693ad690251149c108e684abe721d88d088c4b17168b \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json deleted file mode 100644 index 026782ffa..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "TimesNet_ad", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/model.py deleted file mode 100644 index cd8f1c0dd..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/model.py +++ /dev/null @@ -1,54 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward(self, data_0, data_1, data_2): - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xi64) <- (xi64, 1xf32) - scale_0 = 
paddle._C_ops.scale(data_0, full_0, float("1"), True) - del full_0 - - # builtin.combine: ([xi64]) <- (xi64) - combine_0 = [data_0] - del data_0 - - # pd_op.stack: (1xi64) <- ([xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # builtin.combine: ([xi64]) <- (xi64) - combine_1 = [scale_0] - del scale_0 - - # pd_op.stack: (1xi64) <- ([xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.slice: (xi32) <- (3xi32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice(data_1, [0], stack_0, stack_1, [-1], [0]) - del data_1, stack_0, stack_1 - - # pd_op.cast: (xi64) <- (xi32) - cast_0 = paddle._C_ops.cast(slice_0, paddle.int64) - - # pd_op.remainder: (xi64) <- (xi64, xi64) - remainder_0 = paddle._C_ops.remainder(data_2, cast_0) - del cast_0, data_2 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.framework._current_expected_place() - ) - - # pd_op.not_equal: (xb) <- (xi64, xi64) - not_equal_0 = paddle._C_ops.not_equal(remainder_0, full_1) - del full_1, remainder_0, slice_0 - - return not_equal_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py deleted file mode 100644 index 8b1378917..000000000 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_12/weight_meta.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt index d278dca7a..7bb3cbfdc 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/graph_hash.txt @@ -1 +1 @@ -588d2cbe393fcc4e9b6203711b7f255ed90b5473e89288d65921f4e33ed636d6 \ No newline at end of file +aa5aaf97436574463d1b03785cb5be47dabceded3443c4667332727c0d99c9e5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py index d10b7338b..6db6ec575 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/input_meta.py @@ -1,9 +1,37 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [16, 32, 1, 96] + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [] + dtype = "int64" + data = [16] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [19] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [16, 96, 32] dtype = "float32" - min_val = float("-10.2965") - max_val = float("14.1976") - mean = float("0.342926") - std = float("1.27794") + min_val = float("-3.32402") + max_val = float("4.62089") + mean = float("0.031122") + std = float("1.015") data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py index 0dfe809a1..e9e757f97 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/model.py @@ -32,10 +32,101 @@ def forward( parameter_22, parameter_23, data_0, + data_1, + data_2, + data_3, + data_4, ): - # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x1x1xf32) + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # 
pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) + + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 + + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 + + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) + del data_4 + + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_1, subtract_0, full_1] + del data_1, subtract_0 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 + + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del full_2, stack_0 + + # pd_op.cast: (-1x96x32xf32) <- (-1x96x32xf32) + cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 + + # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) + + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) + + # builtin.combine: ([-1x96x32xf32, -1x-1x32xf32]) <- (-1x96x32xf32, -1x-1x32xf32) + combine_1 = [cast_1, cast_2] + del cast_1, cast_2 + + # pd_op.concat: (-1x-1x32xf32) <- ([-1x96x32xf32, -1x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1, full_3 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) + + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [data_0, floor_divide_1, cast_0, full_1] + del cast_0, floor_divide_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del concat_0, stack_1 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) conv2d_0 = paddle._C_ops.conv2d( - data_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_23 @@ -43,196 +134,235 @@ def forward( full_int_array_0 = [1, -1, 1, 1] # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) del parameter_22 - # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_0) - del conv2d_0, reshape_0 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) + del conv2d_0, reshape_1 - # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x3x3xf32) + # pd_op.conv2d: (-1x64x-1x-1xf32) <- 
(-1x32x-1x-1xf32, 64x32x3x3xf32) conv2d_1 = paddle._C_ops.conv2d( - data_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_21 # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) del parameter_20 - # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) - add_1 = paddle._C_ops.add(conv2d_1, reshape_1) - del conv2d_1, reshape_1 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) + del conv2d_1, reshape_2 - # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x5x5xf32) + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) conv2d_2 = paddle._C_ops.conv2d( - data_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_19 # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) del parameter_18 - # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_2) - del conv2d_2, reshape_2 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + del conv2d_2, reshape_3 - # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x7x7xf32) + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) conv2d_3 = paddle._C_ops.conv2d( - data_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_17 # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) del parameter_16 - # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_3) - del conv2d_3, reshape_3 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + del conv2d_3, reshape_4 - # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x9x9xf32) + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) conv2d_4 = paddle._C_ops.conv2d( - data_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_15 # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) del parameter_14 - # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_4, reshape_4) - del conv2d_4, reshape_4 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + del conv2d_4, reshape_5 - # pd_op.conv2d: (16x64x1x96xf32) <- (16x32x1x96xf32, 64x32x11x11xf32) + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) conv2d_5 = paddle._C_ops.conv2d( - data_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" ) - del data_0, parameter_13 + 
del parameter_13, transpose_0 # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) del parameter_12 - # pd_op.add: (16x64x1x96xf32) <- (16x64x1x96xf32, 1x64x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_5, reshape_5) - del conv2d_5, reshape_5 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) + del conv2d_5, reshape_6 - # builtin.combine: ([16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32]) <- (16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32) - combine_0 = [add_0, add_1, add_2, add_3, add_4, add_5] + # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] del add_0, add_1, add_2, add_3, add_4, add_5 - # pd_op.stack: (16x64x1x96x6xf32) <- ([16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32, 16x64x1x96xf32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 + # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 # pd_op.full_int_array: (1xi64) <- () full_int_array_1 = [-1] - # pd_op.mean: (16x64x1x96xf32) <- (16x64x1x96x6xf32, 1xi64) - mean_1 = paddle._C_ops.mean(stack_0, full_int_array_1, False) - del stack_0 + # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) + del stack_2 - # pd_op.gelu: (16x64x1x96xf32) <- (16x64x1x96xf32) - gelu_0 = paddle._C_ops.gelu(mean_1, False) - del mean_1 + # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) + del mean_0 - # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x1x1xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) conv2d_6 = paddle._C_ops.conv2d( gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_11 # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_10, full_int_array_0) + reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) del parameter_10 - # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_6, reshape_6) - del conv2d_6, reshape_6 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) + del conv2d_6, reshape_7 - # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x3x3xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) conv2d_7 = paddle._C_ops.conv2d( gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_9 # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) del parameter_8 - # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_7, reshape_7) - del conv2d_7, reshape_7 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = 
paddle._C_ops.add(conv2d_7, reshape_8) + del conv2d_7, reshape_8 - # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x5x5xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) conv2d_8 = paddle._C_ops.conv2d( gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_7 # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) del parameter_6 - # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_8, reshape_8) - del conv2d_8, reshape_8 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + del conv2d_8, reshape_9 - # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x7x7xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) conv2d_9 = paddle._C_ops.conv2d( gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_5 # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) del parameter_4 - # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_9, reshape_9) - del conv2d_9, reshape_9 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + del conv2d_9, reshape_10 - # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x9x9xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) conv2d_10 = paddle._C_ops.conv2d( gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" ) del parameter_3 # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) del parameter_2 - # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_10, reshape_10) - del conv2d_10, reshape_10 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + del conv2d_10, reshape_11 - # pd_op.conv2d: (16x32x1x96xf32) <- (16x64x1x96xf32, 32x64x11x11xf32) + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) conv2d_11 = paddle._C_ops.conv2d( gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" ) del gelu_0, parameter_1 # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) del full_int_array_0, parameter_0 - # pd_op.add: (16x32x1x96xf32) <- (16x32x1x96xf32, 1x32x1x1xf32) - add_11 = paddle._C_ops.add(conv2d_11, reshape_11) - del conv2d_11, reshape_11 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + del conv2d_11, reshape_12 - # builtin.combine: ([16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32]) <- (16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32) - combine_1 = [add_6, add_7, add_8, add_9, add_10, add_11] + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, 
-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] del add_10, add_11, add_6, add_7, add_8, add_9 - # pd_op.stack: (16x32x1x96x6xf32) <- ([16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32, 16x32x1x96xf32]) - stack_1 = paddle._C_ops.stack(combine_1, -1) - del combine_1 + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + del full_int_array_1, stack_3 - # pd_op.mean: (16x32x1x96xf32) <- (16x32x1x96x6xf32, 1xi64) - mean_0 = paddle._C_ops.mean(stack_1, full_int_array_1, False) - del full_int_array_1, stack_1 + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [data_0, full_4, full_1] + del data_0, full_1, full_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) + del stack_4, transpose_1 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [96] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] + ) + del ( + full_int_array_2, + full_int_array_3, + full_with_tensor_0, + multiply_0, + reshape_13, + ) - return mean_0 + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py index 00a1adb0c..ca5b1d84a 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_3/weight_meta.py @@ -11,10 +11,10 @@ class Program_weight_tensor_parameter_1: name = "parameter_1" shape = [32, 64, 11, 11] dtype = "float32" - min_val = float("-0.378676") - max_val = float("0.365137") - mean = float("-0.000197318") - std = float("0.0303417") + min_val = float("-0.264679") + max_val = float("0.376382") + mean = float("0.000111027") + std = float("0.0245485") data = None @@ -31,10 +31,10 @@ class Program_weight_tensor_parameter_3: name = "parameter_3" shape = [32, 64, 9, 9] dtype = "float32" - min_val = float("-0.400279") - max_val = float("0.379996") - mean = float("-0.000243122") - std = float("0.0345614") + min_val = float("-0.294357") + max_val = float("0.38058") + mean = float("0.000170868") + std = float("0.0282573") data = None @@ -51,10 +51,10 @@ class Program_weight_tensor_parameter_5: name = "parameter_5" shape = [32, 64, 7, 7] dtype = "float32" - min_val = float("-0.411308") - max_val = float("0.37641") - mean = float("-8.07878e-05") - std = float("0.0410653") + min_val = float("-0.283553") + max_val = float("0.370125") + mean = float("0.000228007") + std = float("0.0342913") data = None @@ -71,10 +71,10 @@ class Program_weight_tensor_parameter_7: name = "parameter_7" shape = [32, 64, 5, 5] dtype = "float32" - min_val = 
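The updated subgraph_3 body above generalizes the previously static 16x32x1x96 layout: it zero-pads the [batch, time, channels] input until the time axis is divisible by the detected period, folds it into a 4-D layout, runs the inception-style conv stack, and finally slices back to the first 96 time steps. A short sketch of the padding-and-folding step with high-level Paddle ops (fold_by_period, x, and period are hypothetical names, not identifiers from the sample) might look like:

import paddle


def fold_by_period(x, period):
    """x: [B, T, C] float tensor; period: positive int."""
    B, T, C = x.shape
    pad = (-T) % period                      # zero steps so (T + pad) is a multiple of period
    if pad:
        zeros = paddle.zeros([B, pad, C], dtype=x.dtype)
        x = paddle.concat([x, zeros], axis=1)
    rows = (T + pad) // period
    folded = x.reshape([B, rows, period, C])
    return folded.transpose([0, 3, 1, 2])    # [B, C, rows, period], ready for conv2d

The captured graph reaches the same result with scalar int64 ops (floor_divide, scale, multiply, subtract) plus full_with_tensor, which lets batch size and period remain dynamic (-1) in the compiled program.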
float("-0.37956") - max_val = float("0.382445") - mean = float("-0.000167806") - std = float("0.0525399") + min_val = float("-0.283169") + max_val = float("0.37302") + mean = float("0.000392404") + std = float("0.0451435") data = None @@ -91,10 +91,10 @@ class Program_weight_tensor_parameter_9: name = "parameter_9" shape = [32, 64, 3, 3] dtype = "float32" - min_val = float("-0.43568") - max_val = float("0.373109") - mean = float("0.00107606") - std = float("0.0743243") + min_val = float("-0.335589") + max_val = float("0.381078") + mean = float("0.00115978") + std = float("0.0690689") data = None @@ -111,10 +111,10 @@ class Program_weight_tensor_parameter_11: name = "parameter_11" shape = [32, 64, 1, 1] dtype = "float32" - min_val = float("-0.606749") - max_val = float("0.716587") - mean = float("0.00986633") - std = float("0.187626") + min_val = float("-0.63918") + max_val = float("0.689408") + mean = float("-0.000835581") + std = float("0.190792") data = None @@ -131,10 +131,10 @@ class Program_weight_tensor_parameter_13: name = "parameter_13" shape = [64, 32, 11, 11] dtype = "float32" - min_val = float("-0.705811") - max_val = float("0.38403") - mean = float("-0.00554413") - std = float("0.0356425") + min_val = float("-0.460614") + max_val = float("0.452903") + mean = float("0.000691717") + std = float("0.0339579") data = None @@ -151,10 +151,10 @@ class Program_weight_tensor_parameter_15: name = "parameter_15" shape = [64, 32, 9, 9] dtype = "float32" - min_val = float("-0.771232") - max_val = float("0.366518") - mean = float("-0.0068774") - std = float("0.0408845") + min_val = float("-0.417344") + max_val = float("0.475493") + mean = float("0.000917908") + std = float("0.039257") data = None @@ -171,10 +171,10 @@ class Program_weight_tensor_parameter_17: name = "parameter_17" shape = [64, 32, 7, 7] dtype = "float32" - min_val = float("-0.695085") - max_val = float("0.354321") - mean = float("-0.00856062") - std = float("0.0491656") + min_val = float("-0.429916") + max_val = float("0.447171") + mean = float("0.000883161") + std = float("0.0478161") data = None @@ -191,10 +191,10 @@ class Program_weight_tensor_parameter_19: name = "parameter_19" shape = [64, 32, 5, 5] dtype = "float32" - min_val = float("-0.730604") - max_val = float("0.393171") - mean = float("-0.0118981") - std = float("0.0648549") + min_val = float("-0.462384") + max_val = float("0.417737") + mean = float("0.00143336") + std = float("0.0626403") data = None @@ -211,10 +211,10 @@ class Program_weight_tensor_parameter_21: name = "parameter_21" shape = [64, 32, 3, 3] dtype = "float32" - min_val = float("-0.728807") - max_val = float("0.600694") - mean = float("-0.0154007") - std = float("0.0968162") + min_val = float("-0.561929") + max_val = float("0.524655") + mean = float("0.00179128") + std = float("0.0966917") data = None @@ -231,8 +231,8 @@ class Program_weight_tensor_parameter_23: name = "parameter_23" shape = [64, 32, 1, 1] dtype = "float32" - min_val = float("-0.938968") - max_val = float("0.799272") - mean = float("-0.0267053") - std = float("0.263792") + min_val = float("-0.732336") + max_val = float("0.817109") + mean = float("-0.00702827") + std = float("0.267835") data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt index 7bb3cbfdc..fdc3e59a3 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/graph_hash.txt @@ -1 +1 @@ 
-aa5aaf97436574463d1b03785cb5be47dabceded3443c4667332727c0d99c9e5 \ No newline at end of file +3cd4bc2847c4a2497d037953c86c406ad7c1349e97eb5347ed9bea4bd8958f13 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py index 6db6ec575..0ac057bd6 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/input_meta.py @@ -1,37 +1,49 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [] - dtype = "int64" - data = [16] + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-3.98551") + max_val = float("8.82186") + mean = float("0.029929") + std = float("1.18773") + data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [] - dtype = "int64" - data = [16] + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-5.13918") + max_val = float("11.3318") + mean = float("0.0523877") + std = float("1.64322") + data = None class Program_weight_tensor_data_2: name = "data_2" - shape = [] - dtype = "int32" - data = [19] + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-7.82916") + max_val = float("8.93734") + mean = float("0.0112696") + std = float("1.79837") + data = None class Program_weight_tensor_data_3: name = "data_3" - shape = [16, 96, 32] + shape = [2, 3] dtype = "float32" - min_val = float("-3.32402") - max_val = float("4.62089") - mean = float("0.031122") - std = float("1.015") - data = None + data = [46.9227, 32.7293, 20.2913, 30.6332, 29.8448, 21.058] class Program_weight_tensor_data_4: name = "data_4" - shape = [] - dtype = "int64" - data = [96] + shape = [2, 96, 32] + dtype = "float32" + min_val = float("-6.34525") + max_val = float("9.14017") + mean = float("0.336976") + std = float("1.50118") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py index e9e757f97..50e134d21 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/model.py @@ -5,364 +5,61 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - data_0, - data_1, - data_2, - data_3, - data_4, - ): - # pd_op.cast: (xi64) <- (xi32) - cast_0 = paddle._C_ops.cast(data_2, paddle.int64) - del data_2 + def forward(self, data_0, data_1, data_2, data_3, data_4): + # builtin.combine: ([-1x96x32xf32, -1x96x32xf32, -1x96x32xf32]) <- (-1x96x32xf32, -1x96x32xf32, -1x96x32xf32) + combine_0 = [data_0, data_1, data_2] + del data_0, data_1, data_2 - # pd_op.floor_divide: (xi64) <- (xi64, xi64) - floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xi64) <- (xi64, 1xf32) - scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) - del floor_divide_0, full_0 - - # pd_op.multiply: (xi64) <- (xi64, xi64) - multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) - del scale_0 - - # pd_op.subtract: (xi64) <- 
(xi64, xi64) - subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) - del data_4 - - # pd_op.full: (xi64) <- () - full_1 = paddle._C_ops.full( - [], float("32"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_0 = [data_1, subtract_0, full_1] - del data_1, subtract_0 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) + # pd_op.stack: (-1x96x32x3xf32) <- ([-1x96x32xf32, -1x96x32xf32, -1x96x32xf32]) + stack_0 = paddle._C_ops.stack(combine_0, -1) del combine_0 - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_2, stack_0, paddle.float32 - ) - del full_2, stack_0 - - # pd_op.cast: (-1x96x32xf32) <- (-1x96x32xf32) - cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + # pd_op.softmax: (-1x3xf32) <- (-1x3xf32) + softmax_0 = paddle._C_ops.softmax(data_3, 1) del data_3 - # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) - cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) - - # pd_op.full: (1xi32) <- () - full_3 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x96x32xf32, -1x-1x32xf32]) <- (-1x96x32xf32, -1x-1x32xf32) - combine_1 = [cast_1, cast_2] - del cast_1, cast_2 - - # pd_op.concat: (-1x-1x32xf32) <- ([-1x96x32xf32, -1x-1x32xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_1, full_3) - del combine_1, full_3 - - # pd_op.floor_divide: (xi64) <- (xi64, xi64) - floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_2 = [data_0, floor_divide_1, cast_0, full_1] - del cast_0, floor_divide_1 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_1 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) - reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) - del concat_0, stack_1 - - # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) - transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) - del reshape_0 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) - conv2d_0 = paddle._C_ops.conv2d( - transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_23 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_0 = [1, -1, 1, 1] - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) - del parameter_22 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_0 = paddle._C_ops.add(conv2d_0, reshape_1) - del conv2d_0, reshape_1 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) - conv2d_1 = paddle._C_ops.conv2d( - transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_21 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) - del parameter_20 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_1 = paddle._C_ops.add(conv2d_1, reshape_2) - del conv2d_1, reshape_2 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) - conv2d_2 = paddle._C_ops.conv2d( - transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" - ) - 
del parameter_19 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) - del parameter_18 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_2 = paddle._C_ops.add(conv2d_2, reshape_3) - del conv2d_2, reshape_3 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) - conv2d_3 = paddle._C_ops.conv2d( - transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_17 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) - del parameter_16 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_3 = paddle._C_ops.add(conv2d_3, reshape_4) - del conv2d_3, reshape_4 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) - conv2d_4 = paddle._C_ops.conv2d( - transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_15 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) - del parameter_14 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_4 = paddle._C_ops.add(conv2d_4, reshape_5) - del conv2d_4, reshape_5 - - # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) - conv2d_5 = paddle._C_ops.conv2d( - transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_13, transpose_0 - - # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) - reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) - del parameter_12 - - # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) - add_5 = paddle._C_ops.add(conv2d_5, reshape_6) - del conv2d_5, reshape_6 - - # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) - combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] - del add_0, add_1, add_2, add_3, add_4, add_5 - - # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) - stack_2 = paddle._C_ops.stack(combine_3, -1) - del combine_3 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + full_int_array_0 = [1] - # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) - mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) - del stack_2 + # pd_op.assign: (1xi64) <- (1xi64) + assign_0 = full_int_array_0 - # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) - gelu_0 = paddle._C_ops.gelu(mean_0, False) - del mean_0 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) - conv2d_6 = paddle._C_ops.conv2d( - gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_11 + # pd_op.unsqueeze: (-1x1x3xf32) <- (-1x3xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(softmax_0, full_int_array_0) - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(parameter_10, full_int_array_0) - del parameter_10 + # pd_op.unsqueeze: (-1x1x1x3xf32) <- (-1x1x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_0) - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_6 = paddle._C_ops.add(conv2d_6, reshape_7) - del conv2d_6, reshape_7 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- 
(-1x64x-1x-1xf32, 32x64x3x3xf32) - conv2d_7 = paddle._C_ops.conv2d( - gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_9 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) - del parameter_8 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_7 = paddle._C_ops.add(conv2d_7, reshape_8) - del conv2d_7, reshape_8 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) - conv2d_8 = paddle._C_ops.conv2d( - gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_7 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) - del parameter_6 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_8 = paddle._C_ops.add(conv2d_8, reshape_9) - del conv2d_8, reshape_9 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) - conv2d_9 = paddle._C_ops.conv2d( - gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_5 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) - del parameter_4 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_9 = paddle._C_ops.add(conv2d_9, reshape_10) - del conv2d_9, reshape_10 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) - conv2d_10 = paddle._C_ops.conv2d( - gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_3 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) - del parameter_2 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_10 = paddle._C_ops.add(conv2d_10, reshape_11) - del conv2d_10, reshape_11 - - # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) - conv2d_11 = paddle._C_ops.conv2d( - gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del gelu_0, parameter_1 - - # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) - reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) - del full_int_array_0, parameter_0 - - # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) - add_11 = paddle._C_ops.add(conv2d_11, reshape_12) - del conv2d_11, reshape_12 - - # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) - combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] - del add_10, add_11, add_6, add_7, add_8, add_9 - - # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) - stack_3 = paddle._C_ops.stack(combine_4, -1) - del combine_4 - - # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) - mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) - del full_int_array_1, stack_3 - - # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) - transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) - del mean_1 - - # pd_op.full: (xi64) <- () - full_4 = paddle._C_ops.full( - [], float("-1"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_5 = [data_0, full_4, full_1] - del data_0, full_1, full_4 + # 
pd_op.full_int_array: (4xi64) <- () + full_int_array_1 = [1, 96, 32, 1] - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_5, 0) - del combine_5 + # pd_op.tile: (-1x96x32x3xf32) <- (-1x1x1x3xf32, 4xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_1, full_int_array_1) - # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) - reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) - del stack_4, transpose_1 + # pd_op.multiply: (-1x96x32x3xf32) <- (-1x96x32x3xf32, -1x96x32x3xf32) + multiply_0 = paddle._C_ops.multiply(stack_0, tile_0) # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [0] + full_int_array_2 = [-1] - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [96] + # pd_op.sum: (-1x96x32xf32) <- (-1x96x32x3xf32, 1xi64) + sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_2, None, False) - # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] - ) + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, -1x96x32xf32) + add_0 = paddle._C_ops.add(sum_0, data_4) del ( + assign_0, + data_4, + full_int_array_0, + full_int_array_1, full_int_array_2, - full_int_array_3, - full_with_tensor_0, multiply_0, - reshape_13, + softmax_0, + stack_0, + sum_0, + tile_0, + unsqueeze_0, + unsqueeze_1, ) - return slice_0 + return add_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py index ca5b1d84a..8b1378917 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_4/weight_meta.py @@ -1,238 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [32, 64, 11, 11] - dtype = "float32" - min_val = float("-0.264679") - max_val = float("0.376382") - mean = float("0.000111027") - std = float("0.0245485") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [32, 64, 9, 9] - dtype = "float32" - min_val = float("-0.294357") - max_val = float("0.38058") - mean = float("0.000170868") - std = float("0.0282573") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [32, 64, 7, 7] - dtype = "float32" - min_val = float("-0.283553") - max_val = float("0.370125") - mean = float("0.000228007") - std = float("0.0342913") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [32, 64, 5, 5] - dtype = "float32" - min_val = float("-0.283169") - max_val = float("0.37302") - mean = float("0.000392404") - std = float("0.0451435") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [32, 64, 3, 3] - dtype = "float32" - min_val = float("-0.335589") - max_val = float("0.381078") - mean = float("0.00115978") - std = float("0.0690689") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [32] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [32, 64, 1, 1] - dtype = "float32" - min_val = float("-0.63918") - max_val = float("0.689408") - mean = float("-0.000835581") - std = float("0.190792") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [64, 32, 11, 11] - dtype = "float32" - min_val = float("-0.460614") - max_val = float("0.452903") - mean = float("0.000691717") - std = float("0.0339579") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [64, 32, 9, 9] - dtype = "float32" - min_val = float("-0.417344") - max_val = float("0.475493") - mean = float("0.000917908") - std = float("0.039257") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [64, 32, 7, 7] - dtype = "float32" - min_val = float("-0.429916") - max_val = float("0.447171") - mean = float("0.000883161") - std = float("0.0478161") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [64, 32, 5, 5] - dtype = "float32" - min_val = float("-0.462384") - max_val = float("0.417737") - mean = float("0.00143336") - std = float("0.0626403") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [64, 32, 3, 3] - dtype = "float32" - min_val = float("-0.561929") - max_val = float("0.524655") - mean = float("0.00179128") - std = float("0.0966917") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [64] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [64, 32, 1, 1] - dtype = "float32" - min_val = float("-0.732336") - max_val = float("0.817109") - mean = float("-0.00702827") - std = float("0.267835") - data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt index fdc3e59a3..e38033243 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/graph_hash.txt @@ -1 +1 @@ -3cd4bc2847c4a2497d037953c86c406ad7c1349e97eb5347ed9bea4bd8958f13 \ No newline at end of file 
+9fdac9340430adc480f8c20b6b5eccc3612b6eaf503bd5560b799c4e792bbb48 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py index 0ac057bd6..9992dff58 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/input_meta.py @@ -1,49 +1,89 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 96, 32] + shape = [16, 96, 32] dtype = "float32" - min_val = float("-3.98551") - max_val = float("8.82186") - mean = float("0.029929") - std = float("1.18773") + min_val = float("-2.67673") + max_val = float("4.8069") + mean = float("0.0226662") + std = float("1.04036") data = None class Program_weight_tensor_data_1: name = "data_1" - shape = [2, 96, 32] + shape = [16, 1, 2] dtype = "float32" - min_val = float("-5.13918") - max_val = float("11.3318") - mean = float("0.0523877") - std = float("1.64322") - data = None + data = [ + 0.0152262, + 0.112992, + 0.0131863, + 0.00941724, + 0.00817011, + 0.0263628, + 0.00638176, + 0.0175078, + 0.0234966, + 0.00656714, + 0.0124417, + 0.0132452, + 0.0246282, + 0.00765128, + 0.0301115, + 0.00777976, + 0.0628782, + 0.0259483, + 0.0631641, + 0.047438, + 0.0094697, + 0.00714918, + 0.0377613, + 0.0259842, + 0.624268, + 1.5689, + 0.00617691, + 0.00916586, + 0.00874541, + 0.00718946, + 0.00659123, + 0.00577362, + ] class Program_weight_tensor_data_2: name = "data_2" - shape = [2, 96, 32] - dtype = "float32" - min_val = float("-7.82916") - max_val = float("8.93734") - mean = float("0.0112696") - std = float("1.79837") - data = None - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [2, 3] + shape = [16, 1, 2] dtype = "float32" - data = [46.9227, 32.7293, 20.2913, 30.6332, 29.8448, 21.058] - - -class Program_weight_tensor_data_4: - name = "data_4" - shape = [2, 96, 32] - dtype = "float32" - min_val = float("-6.34525") - max_val = float("9.14017") - mean = float("0.336976") - std = float("1.50118") - data = None + data = [ + 0.304086, + -4.2993, + -0.136159, + -1.50789, + 0.148375, + -1.23287, + 0.424943, + -2.33587, + -0.441654, + -3.51483, + -0.706665, + -2.10642, + -0.297288, + -1.02127, + -0.403002, + -3.52043, + -0.189814, + -0.818702, + -1.26374, + -3.75699, + -0.90199, + -1.11094, + -0.498064, + -2.30995, + -2.3935, + -2.71923, + -0.51015, + -3.49819, + -0.297187, + -1.5618, + 0.158008, + -1.12087, + ] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py index 50e134d21..d28b81f52 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/model.py @@ -5,61 +5,58 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0, data_1, data_2, data_3, data_4): - # builtin.combine: ([-1x96x32xf32, -1x96x32xf32, -1x96x32xf32]) <- (-1x96x32xf32, -1x96x32xf32, -1x96x32xf32) - combine_0 = [data_0, data_1, data_2] - del data_0, data_1, data_2 + def forward(self, parameter_0, parameter_1, data_0, data_1, data_2): + # pd_op.matmul: (-1x96x2xf32) <- (-1x96x32xf32, 32x2xf32) + matmul_0 = paddle._C_ops.matmul(data_0, parameter_1, False, False) + del data_0, parameter_1 - # pd_op.stack: (-1x96x32x3xf32) <- ([-1x96x32xf32, -1x96x32xf32, -1x96x32xf32]) - stack_0 = paddle._C_ops.stack(combine_0, -1) - del combine_0 - - # pd_op.softmax: (-1x3xf32) <- (-1x3xf32) - softmax_0 = paddle._C_ops.softmax(data_3, 1) - del 
data_3 + # pd_op.add: (-1x96x2xf32) <- (-1x96x2xf32, 2xf32) + add_1 = paddle._C_ops.add(matmul_0, parameter_0) + del matmul_0, parameter_0 # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] + full_int_array_0 = [0] - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_1 = [1] - # pd_op.unsqueeze: (-1x1x3xf32) <- (-1x3xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(softmax_0, full_int_array_0) + # pd_op.slice: (-1x2xf32) <- (-1x1x2xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + data_1, [1], full_int_array_0, full_int_array_1, [1], [1] + ) + del data_1 - # pd_op.unsqueeze: (-1x1x1x3xf32) <- (-1x1x3xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(unsqueeze_0, full_int_array_0) + # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(slice_0, full_int_array_1) + del slice_0 - # pd_op.full_int_array: (4xi64) <- () - full_int_array_1 = [1, 96, 32, 1] + # pd_op.full_int_array: (3xi64) <- () + full_int_array_2 = [1, 96, 1] - # pd_op.tile: (-1x96x32x3xf32) <- (-1x1x1x3xf32, 4xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_1, full_int_array_1) + # pd_op.tile: (-1x96x2xf32) <- (-1x1x2xf32, 3xi64) + tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_2) + del unsqueeze_0 - # pd_op.multiply: (-1x96x32x3xf32) <- (-1x96x32x3xf32, -1x96x32x3xf32) - multiply_0 = paddle._C_ops.multiply(stack_0, tile_0) + # pd_op.multiply: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32) + multiply_0 = paddle._C_ops.multiply(add_1, tile_0) + del add_1, tile_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-1] - - # pd_op.sum: (-1x96x32xf32) <- (-1x96x32x3xf32, 1xi64) - sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_2, None, False) - - # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, -1x96x32xf32) - add_0 = paddle._C_ops.add(sum_0, data_4) - del ( - assign_0, - data_4, - full_int_array_0, - full_int_array_1, - full_int_array_2, - multiply_0, - softmax_0, - stack_0, - sum_0, - tile_0, - unsqueeze_0, - unsqueeze_1, + # pd_op.slice: (-1x2xf32) <- (-1x1x2xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_2, [1], full_int_array_0, full_int_array_1, [1], [1] ) + del data_2, full_int_array_0 + + # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(slice_1, full_int_array_1) + del full_int_array_1, slice_1 + + # pd_op.tile: (-1x96x2xf32) <- (-1x1x2xf32, 3xi64) + tile_1 = paddle._C_ops.tile(unsqueeze_1, full_int_array_2) + del full_int_array_2, unsqueeze_1 + + # pd_op.add: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32) + add_0 = paddle._C_ops.add(multiply_0, tile_1) + del multiply_0, tile_1 return add_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py index 8b1378917..2b35288ca 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_5/weight_meta.py @@ -1 +1,16 @@ +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [2] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 2] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt index e38033243..d678b3407 100644 --- 
a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/graph_hash.txt @@ -1 +1 @@ -9fdac9340430adc480f8c20b6b5eccc3612b6eaf503bd5560b799c4e792bbb48 \ No newline at end of file +bd83a2199c0d222bbf4499fa080b49261c19ecd9832433da3dacfd6681f71235 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py index 9992dff58..79774a9fe 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/input_meta.py @@ -1,89 +1,9 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [16, 96, 32] + shape = [2, 96, 32] dtype = "float32" - min_val = float("-2.67673") - max_val = float("4.8069") - mean = float("0.0226662") - std = float("1.04036") + min_val = float("-6.34525") + max_val = float("9.14017") + mean = float("0.336976") + std = float("1.50118") data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [16, 1, 2] - dtype = "float32" - data = [ - 0.0152262, - 0.112992, - 0.0131863, - 0.00941724, - 0.00817011, - 0.0263628, - 0.00638176, - 0.0175078, - 0.0234966, - 0.00656714, - 0.0124417, - 0.0132452, - 0.0246282, - 0.00765128, - 0.0301115, - 0.00777976, - 0.0628782, - 0.0259483, - 0.0631641, - 0.047438, - 0.0094697, - 0.00714918, - 0.0377613, - 0.0259842, - 0.624268, - 1.5689, - 0.00617691, - 0.00916586, - 0.00874541, - 0.00718946, - 0.00659123, - 0.00577362, - ] - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [16, 1, 2] - dtype = "float32" - data = [ - 0.304086, - -4.2993, - -0.136159, - -1.50789, - 0.148375, - -1.23287, - 0.424943, - -2.33587, - -0.441654, - -3.51483, - -0.706665, - -2.10642, - -0.297288, - -1.02127, - -0.403002, - -3.52043, - -0.189814, - -0.818702, - -1.26374, - -3.75699, - -0.90199, - -1.11094, - -0.498064, - -2.30995, - -2.3935, - -2.71923, - -0.51015, - -3.49819, - -0.297187, - -1.5618, - 0.158008, - -1.12087, - ] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py index d28b81f52..468f49791 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/model.py @@ -5,58 +5,105 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, parameter_0, parameter_1, data_0, data_1, data_2): - # pd_op.matmul: (-1x96x2xf32) <- (-1x96x32xf32, 32x2xf32) - matmul_0 = paddle._C_ops.matmul(data_0, parameter_1, False, False) - del data_0, parameter_1 + def forward(self, data_0): + # pd_op.fft_r2c: (-1x49x32xc64) <- (-1x96x32xf32) + fft_r2c_0 = paddle._C_ops.fft_r2c(data_0, [1], "backward", True, True) - # pd_op.add: (-1x96x2xf32) <- (-1x96x2xf32, 2xf32) - add_1 = paddle._C_ops.add(matmul_0, parameter_0) - del matmul_0, parameter_0 + # pd_op.abs: (-1x49x32xf32) <- (-1x49x32xc64) + abs_0 = paddle._C_ops.abs(fft_r2c_0) + + # pd_op.assign: (-1x49x32xf32) <- (-1x49x32xf32) + assign_0 = abs_0 # pd_op.full_int_array: (1xi64) <- () full_int_array_0 = [0] + # pd_op.mean: (49x32xf32) <- (-1x49x32xf32, 1xi64) + mean_0 = paddle._C_ops.mean(abs_0, full_int_array_0, False) + # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] + full_int_array_1 = [-1] - # pd_op.slice: (-1x2xf32) <- (-1x1x2xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - data_1, [1], full_int_array_0, full_int_array_1, [1], [1] + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = 
full_int_array_1 + + # pd_op.mean: (49xf32) <- (49x32xf32, 1xi64) + mean_1 = paddle._C_ops.mean(mean_0, full_int_array_1, False) + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [1] + + # pd_op.set_value_: (49xf32) <- (49xf32, 1xi64, 1xi64, 1xi64) + set_value__0 = paddle._C_ops.set_value_( + mean_1, + full_int_array_0, + full_int_array_2, + full_int_array_2, + [0], + [0], + [], + [1], + [float("0")], ) - del data_1 + del mean_1 - # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(slice_0, full_int_array_1) - del slice_0 + # pd_op.full: (1xi32) <- () + full_0 = paddle._C_ops.full( + [1], float("3"), paddle.int32, paddle.core.CPUPlace() + ) - # pd_op.full_int_array: (3xi64) <- () - full_int_array_2 = [1, 96, 1] + # pd_op.topk: (3xf32, 3xi64) <- (49xf32, 1xi32) + topk_0, topk_1 = (lambda x, f: f(x))( + paddle._C_ops.topk(set_value__0, full_0, -1, True, True), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), + ) + del full_0 - # pd_op.tile: (-1x96x2xf32) <- (-1x1x2xf32, 3xi64) - tile_0 = paddle._C_ops.tile(unsqueeze_0, full_int_array_2) - del unsqueeze_0 + # pd_op.share_data_: (3xi64) <- (3xi64) + share_data__0 = topk_1.detach() + del topk_1 - # pd_op.multiply: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32) - multiply_0 = paddle._C_ops.multiply(add_1, tile_0) - del add_1, tile_0 + # pd_op.cast: (3xi32) <- (3xi64) + cast_0 = paddle._C_ops.cast(share_data__0, paddle.int32) + del share_data__0 - # pd_op.slice: (-1x2xf32) <- (-1x1x2xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - data_2, [1], full_int_array_0, full_int_array_1, [1], [1] - ) - del data_2, full_int_array_0 + # pd_op.shape64: (3xi64) <- (-1x96x32xf32) + shape64_0 = paddle._C_ops.shape64(data_0) + del data_0 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [2] - # pd_op.unsqueeze: (-1x1x2xf32) <- (-1x2xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(slice_1, full_int_array_1) - del full_int_array_1, slice_1 + # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + ) + del full_int_array_2, full_int_array_3, shape64_0 - # pd_op.tile: (-1x96x2xf32) <- (-1x1x2xf32, 3xi64) - tile_1 = paddle._C_ops.tile(unsqueeze_1, full_int_array_2) - del full_int_array_2, unsqueeze_1 + # pd_op.cast: (xi32) <- (xi64) + cast_1 = paddle._C_ops.cast(slice_0, paddle.int32) + del slice_0 - # pd_op.add: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32) - add_0 = paddle._C_ops.add(multiply_0, tile_1) - del multiply_0, tile_1 + # pd_op.floor_divide: (3xi32) <- (xi32, 3xi32) + floor_divide_0 = paddle._C_ops.floor_divide(cast_1, cast_0) + del cast_1 + + # pd_op.mean: (-1x49xf32) <- (-1x49x32xf32, 1xi64) + mean_2 = paddle._C_ops.mean(abs_0, full_int_array_1, False) + + # pd_op.index_select: (-1x3xf32) <- (-1x49xf32, 3xi32) + index_select_0 = paddle._C_ops.index_select(mean_2, cast_0, 1) + del ( + abs_0, + assign_0, + assign_1, + cast_0, + fft_r2c_0, + full_int_array_0, + full_int_array_1, + mean_0, + mean_2, + set_value__0, + ) - return add_0 + return floor_divide_0, index_select_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py index 2b35288ca..8b1378917 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_6/weight_meta.py @@ -1,16 +1 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [2] - dtype = 
"float32" - min_val = float("0") - max_val = float("0.5") - data = None - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [32, 2] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt index d678b3407..80a614cb9 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/graph_hash.txt @@ -1 +1 @@ -bd83a2199c0d222bbf4499fa080b49261c19ecd9832433da3dacfd6681f71235 \ No newline at end of file +f20fa2aea49acb9d72c5b16122ab198b770984aba72a223636b5cad5ab17931d \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py index 79774a9fe..bfa4e6d08 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/input_meta.py @@ -1,9 +1,20 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [2, 96, 32] + shape = [16, 96, 2] dtype = "float32" - min_val = float("-6.34525") - max_val = float("9.14017") - mean = float("0.336976") - std = float("1.50118") + min_val = float("-6.37408") + max_val = float("0.439645") + mean = float("-1.35748") + std = float("1.3301") + data = None + + +class Program_weight_tensor_data_1: + name = "data_1" + shape = [1, 5000, 32] + dtype = "float32" + min_val = float("-1.0") + max_val = float("1.0") + mean = float("0.119002") + std = float("0.697021") data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py index 468f49791..b62e3ba47 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/model.py @@ -5,105 +5,173 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, data_0): - # pd_op.fft_r2c: (-1x49x32xc64) <- (-1x96x32xf32) - fft_r2c_0 = paddle._C_ops.fft_r2c(data_0, [1], "backward", True, True) + def forward(self, parameter_0, data_0, data_1): + # pd_op.full_int_array: (1xi64) <- () + full_int_array_0 = [1] - # pd_op.abs: (-1x49x32xf32) <- (-1x49x32xc64) - abs_0 = paddle._C_ops.abs(fft_r2c_0) + # pd_op.mean: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) - # pd_op.assign: (-1x49x32xf32) <- (-1x49x32xf32) - assign_0 = abs_0 + # pd_op.share_data_: (-1x1x2xf32) <- (-1x1x2xf32) + share_data__0 = mean_0.detach() + del mean_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] + # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) + del data_0 - # pd_op.mean: (49x32xf32) <- (-1x49x32xf32, 1xi64) - mean_0 = paddle._C_ops.mean(abs_0, full_int_array_0, False) + # pd_op.mean: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [-1] + # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) + del mean_1 - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_1 + # pd_op.pow: (-1x96x2xf32) <- (-1x96x2xf32) + pow_0 = paddle._C_ops.pow(subtract_1, float("2")) + del subtract_1 - # pd_op.mean: (49xf32) <- (49x32xf32, 1xi64) - mean_1 = paddle._C_ops.mean(mean_0, 
full_int_array_1, False) + # pd_op.sum: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) + del pow_0 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [1] - - # pd_op.set_value_: (49xf32) <- (49xf32, 1xi64, 1xi64, 1xi64) - set_value__0 = paddle._C_ops.set_value_( - mean_1, - full_int_array_0, - full_int_array_2, - full_int_array_2, - [0], - [0], - [], - [1], - [float("0")], - ) - del mean_1 + # pd_op.numel: (xi64) <- (-1x96x2xf32) + numel_0 = paddle._C_ops.numel(subtract_0) + + # pd_op.cast: (xi64) <- (xi64) + cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) + del numel_0 + + # pd_op.numel: (xi64) <- (-1x1x2xf32) + numel_1 = paddle._C_ops.numel(sum_0) - # pd_op.full: (1xi32) <- () + # pd_op.cast: (xi64) <- (xi64) + cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) + del numel_1 + + # pd_op.cast: (xf32) <- (xi64) + cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) + del cast_0 + + # pd_op.cast: (xf32) <- (xi64) + cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) + del cast_1 + + # pd_op.divide: (xf32) <- (xf32, xf32) + divide_0 = paddle._C_ops.divide(cast_2, cast_3) + del cast_2, cast_3 + + # pd_op.divide: (-1x1x2xf32) <- (-1x1x2xf32, xf32) + divide_1 = paddle._C_ops.divide(sum_0, divide_0) + del divide_0, sum_0 + + # pd_op.full: (1xf32) <- () full_0 = paddle._C_ops.full( - [1], float("3"), paddle.int32, paddle.core.CPUPlace() + [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.topk: (3xf32, 3xi64) <- (49xf32, 1xi32) - topk_0, topk_1 = (lambda x, f: f(x))( - paddle._C_ops.topk(set_value__0, full_0, -1, True, True), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + # pd_op.scale: (-1x1x2xf32) <- (-1x1x2xf32, 1xf32) + scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) + del divide_1, full_0 + + # pd_op.sqrt: (-1x1x2xf32) <- (-1x1x2xf32) + sqrt_0 = paddle._C_ops.sqrt(scale_0) + del scale_0 + + # pd_op.divide: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) + del subtract_0 + + # pd_op.transpose: (-1x2x96xf32) <- (-1x96x2xf32) + transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + + # pd_op.full_int_array: (2xi64) <- () + full_int_array_1 = [3, 4] + + # pd_op.unsqueeze: (-1x2x96x1x1xf32) <- (-1x2x96xf32, 2xi64) + unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_1) + del transpose_0 + + # pd_op.full_int_array: (6xi64) <- () + full_int_array_2 = [0, 0, 0, 0, 1, 1] + + # pd_op.pad3d: (-1x2x98x1x1xf32) <- (-1x2x96x1x1xf32, 6xi64) + pad3d_0 = paddle._C_ops.pad3d( + unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" ) - del full_0 + del full_int_array_2, unsqueeze_0 - # pd_op.share_data_: (3xi64) <- (3xi64) - share_data__0 = topk_1.detach() - del topk_1 + # pd_op.squeeze: (-1x2x98xf32) <- (-1x2x98x1x1xf32, 2xi64) + squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) + del full_int_array_1, pad3d_0 - # pd_op.cast: (3xi32) <- (3xi64) - cast_0 = paddle._C_ops.cast(share_data__0, paddle.int32) - del share_data__0 + # pd_op.assign: (32x2x3xf32) <- (32x2x3xf32) + assign_0 = parameter_0 + del parameter_0 - # pd_op.shape64: (3xi64) <- (-1x96x32xf32) - shape64_0 = paddle._C_ops.shape64(data_0) - del data_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [-2] + + # pd_op.unsqueeze: (32x2x1x3xf32) <- (32x2x3xf32, 1xi64) + unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + del assign_0 + + # pd_op.unsqueeze: (-1x2x1x98xf32) <- (-1x2x98xf32, 1xi64) + 
unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) + del squeeze_0 + + # pd_op.conv2d: (-1x32x1x96xf32) <- (-1x2x1x98xf32, 32x2x1x3xf32) + conv2d_0 = paddle._C_ops.conv2d( + unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del unsqueeze_1, unsqueeze_2 + + # pd_op.squeeze: (-1x32x96xf32) <- (-1x32x1x96xf32, 1xi64) + squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + del conv2d_0, full_int_array_3 + + # pd_op.transpose: (-1x96x32xf32) <- (-1x32x96xf32) + transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) + del squeeze_1 + + # pd_op.shape64: (3xi64) <- (-1x96x2xf32) + shape64_0 = paddle._C_ops.shape64(divide_2) + del divide_2 # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] + full_int_array_4 = [0] # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_2, full_int_array_3, [1], [0] + shape64_0, [0], full_int_array_4, full_int_array_0, [1], [0] ) - del full_int_array_2, full_int_array_3, shape64_0 + del full_int_array_0, shape64_0 - # pd_op.cast: (xi32) <- (xi64) - cast_1 = paddle._C_ops.cast(slice_0, paddle.int32) - del slice_0 + # pd_op.full_int_array: (1xi64) <- () + full_int_array_5 = [96] - # pd_op.floor_divide: (3xi32) <- (xi32, 3xi32) - floor_divide_0 = paddle._C_ops.floor_divide(cast_1, cast_0) - del cast_1 + # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) + slice_1 = paddle._C_ops.slice( + data_1, [1], full_int_array_4, full_int_array_5, [1], [] + ) + del data_1, full_int_array_4, full_int_array_5 + + # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, 1x96x32xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_1) + del slice_1, transpose_1 - # pd_op.mean: (-1x49xf32) <- (-1x49x32xf32, 1xi64) - mean_2 = paddle._C_ops.mean(abs_0, full_int_array_1, False) - - # pd_op.index_select: (-1x3xf32) <- (-1x49xf32, 3xi32) - index_select_0 = paddle._C_ops.index_select(mean_2, cast_0, 1) - del ( - abs_0, - assign_0, - assign_1, - cast_0, - fft_r2c_0, - full_int_array_0, - full_int_array_1, - mean_0, - mean_2, - set_value__0, + # pd_op.full: (1xf32) <- () + full_1 = paddle._C_ops.full( + [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + ) + + # pd_op.dropout: (-1x96x32xf32, -1x96x32xui8) <- (-1x96x32xf32, None, 1xf32) + dropout_0, dropout_1 = (lambda x, f: f(x))( + paddle._C_ops.dropout( + add_0, None, full_1, True, "upscale_in_train", 0, False + ), + lambda out: out if isinstance(out, (list, tuple)) else (out, None), ) + del add_0, full_1, share_data__0, sqrt_0 - return floor_divide_0, index_select_0 + return dropout_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py index 8b1378917..297640557 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_7/weight_meta.py @@ -1 +1,9 @@ - +class Program_weight_tensor_parameter_0: + name = "parameter_0" + shape = [32, 2, 3] + dtype = "float32" + min_val = float("-1.67133") + max_val = float("1.7001") + mean = float("0.0189139") + std = float("0.615287") + data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt index 80a614cb9..7894d329b 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/graph_hash.txt @@ -1 +1 @@ -f20fa2aea49acb9d72c5b16122ab198b770984aba72a223636b5cad5ab17931d \ No 
newline at end of file +f840fc131a017403504e3016d24501e1e6830927f902d749bd588525a36a1980 \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py index bfa4e6d08..085bf32ce 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/input_meta.py @@ -2,10 +2,10 @@ class Program_weight_tensor_data_0: name = "data_0" shape = [16, 96, 2] dtype = "float32" - min_val = float("-6.37408") - max_val = float("0.439645") - mean = float("-1.35748") - std = float("1.3301") + min_val = float("-2.41022") + max_val = float("2.54238") + mean = float("0.061457") + std = float("1.05113") data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py index b62e3ba47..c44ceee47 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/model.py @@ -9,40 +9,40 @@ def forward(self, parameter_0, data_0, data_1): # pd_op.full_int_array: (1xi64) <- () full_int_array_0 = [1] - # pd_op.mean: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + # pd_op.mean: (16x1x2xf32) <- (16x96x2xf32, 1xi64) mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) - # pd_op.share_data_: (-1x1x2xf32) <- (-1x1x2xf32) + # pd_op.share_data_: (16x1x2xf32) <- (16x1x2xf32) share_data__0 = mean_0.detach() del mean_0 - # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + # pd_op.subtract: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) del data_0 - # pd_op.mean: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + # pd_op.mean: (16x1x2xf32) <- (16x96x2xf32, 1xi64) mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) - # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + # pd_op.subtract: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) del mean_1 - # pd_op.pow: (-1x96x2xf32) <- (-1x96x2xf32) + # pd_op.pow: (16x96x2xf32) <- (16x96x2xf32) pow_0 = paddle._C_ops.pow(subtract_1, float("2")) del subtract_1 - # pd_op.sum: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64) + # pd_op.sum: (16x1x2xf32) <- (16x96x2xf32, 1xi64) sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) - del pow_0 + del full_int_array_0, pow_0 - # pd_op.numel: (xi64) <- (-1x96x2xf32) + # pd_op.numel: (xi64) <- (16x96x2xf32) numel_0 = paddle._C_ops.numel(subtract_0) # pd_op.cast: (xi64) <- (xi64) cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) del numel_0 - # pd_op.numel: (xi64) <- (-1x1x2xf32) + # pd_op.numel: (xi64) <- (16x1x2xf32) numel_1 = paddle._C_ops.numel(sum_0) # pd_op.cast: (xi64) <- (xi64) @@ -61,7 +61,7 @@ def forward(self, parameter_0, data_0, data_1): divide_0 = paddle._C_ops.divide(cast_2, cast_3) del cast_2, cast_3 - # pd_op.divide: (-1x1x2xf32) <- (-1x1x2xf32, xf32) + # pd_op.divide: (16x1x2xf32) <- (16x1x2xf32, xf32) divide_1 = paddle._C_ops.divide(sum_0, divide_0) del divide_0, sum_0 @@ -70,38 +70,39 @@ def forward(self, parameter_0, data_0, data_1): [1], float("1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.scale: (-1x1x2xf32) <- (-1x1x2xf32, 1xf32) + # pd_op.scale: (16x1x2xf32) <- (16x1x2xf32, 1xf32) scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) del divide_1, full_0 - # pd_op.sqrt: (-1x1x2xf32) <- (-1x1x2xf32) + # pd_op.sqrt: (16x1x2xf32) <- (16x1x2xf32) sqrt_0 = paddle._C_ops.sqrt(scale_0) del scale_0 - 
# pd_op.divide: (-1x96x2xf32) <- (-1x96x2xf32, -1x1x2xf32) + # pd_op.divide: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) del subtract_0 - # pd_op.transpose: (-1x2x96xf32) <- (-1x96x2xf32) + # pd_op.transpose: (16x2x96xf32) <- (16x96x2xf32) transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) + del divide_2 # pd_op.full_int_array: (2xi64) <- () full_int_array_1 = [3, 4] - # pd_op.unsqueeze: (-1x2x96x1x1xf32) <- (-1x2x96xf32, 2xi64) + # pd_op.unsqueeze: (16x2x96x1x1xf32) <- (16x2x96xf32, 2xi64) unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_1) del transpose_0 # pd_op.full_int_array: (6xi64) <- () full_int_array_2 = [0, 0, 0, 0, 1, 1] - # pd_op.pad3d: (-1x2x98x1x1xf32) <- (-1x2x96x1x1xf32, 6xi64) + # pd_op.pad3d: (16x2x98x1x1xf32) <- (16x2x96x1x1xf32, 6xi64) pad3d_0 = paddle._C_ops.pad3d( unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" ) del full_int_array_2, unsqueeze_0 - # pd_op.squeeze: (-1x2x98xf32) <- (-1x2x98x1x1xf32, 2xi64) + # pd_op.squeeze: (16x2x98xf32) <- (16x2x98x1x1xf32, 2xi64) squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) del full_int_array_1, pad3d_0 @@ -112,66 +113,68 @@ def forward(self, parameter_0, data_0, data_1): # pd_op.full_int_array: (1xi64) <- () full_int_array_3 = [-2] + # pd_op.assign: (1xi64) <- (1xi64) + assign_1 = full_int_array_3 + # pd_op.unsqueeze: (32x2x1x3xf32) <- (32x2x3xf32, 1xi64) unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) - del assign_0 - # pd_op.unsqueeze: (-1x2x1x98xf32) <- (-1x2x98xf32, 1xi64) + # pd_op.unsqueeze: (16x2x1x98xf32) <- (16x2x98xf32, 1xi64) unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) del squeeze_0 - # pd_op.conv2d: (-1x32x1x96xf32) <- (-1x2x1x98xf32, 32x2x1x3xf32) + # pd_op.conv2d: (16x32x1x96xf32) <- (16x2x1x98xf32, 32x2x1x3xf32) conv2d_0 = paddle._C_ops.conv2d( unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) - del unsqueeze_1, unsqueeze_2 - # pd_op.squeeze: (-1x32x96xf32) <- (-1x32x1x96xf32, 1xi64) + # pd_op.squeeze: (16x32x96xf32) <- (16x32x1x96xf32, 1xi64) squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) - del conv2d_0, full_int_array_3 - # pd_op.transpose: (-1x96x32xf32) <- (-1x32x96xf32) + # pd_op.transpose: (16x96x32xf32) <- (16x32x96xf32) transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) del squeeze_1 - # pd_op.shape64: (3xi64) <- (-1x96x2xf32) - shape64_0 = paddle._C_ops.shape64(divide_2) - del divide_2 - # pd_op.full_int_array: (1xi64) <- () full_int_array_4 = [0] - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_4, full_int_array_0, [1], [0] - ) - del full_int_array_0, shape64_0 - # pd_op.full_int_array: (1xi64) <- () full_int_array_5 = [96] # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( + slice_0 = paddle._C_ops.slice( data_1, [1], full_int_array_4, full_int_array_5, [1], [] ) del data_1, full_int_array_4, full_int_array_5 - # pd_op.add: (-1x96x32xf32) <- (-1x96x32xf32, 1x96x32xf32) - add_0 = paddle._C_ops.add(transpose_1, slice_1) - del slice_1, transpose_1 + # pd_op.add: (16x96x32xf32) <- (16x96x32xf32, 1x96x32xf32) + add_0 = paddle._C_ops.add(transpose_1, slice_0) # pd_op.full: (1xf32) <- () full_1 = paddle._C_ops.full( [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() ) - # pd_op.dropout: (-1x96x32xf32, -1x96x32xui8) <- (-1x96x32xf32, None, 1xf32) + # pd_op.dropout: (16x96x32xf32, 16x96x32xui8) <- 
(16x96x32xf32, None, 1xf32) dropout_0, dropout_1 = (lambda x, f: f(x))( paddle._C_ops.dropout( - add_0, None, full_1, True, "upscale_in_train", 0, False + add_0, None, full_1, False, "upscale_in_train", 0, False ), lambda out: out if isinstance(out, (list, tuple)) else (out, None), ) - del add_0, full_1, share_data__0, sqrt_0 + del ( + add_0, + assign_0, + assign_1, + conv2d_0, + full_1, + full_int_array_3, + share_data__0, + slice_0, + sqrt_0, + transpose_1, + unsqueeze_1, + unsqueeze_2, + ) return dropout_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py index 297640557..aa6d23753 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_8/weight_meta.py @@ -2,8 +2,8 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" shape = [32, 2, 3] dtype = "float32" - min_val = float("-1.67133") - max_val = float("1.7001") - mean = float("0.0189139") - std = float("0.615287") + min_val = float("-1.76635") + max_val = float("1.67318") + mean = float("0.0120366") + std = float("0.639064") data = None diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt index 7894d329b..a969e5089 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/graph_hash.txt @@ -1 +1 @@ -f840fc131a017403504e3016d24501e1e6830927f902d749bd588525a36a1980 \ No newline at end of file +25c8345fc2b36e75b1aaf987fe048c9788132c20411101c06ace6f50aebe689e \ No newline at end of file diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py index 085bf32ce..91524202f 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/input_meta.py @@ -1,20 +1,37 @@ class Program_weight_tensor_data_0: name = "data_0" - shape = [16, 96, 2] - dtype = "float32" - min_val = float("-2.41022") - max_val = float("2.54238") - mean = float("0.061457") - std = float("1.05113") - data = None + shape = [] + dtype = "int64" + data = [2] class Program_weight_tensor_data_1: name = "data_1" - shape = [1, 5000, 32] + shape = [] + dtype = "int64" + data = [2] + + +class Program_weight_tensor_data_2: + name = "data_2" + shape = [] + dtype = "int32" + data = [19] + + +class Program_weight_tensor_data_3: + name = "data_3" + shape = [2, 96, 32] dtype = "float32" - min_val = float("-1.0") - max_val = float("1.0") - mean = float("0.119002") - std = float("0.697021") + min_val = float("-2.85988") + max_val = float("4.34883") + mean = float("0.0198675") + std = float("1.02037") data = None + + +class Program_weight_tensor_data_4: + name = "data_4" + shape = [] + dtype = "int64" + data = [96] diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py index c44ceee47..c8f6ecea3 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/model.py @@ -5,176 +5,397 @@ class GraphModule(paddle.nn.Layer): def __init__(self): super().__init__() - def forward(self, parameter_0, data_0, data_1): - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [1] + def forward( + self, + parameter_0, + parameter_1, + parameter_2, + parameter_3, + parameter_4, + parameter_5, + parameter_6, + parameter_7, + 
parameter_8, + parameter_9, + parameter_10, + parameter_11, + parameter_12, + parameter_13, + parameter_14, + parameter_15, + parameter_16, + parameter_17, + parameter_18, + parameter_19, + parameter_20, + parameter_21, + parameter_22, + parameter_23, + data_0, + data_1, + data_2, + data_3, + data_4, + ): + # pd_op.cast: (xi64) <- (xi32) + cast_0 = paddle._C_ops.cast(data_2, paddle.int64) + del data_2 + + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_0 = paddle._C_ops.floor_divide(data_4, cast_0) - # pd_op.mean: (16x1x2xf32) <- (16x96x2xf32, 1xi64) - mean_0 = paddle._C_ops.mean(data_0, full_int_array_0, True) + # pd_op.full: (1xf32) <- () + full_0 = paddle._C_ops.full( + [1], float("1"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.share_data_: (16x1x2xf32) <- (16x1x2xf32) - share_data__0 = mean_0.detach() - del mean_0 + # pd_op.scale: (xi64) <- (xi64, 1xf32) + scale_0 = paddle._C_ops.scale(floor_divide_0, full_0, float("1"), True) + del floor_divide_0, full_0 - # pd_op.subtract: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) - subtract_0 = paddle._C_ops.subtract(data_0, share_data__0) - del data_0 + # pd_op.multiply: (xi64) <- (xi64, xi64) + multiply_0 = paddle._C_ops.multiply(scale_0, cast_0) + del scale_0 - # pd_op.mean: (16x1x2xf32) <- (16x96x2xf32, 1xi64) - mean_1 = paddle._C_ops.mean(subtract_0, full_int_array_0, True) + # pd_op.subtract: (xi64) <- (xi64, xi64) + subtract_0 = paddle._C_ops.subtract(multiply_0, data_4) + del data_4 - # pd_op.subtract: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) - subtract_1 = paddle._C_ops.subtract(subtract_0, mean_1) - del mean_1 + # pd_op.full: (xi64) <- () + full_1 = paddle._C_ops.full( + [], float("32"), paddle.int64, paddle.core.CPUPlace() + ) - # pd_op.pow: (16x96x2xf32) <- (16x96x2xf32) - pow_0 = paddle._C_ops.pow(subtract_1, float("2")) - del subtract_1 + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_0 = [data_1, subtract_0, full_1] + del data_1, subtract_0 - # pd_op.sum: (16x1x2xf32) <- (16x96x2xf32, 1xi64) - sum_0 = paddle._C_ops.sum(pow_0, full_int_array_0, paddle.float32, True) - del full_int_array_0, pow_0 + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_0 = paddle._C_ops.stack(combine_0, 0) + del combine_0 - # pd_op.numel: (xi64) <- (16x96x2xf32) - numel_0 = paddle._C_ops.numel(subtract_0) + # pd_op.full: (1xf32) <- () + full_2 = paddle._C_ops.full( + [1], float("0"), paddle.float32, paddle.core.CPUPlace() + ) - # pd_op.cast: (xi64) <- (xi64) - cast_0 = paddle._C_ops.cast(numel_0, paddle.int64) - del numel_0 + # pd_op.full_with_tensor: (-1x-1x32xf32) <- (1xf32, 3xi64) + full_with_tensor_0 = paddle._C_ops.full_with_tensor( + full_2, stack_0, paddle.float32 + ) + del full_2, stack_0 - # pd_op.numel: (xi64) <- (16x1x2xf32) - numel_1 = paddle._C_ops.numel(sum_0) + # pd_op.cast: (-1x96x32xf32) <- (-1x96x32xf32) + cast_1 = paddle._C_ops.cast(data_3, paddle.float32) + del data_3 - # pd_op.cast: (xi64) <- (xi64) - cast_1 = paddle._C_ops.cast(numel_1, paddle.int64) - del numel_1 + # pd_op.cast: (-1x-1x32xf32) <- (-1x-1x32xf32) + cast_2 = paddle._C_ops.cast(full_with_tensor_0, paddle.float32) - # pd_op.cast: (xf32) <- (xi64) - cast_2 = paddle._C_ops.cast(cast_0, paddle.float32) - del cast_0 + # pd_op.full: (1xi32) <- () + full_3 = paddle._C_ops.full( + [1], float("1"), paddle.int32, paddle.core.CPUPlace() + ) - # pd_op.cast: (xf32) <- (xi64) - cast_3 = paddle._C_ops.cast(cast_1, paddle.float32) - del cast_1 + # builtin.combine: ([-1x96x32xf32, -1x-1x32xf32]) <- (-1x96x32xf32, -1x-1x32xf32) + 
combine_1 = [cast_1, cast_2] - # pd_op.divide: (xf32) <- (xf32, xf32) - divide_0 = paddle._C_ops.divide(cast_2, cast_3) - del cast_2, cast_3 + # pd_op.concat: (-1x-1x32xf32) <- ([-1x96x32xf32, -1x-1x32xf32], 1xi32) + concat_0 = paddle._C_ops.concat(combine_1, full_3) + del combine_1 - # pd_op.divide: (16x1x2xf32) <- (16x1x2xf32, xf32) - divide_1 = paddle._C_ops.divide(sum_0, divide_0) - del divide_0, sum_0 + # pd_op.floor_divide: (xi64) <- (xi64, xi64) + floor_divide_1 = paddle._C_ops.floor_divide(multiply_0, cast_0) - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() + # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) + combine_2 = [data_0, floor_divide_1, cast_0, full_1] + del cast_0, floor_divide_1 + + # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) + stack_1 = paddle._C_ops.stack(combine_2, 0) + del combine_2 + + # pd_op.reshape: (-1x-1x-1x32xf32) <- (-1x-1x32xf32, 4xi64) + reshape_0 = paddle._C_ops.reshape(concat_0, stack_1) + del stack_1 + + # pd_op.transpose: (-1x32x-1x-1xf32) <- (-1x-1x-1x32xf32) + transpose_0 = paddle._C_ops.transpose(reshape_0, [0, 3, 1, 2]) + del reshape_0 + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x1x1xf32) + conv2d_0 = paddle._C_ops.conv2d( + transpose_0, parameter_23, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_23 - # pd_op.scale: (16x1x2xf32) <- (16x1x2xf32, 1xf32) - scale_0 = paddle._C_ops.scale(divide_1, full_0, float("1e-05"), True) - del divide_1, full_0 + # pd_op.full_int_array: (4xi64) <- () + full_int_array_0 = [1, -1, 1, 1] - # pd_op.sqrt: (16x1x2xf32) <- (16x1x2xf32) - sqrt_0 = paddle._C_ops.sqrt(scale_0) - del scale_0 + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_1 = paddle._C_ops.reshape(parameter_22, full_int_array_0) + del parameter_22 - # pd_op.divide: (16x96x2xf32) <- (16x96x2xf32, 16x1x2xf32) - divide_2 = paddle._C_ops.divide(subtract_0, sqrt_0) - del subtract_0 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_0 = paddle._C_ops.add(conv2d_0, reshape_1) - # pd_op.transpose: (16x2x96xf32) <- (16x96x2xf32) - transpose_0 = paddle._C_ops.transpose(divide_2, [0, 2, 1]) - del divide_2 + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x3x3xf32) + conv2d_1 = paddle._C_ops.conv2d( + transpose_0, parameter_21, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_21 - # pd_op.full_int_array: (2xi64) <- () - full_int_array_1 = [3, 4] + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_2 = paddle._C_ops.reshape(parameter_20, full_int_array_0) + del parameter_20 - # pd_op.unsqueeze: (16x2x96x1x1xf32) <- (16x2x96xf32, 2xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_0, full_int_array_1) - del transpose_0 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_1 = paddle._C_ops.add(conv2d_1, reshape_2) - # pd_op.full_int_array: (6xi64) <- () - full_int_array_2 = [0, 0, 0, 0, 1, 1] + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x5x5xf32) + conv2d_2 = paddle._C_ops.conv2d( + transpose_0, parameter_19, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_19 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_3 = paddle._C_ops.reshape(parameter_18, full_int_array_0) + del parameter_18 - # pd_op.pad3d: (16x2x98x1x1xf32) <- (16x2x96x1x1xf32, 6xi64) - pad3d_0 = paddle._C_ops.pad3d( - unsqueeze_0, full_int_array_2, "circular", float("0"), "NCDHW" + # pd_op.add: (-1x64x-1x-1xf32) <- 
(-1x64x-1x-1xf32, 1x64x1x1xf32) + add_2 = paddle._C_ops.add(conv2d_2, reshape_3) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x7x7xf32) + conv2d_3 = paddle._C_ops.conv2d( + transpose_0, parameter_17, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" ) - del full_int_array_2, unsqueeze_0 + del parameter_17 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_4 = paddle._C_ops.reshape(parameter_16, full_int_array_0) + del parameter_16 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_3 = paddle._C_ops.add(conv2d_3, reshape_4) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x9x9xf32) + conv2d_4 = paddle._C_ops.conv2d( + transpose_0, parameter_15, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_15 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_5 = paddle._C_ops.reshape(parameter_14, full_int_array_0) + del parameter_14 + + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_4 = paddle._C_ops.add(conv2d_4, reshape_5) + + # pd_op.conv2d: (-1x64x-1x-1xf32) <- (-1x32x-1x-1xf32, 64x32x11x11xf32) + conv2d_5 = paddle._C_ops.conv2d( + transpose_0, parameter_13, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_13 + + # pd_op.reshape: (1x64x1x1xf32) <- (64xf32, 4xi64) + reshape_6 = paddle._C_ops.reshape(parameter_12, full_int_array_0) + del parameter_12 - # pd_op.squeeze: (16x2x98xf32) <- (16x2x98x1x1xf32, 2xi64) - squeeze_0 = paddle._C_ops.squeeze(pad3d_0, full_int_array_1) - del full_int_array_1, pad3d_0 + # pd_op.add: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32, 1x64x1x1xf32) + add_5 = paddle._C_ops.add(conv2d_5, reshape_6) - # pd_op.assign: (32x2x3xf32) <- (32x2x3xf32) - assign_0 = parameter_0 - del parameter_0 + # builtin.combine: ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) <- (-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32) + combine_3 = [add_0, add_1, add_2, add_3, add_4, add_5] + + # pd_op.stack: (-1x64x-1x-1x6xf32) <- ([-1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32, -1x64x-1x-1xf32]) + stack_2 = paddle._C_ops.stack(combine_3, -1) + del combine_3 # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [-2] + full_int_array_1 = [-1] # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_3 + assign_0 = full_int_array_1 - # pd_op.unsqueeze: (32x2x1x3xf32) <- (32x2x3xf32, 1xi64) - unsqueeze_1 = paddle._C_ops.unsqueeze(assign_0, full_int_array_3) + # pd_op.mean: (-1x64x-1x-1xf32) <- (-1x64x-1x-1x6xf32, 1xi64) + mean_0 = paddle._C_ops.mean(stack_2, full_int_array_1, False) - # pd_op.unsqueeze: (16x2x1x98xf32) <- (16x2x98xf32, 1xi64) - unsqueeze_2 = paddle._C_ops.unsqueeze(squeeze_0, full_int_array_3) - del squeeze_0 + # pd_op.gelu: (-1x64x-1x-1xf32) <- (-1x64x-1x-1xf32) + gelu_0 = paddle._C_ops.gelu(mean_0, False) - # pd_op.conv2d: (16x32x1x96xf32) <- (16x2x1x98xf32, 32x2x1x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - unsqueeze_2, unsqueeze_1, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x1x1xf32) + conv2d_6 = paddle._C_ops.conv2d( + gelu_0, parameter_11, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_11 - # pd_op.squeeze: (16x32x96xf32) <- (16x32x1x96xf32, 1xi64) - squeeze_1 = paddle._C_ops.squeeze(conv2d_0, full_int_array_3) + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_7 = paddle._C_ops.reshape(parameter_10, 
full_int_array_0) + del parameter_10 - # pd_op.transpose: (16x96x32xf32) <- (16x32x96xf32) - transpose_1 = paddle._C_ops.transpose(squeeze_1, [0, 2, 1]) - del squeeze_1 + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_6 = paddle._C_ops.add(conv2d_6, reshape_7) - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [0] + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x3x3xf32) + conv2d_7 = paddle._C_ops.conv2d( + gelu_0, parameter_9, [1, 1], [1, 1], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_9 - # pd_op.full_int_array: (1xi64) <- () - full_int_array_5 = [96] + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_8 = paddle._C_ops.reshape(parameter_8, full_int_array_0) + del parameter_8 - # pd_op.slice: (1x96x32xf32) <- (1x5000x32xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - data_1, [1], full_int_array_4, full_int_array_5, [1], [] + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_7 = paddle._C_ops.add(conv2d_7, reshape_8) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x5x5xf32) + conv2d_8 = paddle._C_ops.conv2d( + gelu_0, parameter_7, [1, 1], [2, 2], "EXPLICIT", [1, 1], 1, "NCHW" ) - del data_1, full_int_array_4, full_int_array_5 + del parameter_7 - # pd_op.add: (16x96x32xf32) <- (16x96x32xf32, 1x96x32xf32) - add_0 = paddle._C_ops.add(transpose_1, slice_0) + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_9 = paddle._C_ops.reshape(parameter_6, full_int_array_0) + del parameter_6 - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_8 = paddle._C_ops.add(conv2d_8, reshape_9) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x7x7xf32) + conv2d_9 = paddle._C_ops.conv2d( + gelu_0, parameter_5, [1, 1], [3, 3], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_5 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_10 = paddle._C_ops.reshape(parameter_4, full_int_array_0) + del parameter_4 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_9 = paddle._C_ops.add(conv2d_9, reshape_10) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x9x9xf32) + conv2d_10 = paddle._C_ops.conv2d( + gelu_0, parameter_3, [1, 1], [4, 4], "EXPLICIT", [1, 1], 1, "NCHW" + ) + del parameter_3 + + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_11 = paddle._C_ops.reshape(parameter_2, full_int_array_0) + del parameter_2 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_10 = paddle._C_ops.add(conv2d_10, reshape_11) + + # pd_op.conv2d: (-1x32x-1x-1xf32) <- (-1x64x-1x-1xf32, 32x64x11x11xf32) + conv2d_11 = paddle._C_ops.conv2d( + gelu_0, parameter_1, [1, 1], [5, 5], "EXPLICIT", [1, 1], 1, "NCHW" ) + del parameter_1 - # pd_op.dropout: (16x96x32xf32, 16x96x32xui8) <- (16x96x32xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_0, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), + # pd_op.reshape: (1x32x1x1xf32) <- (32xf32, 4xi64) + reshape_12 = paddle._C_ops.reshape(parameter_0, full_int_array_0) + del full_int_array_0, parameter_0 + + # pd_op.add: (-1x32x-1x-1xf32) <- (-1x32x-1x-1xf32, 1x32x1x1xf32) + add_11 = paddle._C_ops.add(conv2d_11, reshape_12) + + # builtin.combine: ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, 
-1x32x-1x-1xf32, -1x32x-1x-1xf32]) <- (-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32) + combine_4 = [add_6, add_7, add_8, add_9, add_10, add_11] + + # pd_op.stack: (-1x32x-1x-1x6xf32) <- ([-1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32, -1x32x-1x-1xf32]) + stack_3 = paddle._C_ops.stack(combine_4, -1) + del combine_4 + + # pd_op.mean: (-1x32x-1x-1xf32) <- (-1x32x-1x-1x6xf32, 1xi64) + mean_1 = paddle._C_ops.mean(stack_3, full_int_array_1, False) + + # pd_op.transpose: (-1x-1x-1x32xf32) <- (-1x32x-1x-1xf32) + transpose_1 = paddle._C_ops.transpose(mean_1, [0, 2, 3, 1]) + del mean_1 + + # pd_op.full: (xi64) <- () + full_4 = paddle._C_ops.full( + [], float("-1"), paddle.int64, paddle.core.CPUPlace() + ) + + # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) + combine_5 = [data_0, full_4, full_1] + del data_0, full_1, full_4 + + # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) + stack_4 = paddle._C_ops.stack(combine_5, 0) + del combine_5 + + # pd_op.reshape: (-1x-1x32xf32) <- (-1x-1x-1x32xf32, 3xi64) + reshape_13 = paddle._C_ops.reshape(transpose_1, stack_4) + del stack_4 + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_2 = [0] + + # pd_op.full_int_array: (1xi64) <- () + full_int_array_3 = [96] + + # pd_op.slice: (-1x-1x32xf32) <- (-1x-1x32xf32, 1xi64, 1xi64) + slice_0 = paddle._C_ops.slice( + reshape_13, [1], full_int_array_2, full_int_array_3, [1], [] ) del ( add_0, + add_1, + add_10, + add_11, + add_2, + add_3, + add_4, + add_5, + add_6, + add_7, + add_8, + add_9, assign_0, - assign_1, + cast_1, + cast_2, + concat_0, conv2d_0, - full_1, + conv2d_1, + conv2d_10, + conv2d_11, + conv2d_2, + conv2d_3, + conv2d_4, + conv2d_5, + conv2d_6, + conv2d_7, + conv2d_8, + conv2d_9, + full_3, + full_int_array_1, + full_int_array_2, full_int_array_3, - share_data__0, - slice_0, - sqrt_0, + full_with_tensor_0, + gelu_0, + mean_0, + multiply_0, + reshape_1, + reshape_10, + reshape_11, + reshape_12, + reshape_13, + reshape_2, + reshape_3, + reshape_4, + reshape_5, + reshape_6, + reshape_7, + reshape_8, + reshape_9, + stack_2, + stack_3, + transpose_0, transpose_1, - unsqueeze_1, - unsqueeze_2, ) - return dropout_0 + return slice_0 diff --git a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py index aa6d23753..5fd23c522 100644 --- a/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py +++ b/paddle_samples/PaddleX/TimesNet_ad/subgraph_9/weight_meta.py @@ -1,9 +1,238 @@ class Program_weight_tensor_parameter_0: name = "parameter_0" - shape = [32, 2, 3] + shape = [32] dtype = "float32" - min_val = float("-1.76635") - max_val = float("1.67318") - mean = float("0.0120366") - std = float("0.639064") + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_1: + name = "parameter_1" + shape = [32, 64, 11, 11] + dtype = "float32" + min_val = float("-0.171178") + max_val = float("0.28641") + mean = float("0.000138819") + std = float("0.0208469") + data = None + + +class Program_weight_tensor_parameter_2: + name = "parameter_2" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_3: + name = "parameter_3" + shape = [32, 64, 9, 9] + dtype = "float32" + min_val = float("-0.155314") + max_val = float("0.280429") + mean = float("0.000198439") + std = float("0.0244335") + data = None + + +class 
Program_weight_tensor_parameter_4: + name = "parameter_4" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_5: + name = "parameter_5" + shape = [32, 64, 7, 7] + dtype = "float32" + min_val = float("-0.167556") + max_val = float("0.280153") + mean = float("0.000259141") + std = float("0.0301798") + data = None + + +class Program_weight_tensor_parameter_6: + name = "parameter_6" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_7: + name = "parameter_7" + shape = [32, 64, 5, 5] + dtype = "float32" + min_val = float("-0.169913") + max_val = float("0.283049") + mean = float("0.000434627") + std = float("0.0407218") + data = None + + +class Program_weight_tensor_parameter_8: + name = "parameter_8" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_9: + name = "parameter_9" + shape = [32, 64, 3, 3] + dtype = "float32" + min_val = float("-0.246544") + max_val = float("0.304018") + mean = float("0.00110634") + std = float("0.0640675") + data = None + + +class Program_weight_tensor_parameter_10: + name = "parameter_10" + shape = [32] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_11: + name = "parameter_11" + shape = [32, 64, 1, 1] + dtype = "float32" + min_val = float("-0.612299") + max_val = float("0.645148") + mean = float("-0.00132418") + std = float("0.186337") + data = None + + +class Program_weight_tensor_parameter_12: + name = "parameter_12" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_13: + name = "parameter_13" + shape = [64, 32, 11, 11] + dtype = "float32" + min_val = float("-0.397206") + max_val = float("0.384341") + mean = float("0.000531751") + std = float("0.0285274") + data = None + + +class Program_weight_tensor_parameter_14: + name = "parameter_14" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_15: + name = "parameter_15" + shape = [64, 32, 9, 9] + dtype = "float32" + min_val = float("-0.353936") + max_val = float("0.406931") + mean = float("0.000740455") + std = float("0.0337338") + data = None + + +class Program_weight_tensor_parameter_16: + name = "parameter_16" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_17: + name = "parameter_17" + shape = [64, 32, 7, 7] + dtype = "float32" + min_val = float("-0.347076") + max_val = float("0.378609") + mean = float("0.000666987") + std = float("0.0419889") + data = None + + +class Program_weight_tensor_parameter_18: + name = "parameter_18" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_19: + name = "parameter_19" + shape = [64, 32, 5, 5] + dtype = "float32" + min_val = float("-0.398976") + max_val = float("0.349175") + mean = float("0.00116756") + std = float("0.0565579") + data = None + + +class Program_weight_tensor_parameter_20: + name = "parameter_20" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_21: + name = "parameter_21" + shape = 
[64, 32, 3, 3] + dtype = "float32" + min_val = float("-0.498519") + max_val = float("0.47824") + mean = float("0.00144987") + std = float("0.0900388") + data = None + + +class Program_weight_tensor_parameter_22: + name = "parameter_22" + shape = [64] + dtype = "float32" + min_val = float("0") + max_val = float("0.5") + data = None + + +class Program_weight_tensor_parameter_23: + name = "parameter_23" + shape = [64, 32, 1, 1] + dtype = "float32" + min_val = float("-0.766112") + max_val = float("0.852336") + mean = float("-0.00652652") + std = float("0.263183") data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt deleted file mode 100644 index b0c90384c..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -aabc6b43f8cab6a6c906043868317704e5b8a6e3ea491561262c182c67bbe7e1 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_net.json b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_net.json deleted file mode 100644 index 7e5b42a74..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "ch_RepSVTR_rec", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/input_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/input_meta.py deleted file mode 100644 index 067b41085..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/input_meta.py +++ /dev/null @@ -1,9 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [8, 384, 1, 40] - dtype = "float32" - min_val = float("-10.295") - max_val = float("11.1995") - mean = float("-0.00334601") - std = float("0.504445") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/model.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/model.py deleted file mode 100644 index e1dd20b23..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/model.py +++ /dev/null @@ -1,666 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - data_0, - ): - # pd_op.assign: (-1x384x1x40xf32) <- (-1x384x1x40xf32) - assign_0 = data_0 - del data_0 - - # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x384x1x40xf32, 48x384x1x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - assign_0, parameter_52, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_52 - - # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, 
-1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_0, parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - del batch_norm__0 - - # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_47, swish_0 - - # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_46, - parameter_45, - parameter_44, - parameter_43, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_1, parameter_43, parameter_44, parameter_45, parameter_46 - - # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - del batch_norm__6 - - # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) - shape64_0 = paddle._C_ops.shape64(swish_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del shape64_0 - - # pd_op.flatten: (-1x256x40xf32) <- (-1x256x1x40xf32) - flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) - del swish_1 - - # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - transpose_0, parameter_42, parameter_41, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_41, parameter_42 - - # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) - matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) - del layer_norm_0, parameter_40 - - # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) - add_0 = paddle._C_ops.add(matmul_0, parameter_39) - del matmul_0, parameter_39 - - # pd_op.full_int_array: (5xi64) <- () - full_int_array_2 = [0, -1, 3, 8, 32] - - # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) - reshape_0 = paddle._C_ops.reshape(add_0, full_int_array_2) - del add_0 - - # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) - transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) - del reshape_0 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - - # pd_op.full: (1xf32) <- () 
- full_0 = paddle._C_ops.full( - [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) - scale_0 = paddle._C_ops.scale(slice_1, full_0, float("0"), True) - del slice_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [3] - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del transpose_1 - - # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) - transpose_2 = paddle._C_ops.transpose(slice_2, [0, 1, 3, 2]) - del slice_2 - - # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) - matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) - del scale_0, transpose_2 - - # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) - softmax_1 = paddle._C_ops.softmax(matmul_1, -1) - del matmul_1 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del softmax_1 - - # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) - matmul_2 = paddle._C_ops.matmul(dropout_0, slice_3, False, False) - del dropout_0, slice_3 - - # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) - transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) - del matmul_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [0, -1, 256] - - # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) - reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) - del transpose_3 - - # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) - matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) - del parameter_38, reshape_1 - - # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) - add_1 = paddle._C_ops.add(matmul_3, parameter_37) - del matmul_3, parameter_37 - - # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_1, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_1 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) - add_2 = paddle._C_ops.add(transpose_0, dropout_2) - del dropout_2, transpose_0 - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_2, parameter_36, parameter_35, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_35, parameter_36 - - # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) - matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) - del layer_norm_3, parameter_34 - - # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) - add_3 = paddle._C_ops.add(matmul_4, 
parameter_33) - del matmul_4, parameter_33 - - # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) - swish_2 = paddle._C_ops.swish(add_3) - del add_3 - - # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - swish_2, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del swish_2 - - # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) - matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) - del dropout_4, parameter_32 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) - add_4 = paddle._C_ops.add(matmul_5, parameter_31) - del matmul_5, parameter_31 - - # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_4, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_4 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) - add_5 = paddle._C_ops.add(add_2, dropout_6) - del add_2, dropout_6 - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_5, parameter_30, parameter_29, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_29, parameter_30 - - # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) - matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) - del layer_norm_6, parameter_28 - - # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) - add_6 = paddle._C_ops.add(matmul_6, parameter_27) - del matmul_6, parameter_27 - - # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) - reshape_2 = paddle._C_ops.reshape(add_6, full_int_array_2) - del add_6, full_int_array_2 - - # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) - transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) - del reshape_2 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - - # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_4, full_0, float("0"), True) - del full_0, slice_4 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del full_int_array_4, transpose_4 - - # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) - transpose_5 = paddle._C_ops.transpose(slice_5, [0, 1, 3, 2]) - del slice_5 - - # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) - matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) - del scale_1, transpose_5 - - # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) - softmax_2 = paddle._C_ops.softmax(matmul_7, -1) - del matmul_7 - - # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_2, None, full_1, True, "upscale_in_train", 0, 
False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del softmax_2 - - # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) - matmul_8 = paddle._C_ops.matmul(dropout_8, slice_6, False, False) - del dropout_8, slice_6 - - # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) - transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) - del matmul_8 - - # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) - reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) - del full_int_array_5, transpose_6 - - # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) - matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) - del parameter_26, reshape_3 - - # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) - add_7 = paddle._C_ops.add(matmul_9, parameter_25) - del matmul_9, parameter_25 - - # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_7, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_7 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) - add_8 = paddle._C_ops.add(add_5, dropout_10) - del add_5, dropout_10 - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_8, parameter_24, parameter_23, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_23, parameter_24 - - # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) - matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) - del layer_norm_9, parameter_22 - - # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) - add_9 = paddle._C_ops.add(matmul_10, parameter_21) - del matmul_10, parameter_21 - - # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) - swish_3 = paddle._C_ops.swish(add_9) - del add_9 - - # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - swish_3, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del swish_3 - - # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) - matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) - del dropout_12, parameter_20 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) - add_10 = paddle._C_ops.add(matmul_11, parameter_19) - del matmul_11, parameter_19 - - # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_10, None, full_1, True, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_10, full_1 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) - add_11 = paddle._C_ops.add(add_8, dropout_14) - del add_8, dropout_14 - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_11, parameter_18, parameter_17, float("1e-06"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del 
add_11, parameter_17, parameter_18 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_6 = [0, 1, 40, 256] - - # pd_op.reshape: (-1x1x40x256xf32) <- (-1x40x256xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) - del full_int_array_6, layer_norm_12 - - # pd_op.transpose: (-1x256x1x40xf32) <- (-1x1x40x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) - del reshape_4 - - # pd_op.conv2d: (-1x384x1x40xf32) <- (-1x256x1x40xf32, 384x256x1x1xf32) - conv2d_2 = paddle._C_ops.conv2d( - transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_16, transpose_7 - - # pd_op.batch_norm_: (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_15, - parameter_14, - parameter_13, - parameter_12, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_2, parameter_12, parameter_13, parameter_14, parameter_15 - - # pd_op.swish: (-1x384x1x40xf32) <- (-1x384x1x40xf32) - swish_4 = paddle._C_ops.swish(batch_norm__12) - del batch_norm__12 - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x384x1x40xf32, -1x384x1x40xf32]) <- (-1x384x1x40xf32, -1x384x1x40xf32) - combine_0 = [assign_0, swish_4] - del assign_0, swish_4 - - # pd_op.concat: (-1x768x1x40xf32) <- ([-1x384x1x40xf32, -1x384x1x40xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0, full_2 - - # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x768x1x40xf32, 48x768x1x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - concat_0, parameter_11, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del concat_0, parameter_11 - - # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_10, - parameter_9, - parameter_8, - parameter_7, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del conv2d_3, parameter_10, parameter_7, parameter_8, parameter_9 - - # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) - swish_5 = paddle._C_ops.swish(batch_norm__18) - del batch_norm__18 - - # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6, swish_5 - - # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - True, - float("0.9"), - float("1e-05"), - "NCHW", - True, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, 
None, None, None, None), - ) - del conv2d_4, parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) - swish_6 = paddle._C_ops.swish(batch_norm__24) - del batch_norm__24 - - # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) - shape64_1 = paddle._C_ops.shape64(swish_6) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0, full_int_array_1, shape64_1 - - # pd_op.squeeze: (-1x256x40xf32) <- (-1x256x1x40xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) - del full_int_array_3, swish_6 - - # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) - transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) - del squeeze_0 - - # pd_op.matmul: (-1x40x6625xf32) <- (-1x40x256xf32, 256x6625xf32) - matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) - del parameter_1, transpose_8 - - # pd_op.add: (-1x40x6625xf32) <- (-1x40x6625xf32, 6625xf32) - add_12 = paddle._C_ops.add(matmul_12, parameter_0) - del matmul_12, parameter_0 - - # pd_op.softmax: (-1x40x6625xf32) <- (-1x40x6625xf32) - softmax_0 = paddle._C_ops.softmax(add_12, 2) - del add_12 - - return softmax_0 diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/weight_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/weight_meta.py deleted file mode 100644 index 6fb14baf8..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_1/weight_meta.py +++ /dev/null @@ -1,565 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [6625] - dtype = "float32" - min_val = float("-1.41935") - max_val = float("1.27368") - mean = float("-0.11019") - std = float("0.0878786") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [256, 6625] - dtype = "float32" - min_val = float("-0.617473") - max_val = float("0.2982") - mean = float("-0.149096") - std = float("0.0675106") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [256] - dtype = "float32" - min_val = float("-3.40979") - max_val = float("11.714") - mean = float("0.104559") - std = float("1.48063") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [256] - dtype = "float32" - min_val = float("2.05999") - max_val = float("5.60104") - mean = float("3.15139") - std = float("0.558162") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [256] - dtype = "float32" - min_val = float("0.0208049") - max_val = float("0.168124") - mean = float("0.0550693") - std = float("0.0222448") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [256] - dtype = "float32" - min_val = float("-1.09112") - max_val = float("1.29526") - mean = float("0.1084") - std = float("0.348106") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [256, 48, 1, 1] - dtype = "float32" - min_val = float("-0.474461") - max_val = float("0.294021") - mean = float("0.000243975") - std = float("0.0561915") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class 
Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [48, 768, 1, 3] - dtype = "float32" - min_val = float("-0.577428") - max_val = float("0.783682") - mean = float("-0.000185059") - std = float("0.0577645") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = "float32" - min_val = float("-3.89565") - max_val = float("0.82878") - mean = float("-1.0915") - std = float("0.396622") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("0.474219") - max_val = float("2.97882") - mean = float("0.818474") - std = float("0.239887") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384] - dtype = "float32" - min_val = float("0.26147") - max_val = float("3.29851") - mean = float("0.685883") - std = float("0.336465") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-2.56746") - max_val = float("3.29449") - mean = float("0.159637") - std = float("0.821884") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384, 256, 1, 1] - dtype = "float32" - min_val = float("-0.377203") - max_val = float("0.389368") - mean = float("0.000723909") - std = float("0.0557763") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [256] - dtype = "float32" - min_val = float("-1.21892") - max_val = float("1.37199") - mean = float("-0.0107187") - std = float("0.52562") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [256] - dtype = "float32" - min_val = float("0.258363") - max_val = float("2.00845") - mean = float("1.25772") - std = float("0.278346") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [256] - dtype = "float32" - min_val = float("-4.55309") - max_val = float("1.84669") - mean = float("-0.0756001") - std = float("0.620285") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [512, 256] - dtype = "float32" - min_val = float("-1.04617") - max_val = float("0.775173") - mean = float("0.00143912") - std = float("0.0924598") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [512] - dtype = "float32" - min_val = float("-2.14646") - max_val = float("-0.202872") - mean = float("-1.07305") - std = float("0.295773") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [256, 512] - dtype = "float32" - min_val = float("-0.482498") - max_val = float("0.389524") - mean = float("-0.0156657") - std = float("0.0708137") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [256] - dtype = "float32" - min_val = float("-1.00113") - max_val = float("2.6837") - mean = float("0.317298") - std = float("0.453825") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [256] - dtype = "float32" - min_val = float("0.533612") - max_val = 
float("2.31937") - mean = float("1.51734") - std = float("0.268319") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [256] - dtype = "float32" - min_val = float("-0.88994") - max_val = float("1.47574") - mean = float("0.00263365") - std = float("0.229377") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [256, 256] - dtype = "float32" - min_val = float("-0.42062") - max_val = float("0.398256") - mean = float("-7.65891e-05") - std = float("0.0726304") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [768] - dtype = "float32" - min_val = float("-2.58746") - max_val = float("2.47259") - mean = float("0.0277469") - std = float("0.443597") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [256, 768] - dtype = "float32" - min_val = float("-0.527344") - max_val = float("0.675699") - mean = float("-3.28009e-06") - std = float("0.0655431") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [256] - dtype = "float32" - min_val = float("-1.14101") - max_val = float("0.520725") - mean = float("0.0266235") - std = float("0.270091") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [256] - dtype = "float32" - min_val = float("0.17099") - max_val = float("1.5139") - mean = float("0.919023") - std = float("0.190578") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [256] - dtype = "float32" - min_val = float("-1.20661") - max_val = float("0.479476") - mean = float("-0.0174058") - std = float("0.212405") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [512, 256] - dtype = "float32" - min_val = float("-0.559246") - max_val = float("0.689832") - mean = float("-0.000365566") - std = float("0.0820688") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [512] - dtype = "float32" - min_val = float("-1.99074") - max_val = float("0.0551978") - mean = float("-0.874901") - std = float("0.393527") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [256, 512] - dtype = "float32" - min_val = float("-0.540478") - max_val = float("0.446965") - mean = float("-0.01981") - std = float("0.0759046") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [256] - dtype = "float32" - min_val = float("-1.95708") - max_val = float("1.75208") - mean = float("0.475212") - std = float("0.586366") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [256] - dtype = "float32" - min_val = float("-0.995082") - max_val = float("2.01163") - mean = float("1.36033") - std = float("0.352594") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [256] - dtype = "float32" - min_val = float("-0.893225") - max_val = float("0.529877") - mean = float("-0.0033845") - std = float("0.13904") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [256, 256] - dtype = "float32" - min_val = float("-0.327299") - max_val = float("0.303883") - mean = float("-6.34634e-05") - std = float("0.0642947") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("-1.90202") - 
max_val = float("1.87387") - mean = float("-0.00592556") - std = float("0.40307") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [256, 768] - dtype = "float32" - min_val = float("-0.539869") - max_val = float("0.361071") - mean = float("0.000253672") - std = float("0.0647378") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [256] - dtype = "float32" - min_val = float("-0.882702") - max_val = float("0.780666") - mean = float("0.0548606") - std = float("0.204456") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [256] - dtype = "float32" - min_val = float("-0.373093") - max_val = float("1.6846") - mean = float("0.547736") - std = float("0.291973") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [256] - dtype = "float32" - min_val = float("-2.85883") - max_val = float("3.5484") - mean = float("0.0121269") - std = float("1.13711") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [256] - dtype = "float32" - min_val = float("1.05177") - max_val = float("7.67768") - mean = float("2.19073") - std = float("0.992942") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [256] - dtype = "float32" - min_val = float("0.0393845") - max_val = float("1.42284") - mean = float("0.164221") - std = float("0.17552") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [256] - dtype = "float32" - min_val = float("-3.84192") - max_val = float("3.80399") - mean = float("-0.215458") - std = float("1.1662") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [256, 48, 1, 1] - dtype = "float32" - min_val = float("-0.630628") - max_val = float("0.49176") - mean = float("-0.00229474") - std = float("0.0565867") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [48, 384, 1, 3] - dtype = "float32" - min_val = float("-0.499144") - max_val = float("0.516643") - mean = float("-0.000239031") - std = float("0.0545062") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt deleted file mode 100644 index b414d27fc..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -bdb2f767798ea246af61da39e7ddb151c49633bb78cf34bcf9f6ed6d5f01c51d \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_net.json b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_net.json deleted file mode 100644 index 7e5b42a74..000000000 --- 
a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "ch_RepSVTR_rec", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/input_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/input_meta.py deleted file mode 100644 index 465a976d1..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/input_meta.py +++ /dev/null @@ -1,36 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [5, 384, 1, 40] - dtype = "float32" - min_val = float("-3.50874") - max_val = float("3.79537") - mean = float("-0.00288513") - std = float("0.388304") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [5, 25] - dtype = "int64" - min_val = 0 - max_val = 4925 - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [5] - dtype = "int64" - data = [5, 4, 5, 10, 3] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [5000, 1, 384] - dtype = "float32" - min_val = float("-1.0") - max_val = float("1.0") - mean = float("0.131787") - std = float("0.694717") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/model.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/model.py deleted file mode 100644 index 49d8cc24b..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/model.py +++ /dev/null @@ -1,1139 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - data_0, - data_1, - data_2, - data_3, - ): - # pd_op.flatten: (-1x384x40xf32) <- (-1x384x1x40xf32) - flatten_0 = paddle._C_ops.flatten(data_0, 2, 3) - del data_0 - - # pd_op.transpose: (-1x40x384xf32) <- (-1x384x40xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.matmul: (-1x40x384xf32) <- (-1x40x384xf32, 384x384xf32) - matmul_1 = paddle._C_ops.matmul(transpose_0, parameter_42, False, False) - del parameter_42 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_0 = [] - - # pd_op.max: (xi64) <- (-1xi64, 0xi64) - max_0 = paddle._C_ops.max(data_2, full_int_array_0, False) - del data_2, full_int_array_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xi64) <- (xi64, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_0, float("2"), True) - del full_0, max_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [0] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_1 - - # pd_op.assign: (1xi64) <- 
(1xi64) - assign_3 = full_int_array_1 - - # builtin.combine: ([xi64]) <- (xi64) - combine_0 = [scale_0] - del scale_0 - - # pd_op.stack: (1xi64) <- ([xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.slice: (-1x-1xi64) <- (-1x25xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice(data_1, [1], full_int_array_1, stack_0, [-1], []) - del data_1, stack_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-1] - - # pd_op.slice: (-1x-1xi64) <- (-1x-1xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - slice_0, [1], full_int_array_1, full_int_array_2, [1], [] - ) - del full_int_array_2, slice_0 - - # pd_op.embedding: (-1x-1x384xf32) <- (-1x-1xi64, 6629x384xf32) - embedding_0 = paddle._C_ops.embedding(slice_1, parameter_41, 0, False) - del parameter_41 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("19.5959"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (-1x-1x384xf32) <- (-1x-1x384xf32, 1xf32) - scale_1 = paddle._C_ops.scale(embedding_0, full_1, float("0"), True) - del embedding_0 - - # pd_op.transpose: (-1x-1x384xf32) <- (-1x-1x384xf32) - transpose_1 = paddle._C_ops.transpose(scale_1, [1, 0, 2]) - del scale_1 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_0 = paddle._C_ops.shape64(transpose_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_4 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_5 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_6 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_7 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_8 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_9 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_10 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_11 = full_int_array_3 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_12 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_13 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_14 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_15 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_16 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_17 = full_int_array_4 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_0 - - # builtin.combine: ([xi64]) <- (xi64) - combine_1 = [slice_2] - del slice_2 - - # pd_op.stack: (1xi64) <- ([xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.slice: (-1x1x384xf32) <- (5000x1x384xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice(data_3, [0], full_int_array_1, stack_1, [-1], []) - del data_3, stack_1 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x1x384xf32) - add_0 = paddle._C_ops.add(transpose_1, slice_4) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_18 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_19 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_20 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - 
assign_21 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_22 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_23 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_24 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_25 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_26 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_27 = full_2 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_0, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_0 - - # pd_op.transpose: (-1x-1x384xf32) <- (-1x-1x384xf32) - transpose_2 = paddle._C_ops.transpose(dropout_0, [1, 0, 2]) - del dropout_0 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_1 = paddle._C_ops.shape64(transpose_2) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del shape64_1 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_2 = paddle._C_ops.shape64(transpose_2) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_2 = [slice_6, slice_6] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 - ) - del full_3, stack_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_6, slice_6] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_3 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-inf"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_4, stack_3, paddle.float32 - ) - del full_4, stack_3 - - # pd_op.triu: (-1x-1xf32) <- (-1x-1xf32) - triu_0 = paddle._C_ops.triu(full_with_tensor_1, 1) - del full_with_tensor_1 - - # pd_op.add: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) - add_1 = paddle._C_ops.add(full_with_tensor_0, triu_0) - del full_with_tensor_0, triu_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [0, 1] - - # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(add_1, full_int_array_5) - del add_1, full_int_array_5 - - # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) - matmul_2 = paddle._C_ops.matmul(transpose_2, parameter_40, False, False) - del parameter_40 - - # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) - add_2 = paddle._C_ops.add(matmul_2, parameter_39) - del parameter_39 - - # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("3"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("12"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_8 = 
paddle._C_ops.full( - [], float("32"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) - combine_4 = [full_5, slice_6, full_6, full_7, full_8] - - # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_4, 0) - del combine_4 - - # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) - reshape_0 = paddle._C_ops.reshape(add_2, stack_4) - del stack_4 - - # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) - transpose_3 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) - del reshape_0 - - # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - transpose_3, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - transpose_3, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_28 = full_int_array_6 - - # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - transpose_3, [0], full_int_array_4, full_int_array_6, [1], [0] - ) - - # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) - transpose_4 = paddle._C_ops.transpose(slice_8, [0, 1, 3, 2]) - del slice_8 - - # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) - matmul_3 = paddle._C_ops.matmul(slice_7, transpose_4, False, False) - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_29 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_30 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_31 = full_9 - - # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(matmul_3, full_9, float("0"), True) - del matmul_3 - - # pd_op.add: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1x1x-1x-1xf32) - add_3 = paddle._C_ops.add(scale_2, unsqueeze_0) - - # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) - softmax_0 = paddle._C_ops.softmax(add_3, -1) - del add_3 - - # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) - matmul_4 = paddle._C_ops.matmul(softmax_0, slice_9, False, False) - - # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) - transpose_5 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) - del matmul_4 - - # pd_op.full: (xi64) <- () - full_10 = paddle._C_ops.full( - [], float("384"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_5 = [full_5, slice_6, full_10] - del slice_6 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_5, 0) - del combine_5 - - # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) - reshape_1 = paddle._C_ops.reshape(transpose_5, stack_5) - del stack_5 - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) - matmul_5 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) - del parameter_38 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_4 = paddle._C_ops.add(matmul_5, parameter_37) - del parameter_37 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_4, None, full_2, False, 
"upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_4 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) - add_5 = paddle._C_ops.add(transpose_2, dropout_2) - - # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_5, parameter_36, parameter_35, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_35, parameter_36 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_3 = paddle._C_ops.shape64(layer_norm_0) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del shape64_3 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_4 = paddle._C_ops.shape64(layer_norm_0) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_4 - - # pd_op.shape64: (3xi64) <- (-1x40x384xf32) - shape64_5 = paddle._C_ops.shape64(matmul_1) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_12 = paddle._C_ops.slice( - shape64_5, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del shape64_5 - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) - matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_34, False, False) - del parameter_34 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_6 = paddle._C_ops.add(matmul_6, parameter_33) - del parameter_33 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_6 = [full_5, slice_11, full_7, full_8] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_6 = paddle._C_ops.stack(combine_6, 0) - del combine_6 - - # pd_op.reshape: (-1x-1x12x32xf32) <- (-1x-1x384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(add_6, stack_6) - del stack_6 - - # pd_op.transpose: (-1x12x-1x32xf32) <- (-1x-1x12x32xf32) - transpose_6 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) - del reshape_2 - - # pd_op.matmul: (-1x40x768xf32) <- (-1x40x384xf32, 384x768xf32) - matmul_7 = paddle._C_ops.matmul(matmul_1, parameter_32, False, False) - del parameter_32 - - # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) - add_7 = paddle._C_ops.add(matmul_7, parameter_31) - del parameter_31 - - # pd_op.full_int_array: (5xi64) <- () - full_int_array_7 = [0, 40, 2, 12, 32] - - # pd_op.reshape: (-1x40x2x12x32xf32) <- (-1x40x768xf32, 5xi64) - reshape_3 = paddle._C_ops.reshape(add_7, full_int_array_7) - - # pd_op.transpose: (2x-1x12x40x32xf32) <- (-1x40x2x12x32xf32) - transpose_7 = paddle._C_ops.transpose(reshape_3, [2, 0, 3, 1, 4]) - del reshape_3 - - # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) - slice_13 = paddle._C_ops.slice( - transpose_7, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) - slice_14 = paddle._C_ops.slice( - transpose_7, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.transpose: (-1x12x32x40xf32) <- (-1x12x40x32xf32) - transpose_8 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) - del slice_13 - - # pd_op.matmul: (-1x12x-1x40xf32) <- (-1x12x-1x32xf32, -1x12x32x40xf32) - matmul_8 = paddle._C_ops.matmul(transpose_6, transpose_8, False, False) - - # pd_op.scale: (-1x12x-1x40xf32) <- 
(-1x12x-1x40xf32, 1xf32) - scale_3 = paddle._C_ops.scale(matmul_8, full_9, float("0"), True) - del matmul_8 - - # pd_op.softmax: (-1x12x-1x40xf32) <- (-1x12x-1x40xf32) - softmax_1 = paddle._C_ops.softmax(scale_3, -1) - del scale_3 - - # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x40xf32, -1x12x40x32xf32) - matmul_9 = paddle._C_ops.matmul(softmax_1, slice_14, False, False) - - # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) - transpose_9 = paddle._C_ops.transpose(matmul_9, [0, 2, 1, 3]) - del matmul_9 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_7 = [full_5, slice_11, full_10] - del slice_11 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_7 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) - reshape_4 = paddle._C_ops.reshape(transpose_9, stack_7) - del stack_7 - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) - matmul_10 = paddle._C_ops.matmul(reshape_4, parameter_30, False, False) - del parameter_30 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_8 = paddle._C_ops.add(matmul_10, parameter_29) - del parameter_29 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_8, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_8 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) - add_9 = paddle._C_ops.add(layer_norm_0, dropout_4) - - # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_9, parameter_28, parameter_27, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_27, parameter_28 - - # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) - matmul_11 = paddle._C_ops.matmul(layer_norm_3, parameter_26, False, False) - del parameter_26 - - # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) - add_10 = paddle._C_ops.add(matmul_11, parameter_25) - del parameter_25 - - # pd_op.relu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) - relu_0 = paddle._C_ops.relu(add_10) - del add_10 - - # pd_op.dropout: (-1x-1x1536xf32, -1x-1x1536xui8) <- (-1x-1x1536xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - relu_0, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) - matmul_12 = paddle._C_ops.matmul(dropout_6, parameter_24, False, False) - del parameter_24 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_11 = paddle._C_ops.add(matmul_12, parameter_23) - del parameter_23 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_11, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_11 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - dropout_8, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, 
tuple)) else (out, None), - ) - del dropout_8 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) - add_12 = paddle._C_ops.add(layer_norm_3, dropout_10) - - # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_12, parameter_22, parameter_21, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_21, parameter_22 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_6 = paddle._C_ops.shape64(layer_norm_6) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_15 = paddle._C_ops.slice( - shape64_6, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del shape64_6 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_7 = paddle._C_ops.shape64(layer_norm_6) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_16 = paddle._C_ops.slice( - shape64_7, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_7 - - # pd_op.matmul: (-1x-1x1152xf32) <- (-1x-1x384xf32, 384x1152xf32) - matmul_13 = paddle._C_ops.matmul(layer_norm_6, parameter_20, False, False) - del parameter_20 - - # pd_op.add: (-1x-1x1152xf32) <- (-1x-1x1152xf32, 1152xf32) - add_13 = paddle._C_ops.add(matmul_13, parameter_19) - del parameter_19 - - # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) - combine_8 = [full_5, slice_16, full_6, full_7, full_8] - del full_6 - - # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_8, 0) - del combine_8 - - # pd_op.reshape: (-1x-1x3x12x32xf32) <- (-1x-1x1152xf32, 5xi64) - reshape_5 = paddle._C_ops.reshape(add_13, stack_8) - del stack_8 - - # pd_op.transpose: (3x-1x12x-1x32xf32) <- (-1x-1x3x12x32xf32) - transpose_10 = paddle._C_ops.transpose(reshape_5, [2, 0, 3, 1, 4]) - del reshape_5 - - # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) - slice_17 = paddle._C_ops.slice( - transpose_10, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) - slice_18 = paddle._C_ops.slice( - transpose_10, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.slice: (-1x12x-1x32xf32) <- (3x-1x12x-1x32xf32, 1xi64, 1xi64) - slice_19 = paddle._C_ops.slice( - transpose_10, [0], full_int_array_4, full_int_array_6, [1], [0] - ) - - # pd_op.transpose: (-1x12x32x-1xf32) <- (-1x12x-1x32xf32) - transpose_11 = paddle._C_ops.transpose(slice_18, [0, 1, 3, 2]) - del slice_18 - - # pd_op.matmul: (-1x12x-1x-1xf32) <- (-1x12x-1x32xf32, -1x12x32x-1xf32) - matmul_14 = paddle._C_ops.matmul(slice_17, transpose_11, False, False) - - # pd_op.scale: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(matmul_14, full_9, float("0"), True) - del matmul_14 - - # pd_op.add: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32, 1x1x-1x-1xf32) - add_14 = paddle._C_ops.add(scale_4, unsqueeze_0) - - # pd_op.softmax: (-1x12x-1x-1xf32) <- (-1x12x-1x-1xf32) - softmax_2 = paddle._C_ops.softmax(add_14, -1) - del add_14 - - # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x-1xf32, -1x12x-1x32xf32) - matmul_15 = paddle._C_ops.matmul(softmax_2, slice_19, False, False) - - # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) - transpose_12 = paddle._C_ops.transpose(matmul_15, [0, 2, 1, 3]) - del matmul_15 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_9 = [full_5, slice_16, 
full_10] - del slice_16 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_9 = paddle._C_ops.stack(combine_9, 0) - del combine_9 - - # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) - reshape_6 = paddle._C_ops.reshape(transpose_12, stack_9) - del stack_9 - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) - matmul_16 = paddle._C_ops.matmul(reshape_6, parameter_18, False, False) - del parameter_18 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_15 = paddle._C_ops.add(matmul_16, parameter_17) - del parameter_17 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_15, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_15 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) - add_16 = paddle._C_ops.add(layer_norm_6, dropout_12) - - # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_16, parameter_16, parameter_15, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_15, parameter_16 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_8 = paddle._C_ops.shape64(layer_norm_9) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_20 = paddle._C_ops.slice( - shape64_8, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del shape64_8 - - # pd_op.shape64: (3xi64) <- (-1x-1x384xf32) - shape64_9 = paddle._C_ops.shape64(layer_norm_9) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_21 = paddle._C_ops.slice( - shape64_9, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_9 - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) - matmul_17 = paddle._C_ops.matmul(layer_norm_9, parameter_14, False, False) - del parameter_14 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_17 = paddle._C_ops.add(matmul_17, parameter_13) - del parameter_13 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_10 = [full_5, slice_21, full_7, full_8] - del full_7, full_8 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_10 = paddle._C_ops.stack(combine_10, 0) - del combine_10 - - # pd_op.reshape: (-1x-1x12x32xf32) <- (-1x-1x384xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(add_17, stack_10) - del stack_10 - - # pd_op.transpose: (-1x12x-1x32xf32) <- (-1x-1x12x32xf32) - transpose_13 = paddle._C_ops.transpose(reshape_7, [0, 2, 1, 3]) - del reshape_7 - - # pd_op.matmul: (-1x40x768xf32) <- (-1x40x384xf32, 384x768xf32) - matmul_18 = paddle._C_ops.matmul(matmul_1, parameter_12, False, False) - del parameter_12 - - # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) - add_18 = paddle._C_ops.add(matmul_18, parameter_11) - del parameter_11 - - # pd_op.reshape: (-1x40x2x12x32xf32) <- (-1x40x768xf32, 5xi64) - reshape_8 = paddle._C_ops.reshape(add_18, full_int_array_7) - del full_int_array_7 - - # pd_op.transpose: (2x-1x12x40x32xf32) <- (-1x40x2x12x32xf32) - transpose_14 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) - del reshape_8 - - # pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) - slice_22 = paddle._C_ops.slice( - transpose_14, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del full_int_array_1 - - # 
pd_op.slice: (-1x12x40x32xf32) <- (2x-1x12x40x32xf32, 1xi64, 1xi64) - slice_23 = paddle._C_ops.slice( - transpose_14, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del full_int_array_3, full_int_array_4 - - # pd_op.transpose: (-1x12x32x40xf32) <- (-1x12x40x32xf32) - transpose_15 = paddle._C_ops.transpose(slice_22, [0, 1, 3, 2]) - del slice_22 - - # pd_op.matmul: (-1x12x-1x40xf32) <- (-1x12x-1x32xf32, -1x12x32x40xf32) - matmul_19 = paddle._C_ops.matmul(transpose_13, transpose_15, False, False) - - # pd_op.scale: (-1x12x-1x40xf32) <- (-1x12x-1x40xf32, 1xf32) - scale_5 = paddle._C_ops.scale(matmul_19, full_9, float("0"), True) - del matmul_19 - - # pd_op.softmax: (-1x12x-1x40xf32) <- (-1x12x-1x40xf32) - softmax_3 = paddle._C_ops.softmax(scale_5, -1) - del scale_5 - - # pd_op.matmul: (-1x12x-1x32xf32) <- (-1x12x-1x40xf32, -1x12x40x32xf32) - matmul_20 = paddle._C_ops.matmul(softmax_3, slice_23, False, False) - - # pd_op.transpose: (-1x-1x12x32xf32) <- (-1x12x-1x32xf32) - transpose_16 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) - del matmul_20 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_11 = [full_5, slice_21, full_10] - del full_10, full_5, slice_21 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_11 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.reshape: (-1x-1x384xf32) <- (-1x-1x12x32xf32, 3xi64) - reshape_9 = paddle._C_ops.reshape(transpose_16, stack_11) - del stack_11 - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x384xf32, 384x384xf32) - matmul_21 = paddle._C_ops.matmul(reshape_9, parameter_10, False, False) - del parameter_10 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_19 = paddle._C_ops.add(matmul_21, parameter_9) - del parameter_9 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_19, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_19 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) - add_20 = paddle._C_ops.add(layer_norm_9, dropout_14) - - # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_20, parameter_8, parameter_7, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_7, parameter_8 - - # pd_op.matmul: (-1x-1x1536xf32) <- (-1x-1x384xf32, 384x1536xf32) - matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_6, False, False) - del parameter_6 - - # pd_op.add: (-1x-1x1536xf32) <- (-1x-1x1536xf32, 1536xf32) - add_21 = paddle._C_ops.add(matmul_22, parameter_5) - del parameter_5 - - # pd_op.relu: (-1x-1x1536xf32) <- (-1x-1x1536xf32) - relu_1 = paddle._C_ops.relu(add_21) - del add_21 - - # pd_op.dropout: (-1x-1x1536xf32, -1x-1x1536xui8) <- (-1x-1x1536xf32, None, 1xf32) - dropout_16, dropout_17 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - relu_1, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (-1x-1x384xf32) <- (-1x-1x1536xf32, 1536x384xf32) - matmul_23 = paddle._C_ops.matmul(dropout_16, parameter_4, False, False) - del parameter_4 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, 384xf32) - add_22 = paddle._C_ops.add(matmul_23, parameter_3) - del 
parameter_3 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_18, dropout_19 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_22, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_22 - - # pd_op.dropout: (-1x-1x384xf32, -1x-1x384xui8) <- (-1x-1x384xf32, None, 1xf32) - dropout_20, dropout_21 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - dropout_18, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del dropout_18 - - # pd_op.add: (-1x-1x384xf32) <- (-1x-1x384xf32, -1x-1x384xf32) - add_23 = paddle._C_ops.add(layer_norm_12, dropout_20) - - # pd_op.layer_norm: (-1x-1x384xf32, -1x-1xf32, -1x-1xf32) <- (-1x-1x384xf32, 384xf32, 384xf32) - layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_23, parameter_2, parameter_1, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_1, parameter_2 - - # pd_op.matmul: (-1x-1x6629xf32) <- (-1x-1x384xf32, 384x6629xf32) - matmul_0 = paddle._C_ops.matmul(layer_norm_15, parameter_0, False, False) - del ( - add_12, - add_13, - add_16, - add_17, - add_18, - add_2, - add_20, - add_23, - add_5, - add_6, - add_7, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_18, - assign_19, - assign_2, - assign_20, - assign_21, - assign_22, - assign_23, - assign_24, - assign_25, - assign_26, - assign_27, - assign_28, - assign_29, - assign_3, - assign_30, - assign_31, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - dropout_1, - dropout_10, - dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_16, - dropout_17, - dropout_19, - dropout_2, - dropout_20, - dropout_21, - dropout_3, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_9, - full_1, - full_2, - full_9, - full_int_array_6, - layer_norm_0, - layer_norm_1, - layer_norm_10, - layer_norm_11, - layer_norm_12, - layer_norm_13, - layer_norm_14, - layer_norm_15, - layer_norm_16, - layer_norm_17, - layer_norm_2, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_1, - matmul_10, - matmul_11, - matmul_12, - matmul_13, - matmul_16, - matmul_17, - matmul_18, - matmul_2, - matmul_21, - matmul_22, - matmul_23, - matmul_5, - matmul_6, - matmul_7, - parameter_0, - relu_0, - relu_1, - reshape_1, - reshape_4, - reshape_6, - reshape_9, - scale_2, - scale_4, - slice_1, - slice_14, - slice_17, - slice_19, - slice_23, - slice_4, - slice_7, - slice_9, - softmax_0, - softmax_1, - softmax_2, - softmax_3, - transpose_0, - transpose_1, - transpose_10, - transpose_11, - transpose_12, - transpose_13, - transpose_14, - transpose_15, - transpose_16, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - transpose_9, - unsqueeze_0, - ) - - return matmul_0 diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/weight_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/weight_meta.py deleted file mode 100644 index e389b1a8e..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_2/weight_meta.py +++ /dev/null @@ -1,471 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [384, 6629] - dtype = "float32" 
- min_val = float("-0.351484") - max_val = float("0.702688") - mean = float("0.0277474") - std = float("0.043599") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [384] - dtype = "float32" - min_val = float("-0.969039") - max_val = float("1.92469") - mean = float("-0.134236") - std = float("0.298058") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [384] - dtype = "float32" - min_val = float("0.234385") - max_val = float("4.4674") - mean = float("3.29625") - std = float("0.887819") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [384] - dtype = "float32" - min_val = float("-0.0639392") - max_val = float("0.106081") - mean = float("-4.18515e-05") - std = float("0.0118406") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [1536, 384] - dtype = "float32" - min_val = float("-3.43228") - max_val = float("3.15796") - mean = float("-0.00241164") - std = float("0.157565") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [1536] - dtype = "float32" - min_val = float("-3.31375") - max_val = float("0.926513") - mean = float("-1.129") - std = float("0.302978") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [384, 1536] - dtype = "float32" - min_val = float("-0.312126") - max_val = float("0.505538") - mean = float("0.012929") - std = float("0.0764173") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [384] - dtype = "float32" - min_val = float("-13.756") - max_val = float("0.36275") - mean = float("-0.132106") - std = float("0.724668") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = float("0.101265") - max_val = float("2.58581") - mean = float("0.378491") - std = float("0.131247") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384] - dtype = "float32" - min_val = float("-0.934299") - max_val = float("0.48667") - mean = float("-0.0142857") - std = float("0.129296") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.555835") - max_val = float("0.516169") - mean = float("-9.3906e-05") - std = float("0.0657883") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [768] - dtype = "float32" - min_val = float("-1.217") - max_val = float("1.79317") - mean = float("0.0100228") - std = float("0.329571") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384, 768] - dtype = "float32" - min_val = float("-0.665119") - max_val = float("0.621938") - mean = float("-0.000122967") - std = float("0.0636656") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("-3.25041") - max_val = float("3.48689") - mean = float("0.000539355") - std = float("0.796801") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.498011") - max_val = float("0.484597") - mean = float("-3.2408e-05") - std = float("0.0680232") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val 
= float("-0.700518") - max_val = float("2.86425") - mean = float("0.0299052") - std = float("0.207136") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384] - dtype = "float32" - min_val = float("-0.0969785") - max_val = float("2.76296") - mean = float("1.19528") - std = float("0.383211") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("-6.4183") - max_val = float("6.62679") - mean = float("0.0147666") - std = float("0.706506") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384, 384] - dtype = "float32" - min_val = float("-1.3393") - max_val = float("1.26757") - mean = float("-0.000168767") - std = float("0.0735479") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [1152] - dtype = "float32" - min_val = float("-3.65649") - max_val = float("3.95875") - mean = float("0.019323") - std = float("0.474384") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384, 1152] - dtype = "float32" - min_val = float("-0.509574") - max_val = float("0.457092") - mean = float("-4.52726e-05") - std = float("0.0650373") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("-1.12659") - max_val = float("0.292934") - mean = float("0.00757793") - std = float("0.0927688") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("-0.00271219") - max_val = float("1.01901") - mean = float("0.84283") - std = float("0.130932") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.197205") - max_val = float("0.457012") - mean = float("0.000321222") - std = float("0.04599") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [1536, 384] - dtype = "float32" - min_val = float("-3.17563") - max_val = float("1.87986") - mean = float("-9.41858e-05") - std = float("0.0390491") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [1536] - dtype = "float32" - min_val = float("-2.61165") - max_val = float("-0.080146") - mean = float("-0.640018") - std = float("0.580882") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 1536] - dtype = "float32" - min_val = float("-0.347415") - max_val = float("0.401366") - mean = float("0.00329947") - std = float("0.0384391") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [384] - dtype = "float32" - min_val = float("-5.19483") - max_val = float("4.39995") - mean = float("-0.162294") - std = float("0.730859") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("0.508543") - max_val = float("5.63012") - mean = float("1.03757") - std = float("0.382432") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.972005") - max_val = float("0.423718") - mean = float("-0.00471045") - std = float("0.0801598") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384, 384] - dtype = 
"float32" - min_val = float("-0.750938") - max_val = float("1.42995") - mean = float("0.000105277") - std = float("0.0575386") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [768] - dtype = "float32" - min_val = float("-0.986446") - max_val = float("1.72039") - mean = float("0.0137758") - std = float("0.267347") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384, 768] - dtype = "float32" - min_val = float("-0.441004") - max_val = float("0.36639") - mean = float("5.4273e-05") - std = float("0.0550247") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("-5.40809") - max_val = float("3.65363") - mean = float("-0.0052135") - std = float("0.758904") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.428969") - max_val = float("0.390564") - mean = float("0.000111454") - std = float("0.0542453") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-1.03026") - max_val = float("2.33983") - mean = float("0.0215944") - std = float("0.333446") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384] - dtype = "float32" - min_val = float("0.0357924") - max_val = float("6.25379") - mean = float("1.28284") - std = float("0.600466") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("-8.05629") - max_val = float("5.74326") - mean = float("-0.0115213") - std = float("0.510238") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.495021") - max_val = float("0.595344") - mean = float("-7.17572e-05") - std = float("0.0511101") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [1152] - dtype = "float32" - min_val = float("-7.17506") - max_val = float("6.91719") - mean = float("-0.00895858") - std = float("1.53632") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384, 1152] - dtype = "float32" - min_val = float("-0.272265") - max_val = float("0.296452") - mean = float("7.50426e-05") - std = float("0.0481593") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [6629, 384] - dtype = "float32" - min_val = float("-6.43765") - max_val = float("3.98165") - mean = float("-0.00702058") - std = float("0.0513204") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.760943") - max_val = float("0.740528") - mean = float("-0.000140398") - std = float("0.0638947") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt deleted file mode 100644 index af29ab847..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -e94dbe68d55ee8767835c55f4344f98e5a19aa1bded3e6e4ba11a1c210231188 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_net.json b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_net.json deleted 
file mode 100644 index 7e5b42a74..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "ch_RepSVTR_rec", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/input_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/input_meta.py deleted file mode 100644 index 5faa1a5b8..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/input_meta.py +++ /dev/null @@ -1,36 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [4, 384, 1, 40] - dtype = "float32" - min_val = float("-3.4594") - max_val = float("2.90526") - mean = float("-0.00288512") - std = float("0.367055") - data = None - - -class Program_weight_tensor_data_1: - name = "data_1" - shape = [4, 25] - dtype = "int64" - min_val = 0 - max_val = 6627 - data = None - - -class Program_weight_tensor_data_2: - name = "data_2" - shape = [4] - dtype = "int64" - data = [3, 4, 3, 3] - - -class Program_weight_tensor_data_3: - name = "data_3" - shape = [5000, 1, 384] - dtype = "float32" - min_val = float("-1.0") - max_val = float("1.0") - mean = float("0.131787") - std = float("0.694717") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/model.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/model.py deleted file mode 100644 index b08d01ec8..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/model.py +++ /dev/null @@ -1,1089 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - data_0, - data_1, - data_2, - data_3, - ): - # pd_op.flatten: (4x384x40xf32) <- (4x384x1x40xf32) - flatten_0 = paddle._C_ops.flatten(data_0, 2, 3) - del data_0 - - # pd_op.transpose: (4x40x384xf32) <- (4x384x40xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.matmul: (4x40x384xf32) <- (4x40x384xf32, 384x384xf32) - matmul_1 = paddle._C_ops.matmul(transpose_0, parameter_42, False, False) - del parameter_42 - - # pd_op.full_int_array: (0xi64) <- () - full_int_array_0 = [] - - # pd_op.max: (xi64) <- (4xi64, 0xi64) - max_0 = paddle._C_ops.max(data_2, full_int_array_0, False) - del data_2, full_int_array_0 - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (xi64) <- (xi64, 1xf32) - scale_0 = paddle._C_ops.scale(max_0, full_0, float("2"), True) - del full_0, max_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [0] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_0 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_1 
- - # pd_op.assign: (1xi64) <- (1xi64) - assign_3 = full_int_array_1 - - # builtin.combine: ([xi64]) <- (xi64) - combine_0 = [scale_0] - del scale_0 - - # pd_op.stack: (1xi64) <- ([xi64]) - stack_0 = paddle._C_ops.stack(combine_0, 0) - del combine_0 - - # pd_op.slice: (4x-1xi64) <- (4x25xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice(data_1, [1], full_int_array_1, stack_0, [-1], []) - del data_1, stack_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [-1] - - # pd_op.slice: (4x-1xi64) <- (4x-1xi64, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - slice_0, [1], full_int_array_1, full_int_array_2, [1], [] - ) - del full_int_array_2, slice_0 - - # pd_op.embedding: (4x-1x384xf32) <- (4x-1xi64, 6629x384xf32) - embedding_0 = paddle._C_ops.embedding(slice_1, parameter_41, 0, False) - del parameter_41 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("19.5959"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.scale: (4x-1x384xf32) <- (4x-1x384xf32, 1xf32) - scale_1 = paddle._C_ops.scale(embedding_0, full_1, float("0"), True) - del embedding_0 - - # pd_op.transpose: (-1x4x384xf32) <- (4x-1x384xf32) - transpose_1 = paddle._C_ops.transpose(scale_1, [1, 0, 2]) - del scale_1 - - # pd_op.shape64: (3xi64) <- (-1x4x384xf32) - shape64_0 = paddle._C_ops.shape64(transpose_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_4 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_5 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_6 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_7 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_8 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_9 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_10 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_11 = full_int_array_3 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del shape64_0 - - # builtin.combine: ([xi64]) <- (xi64) - combine_1 = [slice_2] - del slice_2 - - # pd_op.stack: (1xi64) <- ([xi64]) - stack_1 = paddle._C_ops.stack(combine_1, 0) - del combine_1 - - # pd_op.slice: (-1x1x384xf32) <- (5000x1x384xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice(data_3, [0], full_int_array_1, stack_1, [-1], []) - del data_3, stack_1 - - # pd_op.add: (-1x4x384xf32) <- (-1x4x384xf32, -1x1x384xf32) - add_0 = paddle._C_ops.add(transpose_1, slice_3) - - # pd_op.full: (1xf32) <- () - full_2 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_12 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_13 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_14 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_15 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_16 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_17 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_18 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_19 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_20 = full_2 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_21 = full_2 - - # pd_op.dropout: (-1x4x384xf32, -1x4x384xui8) <- (-1x4x384xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_0, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if 
isinstance(out, (list, tuple)) else (out, None), - ) - del add_0 - - # pd_op.transpose: (4x-1x384xf32) <- (-1x4x384xf32) - transpose_2 = paddle._C_ops.transpose(dropout_0, [1, 0, 2]) - del dropout_0 - - # pd_op.shape64: (3xi64) <- (4x-1x384xf32) - shape64_1 = paddle._C_ops.shape64(transpose_2) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [2] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_22 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_23 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_24 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_25 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_26 = full_int_array_4 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_27 = full_int_array_4 - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_1 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_2 = [slice_4, slice_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_2 = paddle._C_ops.stack(combine_2, 0) - del combine_2 - - # pd_op.full: (1xf32) <- () - full_3 = paddle._C_ops.full( - [1], float("0"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) - full_with_tensor_0 = paddle._C_ops.full_with_tensor( - full_3, stack_2, paddle.float32 - ) - del full_3, stack_2 - - # builtin.combine: ([xi64, xi64]) <- (xi64, xi64) - combine_3 = [slice_4, slice_4] - - # pd_op.stack: (2xi64) <- ([xi64, xi64]) - stack_3 = paddle._C_ops.stack(combine_3, 0) - del combine_3 - - # pd_op.full: (1xf32) <- () - full_4 = paddle._C_ops.full( - [1], float("-inf"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.full_with_tensor: (-1x-1xf32) <- (1xf32, 2xi64) - full_with_tensor_1 = paddle._C_ops.full_with_tensor( - full_4, stack_3, paddle.float32 - ) - del full_4, stack_3 - - # pd_op.triu: (-1x-1xf32) <- (-1x-1xf32) - triu_0 = paddle._C_ops.triu(full_with_tensor_1, 1) - del full_with_tensor_1 - - # pd_op.add: (-1x-1xf32) <- (-1x-1xf32, -1x-1xf32) - add_1 = paddle._C_ops.add(full_with_tensor_0, triu_0) - del full_with_tensor_0, triu_0 - - # pd_op.full_int_array: (2xi64) <- () - full_int_array_5 = [0, 1] - - # pd_op.unsqueeze: (1x1x-1x-1xf32) <- (-1x-1xf32, 2xi64) - unsqueeze_0 = paddle._C_ops.unsqueeze(add_1, full_int_array_5) - del add_1, full_int_array_5 - - # pd_op.matmul: (4x-1x1152xf32) <- (4x-1x384xf32, 384x1152xf32) - matmul_2 = paddle._C_ops.matmul(transpose_2, parameter_40, False, False) - del parameter_40 - - # pd_op.add: (4x-1x1152xf32) <- (4x-1x1152xf32, 1152xf32) - add_2 = paddle._C_ops.add(matmul_2, parameter_39) - del parameter_39 - - # pd_op.full: (xi64) <- () - full_5 = paddle._C_ops.full( - [], float("0"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_6 = paddle._C_ops.full( - [], float("3"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_7 = paddle._C_ops.full( - [], float("12"), paddle.int64, paddle.core.CPUPlace() - ) - - # pd_op.full: (xi64) <- () - full_8 = paddle._C_ops.full( - [], float("32"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) - combine_4 = [full_5, slice_4, full_6, full_7, full_8] - - # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) - stack_4 = paddle._C_ops.stack(combine_4, 0) - del combine_4 - - # pd_op.reshape: (4x-1x3x12x32xf32) <- (4x-1x1152xf32, 5xi64) - 
reshape_0 = paddle._C_ops.reshape(add_2, stack_4) - del stack_4 - - # pd_op.transpose: (3x4x12x-1x32xf32) <- (4x-1x3x12x32xf32) - transpose_3 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) - del reshape_0 - - # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - transpose_3, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - transpose_3, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_6 = [3] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_28 = full_int_array_6 - - # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - transpose_3, [0], full_int_array_4, full_int_array_6, [1], [0] - ) - - # pd_op.transpose: (4x12x32x-1xf32) <- (4x12x-1x32xf32) - transpose_4 = paddle._C_ops.transpose(slice_6, [0, 1, 3, 2]) - del slice_6 - - # pd_op.matmul: (4x12x-1x-1xf32) <- (4x12x-1x32xf32, 4x12x32x-1xf32) - matmul_3 = paddle._C_ops.matmul(slice_5, transpose_4, False, False) - - # pd_op.full: (1xf32) <- () - full_9 = paddle._C_ops.full( - [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_29 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_30 = full_9 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_31 = full_9 - - # pd_op.scale: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1xf32) - scale_2 = paddle._C_ops.scale(matmul_3, full_9, float("0"), True) - del matmul_3 - - # pd_op.add: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1x1x-1x-1xf32) - add_3 = paddle._C_ops.add(scale_2, unsqueeze_0) - - # pd_op.softmax: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32) - softmax_0 = paddle._C_ops.softmax(add_3, -1) - del add_3 - - # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x-1xf32, 4x12x-1x32xf32) - matmul_4 = paddle._C_ops.matmul(softmax_0, slice_7, False, False) - - # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) - transpose_5 = paddle._C_ops.transpose(matmul_4, [0, 2, 1, 3]) - del matmul_4 - - # pd_op.full: (xi64) <- () - full_10 = paddle._C_ops.full( - [], float("384"), paddle.int64, paddle.core.CPUPlace() - ) - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_5 = [full_5, slice_4, full_10] - del slice_4 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_5 = paddle._C_ops.stack(combine_5, 0) - del combine_5 - - # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) - reshape_1 = paddle._C_ops.reshape(transpose_5, stack_5) - del stack_5 - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) - matmul_5 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) - del parameter_38 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_4 = paddle._C_ops.add(matmul_5, parameter_37) - del parameter_37 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_4, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_4 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) - add_5 = paddle._C_ops.add(transpose_2, dropout_2) - - # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_5, parameter_36, parameter_35, 
float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_35, parameter_36 - - # pd_op.shape64: (3xi64) <- (4x-1x384xf32) - shape64_2 = paddle._C_ops.shape64(layer_norm_0) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_8 = paddle._C_ops.slice( - shape64_2, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_2 - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) - matmul_6 = paddle._C_ops.matmul(layer_norm_0, parameter_34, False, False) - del parameter_34 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_6 = paddle._C_ops.add(matmul_6, parameter_33) - del parameter_33 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_6 = [full_5, slice_8, full_7, full_8] - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_6 = paddle._C_ops.stack(combine_6, 0) - del combine_6 - - # pd_op.reshape: (4x-1x12x32xf32) <- (4x-1x384xf32, 4xi64) - reshape_2 = paddle._C_ops.reshape(add_6, stack_6) - del stack_6 - - # pd_op.transpose: (4x12x-1x32xf32) <- (4x-1x12x32xf32) - transpose_6 = paddle._C_ops.transpose(reshape_2, [0, 2, 1, 3]) - del reshape_2 - - # pd_op.matmul: (4x40x768xf32) <- (4x40x384xf32, 384x768xf32) - matmul_7 = paddle._C_ops.matmul(matmul_1, parameter_32, False, False) - del parameter_32 - - # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) - add_7 = paddle._C_ops.add(matmul_7, parameter_31) - del parameter_31 - - # pd_op.full_int_array: (5xi64) <- () - full_int_array_7 = [0, 40, 2, 12, 32] - - # pd_op.reshape: (4x40x2x12x32xf32) <- (4x40x768xf32, 5xi64) - reshape_3 = paddle._C_ops.reshape(add_7, full_int_array_7) - - # pd_op.transpose: (2x4x12x40x32xf32) <- (4x40x2x12x32xf32) - transpose_7 = paddle._C_ops.transpose(reshape_3, [2, 0, 3, 1, 4]) - del reshape_3 - - # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) - slice_9 = paddle._C_ops.slice( - transpose_7, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) - slice_10 = paddle._C_ops.slice( - transpose_7, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.transpose: (4x12x32x40xf32) <- (4x12x40x32xf32) - transpose_8 = paddle._C_ops.transpose(slice_9, [0, 1, 3, 2]) - del slice_9 - - # pd_op.matmul: (4x12x-1x40xf32) <- (4x12x-1x32xf32, 4x12x32x40xf32) - matmul_8 = paddle._C_ops.matmul(transpose_6, transpose_8, False, False) - - # pd_op.scale: (4x12x-1x40xf32) <- (4x12x-1x40xf32, 1xf32) - scale_3 = paddle._C_ops.scale(matmul_8, full_9, float("0"), True) - del matmul_8 - - # pd_op.softmax: (4x12x-1x40xf32) <- (4x12x-1x40xf32) - softmax_1 = paddle._C_ops.softmax(scale_3, -1) - del scale_3 - - # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x40xf32, 4x12x40x32xf32) - matmul_9 = paddle._C_ops.matmul(softmax_1, slice_10, False, False) - - # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) - transpose_9 = paddle._C_ops.transpose(matmul_9, [0, 2, 1, 3]) - del matmul_9 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_7 = [full_5, slice_8, full_10] - del slice_8 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_7 = paddle._C_ops.stack(combine_7, 0) - del combine_7 - - # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) - reshape_4 = paddle._C_ops.reshape(transpose_9, stack_7) - del stack_7 - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) - matmul_10 = paddle._C_ops.matmul(reshape_4, parameter_30, False, False) - del 
parameter_30 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_8 = paddle._C_ops.add(matmul_10, parameter_29) - del parameter_29 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_8, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_8 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) - add_9 = paddle._C_ops.add(layer_norm_0, dropout_4) - - # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_9, parameter_28, parameter_27, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_27, parameter_28 - - # pd_op.matmul: (4x-1x1536xf32) <- (4x-1x384xf32, 384x1536xf32) - matmul_11 = paddle._C_ops.matmul(layer_norm_3, parameter_26, False, False) - del parameter_26 - - # pd_op.add: (4x-1x1536xf32) <- (4x-1x1536xf32, 1536xf32) - add_10 = paddle._C_ops.add(matmul_11, parameter_25) - del parameter_25 - - # pd_op.relu: (4x-1x1536xf32) <- (4x-1x1536xf32) - relu_0 = paddle._C_ops.relu(add_10) - del add_10 - - # pd_op.dropout: (4x-1x1536xf32, 4x-1x1536xui8) <- (4x-1x1536xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - relu_0, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x1536xf32, 1536x384xf32) - matmul_12 = paddle._C_ops.matmul(dropout_6, parameter_24, False, False) - del parameter_24 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_11 = paddle._C_ops.add(matmul_12, parameter_23) - del parameter_23 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_11, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_11 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - dropout_8, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del dropout_8 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) - add_12 = paddle._C_ops.add(layer_norm_3, dropout_10) - - # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_12, parameter_22, parameter_21, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_21, parameter_22 - - # pd_op.shape64: (3xi64) <- (4x-1x384xf32) - shape64_3 = paddle._C_ops.shape64(layer_norm_6) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_11 = paddle._C_ops.slice( - shape64_3, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_3 - - # pd_op.matmul: (4x-1x1152xf32) <- (4x-1x384xf32, 384x1152xf32) - matmul_13 = paddle._C_ops.matmul(layer_norm_6, parameter_20, False, False) - del parameter_20 - - # pd_op.add: (4x-1x1152xf32) <- (4x-1x1152xf32, 1152xf32) - add_13 = paddle._C_ops.add(matmul_13, 
parameter_19) - del parameter_19 - - # builtin.combine: ([xi64, xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64, xi64) - combine_8 = [full_5, slice_11, full_6, full_7, full_8] - del full_6 - - # pd_op.stack: (5xi64) <- ([xi64, xi64, xi64, xi64, xi64]) - stack_8 = paddle._C_ops.stack(combine_8, 0) - del combine_8 - - # pd_op.reshape: (4x-1x3x12x32xf32) <- (4x-1x1152xf32, 5xi64) - reshape_5 = paddle._C_ops.reshape(add_13, stack_8) - del stack_8 - - # pd_op.transpose: (3x4x12x-1x32xf32) <- (4x-1x3x12x32xf32) - transpose_10 = paddle._C_ops.transpose(reshape_5, [2, 0, 3, 1, 4]) - del reshape_5 - - # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) - slice_12 = paddle._C_ops.slice( - transpose_10, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) - slice_13 = paddle._C_ops.slice( - transpose_10, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.slice: (4x12x-1x32xf32) <- (3x4x12x-1x32xf32, 1xi64, 1xi64) - slice_14 = paddle._C_ops.slice( - transpose_10, [0], full_int_array_4, full_int_array_6, [1], [0] - ) - - # pd_op.transpose: (4x12x32x-1xf32) <- (4x12x-1x32xf32) - transpose_11 = paddle._C_ops.transpose(slice_13, [0, 1, 3, 2]) - del slice_13 - - # pd_op.matmul: (4x12x-1x-1xf32) <- (4x12x-1x32xf32, 4x12x32x-1xf32) - matmul_14 = paddle._C_ops.matmul(slice_12, transpose_11, False, False) - - # pd_op.scale: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1xf32) - scale_4 = paddle._C_ops.scale(matmul_14, full_9, float("0"), True) - del matmul_14 - - # pd_op.add: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32, 1x1x-1x-1xf32) - add_14 = paddle._C_ops.add(scale_4, unsqueeze_0) - - # pd_op.softmax: (4x12x-1x-1xf32) <- (4x12x-1x-1xf32) - softmax_2 = paddle._C_ops.softmax(add_14, -1) - del add_14 - - # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x-1xf32, 4x12x-1x32xf32) - matmul_15 = paddle._C_ops.matmul(softmax_2, slice_14, False, False) - - # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) - transpose_12 = paddle._C_ops.transpose(matmul_15, [0, 2, 1, 3]) - del matmul_15 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_9 = [full_5, slice_11, full_10] - del slice_11 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_9 = paddle._C_ops.stack(combine_9, 0) - del combine_9 - - # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) - reshape_6 = paddle._C_ops.reshape(transpose_12, stack_9) - del stack_9 - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) - matmul_16 = paddle._C_ops.matmul(reshape_6, parameter_18, False, False) - del parameter_18 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_15 = paddle._C_ops.add(matmul_16, parameter_17) - del parameter_17 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_15, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_15 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) - add_16 = paddle._C_ops.add(layer_norm_6, dropout_12) - - # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_16, parameter_16, parameter_15, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_15, parameter_16 - - # 
pd_op.shape64: (3xi64) <- (4x-1x384xf32) - shape64_4 = paddle._C_ops.shape64(layer_norm_9) - - # pd_op.slice: (xi64) <- (3xi64, 1xi64, 1xi64) - slice_15 = paddle._C_ops.slice( - shape64_4, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del shape64_4 - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) - matmul_17 = paddle._C_ops.matmul(layer_norm_9, parameter_14, False, False) - del parameter_14 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_17 = paddle._C_ops.add(matmul_17, parameter_13) - del parameter_13 - - # builtin.combine: ([xi64, xi64, xi64, xi64]) <- (xi64, xi64, xi64, xi64) - combine_10 = [full_5, slice_15, full_7, full_8] - del full_7, full_8 - - # pd_op.stack: (4xi64) <- ([xi64, xi64, xi64, xi64]) - stack_10 = paddle._C_ops.stack(combine_10, 0) - del combine_10 - - # pd_op.reshape: (4x-1x12x32xf32) <- (4x-1x384xf32, 4xi64) - reshape_7 = paddle._C_ops.reshape(add_17, stack_10) - del stack_10 - - # pd_op.transpose: (4x12x-1x32xf32) <- (4x-1x12x32xf32) - transpose_13 = paddle._C_ops.transpose(reshape_7, [0, 2, 1, 3]) - del reshape_7 - - # pd_op.matmul: (4x40x768xf32) <- (4x40x384xf32, 384x768xf32) - matmul_18 = paddle._C_ops.matmul(matmul_1, parameter_12, False, False) - del parameter_12 - - # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) - add_18 = paddle._C_ops.add(matmul_18, parameter_11) - del parameter_11 - - # pd_op.reshape: (4x40x2x12x32xf32) <- (4x40x768xf32, 5xi64) - reshape_8 = paddle._C_ops.reshape(add_18, full_int_array_7) - del full_int_array_7 - - # pd_op.transpose: (2x4x12x40x32xf32) <- (4x40x2x12x32xf32) - transpose_14 = paddle._C_ops.transpose(reshape_8, [2, 0, 3, 1, 4]) - del reshape_8 - - # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) - slice_16 = paddle._C_ops.slice( - transpose_14, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - del full_int_array_1 - - # pd_op.slice: (4x12x40x32xf32) <- (2x4x12x40x32xf32, 1xi64, 1xi64) - slice_17 = paddle._C_ops.slice( - transpose_14, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - del full_int_array_3, full_int_array_4 - - # pd_op.transpose: (4x12x32x40xf32) <- (4x12x40x32xf32) - transpose_15 = paddle._C_ops.transpose(slice_16, [0, 1, 3, 2]) - del slice_16 - - # pd_op.matmul: (4x12x-1x40xf32) <- (4x12x-1x32xf32, 4x12x32x40xf32) - matmul_19 = paddle._C_ops.matmul(transpose_13, transpose_15, False, False) - - # pd_op.scale: (4x12x-1x40xf32) <- (4x12x-1x40xf32, 1xf32) - scale_5 = paddle._C_ops.scale(matmul_19, full_9, float("0"), True) - del matmul_19 - - # pd_op.softmax: (4x12x-1x40xf32) <- (4x12x-1x40xf32) - softmax_3 = paddle._C_ops.softmax(scale_5, -1) - del scale_5 - - # pd_op.matmul: (4x12x-1x32xf32) <- (4x12x-1x40xf32, 4x12x40x32xf32) - matmul_20 = paddle._C_ops.matmul(softmax_3, slice_17, False, False) - - # pd_op.transpose: (4x-1x12x32xf32) <- (4x12x-1x32xf32) - transpose_16 = paddle._C_ops.transpose(matmul_20, [0, 2, 1, 3]) - del matmul_20 - - # builtin.combine: ([xi64, xi64, xi64]) <- (xi64, xi64, xi64) - combine_11 = [full_5, slice_15, full_10] - del full_10, full_5, slice_15 - - # pd_op.stack: (3xi64) <- ([xi64, xi64, xi64]) - stack_11 = paddle._C_ops.stack(combine_11, 0) - del combine_11 - - # pd_op.reshape: (4x-1x384xf32) <- (4x-1x12x32xf32, 3xi64) - reshape_9 = paddle._C_ops.reshape(transpose_16, stack_11) - del stack_11 - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x384xf32, 384x384xf32) - matmul_21 = paddle._C_ops.matmul(reshape_9, parameter_10, False, False) - del parameter_10 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 
384xf32) - add_19 = paddle._C_ops.add(matmul_21, parameter_9) - del parameter_9 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_19, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_19 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) - add_20 = paddle._C_ops.add(layer_norm_9, dropout_14) - - # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_20, parameter_8, parameter_7, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_7, parameter_8 - - # pd_op.matmul: (4x-1x1536xf32) <- (4x-1x384xf32, 384x1536xf32) - matmul_22 = paddle._C_ops.matmul(layer_norm_12, parameter_6, False, False) - del parameter_6 - - # pd_op.add: (4x-1x1536xf32) <- (4x-1x1536xf32, 1536xf32) - add_21 = paddle._C_ops.add(matmul_22, parameter_5) - del parameter_5 - - # pd_op.relu: (4x-1x1536xf32) <- (4x-1x1536xf32) - relu_1 = paddle._C_ops.relu(add_21) - del add_21 - - # pd_op.dropout: (4x-1x1536xf32, 4x-1x1536xui8) <- (4x-1x1536xf32, None, 1xf32) - dropout_16, dropout_17 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - relu_1, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (4x-1x384xf32) <- (4x-1x1536xf32, 1536x384xf32) - matmul_23 = paddle._C_ops.matmul(dropout_16, parameter_4, False, False) - del parameter_4 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 384xf32) - add_22 = paddle._C_ops.add(matmul_23, parameter_3) - del parameter_3 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_18, dropout_19 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_22, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_22 - - # pd_op.dropout: (4x-1x384xf32, 4x-1x384xui8) <- (4x-1x384xf32, None, 1xf32) - dropout_20, dropout_21 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - dropout_18, None, full_2, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del dropout_18 - - # pd_op.add: (4x-1x384xf32) <- (4x-1x384xf32, 4x-1x384xf32) - add_23 = paddle._C_ops.add(layer_norm_12, dropout_20) - - # pd_op.layer_norm: (4x-1x384xf32, 4x-1xf32, 4x-1xf32) <- (4x-1x384xf32, 384xf32, 384xf32) - layer_norm_15, layer_norm_16, layer_norm_17 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_23, parameter_2, parameter_1, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_1, parameter_2 - - # pd_op.matmul: (4x-1x6629xf32) <- (4x-1x384xf32, 384x6629xf32) - matmul_0 = paddle._C_ops.matmul(layer_norm_15, parameter_0, False, False) - del ( - add_12, - add_13, - add_16, - add_17, - add_18, - add_2, - add_20, - add_23, - add_5, - add_6, - add_7, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_18, - assign_19, - assign_2, - assign_20, - assign_21, - assign_22, - assign_23, - assign_24, - assign_25, - assign_26, - assign_27, - assign_28, - assign_29, - assign_3, - assign_30, - 
assign_31, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - dropout_1, - dropout_10, - dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_16, - dropout_17, - dropout_19, - dropout_2, - dropout_20, - dropout_21, - dropout_3, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_9, - full_1, - full_2, - full_9, - full_int_array_6, - layer_norm_0, - layer_norm_1, - layer_norm_10, - layer_norm_11, - layer_norm_12, - layer_norm_13, - layer_norm_14, - layer_norm_15, - layer_norm_16, - layer_norm_17, - layer_norm_2, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_1, - matmul_10, - matmul_11, - matmul_12, - matmul_13, - matmul_16, - matmul_17, - matmul_18, - matmul_2, - matmul_21, - matmul_22, - matmul_23, - matmul_5, - matmul_6, - matmul_7, - parameter_0, - relu_0, - relu_1, - reshape_1, - reshape_4, - reshape_6, - reshape_9, - scale_2, - scale_4, - slice_1, - slice_10, - slice_12, - slice_14, - slice_17, - slice_3, - slice_5, - slice_7, - softmax_0, - softmax_1, - softmax_2, - softmax_3, - transpose_0, - transpose_1, - transpose_10, - transpose_11, - transpose_12, - transpose_13, - transpose_14, - transpose_15, - transpose_16, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - transpose_9, - unsqueeze_0, - ) - - return matmul_0 diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/weight_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/weight_meta.py deleted file mode 100644 index e389b1a8e..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_3/weight_meta.py +++ /dev/null @@ -1,471 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [384, 6629] - dtype = "float32" - min_val = float("-0.351484") - max_val = float("0.702688") - mean = float("0.0277474") - std = float("0.043599") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [384] - dtype = "float32" - min_val = float("-0.969039") - max_val = float("1.92469") - mean = float("-0.134236") - std = float("0.298058") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [384] - dtype = "float32" - min_val = float("0.234385") - max_val = float("4.4674") - mean = float("3.29625") - std = float("0.887819") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [384] - dtype = "float32" - min_val = float("-0.0639392") - max_val = float("0.106081") - mean = float("-4.18515e-05") - std = float("0.0118406") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [1536, 384] - dtype = "float32" - min_val = float("-3.43228") - max_val = float("3.15796") - mean = float("-0.00241164") - std = float("0.157565") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [1536] - dtype = "float32" - min_val = float("-3.31375") - max_val = float("0.926513") - mean = float("-1.129") - std = float("0.302978") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [384, 1536] - dtype = "float32" - min_val = float("-0.312126") - max_val = float("0.505538") - mean = float("0.012929") - std = float("0.0764173") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [384] - dtype = "float32" - min_val = float("-13.756") - max_val = float("0.36275") - mean = 
float("-0.132106") - std = float("0.724668") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [384] - dtype = "float32" - min_val = float("0.101265") - max_val = float("2.58581") - mean = float("0.378491") - std = float("0.131247") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [384] - dtype = "float32" - min_val = float("-0.934299") - max_val = float("0.48667") - mean = float("-0.0142857") - std = float("0.129296") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.555835") - max_val = float("0.516169") - mean = float("-9.3906e-05") - std = float("0.0657883") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [768] - dtype = "float32" - min_val = float("-1.217") - max_val = float("1.79317") - mean = float("0.0100228") - std = float("0.329571") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384, 768] - dtype = "float32" - min_val = float("-0.665119") - max_val = float("0.621938") - mean = float("-0.000122967") - std = float("0.0636656") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("-3.25041") - max_val = float("3.48689") - mean = float("0.000539355") - std = float("0.796801") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.498011") - max_val = float("0.484597") - mean = float("-3.2408e-05") - std = float("0.0680232") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-0.700518") - max_val = float("2.86425") - mean = float("0.0299052") - std = float("0.207136") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384] - dtype = "float32" - min_val = float("-0.0969785") - max_val = float("2.76296") - mean = float("1.19528") - std = float("0.383211") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [384] - dtype = "float32" - min_val = float("-6.4183") - max_val = float("6.62679") - mean = float("0.0147666") - std = float("0.706506") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [384, 384] - dtype = "float32" - min_val = float("-1.3393") - max_val = float("1.26757") - mean = float("-0.000168767") - std = float("0.0735479") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [1152] - dtype = "float32" - min_val = float("-3.65649") - max_val = float("3.95875") - mean = float("0.019323") - std = float("0.474384") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [384, 1152] - dtype = "float32" - min_val = float("-0.509574") - max_val = float("0.457092") - mean = float("-4.52726e-05") - std = float("0.0650373") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [384] - dtype = "float32" - min_val = float("-1.12659") - max_val = float("0.292934") - mean = float("0.00757793") - std = float("0.0927688") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [384] - dtype = "float32" - min_val = float("-0.00271219") - max_val = float("1.01901") 
- mean = float("0.84283") - std = float("0.130932") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [384] - dtype = "float32" - min_val = float("-0.197205") - max_val = float("0.457012") - mean = float("0.000321222") - std = float("0.04599") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [1536, 384] - dtype = "float32" - min_val = float("-3.17563") - max_val = float("1.87986") - mean = float("-9.41858e-05") - std = float("0.0390491") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [1536] - dtype = "float32" - min_val = float("-2.61165") - max_val = float("-0.080146") - mean = float("-0.640018") - std = float("0.580882") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [384, 1536] - dtype = "float32" - min_val = float("-0.347415") - max_val = float("0.401366") - mean = float("0.00329947") - std = float("0.0384391") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [384] - dtype = "float32" - min_val = float("-5.19483") - max_val = float("4.39995") - mean = float("-0.162294") - std = float("0.730859") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [384] - dtype = "float32" - min_val = float("0.508543") - max_val = float("5.63012") - mean = float("1.03757") - std = float("0.382432") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [384] - dtype = "float32" - min_val = float("-0.972005") - max_val = float("0.423718") - mean = float("-0.00471045") - std = float("0.0801598") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.750938") - max_val = float("1.42995") - mean = float("0.000105277") - std = float("0.0575386") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [768] - dtype = "float32" - min_val = float("-0.986446") - max_val = float("1.72039") - mean = float("0.0137758") - std = float("0.267347") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [384, 768] - dtype = "float32" - min_val = float("-0.441004") - max_val = float("0.36639") - mean = float("5.4273e-05") - std = float("0.0550247") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [384] - dtype = "float32" - min_val = float("-5.40809") - max_val = float("3.65363") - mean = float("-0.0052135") - std = float("0.758904") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.428969") - max_val = float("0.390564") - mean = float("0.000111454") - std = float("0.0542453") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [384] - dtype = "float32" - min_val = float("-1.03026") - max_val = float("2.33983") - mean = float("0.0215944") - std = float("0.333446") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [384] - dtype = "float32" - min_val = float("0.0357924") - max_val = float("6.25379") - mean = float("1.28284") - std = float("0.600466") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [384] - dtype = "float32" - min_val = float("-8.05629") - max_val = 
float("5.74326") - mean = float("-0.0115213") - std = float("0.510238") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.495021") - max_val = float("0.595344") - mean = float("-7.17572e-05") - std = float("0.0511101") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [1152] - dtype = "float32" - min_val = float("-7.17506") - max_val = float("6.91719") - mean = float("-0.00895858") - std = float("1.53632") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [384, 1152] - dtype = "float32" - min_val = float("-0.272265") - max_val = float("0.296452") - mean = float("7.50426e-05") - std = float("0.0481593") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [6629, 384] - dtype = "float32" - min_val = float("-6.43765") - max_val = float("3.98165") - mean = float("-0.00702058") - std = float("0.0513204") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [384, 384] - dtype = "float32" - min_val = float("-0.760943") - max_val = float("0.740528") - mean = float("-0.000140398") - std = float("0.0638947") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt deleted file mode 100644 index b68fe7661..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -90f5ad58ff9bcbbf15fcede3279253e0efba1056e86baaeb1bc99ccdedeaa9fe \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_net.json b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_net.json deleted file mode 100644 index 7e5b42a74..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "ch_RepSVTR_rec", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/input_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/input_meta.py deleted file mode 100644 index 6459d77bb..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/input_meta.py +++ /dev/null @@ -1,9 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [5, 384, 1, 40] - dtype = "float32" - min_val = float("-3.50874") - max_val = float("3.79537") - mean = float("-0.00288513") - std = float("0.388304") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/model.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/model.py deleted file mode 100644 index 30fbbe858..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/model.py +++ /dev/null @@ -1,828 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - 
parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - data_0, - ): - # pd_op.assign: (-1x384x1x40xf32) <- (-1x384x1x40xf32) - assign_0 = data_0 - del data_0 - - # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x384x1x40xf32, 48x384x1x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - assign_0, parameter_52, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_52 - - # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_47 - - # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_46, - parameter_45, - parameter_44, - parameter_43, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_43, parameter_44, parameter_45, parameter_46 - - # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) - shape64_0 = paddle._C_ops.shape64(swish_1) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_0 = [0] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_0 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_3 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_4 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_5 = full_int_array_1 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_6 = full_int_array_1 - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - shape64_0, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del shape64_0 - - # pd_op.flatten: (-1x256x40xf32) <- (-1x256x1x40xf32) - flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) - - # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - 
transpose_0, parameter_42, parameter_41, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_41, parameter_42 - - # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) - matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) - del parameter_40 - - # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) - add_1 = paddle._C_ops.add(matmul_0, parameter_39) - del parameter_39 - - # pd_op.full_int_array: (5xi64) <- () - full_int_array_2 = [0, -1, 3, 8, 32] - - # pd_op.reshape: (-1x-1x3x8x32xf32) <- (-1x40x768xf32, 5xi64) - reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_2) - - # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) - transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) - del reshape_0 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_7 = full_0 - - # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) - scale_0 = paddle._C_ops.scale(slice_1, full_0, float("0"), True) - del slice_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_8 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_9 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_10 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_11 = full_int_array_3 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [3] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_12 = full_int_array_4 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) - transpose_2 = paddle._C_ops.transpose(slice_2, [0, 1, 3, 2]) - del slice_2 - - # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) - matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) - - # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) - softmax_0 = paddle._C_ops.softmax(matmul_1, -1) - del matmul_1 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_13 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_14 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_15 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_16 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_17 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_18 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_19 = full_1 - - # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_0, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) - matmul_2 = paddle._C_ops.matmul(dropout_0, slice_3, False, 
False) - - # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) - transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) - del matmul_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [0, -1, 256] - - # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) - reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) - - # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) - matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) - del parameter_38 - - # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) - add_2 = paddle._C_ops.add(matmul_3, parameter_37) - del parameter_37 - - # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_2, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_2 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) - add_3 = paddle._C_ops.add(transpose_0, dropout_2) - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_3, parameter_36, parameter_35, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_35, parameter_36 - - # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) - matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) - del parameter_34 - - # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) - add_4 = paddle._C_ops.add(matmul_4, parameter_33) - del parameter_33 - - # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) - swish_2 = paddle._C_ops.swish(add_4) - - # pd_op.dropout: (-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - swish_2, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del swish_2 - - # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) - matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) - del parameter_32 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) - add_5 = paddle._C_ops.add(matmul_5, parameter_31) - del parameter_31 - - # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_5, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_5 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) - add_6 = paddle._C_ops.add(add_3, dropout_6) - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_6, parameter_30, parameter_29, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_29, parameter_30 - - # pd_op.matmul: (-1x40x768xf32) <- (-1x40x256xf32, 256x768xf32) - matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) - del parameter_28 - - # pd_op.add: (-1x40x768xf32) <- (-1x40x768xf32, 768xf32) - add_7 = paddle._C_ops.add(matmul_6, parameter_27) - del parameter_27 - - # pd_op.reshape: (-1x-1x3x8x32xf32) <- 
(-1x40x768xf32, 5xi64) - reshape_2 = paddle._C_ops.reshape(add_7, full_int_array_2) - del full_int_array_2 - - # pd_op.transpose: (3x-1x8x-1x32xf32) <- (-1x-1x3x8x32xf32) - transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) - del reshape_2 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - - # pd_op.scale: (-1x8x-1x32xf32) <- (-1x8x-1x32xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_4, full_0, float("0"), True) - del slice_4 - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_1, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (-1x8x-1x32xf32) <- (3x-1x8x-1x32xf32, 1xi64, 1xi64) - slice_6 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.transpose: (-1x8x32x-1xf32) <- (-1x8x-1x32xf32) - transpose_5 = paddle._C_ops.transpose(slice_5, [0, 1, 3, 2]) - del slice_5 - - # pd_op.matmul: (-1x8x-1x-1xf32) <- (-1x8x-1x32xf32, -1x8x32x-1xf32) - matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) - - # pd_op.softmax: (-1x8x-1x-1xf32) <- (-1x8x-1x-1xf32) - softmax_1 = paddle._C_ops.softmax(matmul_7, -1) - del matmul_7 - - # pd_op.dropout: (-1x8x-1x-1xf32, -1x8x-1x-1xui8) <- (-1x8x-1x-1xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (-1x8x-1x32xf32) <- (-1x8x-1x-1xf32, -1x8x-1x32xf32) - matmul_8 = paddle._C_ops.matmul(dropout_8, slice_6, False, False) - - # pd_op.transpose: (-1x-1x8x32xf32) <- (-1x8x-1x32xf32) - transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) - del matmul_8 - - # pd_op.reshape: (-1x-1x256xf32) <- (-1x-1x8x32xf32, 3xi64) - reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) - del full_int_array_5 - - # pd_op.matmul: (-1x-1x256xf32) <- (-1x-1x256xf32, 256x256xf32) - matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) - del parameter_26 - - # pd_op.add: (-1x-1x256xf32) <- (-1x-1x256xf32, 256xf32) - add_8 = paddle._C_ops.add(matmul_9, parameter_25) - del parameter_25 - - # pd_op.dropout: (-1x-1x256xf32, -1x-1x256xui8) <- (-1x-1x256xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_8, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_8 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x-1x256xf32) - add_9 = paddle._C_ops.add(add_6, dropout_10) - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_9, parameter_24, parameter_23, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_23, parameter_24 - - # pd_op.matmul: (-1x40x512xf32) <- (-1x40x256xf32, 256x512xf32) - matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) - del parameter_22 - - # pd_op.add: (-1x40x512xf32) <- (-1x40x512xf32, 512xf32) - add_10 = paddle._C_ops.add(matmul_10, parameter_21) - del parameter_21 - - # pd_op.swish: (-1x40x512xf32) <- (-1x40x512xf32) - swish_3 = paddle._C_ops.swish(add_10) - - # pd_op.dropout: 
(-1x40x512xf32, -1x40x512xui8) <- (-1x40x512xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - swish_3, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del swish_3 - - # pd_op.matmul: (-1x40x256xf32) <- (-1x40x512xf32, 512x256xf32) - matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) - del parameter_20 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, 256xf32) - add_11 = paddle._C_ops.add(matmul_11, parameter_19) - del parameter_19 - - # pd_op.dropout: (-1x40x256xf32, -1x40x256xui8) <- (-1x40x256xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_11, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_11 - - # pd_op.add: (-1x40x256xf32) <- (-1x40x256xf32, -1x40x256xf32) - add_12 = paddle._C_ops.add(add_9, dropout_14) - - # pd_op.layer_norm: (-1x40x256xf32, -1x40xf32, -1x40xf32) <- (-1x40x256xf32, 256xf32, 256xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_12, parameter_18, parameter_17, float("1e-06"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_17, parameter_18 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_6 = [0, 1, 40, 256] - - # pd_op.reshape: (-1x1x40x256xf32) <- (-1x40x256xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) - del full_int_array_6 - - # pd_op.transpose: (-1x256x1x40xf32) <- (-1x1x40x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) - del reshape_4 - - # pd_op.conv2d: (-1x384x1x40xf32) <- (-1x256x1x40xf32, 384x256x1x1xf32) - conv2d_2 = paddle._C_ops.conv2d( - transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_16 - - # pd_op.batch_norm_: (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (-1x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_15, - parameter_14, - parameter_13, - parameter_12, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_12, parameter_13, parameter_14, parameter_15 - - # pd_op.swish: (-1x384x1x40xf32) <- (-1x384x1x40xf32) - swish_4 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([-1x384x1x40xf32, -1x384x1x40xf32]) <- (-1x384x1x40xf32, -1x384x1x40xf32) - combine_0 = [assign_0, swish_4] - - # pd_op.concat: (-1x768x1x40xf32) <- ([-1x384x1x40xf32, -1x384x1x40xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.conv2d: (-1x48x1x40xf32) <- (-1x768x1x40xf32, 48x768x1x3xf32) - conv2d_3 = paddle._C_ops.conv2d( - concat_0, parameter_11, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_11 - - # pd_op.batch_norm_: (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (-1x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - 
batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_10, - parameter_9, - parameter_8, - parameter_7, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_7, parameter_8, parameter_9 - - # pd_op.swish: (-1x48x1x40xf32) <- (-1x48x1x40xf32) - swish_5 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (-1x256x1x40xf32) <- (-1x48x1x40xf32, 256x48x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (-1x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (-1x256x1x40xf32) <- (-1x256x1x40xf32) - swish_6 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.shape64: (4xi64) <- (-1x256x1x40xf32) - shape64_1 = paddle._C_ops.shape64(swish_6) - - # pd_op.slice: (xi64) <- (4xi64, 1xi64, 1xi64) - slice_7 = paddle._C_ops.slice( - shape64_1, [0], full_int_array_0, full_int_array_1, [1], [0] - ) - del full_int_array_0, full_int_array_1, shape64_1 - - # pd_op.squeeze: (-1x256x40xf32) <- (-1x256x1x40xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) - - # pd_op.transpose: (-1x40x256xf32) <- (-1x256x40xf32) - transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) - del squeeze_0 - - # pd_op.matmul: (-1x40x6625xf32) <- (-1x40x256xf32, 256x6625xf32) - matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) - del parameter_1 - - # pd_op.add: (-1x40x6625xf32) <- (-1x40x6625xf32, 6625xf32) - add_0 = paddle._C_ops.add(matmul_12, parameter_0) - del ( - add_1, - add_10, - add_12, - add_3, - add_4, - add_6, - add_7, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_18, - assign_19, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - concat_0, - conv2d_0, - conv2d_1, - conv2d_2, - conv2d_3, - conv2d_4, - dropout_0, - dropout_1, - dropout_10, - dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_2, - dropout_3, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_8, - dropout_9, - full_0, - full_1, - full_2, - full_int_array_3, - full_int_array_4, - layer_norm_0, - layer_norm_1, - layer_norm_10, - layer_norm_11, - layer_norm_12, - 
layer_norm_13, - layer_norm_14, - layer_norm_2, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_0, - matmul_10, - matmul_11, - matmul_12, - matmul_3, - matmul_4, - matmul_5, - matmul_6, - matmul_9, - parameter_0, - reshape_1, - reshape_3, - scale_0, - scale_1, - slice_3, - slice_6, - softmax_0, - softmax_1, - swish_0, - swish_1, - swish_4, - swish_5, - swish_6, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - ) - - return add_0 diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/weight_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/weight_meta.py deleted file mode 100644 index 258ba4239..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_4/weight_meta.py +++ /dev/null @@ -1,565 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [6625] - dtype = "float32" - min_val = float("-1.42013") - max_val = float("1.27473") - mean = float("-0.109522") - std = float("0.088233") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [256, 6625] - dtype = "float32" - min_val = float("-0.624946") - max_val = float("0.302852") - mean = float("-0.152493") - std = float("0.0693287") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [256] - dtype = "float32" - min_val = float("-3.40881") - max_val = float("11.7084") - mean = float("0.105069") - std = float("1.48036") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [256] - dtype = "float32" - min_val = float("2.06324") - max_val = float("5.60261") - mean = float("3.15274") - std = float("0.558277") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [256] - dtype = "float32" - min_val = float("0.0233932") - max_val = float("0.0875437") - mean = float("0.0371852") - std = float("0.00740461") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [256] - dtype = "float32" - min_val = float("-1.05686") - max_val = float("1.24687") - mean = float("0.098052") - std = float("0.319024") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [256, 48, 1, 1] - dtype = "float32" - min_val = float("-0.47424") - max_val = float("0.291717") - mean = float("2.26874e-05") - std = float("0.0562394") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [48, 768, 1, 3] - dtype = "float32" - min_val = float("-0.580903") - max_val = float("0.786753") - mean = float("-0.000179171") - std = float("0.0578308") - data = None - - -class Program_weight_tensor_parameter_12: - name = "parameter_12" - shape = [384] - dtype = 
"float32" - min_val = float("-3.89553") - max_val = float("0.833467") - mean = float("-1.09152") - std = float("0.396479") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("0.473132") - max_val = float("2.97892") - mean = float("0.818498") - std = float("0.239801") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384] - dtype = "float32" - min_val = float("0.314674") - max_val = float("3.15629") - mean = float("0.72336") - std = float("0.288909") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-2.30058") - max_val = float("3.12659") - mean = float("0.142761") - std = float("0.815047") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384, 256, 1, 1] - dtype = "float32" - min_val = float("-0.371578") - max_val = float("0.396831") - mean = float("0.000731521") - std = float("0.0558392") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [256] - dtype = "float32" - min_val = float("-1.21851") - max_val = float("1.36982") - mean = float("-0.0106968") - std = float("0.525313") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [256] - dtype = "float32" - min_val = float("0.25639") - max_val = float("2.01074") - mean = float("1.25786") - std = float("0.27881") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [256] - dtype = "float32" - min_val = float("-4.55386") - max_val = float("1.85077") - mean = float("-0.0756713") - std = float("0.620359") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [512, 256] - dtype = "float32" - min_val = float("-1.04984") - max_val = float("0.773719") - mean = float("0.00144559") - std = float("0.092633") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [512] - dtype = "float32" - min_val = float("-2.14911") - max_val = float("-0.199502") - mean = float("-1.07301") - std = float("0.295984") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [256, 512] - dtype = "float32" - min_val = float("-0.481137") - max_val = float("0.390545") - mean = float("-0.0156815") - std = float("0.0709277") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [256] - dtype = "float32" - min_val = float("-1.00171") - max_val = float("2.68567") - mean = float("0.317346") - std = float("0.454044") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [256] - dtype = "float32" - min_val = float("0.53436") - max_val = float("2.32077") - mean = float("1.51709") - std = float("0.268321") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [256] - dtype = "float32" - min_val = float("-0.891405") - max_val = float("1.4711") - mean = float("0.00256555") - std = float("0.229186") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [256, 256] - dtype = "float32" - min_val = float("-0.421549") - max_val = float("0.400859") - mean = float("-7.66968e-05") - std = float("0.072737") - data = None - - -class Program_weight_tensor_parameter_27: - name = "parameter_27" - shape = [768] - dtype = "float32" 
- min_val = float("-2.5866") - max_val = float("2.47328") - mean = float("0.027699") - std = float("0.443614") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [256, 768] - dtype = "float32" - min_val = float("-0.529306") - max_val = float("0.676402") - mean = float("-6.66809e-07") - std = float("0.0656268") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [256] - dtype = "float32" - min_val = float("-1.14134") - max_val = float("0.517422") - mean = float("0.0267431") - std = float("0.270125") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [256] - dtype = "float32" - min_val = float("0.169902") - max_val = float("1.51132") - mean = float("0.918719") - std = float("0.190758") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [256] - dtype = "float32" - min_val = float("-1.20704") - max_val = float("0.476782") - mean = float("-0.0173341") - std = float("0.212343") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [512, 256] - dtype = "float32" - min_val = float("-0.560177") - max_val = float("0.690259") - mean = float("-0.000401634") - std = float("0.0821862") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [512] - dtype = "float32" - min_val = float("-1.99014") - max_val = float("0.0540556") - mean = float("-0.874845") - std = float("0.39334") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [256, 512] - dtype = "float32" - min_val = float("-0.542236") - max_val = float("0.446513") - mean = float("-0.01983") - std = float("0.0760177") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [256] - dtype = "float32" - min_val = float("-1.96161") - max_val = float("1.75132") - mean = float("0.47535") - std = float("0.586567") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [256] - dtype = "float32" - min_val = float("-0.991044") - max_val = float("2.01325") - mean = float("1.35979") - std = float("0.352656") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [256] - dtype = "float32" - min_val = float("-0.891919") - max_val = float("0.533113") - mean = float("-0.00336275") - std = float("0.138964") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [256, 256] - dtype = "float32" - min_val = float("-0.324628") - max_val = float("0.305929") - mean = float("-6.48568e-05") - std = float("0.0643931") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("-1.90397") - max_val = float("1.87618") - mean = float("-0.00592988") - std = float("0.403207") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [256, 768] - dtype = "float32" - min_val = float("-0.537901") - max_val = float("0.362684") - mean = float("0.000260092") - std = float("0.0648239") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [256] - dtype = "float32" - min_val = float("-0.886127") - max_val = float("0.783481") - mean = float("0.0549286") - std = float("0.204608") - data = None - - -class Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [256] - dtype 
= "float32" - min_val = float("-0.37332") - max_val = float("1.68426") - mean = float("0.547497") - std = float("0.291976") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [256] - dtype = "float32" - min_val = float("-2.86113") - max_val = float("3.54946") - mean = float("0.0121762") - std = float("1.13695") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [256] - dtype = "float32" - min_val = float("1.04742") - max_val = float("7.67621") - mean = float("2.19113") - std = float("0.99277") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [256] - dtype = "float32" - min_val = float("0.0540838") - max_val = float("1.93758") - mean = float("0.166272") - std = float("0.224777") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [256] - dtype = "float32" - min_val = float("-3.8311") - max_val = float("3.79453") - mean = float("-0.212298") - std = float("1.16605") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [256, 48, 1, 1] - dtype = "float32" - min_val = float("-0.632592") - max_val = float("0.494177") - mean = float("-0.00227783") - std = float("0.0566553") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [48, 384, 1, 3] - dtype = "float32" - min_val = float("-0.502963") - max_val = float("0.518951") - mean = float("-0.000231677") - std = float("0.0545732") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt deleted file mode 100644 index e40a69acf..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_hash.txt +++ /dev/null @@ -1 +0,0 @@ -c8f2494cc64b68cfb415c8edf6408e3d383db93242784a0d215c99e14900e1c5 \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_net.json b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_net.json deleted file mode 100644 index 7e5b42a74..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/graph_net.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "framework": "paddle", - "model_name": "ch_RepSVTR_rec", - "num_devices_required": 1, - "num_nodes_required": 1 -} \ No newline at end of file diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/input_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/input_meta.py deleted file mode 100644 index 1858502a2..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/input_meta.py +++ /dev/null @@ -1,9 +0,0 @@ -class Program_weight_tensor_data_0: - name = "data_0" - shape = [4, 384, 1, 40] - dtype = "float32" - min_val = float("-3.4594") - max_val = float("2.90526") - mean = float("-0.00288512") - std 
= float("0.367055") - data = None diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/model.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/model.py deleted file mode 100644 index d374899d1..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/model.py +++ /dev/null @@ -1,804 +0,0 @@ -import paddle - - -class GraphModule(paddle.nn.Layer): - def __init__(self): - super().__init__() - - def forward( - self, - parameter_0, - parameter_1, - parameter_2, - parameter_3, - parameter_4, - parameter_5, - parameter_6, - parameter_7, - parameter_8, - parameter_9, - parameter_10, - parameter_11, - parameter_12, - parameter_13, - parameter_14, - parameter_15, - parameter_16, - parameter_17, - parameter_18, - parameter_19, - parameter_20, - parameter_21, - parameter_22, - parameter_23, - parameter_24, - parameter_25, - parameter_26, - parameter_27, - parameter_28, - parameter_29, - parameter_30, - parameter_31, - parameter_32, - parameter_33, - parameter_34, - parameter_35, - parameter_36, - parameter_37, - parameter_38, - parameter_39, - parameter_40, - parameter_41, - parameter_42, - parameter_43, - parameter_44, - parameter_45, - parameter_46, - parameter_47, - parameter_48, - parameter_49, - parameter_50, - parameter_51, - parameter_52, - data_0, - ): - # pd_op.assign: (4x384x1x40xf32) <- (4x384x1x40xf32) - assign_0 = data_0 - del data_0 - - # pd_op.conv2d: (4x48x1x40xf32) <- (4x384x1x40xf32, 48x384x1x3xf32) - conv2d_0 = paddle._C_ops.conv2d( - assign_0, parameter_52, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_52 - - # pd_op.batch_norm_: (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__0, - batch_norm__1, - batch_norm__2, - batch_norm__3, - batch_norm__4, - batch_norm__5, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_0, - parameter_51, - parameter_50, - parameter_49, - parameter_48, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_48, parameter_49, parameter_50, parameter_51 - - # pd_op.swish: (4x48x1x40xf32) <- (4x48x1x40xf32) - swish_0 = paddle._C_ops.swish(batch_norm__0) - - # pd_op.conv2d: (4x256x1x40xf32) <- (4x48x1x40xf32, 256x48x1x1xf32) - conv2d_1 = paddle._C_ops.conv2d( - swish_0, parameter_47, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_47 - - # pd_op.batch_norm_: (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - batch_norm__10, - batch_norm__11, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_1, - parameter_46, - parameter_45, - parameter_44, - parameter_43, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_43, parameter_44, parameter_45, parameter_46 - - # pd_op.swish: (4x256x1x40xf32) <- (4x256x1x40xf32) - swish_1 = paddle._C_ops.swish(batch_norm__6) - - # pd_op.flatten: (4x256x40xf32) <- (4x256x1x40xf32) - flatten_0 = paddle._C_ops.flatten(swish_1, 2, 3) - - # pd_op.transpose: (4x40x256xf32) <- (4x256x40xf32) - transpose_0 = paddle._C_ops.transpose(flatten_0, [0, 2, 1]) - del flatten_0 - - # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) - 
layer_norm_0, layer_norm_1, layer_norm_2 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - transpose_0, parameter_42, parameter_41, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_41, parameter_42 - - # pd_op.matmul: (4x40x768xf32) <- (4x40x256xf32, 256x768xf32) - matmul_0 = paddle._C_ops.matmul(layer_norm_0, parameter_40, False, False) - del parameter_40 - - # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) - add_1 = paddle._C_ops.add(matmul_0, parameter_39) - del parameter_39 - - # pd_op.full_int_array: (5xi64) <- () - full_int_array_0 = [0, -1, 3, 8, 32] - - # pd_op.reshape: (4x40x3x8x32xf32) <- (4x40x768xf32, 5xi64) - reshape_0 = paddle._C_ops.reshape(add_1, full_int_array_0) - - # pd_op.transpose: (3x4x8x40x32xf32) <- (4x40x3x8x32xf32) - transpose_1 = paddle._C_ops.transpose(reshape_0, [2, 0, 3, 1, 4]) - del reshape_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_1 = [0] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_1 = full_int_array_1 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_2 = [1] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_2 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_3 = full_int_array_2 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_4 = full_int_array_2 - - # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) - slice_0 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.full: (1xf32) <- () - full_0 = paddle._C_ops.full( - [1], float("0.176777"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_5 = full_0 - - # pd_op.scale: (4x8x40x32xf32) <- (4x8x40x32xf32, 1xf32) - scale_0 = paddle._C_ops.scale(slice_0, full_0, float("0"), True) - del slice_0 - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_3 = [2] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_6 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_7 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_8 = full_int_array_3 - - # pd_op.assign: (1xi64) <- (1xi64) - assign_9 = full_int_array_3 - - # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) - slice_1 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - - # pd_op.full_int_array: (1xi64) <- () - full_int_array_4 = [3] - - # pd_op.assign: (1xi64) <- (1xi64) - assign_10 = full_int_array_4 - - # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) - slice_2 = paddle._C_ops.slice( - transpose_1, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.transpose: (4x8x32x40xf32) <- (4x8x40x32xf32) - transpose_2 = paddle._C_ops.transpose(slice_1, [0, 1, 3, 2]) - del slice_1 - - # pd_op.matmul: (4x8x40x40xf32) <- (4x8x40x32xf32, 4x8x32x40xf32) - matmul_1 = paddle._C_ops.matmul(scale_0, transpose_2, False, False) - - # pd_op.softmax: (4x8x40x40xf32) <- (4x8x40x40xf32) - softmax_0 = paddle._C_ops.softmax(matmul_1, -1) - del matmul_1 - - # pd_op.full: (1xf32) <- () - full_1 = paddle._C_ops.full( - [1], float("0.1"), paddle.float32, paddle.core.CPUPlace() - ) - - # pd_op.assign: (1xf32) <- (1xf32) - assign_11 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_12 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_13 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_14 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_15 = full_1 - - # pd_op.assign: (1xf32) <- (1xf32) - assign_16 = full_1 - - # 
pd_op.assign: (1xf32) <- (1xf32) - assign_17 = full_1 - - # pd_op.dropout: (4x8x40x40xf32, 4x8x40x40xui8) <- (4x8x40x40xf32, None, 1xf32) - dropout_0, dropout_1 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_0, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (4x8x40x32xf32) <- (4x8x40x40xf32, 4x8x40x32xf32) - matmul_2 = paddle._C_ops.matmul(dropout_0, slice_2, False, False) - - # pd_op.transpose: (4x40x8x32xf32) <- (4x8x40x32xf32) - transpose_3 = paddle._C_ops.transpose(matmul_2, [0, 2, 1, 3]) - del matmul_2 - - # pd_op.full_int_array: (3xi64) <- () - full_int_array_5 = [0, -1, 256] - - # pd_op.reshape: (4x40x256xf32) <- (4x40x8x32xf32, 3xi64) - reshape_1 = paddle._C_ops.reshape(transpose_3, full_int_array_5) - - # pd_op.matmul: (4x40x256xf32) <- (4x40x256xf32, 256x256xf32) - matmul_3 = paddle._C_ops.matmul(reshape_1, parameter_38, False, False) - del parameter_38 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) - add_2 = paddle._C_ops.add(matmul_3, parameter_37) - del parameter_37 - - # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) - dropout_2, dropout_3 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_2, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_2 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) - add_3 = paddle._C_ops.add(transpose_0, dropout_2) - - # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) - layer_norm_3, layer_norm_4, layer_norm_5 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_3, parameter_36, parameter_35, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_35, parameter_36 - - # pd_op.matmul: (4x40x512xf32) <- (4x40x256xf32, 256x512xf32) - matmul_4 = paddle._C_ops.matmul(layer_norm_3, parameter_34, False, False) - del parameter_34 - - # pd_op.add: (4x40x512xf32) <- (4x40x512xf32, 512xf32) - add_4 = paddle._C_ops.add(matmul_4, parameter_33) - del parameter_33 - - # pd_op.swish: (4x40x512xf32) <- (4x40x512xf32) - swish_2 = paddle._C_ops.swish(add_4) - - # pd_op.dropout: (4x40x512xf32, 4x40x512xui8) <- (4x40x512xf32, None, 1xf32) - dropout_4, dropout_5 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - swish_2, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del swish_2 - - # pd_op.matmul: (4x40x256xf32) <- (4x40x512xf32, 512x256xf32) - matmul_5 = paddle._C_ops.matmul(dropout_4, parameter_32, False, False) - del parameter_32 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) - add_5 = paddle._C_ops.add(matmul_5, parameter_31) - del parameter_31 - - # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) - dropout_6, dropout_7 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_5, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_5 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) - add_6 = paddle._C_ops.add(add_3, dropout_6) - - # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) - layer_norm_6, layer_norm_7, layer_norm_8 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_6, parameter_30, parameter_29, float("1e-05"), 2 - ), - lambda out: out 
if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_29, parameter_30 - - # pd_op.matmul: (4x40x768xf32) <- (4x40x256xf32, 256x768xf32) - matmul_6 = paddle._C_ops.matmul(layer_norm_6, parameter_28, False, False) - del parameter_28 - - # pd_op.add: (4x40x768xf32) <- (4x40x768xf32, 768xf32) - add_7 = paddle._C_ops.add(matmul_6, parameter_27) - del parameter_27 - - # pd_op.reshape: (4x40x3x8x32xf32) <- (4x40x768xf32, 5xi64) - reshape_2 = paddle._C_ops.reshape(add_7, full_int_array_0) - del full_int_array_0 - - # pd_op.transpose: (3x4x8x40x32xf32) <- (4x40x3x8x32xf32) - transpose_4 = paddle._C_ops.transpose(reshape_2, [2, 0, 3, 1, 4]) - del reshape_2 - - # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) - slice_3 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_1, full_int_array_2, [1], [0] - ) - - # pd_op.scale: (4x8x40x32xf32) <- (4x8x40x32xf32, 1xf32) - scale_1 = paddle._C_ops.scale(slice_3, full_0, float("0"), True) - del slice_3 - - # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) - slice_4 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_2, full_int_array_3, [1], [0] - ) - - # pd_op.slice: (4x8x40x32xf32) <- (3x4x8x40x32xf32, 1xi64, 1xi64) - slice_5 = paddle._C_ops.slice( - transpose_4, [0], full_int_array_3, full_int_array_4, [1], [0] - ) - - # pd_op.transpose: (4x8x32x40xf32) <- (4x8x40x32xf32) - transpose_5 = paddle._C_ops.transpose(slice_4, [0, 1, 3, 2]) - del slice_4 - - # pd_op.matmul: (4x8x40x40xf32) <- (4x8x40x32xf32, 4x8x32x40xf32) - matmul_7 = paddle._C_ops.matmul(scale_1, transpose_5, False, False) - - # pd_op.softmax: (4x8x40x40xf32) <- (4x8x40x40xf32) - softmax_1 = paddle._C_ops.softmax(matmul_7, -1) - del matmul_7 - - # pd_op.dropout: (4x8x40x40xf32, 4x8x40x40xui8) <- (4x8x40x40xf32, None, 1xf32) - dropout_8, dropout_9 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - softmax_1, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - - # pd_op.matmul: (4x8x40x32xf32) <- (4x8x40x40xf32, 4x8x40x32xf32) - matmul_8 = paddle._C_ops.matmul(dropout_8, slice_5, False, False) - - # pd_op.transpose: (4x40x8x32xf32) <- (4x8x40x32xf32) - transpose_6 = paddle._C_ops.transpose(matmul_8, [0, 2, 1, 3]) - del matmul_8 - - # pd_op.reshape: (4x40x256xf32) <- (4x40x8x32xf32, 3xi64) - reshape_3 = paddle._C_ops.reshape(transpose_6, full_int_array_5) - del full_int_array_5 - - # pd_op.matmul: (4x40x256xf32) <- (4x40x256xf32, 256x256xf32) - matmul_9 = paddle._C_ops.matmul(reshape_3, parameter_26, False, False) - del parameter_26 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) - add_8 = paddle._C_ops.add(matmul_9, parameter_25) - del parameter_25 - - # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) - dropout_10, dropout_11 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_8, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_8 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) - add_9 = paddle._C_ops.add(add_6, dropout_10) - - # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) - layer_norm_9, layer_norm_10, layer_norm_11 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_9, parameter_24, parameter_23, float("1e-05"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_23, parameter_24 - - # pd_op.matmul: 
(4x40x512xf32) <- (4x40x256xf32, 256x512xf32) - matmul_10 = paddle._C_ops.matmul(layer_norm_9, parameter_22, False, False) - del parameter_22 - - # pd_op.add: (4x40x512xf32) <- (4x40x512xf32, 512xf32) - add_10 = paddle._C_ops.add(matmul_10, parameter_21) - del parameter_21 - - # pd_op.swish: (4x40x512xf32) <- (4x40x512xf32) - swish_3 = paddle._C_ops.swish(add_10) - - # pd_op.dropout: (4x40x512xf32, 4x40x512xui8) <- (4x40x512xf32, None, 1xf32) - dropout_12, dropout_13 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - swish_3, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del swish_3 - - # pd_op.matmul: (4x40x256xf32) <- (4x40x512xf32, 512x256xf32) - matmul_11 = paddle._C_ops.matmul(dropout_12, parameter_20, False, False) - del parameter_20 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 256xf32) - add_11 = paddle._C_ops.add(matmul_11, parameter_19) - del parameter_19 - - # pd_op.dropout: (4x40x256xf32, 4x40x256xui8) <- (4x40x256xf32, None, 1xf32) - dropout_14, dropout_15 = (lambda x, f: f(x))( - paddle._C_ops.dropout( - add_11, None, full_1, False, "upscale_in_train", 0, False - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None), - ) - del add_11 - - # pd_op.add: (4x40x256xf32) <- (4x40x256xf32, 4x40x256xf32) - add_12 = paddle._C_ops.add(add_9, dropout_14) - - # pd_op.layer_norm: (4x40x256xf32, 4x40xf32, 4x40xf32) <- (4x40x256xf32, 256xf32, 256xf32) - layer_norm_12, layer_norm_13, layer_norm_14 = (lambda x, f: f(x))( - paddle._C_ops.layer_norm( - add_12, parameter_18, parameter_17, float("1e-06"), 2 - ), - lambda out: out if isinstance(out, (list, tuple)) else (out, None, None), - ) - del parameter_17, parameter_18 - - # pd_op.full_int_array: (4xi64) <- () - full_int_array_6 = [0, 1, 40, 256] - - # pd_op.reshape: (4x1x40x256xf32) <- (4x40x256xf32, 4xi64) - reshape_4 = paddle._C_ops.reshape(layer_norm_12, full_int_array_6) - del full_int_array_6 - - # pd_op.transpose: (4x256x1x40xf32) <- (4x1x40x256xf32) - transpose_7 = paddle._C_ops.transpose(reshape_4, [0, 3, 1, 2]) - del reshape_4 - - # pd_op.conv2d: (4x384x1x40xf32) <- (4x256x1x40xf32, 384x256x1x1xf32) - conv2d_2 = paddle._C_ops.conv2d( - transpose_7, parameter_16, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_16 - - # pd_op.batch_norm_: (4x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32, -1xui8) <- (4x384x1x40xf32, 384xf32, 384xf32, 384xf32, 384xf32) - ( - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_2, - parameter_15, - parameter_14, - parameter_13, - parameter_12, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_12, parameter_13, parameter_14, parameter_15 - - # pd_op.swish: (4x384x1x40xf32) <- (4x384x1x40xf32) - swish_4 = paddle._C_ops.swish(batch_norm__12) - - # pd_op.full: (1xi32) <- () - full_2 = paddle._C_ops.full( - [1], float("1"), paddle.int32, paddle.core.CPUPlace() - ) - - # builtin.combine: ([4x384x1x40xf32, 4x384x1x40xf32]) <- (4x384x1x40xf32, 4x384x1x40xf32) - combine_0 = [assign_0, swish_4] - - # pd_op.concat: (4x768x1x40xf32) <- ([4x384x1x40xf32, 4x384x1x40xf32], 1xi32) - concat_0 = paddle._C_ops.concat(combine_0, full_2) - del combine_0 - - # pd_op.conv2d: (4x48x1x40xf32) <- (4x768x1x40xf32, 48x768x1x3xf32) - conv2d_3 = 
paddle._C_ops.conv2d( - concat_0, parameter_11, [1, 1], [0, 1], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_11 - - # pd_op.batch_norm_: (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32, -1xui8) <- (4x48x1x40xf32, 48xf32, 48xf32, 48xf32, 48xf32) - ( - batch_norm__18, - batch_norm__19, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_3, - parameter_10, - parameter_9, - parameter_8, - parameter_7, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_10, parameter_7, parameter_8, parameter_9 - - # pd_op.swish: (4x48x1x40xf32) <- (4x48x1x40xf32) - swish_5 = paddle._C_ops.swish(batch_norm__18) - - # pd_op.conv2d: (4x256x1x40xf32) <- (4x48x1x40xf32, 256x48x1x1xf32) - conv2d_4 = paddle._C_ops.conv2d( - swish_5, parameter_6, [1, 1], [0, 0], "EXPLICIT", [1, 1], 1, "NCHW" - ) - del parameter_6 - - # pd_op.batch_norm_: (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32, -1xui8) <- (4x256x1x40xf32, 256xf32, 256xf32, 256xf32, 256xf32) - ( - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - ) = (lambda x, f: f(x))( - paddle._C_ops.batch_norm( - conv2d_4, - parameter_5, - parameter_4, - parameter_3, - parameter_2, - False, - float("0.9"), - float("1e-05"), - "NCHW", - False, - False, - ), - lambda out: out - if isinstance(out, (list, tuple)) - else (out, None, None, None, None, None), - ) - del parameter_2, parameter_3, parameter_4, parameter_5 - - # pd_op.swish: (4x256x1x40xf32) <- (4x256x1x40xf32) - swish_6 = paddle._C_ops.swish(batch_norm__24) - - # pd_op.squeeze: (4x256x40xf32) <- (4x256x1x40xf32, 1xi64) - squeeze_0 = paddle._C_ops.squeeze(swish_6, full_int_array_3) - - # pd_op.transpose: (4x40x256xf32) <- (4x256x40xf32) - transpose_8 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1]) - del squeeze_0 - - # pd_op.matmul: (4x40x6625xf32) <- (4x40x256xf32, 256x6625xf32) - matmul_12 = paddle._C_ops.matmul(transpose_8, parameter_1, False, False) - del parameter_1 - - # pd_op.add: (4x40x6625xf32) <- (4x40x6625xf32, 6625xf32) - add_0 = paddle._C_ops.add(matmul_12, parameter_0) - del ( - add_1, - add_10, - add_12, - add_3, - add_4, - add_6, - add_7, - add_9, - assign_0, - assign_1, - assign_10, - assign_11, - assign_12, - assign_13, - assign_14, - assign_15, - assign_16, - assign_17, - assign_2, - assign_3, - assign_4, - assign_5, - assign_6, - assign_7, - assign_8, - assign_9, - batch_norm__0, - batch_norm__1, - batch_norm__10, - batch_norm__11, - batch_norm__12, - batch_norm__13, - batch_norm__14, - batch_norm__15, - batch_norm__16, - batch_norm__17, - batch_norm__18, - batch_norm__19, - batch_norm__2, - batch_norm__20, - batch_norm__21, - batch_norm__22, - batch_norm__23, - batch_norm__24, - batch_norm__25, - batch_norm__26, - batch_norm__27, - batch_norm__28, - batch_norm__29, - batch_norm__3, - batch_norm__4, - batch_norm__5, - batch_norm__6, - batch_norm__7, - batch_norm__8, - batch_norm__9, - concat_0, - conv2d_0, - conv2d_1, - conv2d_2, - conv2d_3, - conv2d_4, - dropout_0, - dropout_1, - dropout_10, - dropout_11, - dropout_12, - dropout_13, - dropout_14, - dropout_15, - dropout_2, - dropout_3, - dropout_4, - dropout_5, - dropout_6, - dropout_7, - dropout_8, - dropout_9, - full_0, - full_1, - full_2, - full_int_array_1, - full_int_array_2, - full_int_array_3, - full_int_array_4, - layer_norm_0, - layer_norm_1, - 
layer_norm_10, - layer_norm_11, - layer_norm_12, - layer_norm_13, - layer_norm_14, - layer_norm_2, - layer_norm_3, - layer_norm_4, - layer_norm_5, - layer_norm_6, - layer_norm_7, - layer_norm_8, - layer_norm_9, - matmul_0, - matmul_10, - matmul_11, - matmul_12, - matmul_3, - matmul_4, - matmul_5, - matmul_6, - matmul_9, - parameter_0, - reshape_1, - reshape_3, - scale_0, - scale_1, - slice_2, - slice_5, - softmax_0, - softmax_1, - swish_0, - swish_1, - swish_4, - swish_5, - swish_6, - transpose_0, - transpose_1, - transpose_2, - transpose_3, - transpose_4, - transpose_5, - transpose_6, - transpose_7, - transpose_8, - ) - - return add_0 diff --git a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/weight_meta.py b/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/weight_meta.py deleted file mode 100644 index 2ccbe4435..000000000 --- a/paddle_samples/PaddleX/ch_RepSVTR_rec/subgraph_5/weight_meta.py +++ /dev/null @@ -1,565 +0,0 @@ -class Program_weight_tensor_parameter_0: - name = "parameter_0" - shape = [6625] - dtype = "float32" - min_val = float("-1.42013") - max_val = float("1.27473") - mean = float("-0.109522") - std = float("0.088233") - data = None - - -class Program_weight_tensor_parameter_1: - name = "parameter_1" - shape = [256, 6625] - dtype = "float32" - min_val = float("-0.624946") - max_val = float("0.302852") - mean = float("-0.152493") - std = float("0.0693287") - data = None - - -class Program_weight_tensor_parameter_2: - name = "parameter_2" - shape = [256] - dtype = "float32" - min_val = float("-3.40881") - max_val = float("11.7084") - mean = float("0.105069") - std = float("1.48036") - data = None - - -class Program_weight_tensor_parameter_3: - name = "parameter_3" - shape = [256] - dtype = "float32" - min_val = float("2.06324") - max_val = float("5.60261") - mean = float("3.15274") - std = float("0.558277") - data = None - - -class Program_weight_tensor_parameter_4: - name = "parameter_4" - shape = [256] - dtype = "float32" - min_val = float("0.0213249") - max_val = float("0.0847453") - mean = float("0.0345111") - std = float("0.0069481") - data = None - - -class Program_weight_tensor_parameter_5: - name = "parameter_5" - shape = [256] - dtype = "float32" - min_val = float("-1.05723") - max_val = float("1.2479") - mean = float("0.0983873") - std = float("0.319265") - data = None - - -class Program_weight_tensor_parameter_6: - name = "parameter_6" - shape = [256, 48, 1, 1] - dtype = "float32" - min_val = float("-0.47424") - max_val = float("0.291717") - mean = float("2.26874e-05") - std = float("0.0562394") - data = None - - -class Program_weight_tensor_parameter_7: - name = "parameter_7" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_8: - name = "parameter_8" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_9: - name = "parameter_9" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_10: - name = "parameter_10" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_11: - name = "parameter_11" - shape = [48, 768, 1, 3] - dtype = "float32" - min_val = float("-0.580903") - max_val = float("0.786753") - mean = float("-0.000179171") - std = float("0.0578308") - data = None - - -class Program_weight_tensor_parameter_12: - name = 
"parameter_12" - shape = [384] - dtype = "float32" - min_val = float("-3.89553") - max_val = float("0.833467") - mean = float("-1.09152") - std = float("0.396479") - data = None - - -class Program_weight_tensor_parameter_13: - name = "parameter_13" - shape = [384] - dtype = "float32" - min_val = float("0.473132") - max_val = float("2.97892") - mean = float("0.818498") - std = float("0.239801") - data = None - - -class Program_weight_tensor_parameter_14: - name = "parameter_14" - shape = [384] - dtype = "float32" - min_val = float("0.323835") - max_val = float("3.16203") - mean = float("0.725941") - std = float("0.294057") - data = None - - -class Program_weight_tensor_parameter_15: - name = "parameter_15" - shape = [384] - dtype = "float32" - min_val = float("-2.3068") - max_val = float("3.14234") - mean = float("0.141314") - std = float("0.818517") - data = None - - -class Program_weight_tensor_parameter_16: - name = "parameter_16" - shape = [384, 256, 1, 1] - dtype = "float32" - min_val = float("-0.371578") - max_val = float("0.396831") - mean = float("0.000731521") - std = float("0.0558392") - data = None - - -class Program_weight_tensor_parameter_17: - name = "parameter_17" - shape = [256] - dtype = "float32" - min_val = float("-1.21851") - max_val = float("1.36982") - mean = float("-0.0106968") - std = float("0.525313") - data = None - - -class Program_weight_tensor_parameter_18: - name = "parameter_18" - shape = [256] - dtype = "float32" - min_val = float("0.25639") - max_val = float("2.01074") - mean = float("1.25786") - std = float("0.27881") - data = None - - -class Program_weight_tensor_parameter_19: - name = "parameter_19" - shape = [256] - dtype = "float32" - min_val = float("-4.55386") - max_val = float("1.85077") - mean = float("-0.0756713") - std = float("0.620359") - data = None - - -class Program_weight_tensor_parameter_20: - name = "parameter_20" - shape = [512, 256] - dtype = "float32" - min_val = float("-1.04984") - max_val = float("0.773719") - mean = float("0.00144559") - std = float("0.092633") - data = None - - -class Program_weight_tensor_parameter_21: - name = "parameter_21" - shape = [512] - dtype = "float32" - min_val = float("-2.14911") - max_val = float("-0.199502") - mean = float("-1.07301") - std = float("0.295984") - data = None - - -class Program_weight_tensor_parameter_22: - name = "parameter_22" - shape = [256, 512] - dtype = "float32" - min_val = float("-0.481137") - max_val = float("0.390545") - mean = float("-0.0156815") - std = float("0.0709277") - data = None - - -class Program_weight_tensor_parameter_23: - name = "parameter_23" - shape = [256] - dtype = "float32" - min_val = float("-1.00171") - max_val = float("2.68567") - mean = float("0.317346") - std = float("0.454044") - data = None - - -class Program_weight_tensor_parameter_24: - name = "parameter_24" - shape = [256] - dtype = "float32" - min_val = float("0.53436") - max_val = float("2.32077") - mean = float("1.51709") - std = float("0.268321") - data = None - - -class Program_weight_tensor_parameter_25: - name = "parameter_25" - shape = [256] - dtype = "float32" - min_val = float("-0.891405") - max_val = float("1.4711") - mean = float("0.00256555") - std = float("0.229186") - data = None - - -class Program_weight_tensor_parameter_26: - name = "parameter_26" - shape = [256, 256] - dtype = "float32" - min_val = float("-0.421549") - max_val = float("0.400859") - mean = float("-7.66968e-05") - std = float("0.072737") - data = None - - -class Program_weight_tensor_parameter_27: - name = 
"parameter_27" - shape = [768] - dtype = "float32" - min_val = float("-2.5866") - max_val = float("2.47328") - mean = float("0.027699") - std = float("0.443614") - data = None - - -class Program_weight_tensor_parameter_28: - name = "parameter_28" - shape = [256, 768] - dtype = "float32" - min_val = float("-0.529306") - max_val = float("0.676402") - mean = float("-6.66809e-07") - std = float("0.0656268") - data = None - - -class Program_weight_tensor_parameter_29: - name = "parameter_29" - shape = [256] - dtype = "float32" - min_val = float("-1.14134") - max_val = float("0.517422") - mean = float("0.0267431") - std = float("0.270125") - data = None - - -class Program_weight_tensor_parameter_30: - name = "parameter_30" - shape = [256] - dtype = "float32" - min_val = float("0.169902") - max_val = float("1.51132") - mean = float("0.918719") - std = float("0.190758") - data = None - - -class Program_weight_tensor_parameter_31: - name = "parameter_31" - shape = [256] - dtype = "float32" - min_val = float("-1.20704") - max_val = float("0.476782") - mean = float("-0.0173341") - std = float("0.212343") - data = None - - -class Program_weight_tensor_parameter_32: - name = "parameter_32" - shape = [512, 256] - dtype = "float32" - min_val = float("-0.560177") - max_val = float("0.690259") - mean = float("-0.000401634") - std = float("0.0821862") - data = None - - -class Program_weight_tensor_parameter_33: - name = "parameter_33" - shape = [512] - dtype = "float32" - min_val = float("-1.99014") - max_val = float("0.0540556") - mean = float("-0.874845") - std = float("0.39334") - data = None - - -class Program_weight_tensor_parameter_34: - name = "parameter_34" - shape = [256, 512] - dtype = "float32" - min_val = float("-0.542236") - max_val = float("0.446513") - mean = float("-0.01983") - std = float("0.0760177") - data = None - - -class Program_weight_tensor_parameter_35: - name = "parameter_35" - shape = [256] - dtype = "float32" - min_val = float("-1.96161") - max_val = float("1.75132") - mean = float("0.47535") - std = float("0.586567") - data = None - - -class Program_weight_tensor_parameter_36: - name = "parameter_36" - shape = [256] - dtype = "float32" - min_val = float("-0.991044") - max_val = float("2.01325") - mean = float("1.35979") - std = float("0.352656") - data = None - - -class Program_weight_tensor_parameter_37: - name = "parameter_37" - shape = [256] - dtype = "float32" - min_val = float("-0.891919") - max_val = float("0.533113") - mean = float("-0.00336275") - std = float("0.138964") - data = None - - -class Program_weight_tensor_parameter_38: - name = "parameter_38" - shape = [256, 256] - dtype = "float32" - min_val = float("-0.324628") - max_val = float("0.305929") - mean = float("-6.48568e-05") - std = float("0.0643931") - data = None - - -class Program_weight_tensor_parameter_39: - name = "parameter_39" - shape = [768] - dtype = "float32" - min_val = float("-1.90397") - max_val = float("1.87618") - mean = float("-0.00592988") - std = float("0.403207") - data = None - - -class Program_weight_tensor_parameter_40: - name = "parameter_40" - shape = [256, 768] - dtype = "float32" - min_val = float("-0.537901") - max_val = float("0.362684") - mean = float("0.000260092") - std = float("0.0648239") - data = None - - -class Program_weight_tensor_parameter_41: - name = "parameter_41" - shape = [256] - dtype = "float32" - min_val = float("-0.886127") - max_val = float("0.783481") - mean = float("0.0549286") - std = float("0.204608") - data = None - - -class 
Program_weight_tensor_parameter_42: - name = "parameter_42" - shape = [256] - dtype = "float32" - min_val = float("-0.37332") - max_val = float("1.68426") - mean = float("0.547497") - std = float("0.291976") - data = None - - -class Program_weight_tensor_parameter_43: - name = "parameter_43" - shape = [256] - dtype = "float32" - min_val = float("-2.86113") - max_val = float("3.54946") - mean = float("0.0121762") - std = float("1.13695") - data = None - - -class Program_weight_tensor_parameter_44: - name = "parameter_44" - shape = [256] - dtype = "float32" - min_val = float("1.04742") - max_val = float("7.67621") - mean = float("2.19113") - std = float("0.99277") - data = None - - -class Program_weight_tensor_parameter_45: - name = "parameter_45" - shape = [256] - dtype = "float32" - min_val = float("0.0530647") - max_val = float("1.96765") - mean = float("0.166143") - std = float("0.228233") - data = None - - -class Program_weight_tensor_parameter_46: - name = "parameter_46" - shape = [256] - dtype = "float32" - min_val = float("-3.83109") - max_val = float("3.79473") - mean = float("-0.212224") - std = float("1.16603") - data = None - - -class Program_weight_tensor_parameter_47: - name = "parameter_47" - shape = [256, 48, 1, 1] - dtype = "float32" - min_val = float("-0.632592") - max_val = float("0.494177") - mean = float("-0.00227783") - std = float("0.0566553") - data = None - - -class Program_weight_tensor_parameter_48: - name = "parameter_48" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_49: - name = "parameter_49" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_50: - name = "parameter_50" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_51: - name = "parameter_51" - shape = [48] - dtype = "float32" - min_val = float("0") - max_val = float("0.5") - data = None - - -class Program_weight_tensor_parameter_52: - name = "parameter_52" - shape = [48, 384, 1, 3] - dtype = "float32" - min_val = float("-0.502963") - max_val = float("0.518951") - mean = float("-0.000231677") - std = float("0.0545732") - data = None From 031ecb4e70fae921d2695b5658c28de52dd3ef4b Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 15 Sep 2025 11:02:58 +0800 Subject: [PATCH 5/5] Fix a typo and add some printing hints. 
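The rename fixes the misspelled shape_petches_BlazeFace-FPN-SSH directory
(now shape_patches_BlazeFace-FPN-SSH), and the new print prefixes make the
two tools distinguishable when their argument dumps land in the same log.
A minimal sketch of the expected console output after this patch (the
Namespace fields depend on the arguments actually passed and are omitted
here):

    [Check Redundancy Arguments] Namespace(...)
    [Validate Arguments] Namespace(...)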
--- graph_net/paddle/check_redundant_incrementally.py | 2 +- graph_net/paddle/validate.py | 1 + .../input_meta.py | 0 .../weight_meta.py | 0 4 files changed, 2 insertions(+), 1 deletion(-) rename paddle_samples/PaddleX/BlazeFace/subgraph_3/{shape_petches_BlazeFace-FPN-SSH => shape_patches_BlazeFace-FPN-SSH}/input_meta.py (100%) rename paddle_samples/PaddleX/BlazeFace/subgraph_3/{shape_petches_BlazeFace-FPN-SSH => shape_patches_BlazeFace-FPN-SSH}/weight_meta.py (100%) diff --git a/graph_net/paddle/check_redundant_incrementally.py b/graph_net/paddle/check_redundant_incrementally.py index 12a68cb78..c5b59839a 100644 --- a/graph_net/paddle/check_redundant_incrementally.py +++ b/graph_net/paddle/check_redundant_incrementally.py @@ -110,5 +110,5 @@ def main(args): help="Path to GraphNet samples", ) args = parser.parse_args() - print(args) + print(f"[Check Redundancy Arguments] {args}") main(args=args) diff --git a/graph_net/paddle/validate.py b/graph_net/paddle/validate.py index 00dca49a3..9570d6cc2 100644 --- a/graph_net/paddle/validate.py +++ b/graph_net/paddle/validate.py @@ -141,4 +141,5 @@ def main(args): help="Path to GraphNet samples folder. e.g '../../samples'", ) args = parser.parse_args() + print(f"[Validate Arguments] {args}") main(args=args) diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/input_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_patches_BlazeFace-FPN-SSH/input_meta.py similarity index 100% rename from paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/input_meta.py rename to paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_patches_BlazeFace-FPN-SSH/input_meta.py diff --git a/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/weight_meta.py b/paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_patches_BlazeFace-FPN-SSH/weight_meta.py similarity index 100% rename from paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_petches_BlazeFace-FPN-SSH/weight_meta.py rename to paddle_samples/PaddleX/BlazeFace/subgraph_3/shape_patches_BlazeFace-FPN-SSH/weight_meta.py